code
stringlengths
38
801k
repo_path
stringlengths
6
263
pub const sabaton = @import("../../sabaton.zig"); pub const io = sabaton.io_impl.uart_mmio_32; pub const ElfType = [*]u8; pub const panic = sabaton.panic; const std = @import("std"); pub const display = @import("display.zig"); pub const acpi = struct { pub fn init() void { if(sabaton.fw_cfg.find_file("etc/acpi/tables")) |tables| { if(sabaton.fw_cfg.find_file("etc/acpi/rsdp")) |rsdp| { const rsdp_bytes = sabaton.pmm.alloc_aligned(rsdp.size, .Hole); const table_bytes = sabaton.pmm.alloc_aligned(tables.size, .Hole); rsdp.read(rsdp_bytes); tables.read(table_bytes); sabaton.acpi.init(rsdp_bytes, table_bytes); } } } }; var page_size: u64 = 0x1000; pub fn get_page_size() u64 { return page_size; } export fn _main() linksection(".text.main") noreturn { page_size = sabaton.paging.detect_page_size(); sabaton.fw_cfg.init_from_dtb(); @call(.{.modifier = .always_inline}, sabaton.main, .{}); } pub fn get_kernel() [*]u8 { if(sabaton.fw_cfg.find_file("opt/Sabaton/kernel")) |kernel| { sabaton.log_hex("fw_cfg kernel of size ", kernel.size); const kernel_bytes = sabaton.pmm.alloc_aligned(kernel.size, .ReclaimableData); kernel.read(kernel_bytes); return kernel_bytes.ptr; } return sabaton.near("kernel_file_loc").read([*]u8); } pub fn get_dtb() []u8 { return sabaton.near("dram_base").read([*]u8)[0..0x100000]; } pub fn get_dram() []u8 { return sabaton.near("dram_base").read([*]u8)[0..get_dram_size()]; } pub fn map_platform(root: *sabaton.paging.Root) void { sabaton.paging.map(0, 0, 1024 * 1024 * 1024, .rw, .mmio, root); sabaton.paging.map(sabaton.upper_half_phys_base, 0, 1024 * 1024 * 1024, .rw, .mmio, root); sabaton.pci.init_from_dtb(root); } // Dram size varies as you can set different amounts of RAM for your VM fn get_dram_size() u64 { const memory_blob = sabaton.vital(sabaton.dtb.find("memory@", "reg"), "Cannot find memory in dtb", false); const base = std.mem.readIntBig(u64, memory_blob[0..8]); const size = std.mem.readIntBig(u64, memory_blob[8..16]); if(sabaton.safety and base != 
sabaton.near("dram_base").read(u64)) { sabaton.log_hex("dtb has wrong memory base: ", base); unreachable; } return size; } pub fn add_platform_tags(kernel_header: *sabaton.Stivale2hdr) void { sabaton.add_tag(&sabaton.near("uart_tag").addr(sabaton.Stivale2tag)[0]); sabaton.add_tag(&sabaton.near("devicetree_tag").addr(sabaton.Stivale2tag)[0]); }
src/platform/virt_aarch64/main.zig
const std = @import("std"); const assert = std.debug.assert; pub const config = @import("config.zig"); pub const Account = packed struct { id: u128, /// Opaque third-party identifier to link this account (many-to-one) to an external entity: user_data: u128, /// Reserved for accounting policy primitives: reserved: [48]u8, unit: u16, /// A chart of accounts code describing the type of account (e.g. clearing, settlement): code: u16, flags: AccountFlags, debits_reserved: u64, debits_accepted: u64, credits_reserved: u64, credits_accepted: u64, timestamp: u64 = 0, comptime { assert(@sizeOf(Account) == 128); } pub fn debits_exceed_credits(self: *const Account, amount: u64) bool { return (self.flags.debits_must_not_exceed_credits and self.debits_reserved + self.debits_accepted + amount > self.credits_accepted); } pub fn credits_exceed_debits(self: *const Account, amount: u64) bool { return (self.flags.credits_must_not_exceed_debits and self.credits_reserved + self.credits_accepted + amount > self.debits_accepted); } pub fn jsonStringify(self: Account, options: std.json.StringifyOptions, writer: anytype) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"id\":{},", .{self.id}); try std.fmt.format(writer, "\"user_data\":\"{x:0>32}\",", .{self.user_data}); try std.fmt.format(writer, "\"reserved\":\"{x:0>48}\",", .{self.reserved}); try std.fmt.format(writer, "\"unit\":{},", .{self.unit}); try std.fmt.format(writer, "\"code\":{},", .{self.code}); try writer.writeAll("\"flags\":"); try std.json.stringify(self.flags, .{}, writer); try writer.writeAll(","); try std.fmt.format(writer, "\"debits_reserved\":{},", .{self.debits_reserved}); try std.fmt.format(writer, "\"debits_accepted\":{},", .{self.debits_accepted}); try std.fmt.format(writer, "\"credits_reserved\":{},", .{self.credits_reserved}); try std.fmt.format(writer, "\"credits_accepted\":{},", .{self.credits_accepted}); try std.fmt.format(writer, "\"timestamp\":\"{}\"", .{self.timestamp}); try 
writer.writeAll("}"); } }; pub const AccountFlags = packed struct { /// When the .linked flag is specified, it links an event with the next event in the batch, to /// create a chain of events, of arbitrary length, which all succeed or fail together. The tail /// of a chain is denoted by the first event without this flag. The last event in a batch may /// therefore never have the .linked flag set as this would leave a chain open-ended. Multiple /// chains or individual events may coexist within a batch to succeed or fail independently. /// Events within a chain are executed within order, or are rolled back on error, so that the /// effect of each event in the chain is visible to the next, and so that the chain is either /// visible or invisible as a unit to subsequent events after the chain. The event that was the /// first to break the chain will have a unique error result. Other events in the chain will /// have their error result set to .linked_event_failed. linked: bool = false, debits_must_not_exceed_credits: bool = false, credits_must_not_exceed_debits: bool = false, padding: u29 = 0, comptime { assert(@sizeOf(AccountFlags) == @sizeOf(u32)); } pub fn jsonStringify( self: AccountFlags, options: std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{}"); } }; pub const Transfer = packed struct { id: u128, debit_account_id: u128, credit_account_id: u128, /// Opaque third-party identifier to link this transfer (many-to-one) to an external entity: user_data: u128, /// Reserved for accounting policy primitives: reserved: [32]u8, timeout: u64, /// A chart of accounts code describing the reason for the transfer (e.g. 
deposit, settlement): code: u32, flags: TransferFlags, amount: u64, timestamp: u64 = 0, comptime { assert(@sizeOf(Transfer) == 128); } pub fn jsonStringify( self: Transfer, options: std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"id\":{},", .{self.id}); try std.fmt.format(writer, "\"debit_account_id\":{},", .{self.debit_account_id}); try std.fmt.format(writer, "\"credit_account_id\":{},", .{self.credit_account_id}); try std.fmt.format(writer, "\"user_data\":\"{x:0>32}\",", .{self.user_data}); try std.fmt.format(writer, "\"reserved\":\"{x:0>64}\",", .{self.reserved}); try std.fmt.format(writer, "\"code\":{},", .{self.code}); try writer.writeAll("\"flags\":"); try std.json.stringify(self.flags, .{}, writer); try writer.writeAll(","); try std.fmt.format(writer, "\"amount\":{},", .{self.amount}); try std.fmt.format(writer, "\"timeout\":{},", .{self.timeout}); try std.fmt.format(writer, "\"timestamp\":{}", .{self.timestamp}); try writer.writeAll("}"); } }; pub const TransferFlags = packed struct { linked: bool = false, two_phase_commit: bool = false, condition: bool = false, padding: u29 = 0, comptime { assert(@sizeOf(TransferFlags) == @sizeOf(u32)); } pub fn jsonStringify( self: TransferFlags, options: std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"accept\":{},", .{self.accept}); try std.fmt.format(writer, "\"reject\":{},", .{self.reject}); try std.fmt.format(writer, "\"auto_commit\":{},", .{self.auto_commit}); try std.fmt.format(writer, "\"condition\":{}", .{self.condition}); try writer.writeAll("}"); } }; pub const Commit = packed struct { id: u128, /// Reserved for accounting policy primitives: reserved: [32]u8, /// A chart of accounts code describing the reason for the accept/reject: code: u32, flags: CommitFlags, timestamp: u64 = 0, comptime { assert(@sizeOf(Commit) == 64); } pub fn jsonStringify( self: Commit, options: 
std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"id\":{},", .{self.id}); try std.fmt.format(writer, "\"reserved\":\"{x:0>64}\",", .{self.reserved}); try std.fmt.format(writer, "\"code\":{},", .{self.code}); try writer.writeAll("\"flags\":"); try std.json.stringify(self.flags, .{}, writer); try writer.writeAll(","); try std.fmt.format(writer, "\"timestamp\":{}", .{self.timestamp}); try writer.writeAll("}"); } }; pub const CommitFlags = packed struct { linked: bool = false, reject: bool = false, preimage: bool = false, padding: u29 = 0, comptime { assert(@sizeOf(CommitFlags) == @sizeOf(u32)); } pub fn jsonStringify( self: CommitFlags, options: std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"accept\":{},", .{self.accept}); try std.fmt.format(writer, "\"reject\":{},", .{self.reject}); try std.fmt.format(writer, "\"preimage\":{}", .{self.preimage}); try writer.writeAll("}"); } }; pub const CreateAccountResult = packed enum(u32) { ok, linked_event_failed, exists, exists_with_different_user_data, exists_with_different_reserved_field, exists_with_different_unit, exists_with_different_code, exists_with_different_flags, exceeds_credits, exceeds_debits, reserved_field, reserved_flag_padding, }; pub const CreateTransferResult = packed enum(u32) { ok, linked_event_failed, exists, exists_with_different_debit_account_id, exists_with_different_credit_account_id, exists_with_different_user_data, exists_with_different_reserved_field, exists_with_different_code, exists_with_different_amount, exists_with_different_timeout, exists_with_different_flags, exists_and_already_committed_and_accepted, exists_and_already_committed_and_rejected, reserved_field, reserved_flag_padding, debit_account_not_found, credit_account_not_found, accounts_are_the_same, accounts_have_different_units, amount_is_zero, exceeds_credits, exceeds_debits, two_phase_commit_must_timeout, 
timeout_reserved_for_two_phase_commit, }; pub const CommitTransferResult = packed enum(u32) { ok, linked_event_failed, reserved_field, reserved_flag_padding, transfer_not_found, transfer_not_two_phase_commit, transfer_expired, already_auto_committed, already_committed, already_committed_but_accepted, already_committed_but_rejected, debit_account_not_found, credit_account_not_found, debit_amount_was_not_reserved, credit_amount_was_not_reserved, exceeds_credits, exceeds_debits, condition_requires_preimage, preimage_requires_condition, preimage_invalid, }; pub const CreateAccountsResult = packed struct { index: u32, result: CreateAccountResult, comptime { assert(@sizeOf(CreateAccountsResult) == 8); } pub fn jsonStringify( self: CreateAccountResults, options: std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"index\":{},", .{self.index}); try std.fmt.format(writer, "\"result\":\"{}\"", .{@tagName(self.result)}); try writer.writeAll("}"); } }; pub const CreateTransfersResult = packed struct { index: u32, result: CreateTransferResult, comptime { assert(@sizeOf(CreateTransfersResult) == 8); } pub fn jsonStringify( self: CreateTransferResults, options: std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"index\":{},", .{self.index}); try std.fmt.format(writer, "\"result\":\"{}\"", .{@tagName(self.result)}); try writer.writeAll("}"); } }; pub const CommitTransfersResult = packed struct { index: u32, result: CommitTransferResult, comptime { assert(@sizeOf(CommitTransfersResult) == 8); } pub fn jsonStringify( self: CommitTransferResults, options: std.json.StringifyOptions, writer: anytype, ) !void { try writer.writeAll("{"); try std.fmt.format(writer, "\"index\":{},", .{self.index}); try std.fmt.format(writer, "\"result\":\"{}\"", .{@tagName(self.result)}); try writer.writeAll("}"); } }; comptime { const target = std.Target.current; if (target.os.tag != .linux 
and !target.isDarwin()) { @compileError("linux or macos required for io"); } // We require little-endian architectures everywhere for efficient network deserialization: if (target.cpu.arch.endian() != std.builtin.Endian.Little) { @compileError("big-endian systems not supported"); } }
src/tigerbeetle.zig
const std = @import("std"); const expect = std.testing.expect; // would cause lose wake up event // threadlocal var threadlocal_resetevent = std.AutoResetEvent{}; /// Condition variable /// support wait(), timedWait(), signal(), signalAll() pub const Condition = struct { ///this struct is on stack pub const Node = struct { status: i32=1, //if status equal 1, Node is in queue or about to be in queue, orelse it has been removed from queue prev: ?*Node=null, next: ?*Node=null, reset: std.ResetEvent, }; head: ?*Node=null, tail: ?*Node=null, mutex: ?*std.Mutex, const Self = @This(); /// Initializes a new condition variable. pub fn init(mutex: *std.Mutex) Self { return Self {.head = null, .tail=null, .mutex=mutex}; } fn remove_head(self: *Self) *Node{ var node: *Node = self.head.?; self.head = node.next; if(node.next == null){ expect(self.tail == node); self.tail = null; } else { node.next.?.prev = null; } return node; } fn remove_waiter(self: *Self, node: *Node) void { expect(self.head != null); if (self.head.? == node){ const n = self.remove_head(); expect(n == node); } else { const prev = node.prev.?; prev.next = node.next; if(node.next != null){ node.next.?.prev = prev; } else { expect(self.tail == node); self.tail = prev; } } } fn add_waiter(self: *Self, node: *Node) void { if(self.head == null){ expect(self.tail == null); self.head = node; self.tail = node; } else { expect(self.tail != null); expect(self.tail != node); node.prev = self.tail; self.tail.?.next = node; self.tail = node; } } // wait until signaled. 
The thread who call signal()/signalAll() remove node from queue pub fn wait(self: *Self) void { var node = Node{.status = 1, .reset = std.ResetEvent.init()}; defer node.reset.deinit(); self.add_waiter(&node); var lock = std.Mutex.Held{.mutex = self.mutex.?}; lock.release(); //while(@atomicLoad(i32, &node.status, .SeqCst) != 0) { node.reset.wait(); var val = @atomicLoad(i32, &node.status, .SeqCst); if(val != 0){ std.debug.print("waked but status not 0 : {} {} {*} {*}\n", .{std.Thread.getCurrentId(),val, node.next, node.prev}); } //} lock = self.mutex.?.acquire(); } /// wait until signaled or timeout, if signaled, node has already been dequeue, orelse its still in queue, /// and remove node ourselves pub fn timedWait(self: *Self, timeout: u64) i64 { var node = Node{.status = 1, .reset = std.ResetEvent.init()}; defer node.reset.deinit(); self.add_waiter(&node); var lock = std.Mutex.Held{.mutex = self.mutex.?}; lock.release(); const deadline = std.time.nanoTimestamp() + @intCast(i128, timeout); const ret = node.reset.timedWait(timeout); const remain = deadline - std.time.nanoTimestamp(); if(@atomicLoad(i32, &node.status, .SeqCst) == 0) { lock = self.mutex.?.acquire(); return @intCast(i64, remain); } else { lock = self.mutex.?.acquire(); if (node.status != 0) self.remove_waiter(&node); if(ret) |_| { expect(false); // unexpected waken up } else |timeout_err| {} return @intCast(i64, remain); } } pub fn signal(self: *Self) void { if(self.head == null) return; var node: *Node = self.remove_head(); expect(node.prev == null); expect(node.next == self.head); node.next = null; node.prev = null; node.status = 0; node.reset.set(); } pub fn signalAll(self: *Self) void { var node: ?*Node = self.head; self.head = null; self.tail = null; while(node != null) { var cur = node.?; node = cur.next; cur.next = null; cur.prev = null; cur.status = 0; cur.reset.set(); } } };
Condition.zig
const std = @import("std"); const math = std.math; const assert = std.debug.assert; const L = std.unicode.utf8ToUtf16LeStringLiteral; const zwin32 = @import("zwin32"); const w = zwin32.base; const d3d12 = zwin32.d3d12; const hrPanic = zwin32.hrPanic; const hrPanicOnFail = zwin32.hrPanicOnFail; const zd3d12 = @import("zd3d12"); const common = @import("common"); const GuiRenderer = common.GuiRenderer; const c = common.c; const zm = @import("zmath"); // We need to export below symbols for DirectX 12 Agility SDK. pub export const D3D12SDKVersion: u32 = 4; pub export const D3D12SDKPath: [*:0]const u8 = ".\\d3d12\\"; const content_dir = @import("build_options").content_dir; const window_name = "zig-gamedev: intro 5"; const window_width = 1920; const window_height = 1080; const grid_size = 1024; const max_num_vertices = grid_size * grid_size; // By convention, we use 'Pso_' prefix for structures that are also defined in HLSL code // (see 'DrawConst' and 'FrameConst' in intro5.hlsl). const Pso_DrawConst = struct { object_to_world: [16]f32, }; const Pso_FrameConst = struct { world_to_clip: [16]f32, }; const Pso_Vertex = struct { position: [3]f32, }; const Vertex = struct { x: f32, y: f32, z: f32, }; const DemoState = struct { gctx: zd3d12.GraphicsContext, guictx: GuiRenderer, frame_stats: common.FrameStats, main_pso: zd3d12.PipelineHandle, vertex_buffer: zd3d12.ResourceHandle, vertex_data: std.MultiArrayList(Vertex), depth_texture: zd3d12.ResourceHandle, depth_texture_dsv: d3d12.CPU_DESCRIPTOR_HANDLE, simd_width: i32, camera: struct { position: [3]f32, forward: [3]f32, pitch: f32, yaw: f32, }, mouse: struct { cursor_prev_x: i32, cursor_prev_y: i32, }, }; fn init(gpa_allocator: std.mem.Allocator) DemoState { // Create application window and initialize dear imgui library. const window = common.initWindow(gpa_allocator, window_name, window_width, window_height) catch unreachable; // Create temporary memory allocator for use during initialization. 
We pass this allocator to all // subsystems that need memory and then free everyting with a single deallocation. var arena_allocator_state = std.heap.ArenaAllocator.init(gpa_allocator); defer arena_allocator_state.deinit(); const arena_allocator = arena_allocator_state.allocator(); // Create DirectX 12 context. var gctx = zd3d12.GraphicsContext.init(gpa_allocator, window); const main_pso = blk: { const input_layout_desc = [_]d3d12.INPUT_ELEMENT_DESC{ d3d12.INPUT_ELEMENT_DESC.init("POSITION", 0, .R32G32B32_FLOAT, 0, 0, .PER_VERTEX_DATA, 0), }; var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault(); pso_desc.InputLayout = .{ .pInputElementDescs = &input_layout_desc, .NumElements = input_layout_desc.len, }; pso_desc.RTVFormats[0] = .R8G8B8A8_UNORM; pso_desc.NumRenderTargets = 1; pso_desc.DSVFormat = .D32_FLOAT; pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf; pso_desc.PrimitiveTopologyType = .POINT; break :blk gctx.createGraphicsShaderPipeline( arena_allocator, &pso_desc, content_dir ++ "shaders/intro5.vs.cso", content_dir ++ "shaders/intro5.ps.cso", ); }; // Create vertex buffer and return a *handle* to the underlying Direct3D12 resource. const vertex_buffer = gctx.createCommittedResource( .DEFAULT, d3d12.HEAP_FLAG_NONE, &d3d12.RESOURCE_DESC.initBuffer(max_num_vertices * @sizeOf(Vertex)), d3d12.RESOURCE_STATE_COPY_DEST, null, ) catch |err| hrPanic(err); const vertex_data = blk: { var vertex_data = std.MultiArrayList(Vertex){}; vertex_data.ensureTotalCapacity(gpa_allocator, max_num_vertices) catch unreachable; var i: u32 = 0; while (i < max_num_vertices) : (i += 1) { vertex_data.appendAssumeCapacity(.{ .x = 0, .y = 0, .z = 0 }); } break :blk vertex_data; }; // Create depth texture resource. 
const depth_texture = gctx.createCommittedResource( .DEFAULT, d3d12.HEAP_FLAG_NONE, &blk: { var desc = d3d12.RESOURCE_DESC.initTex2d(.D32_FLOAT, gctx.viewport_width, gctx.viewport_height, 1); desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_DEPTH_STENCIL | d3d12.RESOURCE_FLAG_DENY_SHADER_RESOURCE; break :blk desc; }, d3d12.RESOURCE_STATE_DEPTH_WRITE, &d3d12.CLEAR_VALUE.initDepthStencil(.D32_FLOAT, 1.0, 0), ) catch |err| hrPanic(err); // Create depth texture 'view' - a descriptor which can be send to Direct3D 12 API. const depth_texture_dsv = gctx.allocateCpuDescriptors(.DSV, 1); gctx.device.CreateDepthStencilView( gctx.lookupResource(depth_texture).?, // Get the D3D12 resource from a handle. null, depth_texture_dsv, ); // Open D3D12 command list, setup descriptor heap, etc. After this call we can upload resources to the GPU, // draw 3D graphics etc. gctx.beginFrame(); // Create and upload graphics resources for dear imgui renderer. var guictx = GuiRenderer.init(arena_allocator, &gctx, 1, content_dir); // This will send command list to the GPU, call 'Present' and do some other bookkeeping. gctx.endFrame(); // Wait for the GPU to finish all commands. gctx.finishGpuCommands(); return .{ .gctx = gctx, .guictx = guictx, .frame_stats = common.FrameStats.init(), .main_pso = main_pso, .vertex_buffer = vertex_buffer, .vertex_data = vertex_data, .depth_texture = depth_texture, .depth_texture_dsv = depth_texture_dsv, .camera = .{ .position = [3]f32{ 0.0, 5.0, -5.0 }, .forward = [3]f32{ 0.0, 0.0, 1.0 }, .pitch = 0.175 * math.pi, .yaw = 0.0, }, .mouse = .{ .cursor_prev_x = 0, .cursor_prev_y = 0, }, .simd_width = 4, }; } fn deinit(demo: *DemoState, gpa_allocator: std.mem.Allocator) void { demo.gctx.finishGpuCommands(); demo.vertex_data.deinit(gpa_allocator); demo.guictx.deinit(&demo.gctx); demo.gctx.deinit(gpa_allocator); common.deinitWindow(gpa_allocator); demo.* = undefined; } fn update(demo: *DemoState) void { // Update frame counter and fps stats. 
demo.frame_stats.update(demo.gctx.window, window_name); const dt = demo.frame_stats.delta_time; // Update dear imgui common. After this call we can define our widgets. common.newImGuiFrame(dt); c.igSetNextWindowPos( c.ImVec2{ .x = @intToFloat(f32, demo.gctx.viewport_width) - 600.0 - 20, .y = 20.0 }, c.ImGuiCond_FirstUseEver, c.ImVec2{ .x = 0.0, .y = 0.0 }, ); c.igSetNextWindowSize(.{ .x = 600.0, .y = -1 }, c.ImGuiCond_Always); _ = c.igBegin( "Demo Settings", null, c.ImGuiWindowFlags_NoMove | c.ImGuiWindowFlags_NoResize | c.ImGuiWindowFlags_NoSavedSettings, ); c.igBulletText("", ""); c.igSameLine(0, -1); c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "Right Mouse Button + Drag", ""); c.igSameLine(0, -1); c.igText(" : rotate camera", ""); c.igBulletText("", ""); c.igSameLine(0, -1); c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "W, A, S, D", ""); c.igSameLine(0, -1); c.igText(" : move camera", ""); _ = c.igRadioButton_IntPtr("SIMD vector width 4", &demo.simd_width, 4); _ = c.igRadioButton_IntPtr("SIMD vector width 8", &demo.simd_width, 8); _ = c.igRadioButton_IntPtr("SIMD vector width 16", &demo.simd_width, 16); c.igEnd(); // Handle camera rotation with mouse. { var pos: w.POINT = undefined; _ = w.GetCursorPos(&pos); const delta_x = @intToFloat(f32, pos.x) - @intToFloat(f32, demo.mouse.cursor_prev_x); const delta_y = @intToFloat(f32, pos.y) - @intToFloat(f32, demo.mouse.cursor_prev_y); demo.mouse.cursor_prev_x = pos.x; demo.mouse.cursor_prev_y = pos.y; if (w.GetAsyncKeyState(w.VK_RBUTTON) < 0) { demo.camera.pitch += 0.0025 * delta_y; demo.camera.yaw += 0.0025 * delta_x; demo.camera.pitch = math.min(demo.camera.pitch, 0.48 * math.pi); demo.camera.pitch = math.max(demo.camera.pitch, -0.48 * math.pi); demo.camera.yaw = zm.modAngle(demo.camera.yaw); } } // Handle camera movement with 'WASD' keys. 
{ const speed = zm.f32x4s(10.0); const delta_time = zm.f32x4s(demo.frame_stats.delta_time); const transform = zm.mul(zm.rotationX(demo.camera.pitch), zm.rotationY(demo.camera.yaw)); var forward = zm.normalize3(zm.mul(zm.f32x4(0.0, 0.0, 1.0, 0.0), transform)); zm.store(demo.camera.forward[0..], forward, 3); const right = speed * delta_time * zm.normalize3(zm.cross3(zm.f32x4(0.0, 1.0, 0.0, 0.0), forward)); forward = speed * delta_time * forward; // Load camera position from memory to SIMD register ('3' means that we want to load three components). var cpos = zm.load(demo.camera.position[0..], zm.Vec, 3); if (w.GetAsyncKeyState('W') < 0) { cpos += forward; } else if (w.GetAsyncKeyState('S') < 0) { cpos -= forward; } if (w.GetAsyncKeyState('D') < 0) { cpos += right; } else if (w.GetAsyncKeyState('A') < 0) { cpos -= right; } // Copy updated position from SIMD register to memory. zm.store(demo.camera.position[0..], cpos, 3); } } fn computeWaves(comptime T: type, vertex_data: std.MultiArrayList(Vertex), time: f32) void { const static = struct { const offsets = [16]f32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; }; const voffset = zm.load(static.offsets[0..], T, 0); const vtime = zm.splat(T, time); const xslice = vertex_data.items(.x); const yslice = vertex_data.items(.y); const zslice = vertex_data.items(.z); const scale: f32 = 0.05; var z_index: i32 = 0; while (z_index < grid_size) : (z_index += 1) { const z: f32 = scale * @intToFloat(f32, z_index - grid_size / 2); const vz = zm.splat(T, z); var x_index: i32 = 0; while (x_index < grid_size) : (x_index += zm.veclen(T)) { const x: f32 = scale * @intToFloat(f32, x_index - grid_size / 2); const vx = zm.splat(T, x) + voffset * zm.splat(T, scale); const d = zm.sqrt(vx * vx + vz * vz); const vy = zm.sin(d - vtime); const index = @intCast(usize, x_index + z_index * grid_size); zm.store(xslice[index..], vx, 0); zm.store(yslice[index..], vy, 0); zm.store(zslice[index..], vz, 0); } } } fn draw(demo: *DemoState) void { 
var gctx = &demo.gctx; const cam_world_to_view = zm.lookToLh( zm.load(demo.camera.position[0..], zm.Vec, 3), zm.load(demo.camera.forward[0..], zm.Vec, 3), zm.f32x4(0.0, 1.0, 0.0, 0.0), ); const cam_view_to_clip = zm.perspectiveFovLh( 0.25 * math.pi, @intToFloat(f32, gctx.viewport_width) / @intToFloat(f32, gctx.viewport_height), 0.01, 200.0, ); const cam_world_to_clip = zm.mul(cam_world_to_view, cam_view_to_clip); // Begin DirectX 12 rendering. gctx.beginFrame(); // Get current back buffer resource and transition it to 'render target' state. const back_buffer = gctx.getBackBuffer(); gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_RENDER_TARGET); gctx.flushResourceBarriers(); gctx.cmdlist.OMSetRenderTargets( 1, &[_]d3d12.CPU_DESCRIPTOR_HANDLE{back_buffer.descriptor_handle}, w.TRUE, &demo.depth_texture_dsv, ); gctx.cmdlist.ClearRenderTargetView( back_buffer.descriptor_handle, &[4]f32{ 0.2, 0.2, 0.2, 1.0 }, 0, null, ); gctx.cmdlist.ClearDepthStencilView(demo.depth_texture_dsv, d3d12.CLEAR_FLAG_DEPTH, 1.0, 0, 0, null); // Update dynamic mesh. 
{ switch (demo.simd_width) { 4 => computeWaves(zm.F32x4, demo.vertex_data, @floatCast(f32, demo.frame_stats.time)), 8 => computeWaves(zm.F32x8, demo.vertex_data, @floatCast(f32, demo.frame_stats.time)), 16 => computeWaves(zm.F32x16, demo.vertex_data, @floatCast(f32, demo.frame_stats.time)), else => unreachable, } const xslice = demo.vertex_data.items(.x); const yslice = demo.vertex_data.items(.y); const zslice = demo.vertex_data.items(.z); const verts = gctx.allocateUploadBufferRegion(Pso_Vertex, @intCast(u32, demo.vertex_data.len)); var i: usize = 0; while (i < demo.vertex_data.len) : (i += 1) { verts.cpu_slice[i].position = [3]f32{ xslice[i], yslice[i], zslice[i] }; } gctx.addTransitionBarrier(demo.vertex_buffer, d3d12.RESOURCE_STATE_COPY_DEST); gctx.flushResourceBarriers(); gctx.cmdlist.CopyBufferRegion( gctx.lookupResource(demo.vertex_buffer).?, 0, verts.buffer, verts.buffer_offset, verts.cpu_slice.len * @sizeOf(@TypeOf(verts.cpu_slice[0])), ); gctx.addTransitionBarrier(demo.vertex_buffer, d3d12.RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER); gctx.flushResourceBarriers(); } gctx.setCurrentPipeline(demo.main_pso); // Set input assembler (IA) state. gctx.cmdlist.IASetPrimitiveTopology(.POINTLIST); gctx.cmdlist.IASetVertexBuffers(0, 1, &[_]d3d12.VERTEX_BUFFER_VIEW{.{ .BufferLocation = gctx.lookupResource(demo.vertex_buffer).?.GetGPUVirtualAddress(), .SizeInBytes = max_num_vertices * @sizeOf(Pso_Vertex), .StrideInBytes = @sizeOf(Pso_Vertex), }}); // Upload per-frame constant data (camera xform). { // Allocate memory for one instance of Pso_FrameConst structure. const mem = gctx.allocateUploadMemory(Pso_FrameConst, 1); // Copy 'cam_world_to_clip' matrix to upload memory. We need to transpose it because // HLSL uses column-major matrices by default (zmath uses row-major matrices). zm.storeMat(mem.cpu_slice[0].world_to_clip[0..], zm.transpose(cam_world_to_clip)); // Set GPU handle of our allocated memory region so that it is visible to the shader. 
gctx.cmdlist.SetGraphicsRootConstantBufferView( 0, // Slot index in Root Signature (CBV(b0), see intro5.hlsl). mem.gpu_base, ); } gctx.cmdlist.DrawInstanced(max_num_vertices, 1, 0, 0); // Draw dear imgui widgets. demo.guictx.draw(gctx); gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_PRESENT); gctx.flushResourceBarriers(); // Call 'Present' and prepare for the next frame. gctx.endFrame(); } pub fn main() !void { // Initialize some low-level Windows stuff (DPI awarness, COM), check Windows version and also check // if DirectX 12 Agility SDK is supported. common.init(); defer common.deinit(); // Create main memory allocator for our application. var gpa_allocator_state = std.heap.GeneralPurposeAllocator(.{}){}; defer { const leaked = gpa_allocator_state.deinit(); std.debug.assert(leaked == false); } const gpa_allocator = gpa_allocator_state.allocator(); var demo = init(gpa_allocator); defer deinit(&demo, gpa_allocator); while (true) { var message = std.mem.zeroes(w.user32.MSG); const has_message = w.user32.peekMessageA(&message, null, 0, 0, w.user32.PM_REMOVE) catch false; if (has_message) { _ = w.user32.translateMessage(&message); _ = w.user32.dispatchMessageA(&message); if (message.message == w.user32.WM_QUIT) { break; } } else { update(&demo); draw(&demo); } } }
samples/intro/src/intro5.zig
const std = @import("std"); const mem = std.mem; const fs = std.fs; const maxBytesRead = std.math.maxInt(u32); const Brain = @import("brain.zig").Brain; const usage = \\Usage: brain [command] \\ \\ Commands: \\ \\ code [BRAIN] Give brainfuck code to execute \\ file [PATH] Execute the code found in a .brain file \\ help Print this help message and exit \\ test Enters interactive mode \\ ; pub fn main() anyerror!void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const allocator = &arena.allocator; const args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, args); const stdout = std.io.getStdOut().writer(); if (args.len <= 1) { try stdout.print("{s}", .{usage}); std.process.exit(1); } const command = args[1]; const commandArgs = args[2..]; var brain = try Brain.init(allocator, Brain.defaultMemorySize); defer brain.deinit(); if (mem.eql(u8, command, "help")) { try stdout.print("{s}", .{usage}); } else if (mem.eql(u8, command, "code")) { if (commandArgs.len < 1) { std.debug.print("Error: Missing [CODE] argument.\n", .{}); std.process.exit(1); } const code: []const u8 = commandArgs[0]; // TODO: Implement optimize function // _ = brain.compile(code); const steps = try brain.interpret(code, 0, false); } else if (mem.eql(u8, command, "file")) { if (commandArgs.len < 1) { std.debug.print("Error: Missing [PATH] argument.\n", .{}); std.process.exit(1); } // TODO: Check .brain extension? 
const file = try fs.cwd().openFile(commandArgs[0], .{ .read = true }); defer file.close(); var code = try file.readToEndAlloc(allocator, maxBytesRead); defer allocator.free(code); // _ = brain.compile(code); const steps = try brain.interpret(code, 0, false); } else if (mem.eql(u8, command, "test")) { const stdin = std.io.getStdIn().reader(); while (true) { try stdout.print("brainfuckz> ", .{}); const code = try stdin.readUntilDelimiterAlloc(allocator, '\n', maxBytesRead); defer allocator.free(code); // try stdout.print("{s}", .{code}); const steps = try brain.interpret(code, 0, false); try stdout.print("\n", .{}); } } else { // TODO: Print unrecognized and exit std.debug.print("{any}", .{args}); } }
src/main.zig
const std = @import("std");
const expect = std.testing.expect;

/// Parsed contents of the BMFont "info" block.
/// `font_name` is heap-allocated and freed by FontInfo.deinit().
pub const TextInfo = struct {
    font_size: i16,
    flags: u8,
    charset: u8,
    stretch_h: u16,
    aa: u8,
    padding_up: u8,
    padding_right: u8,
    padding_down: u8,
    padding_left: u8,
    spacing_horiz: u8,
    spacing_vert: u8,
    outline: u8,
    font_name: []u8,
};

/// Parsed contents of the BMFont "common" block.
pub const TextCommon = struct {
    line_height: u16,
    base: u16,
    scale_w: u16,
    scale_h: u16,
    pages: u16,
    flags: u8,
    alpha: u8,
    red: u8,
    green: u8,
    blue: u8,
};

/// One glyph record. Packed so it matches the 20-byte on-disk layout
/// and can be read directly with readNoEof (asserted below).
pub const TextChar = packed struct {
    id: u32,
    x: u16,
    y: u16,
    width: u16,
    height: u16,
    offset_x: i16,
    offset_y: i16,
    xadvance: i16,
    page: u8,
    channel: u8,
};

/// One kerning record. Packed to match the 10-byte on-disk layout.
pub const KerningPair = packed struct {
    first: u32,
    second: u32,
    amount: i16,
};

/// Fully parsed font. Owns all slices; release with deinit().
pub const FontInfo = struct {
    allocator: *std.mem.Allocator,
    info: TextInfo,
    common: TextCommon,
    pages: [][]u8,
    chars: []TextChar,
    kerning_pairs: []KerningPair,

    /// Frees every allocation made during loading.
    pub fn deinit(self: @This()) void {
        self.allocator.free(self.info.font_name);
        for (self.pages) |s| {
            self.allocator.free(s);
        }
        self.allocator.free(self.pages);
        self.allocator.free(self.chars);
        self.allocator.free(self.kerning_pairs);
    }
};

pub const LoadError = error{
    NotFound,
    BadHeader,
    IncompatibleVersion,
    UnexpectedBlock,
};

// Block type tags as they appear in the file; the ordinal value is the
// byte stored on disk (Info = 1, Common = 2, ...).
const BlockTag = enum(u8) {
    None,
    Info,
    Common,
    Pages,
    Chars,
    KerningPairs,
};

/// Opens `filepath` (relative to cwd) and parses it as a binary BMFont
/// v3 file. Caller owns the returned FontInfo (free with deinit()).
pub fn loadBinaryFromPath(filepath: []const u8, allocator: *std.mem.Allocator) !FontInfo {
    var file = try std.fs.cwd().openFile(filepath, .{
        .read = true,
    });
    defer file.close();
    var data = try file.readToEndAlloc(allocator, 4 * 1024 * 1024); // max size 4 MB
    defer allocator.free(data);
    var stream = std.io.fixedBufferStream(data);
    return loadBinary(stream.reader(), allocator);
}

/// Parses a binary BMFont v3 stream. Expects blocks in file order:
/// Info, Common, Pages, Chars, then an optional KerningPairs block.
/// Caller owns the returned FontInfo (free with deinit()).
pub fn loadBinary(stream: anytype, allocator: *std.mem.Allocator) !FontInfo {
    {
        var header = [_]u8{0} ** 3;
        // Fix: a short read used to be silently ignored.
        if ((try stream.read(header[0..])) != header.len) {
            return LoadError.BadHeader;
        }
        // Fix: the magic check compared all 3 header bytes against the
        // 2-byte slice "BM" (always false) and the condition was inverted,
        // so a bad header was never rejected. The magic is "BMF".
        if (!std.mem.eql(u8, header[0..], "BMF")) {
            return LoadError.BadHeader;
        }
        const version = try stream.readByte();
        if (version != 3) {
            std.debug.print("bmfont load expected version 3, got version {}\n", .{version});
            return LoadError.IncompatibleVersion;
        }
    }

    var tag: BlockTag = @intToEnum(BlockTag, try stream.readByte());
    if (tag != .Info) {
        return LoadError.UnexpectedBlock;
    }
    _ = try stream.readIntNative(i32); // skip block size

    const k_maxFontNameLength = 256;
    const text_info = TextInfo{
        .font_size = try stream.readIntNative(i16),
        .flags = try stream.readIntNative(u8),
        .charset = try stream.readIntNative(u8),
        .stretch_h = try stream.readIntNative(u16),
        .aa = try stream.readIntNative(u8),
        .padding_up = try stream.readIntNative(u8),
        .padding_right = try stream.readIntNative(u8),
        .padding_down = try stream.readIntNative(u8),
        .padding_left = try stream.readIntNative(u8),
        .spacing_horiz = try stream.readIntNative(u8),
        .spacing_vert = try stream.readIntNative(u8),
        .outline = try stream.readIntNative(u8),
        .font_name = try stream.readUntilDelimiterAlloc(allocator, 0, k_maxFontNameLength),
    };
    errdefer allocator.free(text_info.font_name);

    tag = @intToEnum(BlockTag, try stream.readByte());
    if (tag != .Common) {
        return LoadError.UnexpectedBlock;
    }
    _ = try stream.readIntNative(i32); // skip block size

    const text_common = TextCommon{
        .line_height = try stream.readIntNative(u16),
        .base = try stream.readIntNative(u16),
        .scale_w = try stream.readIntNative(u16),
        .scale_h = try stream.readIntNative(u16),
        .pages = try stream.readIntNative(u16),
        .flags = try stream.readIntNative(u8),
        .alpha = try stream.readIntNative(u8),
        .red = try stream.readIntNative(u8),
        .green = try stream.readIntNative(u8),
        .blue = try stream.readIntNative(u8),
    };

    tag = @intToEnum(BlockTag, try stream.readByte());
    if (tag != .Pages) {
        return LoadError.UnexpectedBlock;
    }

    var pages: ?[][]u8 = null;
    errdefer {
        if (pages != null) {
            for (pages.?) |s| {
                allocator.free(s);
            }
            allocator.free(pages.?);
        }
    }
    {
        const block_size = try stream.readIntNative(i32); // skip block size
        var remaining: usize = @intCast(usize, block_size);
        var strings = std.ArrayList([]u8).init(allocator);
        // Fix: if a read fails mid-loop, free the page names collected so
        // far (previously they and the list's backing memory leaked).
        errdefer {
            for (strings.items) |s| {
                allocator.free(s);
            }
            strings.deinit();
        }
        while (remaining > 0) {
            var s: []u8 = try stream.readUntilDelimiterAlloc(allocator, 0, k_maxFontNameLength);
            // Each name occupies len + 1 bytes (NUL terminator) of the block.
            // NOTE(review): assumes the block size is consistent with the
            // names inside it; a malformed file could underflow here.
            remaining -= s.len + 1;
            try strings.append(s);
        }
        pages = strings.toOwnedSlice();
    }

    tag = @intToEnum(BlockTag, try stream.readByte());
    if (tag != .Chars) {
        return LoadError.UnexpectedBlock;
    }

    var chars: ?[]TextChar = null;
    errdefer {
        if (chars != null) {
            allocator.free(chars.?);
        }
    }
    {
        var block_size = try stream.readIntNative(i32); // skip block size
        const struct_size = @sizeOf(TextChar);
        comptime {
            std.debug.assert(@sizeOf(TextChar) == 20);
        }
        var count: usize = @divExact(@intCast(usize, block_size), struct_size);
        chars = try allocator.alloc(TextChar, count);
        try stream.readNoEof(std.mem.sliceAsBytes(chars.?));
    }

    var kerning_pairs: ?[]KerningPair = null;
    errdefer {
        if (kerning_pairs != null) {
            allocator.free(kerning_pairs.?);
        }
    }
    // The kerning block is optional: a clean EOF here maps to tag .None.
    tag = @intToEnum(BlockTag, stream.readByte() catch 0);
    if (tag == .KerningPairs) {
        var block_size = try stream.readIntNative(i32); // skip block size
        const struct_size = @sizeOf(KerningPair);
        comptime {
            std.debug.assert(@sizeOf(KerningPair) == 10);
        }
        var count: usize = @divExact(@intCast(usize, block_size), struct_size);
        kerning_pairs = try allocator.alloc(KerningPair, count);
        try stream.readNoEof(std.mem.sliceAsBytes(kerning_pairs.?));
    }
    if (kerning_pairs == null) {
        kerning_pairs = try allocator.alloc(KerningPair, 0);
    }

    return FontInfo{
        .allocator = allocator,
        .info = text_info,
        .common = text_common,
        .pages = pages.?,
        .chars = chars.?,
        .kerning_pairs = kerning_pairs.?,
    };
}

test "single page no kerning" {
    var allocator = std.testing.allocator;
    var info: FontInfo = try loadBinaryFromPath("test/consolas.fnt", allocator);
    defer info.deinit();
    try expect(std.mem.eql(u8, info.info.font_name, "Consolas"));
    try expect(std.mem.eql(u8, info.pages[0], "consolas_0.png"));
}

test "multi page with kerning" {
    var allocator = std.testing.allocator;
    var info: FontInfo = try loadBinaryFromPath("test/dejavu.fnt", allocator);
    defer info.deinit();
    try expect(std.mem.eql(u8, info.info.font_name, "DejaVu Sans"));
    try expect(std.mem.eql(u8, info.pages[0], "dejavu_0.png"));
    try expect(std.mem.eql(u8, info.pages[1], "dejavu_1.png"));
    try expect(std.mem.eql(u8, info.pages[2], "dejavu_2.png"));
    try expect(std.mem.eql(u8, info.pages[3], "dejavu_3.png"));
    try expect(std.mem.eql(u8, info.pages[4], "dejavu_4.png"));
}
bmfont.zig
const utils = @import("utils");
const georgios = @import("georgios");

const kernel = @import("root").kernel;
const print = kernel.print;
const kthreading = kernel.threading;

const ps2 = @import("ps2.zig");
const interrupts = @import("interrupts.zig");
const vbe = @import("vbe.zig");

// Software interrupt vector used for system calls (int 100).
pub const interrupt_number: u8 = 100;

/// System call dispatcher. Invoked from the interrupt handler with the
/// saved register state; eax selects the call, ebx/ecx/edx/edi/esi carry
/// up to five arguments. Results are written back through pointers passed
/// in those same registers.
pub fn handle(_: u32, interrupt_stack: *const interrupts.Stack) void {
    const call_number = interrupt_stack.eax;
    const arg1 = interrupt_stack.ebx;
    const arg2 = interrupt_stack.ecx;
    const arg3 = interrupt_stack.edx;
    // arg4/arg5 are discarded here to mark them used; individual cases
    // (e.g. 24) still read them directly.
    const arg4 = interrupt_stack.edi;
    _ = arg4;
    const arg5 = interrupt_stack.esi;
    _ = arg5;

    // TODO: Using pointers for args can cause Zig's alignment checks to fail.
    // Find a way around this without turning off safety?
    @setRuntimeSafety(false);

    switch (call_number) {
        // This file is parsed by generate_system_calls.py to generate the
        // system call interface functions used by programs. Before a system
        // call implementation there should be a SYSCALL comment with something
        // like the intended Zig signature in it. This pseudo-signature is the
        // same as normal, but with 2 differences:
        //
        // - One is a & before names of arguments intended to be passed to the
        // system call implementation as pointers. This is necessary for things
        // larger than a register.
        // Example: \\ SYSCALL: cool_syscall(&cool_arg: []const u8) void
        //
        // - The other is an optional name after the arguments that sets the name
        // of the return value in the generated function.
        // Example: \\ SYSCALL: cool_syscall() cool_return: u32`
        //
        // What registers the arguments and return values use depend on the
        // argN constants above and the order they appear in the signature.
        // Example: \\ SYSCALL: cool_syscall(a: u32, b: u32) c: u32
        // a should be read from arg1, b should be read from arg2, and the
        // return value should be written to arg3.
        //
        // System calls that return Zig errors should use ValueOrError and must
        // set something. This type exists because Zig errors can only safely
        // be used within the same compilation. To get around that this type
        // translates the kernel Zig errors to ABI-stable values to pass across
        // the system call boundary. In user space, the same type tries to
        // translate the values back to Zig errors. If the kernel error type is
        // unknown to the user program it gets the utils.Error.Unknown error.
        //
        // The following must come after a SYSCALL comment and before the
        // system call implementation:
        //
        // - An IMPORT comment will combine with other system call imports and
        // import Zig namespace needed for the system calls arguments and
        // return.
        // Example: \\ IMPORT: cool "cool"
        // Will insert: const cool = @import("cool")
        //
        // TODO: Documentation Comments
        // TODO: C alternative interface syscalls?

        // SYSCALL: print_string(&s: []const u8) void
        0 => print.string(@intToPtr(*[]const u8, arg1).*),

        // SYSCALL: yield() void
        2 => {
            if (kthreading.debug) print.string("\nY");
            kernel.threading_mgr.yield();
        },

        // SYSCALL: exit(status: u8) noreturn
        3 => {
            // TODO: Use status
            if (kthreading.debug) print.string("\nE");
            kernel.threading_mgr.remove_current_thread();
        },

        // SYSCALL: exec(info: *const georgios.ProcessInfo) georgios.ExecError!void
        // IMPORT: georgios "georgios.zig"
        4 => {
            const ValueOrError = georgios.system_calls.ValueOrError(void, georgios.ExecError);
            // Should not be able to create kernel mode process from a system
            // call that can be called from user mode.
            var info = @intToPtr(*const georgios.ProcessInfo, arg1).*;
            info.kernel_mode = false;
            const rv = @intToPtr(*ValueOrError, arg2);
            if (kernel.exec(&info)) |pid| {
                kernel.threading_mgr.wait_for_process(pid);
                rv.set_value(.{});
            } else |e| {
                rv.set_error(e);
            }
        },

        // SYSCALL: get_key(&blocking: georgios.Blocking) key: ?georgios.keyboard.Event
        // IMPORT: georgios "georgios.zig"
        5 => {
            const blocking = @intToPtr(*georgios.Blocking, arg1).* == .Blocking;
            const rv = @intToPtr(*?georgios.keyboard.Event, arg2);
            // In blocking mode, sleep until the keyboard wakes us, then retry.
            while (true) {
                if (ps2.get_key()) |key| {
                    rv.* = key;
                    break;
                } else if (blocking) {
                    kernel.threading_mgr.wait_for_keyboard();
                } else {
                    rv.* = null;
                    break;
                }
            }
        },

        // TODO: Return Zig Error
        // SYSCALL: next_dir_entry(iter: *georgios.DirEntry) bool
        // IMPORT: georgios "georgios.zig"
        6 => {
            const entry = @intToPtr(*georgios.DirEntry, arg1);
            const failure_ptr = @intToPtr(*bool, arg2);
            failure_ptr.* = false;
            kernel.filesystem.impl.next_dir_entry(entry) catch |e| {
                print.format("next_dir_entry failed in dir {}: {}\n", .{entry.dir, @errorName(e)});
                failure_ptr.* = true;
                return;
            };
        },

        // SYSCALL: print_hex(value: u32) void
        7 => print.hex(arg1),

        // SYSCALL: file_open(&path: []const u8) georgios.fs.Error!georgios.io.File.Id
        8 => {
            const ValueOrError = georgios.system_calls.ValueOrError(
                georgios.io.File.Id, georgios.fs.Error);
            const path = @intToPtr(*[]const u8, arg1);
            const rv = @intToPtr(*ValueOrError, arg2);
            if (kernel.filesystem.open(path.*)) |fs_file| {
                rv.set_value(fs_file.io_file.id.?);
            } else |e| {
                rv.set_error(e);
            }
        },

        // SYSCALL: file_read(id: georgios.io.File.Id, &to: []u8) georgios.io.FileError!usize
        9 => {
            const ValueOrError = georgios.system_calls.ValueOrError(
                usize, georgios.io.FileError);
            const id = arg1;
            const to = @intToPtr(*[]u8, arg2);
            const rv = @intToPtr(*ValueOrError, arg3);
            if (kernel.filesystem.file_id_read(id, to.*)) |read| {
                rv.set_value(read);
            } else |e| {
                rv.set_error(e);
            }
        },

        // SYSCALL: file_write(id: georgios.io.File.Id, &from: []const u8) georgios.io.FileError!usize
        10 => {
            const ValueOrError = georgios.system_calls.ValueOrError(
                usize, georgios.io.FileError);
            const id = arg1;
            const from = @intToPtr(*[]const u8, arg2);
            const rv = @intToPtr(*ValueOrError, arg3);
            if (kernel.filesystem.file_id_write(id, from.*)) |written| {
                rv.set_value(written);
            } else |e| {
                rv.set_error(e);
            }
        },

        // SYSCALL: file_seek(id: georgios.io.File.Id, offset: isize, seek_type: georgios.io.File.SeekType) georgios.io.FileError!usize
        11 => {
            // TODO
            @panic("file_seek called");
        },

        // SYSCALL: file_close(id: georgios.io.File.Id) georgios.io.FileError!void
        12 => {
            const ValueOrError = georgios.system_calls.ValueOrError(
                void, georgios.io.FileError);
            const id = arg1;
            const rv = @intToPtr(*ValueOrError, arg2);
            if (kernel.filesystem.file_id_close(id)) {
                rv.set_value(.{});
            } else |e| {
                rv.set_error(e);
            }
        },

        // SYSCALL: get_cwd(&buffer: []u8) georgios.threading.Error![]const u8
        13 => {
            const ValueOrError = georgios.system_calls.ValueOrError(
                []const u8, georgios.threading.Error);
            const buffer = @intToPtr(*[]u8, arg1).*;
            const rv = @intToPtr(*ValueOrError, arg2);
            if (kernel.threading_mgr.get_cwd(buffer)) |dir| {
                rv.set_value(dir);
            } else |e| {
                rv.set_error(e);
            }
        },

        // SYSCALL: set_cwd(&dir: []const u8) georgios.ThreadingOrFsError!void
        14 => {
            const ValueOrError = georgios.system_calls.ValueOrError(
                void, georgios.ThreadingOrFsError);
            const dir = @intToPtr(*[]const u8, arg1).*;
            const rv = @intToPtr(*ValueOrError, arg2);
            if (kernel.threading_mgr.set_cwd(dir)) {
                rv.set_value(.{});
            } else |e| {
                rv.set_error(e);
            }
        },

        // SYSCALL: sleep_milliseconds(&ms: u64) void
        15 => {
            kernel.threading_mgr.sleep_milliseconds(@intToPtr(*u64, arg1).*);
        },

        // SYSCALL: sleep_seconds(&s: u64) void
        16 => {
            kernel.threading_mgr.sleep_seconds(@intToPtr(*u64, arg1).*);
        },

        // SYSCALL: time() u64
        17 => {
            @intToPtr(*u64, arg1).* = kernel.platform.time();
        },

        // SYSCALL: get_process_id() u32
        18 => {
            // 0 is returned when there is no current process.
            const r = @intToPtr(*u32, arg1);
            if (kernel.threading_mgr.current_process) |p| {
                r.* = p.id;
            } else {
                r.* = 0;
            }
        },

        // SYSCALL: get_thread_id() u32
        19 => {
            // 0 is returned when there is no current thread.
            const r = @intToPtr(*u32, arg1);
            if (kernel.threading_mgr.current_thread) |t| {
                r.* = t.id;
            } else {
                r.* = 0;
            }
        },

        // SYSCALL: overflow_kernel_stack() void
        20 => {
            overflow_kernel_stack();
        },

        // SYSCALL: console_width() u32
        21 => {
            const r = @intToPtr(*u32, arg1);
            r.* = kernel.console.width;
        },

        // SYSCALL: console_height() u32
        22 => {
            const r = @intToPtr(*u32, arg1);
            r.* = kernel.console.height;
        },

        // SYSCALL: vbe_res() ?utils.Point
        23 => {
            const rv = @intToPtr(*?utils.Point, arg1);
            rv.* = vbe.get_res();
        },

        // SYSCALL: vbe_draw_raw_image_chunk(&data: []const u8, w: u32, &pos: utils.Point, &last: utils.Point) void
        24 => {
            const data = @intToPtr(*[]const u8, arg1).*;
            const width = arg2;
            const pos = @intToPtr(*utils.Point, arg3);
            const last = @intToPtr(*utils.Point, arg4);
            vbe.draw_raw_image_chunk(data, width, pos, last);
        },

        // SYSCALL: vbe_flush_buffer() void
        25 => {
            vbe.flush_buffer();
        },

        else => @panic("Invalid System Call"),
    }
}

/// Debug helper (syscall 20): prints the kernel-mode stack bounds for the
/// current thread, then recurses until the guard page is hit.
fn overflow_kernel_stack() void {
    if (kernel.threading_mgr.current_thread) |thread| {
        print.format("kernelmode_stack: {:a} - {:a}", .{
            thread.impl.kernelmode_stack.start,
            thread.impl.kernelmode_stack.end(),
        });
    }
    overflow_kernel_stack_i();
}

/// Infinite recursion with a 128-byte stack frame per call; writes into
/// the frame so the pages are actually touched, then prints esp and recurses.
fn overflow_kernel_stack_i() void {
    var use_to_find_guard_page: [128]u8 = undefined;
    print.format("overflow_kernel_stack: esp: {:a}\n", .{
        asm volatile ("mov %%esp, %[x]"
            : [x] "=r" (-> usize)
        )});
    for (use_to_find_guard_page) |*ptr, i| {
        ptr.* = @truncate(u8, i);
    }
    overflow_kernel_stack_i();
}
kernel/platform/system_calls.zig
const std = @import("std");
const fs = std.fs;
const fmt = std.fmt;
const mem = std.mem;
const math = std.math;
const json = std.json;
const ChildProcess = std.ChildProcess;

const log = @import("../src/md/log.zig");
const translate = @import("../src/md/translate.zig");
const Node = @import("../src/md/parse.zig").Node;

const TestError = error{
    TestNotFound,
    CouldNotCreateTempDirectory,
    DockerRunFailed,
};

pub const TestKey = enum {
    markdown,
    html,
};

/// An OutStream that checks everything written against an expected string,
/// erroring with a dump of both sides on the first mismatch.
const ValidationOutStream = struct {
    const Self = @This();
    expected_remaining: []const u8,

    pub const OutStream = std.io.OutStream(*Self, Error, write);
    pub const Error = error{DifferentData};

    fn init(exp: []const u8) Self {
        return .{
            .expected_remaining = exp,
        };
    }

    pub fn outStream(self: *Self) OutStream {
        return .{ .context = self };
    }

    // Consumes the matching prefix of expected_remaining, or errors.
    fn write(self: *Self, bytes: []const u8) Error!usize {
        if (self.expected_remaining.len < bytes.len) {
            std.debug.warn(
                \\====== expected this output: =========
                \\{}
                \\======== instead found this: =========
                \\{}
                \\======================================\n
            , .{
                self.expected_remaining,
                bytes,
            });
            return error.DifferentData;
        }
        if (!mem.eql(u8, self.expected_remaining[0..bytes.len], bytes)) {
            std.debug.warn(
                \\====== expected this output: =========
                \\{}
                \\======== instead found this: =========
                \\{}
                \\======================================\n
            , .{
                self.expected_remaining[0..bytes.len],
                bytes,
            });
            return error.DifferentData;
        }
        self.expected_remaining = self.expected_remaining[bytes.len..];
        return bytes.len;
    }
};

/// Looks up a CommonMark spec example by number and returns the text for
/// the requested key (markdown or html). Caller owns returned memory.
pub fn getTest(allocator: *mem.Allocator, number: i32, key: TestKey) ![]const u8 {
    const cwd = fs.cwd();
    // path is relative to test.zig in the project root
    const source = try cwd.readFileAlloc(allocator, "test/spec/commonmark_spec_0.29.json", math.maxInt(usize));
    defer allocator.free(source);
    var json_parser = std.json.Parser.init(allocator, true);
    defer json_parser.deinit();
    var json_tree = try json_parser.parse(source);
    defer json_tree.deinit();
    // Fix: removed an unused stdout handle and an unused loop index capture.
    for (json_tree.root.Array.items) |value| {
        var example_num = value.Object.get("example").?.Integer;
        if (example_num == number) {
            return try allocator.dupe(u8, value.Object.get(@tagName(key)).?.String);
        }
    }
    return TestError.TestNotFound;
}

/// Creates a temp directory via `mktemp -d`. Caller owns the returned path.
pub fn mktmp(allocator: *mem.Allocator) ![]const u8 {
    const cwd = try fs.path.resolve(allocator, &[_][]const u8{"."});
    defer allocator.free(cwd);
    var out = try exec(allocator, cwd, true, &[_][]const u8{ "mktemp", "-d" });
    defer allocator.free(out.stdout);
    defer allocator.free(out.stderr);
    // defer allocator.free(out);
    log.Debugf("mktemp return: {}\n", .{out});
    return allocator.dupe(u8, std.mem.trim(u8, out.stdout, &std.ascii.spaces));
}

/// Writes `contents` to `absoluteDirectory`/`fileName`.
/// Caller owns the returned absolute file path.
pub fn writeFile(allocator: *mem.Allocator, absoluteDirectory: []const u8, fileName: []const u8, contents: []const u8) ![]const u8 {
    var filePath = try fs.path.join(allocator, &[_][]const u8{ absoluteDirectory, fileName });
    log.Debugf("writeFile path: {}\n", .{filePath});
    const file = try std.fs.createFileAbsolute(filePath, .{});
    defer file.close();
    try file.writeAll(contents);
    return filePath;
}

/// Serializes `value` as pretty-printed JSON into tempDir/name.
/// Caller owns the returned file path.
pub fn writeJson(allocator: *mem.Allocator, tempDir: []const u8, name: []const u8, value: anytype) ![]const u8 {
    var buf = std.ArrayList(u8).init(allocator);
    defer buf.deinit();
    try json.stringify(value, json.StringifyOptions{
        .whitespace = .{
            .indent = .{ .Space = 4 },
            .separator = true,
        },
    }, buf.outStream());
    return writeFile(allocator, tempDir, name, buf.items);
}

// Runs a child process in `cwd` and returns its captured output.
// NOTE(review): the exit-code check (which used expect_0) is currently
// commented out, so non-zero exits are NOT treated as errors here.
fn exec(allocator: *mem.Allocator, cwd: []const u8, expect_0: bool, argv: []const []const u8) !ChildProcess.ExecResult {
    _ = expect_0; // only used by the commented-out term check below
    const max_output_size = 100 * 1024;
    const result = ChildProcess.exec(.{
        .allocator = allocator,
        .argv = argv,
        .cwd = cwd,
        .max_output_bytes = max_output_size,
    }) catch |err| {
        std.debug.warn("The following command failed:\n", .{});
        // printCmd(cwd, argv);
        return err;
    };
    // switch (result.term) {
    //     .Exited => |code| {
    //         if ((code != 0) == expect_0) {
    //             std.debug.warn("The following command exited with error code {}:\n", .{code});
    //             // printCmd(cwd, argv);
    //             std.debug.warn("stderr:\n{}\n", .{result.stderr});
    //             return error.CommandFailed;
    //         }
    //     },
    //     else => {
    //         std.debug.warn("The following command terminated unexpectedly:\n", .{});
    //         // printCmd(cwd, argv);
    //         std.debug.warn("stderr:\n{}\n", .{result.stderr});
    //         return error.CommandFailed;
    //     },
    // }
    return result;
}

/// Logs a command line (argv joined by spaces) at debug level.
pub fn debugPrintExecCommand(allocator: *mem.Allocator, arry: [][]const u8) !void {
    var cmd_buf = std.ArrayList(u8).init(allocator);
    defer cmd_buf.deinit();
    for (arry) |a| {
        try cmd_buf.appendSlice(a);
        try cmd_buf.append(' ');
    }
    log.Debugf("exec cmd: {}\n", .{cmd_buf.items});
}

/// Diffs two JSON files using the bwowk/json-diff docker image.
/// Returns error.DockerRunFailed when the diff is non-empty or docker fails.
pub fn dockerRunJsonDiff(allocator: *mem.Allocator, actualJson: []const u8, expectJson: []const u8) !void {
    const cwd = try fs.path.resolve(allocator, &[_][]const u8{"."});
    defer allocator.free(cwd);
    var filemount = try std.mem.concat(allocator, u8, &[_][]const u8{ actualJson, ":", actualJson });
    defer allocator.free(filemount);
    var file2mount = try std.mem.concat(allocator, u8, &[_][]const u8{ expectJson, ":", expectJson });
    defer allocator.free(file2mount);
    // The long way around until there is a better way to compare json in Zig
    var cmd = &[_][]const u8{ "docker", "run", "-t", "-v", filemount, "-v", file2mount, "-w", cwd, "--rm", "bwowk/json-diff", "-C", expectJson, actualJson };
    try debugPrintExecCommand(allocator, cmd);
    var diff = try exec(allocator, cwd, true, cmd);
    if (diff.term.Exited != 0) {
        log.Errorf("docker run failed:\n{}\n", .{diff.stdout});
        return error.DockerRunFailed;
    }
}

/// compareJsonExpect tests parser output against a json test file containing the expected output
/// - expected: The expected json output. Use @embedFile()!
/// - value: The value to test against the expected json. This will be marshaled to json.
/// - returns: An error or optional: null (on success) or "value" encoded as json on compare failure.
pub fn compareJsonExpect(allocator: *mem.Allocator, expected: []const u8, value: anytype) !?[]const u8 {
    // check with zig stream validator
    var dumpBuf = std.ArrayList(u8).init(allocator);
    defer dumpBuf.deinit();
    var stringyOpts = json.StringifyOptions{
        .whitespace = .{
            .indent = .{ .Space = 4 },
            .separator = true,
        },
    };

    // human readable diff
    var tempDir = try mktmp(allocator);
    defer allocator.free(tempDir);
    var expectJsonPath = try writeFile(allocator, tempDir, "expect.json", expected);
    defer allocator.free(expectJsonPath);
    var actualJsonPath = try writeJson(allocator, tempDir, "actual.json", value);
    defer allocator.free(actualJsonPath);

    // FIXME: replace with zig json diff
    // Fix: dropped the unused error capture (was `catch |err2|`).
    dockerRunJsonDiff(allocator, actualJsonPath, expectJsonPath) catch {
        try json.stringify(value, stringyOpts, dumpBuf.outStream());
        return dumpBuf.toOwnedSlice();
    };
    return null;
}

/// compareHtmlExpect tests parser output against a json test file containing the expected output
/// - expected: The expected html output. Use @embedFile()!
/// - value: The translated parser output.
/// - dumpHtml: If true, only the json value of "value" will be dumped to stdout.
pub fn compareHtmlExpect(allocator: *std.mem.Allocator, expected: []const u8, value: *std.ArrayList(Node)) !?[]const u8 {
    var vos = ValidationOutStream.init(expected);
    var buf = std.ArrayList(u8).init(allocator);
    defer buf.deinit();
    try translate.markdownToHtml(value, buf.outStream());
    // Fix: dropped the unused error capture (was `catch |err|`).
    _ = vos.outStream().write(buf.items) catch return buf.items;
    return null;
}

/// Logs a test input at debug level with an end marker.
pub fn dumpTest(input: []const u8) void {
    log.config(log.logger.Level.Debug, true);
    std.debug.warn("{}", .{"\n"});
    log.Debugf("test:\n{}-- END OF TEST --\n", .{input});
}
test/util.zig
const std = @import("std");
const c = @import("c.zig");
const glsl = @cImport({
    @cInclude("sokol/sokol_gfx.h");
    @cInclude("shaders/triangle.glsl.h");
});

// Render state shared between the sokol-app callbacks.
const State = struct {
    pass_action: c.sg_pass_action,
    main_pipeline: c.sg_pipeline,
    main_bindings: c.sg_bindings,
};

var state: State = undefined;

// One-time setup: initialize sokol-gfx, upload the triangle's vertex
// buffer, and build the shader + pipeline used every frame.
export fn init() void {
    var gfx_desc = std.mem.zeroes(c.sg_desc);
    gfx_desc.context = c.sapp_sgcontext();
    c.sg_setup(&gfx_desc);

    // Clear the default pass to dark grey each frame.
    state.pass_action.colors[0].action = .SG_ACTION_CLEAR;
    state.pass_action.colors[0].value = c.sg_color{ .r = 0.2, .g = 0.2, .b = 0.2, .a = 1.0 };

    // Interleaved vertex data: x/y/z position followed by r/g/b/a color.
    const triangle_vertices = [_]f32{
        0.0,  0.5,  0.5, 1.0, 0.0, 0.0, 1.0,
        0.5,  -0.5, 0.5, 0.0, 1.0, 0.0, 1.0,
        -0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 1.0,
    };
    var vbuf_desc = std.mem.zeroes(c.sg_buffer_desc);
    vbuf_desc.size = triangle_vertices.len * @sizeOf(f32);
    vbuf_desc.data = .{ .ptr = &triangle_vertices[0], .size = vbuf_desc.size };
    vbuf_desc.type = .SG_BUFFERTYPE_VERTEXBUFFER;
    state.main_bindings.vertex_buffers[0] = c.sg_make_buffer(&vbuf_desc);

    // Shader description comes from the sokol-shdc generated header.
    const shd_desc = @ptrCast([*c]const c.sg_shader_desc, glsl.triangle_shader_desc(glsl.sg_query_backend()));
    const shd = c.sg_make_shader(shd_desc);

    var pipe_desc = std.mem.zeroes(c.sg_pipeline_desc);
    pipe_desc.layout.attrs[0].format = .SG_VERTEXFORMAT_FLOAT3; // position
    pipe_desc.layout.attrs[1].format = .SG_VERTEXFORMAT_FLOAT4; // color
    pipe_desc.shader = shd;
    state.main_pipeline = c.sg_make_pipeline(&pipe_desc);
}

// Per-frame callback: one clear pass drawing the three vertices.
export fn update() void {
    c.sg_begin_default_pass(&state.pass_action, c.sapp_width(), c.sapp_height());
    c.sg_apply_pipeline(state.main_pipeline);
    c.sg_apply_bindings(&state.main_bindings);
    c.sg_draw(0, 3, 1);
    c.sg_end_pass();
    c.sg_commit();
}

// Shutdown callback: release all sokol-gfx resources.
export fn cleanup() void {
    c.sg_shutdown();
}

// Entry point: describe the window and hand control to sokol-app.
pub fn main() void {
    var run_desc = std.mem.zeroes(c.sapp_desc);
    run_desc.width = 1280;
    run_desc.height = 720;
    run_desc.init_cb = init;
    run_desc.frame_cb = update;
    run_desc.cleanup_cb = cleanup;
    run_desc.window_title = "Triangle (sokol-zig)";
    _ = c.sapp_run(&run_desc);
}
src/example_triangle.zig
const std = @import("std");
const print = std.debug.print;

const CFO = @import("./CFO.zig");
const OSHA = @import("./OSHA.zig");
const parse = @import("./parse.zig");
const FLIR = @import("./Old_FLIR.zig");

const page_allocator = std.heap.page_allocator;

// Set while the JIT'ed code is live so the crash handler (OSHA) can map
// fault addresses back to emitted code.
var the_cfo: ?*CFO = null;

/// Maps an address through the live CFO when one is installed,
/// otherwise returns the address unchanged.
pub fn addr_lookup(addr: usize) usize {
    return if (the_cfo) |c| c.lookup(addr) else addr;
}

/// Demo/benchmark driver: JITs four variants of an elementwise
/// `arr1 += arr2` kernel (scalar, parsed-FLIR, AVX, unrolled AVX) and
/// times them against each other.
pub fn main() !void {
    print("Yes, I am your CFO (certified forklift operator)\n", .{});

    const size = 1024 * 32;
    var arr1 = try std.heap.page_allocator.alloc(f64, size);
    var arr2 = try std.heap.page_allocator.alloc(f64, size);
    var i: usize = 0;
    while (i < size) : (i += 1) {
        arr1[i] = @intToFloat(f64, i);
        arr2[i] = 100000.0 * @intToFloat(f64, i);
    }

    // Register assignments for the emitted code (SysV: rdi/rsi/rdx args).
    const IPReg = CFO.IPReg;
    const idx: IPReg = .rcx;
    const arg1: IPReg = .rdi;
    const arg2: IPReg = .rsi;
    const arg3: IPReg = .rdx;
    const v0: u4 = 0;
    const v1: u4 = 1;
    const allocator = std.testing.allocator;
    var cfo = try CFO.init(allocator);
    defer cfo.deinit();

    // Variant 1: scalar loop, one f64 per iteration.
    const start = cfo.get_target();
    try cfo.enter();
    try cfo.arit(.xor, idx, idx);
    const loop = cfo.get_target();
    // try cfo.vmovurm(.sd, v0, CFO.a(idx));
    try cfo.vmovurm(.sd, v0, CFO.qi(arg1, idx));
    //try cfo.vmathfrm(.add, .sd, v0, v0, CFO.qi(arg2, idx));
    try cfo.vmovurm(.sd, v1, CFO.qi(arg2, idx));
    try cfo.vmathf(.add, .sd, v0, v0, v1);
    try cfo.vmovumr(.sd, CFO.qi(arg1, idx), v0);
    try cfo.aritri(.add, idx, 1);
    try cfo.arit(.cmp, idx, arg3);
    try cfo.jbck(.l, loop);
    // try cfo.trap();
    try cfo.leave();
    try cfo.ret();

    // Variant 2: same kernel expressed in FLIR, parsed from source text.
    const start_parse = cfo.get_target();
    var flir = try FLIR.init(0, allocator);
    defer flir.deinit();
    try flir.loop_start();
    _ = try parse.parse(&flir, "xi = xi + yi;");
    try flir.loop_end();
    flir.live(true);
    _ = try flir.scanreg(true);
    flir.debug_print(false);
    try cfo.enter();
    _ = try flir.codegen(&cfo, true);
    try cfo.leave();
    try cfo.ret();
    try cfo.dbg_nasm(allocator);

    // Variant 3: AVX loop, 4 f64 per iteration.
    const start_simd = cfo.get_target();
    try cfo.enter();
    try cfo.arit(.xor, idx, idx);
    const loop2 = cfo.get_target();
    try cfo.vmovarm(.pd4, v0, CFO.qi(arg1, idx));
    try cfo.vmathfrm(.add, .pd4, v0, v0, CFO.qi(arg2, idx));
    try cfo.vmovamr(.pd4, CFO.qi(arg1, idx), v0);
    try cfo.aritri(.add, idx, 4);
    try cfo.arit(.cmp, idx, arg3);
    try cfo.jbck(.l, loop2);
    try cfo.vzeroupper();
    try cfo.leave();
    try cfo.ret();

    // Variant 4: AVX loop unrolled x2, 8 f64 per iteration.
    const start_simd2 = cfo.get_target();
    try cfo.enter();
    try cfo.arit(.xor, idx, idx);
    const loop3 = cfo.get_target();
    try cfo.vmovarm(.pd4, v0, CFO.qi(arg1, idx));
    try cfo.vmovarm(.pd4, v1, CFO.qi(arg1, idx).o(32));
    try cfo.vmathfrm(.add, .pd4, v0, v0, CFO.qi(arg2, idx));
    try cfo.vmathfrm(.add, .pd4, v1, v1, CFO.qi(arg2, idx).o(32));
    try cfo.vmovamr(.pd4, CFO.qi(arg1, idx), v0);
    try cfo.vmovamr(.pd4, CFO.qi(arg1, idx).o(32), v1);
    try cfo.aritri(.add, idx, 8);
    try cfo.arit(.cmp, idx, arg3);
    try cfo.jbck(.l, loop3);
    try cfo.vzeroupper();
    try cfo.leave();
    // try cfo.retnasm();
    try cfo.ret();
    // try cfo.dbg_test();
    try cfo.finalize();

    the_cfo = &cfo;
    defer the_cfo = null;
    OSHA.install(&cfo);
    defer OSHA.clear();

    const scalar_add = cfo.get_ptr(start, fn (arg1: [*]f64, arg2: [*]f64, arg3: u64) callconv(.C) void);
    // Fix: the last parameter was a duplicate name (`arg3` twice); renamed to arg4.
    const parse_add = cfo.get_ptr(start_parse, fn (arg1: [*]f64, arg2: [*]f64, arg3: ?[*]f64, arg4: u64) callconv(.C) void);
    const simd_add = cfo.get_ptr(start_simd, fn (arg1: [*]f64, arg2: [*]f64, arg3: u64) callconv(.C) void);
    const simd2_add = cfo.get_ptr(start_simd2, fn (arg1: [*]f64, arg2: [*]f64, arg3: u64) callconv(.C) void);

    var timer = try std.time.Timer.start();
    i = 0;
    while (i < 10) : (i += 1) {
        parse_add(arr1.ptr, arr2.ptr, null, size);
        const tid1p = timer.lap();
        scalar_add(arr1.ptr, arr2.ptr, size);
        const tid1 = timer.lap();
        simd_add(arr1.ptr, arr2.ptr, size);
        const tid2 = timer.lap();
        simd2_add(arr1.ptr, arr2.ptr, size);
        const tid3 = timer.lap();
        print("tidning: {}, {}, {}, {}\n", .{ tid1, tid1p, tid2, tid3 });
        _ = timer.lap();
    }
    print("did: {}\n", .{arr1[1]});
    print("did: {}\n", .{arr1[2]});
    print("did: {}\n", .{arr1[3]});
    print("did: {}\n", .{arr1[1023]});
}
src/main.zig
const std = @import("std");
const luf = @import("luf");

const log = std.log.scoped(.luf_cli);

/// CLI entry point. Usage: `luf build|run <file_path> [-o <output>]`.
/// `run` executes a `.luf` source file or `.blf` bytecode file;
/// `build` compiles a `.luf` file to bytecode.
pub fn main() !void {
    var gpa_alloc = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_alloc.deinit();
    const gpa = &gpa_alloc.allocator;

    const args = try parse(gpa);
    defer {
        for (args) |arg| gpa.free(arg);
        gpa.free(args);
    }

    const action = if (args.len >= 2) args[1] else return log.err("Missing argument 'build' or 'run'\n", .{});

    if (!std.mem.eql(u8, action, "build") and !std.mem.eql(u8, action, "run"))
        return log.err("Invalid command '{s}'\n", .{action});

    const file_path = if (args.len >= 3) args[2] else return log.err("Missing file path: 'luf {s} <file_path>'\n", .{action});

    if (std.mem.eql(u8, action, "run")) {
        return runner(gpa, file_path);
    }

    // Optional `-o <name>` flag anywhere after the command selects the
    // output file name; defaults to the input name with a `.blf` extension.
    var i: usize = 0;
    const output_name = while (i < args.len) : (i += 1) {
        if (std.mem.eql(u8, args[i], "-o") and args.len > i + 1) {
            break args[i + 1];
        }
    } else null;

    try builder(gpa, file_path, output_name);
}

/// Parses the process' arguments. Caller owns the returned slice and
/// each argument string in it.
fn parse(gpa: *std.mem.Allocator) ![][]const u8 {
    var list = std.ArrayList([]const u8).init(gpa);
    errdefer list.deinit();

    var args = std.process.args();
    while (args.next(gpa)) |arg| {
        try list.append(try arg);
    }

    return list.toOwnedSlice();
}

/// Runs either a Luf source file (`.luf`, compiled on the fly) or a
/// bytecode file (`.blf`, decoded and executed) directly.
fn runner(gpa: *std.mem.Allocator, file_path: []const u8) !void {
    const file_type: enum { source, byte_code } = if (std.mem.endsWith(u8, file_path, ".luf"))
        .source
    else if (std.mem.endsWith(u8, file_path, ".blf"))
        .byte_code
    else
        return log.err("Unsupported file type, expected a '.luf' or '.blf' file\n", .{});

    const file = std.fs.cwd().openFile(file_path, .{}) catch |err| {
        return log.err("Could not open file: '{s}'\nError: {s}\n", .{ file_path, @errorName(err) });
    };

    var vm = try luf.Vm.init(gpa);
    defer vm.deinit();

    if (file_type == .source) {
        const file_data = try file.readToEndAlloc(gpa, std.math.maxInt(u64));
        defer gpa.free(file_data);
        // On failure the VM's collected diagnostics are printed instead of
        // the raw error. (FIX: dropped the unused `|err|` capture.)
        vm.compileAndRun(file_data) catch {
            try vm.errors.write(file_data, std.io.getStdErr().writer());
        };
        return;
    }

    var byte_code = luf.byte_code.ByteCode.decodeFromStream(gpa, file.reader()) catch |err| {
        return log.err("Could not decode bytecode: {s}\n", .{@errorName(err)});
    };
    defer byte_code.deinit();

    vm.loadCode(&byte_code);
    vm.run() catch {
        // No source is available for a bytecode run, so pass an empty slice.
        try vm.errors.write("", std.io.getStdErr().writer());
    };
}

/// Compiles the luf source code and outputs its bytecode which can then be ran later
fn builder(gpa: *std.mem.Allocator, file_path: []const u8, output_name: ?[]const u8) !void {
    if (!std.mem.endsWith(u8, file_path, ".luf"))
        return log.err("Expected file with luf extension: '.luf' in path '{s}'\n", .{file_path});

    const file = std.fs.cwd().openFile(file_path, .{}) catch |err| {
        // FIX: corrected "recieved" -> "received" in the error message.
        return log.err("Could not open file with path '{s}'\nError received: {s}\n", .{ file_path, @errorName(err) });
    };

    const source = try file.readToEndAlloc(gpa, std.math.maxInt(u64));
    defer gpa.free(source);

    var errors = luf.Errors.init(gpa);
    defer errors.deinit();

    // FIX: `catch |_|` is invalid capture syntax; a plain `catch` discards
    // the error, and the collected compile diagnostics are printed instead.
    var cu = luf.compiler.compile(gpa, source, &errors) catch {
        return errors.write(source, std.io.getStdErr().writer());
    };
    defer cu.deinit();

    var byte_code = try luf.byte_code.Instructions.fromCu(gpa, cu);
    defer byte_code.deinit();

    // Default output name: input basename with its "luf" extension
    // overwritten by "blf" (the ".luf" suffix was verified above).
    const final_output_name = output_name orelse blk: {
        const base = std.fs.path.basename(file_path);
        var new_name = try gpa.alloc(u8, base.len);
        std.mem.copy(u8, new_name, base);
        std.mem.copy(u8, new_name[new_name.len - 3 .. new_name.len], "blf");
        break :blk new_name;
    };
    // if `output_name` was null, we allocated memory to create the new name
    defer if (output_name == null) gpa.free(final_output_name);

    const output_file = std.fs.cwd().createFile(final_output_name, .{}) catch |err| {
        return log.err("Could not create output file '{s}'\nError '{s}'\n", .{ final_output_name, @errorName(err) });
    };

    byte_code.encodeToStream(output_file.writer()) catch |err| {
        return log.err("Could not write to output file '{s}'\nError '{s}'\n", .{ final_output_name, @errorName(err) });
    };
}
src/cli/main.zig
const std = @import("std");
const iup = @import("iup.zig");

const MainLoop = iup.MainLoop;
const Dialog = iup.Dialog;
const Button = iup.Button;
const MessageDlg = iup.MessageDlg;
const Multiline = iup.Multiline;
const Label = iup.Label;
const Text = iup.Text;
const VBox = iup.VBox;
const HBox = iup.HBox;
const Menu = iup.Menu;
const SubMenu = iup.SubMenu;
const Separator = iup.Separator;
const Fill = iup.Fill;
const Item = iup.Item;
const FileDlg = iup.FileDlg;
const Toggle = iup.Toggle;
const Tabs = iup.Tabs;
const ScreenSize = iup.ScreenSize;
const Image = iup.Image;
const ImageRgb = iup.ImageRgb;
const ImageRgba = iup.ImageRgba;

/// IUPforZig buttons example: opens a centered dialog demonstrating an
/// image button, an enable/disable toggle, an exit button, and a plain
/// button wired to a low-level BUTTON_CB callback.
pub fn main() !void {
    try MainLoop.open();
    defer MainLoop.close();
    var dlg = try create_dialog();
    defer dlg.deinit();
    try dlg.showXY(.Center, .Center);
    try MainLoop.beginLoop();
}

/// Builds the example dialog: a fixed-size, non-resizable window with a
/// row of three buttons, a read-only text field used as a status line,
/// and one large button filling the rest of the space.
fn create_dialog() !*Dialog {
    // Three variants of the same 16x16 image: normal, pressed, inactive.
    var img_release = try images.getRelease();
    var img_press = try images.getPress();
    var img_inactive = try images.getInactive();

    return try (Dialog.init()
        .setTitle("IUPforZig - Buttons")
        .setMinBox(false)
        .setMaxBox(false)
        .setMenuBox(false)
        .setSize(.Quarter, .Quarter)
        .setResize(false)
        .setChildren(
        .{
            VBox.init()
                .setExpandChildren(true)
                .setMargin(10, 10)
                .setGap(10)
                .setChildren(
                .{
                    HBox.init().setChildren(
                        .{
                            Button.init()
                                .setTitle("Button with image")
                                .setName("btn_image")
                                .setCanFocus(false)
                                .setImPress(img_press)
                                .setImage(img_release)
                                // wrong name! Binding typo — presumably should
                                // map IMINACTIVE as `setImInactive`; confirm
                                // against iup.zig.
                                .setIMinActive(img_inactive)
                                .setButtonCallback(btn_image_button),
                            Button.init()
                                .setTitle("on/off")
                                .setActionCallback(btn_on_off_cb)
                                .setName("btn_onoff"),
                            Button.init()
                                .setTitle("Exit")
                                .setName("btn_exit")
                                .setActionCallback(btn_exit_cb),
                        },
                    ),
                    Text.init()
                        .setName("text")
                        .setReadonly(true),
                    Button.init()
                        .setTitle("Big useless button")
                        .setButtonCallback(btn_big_button_cb),
                },
            ),
        },
    )
        .unwrap());
}

/// BUTTON_CB for the image button: logs every event and mirrors left-button
/// press/release state into the "text" status field. IUP encodes the mouse
/// button as a char ('1' is the left button) and `pressed` as 1/0.
fn btn_image_button(self: *Button, button: i32, pressed: i32, x: i32, y: i32, status: [:0]const u8) anyerror!void {
    _ = y;
    _ = x;
    _ = status;
    std.debug.print("BUTTON_CB(button={}, press={})\n", .{ button, pressed });

    var text: *Text = self.getDialogChild("text").?.Text;
    if (button == '1') {
        if (pressed == 1) {
            text.setValue("Red button pressed");
        } else {
            text.setValue("Red button released");
        }
    }
}

/// Toggles the image button's ACTIVE state, which switches it between the
/// normal and inactive images.
fn btn_on_off_cb(button: *Button) anyerror!void {
    var btn_image: *Button = button.getDialogChild("btn_image").?.Button;
    var active = btn_image.getActive();
    btn_image.setActive(!active);
}

/// Exits the IUP main loop, closing the application.
fn btn_exit_cb(_: *Button) anyerror!void {
    MainLoop.exitLoop();
}

/// BUTTON_CB for the big button: just logs the raw event parameters.
fn btn_big_button_cb(self: *Button, button: i32, pressed: i32, x: i32, y: i32, status: [:0]const u8) anyerror!void {
    _ = self;
    _ = x;
    _ = y;
    _ = status;
    std.debug.print("BUTTON_CB(button={}, press={})\n", .{ button, pressed });
}

/// Palette-indexed images shared by the three button states. The same
/// pixel table is reused; only the palette colors differ per state.
const images = struct {
    /// Defines button's image
    /// Each index corresponds to a RGB index from 1 to 4
    /// This image is a 16x16 matrix
    const pixelmap = [_]u8{
        // zig fmt: off
        1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
        1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,4,4,3,3,3,3,2,2,
        1,1,3,3,3,3,3,4,4,4,4,3,3,3,2,2,
        1,1,3,3,3,3,3,4,4,4,4,3,3,3,2,2,
        1,1,3,3,3,3,3,3,4,4,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,1,3,3,3,3,3,3,3,3,3,3,3,3,2,2,
        1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
        2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
    };
    // zig fmt: on

    /// Defines pressed button's image
    // Light background, blue body, red dot.
    pub fn getRelease() !*Image {
        return try (Image.init(16, 16, pixelmap[0..])
            .setHandle("img_release")
            .setColors(1, .{ .r = 215, .g = 215, .b = 215 })
            .setColors(2, .{ .r = 40, .g = 40, .b = 40 })
            .setColors(3, .{ .r = 30, .g = 50, .b = 210 })
            .setColors(4, .{ .r = 240, .g = 0, .b = 0 })
            .unwrap());
    }

    // Same pixels with palette entries 1/2 swapped to darken the border,
    // giving a "pushed in" look.
    pub fn getPress() !*Image {
        return try (Image.init(16, 16, pixelmap[0..])
            .setHandle("img_press")
            .setColors(1, .{ .r = 40, .g = 40, .b = 40 })
            .setColors(2, .{ .r = 215, .g = 215, .b = 215 })
            .setColors(3, .{ .r = 0, .g = 20, .b = 180 })
            .setColors(4, .{ .r = 210, .g = 0, .b = 0 })
            .unwrap());
    }

    // Grayed-out palette for the disabled state.
    pub fn getInactive() !*Image {
        return try (Image.init(16, 16, pixelmap[0..])
            .setHandle("img_inactive")
            .setColors(1, .{ .r = 215, .g = 215, .b = 215 })
            .setColors(2, .{ .r = 40, .g = 40, .b = 40 })
            .setColors(3, .{ .r = 100, .g = 100, .b = 100 })
            .setColors(4, .{ .r = 200, .g = 200, .b = 200 })
            .unwrap());
    }
};
src/button_example.zig
const std = @import("std");
const vk = @import("vk");
const gvk = @import("graphics.zig");

/// A compiled graphics pipeline together with its layout; both must be
/// destroyed with `deinit` once no longer in use.
pub const Pipeline = struct {
    pipeline: vk.VkPipeline,
    layout: vk.VkPipelineLayout,

    /// Destroys the pipeline and its layout on the given device.
    pub fn deinit(self: Pipeline, device: vk.VkDevice) void {
        vk.destroyPipeline(device, self.pipeline, null);
        vk.destroyPipelineLayout(device, self.layout, null);
    }
};

const PipelineOptions = struct {
    depth_test: bool = true,

    // Draw line vs filling triangles.
    line_mode: bool = false,
};

/// Builds a standard vertex+fragment graphics pipeline for `pass`:
/// triangle-list topology, back-face culling (CCW front), no MSAA,
/// alpha blending enabled, dynamic scissor, and (optionally) depth
/// test/write with a GREATER compare (reversed depth). The returned
/// Pipeline owns the pipeline and its layout; the shader modules
/// created from `vert_spv`/`frag_spv` are destroyed before returning.
pub fn createDefaultPipeline(
    device: vk.VkDevice,
    pass: vk.VkRenderPass,
    view_dim: vk.VkExtent2D,
    vert_spv: []align(4) const u8,
    frag_spv: []align(4) const u8,
    pvis_info: vk.VkPipelineVertexInputStateCreateInfo,
    pl_info: vk.VkPipelineLayoutCreateInfo,
    opts: PipelineOptions,
) Pipeline {
    const vert_mod = gvk.shader.createShaderModule(device, vert_spv);
    const frag_mod = gvk.shader.createShaderModule(device, frag_spv);

    // ShaderStages
    const vert_pss_info = vk.VkPipelineShaderStageCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
        .stage = vk.VK_SHADER_STAGE_VERTEX_BIT,
        .module = vert_mod,
        .pName = "main",
        .pNext = null,
        .flags = 0,
        .pSpecializationInfo = null,
    };
    const frag_pss_info = vk.VkPipelineShaderStageCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
        .stage = vk.VK_SHADER_STAGE_FRAGMENT_BIT,
        .module = frag_mod,
        .pName = "main",
        .pNext = null,
        .flags = 0,
        .pSpecializationInfo = null,
    };
    const stages = [_]vk.VkPipelineShaderStageCreateInfo{ vert_pss_info, frag_pss_info };

    // InputAssemblyState
    const pias_info = vk.VkPipelineInputAssemblyStateCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
        .topology = vk.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
        .primitiveRestartEnable = vk.VK_FALSE,
        .pNext = null,
        .flags = 0,
    };

    // ViewportState: full-target viewport and scissor from `view_dim`.
    const viewport = [_]vk.VkViewport{vk.VkViewport{
        .x = 0.0,
        .y = 0.0,
        .width = @intToFloat(f32, view_dim.width),
        .height = @intToFloat(f32, view_dim.height),
        .minDepth = 0,
        .maxDepth = 1,
    }};
    const scissor = [_]vk.VkRect2D{vk.VkRect2D{
        .offset = vk.VkOffset2D{ .x = 0, .y = 0 },
        .extent = view_dim,
    }};
    const pvs_info = vk.VkPipelineViewportStateCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
        .viewportCount = 1,
        .pViewports = &viewport,
        .scissorCount = 1,
        .pScissors = &scissor,
        .pNext = null,
        .flags = 0,
    };

    // RasterizationState
    const prs_info = vk.VkPipelineRasterizationStateCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
        .depthClampEnable = vk.VK_FALSE,
        .rasterizerDiscardEnable = vk.VK_FALSE,
        .polygonMode = if (opts.line_mode) vk.VK_POLYGON_MODE_LINE else vk.VK_POLYGON_MODE_FILL,
        .lineWidth = 1.0,
        .cullMode = vk.VK_CULL_MODE_BACK_BIT,
        .frontFace = vk.VK_FRONT_FACE_COUNTER_CLOCKWISE,
        .depthBiasEnable = vk.VK_FALSE,
        .pNext = null,
        .flags = 0,
        .depthBiasConstantFactor = 0,
        .depthBiasClamp = 0,
        .depthBiasSlopeFactor = 0,
    };

    // MultisampleState
    const pms_info = vk.VkPipelineMultisampleStateCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
        .sampleShadingEnable = vk.VK_FALSE,
        .rasterizationSamples = vk.VK_SAMPLE_COUNT_1_BIT,
        .pNext = null,
        .flags = 0,
        .minSampleShading = 0,
        .pSampleMask = null,
        .alphaToCoverageEnable = 0,
        .alphaToOneEnable = 0,
    };

    // ColorBlendAttachmentState
    // For now default to transparent blending.
    const pcba_state = vk.VkPipelineColorBlendAttachmentState{
        .colorWriteMask = vk.VK_COLOR_COMPONENT_R_BIT | vk.VK_COLOR_COMPONENT_G_BIT | vk.VK_COLOR_COMPONENT_B_BIT | vk.VK_COLOR_COMPONENT_A_BIT,
        .blendEnable = vk.VK_TRUE,
        .srcColorBlendFactor = vk.VK_BLEND_FACTOR_SRC_ALPHA,
        .dstColorBlendFactor = vk.VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
        .colorBlendOp = vk.VK_BLEND_OP_ADD,
        .srcAlphaBlendFactor = vk.VK_BLEND_FACTOR_ONE,
        .dstAlphaBlendFactor = vk.VK_BLEND_FACTOR_ZERO,
        .alphaBlendOp = vk.VK_BLEND_OP_ADD,
        // .blendEnable = vk.VK_FALSE,
        // .srcColorBlendFactor = vk.VK_BLEND_FACTOR_ZERO,
        // .dstColorBlendFactor = vk.VK_BLEND_FACTOR_ZERO,
        // .colorBlendOp = vk.VK_BLEND_OP_ADD,
        // .srcAlphaBlendFactor = vk.VK_BLEND_FACTOR_ZERO,
        // .dstAlphaBlendFactor = vk.VK_BLEND_FACTOR_ZERO,
        // .alphaBlendOp = vk.VK_BLEND_OP_ADD,
    };

    // ColorBlendState
    const pcbs_info = vk.VkPipelineColorBlendStateCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
        .logicOpEnable = vk.VK_FALSE,
        .logicOp = vk.VK_LOGIC_OP_COPY,
        .attachmentCount = 1,
        .pAttachments = &pcba_state,
        .blendConstants = [_]f32{ 0, 0, 0, 0 },
        .pNext = null,
        .flags = 0,
    };

    // DynamicState, allow these states to by dynamically set in command buffers.
    const dynamic_states = [_]vk.VkDynamicState{
        vk.VK_DYNAMIC_STATE_SCISSOR,
        // VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE isn't widely supported.
    };
    const dynamic_state_info = vk.VkPipelineDynamicStateCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
        .dynamicStateCount = dynamic_states.len,
        .flags = 0,
        .pDynamicStates = &dynamic_states,
        .pNext = null,
    };

    // DepthStencilState
    const depth_stencil_state = vk.VkPipelineDepthStencilStateCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
        .pNext = null,
        .flags = 0,
        .depthTestEnable = vk.fromBool(opts.depth_test),
        .depthWriteEnable = vk.fromBool(opts.depth_test),
        // Note that this will overwrite fragments if the depth is greater than what's in the buffer.
        // This implies that 0 is the far side and should be the clear value while 1 is the near side.
        .depthCompareOp = vk.VK_COMPARE_OP_GREATER,
        .depthBoundsTestEnable = vk.VK_FALSE,
        .minDepthBounds = 0,
        .maxDepthBounds = 1,
        .stencilTestEnable = vk.VK_FALSE,
        .front = std.mem.zeroInit(vk.VkStencilOpState, .{}),
        .back = std.mem.zeroInit(vk.VkStencilOpState, .{}),
    };

    var pipeline_layout: vk.VkPipelineLayout = undefined;
    var res = vk.createPipelineLayout(device, &pl_info, null, &pipeline_layout);
    // FIX: this result was previously ignored; a failed layout creation
    // would silently feed an invalid handle into the pipeline create info.
    vk.assertSuccess(res);

    const g_pipelines = [_]vk.VkGraphicsPipelineCreateInfo{vk.VkGraphicsPipelineCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
        .stageCount = @intCast(u32, stages.len),
        .pStages = &stages,
        .pVertexInputState = &pvis_info,
        .pInputAssemblyState = &pias_info,
        .pViewportState = &pvs_info,
        .pRasterizationState = &prs_info,
        .pMultisampleState = &pms_info,
        .pColorBlendState = &pcbs_info,
        .layout = pipeline_layout,
        .renderPass = pass,
        .subpass = 0,
        .basePipelineHandle = null,
        .pNext = null,
        .flags = 0,
        .pTessellationState = null,
        .pDepthStencilState = &depth_stencil_state,
        .pDynamicState = &dynamic_state_info,
        .basePipelineIndex = 0,
    }};

    var pipeln: vk.VkPipeline = undefined;
    res = vk.createGraphicsPipelines(device, null, @intCast(u32, g_pipelines.len), &g_pipelines, null, &pipeln);
    vk.assertSuccess(res);

    // Shader modules may be destroyed once the pipeline has been created.
    vk.destroyShaderModule(device, vert_mod, null);
    vk.destroyShaderModule(device, frag_mod, null);

    return .{
        .pipeline = pipeln,
        .layout = pipeline_layout,
    };
}
graphics/src/backend/vk/pipeline.zig
const std = @import("std");
const aoc = @import("aoc-lib.zig");

/// AoC 2020 day 15 (the "memory game" / Van Eck-style sequence): starting
/// from `nums`, each subsequent number is 0 if the previous number was new,
/// otherwise the distance (in turns) since it was last spoken. Returns the
/// number spoken on turn `maxTurn`.
///
/// Invariant: `lastSeen[x]` holds the turn AFTER `x` was last spoken (i.e.
/// it is written at the start of the following turn), consistently in both
/// loops — so `t - lastSeen[p]` yields the correct gap. A value of 0 means
/// "never spoken before". Numbers are used to index `lastSeen`, which works
/// because every spoken value is < maxTurn.
/// NOTE(review): assumes `nums` is non-empty; with an empty slice `p`/`n`
/// would be read while undefined.
fn calc(alloc: std.mem.Allocator, nums: []const usize, maxTurn: usize) usize {
    var lastSeen = alloc.alloc(usize, maxTurn) catch unreachable;
    defer alloc.free(lastSeen);
    std.mem.set(usize, lastSeen, 0);

    var n: usize = undefined; // number spoken this turn
    var p: usize = undefined; // number spoken the previous turn
    var t: usize = 1;

    // Seed phase: speak the starting numbers, recording each previous
    // number one turn late (per the invariant above).
    while (t <= nums.len) : (t += 1) {
        n = nums[t - 1];
        if (t > 1) {
            lastSeen[p] = t;
        }
        p = n;
    }

    // Play phase: 0 for a fresh number, otherwise the gap since last spoken.
    while (t <= maxTurn) : (t += 1) {
        if (lastSeen[p] != 0) {
            n = t - lastSeen[p];
        } else {
            n = 0;
        }
        lastSeen[p] = t;
        p = n;
    }
    return n;
}

/// Part 1: number spoken on turn 2020.
fn part1(alloc: std.mem.Allocator, in: []const usize) usize {
    return calc(alloc, in, 2020);
}

/// Part 2: number spoken on turn 30,000,000.
fn part2(alloc: std.mem.Allocator, in: []const usize) usize {
    return calc(alloc, in, 30000000);
}

test "examples" {
    const test1 = try aoc.Ints(aoc.talloc, usize, aoc.test1file);
    defer aoc.talloc.free(test1);
    const test2 = try aoc.Ints(aoc.talloc, usize, aoc.test2file);
    defer aoc.talloc.free(test2);
    const test3 = try aoc.Ints(aoc.talloc, usize, aoc.test3file);
    defer aoc.talloc.free(test3);
    const test4 = try aoc.Ints(aoc.talloc, usize, aoc.test4file);
    defer aoc.talloc.free(test4);
    const test5 = try aoc.Ints(aoc.talloc, usize, aoc.test5file);
    defer aoc.talloc.free(test5);
    const test6 = try aoc.Ints(aoc.talloc, usize, aoc.test6file);
    defer aoc.talloc.free(test6);
    const test7 = try aoc.Ints(aoc.talloc, usize, aoc.test7file);
    defer aoc.talloc.free(test7);
    const inp = try aoc.Ints(aoc.talloc, usize, aoc.inputfile);
    defer aoc.talloc.free(inp);

    try aoc.assertEq(@as(usize, 436), part1(aoc.talloc, test1));
    try aoc.assertEq(@as(usize, 1), part1(aoc.talloc, test2));
    try aoc.assertEq(@as(usize, 10), part1(aoc.talloc, test3));
    try aoc.assertEq(@as(usize, 27), part1(aoc.talloc, test4));
    try aoc.assertEq(@as(usize, 78), part1(aoc.talloc, test5));
    try aoc.assertEq(@as(usize, 438), part1(aoc.talloc, test6));
    try aoc.assertEq(@as(usize, 1836), part1(aoc.talloc, test7));
    try aoc.assertEq(@as(usize, 260), part1(aoc.talloc, inp));

    try aoc.assertEq(@as(usize, 175594), part2(aoc.talloc, test1));
    try aoc.assertEq(@as(usize, 2578), part2(aoc.talloc, test2));
    try aoc.assertEq(@as(usize, 3544142), part2(aoc.talloc, test3));
    try aoc.assertEq(@as(usize, 261214), part2(aoc.talloc, test4));
    try aoc.assertEq(@as(usize, 6895259), part2(aoc.talloc, test5));
    try aoc.assertEq(@as(usize, 18), part2(aoc.talloc, test6));
    try aoc.assertEq(@as(usize, 362), part2(aoc.talloc, test7));
    try aoc.assertEq(@as(usize, 950), part2(aoc.talloc, inp));
}

/// Harness entry point: parses the input and prints both answers
/// (suppressed when benchmarking).
fn day15(inp: []const u8, bench: bool) anyerror!void {
    const ints = try aoc.Ints(aoc.halloc, usize, inp);
    defer aoc.halloc.free(ints);
    var p1 = part1(aoc.halloc, ints);
    var p2 = part2(aoc.halloc, ints);
    if (!bench) {
        try aoc.print("Part 1: {}\nPart 2: {}\n", .{ p1, p2 });
    }
}

pub fn main() anyerror!void {
    try aoc.benchme(aoc.input(), day15);
}
2020/15/aoc.zig
const std = @import("std");
const math = std.math;

// Canonical quiet-NaN and +infinity bit patterns for f128 (high 64 bits
// shifted into place; low 64 bits zero).
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64);

const __multf3 = @import("mulXf3.zig").__multf3;
const __mulxf3 = @import("mulXf3.zig").__mulxf3;
const __muldf3 = @import("mulXf3.zig").__muldf3;
const __mulsf3 = @import("mulXf3.zig").__mulsf3;

// return true if equal
// use two 64-bit integers instead of one 128-bit integer
// because 128-bit integer constant can't be assigned directly
fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool {
    const rep = @bitCast(u128, result);
    const hi = @intCast(u64, rep >> 64);
    const lo = @truncate(u64, rep);

    if (hi == expectedHi and lo == expectedLo) {
        return true;
    }
    // test other possible NaN representations (when the canonical quiet NaN
    // is expected, accept any NaN payload, including signaling NaNs)
    if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
        if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
            ((hi & 0xffffffffffff) > 0 or lo > 0))
        {
            return true;
        }
    }
    return false;
}

// Multiplies a*b with __multf3 and checks the result against the expected
// bit pattern (split into hi/lo u64 halves).
fn test__multf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
    const x = __multf3(a, b);
    if (compareResultLD(x, expected_hi, expected_lo))
        return;
    @panic("__multf3 test failure");
}

// Builds an f128 NaN whose low 48 payload bits come from `rand`.
fn makeNaN128(rand: u64) f128 {
    const int_result = @as(u128, 0x7fff000000000000 | (rand & 0xffffffffffff)) << 64;
    const float_result = @bitCast(f128, int_result);
    return float_result;
}

test "multf3" {
    // qNaN * any = qNaN
    try test__multf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);

    // NaN * any = NaN
    const a = makeNaN128(0x800030000000);
    try test__multf3(a, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);

    // inf * any = inf
    try test__multf3(inf128, 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0);

    // any * any
    try test__multf3(
        @bitCast(f128, @as(u128, 0x40042eab345678439abcdefea5678234)),
        @bitCast(f128, @as(u128, 0x3ffeedcb34a235253948765432134675)),
        0x400423e7f9e3c9fc,
        0xd906c2c2a85777c4,
    );
    try test__multf3(
        @bitCast(f128, @as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50)),
        @bitCast(f128, @as(u128, 0x3ff6ed8764648369535adf4be3214568)),
        0x3fc52a163c6223fc,
        0xc94c4bf0430768b4,
    );
    try test__multf3(
        0x1.234425696abcad34a35eeffefdcbap+456,
        0x451.ed98d76e5d46e5f24323dff21ffp+600,
        0x44293a91de5e0e94,
        0xe8ed17cc2cdf64ac,
    );
    try test__multf3(
        @bitCast(f128, @as(u128, 0x3f154356473c82a9fabf2d22ace345df)),
        @bitCast(f128, @as(u128, 0x3e38eda98765476743ab21da23d45679)),
        0x3d4f37c1a3137cae,
        0xfc6807048bc2836a,
    );

    // Underflow to zero.
    try test__multf3(0x1.23456734245345p-10000, 0x1.edcba524498724p-6497, 0x0, 0x0);

    // Denormal operands.
    try test__multf3(
        0x0.0000000000000000000000000001p-16382,
        0x1p16383,
        0x3f90000000000000,
        0x0,
    );
    try test__multf3(
        0x1p16383,
        0x0.0000000000000000000000000001p-16382,
        0x3f90000000000000,
        0x0,
    );

    // Rounding of the sticky bit.
    try test__multf3(0x1.0000_0000_0000_0000_0000_0000_0001p+0, 0x1.8p+5, 0x4004_8000_0000_0000, 0x0000_0000_0000_0002);
    try test__multf3(0x1.0000_0000_0000_0000_0000_0000_0002p+0, 0x1.8p+5, 0x4004_8000_0000_0000, 0x0000_0000_0000_0003);
}

// Canonical f80 quiet NaN: std NaN with the explicit quiet bit set.
const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));

// Multiplies a*b with __mulxf3 and compares against an expected u80 bit
// pattern; any NaN matches an expected NaN (payload propagation untested).
fn test__mulxf3(a: f80, b: f80, expected: u80) !void {
    const x = __mulxf3(a, b);
    const rep = @bitCast(u80, x);
    if (rep == expected)
        return;

    if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
        return; // We don't currently test NaN payload propagation

    return error.TestFailed;
}

test "mulxf3" {
    // NaN * any = NaN
    try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
    try test__mulxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));

    // any * NaN = NaN
    try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80));
    try test__mulxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80));

    // NaN * inf = NaN
    try test__mulxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80));

    // inf * NaN = NaN
    try test__mulxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80));

    // inf * inf = inf
    try test__mulxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80)));

    // inf * -inf = -inf
    try test__mulxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, -math.inf(f80)));

    // -inf * inf = -inf
    try test__mulxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, -math.inf(f80)));

    // inf * any = inf
    try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80)));

    // any * inf = inf
    try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80)));

    // any * any
    try test__mulxf3(0x1.0p+0, 0x1.dcba987654321p+5, 0x4004_ee5d_4c3b_2a19_0800);
    try test__mulxf3(0x1.0000_0000_0000_0004p+0, 0x1.8p+5, 0x4004_C000_0000_0000_0003); // exact
    try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.0p+5, 0x4004_8000_0000_0000_0001); // exact
    try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.7ffep+5, 0x4004_BFFF_0000_0000_0001); // round down
    try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.8p+5, 0x4004_C000_0000_0000_0002); // round up to even
    try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.8002p+5, 0x4004_C001_0000_0000_0002); // round up
    try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.0p+6, 0x4005_8000_0000_0000_0001); // exact
    try test__mulxf3(0x1.0000_0001p+0, 0x1.0000_0001p+0, 0x3FFF_8000_0001_0000_0000); // round down to even
    try test__mulxf3(0x1.0000_0001p+0, 0x1.0000_0001_0002p+0, 0x3FFF_8000_0001_0001_0001); // round up
}
lib/std/special/compiler_rt/mulXf3_test.zig
const std = @import("std"); const Builder = std.build.Builder; const glfw = @import("libs/mach-glfw/build.zig"); const system_sdk = @import("libs/mach-glfw/system_sdk.zig"); pub const LinuxWindowManager = enum { X11, Wayland, }; pub const Options = struct { /// Only respected on Linux. linux_window_manager: LinuxWindowManager = .X11, /// Only respected on Windows. Whether or not to build a UWP application, linking WindowsApp.lib /// instead of user32.lib. windows_uwp_app: bool = false, /// Only respected on Windows. d3d12: bool = true, /// Only respected on MacOS. metal: bool = true, }; pub fn link(b: *Builder, step: *std.build.LibExeObjStep, options: Options) void { const lib_mach_dawn_native = buildLibMachDawnNative(b, step); step.linkLibrary(lib_mach_dawn_native); const lib_dawn_common = buildLibDawnCommon(b, step); step.linkLibrary(lib_dawn_common); const lib_dawn_platform = buildLibDawnPlatform(b, step); step.linkLibrary(lib_dawn_platform); // dawn-native const lib_abseil_cpp = buildLibAbseilCpp(b, step); step.linkLibrary(lib_abseil_cpp); const lib_dawn_native = buildLibDawnNative(b, step, options); step.linkLibrary(lib_dawn_native); const lib_dawn_wire = buildLibDawnWire(b, step); step.linkLibrary(lib_dawn_wire); const lib_dawn_utils = buildLibDawnUtils(b, step, options); step.linkLibrary(lib_dawn_utils); const lib_spirv_tools = buildLibSPIRVTools(b, step); step.linkLibrary(lib_spirv_tools); const lib_tint = buildLibTint(b, step); step.linkLibrary(lib_tint); } fn buildLibMachDawnNative(b: *Builder, step: *std.build.LibExeObjStep) *std.build.LibExeObjStep { var main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable; const lib = b.addStaticLibrary("dawn-native-mach", main_abs); lib.install(); lib.setBuildMode(step.build_mode); lib.setTarget(step.target); lib.linkLibCpp(); glfw.link(b, lib, .{ .system_sdk = .{ .set_sysroot = false } }); lib.addCSourceFile("src/dawn/dawn_native_mach.cpp", &.{ 
include("libs/mach-glfw/upstream/glfw/include"), include("libs/dawn/out/Debug/gen/src/include"), include("libs/dawn/out/Debug/gen/src"), include("libs/dawn/src/include"), include("libs/dawn/src"), }); return lib; } // Builds common sources; derived from src/common/BUILD.gn fn buildLibDawnCommon(b: *Builder, step: *std.build.LibExeObjStep) *std.build.LibExeObjStep { var main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable; const lib = b.addStaticLibrary("dawn-common", main_abs); lib.setBuildMode(step.build_mode); lib.setTarget(step.target); lib.linkLibCpp(); for ([_][]const u8{ "src/common/Assert.cpp", "src/common/DynamicLib.cpp", "src/common/GPUInfo.cpp", "src/common/Log.cpp", "src/common/Math.cpp", "src/common/RefCounted.cpp", "src/common/Result.cpp", "src/common/SlabAllocator.cpp", "src/common/SystemUtils.cpp", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; lib.addCSourceFile(abs_path, &.{include("libs/dawn/src")}); } const target = (std.zig.system.NativeTargetInfo.detect(b.allocator, step.target) catch unreachable).target; if (target.os.tag == .macos) { system_sdk.include(b, lib, .{}); lib.linkFramework("Foundation"); var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn/src/common/SystemUtils_mac.mm" }) catch unreachable; lib.addCSourceFile(abs_path, &.{include("libs/dawn/src")}); } return lib; } // Build dawn platform sources; derived from src/dawn_platform/BUILD.gn fn buildLibDawnPlatform(b: *Builder, step: *std.build.LibExeObjStep) *std.build.LibExeObjStep { var main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable; const lib = b.addStaticLibrary("dawn-platform", main_abs); lib.install(); lib.setBuildMode(step.build_mode); lib.setTarget(step.target); lib.linkLibCpp(); for ([_][]const u8{ "src/dawn_platform/DawnPlatform.cpp", "src/dawn_platform/WorkerThread.cpp", 
"src/dawn_platform/tracing/EventTracer.cpp", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; lib.addCSourceFile(abs_path, &.{ include("libs/dawn/src"), include("libs/dawn/src/include"), include("libs/dawn/out/Debug/gen/src/include"), }); } return lib; } // Builds dawn native sources; derived from src/dawn_native/BUILD.gn fn buildLibDawnNative(b: *Builder, step: *std.build.LibExeObjStep, options: Options) *std.build.LibExeObjStep { var main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable; const lib = b.addStaticLibrary("dawn-native", main_abs); lib.install(); lib.setBuildMode(step.build_mode); lib.setTarget(step.target); lib.linkLibCpp(); const target = (std.zig.system.NativeTargetInfo.detect(b.allocator, step.target) catch unreachable).target; const flags = &.{ "-DDAWN_ENABLE_BACKEND_METAL", "-DDAWN_ENABLE_BACKEND_NULL", include("libs/dawn/src"), include("libs/dawn/src/include"), include("libs/dawn/third_party/vulkan-deps/spirv-tools/src/include"), include("libs/dawn/third_party/abseil-cpp"), "-DTINT_BUILD_SPV_READER=1", "-DTINT_BUILD_SPV_WRITER=1", "-DTINT_BUILD_WGSL_READER=1", "-DTINT_BUILD_WGSL_WRITER=1", "-DTINT_BUILD_MSL_WRITER=1", "-DTINT_BUILD_HLSL_WRITER=1", include("libs/dawn/third_party/tint"), include("libs/dawn/third_party/tint/include"), include("libs/dawn/out/Debug/gen/src/include"), include("libs/dawn/out/Debug/gen/src"), }; // #if defined(DAWN_ENABLE_BACKEND_D3D12) // #if defined(DAWN_ENABLE_BACKEND_METAL) // #if defined(DAWN_ENABLE_BACKEND_VULKAN) // #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) // #if defined(DAWN_ENABLE_BACKEND_OPENGLES) // #if defined(DAWN_ENABLE_BACKEND_NULL) for ([_][]const u8{ "out/Debug/gen/src/dawn/dawn_thread_dispatch_proc.cpp", "out/Debug/gen/src/dawn/dawn_proc.c", "out/Debug/gen/src/dawn/webgpu_cpp.cpp", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; 
lib.addCSourceFile(abs_path, flags); } for ([_][]const u8{ "src/dawn_native/Adapter.cpp", "src/dawn_native/AsyncTask.cpp", "src/dawn_native/AttachmentState.cpp", "src/dawn_native/BackendConnection.cpp", "src/dawn_native/BindGroup.cpp", "src/dawn_native/BindGroupLayout.cpp", "src/dawn_native/BindingInfo.cpp", "src/dawn_native/BuddyAllocator.cpp", "src/dawn_native/BuddyMemoryAllocator.cpp", "src/dawn_native/Buffer.cpp", "src/dawn_native/BufferLocation.cpp", "src/dawn_native/CachedObject.cpp", "src/dawn_native/CallbackTaskManager.cpp", "src/dawn_native/CommandAllocator.cpp", "src/dawn_native/CommandBuffer.cpp", "src/dawn_native/CommandBufferStateTracker.cpp", "src/dawn_native/CommandEncoder.cpp", "src/dawn_native/CommandValidation.cpp", "src/dawn_native/Commands.cpp", "src/dawn_native/CompilationMessages.cpp", "src/dawn_native/ComputePassEncoder.cpp", "src/dawn_native/ComputePipeline.cpp", "src/dawn_native/CopyTextureForBrowserHelper.cpp", "src/dawn_native/CreatePipelineAsyncTask.cpp", "src/dawn_native/Device.cpp", "src/dawn_native/DynamicUploader.cpp", "src/dawn_native/EncodingContext.cpp", "src/dawn_native/Error.cpp", "src/dawn_native/ErrorData.cpp", "src/dawn_native/ErrorInjector.cpp", "src/dawn_native/ErrorScope.cpp", "src/dawn_native/Extensions.cpp", "src/dawn_native/ExternalTexture.cpp", "src/dawn_native/Format.cpp", "src/dawn_native/IndirectDrawMetadata.cpp", "src/dawn_native/IndirectDrawValidationEncoder.cpp", "src/dawn_native/Instance.cpp", "src/dawn_native/InternalPipelineStore.cpp", "src/dawn_native/Limits.cpp", "src/dawn_native/ObjectBase.cpp", "src/dawn_native/ObjectContentHasher.cpp", "src/dawn_native/PassResourceUsageTracker.cpp", "src/dawn_native/PerStage.cpp", "src/dawn_native/PersistentCache.cpp", "src/dawn_native/Pipeline.cpp", "src/dawn_native/PipelineLayout.cpp", "src/dawn_native/PooledResourceMemoryAllocator.cpp", "src/dawn_native/ProgrammablePassEncoder.cpp", "src/dawn_native/QueryHelper.cpp", "src/dawn_native/QuerySet.cpp", 
"src/dawn_native/Queue.cpp", "src/dawn_native/RenderBundle.cpp", "src/dawn_native/RenderBundleEncoder.cpp", "src/dawn_native/RenderEncoderBase.cpp", "src/dawn_native/RenderPassEncoder.cpp", "src/dawn_native/RenderPipeline.cpp", "src/dawn_native/ResourceMemoryAllocation.cpp", "src/dawn_native/RingBufferAllocator.cpp", "src/dawn_native/Sampler.cpp", "src/dawn_native/ScratchBuffer.cpp", "src/dawn_native/ShaderModule.cpp", "src/dawn_native/StagingBuffer.cpp", "src/dawn_native/Subresource.cpp", "src/dawn_native/Surface.cpp", "src/dawn_native/SwapChain.cpp", "src/dawn_native/Texture.cpp", "src/dawn_native/TintUtils.cpp", "src/dawn_native/Toggles.cpp", "src/dawn_native/VertexFormat.cpp", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; lib.addCSourceFile(abs_path, flags); } // dawn_native_utils_gen for ([_][]const u8{ "out/Debug/gen/src/dawn_native/ChainUtils_autogen.cpp", "out/Debug/gen/src/dawn_native/ProcTable.cpp", "out/Debug/gen/src/dawn_native/wgpu_structs_autogen.cpp", "out/Debug/gen/src/dawn_native/ValidationUtils_autogen.cpp", "out/Debug/gen/src/dawn_native/webgpu_absl_format_autogen.cpp", "out/Debug/gen/src/dawn_native/ObjectType_autogen.cpp", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; lib.addCSourceFile(abs_path, flags); } // TODO: could allow enable_vulkan_validation_layers here. See src/dawn_native/BUILD.gn // TODO: allow use_angle here. See src/dawn_native/BUILD.gn // TODO: could allow use_swiftshader here. See src/dawn_native/BUILD.gn // TODO: allow dawn_enable_vulkan here. See src/dawn_native/BUILD.gn // TODO: allow dawn_enable_opengl here. 
See src/dawn_native/BUILD.gn switch (target.os.tag) { .windows => { if (options.windows_uwp_app) { if (lib.build_mode == .Debug) { // DXGIGetDebugInterface1 is defined in dxgi.lib // But this API is tagged as a development-only capability // which implies that linking to this function will cause // the application to fail Windows store certification // So we only link to it in debug build when compiling for UWP. // In win32 we load dxgi.dll using LoadLibrary // so no need for static linking. lib.linkSystemLibrary("dxgi.lib"); } lib.linkSystemLibrary("user32.lib"); } // TODO: // if (dawn_enable_d3d12) { // libs += [ "dxguid.lib" ] // sources += [ // "d3d12/AdapterD3D12.cpp", // "d3d12/BackendD3D12.cpp", // "d3d12/BindGroupD3D12.cpp", // "d3d12/BindGroupLayoutD3D12.cpp", // "d3d12/BufferD3D12.cpp", // "d3d12/CPUDescriptorHeapAllocationD3D12.cpp", // "d3d12/CommandAllocatorManager.cpp", // "d3d12/CommandBufferD3D12.cpp", // "d3d12/CommandRecordingContext.cpp", // "d3d12/ComputePipelineD3D12.cpp", // "d3d12/D3D11on12Util.cpp", // "d3d12/D3D12Error.cpp", // "d3d12/D3D12Info.cpp", // "d3d12/DeviceD3D12.cpp", // "d3d12/GPUDescriptorHeapAllocationD3D12.cpp", // "d3d12/HeapAllocatorD3D12.cpp", // "d3d12/HeapD3D12.cpp", // "d3d12/NativeSwapChainImplD3D12.cpp", // "d3d12/PageableD3D12.cpp", // "d3d12/PipelineLayoutD3D12.cpp", // "d3d12/PlatformFunctions.cpp", // "d3d12/QuerySetD3D12.cpp", // "d3d12/QueueD3D12.cpp", // "d3d12/RenderPassBuilderD3D12.cpp", // "d3d12/RenderPipelineD3D12.cpp", // "d3d12/ResidencyManagerD3D12.cpp", // "d3d12/ResourceAllocatorManagerD3D12.cpp", // "d3d12/ResourceHeapAllocationD3D12.cpp", // "d3d12/SamplerD3D12.cpp", // "d3d12/SamplerHeapCacheD3D12.cpp", // "d3d12/ShaderModuleD3D12.cpp", // "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp", // "d3d12/StagingBufferD3D12.cpp", // "d3d12/StagingDescriptorAllocatorD3D12.cpp", // "d3d12/SwapChainD3D12.cpp", // "d3d12/TextureCopySplitter.cpp", // "d3d12/TextureD3D12.cpp", // "d3d12/UtilsD3D12.cpp", // ] // 
} }, .macos => { if (options.metal) { lib.linkFramework("Metal"); lib.linkFramework("CoreGraphics"); lib.linkFramework("Foundation"); lib.linkFramework("IOKit"); lib.linkFramework("IOSurface"); lib.linkFramework("QuartzCore"); for ([_][]const u8{ "src/dawn_native/metal/MetalBackend.mm", "src/dawn_native/Surface_metal.mm", "src/dawn_native/metal/BackendMTL.mm", "src/dawn_native/metal/BindGroupLayoutMTL.mm", "src/dawn_native/metal/BindGroupMTL.mm", "src/dawn_native/metal/BufferMTL.mm", "src/dawn_native/metal/CommandBufferMTL.mm", "src/dawn_native/metal/CommandRecordingContext.mm", "src/dawn_native/metal/ComputePipelineMTL.mm", "src/dawn_native/metal/DeviceMTL.mm", "src/dawn_native/metal/PipelineLayoutMTL.mm", "src/dawn_native/metal/QuerySetMTL.mm", "src/dawn_native/metal/QueueMTL.mm", "src/dawn_native/metal/RenderPipelineMTL.mm", "src/dawn_native/metal/SamplerMTL.mm", "src/dawn_native/metal/ShaderModuleMTL.mm", "src/dawn_native/metal/StagingBufferMTL.mm", "src/dawn_native/metal/SwapChainMTL.mm", "src/dawn_native/metal/TextureMTL.mm", "src/dawn_native/metal/UtilsMetal.mm", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; lib.addCSourceFile(abs_path, flags); } } }, else => { if (options.linux_window_manager == .X11) { lib.linkSystemLibrary("X11"); for ([_][]const u8{ "src/dawn_native/XlibXcbFunctions.cpp", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; lib.addCSourceFile(abs_path, flags); } } }, } for ([_][]const u8{ "src/dawn_native/null/DeviceNull.cpp", }) |path| { var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable; lib.addCSourceFile(abs_path, flags); } // TODO: no vulkan on macos // TODO: // if (dawn_enable_opengl || dawn_enable_vulkan) { // sources += [ // "SpirvValidation.cpp", // ] // } // TODO: // if (dawn_enable_opengl) { // public_deps += [ // ":dawn_native_opengl_loader_gen", // 
"${dawn_root}/third_party/khronos:khronos_platform", // ] // sources += get_target_outputs(":dawn_native_opengl_loader_gen") // sources += [ // "opengl/BackendGL.cpp", // "opengl/BindGroupGL.cpp", // "opengl/BindGroupLayoutGL.cpp", // "opengl/BufferGL.cpp", // "opengl/CommandBufferGL.cpp", // "opengl/ComputePipelineGL.cpp", // "opengl/DeviceGL.cpp", // "opengl/GLFormat.cpp", // "opengl/NativeSwapChainImplGL.cpp", // "opengl/OpenGLFunctions.cpp", // "opengl/OpenGLVersion.cpp", // "opengl/PersistentPipelineStateGL.cpp", // "opengl/PipelineGL.cpp", // "opengl/PipelineLayoutGL.cpp", // "opengl/QuerySetGL.cpp", // "opengl/QueueGL.cpp", // "opengl/RenderPipelineGL.cpp", // "opengl/SamplerGL.cpp", // "opengl/ShaderModuleGL.cpp", // "opengl/SpirvUtils.cpp", // "opengl/SwapChainGL.cpp", // "opengl/TextureGL.cpp", // "opengl/UtilsGL.cpp", // ] // } // TODO: no vulkan on macos // TODO: // if (dawn_enable_vulkan) { // public_deps += [ "${dawn_root}/third_party/khronos:vulkan_headers" ] // sources += [ // "vulkan/AdapterVk.cpp", // "vulkan/BackendVk.cpp", // "vulkan/BindGroupLayoutVk.cpp", // "vulkan/BindGroupVk.cpp", // "vulkan/BufferVk.cpp", // "vulkan/CommandBufferVk.cpp", // "vulkan/ComputePipelineVk.cpp", // "vulkan/DescriptorSetAllocator.cpp", // "vulkan/DeviceVk.cpp", // "vulkan/FencedDeleter.cpp", // "vulkan/NativeSwapChainImplVk.cpp", // "vulkan/PipelineLayoutVk.cpp", // "vulkan/QuerySetVk.cpp", // "vulkan/QueueVk.cpp", // "vulkan/RenderPassCache.cpp", // "vulkan/RenderPipelineVk.cpp", // "vulkan/ResourceHeapVk.cpp", // "vulkan/ResourceMemoryAllocatorVk.cpp", // "vulkan/SamplerVk.cpp", // "vulkan/ShaderModuleVk.cpp", // "vulkan/StagingBufferVk.cpp", // "vulkan/SwapChainVk.cpp", // "vulkan/TextureVk.cpp", // "vulkan/UtilsVulkan.cpp", // "vulkan/VulkanError.cpp", // "vulkan/VulkanExtensions.cpp", // "vulkan/VulkanFunctions.cpp", // "vulkan/VulkanInfo.cpp", // ] // TODO: // if (is_chromeos) { // sources += [ // "vulkan/external_memory/MemoryServiceDmaBuf.cpp", // 
"vulkan/external_semaphore/SemaphoreServiceFD.cpp", // ] // defines += [ "DAWN_USE_SYNC_FDS" ] // } else if (is_linux) { // sources += [ // "vulkan/external_memory/MemoryServiceOpaqueFD.cpp", // "vulkan/external_semaphore/SemaphoreServiceFD.cpp", // ] // } else if (is_fuchsia) { // sources += [ // "vulkan/external_memory/MemoryServiceZirconHandle.cpp", // "vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp", // ] // } else { // sources += [ // "vulkan/external_memory/MemoryServiceNull.cpp", // "vulkan/external_semaphore/SemaphoreServiceNull.cpp", // ] // } // TODO: could add is_fuchsia checks if anybody cares about Fuchsia? // TODO: // if (enable_vulkan_validation_layers) { // defines += [ // "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS", // "DAWN_VK_DATA_DIR=\"$vulkan_data_subdir\"", // ] // } // if (enable_vulkan_loader) { // data_deps += [ "${dawn_vulkan_loader_dir}:libvulkan" ] // } // if (use_swiftshader) { // data_deps += [ // "${dawn_swiftshader_dir}/src/Vulkan:icd_file", // "${dawn_swiftshader_dir}/src/Vulkan:swiftshader_libvulkan", // ] // defines += [ // "DAWN_ENABLE_SWIFTSHADER", // "DAWN_SWIFTSHADER_VK_ICD_JSON=\"${swiftshader_icd_file_name}\"", // ] // } // } // TODO: // if (use_angle) { // data_deps += [ // "${dawn_angle_dir}:libEGL", // "${dawn_angle_dir}:libGLESv2", // ] // } // } // TODO: ??? // # The static and shared libraries for dawn_native. Most of the files are // # already compiled in dawn_native_sources, but we still need to compile // # files defining exported symbols. 
// dawn_component("dawn_native") {
//   DEFINE_PREFIX = "DAWN_NATIVE"
//   # Make headers publically visible
//   public_deps = [ ":dawn_native_headers" ]
for ([_][]const u8{
    "src/dawn_native/DawnNative.cpp",
    "src/dawn_native/null/NullBackend.cpp",
}) |path| {
    // Joined path is never mutated after creation, so bind it as const.
    const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
    lib.addCSourceFile(abs_path, flags);
}
// if (dawn_enable_d3d12) {
//   sources += [ "d3d12/D3D12Backend.cpp" ]
// }
// if (dawn_enable_opengl) {
//   sources += [ "opengl/OpenGLBackend.cpp" ]
// }
// if (dawn_enable_vulkan) {
//   sources += [ "vulkan/VulkanBackend.cpp" ]
//   if (enable_vulkan_validation_layers) {
//     data_deps = [ "${dawn_vulkan_validation_layers_dir}:vulkan_validation_layers" ]
//     if (!is_android) {
//       data_deps += [ "${dawn_vulkan_validation_layers_dir}:vulkan_gen_json_files" ]
//     }
//   }
// }
// }
return lib;
}

// Builds third party tint sources; derived from third_party/tint/src/BUILD.gn
//
// Creates and installs a static library named "tint" that compiles the Tint
// shader-compiler sources with the same build mode and target as `step`.
// Returns the library so the caller can link it into the main artifact.
fn buildLibTint(b: *Builder, step: *std.build.LibExeObjStep) *std.build.LibExeObjStep {
    // A Zig static library needs a root source file; a dummy one is used here
    // since only C++ sources are actually compiled into it.
    const main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable;
    const lib = b.addStaticLibrary("tint", main_abs);
    lib.install();
    lib.setBuildMode(step.build_mode);
    lib.setTarget(step.target);
    lib.linkLibCpp();

    const flags = &.{
        "-DTINT_BUILD_SPV_READER=1",
        "-DTINT_BUILD_SPV_WRITER=1",
        "-DTINT_BUILD_WGSL_READER=1",
        "-DTINT_BUILD_WGSL_WRITER=1",
        "-DTINT_BUILD_MSL_WRITER=1",
        "-DTINT_BUILD_HLSL_WRITER=1",

        // Required for TINT_BUILD_SPV_READER=1 and TINT_BUILD_SPV_WRITER=1
        include("libs/dawn/third_party/vulkan-deps"),
        include("libs/dawn/third_party/vulkan-deps/spirv-tools/src"),
        include("libs/dawn/third_party/vulkan-deps/spirv-tools/src/include"),
        include("libs/dawn/third_party/vulkan-deps/spirv-headers/src/include"),
        include("libs/dawn/out/Debug/gen/third_party/vulkan-deps/spirv-tools/src"),
        include("libs/dawn/out/Debug/gen/third_party/vulkan-deps/spirv-tools/src/include"),
        include("libs/dawn/third_party/tint"),
        include("libs/dawn/third_party/tint/include"),
    };

    // libtint_core_all_src
    for ([_][]const u8{
        "third_party/tint/src/ast/access.cc",
        "third_party/tint/src/ast/alias.cc",
        "third_party/tint/src/ast/array.cc",
        "third_party/tint/src/ast/array_accessor_expression.cc",
        "third_party/tint/src/ast/assignment_statement.cc",
        "third_party/tint/src/ast/ast_type.cc",
        "third_party/tint/src/ast/atomic.cc",
        "third_party/tint/src/ast/binary_expression.cc",
        "third_party/tint/src/ast/binding_decoration.cc",
        "third_party/tint/src/ast/bitcast_expression.cc",
        "third_party/tint/src/ast/block_statement.cc",
        "third_party/tint/src/ast/bool.cc",
        "third_party/tint/src/ast/bool_literal.cc",
        "third_party/tint/src/ast/break_statement.cc",
        "third_party/tint/src/ast/builtin.cc",
        "third_party/tint/src/ast/builtin_decoration.cc",
        "third_party/tint/src/ast/call_expression.cc",
        "third_party/tint/src/ast/call_statement.cc",
        "third_party/tint/src/ast/case_statement.cc",
        "third_party/tint/src/ast/constructor_expression.cc",
        "third_party/tint/src/ast/continue_statement.cc",
        "third_party/tint/src/ast/decoration.cc",
        "third_party/tint/src/ast/depth_multisampled_texture.cc",
        "third_party/tint/src/ast/depth_texture.cc",
        "third_party/tint/src/ast/disable_validation_decoration.cc",
        "third_party/tint/src/ast/discard_statement.cc",
        "third_party/tint/src/ast/else_statement.cc",
        "third_party/tint/src/ast/expression.cc",
        "third_party/tint/src/ast/external_texture.cc",
        "third_party/tint/src/ast/f32.cc",
        "third_party/tint/src/ast/fallthrough_statement.cc",
        "third_party/tint/src/ast/float_literal.cc",
        "third_party/tint/src/ast/for_loop_statement.cc",
        "third_party/tint/src/ast/function.cc",
        "third_party/tint/src/ast/group_decoration.cc",
        "third_party/tint/src/ast/i32.cc",
        "third_party/tint/src/ast/identifier_expression.cc",
        "third_party/tint/src/ast/if_statement.cc",
        "third_party/tint/src/ast/int_literal.cc",
        "third_party/tint/src/ast/internal_decoration.cc",
        "third_party/tint/src/ast/interpolate_decoration.cc",
        "third_party/tint/src/ast/invariant_decoration.cc",
        "third_party/tint/src/ast/literal.cc",
        "third_party/tint/src/ast/location_decoration.cc",
        "third_party/tint/src/ast/loop_statement.cc",
        "third_party/tint/src/ast/matrix.cc",
        "third_party/tint/src/ast/member_accessor_expression.cc",
        "third_party/tint/src/ast/module.cc",
        "third_party/tint/src/ast/multisampled_texture.cc",
        "third_party/tint/src/ast/node.cc",
        "third_party/tint/src/ast/override_decoration.cc",
        "third_party/tint/src/ast/pipeline_stage.cc",
        "third_party/tint/src/ast/pointer.cc",
        "third_party/tint/src/ast/return_statement.cc",
        "third_party/tint/src/ast/sampled_texture.cc",
        "third_party/tint/src/ast/sampler.cc",
        "third_party/tint/src/ast/scalar_constructor_expression.cc",
        "third_party/tint/src/ast/sint_literal.cc",
        "third_party/tint/src/ast/stage_decoration.cc",
        "third_party/tint/src/ast/statement.cc",
        "third_party/tint/src/ast/storage_class.cc",
        "third_party/tint/src/ast/storage_texture.cc",
        "third_party/tint/src/ast/stride_decoration.cc",
        "third_party/tint/src/ast/struct.cc",
        "third_party/tint/src/ast/struct_block_decoration.cc",
        "third_party/tint/src/ast/struct_member.cc",
        "third_party/tint/src/ast/struct_member_align_decoration.cc",
        "third_party/tint/src/ast/struct_member_offset_decoration.cc",
        "third_party/tint/src/ast/struct_member_size_decoration.cc",
        "third_party/tint/src/ast/switch_statement.cc",
        "third_party/tint/src/ast/texture.cc",
        "third_party/tint/src/ast/type_constructor_expression.cc",
        "third_party/tint/src/ast/type_decl.cc",
        "third_party/tint/src/ast/type_name.cc",
        "third_party/tint/src/ast/u32.cc",
        "third_party/tint/src/ast/uint_literal.cc",
        "third_party/tint/src/ast/unary_op.cc",
        "third_party/tint/src/ast/unary_op_expression.cc",
        "third_party/tint/src/ast/variable.cc",
        "third_party/tint/src/ast/variable_decl_statement.cc",
        "third_party/tint/src/ast/vector.cc",
        "third_party/tint/src/ast/void.cc",
        "third_party/tint/src/ast/workgroup_decoration.cc",
        "third_party/tint/src/castable.cc",
        "third_party/tint/src/clone_context.cc",
        "third_party/tint/src/debug.cc",
        "third_party/tint/src/demangler.cc",
        "third_party/tint/src/diagnostic/diagnostic.cc",
        "third_party/tint/src/diagnostic/formatter.cc",
        "third_party/tint/src/diagnostic/printer.cc",
        "third_party/tint/src/inspector/entry_point.cc",
        "third_party/tint/src/inspector/inspector.cc",
        "third_party/tint/src/inspector/resource_binding.cc",
        "third_party/tint/src/inspector/scalar.cc",
        "third_party/tint/src/intrinsic_table.cc",
        "third_party/tint/src/program.cc",
        "third_party/tint/src/program_builder.cc",
        "third_party/tint/src/program_id.cc",
        "third_party/tint/src/reader/reader.cc",
        "third_party/tint/src/resolver/resolver.cc",
        "third_party/tint/src/resolver/resolver_constants.cc",
        "third_party/tint/src/source.cc",
        "third_party/tint/src/symbol.cc",
        "third_party/tint/src/symbol_table.cc",
        "third_party/tint/src/transform/add_empty_entry_point.cc",
        "third_party/tint/src/transform/array_length_from_uniform.cc",
        "third_party/tint/src/transform/binding_remapper.cc",
        "third_party/tint/src/transform/calculate_array_length.cc",
        "third_party/tint/src/transform/canonicalize_entry_point_io.cc",
        "third_party/tint/src/transform/decompose_memory_access.cc",
        "third_party/tint/src/transform/decompose_strided_matrix.cc",
        "third_party/tint/src/transform/external_texture_transform.cc",
        "third_party/tint/src/transform/first_index_offset.cc",
        "third_party/tint/src/transform/fold_constants.cc",
        "third_party/tint/src/transform/fold_trivial_single_use_lets.cc",
        "third_party/tint/src/transform/for_loop_to_loop.cc",
        "third_party/tint/src/transform/inline_pointer_lets.cc",
        "third_party/tint/src/transform/loop_to_for_loop.cc",
        "third_party/tint/src/transform/manager.cc",
        "third_party/tint/src/transform/module_scope_var_to_entry_point_param.cc",
        "third_party/tint/src/transform/num_workgroups_from_uniform.cc",
        "third_party/tint/src/transform/pad_array_elements.cc",
        "third_party/tint/src/transform/promote_initializers_to_const_var.cc",
        "third_party/tint/src/transform/renamer.cc",
        "third_party/tint/src/transform/robustness.cc",
        "third_party/tint/src/transform/simplify.cc",
        "third_party/tint/src/transform/single_entry_point.cc",
        "third_party/tint/src/transform/transform.cc",
        "third_party/tint/src/transform/vertex_pulling.cc",
        "third_party/tint/src/transform/wrap_arrays_in_structs.cc",
        "third_party/tint/src/transform/zero_init_workgroup_memory.cc",
        "third_party/tint/src/writer/append_vector.cc",
        "third_party/tint/src/writer/float_to_string.cc",
        "third_party/tint/src/writer/text.cc",
        "third_party/tint/src/writer/text_generator.cc",
        "third_party/tint/src/writer/writer.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // Tint's diagnostic printer has one implementation per host OS; pick the
    // one matching the resolved target.
    const target = (std.zig.system.NativeTargetInfo.detect(b.allocator, step.target) catch unreachable).target;
    switch (target.os.tag) {
        .windows => lib.addCSourceFile(thisDir() ++ "/libs/dawn/third_party/tint/src/diagnostic/printer_windows.cc", flags),
        .linux => lib.addCSourceFile(thisDir() ++ "/libs/dawn/third_party/tint/src/diagnostic/printer_linux.cc", flags),
        else => lib.addCSourceFile(thisDir() ++ "/libs/dawn/third_party/tint/src/diagnostic/printer_other.cc", flags),
    }

    // libtint_sem_src
    for ([_][]const u8{
        "third_party/tint/src/sem/array.cc",
        "third_party/tint/src/sem/atomic_type.cc",
        "third_party/tint/src/sem/block_statement.cc",
        "third_party/tint/src/sem/bool_type.cc",
        "third_party/tint/src/sem/call.cc",
        "third_party/tint/src/sem/call_target.cc",
        "third_party/tint/src/sem/constant.cc",
        "third_party/tint/src/sem/depth_multisampled_texture_type.cc",
        "third_party/tint/src/sem/depth_texture_type.cc",
        "third_party/tint/src/sem/expression.cc",
        "third_party/tint/src/sem/external_texture_type.cc",
        "third_party/tint/src/sem/f32_type.cc",
        "third_party/tint/src/sem/for_loop_statement.cc",
        "third_party/tint/src/sem/function.cc",
        "third_party/tint/src/sem/i32_type.cc",
        "third_party/tint/src/sem/if_statement.cc",
        "third_party/tint/src/sem/info.cc",
        "third_party/tint/src/sem/intrinsic.cc",
        "third_party/tint/src/sem/intrinsic_type.cc",
        "third_party/tint/src/sem/loop_statement.cc",
        "third_party/tint/src/sem/matrix_type.cc",
        "third_party/tint/src/sem/member_accessor_expression.cc",
        "third_party/tint/src/sem/multisampled_texture_type.cc",
        "third_party/tint/src/sem/node.cc",
        "third_party/tint/src/sem/parameter_usage.cc",
        "third_party/tint/src/sem/pointer_type.cc",
        "third_party/tint/src/sem/reference_type.cc",
        "third_party/tint/src/sem/sampled_texture_type.cc",
        "third_party/tint/src/sem/sampler_type.cc",
        "third_party/tint/src/sem/statement.cc",
        "third_party/tint/src/sem/storage_texture_type.cc",
        "third_party/tint/src/sem/struct.cc",
        "third_party/tint/src/sem/switch_statement.cc",
        "third_party/tint/src/sem/texture_type.cc",
        "third_party/tint/src/sem/type.cc",
        "third_party/tint/src/sem/type_manager.cc",
        "third_party/tint/src/sem/u32_type.cc",
        "third_party/tint/src/sem/variable.cc",
        "third_party/tint/src/sem/vector_type.cc",
        "third_party/tint/src/sem/void_type.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // libtint_spv_reader_src
    for ([_][]const u8{
        "third_party/tint/src/reader/spirv/construct.cc",
        "third_party/tint/src/reader/spirv/entry_point_info.cc",
        "third_party/tint/src/reader/spirv/enum_converter.cc",
        "third_party/tint/src/reader/spirv/function.cc",
        "third_party/tint/src/reader/spirv/namer.cc",
        "third_party/tint/src/reader/spirv/parser.cc",
        "third_party/tint/src/reader/spirv/parser_impl.cc",
        "third_party/tint/src/reader/spirv/parser_type.cc",
        "third_party/tint/src/reader/spirv/usage.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // libtint_spv_writer_src
    for ([_][]const u8{
        "third_party/tint/src/writer/spirv/binary_writer.cc",
        "third_party/tint/src/writer/spirv/builder.cc",
        "third_party/tint/src/writer/spirv/function.cc",
        "third_party/tint/src/writer/spirv/generator.cc",
        "third_party/tint/src/writer/spirv/instruction.cc",
        "third_party/tint/src/writer/spirv/operand.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // libtint_wgsl_reader_src
    for ([_][]const u8{
        "third_party/tint/src/reader/wgsl/lexer.cc",
        "third_party/tint/src/reader/wgsl/parser.cc",
        "third_party/tint/src/reader/wgsl/parser_impl.cc",
        "third_party/tint/src/reader/wgsl/token.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // libtint_wgsl_writer_src
    for ([_][]const u8{
        "third_party/tint/src/writer/wgsl/generator.cc",
        "third_party/tint/src/writer/wgsl/generator_impl.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // libtint_msl_writer_src
    for ([_][]const u8{
        "third_party/tint/src/writer/msl/generator.cc",
        "third_party/tint/src/writer/msl/generator_impl.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // libtint_hlsl_writer_src
    for ([_][]const u8{
        "third_party/tint/src/writer/hlsl/generator.cc",
        "third_party/tint/src/writer/hlsl/generator_impl.cc",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }
    return lib;
}

// Builds third_party/vulkan-deps/spirv-tools sources; derived from third_party/vulkan-deps/spirv-tools/src/BUILD.gn
fn buildLibSPIRVTools(b: *Builder, step:
*std.build.LibExeObjStep) *std.build.LibExeObjStep {
    // A Zig static library needs a root source file; a dummy one is used here
    // since only C++ sources are actually compiled into it.
    const main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable;
    const lib = b.addStaticLibrary("spirv-tools", main_abs);
    lib.install();
    lib.setBuildMode(step.build_mode);
    lib.setTarget(step.target);
    lib.linkLibCpp();

    const flags = &.{
        include("libs/dawn/third_party/vulkan-deps/spirv-tools/src"),
        include("libs/dawn/third_party/vulkan-deps/spirv-tools/src/include"),
        include("libs/dawn/third_party/vulkan-deps/spirv-headers/src/include"),
        include("libs/dawn/out/Debug/gen/third_party/vulkan-deps/spirv-tools/src"),
        include("libs/dawn/out/Debug/gen/third_party/vulkan-deps/spirv-tools/src/include"),
    };

    // spvtools
    for ([_][]const u8{
        "third_party/vulkan-deps/spirv-tools/src/source/assembly_grammar.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/binary.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/diagnostic.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/disassemble.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/enum_string_mapping.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/ext_inst.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/extensions.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/libspirv.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/name_mapper.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opcode.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/operand.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/parsed_operand.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/print.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/spirv_endian.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/spirv_fuzzer_options.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/spirv_optimizer_options.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/spirv_reducer_options.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/spirv_target_env.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/spirv_validator_options.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/table.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/text.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/text_handler.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/util/bit_vector.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/util/parse_number.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/util/string_utils.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/util/timer.cpp",
    }) |path| {
        // Joined path is never mutated after creation, so bind it as const.
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // spvtools_val
    for ([_][]const u8{
        "third_party/vulkan-deps/spirv-tools/src/source/val/basic_block.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/construct.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/function.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/instruction.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_adjacency.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_annotation.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_arithmetics.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_atomics.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_barriers.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_bitwise.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_builtins.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_capability.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_cfg.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_composites.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_constants.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_conversion.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_debug.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_decorations.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_derivatives.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_execution_limitations.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_extensions.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_function.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_id.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_image.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_instruction.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_interfaces.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_layout.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_literals.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_logicals.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_memory.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_memory_semantics.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_misc.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_mode_setting.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_non_uniform.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_primitives.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_scopes.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_small_type_uses.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validate_type.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/val/validation_state.cpp",
    }) |path| {
        const abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // spvtools_opt
    for ([_][]const u8{
        "third_party/vulkan-deps/spirv-tools/src/source/opt/aggressive_dead_code_elim_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/amd_ext_to_khr.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/basic_block.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/block_merge_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/block_merge_util.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/build_module.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/ccp_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/cfg.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/cfg_cleanup_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/code_sink.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/combine_access_chains.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/compact_ids_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/composite.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/const_folding_rules.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/constants.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/control_dependence.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/convert_to_sampled_image_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/convert_to_half_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/copy_prop_arrays.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/dataflow.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/dead_branch_elim_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/dead_insert_elim_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/dead_variable_elimination.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/debug_info_manager.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/decoration_manager.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/def_use_manager.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/desc_sroa.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/dominator_analysis.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/dominator_tree.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/eliminate_dead_constant_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/eliminate_dead_functions_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/eliminate_dead_functions_util.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/eliminate_dead_members_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/feature_manager.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/fix_storage_class.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/flatten_decoration_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/fold.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/fold_spec_constant_op_and_composite_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/folding_rules.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/freeze_spec_constant_value_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/function.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/graphics_robust_access_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/if_conversion.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/inline_exhaustive_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/inline_opaque_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/inline_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/inst_bindless_check_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/inst_buff_addr_check_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/inst_debug_printf_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/instruction.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/instruction_list.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/instrument_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/interp_fixup_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/ir_context.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/ir_loader.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/licm_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/local_access_chain_convert_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/local_redundancy_elimination.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/local_single_block_elim_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/local_single_store_elim_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_dependence.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_dependence_helpers.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_descriptor.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_fission.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_fusion.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_fusion_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_peeling.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_unroller.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_unswitch_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/loop_utils.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/mem_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/merge_return_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/module.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/optimizer.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/pass_manager.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/private_to_local_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/propagator.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/reduce_load_size.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/redundancy_elimination.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/register_pressure.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/relax_float_ops_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/remove_duplicates_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/remove_unused_interface_variables_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/replace_invalid_opc.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/scalar_analysis.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/scalar_analysis_simplification.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/scalar_replacement_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/set_spec_constant_default_value_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/simplification_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/ssa_rewrite_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/strength_reduction_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/strip_debug_info_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/strip_reflect_info_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/struct_cfg_analysis.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/type_manager.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/types.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/unify_const_pass.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/upgrade_memory_model.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/value_number_table.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/vector_dce.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/workaround1209.cpp",
        "third_party/vulkan-deps/spirv-tools/src/source/opt/wrap_opkill.cpp",
    }) |path| {
        const abs_path =
std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // spvtools_link
    for ([_][]const u8{
        "third_party/vulkan-deps/spirv-tools/src/source/link/linker.cpp",
    }) |path| {
        var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }
    return lib;
}

// Builds third_party/abseil sources; derived from:
//
// ```
// $ find third_party/abseil-cpp/absl | grep '\.cc' | grep -v 'test' | grep -v 'benchmark' | grep -v gaussian_distribution_gentables | grep -v print_hash_of | grep -v chi_square
// ```
//
// Returns a static-library step the caller links against.
fn buildLibAbseilCpp(b: *Builder, step: *std.build.LibExeObjStep) *std.build.LibExeObjStep {
    // A Zig static library needs a root source file; a dummy .zig file is
    // used because every real source below is C++ added via addCSourceFile.
    var main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable;
    const lib = b.addStaticLibrary("abseil-cpp", main_abs);
    lib.install();
    // Inherit build mode and target from the step this library serves.
    lib.setBuildMode(step.build_mode);
    lib.setTarget(step.target);
    lib.linkLibCpp();
    system_sdk.include(b, lib, .{});
    // NOTE(review): linkFramework is applied unconditionally — presumably a
    // no-op off macOS, but confirm for Windows/Linux hosts.
    lib.linkFramework("CoreFoundation");
    const flags = &.{include("libs/dawn/third_party/abseil-cpp")};

    // absl
    for ([_][]const u8{
        "third_party/abseil-cpp/absl/strings/match.cc",
        "third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc",
        "third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc",
        "third_party/abseil-cpp/absl/strings/internal/cordz_info.cc",
        "third_party/abseil-cpp/absl/strings/internal/cord_internal.cc",
        "third_party/abseil-cpp/absl/strings/internal/cordz_sample_token.cc",
        "third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc",
        "third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc",
        "third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc",
        "third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc",
        "third_party/abseil-cpp/absl/strings/internal/str_format/output.cc",
        "third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc",
        "third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc",
        "third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc",
        "third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc",
        "third_party/abseil-cpp/absl/strings/internal/cordz_handle.cc",
        "third_party/abseil-cpp/absl/strings/internal/memutil.cc",
        "third_party/abseil-cpp/absl/strings/internal/ostringstream.cc",
        "third_party/abseil-cpp/absl/strings/internal/pow10_helper.cc",
        "third_party/abseil-cpp/absl/strings/internal/utf8.cc",
        "third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc",
        "third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc",
        "third_party/abseil-cpp/absl/strings/internal/escaping.cc",
        "third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc",
        "third_party/abseil-cpp/absl/strings/string_view.cc",
        "third_party/abseil-cpp/absl/strings/str_cat.cc",
        "third_party/abseil-cpp/absl/strings/cord.cc",
        "third_party/abseil-cpp/absl/strings/ascii.cc",
        "third_party/abseil-cpp/absl/strings/numbers.cc",
        "third_party/abseil-cpp/absl/strings/charconv.cc",
        "third_party/abseil-cpp/absl/strings/str_split.cc",
        "third_party/abseil-cpp/absl/strings/substitute.cc",
        "third_party/abseil-cpp/absl/strings/escaping.cc",
        "third_party/abseil-cpp/absl/strings/str_replace.cc",
        "third_party/abseil-cpp/absl/types/bad_any_cast.cc",
        "third_party/abseil-cpp/absl/types/bad_optional_access.cc",
        "third_party/abseil-cpp/absl/types/bad_variant_access.cc",
        "third_party/abseil-cpp/absl/flags/parse.cc",
        "third_party/abseil-cpp/absl/flags/usage.cc",
        "third_party/abseil-cpp/absl/flags/internal/private_handle_accessor.cc",
        "third_party/abseil-cpp/absl/flags/internal/usage.cc",
        "third_party/abseil-cpp/absl/flags/internal/program_name.cc",
        "third_party/abseil-cpp/absl/flags/internal/flag.cc",
        "third_party/abseil-cpp/absl/flags/internal/commandlineflag.cc",
        "third_party/abseil-cpp/absl/flags/reflection.cc",
        "third_party/abseil-cpp/absl/flags/usage_config.cc",
        "third_party/abseil-cpp/absl/flags/flag.cc",
        "third_party/abseil-cpp/absl/flags/marshalling.cc",
        "third_party/abseil-cpp/absl/flags/commandlineflag.cc",
        "third_party/abseil-cpp/absl/synchronization/blocking_counter.cc",
        "third_party/abseil-cpp/absl/synchronization/mutex.cc",
        "third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc",
        "third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc",
        "third_party/abseil-cpp/absl/synchronization/internal/waiter.cc",
        "third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc",
        "third_party/abseil-cpp/absl/synchronization/barrier.cc",
        "third_party/abseil-cpp/absl/synchronization/notification.cc",
        "third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc",
        "third_party/abseil-cpp/absl/hash/internal/hash.cc",
        "third_party/abseil-cpp/absl/hash/internal/city.cc",
        "third_party/abseil-cpp/absl/debugging/symbolize.cc",
        "third_party/abseil-cpp/absl/debugging/failure_signal_handler.cc",
        "third_party/abseil-cpp/absl/debugging/leak_check_disable.cc",
        "third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc",
        "third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc",
        "third_party/abseil-cpp/absl/debugging/internal/stack_consumption.cc",
        "third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc",
        "third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc",
        "third_party/abseil-cpp/absl/debugging/internal/demangle.cc",
        "third_party/abseil-cpp/absl/debugging/leak_check.cc",
        "third_party/abseil-cpp/absl/debugging/stacktrace.cc",
        "third_party/abseil-cpp/absl/status/status_payload_printer.cc",
        "third_party/abseil-cpp/absl/status/status.cc",
        "third_party/abseil-cpp/absl/status/statusor.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc",
        "third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc",
        "third_party/abseil-cpp/absl/time/clock.cc",
        "third_party/abseil-cpp/absl/time/duration.cc",
        "third_party/abseil-cpp/absl/time/civil_time.cc",
        "third_party/abseil-cpp/absl/time/format.cc",
        "third_party/abseil-cpp/absl/time/time.cc",
        "third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc",
        "third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc",
        "third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc",
        "third_party/abseil-cpp/absl/numeric/int128.cc",
        "third_party/abseil-cpp/absl/random/gaussian_distribution.cc",
        "third_party/abseil-cpp/absl/random/discrete_distribution.cc",
        "third_party/abseil-cpp/absl/random/seed_gen_exception.cc",
        "third_party/abseil-cpp/absl/random/internal/seed_material.cc",
        "third_party/abseil-cpp/absl/random/internal/randen_slow.cc",
        "third_party/abseil-cpp/absl/random/internal/randen.cc",
        "third_party/abseil-cpp/absl/random/internal/randen_detect.cc",
        "third_party/abseil-cpp/absl/random/internal/randen_round_keys.cc",
        "third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc",
        "third_party/abseil-cpp/absl/random/internal/pool_urbg.cc",
        "third_party/abseil-cpp/absl/random/seed_sequences.cc",
        "third_party/abseil-cpp/absl/base/internal/spinlock_wait.cc",
        "third_party/abseil-cpp/absl/base/internal/periodic_sampler.cc",
        "third_party/abseil-cpp/absl/base/internal/cycleclock.cc",
        "third_party/abseil-cpp/absl/base/internal/spinlock.cc",
        "third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc",
        "third_party/abseil-cpp/absl/base/internal/scoped_set_env.cc",
        "third_party/abseil-cpp/absl/base/internal/sysinfo.cc",
        "third_party/abseil-cpp/absl/base/internal/raw_logging.cc",
        "third_party/abseil-cpp/absl/base/internal/throw_delegate.cc",
        "third_party/abseil-cpp/absl/base/internal/strerror.cc",
        "third_party/abseil-cpp/absl/base/internal/thread_identity.cc",
        "third_party/abseil-cpp/absl/base/internal/exponential_biased.cc",
        "third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc",
        "third_party/abseil-cpp/absl/base/log_severity.cc",
    }) |path| {
        var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }
    return lib;
}

// Builds dawn wire sources; derived from src/dawn_wire/BUILD.gn
fn buildLibDawnWire(b: *Builder, step: *std.build.LibExeObjStep) *std.build.LibExeObjStep {
    // Dummy Zig root file; all real sources are C++ (see buildLibAbseilCpp).
    var main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable;
    const lib = b.addStaticLibrary("dawn-wire", main_abs);
    lib.install();
    lib.setBuildMode(step.build_mode);
    lib.setTarget(step.target);
    lib.linkLibCpp();
    const flags = &.{
        include("libs/dawn/src"),
        include("libs/dawn/src/include"),
        include("libs/dawn/out/Debug/gen/src/include"),
        include("libs/dawn/out/Debug/gen/src"),
    };

    // dawn_wire_gen: pre-generated sources under out/Debug/gen
    for ([_][]const u8{
        "out/Debug/gen/src/dawn_wire/WireCmd_autogen.cpp",
        "out/Debug/gen/src/dawn_wire/client/ApiProcs_autogen.cpp",
        "out/Debug/gen/src/dawn_wire/client/ClientHandlers_autogen.cpp",
        "out/Debug/gen/src/dawn_wire/server/ServerDoers_autogen.cpp",
        "out/Debug/gen/src/dawn_wire/server/ServerHandlers_autogen.cpp",
    }) |path| {
        var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // dawn_wire: hand-written sources
    for ([_][]const u8{
        "src/dawn_wire/ChunkedCommandHandler.cpp",
        "src/dawn_wire/ChunkedCommandSerializer.cpp",
        "src/dawn_wire/Wire.cpp",
        "src/dawn_wire/WireClient.cpp",
        "src/dawn_wire/WireDeserializeAllocator.cpp",
        "src/dawn_wire/WireServer.cpp",
        "src/dawn_wire/client/Buffer.cpp",
        "src/dawn_wire/client/Client.cpp",
        "src/dawn_wire/client/ClientDoers.cpp",
        "src/dawn_wire/client/ClientInlineMemoryTransferService.cpp",
        "src/dawn_wire/client/Device.cpp",
        "src/dawn_wire/client/Queue.cpp",
        "src/dawn_wire/client/ShaderModule.cpp",
        "src/dawn_wire/server/Server.cpp",
        "src/dawn_wire/server/ServerBuffer.cpp",
        "src/dawn_wire/server/ServerDevice.cpp",
        "src/dawn_wire/server/ServerInlineMemoryTransferService.cpp",
        "src/dawn_wire/server/ServerQueue.cpp",
        "src/dawn_wire/server/ServerShaderModule.cpp",
    }) |path| {
        var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }
    return lib;
}

// Builds dawn utils sources; derived from src/utils/BUILD.gn
fn buildLibDawnUtils(b: *Builder, step: *std.build.LibExeObjStep, options: Options) *std.build.LibExeObjStep {
    var main_abs = std.fs.path.join(b.allocator, &.{ thisDir(), "src/dawn/dummy.zig" }) catch unreachable;
    const lib = b.addStaticLibrary("dawn-utils", main_abs);
    lib.install();
    lib.setBuildMode(step.build_mode);
    lib.setTarget(step.target);
    lib.linkLibCpp();
    glfw.link(b, lib, .{ .system_sdk = .{ .set_sysroot = false } });
    // NOTE(review): -DDAWN_ENABLE_BACKEND_METAL is defined unconditionally
    // even though MetalBinding.mm is only compiled on macOS below — verify
    // this is intentional on non-mac targets.
    const flags = &.{
        "-DDAWN_ENABLE_BACKEND_METAL",
        "-DDAWN_ENABLE_BACKEND_NULL",
        include("libs/mach-glfw/upstream/glfw/include"),
        include("libs/dawn/src"),
        include("libs/dawn/src/include"),
        include("libs/dawn/out/Debug/gen/src/include"),
    };

    // Platform-independent bindings.
    for ([_][]const u8{
        "src/utils/BackendBinding.cpp",
        "src/utils/NullBinding.cpp",
    }) |path| {
        var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
        lib.addCSourceFile(abs_path, flags);
    }

    // Platform-specific bindings, gated on the resolved (native) target.
    const target = (std.zig.system.NativeTargetInfo.detect(b.allocator, step.target) catch unreachable).target;
    switch (target.os.tag) {
        .windows => {
            if (options.d3d12) {
                for ([_][]const u8{
                    "src/utils/D3D12Binding.cpp",
                }) |path| {
                    var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
                    lib.addCSourceFile(abs_path, flags);
                }
            }
        },
        .macos => {
            if (options.metal) {
                for ([_][]const u8{
                    "src/utils/MetalBinding.mm",
                }) |path| {
                    var abs_path = std.fs.path.join(b.allocator, &.{ thisDir(), "libs/dawn", path }) catch unreachable;
                    lib.addCSourceFile(abs_path, flags);
                }
            }
        },
        else => {
            if (options.linux_window_manager == .X11) {}
        },
    }

    // Remaining bindings from BUILD.gn, not yet wired up here:
    // if (dawn_enable_opengl) {
    //   sources += [ "OpenGLBinding.cpp" ]
    // }
    // if (dawn_enable_vulkan) {
    //   sources += [ "VulkanBinding.cpp" ]
    // }
    return lib;
}

// Builds a `-I<abs path>` compiler flag for a path relative to this file.
fn include(comptime rel: []const u8) []const u8 {
    return "-I" ++ thisDir() ++ "/" ++ rel;
}

// Directory containing this build file (comptime, via @src()).
fn thisDir() []const u8 {
    return std.fs.path.dirname(@src().file) orelse ".";
}
gpu/build_dawn.zig
//! Auto-generated lookup table for the Unicode derived core property
//! "Changes_When_Casemapped". The backing bool array is indexed by
//! `cp - lo`, so slot 0 corresponds to code point 65 ('A'). Do not edit
//! the range data by hand; it is produced by a generator.
const std = @import("std");
const mem = std.mem;

const ChangesWhenCasemapped = @This();

allocator: *mem.Allocator,
array: []bool, // one flag per code point in [lo, hi]
lo: u21 = 65,
hi: u21 = 125251,

/// Allocates and fills the property table. Caller must call `deinit`.
pub fn init(allocator: *mem.Allocator) !ChangesWhenCasemapped {
    var instance = ChangesWhenCasemapped{
        .allocator = allocator,
        .array = try allocator.alloc(bool, 125187), // hi - lo + 1 slots
    };

    mem.set(bool, instance.array, false);

    var index: u21 = 0;
    // Each assignment/loop below marks one generated code-point range as
    // having the property. Indices are offset by lo (65); e.g. the first
    // range 0..25 is 'A'..'Z' and 32..57 is 'a'..'z'.
    // NOTE: the redundant `index = 0;` after `var index: u21 = 0;` is a
    // generator artifact, kept as emitted.
    index = 0;
    while (index <= 25) : (index += 1) { instance.array[index] = true; }
    index = 32;
    while (index <= 57) : (index += 1) { instance.array[index] = true; }
    instance.array[116] = true;
    index = 127;
    while (index <= 149) : (index += 1) { instance.array[index] = true; }
    index = 151;
    while (index <= 181) : (index += 1) { instance.array[index] = true; }
    index = 183;
    while (index <= 246) : (index += 1) { instance.array[index] = true; }
    index = 248;
    while (index <= 331) : (index += 1) { instance.array[index] = true; }
    index = 333;
    while (index <= 345) : (index += 1) { instance.array[index] = true; }
    index = 347;
    while (index <= 360) : (index += 1) { instance.array[index] = true; }
    index = 363;
    while (index <= 376) : (index += 1) { instance.array[index] = true; }
    index = 379;
    while (index <= 380) : (index += 1) { instance.array[index] = true; }
    instance.array[382] = true;
    index = 387;
    while (index <= 479) : (index += 1) { instance.array[index] = true; }
    index = 481;
    while (index <= 498) : (index += 1) { instance.array[index] = true; }
    index = 505;
    while (index <= 531) : (index += 1) { instance.array[index] = true; }
    index = 533;
    while (index <= 534) : (index += 1) { instance.array[index] = true; }
    instance.array[536] = true;
    index = 538;
    while (index <= 539) : (index += 1) { instance.array[index] = true; }
    index = 543;
    while (index <= 544) : (index += 1) { instance.array[index] = true; }
    instance.array[546] = true;
    index = 548;
    while (index <= 549) : (index += 1) { instance.array[index] = true; }
    index = 551;
    while (index <= 555) : (index += 1) { instance.array[index] = true; }
    instance.array[558] = true;
    index = 560;
    while (index <= 561) : (index += 1) { instance.array[index] = true; }
    instance.array[564] = true;
    instance.array[572] = true;
    instance.array[575] = true;
    index = 577;
    while (index <= 578) : (index += 1) { instance.array[index] = true; }
    index = 582;
    while (index <= 587) : (index += 1) { instance.array[index] = true; }
    instance.array[593] = true;
    index = 604;
    while (index <= 605) : (index += 1) { instance.array[index] = true; }
    instance.array[772] = true;
    index = 815;
    while (index <= 818) : (index += 1) { instance.array[index] = true; }
    index = 821;
    while (index <= 822) : (index += 1) { instance.array[index] = true; }
    index = 826;
    while (index <= 828) : (index += 1) { instance.array[index] = true; }
    instance.array[830] = true;
    instance.array[837] = true;
    index = 839;
    while (index <= 841) : (index += 1) { instance.array[index] = true; }
    instance.array[843] = true;
    index = 845;
    while (index <= 864) : (index += 1) { instance.array[index] = true; }
    index = 866;
    while (index <= 912) : (index += 1) { instance.array[index] = true; }
    index = 916;
    while (index <= 948) : (index += 1) { instance.array[index] = true; }
    index = 950;
    while (index <= 954) : (index += 1) { instance.array[index] = true; }
    index = 956;
    while (index <= 1088) : (index += 1) { instance.array[index] = true; }
    index = 1097;
    while (index <= 1262) : (index += 1) { instance.array[index] = true; }
    index = 1264;
    while (index <= 1301) : (index += 1) { instance.array[index] = true; }
    index = 1312;
    while (index <= 1350) : (index += 1) { instance.array[index] = true; }
    index = 4191;
    while (index <= 4228) : (index += 1) { instance.array[index] = true; }
    instance.array[4230] = true;
    instance.array[4236] = true;
    index = 4239;
    while (index <= 4281) : (index += 1) { instance.array[index] = true; }
    index = 4284;
    while (index <= 4286) : (index += 1) { instance.array[index] = true; }
    index = 4959;
    while (index <= 5044) : (index += 1) { instance.array[index] = true; }
    index = 5047;
    while (index <= 5052) : (index += 1) { instance.array[index] = true; }
    index = 7231;
    while (index <= 7239) : (index += 1) { instance.array[index] = true; }
    index = 7247;
    while (index <= 7289) : (index += 1) { instance.array[index] = true; }
    index = 7292;
    while (index <= 7294) : (index += 1) { instance.array[index] = true; }
    instance.array[7480] = true;
    instance.array[7484] = true;
    instance.array[7501] = true;
    index = 7615;
    while (index <= 7770) : (index += 1) { instance.array[index] = true; }
    instance.array[7773] = true;
    index = 7775;
    while (index <= 7892) : (index += 1) { instance.array[index] = true; }
    index = 7895;
    while (index <= 7900) : (index += 1) { instance.array[index] = true; }
    index = 7903;
    while (index <= 7940) : (index += 1) { instance.array[index] = true; }
    index = 7943;
    while (index <= 7948) : (index += 1) { instance.array[index] = true; }
    index = 7951;
    while (index <= 7958) : (index += 1) { instance.array[index] = true; }
    instance.array[7960] = true;
    instance.array[7962] = true;
    instance.array[7964] = true;
    index = 7966;
    while (index <= 7996) : (index += 1) { instance.array[index] = true; }
    index = 7999;
    while (index <= 8051) : (index += 1) { instance.array[index] = true; }
    index = 8053;
    while (index <= 8059) : (index += 1) { instance.array[index] = true; }
    instance.array[8061] = true;
    index = 8065;
    while (index <= 8067) : (index += 1) { instance.array[index] = true; }
    index = 8069;
    while (index <= 8075) : (index += 1) { instance.array[index] = true; }
    index = 8079;
    while (index <= 8082) : (index += 1) { instance.array[index] = true; }
    index = 8085;
    while (index <= 8090) : (index += 1) { instance.array[index] = true; }
    index = 8095;
    while (index <= 8107) : (index += 1) { instance.array[index] = true; }
    index = 8113;
    while (index <= 8115) : (index += 1) { instance.array[index] = true; }
    index = 8117;
    while (index <= 8123) : (index += 1) { instance.array[index] = true; }
    instance.array[8421] = true;
    index = 8425;
    while (index <= 8426) : (index += 1) { instance.array[index] = true; }
    instance.array[8433] = true;
    instance.array[8461] = true;
    index = 8479;
    while (index <= 8510) : (index += 1) { instance.array[index] = true; }
    index = 8514;
    while (index <= 8515) : (index += 1) { instance.array[index] = true; }
    index = 9333;
    while (index <= 9384) : (index += 1) { instance.array[index] = true; }
    index = 11199;
    while (index <= 11245) : (index += 1) { instance.array[index] = true; }
    index = 11247;
    while (index <= 11293) : (index += 1) { instance.array[index] = true; }
    index = 11295;
    while (index <= 11311) : (index += 1) { instance.array[index] = true; }
    index = 11313;
    while (index <= 11314) : (index += 1) { instance.array[index] = true; }
    index = 11316;
    while (index <= 11317) : (index += 1) { instance.array[index] = true; }
    index = 11325;
    while (index <= 11426) : (index += 1) { instance.array[index] = true; }
    index = 11434;
    while (index <= 11437) : (index += 1) { instance.array[index] = true; }
    index = 11441;
    while (index <= 11442) : (index += 1) { instance.array[index] = true; }
    index = 11455;
    while (index <= 11492) : (index += 1) { instance.array[index] = true; }
    instance.array[11494] = true;
    instance.array[11500] = true;
    index = 42495;
    while (index <= 42540) : (index += 1) { instance.array[index] = true; }
    index = 42559;
    while (index <= 42586) : (index += 1) { instance.array[index] = true; }
    index = 42721;
    while (index <= 42734) : (index += 1) { instance.array[index] = true; }
    index = 42737;
    while (index <= 42798) : (index += 1) { instance.array[index] = true; }
    index = 42808;
    while (index <= 42822) : (index += 1) { instance.array[index] = true; }
    index = 42826;
    while (index <= 42828) : (index += 1) { instance.array[index] = true; }
    index = 42831;
    while (index <= 42835) : (index += 1) { instance.array[index] = true; }
    index = 42837;
    while (index <= 42861) : (index += 1) { instance.array[index] = true; }
    index = 42863;
    while (index <= 42878) : (index += 1) { instance.array[index] = true; }
    index = 42881;
    while (index <= 42889) : (index += 1) { instance.array[index] = true; }
    index = 42932;
    while (index <= 42933) : (index += 1) { instance.array[index] = true; }
    instance.array[43794] = true;
    index = 43823;
    while (index <= 43902) : (index += 1) { instance.array[index] = true; }
    index = 64191;
    while (index <= 64197) : (index += 1) { instance.array[index] = true; }
    index = 64210;
    while (index <= 64214) : (index += 1) { instance.array[index] = true; }
    index = 65248;
    while (index <= 65273) : (index += 1) { instance.array[index] = true; }
    index = 65280;
    while (index <= 65305) : (index += 1) { instance.array[index] = true; }
    index = 66495;
    while (index <= 66574) : (index += 1) { instance.array[index] = true; }
    index = 66671;
    while (index <= 66706) : (index += 1) { instance.array[index] = true; }
    index = 66711;
    while (index <= 66746) : (index += 1) { instance.array[index] = true; }
    index = 68671;
    while (index <= 68721) : (index += 1) { instance.array[index] = true; }
    index = 68735;
    while (index <= 68785) : (index += 1) { instance.array[index] = true; }
    index = 71775;
    while (index <= 71838) : (index += 1) { instance.array[index] = true; }
    index = 93695;
    while (index <= 93758) : (index += 1) { instance.array[index] = true; }
    index = 125119;
    while (index <= 125186) : (index += 1) { instance.array[index] = true; }

    // Placeholder: 0. Struct name, 1. Code point kind
    return instance;
}

/// Frees the backing array.
pub fn deinit(self: *ChangesWhenCasemapped) void {
    self.allocator.free(self.array);
}

// isChangesWhenCasemapped checks if cp is of the kind Changes_When_Casemapped.
pub fn isChangesWhenCasemapped(self: ChangesWhenCasemapped, cp: u21) bool {
    // Reject code points outside the covered [lo, hi] window up front.
    if (cp < self.lo or cp > self.hi) return false;
    const index = cp - self.lo;
    // Defensive: also guard against an index past the allocated array.
    return if (index >= self.array.len) false else self.array[index];
}
src/components/autogen/DerivedCoreProperties/ChangesWhenCasemapped.zig
const std = @import("std");
const testing = std.testing;
const ArrayList = std.ArrayList;

pub const deserialize = @import("deserialize.zig").deserialize;

const hasFn = std.meta.trait.hasFn;
const implementsRLP = hasFn("encodeToRLP");

/// RLP-encode `data` of type `T` into `list`.
///
/// Types that declare `encodeToRLP` are given full control of their own
/// encoding. Otherwise encoding is driven by `@typeInfo(T)`:
///   - integers 0..127 are a single byte; larger values get a prefix of
///     128 + @sizeOf(T) followed by big-endian bytes,
///   - byte arrays/slices are encoded as RLP strings,
///   - other arrays/slices and structs are encoded element-/field-wise
///     with a length prefix,
///   - optionals/null encode as the empty string (0x80), bools as 0/1.
/// Returns error.UnsupportedType for anything else.
///
/// NOTE(review): several details deviate from canonical RLP and are pinned
/// by the tests below (and presumably mirrored by deserialize.zig):
/// long-form *list* prefixes use base 183 (0xb7, the string base) instead
/// of 247 (0xf7), and multi-byte lengths are emitted least-significant
/// byte first rather than big-endian. Confirm against deserialize.zig
/// before changing.
///
/// NOTE(review): temporary buffers use std.testing.allocator, which only
/// exists in test builds — consider threading an allocator through the API.
pub fn serialize(comptime T: type, data: T, list: *ArrayList(u8)) !void {
    if (comptime implementsRLP(T)) {
        return data.encodeToRLP(list);
    }
    const info = @typeInfo(T);
    return switch (info) {
        .Int => switch (data) {
            // Small values are their own encoding.
            0...127 => list.append(@truncate(u8, data)),
            else => {
                // Always emits @sizeOf(T) bytes, so the result is not
                // minimal-length RLP for e.g. a u16 holding 200.
                try list.append(128 + @sizeOf(T));
                try list.writer().writeIntBig(T, data);
            },
        },
        .Array => {
            // shortcut for byte lists
            if (@sizeOf(info.Array.child) == 1) {
                if (data.len < 56) {
                    try list.append(128 + data.len);
                } else {
                    // Long string: prefix 183 + length-of-length, then the
                    // length bytes. Both are computable at comptime here.
                    comptime var length_length = 0;
                    comptime {
                        var l = @sizeOf(T);
                        while (l != 0) : (l >>= 8) {
                            length_length += 1;
                        }
                    }
                    try list.append(183 + length_length);
                    comptime var i = 0;
                    comptime var length = @sizeOf(T);
                    inline while (i < length_length) : (i += 1) {
                        try list.append(@truncate(u8, length));
                        length >>= 8;
                    }
                }
                _ = try list.writer().write(data[0..]);
            } else {
                // Serialize elements into a scratch list first so the total
                // payload length is known before the prefix is written.
                var tlist = ArrayList(u8).init(testing.allocator);
                defer tlist.deinit();
                for (data) |item| {
                    try serialize(info.Array.child, item, &tlist);
                }
                if (tlist.items.len < 56) {
                    try list.append(128 + @truncate(u8, tlist.items.len));
                } else {
                    // Reserve the prefix byte, then patch it once the
                    // number of length bytes is known.
                    const index = list.items.len;
                    try list.append(0);
                    var length = tlist.items.len;
                    var length_length: u8 = 0;
                    while (length != 0) : (length >>= 8) {
                        try list.append(@truncate(u8, length));
                        length_length += 1;
                    }
                    list.items[index] = 183 + length_length;
                }
                _ = try list.writer().write(tlist.items);
            }
        },
        .Struct => |sinfo| {
            // A struct is encoded as the list of its fields, in order.
            var tlist = ArrayList(u8).init(testing.allocator);
            defer tlist.deinit();
            inline for (sinfo.fields) |field| {
                try serialize(field.field_type, @field(data, field.name), &tlist);
            }
            if (tlist.items.len < 56) {
                try list.append(192 + @truncate(u8, tlist.items.len));
            } else {
                const index = list.items.len;
                try list.append(0);
                var length = tlist.items.len;
                var length_length: u8 = 0;
                while (length != 0) : (length >>= 8) {
                    try list.append(@truncate(u8, length));
                    length_length += 1;
                }
                list.items[index] = 183 + length_length;
            }
            _ = try list.writer().write(tlist.items);
        },
        .Pointer => |ptr| {
            switch (ptr.size) {
                .Slice => {
                    // Simple case: string
                    if (@sizeOf(ptr.child) == 1) {
                        if (data.len < 56) {
                            // Short string: single prefix byte 0x80 + len.
                            try list.append(128 + @truncate(u8, data.len));
                        } else {
                            // Long string: 0xb7 + length-of-length, then the
                            // length bytes (same convention as the other
                            // long-form branches in this file).
                            //
                            // Fixes the previous code, which emitted
                            // 128 + @truncate(u8, len) unconditionally: for
                            // len in 56..127 that collides with long-form
                            // prefixes, and for len >= 128 the prefix wraps.
                            const index = list.items.len;
                            try list.append(0);
                            var length = data.len;
                            var length_length: u8 = 0;
                            while (length != 0) : (length >>= 8) {
                                try list.append(@truncate(u8, length));
                                length_length += 1;
                            }
                            list.items[index] = 183 + length_length;
                        }
                        _ = try list.writer().write(data);
                    } else {
                        var tlist = ArrayList(u8).init(testing.allocator);
                        defer tlist.deinit();
                        for (data) |item| {
                            try serialize(ptr.child, item, &tlist);
                        }
                        if (tlist.items.len < 56) {
                            try list.append(192 + @truncate(u8, tlist.items.len));
                        } else {
                            const index = list.items.len;
                            try list.append(0);
                            var length = tlist.items.len;
                            var length_length: u8 = 0;
                            while (length != 0) : (length >>= 8) {
                                try list.append(@truncate(u8, length));
                                length_length += 1;
                            }
                            list.items[index] = 183 + length_length;
                        }
                        _ = try list.writer().write(tlist.items);
                    }
                },
                .One => {
                    // Encode the pointee.
                    try serialize(ptr.child, data.*, list);
                },
                else => return error.UnsupportedType,
            }
        },
        .Optional => |opt| {
            // null encodes as the empty string (0x80).
            if (data == null) {
                try list.append(0x80);
            } else {
                try serialize(opt.child, data.?, list);
            }
        },
        .Null => {
            try list.append(0x80);
        },
        .Bool => {
            try list.append(if (data) 1 else 0);
        },
        else => return error.UnsupportedType,
    };
}

test "serialize an integer" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    try serialize(u8, 42, &list);
    const expected1 = [_]u8{42};
    try testing.expect(std.mem.eql(u8, list.items[0..], expected1[0..]));

    list.clearRetainingCapacity();
    try serialize(u8, 129, &list);
    const expected2 = [_]u8{ 129, 129 };
    try testing.expect(std.mem.eql(u8, list.items[0..], expected2[0..]));

    list.clearRetainingCapacity();
    try serialize(u8, 128, &list);
    const expected3 = [_]u8{ 129, 128 };
    try testing.expect(std.mem.eql(u8, list.items[0..], expected3[0..]));

    list.clearRetainingCapacity();
    try serialize(u16, 0xabcd, &list);
    const expected4 = [_]u8{ 130, 0xab, 0xcd };
    try testing.expect(std.mem.eql(u8, list.items[0..], expected4[0..]));

    // Check that multi-byte values that are < 128 also serialize as a
    // single byte integer.
    list.clearRetainingCapacity();
    try serialize(u16, 42, &list);
    try testing.expect(std.mem.eql(u8, list.items[0..], expected1[0..]));

    list.clearRetainingCapacity();
    try serialize(u32, 0xdeadbeef, &list);
    const expected6 = [_]u8{ 132, 0xde, 0xad, 0xbe, 0xef };
    try testing.expect(std.mem.eql(u8, list.items[0..], expected6[0..]));
}

test "serialize a byte array" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    const src = [_]u8{ 1, 2, 3, 4 };
    try serialize([4]u8, src, &list);
    const expected = [_]u8{ 132, 1, 2, 3, 4 };
    try testing.expect(std.mem.eql(u8, list.items[0..], expected[0..]));

    list.clearRetainingCapacity();
    const src8x58 = [_]u8{0xab} ** 58;
    try serialize([58]u8, src8x58, &list);
    const expected8x58 = [_]u8{ 0xb8, 0x3a } ++ [_]u8{0xab} ** 58;
    try testing.expect(std.mem.eql(u8, list.items[0..], expected8x58[0..]));

    list.clearRetainingCapacity();
    const src8x1K = [_]u8{0xab} ** 1024;
    try serialize(@TypeOf(src8x1K), src8x1K, &list);
    const expected8x1K = [_]u8{ 0xb9, 0x00, 0x04 } ++ [_]u8{0xab} ** 1024;
    try testing.expect(std.mem.eql(u8, list.items[0..], expected8x1K[0..]));
}

test "serialize a u16 array" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    const src16 = [_]u16{ 0xabcd, 0xef01 };
    try serialize([2]u16, src16, &list);
    const expected16 = [_]u8{ 134, 130, 0xab, 0xcd, 130, 0xef, 0x01 };
    try testing.expect(std.mem.eql(u8, list.items[0..], expected16[0..]));

    list.clearRetainingCapacity();
    const src16x1K = [_]u16{0xabcd} ** 1024;
    try serialize(@TypeOf(src16x1K), src16x1K, &list);
    const expected16x1K = [_]u8{ 0xb9, 0, 0x0C } ++ [_]u8{ 130, 0xab, 0xcd } ** 1024;
    try testing.expect(std.mem.eql(u8, list.items[0..], expected16x1K[0..]));
}

test "serialize a string" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    try serialize([]const u8, "hello", &list);
    const expected = [_]u8{ 133, 'h', 'e', 'l', 'l', 'o' };
    try testing.expect(std.mem.eql(u8, list.items[0..], expected[0..]));
}

test "serialize a struct" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    const Person = struct {
        age: u8,
        name: []const u8,
    };
    const jc = Person{ .age = 123, .name = "<NAME>" };
    try serialize(Person, jc, &list);
    const expected = [_]u8{ 0xc2 + jc.name.len, 123, 128 + jc.name.len } ++ jc.name;
    try testing.expect(std.mem.eql(u8, list.items[0..], expected[0..]));
}

test "serialize a struct with functions" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    const Person = struct {
        age: u8,
        name: []const u8,

        pub fn sayHello() void {
            std.debug.print("hello", .{});
        }
    };
    const jc = Person{ .age = 123, .name = "<NAME>" };
    try serialize(Person, jc, &list);
    const expected = [_]u8{ 0xc2 + jc.name.len, 123, 128 + jc.name.len } ++ jc.name;
    try testing.expect(std.mem.eql(u8, list.items[0..], expected[0..]));
}

test "serialize a boolean" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    try serialize(bool, false, &list);
    var expected = [_]u8{0};
    try testing.expect(std.mem.eql(u8, list.items[0..], expected[0..]));

    list.clearRetainingCapacity();
    expected[0] = 1;
    try serialize(bool, true, &list);
    try testing.expect(std.mem.eql(u8, list.items[0..], expected[0..]));
}

const RLPEncodablePerson = struct {
    name: []const u8,
    age: u8,

    pub fn encodeToRLP(self: RLPEncodablePerson, list: *ArrayList(u8)) !void {
        return list.append(42);
    }
};

test "custom serializer" {
    var list = ArrayList(u8).init(testing.allocator);
    defer list.deinit();
    const jdoe = RLPEncodablePerson{ .name = "<NAME>", .age = 57 };
    try serialize(RLPEncodablePerson, jdoe, &list);
    try testing.expect(list.items.len == 1);
    try testing.expect(list.items[0] == 42);
}
src/main.zig
const std = @import("std");
const Record = @import("lsmtree").Record;
const KeyLengthType = @import("lsmtree").KeyLengthType;
const Pointer = @import("lsmtree").Pointer;
const Op = @import("lsmtree").Op;

const Error = error{ArrayTooSmall};

// Serialized pointer layout (little endian):
//   1 byte                    Operation
//   @sizeOf(KeyLengthType)    Key length
//   X bytes                   Key
//   8 bytes                   Offset into the data file

// Writes into the provided array a Pointer byte array using the provided Record.
// Returns the number of bytes written. The caller must size `buf` appropriately;
// like the original, slicing traps (safe modes) if `buf` is too small.
pub fn fromRecord(r: Record, buf: []u8, file_offset: usize) usize {
    // op
    buf[0] = @enumToInt(r.op);
    var offset: usize = 1;

    // key length
    std.mem.writeIntSliceLittle(KeyLengthType, buf[offset .. offset + @sizeOf(KeyLengthType)], @intCast(KeyLengthType, r.key.len));
    offset += @sizeOf(KeyLengthType);

    // key
    std.mem.copy(u8, buf[offset .. offset + r.key.len], r.key);
    offset += r.key.len;

    // offset
    std.mem.writeIntSliceLittle(usize, buf[offset .. offset + @sizeOf(@TypeOf(file_offset))], file_offset);
    offset += @sizeOf(@TypeOf(file_offset));

    return offset;
}

// Get a byte array representation of a pointer using the provided allocator.
// FREE the returned array using the provided allocator. The array look like this:
// 1 byte: Operation
// 2 bytes: Key size
// X bytes: Key
// 8 bytes: Offset in the data
pub fn toBytesAlloc(self: Pointer, allocator: *std.mem.Allocator) ![]u8 {
    var buf = try allocator.alloc(u8, self.bytesLen());
    // FIX: free the buffer on the error path instead of leaking it.
    errdefer allocator.free(buf);
    _ = try toBytes(self, buf);
    return buf;
}

// Get a byte array representation of a pointer using the provided array.
// It must be at least of the size of Pointer.bytesLen(). The array look like this:
// 1 byte: Operation
// 2 bytes: Key size
// X bytes: Key
// 8 bytes: Offset in the data
pub fn toBytes(pointer: Pointer, buf: []u8) Error!usize {
    if (pointer.bytesLen() > buf.len) {
        return Error.ArrayTooSmall;
    }
    var offset: usize = 0;

    // Op
    buf[0] = @enumToInt(pointer.op);
    offset += 1;

    // key length
    // FIX: @intCast (matching fromRecord) instead of @truncate, so an
    // oversized key trips a safety check rather than being silently corrupted.
    std.mem.writeIntSliceLittle(KeyLengthType, buf[offset .. offset + @sizeOf(KeyLengthType)], @intCast(KeyLengthType, pointer.key.len));
    offset += @sizeOf(KeyLengthType);

    // key
    std.mem.copy(u8, buf[offset .. offset + pointer.key.len], pointer.key);
    offset += pointer.key.len;

    // offset
    std.mem.writeIntSliceLittle(usize, buf[offset .. offset + @sizeOf(@TypeOf(pointer.byte_offset))], pointer.byte_offset);
    offset += @sizeOf(@TypeOf(pointer.byte_offset));

    return offset;
}

// Reads the provided array and return a Pointer from the contents. The returned
// Pointer's key aliases `bytes` (no copy is made).
// The size of this array is expected to be X + 11 being X the key length.
pub fn fromBytes(bytes: []u8) !Pointer {
    // A minimum size of the array of 12 is expected (op + key length field +
    // at least 1 key byte + 8 offset bytes) or the array doesn't have
    // the minimum information.
    if (bytes.len < 12) {
        return Error.ArrayTooSmall;
    }

    // Op
    var op = @intToEnum(Op, bytes[0]);
    var offset: usize = 1;

    // Key length
    var key_length = std.mem.readIntSliceLittle(KeyLengthType, bytes[offset .. offset + @sizeOf(KeyLengthType)]);
    offset += @sizeOf(KeyLengthType);

    // FIX: validate the declared key length against the actual array size, so
    // a corrupted length can't make the slicing below read out of bounds.
    if (bytes.len < 1 + @sizeOf(KeyLengthType) + @as(usize, key_length) + @sizeOf(usize)) {
        return Error.ArrayTooSmall;
    }

    // Key
    var key = bytes[offset .. offset + key_length];
    offset += key_length;

    // Offset
    var byte_offset = std.mem.readIntSliceLittle(usize, bytes[offset .. offset + @sizeOf(usize)]);

    return Pointer{
        .key = key,
        .byte_offset = byte_offset,
        .op = op,
    };
}

test "pointer.fromBytes" {
    var buf = [_]u8{
        0, //Op
        5, 0, //key length
        104, 101, 108, 108, 111, //hello (the key)
        100, 0, 0, 0, 0, 0, 0, 0, //offset
    };

    const p = try fromBytes(&buf);
    const eq = std.testing.expectEqual;
    try eq(@as(usize, 5), p.key.len);
    try eq(@as(usize, 100), p.byte_offset);
    try std.testing.expectEqualSlices(u8, "hello", p.key);
    try eq(buf.len, p.bytesLen());
}

test "pointer.toBytes" {
    var buf = [_]u8{
        0, //Op
        5, 0, //key length
        104, 101, 108, 108, 111, //hello (the key)
        100, 0, 0, 0, 0, 0, 0, 0, //offset
    };

    var p = Pointer{
        .op = Op.Create,
        .key = "hello",
        .byte_offset = 100,
    };

    var alloc = std.testing.allocator;
    var res = try toBytesAlloc(p, &alloc);
    defer alloc.free(res);

    for (buf) |b, i| {
        // std.debug.print("{d} vs {d}\n", .{ res[i], b });
        try std.testing.expectEqual(res[i], b);
    }
}

test "pointer.fromRecord" {
    var alloc = std.testing.allocator;
    var r = try Record.init("hello", "world", Op.Delete, &alloc);
    defer r.deinit();

    var buf: [20]u8 = undefined;
    const size = fromRecord(r.*, &buf, 99);

    try std.testing.expectEqual(@as(usize, 16), size);
    try std.testing.expectEqual(@as(u8, 5), buf[1]);
    try std.testing.expectEqual(@as(u8, 99), buf[8]);
    try std.testing.expectEqualStrings("hello", buf[3..8]);
}

test "pointer.try contains" {
    const String = @import("string").String;
    var alloc = std.testing.allocator;
    var s = String.init(&alloc);
    defer s.deinit();
    try s.concat("hello");
    const res = s.find("ello");
    try std.testing.expect(res.? == 1);
}
src/serialize/pointer.zig
const datetime = @import("datetime");
const mecha = @import("mecha");
const std = @import("std");

const debug = std.debug;
const heap = std.heap;
const json = std.json;
const math = std.math;
const mem = std.mem;

/// Airing season of a show; `undef` maps the database's UNDEFINED value.
pub const Season = enum(u4) {
    spring,
    summer,
    fall,
    winter,
    undef,
};

/// Anime database sites this tool can cross-reference.
pub const Site = enum(u3) {
    anidb,
    anilist,
    anisearch,
    kitsu,
    livechart,
    myanimelist,

    /// URL prefix for a show page on this site; append the numeric id.
    pub fn url(site: Site) []const u8 {
        return switch (site) {
            .anidb => "https://anidb.net/anime/",
            .anilist => "https://anilist.co/anime/",
            .anisearch => "https://anisearch.com/anime/",
            .kitsu => "https://kitsu.io/anime/",
            .livechart => "https://livechart.me/anime/",
            .myanimelist => "https://myanimelist.net/anime/",
        };
    }
};

/// A (site, numeric id) pair identifying one show on one database site.
pub const Id = struct {
    site: Site,
    id: u32,

    /// Parses a full show URL back into an Id by matching each site's URL
    /// prefix. Returns error.InvalidUrl if no site prefix matches.
    pub fn fromUrl(url: []const u8) !Id {
        inline for (@typeInfo(Site).Enum.fields) |field| {
            const site = @field(Site, field.name);
            const site_url = site.url();
            if (mem.startsWith(u8, url, site_url)) {
                const id = try std.fmt.parseUnsigned(u32, url[site_url.len..], 10);
                return Id{ .site = site, .id = id };
            }
        }
        return error.InvalidUrl;
    }
};

const link_size = 159;
const str_size = 179;

/// One database record. Site id fields hold maxInt(u32) when the show is not
/// on that site (see getId). Fixed-size sentinel buffers keep Info flat so it
/// can live in a MultiArrayList without per-entry allocation.
pub const Info = struct {
    anidb: u32,
    anilist: u32,
    anisearch: u32,
    kitsu: u32,
    livechart: u32,
    myanimelist: u32,
    title: [str_size:0]u8,
    image: [str_size:0]u8,
    year: u16,
    episodes: u16,
    type: Type,
    season: Season,

    pub const Type = enum(u4) {
        tv,
        movie,
        ova,
        ona,
        special,
        unknown,
    };

    /// First site (in Site declaration order) that has a real id for this
    /// entry. Falls back to (anidb, maxInt) if no site matched.
    pub fn id(info: Info) Id {
        inline for (@typeInfo(Site).Enum.fields) |field| {
            if (@field(info, field.name) != math.maxInt(u32))
                return .{ .site = @field(Site, field.name), .id = @field(info, field.name) };
        }
        return .{ .site = Site.anidb, .id = info.anidb };
    }

    /// Scans the entry's source URLs for one belonging to `site`;
    /// maxInt(u32) acts as the "not present" sentinel.
    fn getId(site: Site, urls: []const []const u8) u32 {
        for (urls) |url| {
            const res = Id.fromUrl(url) catch continue;
            if (res.site == site)
                return res.id;
        }
        return math.maxInt(u32);
    }

    /// Streams `{"data": [ ... ]}` and parses each element with fromJson.
    /// Caller owns the returned MultiArrayList (deinit with `allocator`).
    pub fn fromJsonList(stream: *json.TokenStream, allocator: mem.Allocator) !std.MultiArrayList(Info) {
        try expectJsonToken(stream, .ObjectBegin);
        try expectJsonString(stream, "data");
        try expectJsonToken(stream, .ArrayBegin);

        var res = std.MultiArrayList(Info){};
        errdefer res.deinit(allocator);
        while (true) {
            const token = (try stream.next()) orelse return error.UnexpectEndOfStream;
            if (token == .ArrayEnd)
                break;

            // HACK: Put back token. This code is actually not correct. The reason
            // the token field exists in TokenStream is for cases when the
            // StreamingParser generates two tokens. If we hit that case, then
            // we're trying to throw away a token here. Jees this api is not
            // fun when you wonna do custom deserialization.
            debug.assert(stream.token == null);
            stream.token = token;
            try res.append(allocator, try fromJson(stream));
        }
        try expectJsonToken(stream, .ObjectEnd);
        return res;
    }

    /// Parses a single database entry. All transient strings live in a
    /// stack FixedBufferAllocator, so nothing here needs freeing; the data
    /// the returned Info keeps is copied into its fixed-size buffers.
    pub fn fromJson(stream: *json.TokenStream) !Info {
        var buf: [std.mem.page_size * 20]u8 = undefined;
        var fba = heap.FixedBufferAllocator.init(&buf);
        @setEvalBranchQuota(100000000);
        const entry = try json.parse(
            // Field names mirror the upstream JSON schema exactly.
            struct {
                sources: []const []const u8,
                title: []const u8,
                type: enum { TV, MOVIE, OVA, ONA, SPECIAL, UNKNOWN },
                episodes: u16,
                status: []const u8,
                animeSeason: struct {
                    season: enum { SPRING, SUMMER, FALL, WINTER, UNDEFINED },
                    year: ?u16,
                },
                picture: []const u8,
                thumbnail: []const u8,
                synonyms: []const []const u8,
                relations: []const []const u8,
                tags: []const []const u8,
            },
            stream,
            .{ .allocator = fba.allocator(), .allow_trailing_data = true },
        );
        // Without at least one source URL we cannot derive any site id.
        if (entry.sources.len == 0)
            return error.InvalidEntry;

        const toBuf = sliceToZBuf(u8, str_size, 0);
        return Info{
            .anidb = getId(.anidb, entry.sources),
            .anilist = getId(.anilist, entry.sources),
            .anisearch = getId(.anisearch, entry.sources),
            .kitsu = getId(.kitsu, entry.sources),
            .livechart = getId(.livechart, entry.sources),
            .myanimelist = getId(.myanimelist, entry.sources),
            // Titles/images longer than str_size make the entry invalid.
            .title = toBuf(fba.allocator(), entry.title) catch return error.InvalidEntry,
            .image = toBuf(fba.allocator(), entry.picture) catch return error.InvalidEntry,
            .type = switch (entry.type) {
                .TV => .tv,
                .MOVIE => .movie,
                .OVA => .ova,
                .ONA => .ona,
                .SPECIAL => .special,
                .UNKNOWN => .unknown,
            },
            .year = entry.animeSeason.year orelse 0,
            .season = switch (entry.animeSeason.season) {
                .SPRING => .spring,
                .SUMMER => .summer,
                .FALL => .fall,
                .WINTER => .winter,
                .UNDEFINED => .undef,
            },
            .episodes = entry.episodes,
        };
    }
};

/// A user's watch list, (de)serialized to a tab-separated-values format.
pub const List = struct {
    entries: std.ArrayListUnmanaged(Entry),

    // Omg, stop making the deinit function take a mutable pointer plz...
    pub fn deinit(list: *List, allocator: mem.Allocator) void {
        list.entries.deinit(allocator);
    }

    /// Parses one Entry per line. Caller owns the returned List.
    pub fn fromDsv(allocator: mem.Allocator, dsv: []const u8) !List {
        var res = std.ArrayListUnmanaged(Entry){};
        errdefer res.deinit(allocator);

        var it = mem.tokenize(u8, dsv, "\n");
        while (it.next()) |line|
            try res.append(allocator, try Entry.fromDsv(line));
        return List{ .entries = res };
    }

    /// Sorts in place, then writes one line per entry.
    pub fn writeToDsv(list: List, writer: anytype) !void {
        list.sort();
        for (list.entries.items) |entry| {
            try entry.writeToDsv(writer);
            try writer.writeAll("\n");
        }
    }

    pub fn findWithId(list: List, id: Id) ?*Entry {
        return list.find(id, struct {
            fn match(i: Id, entry: Entry) bool {
                return entry.id.id == i.id and entry.id.site == i.site;
            }
        }.match);
    }

    /// Linear search; returns a pointer into `entries` (invalidated by growth).
    pub fn find(list: List, ctx: anytype, match: fn (@TypeOf(ctx), Entry) bool) ?*Entry {
        for (list.entries.items) |*entry| {
            if (match(ctx, entry.*))
                return entry;
        }
        return null;
    }

    pub fn sort(list: List) void {
        std.sort.sort(Entry, list.entries.items, {}, struct {
            fn lessThan(_: void, a: Entry, b: Entry) bool {
                return a.lessThan(b);
            }
        }.lessThan);
    }
};

/// One watch-list line: date, status, episode counts, title, and site id.
pub const Entry = struct {
    date: datetime.Date,
    status: Status,
    episodes: usize,
    watched: usize,
    title: [str_size:0]u8,
    id: Id,

    pub const Status = enum {
        complete,
        dropped,
        on_hold,
        plan_to_watch,
        watching,

        /// Single-letter DSV code -> Status. The unused Allocator parameter
        /// exists to satisfy mecha.convert's converter signature.
        pub fn fromString(_: mem.Allocator, str: []const u8) !Status {
            return std.ComptimeStringMap(Status, .{
                .{ "c", .complete },
                .{ "d", .dropped },
                .{ "o", .on_hold },
                .{ "p", .plan_to_watch },
                .{ "w", .watching },
            }).get(str) orelse return error.ParserFailed;
        }

        pub fn toString(s: Status) []const u8 {
            return switch (s) {
                .complete => "c",
                .dropped => "d",
                .on_hold => "o",
                .plan_to_watch => "p",
                .watching => "w",
            };
        }
    };

    /// Lexicographic ordering over (date, status, episodes, watched, title,
    /// site, id) — the field order also defines the DSV sort order.
    pub fn lessThan(a: Entry, b: Entry) bool {
        switch (a.date.cmp(b.date)) {
            .lt => return true,
            .gt => return false,
            .eq => {},
        }
        switch (math.order(@enumToInt(a.status), @enumToInt(b.status))) {
            .lt => return true,
            .gt => return false,
            .eq => {},
        }
        switch (math.order(a.episodes, b.episodes)) {
            .lt => return true,
            .gt => return false,
            .eq => {},
        }
        switch (math.order(a.watched, b.watched)) {
            .lt => return true,
            .gt => return false,
            .eq => {},
        }
        switch (mem.order(u8, &a.title, &b.title)) {
            .lt => return true,
            .gt => return false,
            .eq => {},
        }
        switch (math.order(@enumToInt(a.id.site), @enumToInt(b.id.site))) {
            .lt => return true,
            .gt => return false,
            .eq => {},
        }
        switch (math.order(a.id.id, b.id.id)) {
            .lt => return true,
            .gt => return false,
            .eq => {},
        }
        return false;
    }

    /// Parses one DSV line. The empty FixedBufferAllocator works because the
    /// parsers below either don't allocate or copy into fixed buffers.
    pub fn fromDsv(row: []const u8) !Entry {
        var fba = heap.FixedBufferAllocator.init("");
        return (dsv(fba.allocator(), row) catch return error.InvalidEntry).value;
    }

    /// Writes the entry as one tab-separated line (no trailing newline).
    /// NOTE(review): "{d:4>2}" pads the year to width 2 using fill char '4';
    /// "{d:0>4}" was probably intended. Harmless for 4-digit years — confirm
    /// before relying on years below 1000.
    pub fn writeToDsv(entry: Entry, writer: anytype) !void {
        try writer.print("{d:4>2}-{d:0>2}-{d:0>2}\t{s}\t{}\t{}\t{s}\t{s}{d}", .{
            entry.date.year,
            entry.date.month,
            entry.date.day,
            entry.status.toString(),
            entry.episodes,
            entry.watched,
            mem.sliceTo(&entry.title, 0),
            entry.id.site.url(),
            entry.id.id,
        });
    }

    // mecha parser for a whole line; sub-parser order matches the struct
    // field order required by mecha.toStruct(Entry).
    const dsv = mecha.map(Entry, mecha.toStruct(Entry), mecha.combine(.{
        date,
        mecha.ascii.char('\t'),
        status,
        mecha.ascii.char('\t'),
        mecha.int(usize, .{ .parse_sign = false }),
        mecha.ascii.char('\t'),
        mecha.int(usize, .{ .parse_sign = false }),
        mecha.ascii.char('\t'),
        string,
        mecha.ascii.char('\t'),
        link,
        mecha.eos,
    }));

    // "YYYY-M-D" (widths per the int types: u16 year, u4 month, u8 day).
    const date = mecha.map(datetime.Date, mecha.toStruct(datetime.Date), mecha.combine(.{
        mecha.int(u16, .{ .parse_sign = false }),
        mecha.ascii.char('-'),
        mecha.int(u4, .{ .parse_sign = false }),
        mecha.ascii.char('-'),
        mecha.int(u8, .{ .parse_sign = false }),
    }));

    const status = mecha.convert(Status, Status.fromString, any);

    // Copies the matched text into a fixed, zero-terminated buffer.
    const string = mecha.convert([str_size:0]u8, sliceToZBuf(u8, str_size, 0), any);

    // A full site URL, converted back into an Id.
    const link = mecha.convert(Id, struct {
        fn conv(_: mem.Allocator, in: []const u8) !Id {
            return Id.fromUrl(in);
        }
    }.conv, any);

    // Any run of non-tab characters, matched without allocating.
    const any = mecha.many(mecha.ascii.not(mecha.ascii.char('\t')), .{ .collect = false });
};

/// Asserts the next streamed JSON token has the given tag.
fn expectJsonToken(stream: *json.TokenStream, id: std.meta.Tag(json.Token)) !void {
    const token = (try stream.next()) orelse return error.UnexpectEndOfStream;
    if (token != id)
        return error.UnexpectJsonToken;
}

/// Asserts the next streamed JSON token is exactly `string`.
fn expectJsonString(stream: *json.TokenStream, string: []const u8) !void {
    const token = switch ((try stream.next()) orelse return error.UnexpectEndOfStream) {
        .String => |string_token| string_token,
        else => return error.UnexpectJsonToken,
    };
    // TODO: Man, I really wanted to use `json.encodesTo` but the Zig standard library
    //       said "No fun allowed" so I'll have to make do with `mem.eql` even though
    //       that is the wrong api for this task...
    if (!mem.eql(u8, string, token.slice(stream.slice, stream.i - 1)))
        return error.UnexpectJsonString;
}

/// Returns a converter (for mecha.convert) that copies a slice into a
/// fixed-size sentinel-terminated array, failing if the slice is too long.
fn sliceToZBuf(
    comptime T: type,
    comptime len: usize,
    comptime sentinel: T,
) fn (mem.Allocator, []const T) mecha.Error![len:sentinel]T {
    return struct {
        fn func(_: mem.Allocator, slice: []const T) mecha.Error![len:sentinel]T {
            if (slice.len > len)
                return error.ParserFailed;
            var res: [len:sentinel]T = [_:sentinel]T{sentinel} ** len;
            mem.copy(T, &res, slice);
            return res;
        }
    }.func;
}
src/anime.zig
const std = @import("std");
const os = std.os;
const time = os.time;
const assert = std.debug.assert;

/// Result of a lock/unlock/peek operation.
pub const Status = enum {
    Locked,
    Unlocked,
    Timeout,
};

/// A tiny mutex built on a single atomic integer.
/// Protocol: atom == 1 means unlocked, atom == 0 means locked; locking is an
/// atomic exchange to 0, unlocking an exchange back to 1.
/// (Uses pre-0.4-era std APIs: std.atomic.Int, createOne, os.time.)
pub const Umutex = struct {
    atom: std.atomic.Int(usize),

    /// uMutexes are allocated
    pub fn init(allocator: *std.mem.Allocator) !*Umutex {
        var self: *Umutex = try allocator.createOne(Umutex);
        self.atom = std.atomic.Int(usize).init(1); // start unlocked
        return self;
    }

    /// Helper fn to lock the mutex with no timeout and no delay between checks (spinlocks).
    /// Returns Status.Locked when the lock is acquired.
    pub inline fn lock(self: *Umutex) Status {
        return self.lockDelayTimeout(0, 0);
    }

    /// Helper fn to lock with a sleep between attempts.
    /// Returns Status.Locked when the lock is acquired.
    pub inline fn lockDelay(self: *Umutex, nanoDelay: u64) Status {
        return self.lockDelayTimeout(nanoDelay, 0);
    }

    /// Helper fn to lock with no delay (spinlock) with a timeout.
    /// Returns Status.Locked on acquisition; Status.Timeout on timeout.
    pub inline fn lockTimeout(self: *Umutex, miliTimeout: u64) Status {
        return self.lockDelayTimeout(0, miliTimeout);
    }

    /// Mutex locking with optional sleep between checks and timeout.
    /// nanoDelay: sleep between attempts in ns (0 = spin).
    /// miliTimeout: give up after this many ms (0 = wait forever).
    pub fn lockDelayTimeout(self: *Umutex, nanoDelay: u64, miliTimeout: u64) Status {
        var startTime: u64 = 0;
        if (miliTimeout > 0) {
            startTime = time.milliTimestamp();
        }
        // xchg(0) attempts the lock; the old value is 1 only if we were the
        // ones to flip it from unlocked to locked.
        while (self.atom.xchg(0) != 1) {
            if (miliTimeout > 0 and (time.milliTimestamp() - startTime) > miliTimeout) {
                return Status.Timeout;
            }
            if (nanoDelay > 0) {
                time.sleep(nanoDelay);
            }
        }
        return Status.Locked;
    }

    /// Atomically unlocks the mutex; no other checks are done.
    pub fn unlock(self: *Umutex) Status {
        // If this assersion fails, then umutex was incorrectly unlocked earlier from somewhere else
        assert(self.atom.xchg(1) == 0);
        return Status.Unlocked;
    }

    /// Atomically checks if mutex is locked or unlocked (value may be stale
    /// by the time the caller acts on it).
    pub fn peek(self: *Umutex) Status {
        if (self.atom.get() == 1) {
            return Status.Unlocked;
        }
        return Status.Locked;
    }
};

test "uMutex lock, unlock, and peeking" {
    var da = std.heap.DirectAllocator.init();
    defer da.deinit();
    var allocator = &da.allocator;
    var mutex = try Umutex.init(allocator);
    defer allocator.destroy(mutex);

    assert(mutex.lock() == Status.Locked);
    assert(mutex.peek() == Status.Locked);
    assert(mutex.unlock() == Status.Unlocked);
    assert(mutex.peek() == Status.Unlocked);
}

test "uMutex lock and relock with timeout" {
    var da = std.heap.DirectAllocator.init();
    defer da.deinit();
    var allocator = &da.allocator;
    var mutex = try Umutex.init(allocator);
    defer allocator.destroy(mutex);

    // Second lock on an already-held mutex must time out (50 ms).
    assert(mutex.lock() == Status.Locked);
    assert(mutex.lockTimeout(50) == Status.Timeout);
}

test "uMutex atomic counting with threads" {
    // this test is a clone of the thread test of std.mutex
    var da = std.heap.DirectAllocator.init();
    defer da.deinit();
    var allocator = &da.allocator;
    var mutex = try Umutex.init(allocator);
    defer allocator.destroy(mutex);

    var context = Context{
        .mutex = mutex,
        .data = 0,
    };
    const thread_count = 10;
    var threads: [thread_count]*std.os.Thread = undefined;
    for (threads) |*t| {
        t.* = try std.os.spawnThread(&context, worker);
    }
    for (threads) |t|
        t.wait();

    std.debug.assertOrPanic(context.data == thread_count * Context.incr_count);
}

// Shared state for the threaded counting test above.
const Context = struct {
    mutex: *Umutex,
    data: i128,

    const incr_count = 10000;
};

// Increments the shared counter under the mutex incr_count times.
fn worker(ctx: *Context) void {
    var i: usize = 0;
    while (i != Context.incr_count) : (i += 1) {
        _ = ctx.mutex.lockDelay(10 * time.millisecond);
        defer _ = ctx.mutex.unlock();
        ctx.data += 1;
    }
}
umutex.zig
const std = @import("std");
const assert = std.debug.assert;
const fs = std.fs;
const mem = std.mem;
const zbs = std.build;

const ScanProtocolsStep = @import("deps/zig-wayland/build.zig").ScanProtocolsStep;

/// While a river release is in development, this string should contain the version in development
/// with the "-dev" suffix.
/// When a release is tagged, the "-dev" suffix should be removed for the commit that gets tagged.
/// Directly after the tagged commit, the version should be bumped and the "-dev" suffix added.
const version = "0.1.0";

pub fn build(b: *zbs.Builder) !void {
    const target = b.standardTargetOptions(.{});
    const mode = b.standardReleaseOptions();

    const xwayland = b.option(
        bool,
        "xwayland",
        "Set to true to enable xwayland support",
    ) orelse false;

    // Default to building man pages only when scdoc is available.
    const man_pages = b.option(
        bool,
        "man-pages",
        "Set to true to build man pages. Requires scdoc. Defaults to true if scdoc is found.",
    ) orelse scdoc_found: {
        _ = b.findProgram(&[_][]const u8{"scdoc"}, &[_][]const u8{}) catch |err| switch (err) {
            error.FileNotFound => break :scdoc_found false,
            else => return err,
        };
        break :scdoc_found true;
    };

    const bash_completion = b.option(
        bool,
        "bash-completion",
        "Set to true to install bash completion for riverctl. Defaults to true.",
    ) orelse true;

    const zsh_completion = b.option(
        bool,
        "zsh-completion",
        "Set to true to install zsh completion for riverctl. Defaults to true.",
    ) orelse true;

    const fish_completion = b.option(
        bool,
        "fish-completion",
        "Set to true to install fish completion for riverctl. Defaults to true.",
    ) orelse true;

    // For dev builds, append the short git commit hash to the version string;
    // fall back to the bare version if git information is unavailable.
    const full_version = blk: {
        if (mem.endsWith(u8, version, "-dev")) {
            var ret: u8 = undefined;

            const git_dir = try fs.path.join(b.allocator, &[_][]const u8{ b.build_root, ".git" });
            const git_commit_hash = b.execAllowFail(
                &[_][]const u8{ "git", "--git-dir", git_dir, "--work-tree", b.build_root, "rev-parse", "--short", "HEAD" },
                &ret,
                .Inherit,
            ) catch break :blk version;
            break :blk try std.fmt.allocPrintZ(b.allocator, "{s}-{s}", .{
                version,
                mem.trim(u8, git_commit_hash, &std.ascii.spaces),
            });
        } else {
            break :blk version;
        }
    };

    // Generate Zig bindings for all wayland protocols used by river.
    const scanner = ScanProtocolsStep.create(b);
    scanner.addSystemProtocol("stable/xdg-shell/xdg-shell.xml");
    scanner.addSystemProtocol("unstable/pointer-gestures/pointer-gestures-unstable-v1.xml");
    scanner.addSystemProtocol("unstable/xdg-output/xdg-output-unstable-v1.xml");
    scanner.addSystemProtocol("unstable/pointer-constraints/pointer-constraints-unstable-v1.xml");
    scanner.addProtocolPath("protocol/river-control-unstable-v1.xml");
    scanner.addProtocolPath("protocol/river-status-unstable-v1.xml");
    scanner.addProtocolPath("protocol/river-layout-v3.xml");
    scanner.addProtocolPath("protocol/wlr-layer-shell-unstable-v1.xml");
    scanner.addProtocolPath("protocol/wlr-output-power-management-unstable-v1.xml");

    {
        const river = b.addExecutable("river", "river/main.zig");
        river.setTarget(target);
        river.setBuildMode(mode);
        river.addBuildOption(bool, "xwayland", xwayland);
        river.addBuildOption([:0]const u8, "version", full_version);
        addServerDeps(river, scanner);
        river.install();
    }

    {
        const riverctl = b.addExecutable("riverctl", "riverctl/main.zig");
        riverctl.setTarget(target);
        riverctl.setBuildMode(mode);
        riverctl.addBuildOption([:0]const u8, "version", full_version);
        riverctl.step.dependOn(&scanner.step);
        riverctl.addPackage(scanner.getPkg());
        riverctl.addPackagePath("flags", "common/flags.zig");
        riverctl.linkLibC();
        riverctl.linkSystemLibrary("wayland-client");
        scanner.addCSource(riverctl);
        riverctl.install();
    }

    {
        const rivertile = b.addExecutable("rivertile", "rivertile/main.zig");
        rivertile.setTarget(target);
        rivertile.setBuildMode(mode);
        rivertile.addBuildOption([:0]const u8, "version", full_version);
        rivertile.step.dependOn(&scanner.step);
        rivertile.addPackage(scanner.getPkg());
        rivertile.addPackagePath("flags", "common/flags.zig");
        rivertile.linkLibC();
        rivertile.linkSystemLibrary("wayland-client");
        scanner.addCSource(rivertile);
        rivertile.install();
    }

    {
        // Generate and install a pkg-config file for river's protocol XMLs.
        const file = try fs.path.join(b.allocator, &[_][]const u8{ b.cache_root, "river-protocols.pc" });
        const pkgconfig_file = try std.fs.cwd().createFile(file, .{});
        // FIX: register the close immediately after a successful open, so the
        // handle is not leaked if writing the file below fails. (Previously
        // the defer was placed after the `try writer.print`.)
        defer pkgconfig_file.close();

        const writer = pkgconfig_file.writer();
        try writer.print(
            \\prefix={s}
            \\datadir=${{prefix}}/share
            \\pkgdatadir=${{datadir}}/river-protocols
            \\
            \\Name: river-protocols
            \\URL: https://github.com/riverwm/river
            \\Description: protocol files for the river wayland compositor
            \\Version: {s}
        , .{ b.install_prefix, full_version });

        b.installFile("protocol/river-layout-v3.xml", "share/river-protocols/river-layout-v3.xml");
        b.installFile(file, "share/pkgconfig/river-protocols.pc");
    }

    if (man_pages) {
        const scdoc_step = ScdocStep.create(b);
        try scdoc_step.install();
    }

    if (bash_completion) {
        b.installFile("completions/bash/riverctl", "share/bash-completion/completions/riverctl");
    }

    if (zsh_completion) {
        b.installFile("completions/zsh/_riverctl", "share/zsh/site-functions/_riverctl");
    }

    if (fish_completion) {
        b.installFile("completions/fish/riverctl.fish", "share/fish/vendor_completions.d/riverctl.fish");
    }

    {
        const river_test = b.addTest("river/test_main.zig");
        river_test.setTarget(target);
        river_test.setBuildMode(mode);
        river_test.addBuildOption(bool, "xwayland", xwayland);
        addServerDeps(river_test, scanner);

        const test_step = b.step("test", "Run the tests");
        test_step.dependOn(&river_test.step);
    }
}

/// Wires up every package and system library the river server (and its test
/// binary) needs: wayland, xkbcommon, pixman, wlroots, libinput, libevdev.
fn addServerDeps(exe: *zbs.LibExeObjStep, scanner: *ScanProtocolsStep) void {
    const wayland = scanner.getPkg();
    const xkbcommon = zbs.Pkg{ .name = "xkbcommon", .path = "deps/zig-xkbcommon/src/xkbcommon.zig" };
    const pixman = zbs.Pkg{ .name = "pixman", .path = "deps/zig-pixman/pixman.zig" };
    const wlroots = zbs.Pkg{
        .name = "wlroots",
        .path = "deps/zig-wlroots/src/wlroots.zig",
        .dependencies = &[_]zbs.Pkg{ wayland, xkbcommon, pixman },
    };

    exe.step.dependOn(&scanner.step);

    exe.linkLibC();
    exe.linkSystemLibrary("libevdev");
    exe.linkSystemLibrary("libinput");

    exe.addPackage(wayland);
    exe.linkSystemLibrary("wayland-server");

    exe.addPackage(xkbcommon);
    exe.linkSystemLibrary("xkbcommon");

    exe.addPackage(pixman);
    exe.linkSystemLibrary("pixman-1");

    exe.addPackage(wlroots);
    exe.linkSystemLibrary("wlroots");

    exe.addPackagePath("flags", "common/flags.zig");
    exe.addCSourceFile("river/wlroots_log_wrapper.c", &[_][]const u8{ "-std=c99", "-O2" });

    // TODO: remove when zig issue #131 is implemented
    scanner.addCSource(exe);
}

/// Custom build step that runs scdoc over each .scd file and installs the
/// resulting man page into share/man/man<section>/.
const ScdocStep = struct {
    const scd_paths = [_][]const u8{
        "doc/river.1.scd",
        "doc/riverctl.1.scd",
        "doc/rivertile.1.scd",
    };

    builder: *zbs.Builder,
    step: zbs.Step,

    fn create(builder: *zbs.Builder) *ScdocStep {
        const self = builder.allocator.create(ScdocStep) catch @panic("out of memory");
        self.* = init(builder);
        return self;
    }

    fn init(builder: *zbs.Builder) ScdocStep {
        return ScdocStep{
            .builder = builder,
            .step = zbs.Step.init(.Custom, "Generate man pages", builder.allocator, make),
        };
    }

    fn make(step: *zbs.Step) !void {
        const self = @fieldParentPtr(ScdocStep, "step", step);
        for (scd_paths) |path| {
            // "foo.1.scd" -> "foo.1" (strip the ".scd" extension).
            const command = try std.fmt.allocPrint(
                self.builder.allocator,
                "scdoc < {s} > {s}",
                .{ path, path[0..(path.len - 4)] },
            );
            _ = try self.builder.exec(&[_][]const u8{ "sh", "-c", command });
        }
    }

    fn install(self: *ScdocStep) !void {
        self.builder.getInstallStep().dependOn(&self.step);
        for (scd_paths) |path| {
            const path_no_ext = path[0..(path.len - 4)];
            const basename_no_ext = fs.path.basename(path_no_ext);
            // The man section is the final character of the extensionless name.
            const section = path_no_ext[(path_no_ext.len - 1)..];
            const output = try std.fmt.allocPrint(
                self.builder.allocator,
                "share/man/man{s}/{s}",
                .{ section, basename_no_ext },
            );
            self.builder.installFile(path_no_ext, output);
        }
    }
};
source/river-0.1.0/build.zig
const std = @import("std");
usingnamespace (@import("../machine.zig"));
usingnamespace (@import("../util.zig"));

// QuickRef: https://www.felixcloutier.com/x86/xchg

// Shorthand operand constructors from util.zig.
const reg = Operand.register;
const regRm = Operand.registerRm;
const memRm = Operand.memoryRmDef;
const memRmSeg = Operand.memoryRm;

// Each testOp2 asserts that assembling XCHG with the given operands in the
// given machine mode yields exactly the hex byte string (or the given error).
test "xchg" {
    const m16 = Machine.init(.x86_16);
    const m32 = Machine.init(.x86_32);
    const m64 = Machine.init(.x64);

    debugPrint(false);

    {
        // This is a special edge case on x86-64. Cant use encoding 90 for
        // XCHG EAX, EAX
        // (a lone 0x90 is NOP on x64; 32-bit EAX,EAX must use 87 /r instead).
        testOp2(m64, .XCHG, reg(.AX), reg(.AX), "66 90");
        testOp2(m64, .XCHG, reg(.EAX), reg(.EAX), "87 c0");
        testOp2(m64, .XCHG, reg(.RAX), reg(.RAX), "48 90");
        //
        testOp2(m64, .XCHG, reg(.AX), regRm(.AX), "66 87 c0");
        testOp2(m64, .XCHG, reg(.EAX), regRm(.EAX), "87 c0");
        testOp2(m64, .XCHG, reg(.RAX), regRm(.RAX), "48 87 c0");
        // other register combinations still work same way on x86-64
        // (short form 0x90+r with the accumulator).
        testOp2(m64, .XCHG, reg(.AX), reg(.CX), "66 91");
        testOp2(m64, .XCHG, reg(.EAX), reg(.ECX), "91");
        testOp2(m64, .XCHG, reg(.RAX), reg(.RCX), "48 91");
        //
        testOp2(m64, .XCHG, reg(.AX), reg(.DI), "66 97");
        testOp2(m64, .XCHG, reg(.EAX), reg(.EDI), "97");
        testOp2(m64, .XCHG, reg(.RAX), reg(.RDI), "48 97");
        //
        testOp2(m64, .XCHG, reg(.AX), reg(.SI), "66 96");
        testOp2(m64, .XCHG, reg(.EAX), reg(.ESI), "96");
        testOp2(m64, .XCHG, reg(.RAX), reg(.RSI), "48 96");
        // on 16/32 bit can use encoding 90 for XCHG EAX, EAX
        // (64-bit registers are invalid outside long mode).
        testOp2(m32, .XCHG, reg(.AX), reg(.AX), "66 90");
        testOp2(m32, .XCHG, reg(.EAX), reg(.EAX), "90");
        testOp2(m32, .XCHG, reg(.RAX), reg(.RAX), AsmError.InvalidOperand);
        //
        testOp2(m16, .XCHG, reg(.AX), reg(.AX), "90");
        testOp2(m16, .XCHG, reg(.EAX), reg(.EAX), "66 90");
        testOp2(m16, .XCHG, reg(.RAX), reg(.RAX), AsmError.InvalidOperand);
    }

    {
        // Extended (R8-R15 / SIL etc.) registers need REX prefixes; mixed
        // operand sizes are rejected.
        testOp2(m64, .XCHG, reg(.EAX), reg(.R15D), "41 97");
        testOp2(m64, .XCHG, reg(.R15W), reg(.AX), "66 41 97");
        testOp2(m64, .XCHG, reg(.R15), reg(.RAX), "49 97");
        //
        testOp2(m64, .XCHG, reg(.R15), regRm(.R14), "4d 87 fe");
        testOp2(m64, .XCHG, regRm(.R15), reg(.R14), "4d 87 f7");
        testOp2(m64, .XCHG, reg(.R15), regRm(.R14), "4d 87 fe");
        //
        testOp2(m64, .XCHG, reg(.R15B), regRm(.SIL), "44 86 fe");
        testOp2(m64, .XCHG, regRm(.R15B), reg(.SIL), "41 86 f7");
        //
        testOp2(m64, .XCHG, reg(.R15), reg(.AX), AsmError.InvalidOperand);
        testOp2(m64, .XCHG, reg(.EAX), reg(.R15W), AsmError.InvalidOperand);
    }

    {
        // Memory forms: 87 /r with a ModRM byte, either operand order.
        testOp2(m64, .XCHG, reg(.EAX), memRm(.DWORD, .RAX, 0x11), "87 40 11");
        testOp2(m64, .XCHG, memRm(.DWORD, .RAX, 0x11), reg(.EAX), "87 40 11");
    }
}
src/x86/tests/xchg.zig
//! aarch64 client-side debugger support: reads exception frames from the
//! device, maps them onto GDB's register numbering (see aarch64.xml), and
//! plants breakpoint/single-step hooks.

const proto = @import("proto");
const std = @import("std");
const gdb = @import("gdb");
const client = @import("client.zig");

/// Target description handed to GDB; defines the regnum layout consumed by
/// `Frame.getGdbReg`.
pub const target_xml = @embedFile("aarch64.xml");
pub const num_default_registers = 32;
/// All aarch64 instructions are 4 bytes.
pub const HookedInstr = [4]u8;

/// Read the remainder of a frame (header already consumed) for a debugger
/// running at exception level `EL`, and widen it into the common `Frame`.
/// EL2/EL3 register banks are left `undefined` when below that EL.
fn readWithEL(comptime EL: comptime_int) !Frame {
    const RemoteFrame = proto.aarch64.ElFrame(EL);
    var frame: RemoteFrame = undefined;
    // The header bytes were already read by readFrame(); fill in the rest.
    try client.device_reader.readNoEof(std.mem.asBytes(&frame)[@sizeOf(proto.aarch64.FrameHeader)..]);
    std.log.info("Raw frame: {any}", .{frame});
    return Frame{
        .gpr = frame.gpr,
        .debugger_el = EL,
        .el1 = frame.el1,
        .gdb_halt_message = undefined,
        .el2 = if (comptime (EL < 2)) undefined else frame.el2,
        .el3 = if (comptime (EL < 3)) undefined else frame.el3,
    };
}

/// Block until a complete frame arrives from the device, decode the
/// exception syndrome and fill in `gdb_halt_message` accordingly.
pub fn readFrame() !Frame {
    while (true) {
        const first_byte = try client.device_reader.readByte();
        // Anything that isn't the escape byte is skipped until a frame starts.
        if (first_byte == proto.escape_byte) {
            var frame_header: proto.aarch64.FrameHeader = undefined;
            frame_header.escape_byte = first_byte;
            try client.device_reader.readNoEof(std.mem.asBytes(&frame_header)[1..]);
            std.log.info("Frame header: {any}", .{frame_header});
            var frame = try switch (frame_header.current_el) {
                1 => readWithEL(1),
                2 => readWithEL(2),
                3 => readWithEL(3),
                else => unreachable,
            };

            const ELR = frame.debuggerRegs().ELR.*;

            const hit_step_hook = blk: {
                // We have to translate this back into an address for us
                // since we may be using a different page map than the debuggee
                const current_instr_addr = frame.operableAddr(ELR, .r) catch |err| {
                    switch (err) {
                        error.CannotWrite => unreachable, // we only asked for read access
                        error.PaddrNotIdentityMapped,
                        error.NotMapped,
                        => break :blk false,
                        else => {},
                    }
                    return err;
                };
                break :blk client.hasStepHookAtAddr(current_instr_addr);
            };

            const ESR = frame.debuggerRegs().ESR.*;
            // Exception class lives in ESR_ELx[31:26].
            const ec = @truncate(u6, ESR >> 26) & 0x3F;
            switch (ec) {
                0b010111, // AA64 SMC
                0b010110, // AA64 HVC
                0b010101, // AA64 SVC
                0b010011, // AA32 SVC
                => {
                    if (hit_step_hook) {
                        frame.gdb_halt_message = "S05"; // SIGTRAP
                        // These traps advance the PC before taking the
                        // exception; rewind so the real (restored)
                        // instruction executes on resume.
                        frame.debuggerRegs().ELR.* -= 4;
                    } else {
                        frame.gdb_halt_message = "T05swbreak:;"; // SIGTRAP from software breakpoint
                    }
                },
                0b001110 => { // Illegal execution state
                    frame.gdb_halt_message = "S04"; // SIGILL
                },
                0b001000 => { // Invalid opcode
                    frame.gdb_halt_message = "S04"; // SIGILL
                },
                0b100010 => { // PC alignment fault
                    frame.gdb_halt_message = "S07"; // SIGBUS
                },
                0b100000, // Page fault on instruction fetch, lower EL
                0b100001, // Page fault on instruction fetch, same EL
                0b100100, // Page fault on data access, lower EL
                0b100101, // Page fault on data access, same EL
                => {
                    frame.gdb_halt_message = "S0B"; // SIGSEGV
                },
                0b111100 => { // BRK instruction, same EL
                    // BRK does NOT advance the PC, so no rewind here.
                    if (hit_step_hook) {
                        frame.gdb_halt_message = "S05"; // SIGTRAP
                    } else {
                        frame.gdb_halt_message = "T05swbreak:;"; // SIGTRAP from software breakpoint
                    }
                },
                else => |v| {
                    std.log.err("BAD ESR??? 0b{b}", .{v});
                    frame.gdb_halt_message = "S02"; // SIGINT
                },
            }
            return frame;
        }
    }
}

/// Assemble an AT (address translate) system instruction:
/// AT <op>, Xt with the given op1/CRm<0>/op2 fields.
fn atOp(
    op1: u3,
    CR_m: u1,
    op2: u3,
    Xt: u5,
) u32 {
    // zig fmt: off
    return 0xD5087800
        | (@as(u32, op1) << 16)
        | (@as(u32, CR_m) << 8)
        | (@as(u32, op2) << 5)
        | (@as(u32, Xt) << 0)
    ;
    // zig fmt: on
}

/// Select the right AT instruction variant for the requested access kind,
/// exception level and translation regime. Returns `error.InvalidArgument`
/// for combinations that have no AT encoding (e.g. S12 at EL2/EL3).
fn atInstr(
    rw: client.RW, // Require read or write access
    el: enum { el0, el1, el2, el3, el1pan }, // el1pan is EL1 but with PAN checks
    stages: enum { stage1, stages12 }, // Do just stage1 or both stage1 and 2 translation
    Xt: u5, // Register with virtual address
) error{InvalidArgument}!u32 {
    return switch (stages) {
        .stages12 => switch (el) {
            .el0 => switch (rw) {
                .r => atOp(4, 0, 6, Xt), // AT S12E0R
                .w => atOp(4, 0, 7, Xt), // AT S12E0W
            },
            .el1 => switch (rw) {
                .r => atOp(4, 0, 4, Xt), // AT S12E1R
                .w => atOp(4, 0, 5, Xt), // AT S12E1W
            },
            else => error.InvalidArgument,
        },
        .stage1 => switch (el) {
            .el0 => switch (rw) {
                .r => atOp(0, 0, 2, Xt), // AT S1E0R
                .w => atOp(0, 0, 3, Xt), // AT S1E0W
            },
            .el1 => switch (rw) {
                .r => atOp(0, 0, 0, Xt), // AT S1E1R
                .w => atOp(0, 0, 1, Xt), // AT S1E1W
            },
            .el1pan => switch (rw) {
                .r => atOp(0, 1, 0, Xt), // AT S1E1RP
                .w => atOp(0, 1, 1, Xt), // AT S1E1WP
            },
            .el2 => switch (rw) {
                .r => atOp(4, 0, 0, Xt), // AT S1E2R
                .w => atOp(4, 0, 1, Xt), // AT S1E2W
            },
            .el3 => switch (rw) {
                .r => atOp(6, 0, 0, Xt), // AT S1E3R
                .w => atOp(6, 0, 1, Xt), // AT S1E3W
            },
        },
    };
}

/// Compute the branch target `instr_addr + sign_extend(imm) * 4`,
/// with wrapping arithmetic matching the hardware behavior.
fn calcNextAddr(instr_addr: u64, imm: anytype) u64 {
    const imm_len = @bitSizeOf(@TypeOf(imm));
    // First, make the imm signed
    const s_imm = @bitCast(std.meta.Int(.signed, imm_len), imm);
    // Now make it an instruction address offset (units of 4 bytes)
    const full_s_imm = @intCast(i64, s_imm) * 4;
    // Wrapping add the values
    return instr_addr +% @bitCast(u64, full_s_imm);
}

/// Extract the EL the exception was taken from (SPSR_ELx.M[3:2]).
fn spsrSavedEl(spsr: u64) u2 {
    return @truncate(u2, spsr >> 2);
}

/// Walk the debuggee's page tables on-device (via an AT instruction) to
/// translate `vaddr` at `el` for access `op`. Returns null if unmapped.
fn translateVaddr(vaddr: u64, el: u2, op: client.RW) !?u64 {
    // TODO: Do something more reasonable here.
    // Is there any way we can succeed with the page table walk, try to access it and catch the SError
    // in case the address is mapped but not backed by any hardware?
    if (vaddr == 0)
        return null;

    const instr = try atInstr(
        op,
        switch (el) {
            0 => .el0,
            1 => .el1,
            2 => .el2,
            3 => .el3,
        },
        .stage1, // Assume 2 stage translation is disabled for now
        0, // vaddr in X0
    );

    // NOTE(review): the AT instruction word is smuggled in the `size` field
    // of the command — confirm this matches the device-side protocol.
    try client.sendDeviceCommand(.{
        .command_type = .resolve_addr,
        .addr = vaddr,
        .size = instr,
    });

    const PAR_EL1 = try client.device_reader.readIntLittle(u64);
    std.log.info("Addr translation of 0x{X} at EL{d} for op {any} -> PAR_EL1=0x{X}", .{ vaddr, el, op, PAR_EL1 });

    // Decode PAR_EL1: F bit (failure), output address, attributes.
    const attr = @truncate(u8, PAR_EL1 >> 56);
    const paddr = @truncate(u48, PAR_EL1 & ~@as(u64, 0xFFF));
    const ns = @truncate(u1, PAR_EL1 >> 9);
    const sh = @truncate(u2, PAR_EL1 >> 7);
    const f = @truncate(u1, PAR_EL1);

    if (f != 0) {
        std.log.info("Result: Not mapped!", .{});
        return null;
    }

    std.log.info("Result addr: 0x{X}, attr=0x{X} ns={} sh={X}", .{
        paddr, attr, ns != 0, sh,
    });
    return paddr;
}

/// A decoded exception frame plus everything needed to service GDB requests
/// against it. EL2/EL3 banks are only valid when debugger_el reaches them.
pub const Frame = struct {
    gpr: proto.aarch64.GPRs,
    debugger_el: u2,
    fake_cpsr: u64 = undefined, // Thanks GDB for requiring this on aarch64
    el1: proto.aarch64.EL1Regs,
    el2: proto.aarch64.EL2Regs,
    el3: proto.aarch64.EL3Regs,
    gdb_halt_message: []const u8 = undefined,

    /// Whether the stage-1 MMU is on at `el` (SCTLR_ELx.M).
    pub fn pagingEnabled(self: *@This(), el: u2) bool {
        switch (el) {
            0, 1 => return @truncate(u1, self.el1.SCTLR_EL1) == 1,
            2 => return @truncate(u1, self.el2.SCTLR_EL2) == 1,
            3 => return @truncate(u1, self.el3.SCTLR_EL3) == 1,
        }
    }

    /// Page size at `addr`. Currently hard-coded; see the disabled
    /// TCR-based calculation below.
    pub fn pageSize(self: *@This(), addr: u64) usize {
        // @TODO
        _ = self;
        _ = addr;
        return 0x1000;
        //switch (self.savedEl()) {
        //  0, 1 => return calcPageSize(self.el1.TCR, addr),
        //  2 => return calcPageSize(self.el2.TCR, addr),
        //  3 => return calcPageSize(self.el3.TCR, addr),
        //}
    }

    /// Turn a debuggee virtual address at `other_el` into an address the
    /// debugger itself can dereference for `op`. Errors:
    /// - CannotWrite: readable but not writable
    /// - NotMapped: no translation at all
    /// - PaddrNotIdentityMapped: physical address unreachable from our map
    fn addrForOpAtEL(self: *@This(), vaddr: u64, other_el: u2, op: client.RW) !u64 {
        // @TODO: Don't ignore self.pagingEnabled() calls below, speed can
        // be gained here. Problem is that I'm not really sure how to tell if
        // 0xFFFFFFFFFFFF0000 et al is a bad address otherwise.
        if (self.debugger_el == other_el) {
            // Same EL
            // We only need to verify we can do the op on the current EL
            if (true or self.pagingEnabled(self.debugger_el)) {
                if (try translateVaddr(vaddr, self.debugger_el, op)) |_| {
                    return vaddr;
                }
                if (op == .w) {
                    // Check if we failed because of our permissions
                    if (try translateVaddr(vaddr, self.debugger_el, .r)) |_| {
                        return error.CannotWrite;
                    }
                }
                return error.NotMapped;
            }
            return vaddr;
        } else {
            // Lower EL
            // First we have to get the paddr of the vaddr accessed
            // We only check for read access on the lower EL, we may be able to write even if they can't
            const paddr = if (true or self.pagingEnabled(other_el))
                ((try translateVaddr(vaddr, other_el, .r)) orelse return error.NotMapped)
            else
                vaddr;

            // Now we need a virtual address for us to write to, check if this addr happens to be identity mapped
            // Maybe we could look for it elsewhere, but this is fine for now
            if (false or !self.pagingEnabled(self.debugger_el))
                return paddr;

            const debugger_paddr = (try translateVaddr(paddr, self.debugger_el, op)) orelse {
                if (op == .w) {
                    if (try translateVaddr(paddr, self.debugger_el, .r)) |_| {
                        return error.CannotWrite;
                    }
                }
                return error.PaddrNotIdentityMapped;
            };

            if (paddr != debugger_paddr)
                return error.PaddrNotIdentityMapped;

            return paddr;
        }
    }

    /// Debugger-usable address for a debuggee address at the EL the
    /// exception came from.
    pub fn operableAddr(self: *@This(), vaddr: u64, op: client.RW) !u64 {
        return self.addrForOpAtEL(vaddr, self.savedEl(), op);
    }

    /// Pointers to the exception registers of the EL the debugger runs at.
    fn debuggerRegs(self: *@This()) struct {
        ESR: *u64,
        ELR: *u64,
        FAR: *u64,
        SPSR: *u64,
    } {
        return switch (self.debugger_el) {
            1 => .{
                .ESR = &self.el1.ESR_EL1,
                .ELR = &self.el1.ELR_EL1,
                .FAR = &self.el1.FAR_EL1,
                .SPSR = &self.el1.SPSR_EL1,
            },
            2 => .{
                .ESR = &self.el2.ESR_EL2,
                .ELR = &self.el2.ELR_EL2,
                .FAR = &self.el2.FAR_EL2,
                .SPSR = &self.el2.SPSR_EL2,
            },
            3 => .{
                .ESR = &self.el3.ESR_EL3,
                .ELR = &self.el3.ELR_EL3,
                .FAR = &self.el3.FAR_EL3,
                .SPSR = &self.el3.SPSR_EL3,
            },
            else => unreachable,
        };
    }

    /// EL the debuggee was executing at when the exception was taken.
    fn savedEl(self: *@This()) u2 {
        return spsrSavedEl(self.debuggerRegs().SPSR.*);
    }

    /// Write `value` into the register GDB calls `regnum`.
    /// Returns false if the register doesn't exist at our EL.
    pub fn writeGdbReg(self: *@This(), regnum: usize, value: u64) bool {
        if (regnum == 33) {
            // regnum 33 is the synthesized CPSR; writing it would need the
            // inverse of the mapping in getGdbReg.
            @panic("Handle writing to fake CPSR");
        } else {
            if (self.getGdbReg(regnum)) |reg| {
                reg.* = value;
                return true;
            } else {
                return false;
            }
        }
    }

    // Get a register identified by "regnum" in target_xml
    pub fn getGdbReg(self: *@This(), regnum: usize) ?*u64 {
        return switch (regnum) {
            // zig fmt: off
            0 => &self.gpr.X0,   1 => &self.gpr.X1,   2 => &self.gpr.X2,   3 => &self.gpr.X3,
            4 => &self.gpr.X4,   5 => &self.gpr.X5,   6 => &self.gpr.X6,   7 => &self.gpr.X7,
            8 => &self.gpr.X8,   9 => &self.gpr.X9,   10 => &self.gpr.X10, 11 => &self.gpr.X11,
            12 => &self.gpr.X12, 13 => &self.gpr.X13, 14 => &self.gpr.X14, 15 => &self.gpr.X15,
            16 => &self.gpr.X16, 17 => &self.gpr.X17, 18 => &self.gpr.X18, 19 => &self.gpr.X19,
            20 => &self.gpr.X20, 21 => &self.gpr.X21, 22 => &self.gpr.X22, 23 => &self.gpr.X23,
            24 => &self.gpr.X24, 25 => &self.gpr.X25, 26 => &self.gpr.X26, 27 => &self.gpr.X27,
            28 => &self.gpr.X28, 29 => &self.gpr.X29, 30 => &self.gpr.X30, 31 => &self.gpr.SP,
            // zig fmt: on
            32 => self.debuggerRegs().ELR,
            33 => {
                // GDB insists on an aarch32-style CPSR; synthesize one from
                // SPSR and stash it in fake_cpsr so we can return a pointer.
                // zig fmt: off
                self.fake_cpsr = 0
                    | (self.debuggerRegs().SPSR.* & (0
                        | (7 << 6) // AIF
                        | (0x1F << 27) // NZCVQ
                        | (1 << 22) // PAN
                        | (1 << 9) // E
                    ))
                    | (1 << 4) // RES1
                    | @as(u64, switch (self.debuggerRegs().SPSR.* & 0xF) { // M
                        0b0000 => 0b0000, // EL0t => User
                        0b0100 => 0b1111, // EL1t => System
                        0b0101 => 0b0010, // EL1h => IRQ
                        0b1000 => 0b1010, // EL2t => Hypervisor
                        0b1001 => 0b1010, // EL2h => Hypervisor
                        0b1100 => 0b0110, // EL3t => Monitor
                        0b1101 => 0b0110, // EL3h => Monitor
                        else => undefined,
                    })
                ;
                // zig fmt: on
                std.log.info("Fake CPSR: 0x{X}", .{self.fake_cpsr});
                return &self.fake_cpsr;
            },
            // Each EL's exception bank follows the ELR, SPSR, FAR, ESR order
            // established by regnums 34-37 (EL3).
            34 => if (self.debugger_el < 3) null else &self.el3.ELR_EL3,
            35 => if (self.debugger_el < 3) null else &self.el3.SPSR_EL3,
            36 => if (self.debugger_el < 3) null else &self.el3.FAR_EL3,
            37 => if (self.debugger_el < 3) null else &self.el3.ESR_EL3,
            // FIX: 38 previously duplicated 41 (&self.el2.ESR_EL2), leaving
            // ELR_EL2 unreachable and making writes to regnum 38 clobber ESR.
            38 => if (self.debugger_el < 2) null else &self.el2.ELR_EL2,
            39 => if (self.debugger_el < 2) null else &self.el2.SPSR_EL2,
            40 => if (self.debugger_el < 2) null else &self.el2.FAR_EL2,
            41 => if (self.debugger_el < 2) null else &self.el2.ESR_EL2,
            // FIX: 42 previously duplicated 45 (&self.el1.ESR_EL1); same
            // copy-paste error as regnum 38.
            42 => if (self.debugger_el < 1) null else &self.el1.ELR_EL1,
            43 => if (self.debugger_el < 1) null else &self.el1.SPSR_EL1,
            44 => if (self.debugger_el < 1) null else &self.el1.FAR_EL1,
            45 => if (self.debugger_el < 1) null else &self.el1.ESR_EL1,
            46 => if (self.debugger_el < 3) null else &self.el3.TTBR0_EL3,
            47 => if (self.debugger_el < 3) null else &self.el3.TCR_EL3,
            48 => if (self.debugger_el < 3) null else &self.el3.MAIR_EL3,
            49 => if (self.debugger_el < 2) null else &self.el2.TTBR0_EL2,
            50 => if (self.debugger_el < 2) null else &self.el2.TCR_EL2,
            51 => if (self.debugger_el < 2) null else &self.el2.MAIR_EL2,
            52 => if (self.debugger_el < 1) null else &self.el1.TTBR0_EL1,
            53 => if (self.debugger_el < 1) null else &self.el1.TTBR1_EL1,
            54 => if (self.debugger_el < 1) null else &self.el1.TCR_EL1,
            55 => if (self.debugger_el < 1) null else &self.el1.MAIR_EL1,
            56 => if (self.debugger_el < 3) null else &self.el3.SCTLR_EL3,
            57 => if (self.debugger_el < 3) null else &self.el3.SCR_EL3,
            58 => if (self.debugger_el < 2) null else &self.el2.SCTLR_EL2,
            59 => if (self.debugger_el < 2) null else &self.el2.HCR_EL2,
            60 => if (self.debugger_el < 2) null else &self.el2.CPTR_EL2,
            61 => if (self.debugger_el < 2) null else &self.el2.HSTR_EL2,
            62 => if (self.debugger_el < 2) null else &self.el2.CNTHCTL_EL2,
            63 => if (self.debugger_el < 2) null else &self.el2.CNTVOFF_EL2,
            64 => if (self.debugger_el < 2) null else &self.el2.VTCR_EL2,
            65 => if (self.debugger_el < 1) null else &self.el1.SCTLR_EL1,
            // NOTE(review): CPACR_EL1 read from the EL2 bank behind an
            // `< 2` guard looks inconsistent — confirm against aarch64.xml
            // and the proto register structs before changing.
            66 => if (self.debugger_el < 2) null else &self.el2.CPACR_EL1,
            else => null,
        };
    }

    /// Send the stop reply computed in readFrame() to GDB.
    pub fn informHalt(self: *@This(), gdb_stream: *gdb.Stream) !void {
        try gdb_stream.send(self.gdb_halt_message);
    }

    /// Overwrite the instruction at `addr` (as seen from `EL`) with `instr`,
    /// saving the original bytes through `save_callback` for later restore.
    fn hookWithInstrAtAddrForEL(self: *@This(), addr: u64, EL: u2, instr: [4]u8, save_callback: anytype) !void {
        // Make sure the address is writeable
        const our_addr = self.addrForOpAtEL(addr, EL, .w) catch |err| {
            std.log.err("Cannot write hook at address 0x{X} at EL{d}!! Cause: {}", .{ addr, EL, err });
            return;
        };
        // Save the current instruction
        try save_callback(our_addr, try client.readBytes(our_addr, 4));
        // Write the new one
        try client.writeMemory(our_addr, &instr);
    }

    /// Plant a persistent software breakpoint at `addr`.
    fn swHookWithInstrAtAddr(self: *@This(), addr: u64, instr: [4]u8) !void {
        return self.hookWithInstrAtAddrForEL(addr, self.savedEl(), instr, client.swBreakpointHook);
    }

    /// Plant a one-shot single-step hook at `addr`.
    fn stepHookWithInstrAtAddr(self: *@This(), addr: u64, instr: [4]u8) !void {
        return self.hookWithInstrAtAddrForEL(addr, self.savedEl(), instr, client.singleStepHook);
    }

    /// Software single-step: decode the instruction at the current PC and
    /// plant `instr` at every possible successor address (branch target(s),
    /// fall-through, or ERET destination).
    fn hookWithInstr(self: *@This(), instr: [4]u8) !void {
        const current_addr = self.debuggerRegs().ELR.*;
        const curr_instr_bytes = try client.readBytes(current_addr, 4);
        const curr_instr = std.mem.readIntLittle(u32, &curr_instr_bytes);

        // Dispatch on the top opcode byte; see ARMv8 A64 encoding groups.
        // zig fmt: off
        switch (@truncate(u8, curr_instr >> 24)) {
            // Conditional branch to immediate (B.cond)
            // 0b0101010x
            0b01010100, 0b01010101,
            // Compare and branch to immediate (CB{N,}Z)
            // 0bx011010x
            0b00110100, 0b00110101,
            0b10110100, 0b10110101,
            => {
                // Two successors: taken target and fall-through.
                const imm = @truncate(u19, curr_instr >> 5);
                try self.stepHookWithInstrAtAddr(calcNextAddr(current_addr, imm), instr);
                try self.stepHookWithInstrAtAddr(current_addr + 4, instr);
            },

            // Unconditional branch to register (BR, BLR, RET, ...)
            // 0b1101011x
            0b11010110, 0b11010111,
            => {
                const opc = @truncate(u4, curr_instr >> 21);
                const op2 = @truncate(u5, curr_instr >> 16);
                const op3 = @truncate(u6, curr_instr >> 10);
                const Rn = @truncate(u5, curr_instr >> 5);
                const op4 = @truncate(u5, curr_instr);

                if (opc == 0b0100 and op2 == 0b11111 and op3 == 0b000000 and Rn == 0b11111 and op4 == 0b00000) {
                    // ERET: the successor is the debuggee's saved ELR, at
                    // the EL its SPSR says it returns to.
                    const debuggee_elr = switch (self.savedEl()) {
                        0 => {
                            std.log.err("EL0 can't ERET!", .{});
                            return;
                        },
                        1 => self.el1.ELR_EL1,
                        2 => self.el2.ELR_EL2,
                        3 => self.el3.ELR_EL3,
                    };
                    const debuggee_spsr = switch (self.savedEl()) {
                        0 => unreachable,
                        1 => self.el1.SPSR_EL1,
                        2 => self.el2.SPSR_EL2,
                        3 => self.el3.SPSR_EL3,
                    };
                    const debuggee_target_el = spsrSavedEl(debuggee_spsr);
                    try self.hookWithInstrAtAddrForEL(debuggee_elr, debuggee_target_el, instr, client.singleStepHook);
                } else {
                    // Probably just an unconditional branch to the value of Rn
                    // If the register number is 31, that encodes XZR and not SP
                    const reg_val = if (Rn == 31) 0 else (self.getGdbReg(Rn) orelse unreachable).*;
                    try self.stepHookWithInstrAtAddr(reg_val, instr);
                }
            },

            // Unconditional branch to immediate (B, BL)
            // 0bx00101xx
            0b00010100, 0b00010101, 0b00010110, 0b00010111,
            0b10010100, 0b10010101, 0b10010110, 0b10010111,
            => {
                const imm = @truncate(u26, curr_instr);
                try self.stepHookWithInstrAtAddr(calcNextAddr(current_addr, imm), instr);
            },

            // Test and branch to immediate (TB{N,}Z)
            // 0bx011011x
            0b00110110, 0b00110111,
            0b10110110, 0b10110111,
            => {
                const imm = @truncate(u14, curr_instr >> 5);
                try self.stepHookWithInstrAtAddr(calcNextAddr(current_addr, imm), instr);
                try self.stepHookWithInstrAtAddr(current_addr + 4, instr);
            },

            // We assume everything that isn't a branch will just end up at PC + 4.
            // I... Think that's correct...?
            else => {
                try self.stepHookWithInstrAtAddr(current_addr + 4, instr);
            },
        }
        // zig fmt: on
    }

    /// Pick the trap instruction used to re-enter the debugger.
    fn chooseInstructionForBreakpoint(self: *@This()) [4]u8 {
        // Oh boy, stepping with a debugger in EL3 is a bit of a mess.
        // Debug exceptions except BRK are not available. This means that
        // if we want to trap from a lower EL into EL3, we have to use an
        // SMC instruction. That's fine and all except for the fact that
        // they advance the PC by 4 _BEFORE_ triggering the exception
        // for obvious reasons. That means we have to rewind the PC by 4
        // on one of these before resuming.

        // First, let's figure out if we're running at EL3 or a lower EL.
        if (self.savedEl() < 3) {
            // We need to do an `smc` call to get back into the debugger.
            return .{ 0xA3, 0x8A, 0x14, 0xD4 }; // SMC #42069
        } else {
            // We could do either an `smc` or `brk` instruction to get back into the debugger
            return .{ 0xA0, 0x8A, 0x34, 0xD4 }; // BRK #42069
        }
    }

    /// GDB "Z0": software breakpoint at `addr`.
    pub fn setSwBreak(self: *@This(), addr: u64) !void {
        try self.swHookWithInstrAtAddr(addr, self.chooseInstructionForBreakpoint());
    }

    /// Resume the debuggee; when `step` is set, arrange for control to
    /// return after one instruction (hardware SS bit, or instruction hooks
    /// at EL3 where debug exceptions aren't available).
    pub fn doStep(self: *@This(), step: bool) !void {
        if (step and self.debugger_el == 3) {
            try self.hookWithInstr(self.chooseInstructionForBreakpoint());
        } else {
            const ss_bit: u64 = 1 << 21; // SPSR_ELx.SS
            if (step) {
                self.debuggerRegs().SPSR.* |= ss_bit;
            } else {
                self.debuggerRegs().SPSR.* &= ~ss_bit;
            }
        }
        try client.resumeExecution();
        try switch (self.debugger_el) {
            1 => self.writeFrameAtEL(1),
            2 => self.writeFrameAtEL(2),
            3 => self.writeFrameAtEL(3),
            else => unreachable,
        };
    }

    /// Serialize our (possibly modified) register state back to the device.
    fn writeFrameAtEL(self: *@This(), comptime el: u2) !void {
        const RemoteFrame = proto.aarch64.ElFrame(el);
        var frame = std.mem.zeroes(RemoteFrame);
        frame.el1 = self.el1;
        frame.gpr = self.gpr;
        if (comptime (el >= 2)) {
            frame.el2 = self.el2;
        }
        if (comptime (el >= 3)) {
            frame.el3 = self.el3;
        }
        try client.device_writer.writeIntLittle(u32, @intCast(u32, @sizeOf(RemoteFrame)));
        try client.device_writer.writeAll(std.mem.asBytes(&frame));
    }
};
src/client/aarch64.zig
//! Kernel memory-management layer: address wrappers, the boot-time direct
//! mapping, a bump/freelist frame allocator and a small byte allocator.

const std = @import("std");
const arch = @import("root").arch;
const kernel = @import("kernel.zig");
const logger = @TypeOf(kernel.logger).childOf(@typeName(@This())){};

/// The architecture's physical->virtual direct mapping.
pub fn directMapping() *DirectMapping {
    return arch.mm.directMapping();
}

/// Translate a physical address through the direct mapping.
pub fn directTranslate(phys: PhysicalAddress) VirtualAddress {
    const virt = directMapping().to_virt(phys);
    return virt;
}

/// The architecture's global physical frame allocator.
pub fn frameAllocator() *FrameAllocator {
    return arch.mm.frameAllocator();
}

/// Page-table walk visitor that coalesces adjacent virt->phys mappings and
/// logs each merged run.
const Dumper = struct {
    const Self = @This();
    const Mapping = struct {
        virt: VirtualAddress,
        phys: PhysicalAddress,
        size: usize,
    };
    prev: ?Mapping = null,

    /// Called once per page; merges with the previous run when both the
    /// virtual and physical addresses are contiguous.
    pub fn walk(self: *Self, virt: VirtualAddress, phys: PhysicalAddress, page_size: usize) void {
        if (self.prev == null) {
            self.prev = Mapping{ .virt = virt, .phys = phys, .size = page_size };
            return;
        }
        const prev = self.prev.?;
        // Check if we can merge mapping
        const next_virt = prev.virt.add(prev.size);
        const next_phys = prev.phys.add(prev.size);
        if (next_virt.eq(virt) and next_phys.eq(phys)) {
            self.prev.?.size += page_size;
            return;
        }
        logger.log("{} -> {} (0x{x} bytes)\n", .{ prev.virt, prev.phys, prev.size });
        self.prev = Mapping{ .virt = virt, .phys = phys, .size = page_size };
    }

    /// Flush the final pending run, if any.
    pub fn done(self: @This()) void {
        if (self.prev) |prev| {
            logger.log("{} -> {} (0x{x} bytes)\n", .{ prev.virt, prev.phys, prev.size });
        }
    }
};

/// Log every mapping in `vm`, coalescing contiguous ranges.
pub fn dump_vm_mappings(vm: *VirtualMemory) void {
    var visitor = Dumper{};
    vm.vm_impl.walk(Dumper, &visitor);
}

/// Strongly-typed integer address wrapper; `name` is used in formatting.
fn AddrWrapper(comptime name: []const u8, comptime T: type) type {
    return struct {
        const Type = T;
        const Self = @This();

        value: Type,

        pub fn new(value: Type) Self {
            return .{ .value = value };
        }

        pub fn into_pointer(self: Self, comptime P: type) P {
            return @intToPtr(P, self.value);
        }

        pub fn add(self: Self, val: Type) Self {
            return .{ .value = self.value + val };
        }

        pub fn sub(self: Self, val: Type) Self {
            return .{ .value = self.value - val };
        }

        pub fn le(self: Self, other: Self) bool {
            return self.value <= other.value;
        }

        pub fn lt(self: Self, other: Self) bool {
            return self.value < other.value;
        }

        pub fn eq(self: Self, other: Self) bool {
            return self.value == other.value;
        }

        /// Byte distance from `from` to `to`; asserts nothing, caller must
        /// ensure `to >= from`.
        pub fn span(from: Self, to: Self) usize {
            return to.value - from.value;
        }

        pub fn max(self: Self, other: Self) Self {
            return if (other.value > self.value) other else self;
        }

        pub fn alignForward(self: Self, val: anytype) Self {
            return Self.new(std.mem.alignForward(self.value, val));
        }

        pub fn format(self: Self, fmt: []const u8, options: std.fmt.FormatOptions, stream: anytype) !void {
            _ = fmt;
            try stream.writeAll(name);
            try stream.writeAll("{");
            try std.fmt.formatInt(self.value, 16, .lower, options, stream);
            try stream.writeAll("}");
        }

        pub fn isAligned(self: Self, val: anytype) bool {
            return std.mem.isAligned(self.value, val);
        }
    };
}

test "AddrWrapper" {
    const expect = std.testing.expect;
    const Addr = AddrWrapper("TestAddr", u64);
    const v1 = Addr.new(0x1000);
    const v2 = Addr.new(0x2000);
    try expect(v1.add(0x1000).eq(v2));
    try expect(v2.sub(0x1000).eq(v1));
    try expect(v1.le(v1));
    try expect(v2.eq(v2));
    try expect(Addr.span(v1, v2) == 0x1000);
    try expect(v1.max(v2).eq(v2));
}

pub const VirtualAddress = AddrWrapper("VirtualAddress", arch.mm.VirtAddrType);
pub const PhysicalAddress = AddrWrapper("PhysicalAddress", arch.mm.PhysAddrType);

pub const DirectMapping = struct {
    /// Simple mapping from boot time
    virt_start: VirtualAddress,
    size: usize,

    pub fn init(start: VirtualAddress, size: usize) DirectMapping {
        return .{
            .virt_start = start,
            .size = size,
        };
    }

    // FIX: these setters previously took `self` by value; Zig parameters
    // are immutable, so assigning to their fields could never compile if
    // called (they only passed analysis because they were unused). A
    // pointer receiver is caller-compatible via method-call auto-ref.
    pub fn set_size(self: *@This(), value: usize) void {
        self.size = value;
    }

    pub fn set_base(self: *@This(), value: VirtualAddress) void {
        self.virt_start = value;
    }

    fn virt_end(self: @This()) VirtualAddress {
        return self.virt_start.add(self.size);
    }

    /// Inverse of to_virt; asserts `addr` lies inside the mapping.
    pub fn to_phys(self: @This(), addr: VirtualAddress) PhysicalAddress {
        std.debug.assert(self.virt_start.le(addr) and addr.lt(self.virt_end()));
        return PhysicalAddress.new(addr.value - self.virt_start.value);
    }

    /// Translate a physical address into the direct-mapped window.
    pub fn to_virt(self: @This(), addr: PhysicalAddress) VirtualAddress {
        std.debug.assert(addr.value < self.size);
        return VirtualAddress.new(addr.value + self.virt_start.value);
    }
};

pub var kernel_vm: VirtualMemory = undefined;

/// Thin, copyable handle over the architecture's address-space object.
pub const VirtualMemory = struct {
    vm_impl: *arch.mm.VirtualMemoryImpl,

    const Self = @This();

    pub const Protection = struct {
        read: bool,
        write: bool,
        execute: bool,
        user: bool,

        pub const RWX = Protection{
            .read = true,
            .write = true,
            .execute = true,
            .user = false,
        };
        pub const RW = Protection{
            .read = true,
            .write = true,
            .execute = false,
            .user = false,
        };
        pub const R = Protection{
            .read = true,
            .write = false,
            .execute = false,
            .user = false,
        };
    };

    pub fn init(vm_impl: *arch.mm.VirtualMemoryImpl) VirtualMemory {
        return .{ .vm_impl = vm_impl };
    }

    pub fn map_memory(self: Self, where: VirtualAddress, what: PhysicalAddress, length: usize, protection: Protection) !VirtualMemoryRange {
        return self.vm_impl.map_memory(where, what, length, protection);
    }

    pub fn map_io(self: Self, what: PhysicalAddress, length: usize) !VirtualMemoryRange {
        return self.vm_impl.map_io(what, length);
    }

    pub fn unmap(self: Self, range: VirtualMemoryRange) !void {
        return self.vm_impl.unmap(range);
    }

    pub fn switch_to(self: Self) void {
        self.vm_impl.switch_to();
    }
};

/// Trivial bump-from-frame byte allocator (max 0x1000 per allocation);
/// free() is a no-op.
pub const MemoryAllocator = struct {
    frame_allocator: *FrameAllocator,
    freelist: std.SinglyLinkedList(void),
    main_chunk: ?[]align(0x10) u8,

    const Self = @This();
    const max_alloc = 0x1000;

    pub fn new(frame_allocator: *FrameAllocator) MemoryAllocator {
        return .{
            .frame_allocator = frame_allocator,
            .freelist = std.SinglyLinkedList(void){ .first = null },
            .main_chunk = null,
        };
    }

    /// Carve `size` bytes from the current chunk, grabbing a fresh frame
    /// when the remainder is too small (the old remainder is leaked).
    pub fn alloc_bytes(self: *Self, size: usize) ![]align(0x10) u8 {
        if (self.main_chunk == null or self.main_chunk.?.len < size) {
            const frame = (try self.frame_allocator.alloc_frame());
            const virt = directTranslate(frame);
            var main_chunk: []align(0x10) u8 = undefined;
            main_chunk.ptr = virt.into_pointer([*]align(0x10) u8);
            main_chunk.len = 0x1000;
            self.main_chunk = main_chunk;
        }
        if (size <= self.main_chunk.?.len) {
            // NOTE(review): slicing at an arbitrary `size` does not keep
            // the remainder 16-byte aligned, so the `align(0x10)` promise
            // only holds for the first allocation of each frame — confirm
            // whether callers rely on it and round `size` up if so.
            var result = self.main_chunk.?[0..size];
            var rest = self.main_chunk.?[size..];
            self.main_chunk = rest;
            return result;
        }
        // Request larger than a whole frame (max_alloc) or bookkeeping
        // failure: report exhaustion.
        return error{OutOfMemory}.OutOfMemory;
    }

    /// Allocate storage for one `T`.
    pub fn alloc(self: *Self, comptime T: type) !*T {
        const buffer = try self.alloc_bytes(@sizeOf(T));
        return @ptrCast(*T, buffer);
    }

    /// Intentionally a no-op; this allocator never reclaims.
    pub fn free(_: *u8) void {}
};

/// Physical frame allocator: bump pointer over a range plus a freelist of
/// returned frames (linked through the frames' direct-mapped memory).
pub const FrameAllocator = struct {
    // next physical address to allocate
    next_free: PhysicalAddress,
    // exclusive end of the managed range
    limit: PhysicalAddress,
    freelist: std.SinglyLinkedList(void),

    const PAGE_SIZE = 0x1000;
    const Self = @This();
    const OutOfMemory = error.OutOfMemory;

    pub fn new(memory: PhysicalMemoryRange) FrameAllocator {
        return .{
            .next_free = memory.base.alignForward(PAGE_SIZE),
            .limit = memory.get_end(),
            .freelist = std.SinglyLinkedList(void){ .first = null },
        };
    }

    /// Allocate one frame and zero it through the direct mapping.
    pub fn alloc_zero_frame(self: *Self) !PhysicalAddress {
        const frame = try self.alloc_frame();
        const buf = directMapping().to_virt(frame).into_pointer(*[PAGE_SIZE]u8);
        std.mem.set(u8, buf, 0);
        return frame;
    }

    /// Allocate `n` contiguous frames at `alignment` and zero them.
    pub fn alloc_zero_aligned(self: *Self, alignment: usize, n: usize) !PhysicalAddress {
        const frame = try self.alloc_aligned(alignment, n);
        const buf = directMapping().to_virt(frame).into_pointer([*]u8);
        std.mem.set(u8, buf[0..(n * PAGE_SIZE)], 0);
        return frame;
    }

    /// Allocate `n` contiguous frames whose base satisfies `alignment`,
    /// recycling the skipped leading frames through the freelist.
    pub fn alloc_aligned(self: *Self, alignment: usize, n: usize) !PhysicalAddress {
        // Skip until we have aligned page
        while (!self.next_free.isAligned(alignment)) {
            const frame = try self.alloc_pool(1);
            self.free_frame(frame);
        }
        return self.alloc_pool(n);
    }

    /// Allocate a single frame, preferring the freelist over the bump
    /// pointer.
    pub fn alloc_frame(self: *Self) !PhysicalAddress {
        // Try allocating from freelist
        if (self.freelist.popFirst()) |node| {
            const virt_addr = VirtualAddress.new(@ptrToInt(node));
            const phys_addr = directMapping().to_phys(virt_addr);
            return phys_addr;
        }
        // No free pages in list
        return self.alloc_pool(1);
    }

    /// Bump-allocate `n` contiguous frames.
    pub fn alloc_pool(self: *Self, n: u64) !PhysicalAddress {
        const page = self.next_free;
        const allocation_size = PAGE_SIZE * n;
        self.next_free = self.next_free.add(allocation_size);

        // FIX: previously `self.limit.le(self.next_free)`, which rejected
        // an allocation ending exactly at the exclusive limit — the last
        // page of the range could never be handed out. Only fail when the
        // allocation extends strictly past the limit.
        if (self.limit.lt(self.next_free)) {
            self.next_free = self.next_free.sub(allocation_size);
            return OutOfMemory;
        }

        std.debug.assert(page.isAligned(PAGE_SIZE));
        return page;
    }

    /// Return a frame to the freelist; the list node is stored inside the
    /// frame itself via the direct mapping.
    pub fn free_frame(self: *Self, addr: PhysicalAddress) void {
        std.debug.assert(std.mem.isAligned(addr.value, PAGE_SIZE));
        const virt_addr = directMapping().to_virt(addr);
        const node = virt_addr.into_pointer(*@TypeOf(self.freelist).Node);
        self.freelist.prepend(node);
    }
};

pub const PhysicalMemoryRange = MemoryRange(PhysicalAddress);
pub const VirtualMemoryRange = MemoryRange(VirtualAddress);

/// Half-open [base, base+size) range over an AddrWrapper type.
pub fn MemoryRange(comptime T: type) type {
    return struct {
        const Self = @This();

        base: T,
        size: usize,

        pub fn sized(base: T, size: usize) Self {
            return .{ .base = base, .size = size };
        }

        /// Exclusive end of the range.
        pub fn get_end(self: Self) T {
            return self.base.add(self.size);
        }

        pub fn from_range(start: T, end: T) Self {
            std.debug.assert(start.lt(end));
            const size = T.span(start, end);
            return .{ .base = start, .size = size };
        }

        /// View the range as a byte slice; only valid for ranges that are
        /// directly addressable (virtual) in the current address space.
        pub fn as_bytes(self: Self) []u8 {
            const ptr = @intToPtr([*]u8, self.base.value);
            return ptr[0..(self.size)];
        }
    };
}

var memory_allocator: MemoryAllocator = undefined;

pub fn memoryAllocator() *MemoryAllocator {
    return &memory_allocator;
}

/// Initialize arch MM state and the global byte allocator.
pub fn init() void {
    arch.mm.init();
    memory_allocator = MemoryAllocator.new(frameAllocator());
}
kernel/mm.zig
//! BlendModeWidget: a two-entry selector (alpha / replace blend mode) with
//! preview images; the file itself is the widget struct.

const std = @import("std");
const Allocator = std.mem.Allocator;
const gui = @import("gui");
const nvg = @import("nanovg");
const Rect = @import("gui/geometry.zig").Rect;
const Point = @import("gui/geometry.zig").Point;
const BlendMode = @import("color.zig").BlendMode;

// Preview images embedded at compile time.
const image_alpha_data = @embedFile("../data/blendmodealpha.png");
const image_replace_data = @embedFile("../data/blendmodereplace.png");

widget: gui.Widget,
allocator: Allocator,
// Currently selected mode; defaults to replace.
active: BlendMode = .replace,
image_alpha: nvg.Image,
image_replace: nvg.Image,
// Hit rectangles for the two selectable rows, in widget-local coordinates;
// index order matches the BlendMode enum ordinal used in onMouseDown.
rects: [2]Rect(f32),
// Optional callback invoked when `active` changes.
onChangedFn: ?fn (*Self) void = null,

const pad = 5;

const Self = @This();

/// Allocate and initialize the widget; uploads both preview images to the
/// NanoVG context. Caller frees with deinit().
pub fn init(allocator: Allocator, rect: Rect(f32), vg: nvg) !*Self {
    var self = try allocator.create(Self);
    self.* = Self{
        .widget = gui.Widget.init(allocator, rect),
        .allocator = allocator,
        // Two 27px rows stacked around y=33, inset by pad+1 on each side.
        .rects = [_]Rect(f32){
            Rect(f32).make(pad + 1, 33 - 27, rect.w - 2 * pad - 2, 27),
            Rect(f32).make(pad + 1, 33, rect.w - 2 * pad - 2, 27),
        },
        .image_alpha = vg.createImageMem(image_alpha_data, .{ .nearest = true }),
        .image_replace = vg.createImageMem(image_replace_data, .{ .nearest = true }),
    };
    self.widget.onMouseDownFn = onMouseDown;
    self.widget.drawFn = draw;
    return self;
}

/// Release GPU images and widget resources, then free self.
pub fn deinit(self: *Self, vg: nvg) void {
    vg.deleteImage(self.image_alpha);
    vg.deleteImage(self.image_replace);
    self.widget.deinit();
    self.allocator.destroy(self);
}

/// Update `active` and fire the change callback only on a real change.
fn setActive(self: *Self, active: BlendMode) void {
    if (self.active != active) {
        self.active = active;
        if (self.onChangedFn) |onChanged| onChanged(self);
    }
}

/// Left-click hit test against the two rows; the row index is converted
/// directly to a BlendMode ordinal.
fn onMouseDown(widget: *gui.Widget, event: *const gui.MouseEvent) void {
    if (!widget.enabled) return;
    if (event.button == .left) {
        var self = @fieldParentPtr(Self, "widget", widget);
        const point = Point(f32).make(event.x, event.y);
        for (self.rects) |rect, i| {
            if (rect.contains(point)) {
                self.setActive(@intToEnum(BlendMode, @intCast(u1, i)));
                break;
            }
        }
    }
}

/// Render the panel, the highlight behind the active row, and both preview
/// images (dimmed when disabled). Call order matters: paths are built and
/// filled back-to-front inside a saved/translated NanoVG state.
pub fn draw(widget: *gui.Widget, vg: nvg) void {
    const self = @fieldParentPtr(Self, "widget", widget);
    const rect = widget.relative_rect;
    vg.save();
    defer vg.restore();
    vg.translate(rect.x, rect.y);
    gui.drawPanel(vg, 0, 0, rect.w, rect.h, 1, false, false);
    gui.drawPanelInset(vg, pad, pad, rect.w - 2 * pad, rect.h - 2 * pad, 1);

    // Highlight behind the selected row; shadow color when disabled.
    const active_rect = self.rects[@enumToInt(self.active)];
    vg.beginPath();
    vg.rect(active_rect.x, active_rect.y, active_rect.w, active_rect.h);
    vg.fillColor(if (widget.enabled) gui.theme_colors.focus else gui.theme_colors.shadow);
    vg.fill();

    // Preview thumbnails: alpha on the top row, replace on the bottom.
    const alpha: f32 = if (widget.enabled) 1 else 0.5;
    vg.beginPath();
    vg.rect(32, 33 - 25, 32, 24);
    vg.fillPaint(vg.imagePattern(32, 33 - 25, 32, 24, 0, self.image_alpha, alpha));
    vg.fill();
    vg.beginPath();
    vg.rect(32, 33 + 1, 32, 24);
    vg.fillPaint(vg.imagePattern(32, 33 + 1, 32, 24, 0, self.image_replace, alpha));
    vg.fill();
}
src/BlendModeWidget.zig
//----------------------------------------------------------------------------
// Windows Lockdown Policy (WLDP_*) lockdown-state flags.
//----------------------------------------------------------------------------
pub const WLDP_LOCKDOWN_UNDEFINED = @as(u32, 0);
pub const WLDP_LOCKDOWN_DEFINED_FLAG = @as(u32, 2147483648);
pub const WLDP_LOCKDOWN_CONFIG_CI_FLAG = @as(u32, 1);
pub const WLDP_LOCKDOWN_CONFIG_CI_AUDIT_FLAG = @as(u32, 2);
pub const WLDP_LOCKDOWN_UMCIENFORCE_FLAG = @as(u32, 4);
pub const WLDP_LOCKDOWN_AUDIT_FLAG = @as(u32, 8);
pub const WLDP_LOCKDOWN_EXCLUSION_FLAG = @as(u32, 16);
pub const WLDP_HOST_INFORMATION_REVISION = @as(u32, 1);
pub const WLDP_FLAGS_SKIPSIGNATUREVALIDATION = @as(u32, 256);

//----------------------------------------------------------------------------
// TDI (Transport Driver Interface) entity and information-class identifiers.
//----------------------------------------------------------------------------
pub const MAX_TDI_ENTITIES = @as(u32, 4096);
pub const INFO_CLASS_GENERIC = @as(u32, 256);
pub const INFO_CLASS_PROTOCOL = @as(u32, 512);
pub const INFO_CLASS_IMPLEMENTATION = @as(u32, 768);
pub const INFO_TYPE_PROVIDER = @as(u32, 256);
pub const INFO_TYPE_ADDRESS_OBJECT = @as(u32, 512);
pub const INFO_TYPE_CONNECTION = @as(u32, 768);
pub const ENTITY_LIST_ID = @as(u32, 0);
pub const INVALID_ENTITY_INSTANCE = @as(i32, -1);
pub const CONTEXT_SIZE = @as(u32, 16);
pub const ENTITY_TYPE_ID = @as(u32, 1);
pub const CO_TL_NBF = @as(u32, 1024);
pub const CO_TL_SPX = @as(u32, 1026);
pub const CO_TL_TCP = @as(u32, 1028);
pub const CO_TL_SPP = @as(u32, 1030);
pub const CL_TL_NBF = @as(u32, 1025);
pub const CL_TL_UDP = @as(u32, 1027);
pub const ER_ICMP = @as(u32, 896);
pub const CL_NL_IPX = @as(u32, 769);
pub const CL_NL_IP = @as(u32, 771);
pub const AT_ARP = @as(u32, 640);
pub const AT_NULL = @as(u32, 642);
pub const IF_GENERIC = @as(u32, 512);
pub const IF_MIB = @as(u32, 514);
pub const IOCTL_TDI_TL_IO_CONTROL_ENDPOINT = @as(u32, 2162744);

//----------------------------------------------------------------------------
// DCI_* (Display Control Interface) commands, status/error codes (negative
// i32 values are failures), and surface-capability bit flags.
//----------------------------------------------------------------------------
pub const DCI_VERSION = @as(u32, 256);
pub const DCICREATEPRIMARYSURFACE = @as(u32, 1);
pub const DCICREATEOFFSCREENSURFACE = @as(u32, 2);
pub const DCICREATEOVERLAYSURFACE = @as(u32, 3);
pub const DCIENUMSURFACE = @as(u32, 4);
pub const DCIESCAPE = @as(u32, 5);
pub const DCI_OK = @as(u32, 0);
pub const DCI_FAIL_GENERIC = @as(i32, -1);
pub const DCI_FAIL_UNSUPPORTEDVERSION = @as(i32, -2);
pub const DCI_FAIL_INVALIDSURFACE = @as(i32, -3);
pub const DCI_FAIL_UNSUPPORTED = @as(i32, -4);
pub const DCI_ERR_CURRENTLYNOTAVAIL = @as(i32, -5);
pub const DCI_ERR_INVALIDRECT = @as(i32, -6);
pub const DCI_ERR_UNSUPPORTEDFORMAT = @as(i32, -7);
pub const DCI_ERR_UNSUPPORTEDMASK = @as(i32, -8);
pub const DCI_ERR_TOOBIGHEIGHT = @as(i32, -9);
pub const DCI_ERR_TOOBIGWIDTH = @as(i32, -10);
pub const DCI_ERR_TOOBIGSIZE = @as(i32, -11);
pub const DCI_ERR_OUTOFMEMORY = @as(i32, -12);
pub const DCI_ERR_INVALIDPOSITION = @as(i32, -13);
pub const DCI_ERR_INVALIDSTRETCH = @as(i32, -14);
pub const DCI_ERR_INVALIDCLIPLIST = @as(i32, -15);
pub const DCI_ERR_SURFACEISOBSCURED = @as(i32, -16);
pub const DCI_ERR_XALIGN = @as(i32, -17);
pub const DCI_ERR_YALIGN = @as(i32, -18);
pub const DCI_ERR_XYALIGN = @as(i32, -19);
pub const DCI_ERR_WIDTHALIGN = @as(i32, -20);
pub const DCI_ERR_HEIGHTALIGN = @as(i32, -21);
pub const DCI_STATUS_POINTERCHANGED = @as(u32, 1);
pub const DCI_STATUS_STRIDECHANGED = @as(u32, 2);
pub const DCI_STATUS_FORMATCHANGED = @as(u32, 4);
pub const DCI_STATUS_SURFACEINFOCHANGED = @as(u32, 8);
pub const DCI_STATUS_CHROMAKEYCHANGED = @as(u32, 16);
pub const DCI_STATUS_WASSTILLDRAWING = @as(u32, 32);
pub const DCI_SURFACE_TYPE = @as(u32, 15);
pub const DCI_PRIMARY = @as(u32, 0);
pub const DCI_OFFSCREEN = @as(u32, 1);
pub const DCI_OVERLAY = @as(u32, 2);
pub const DCI_VISIBLE = @as(u32, 16);
pub const DCI_CHROMAKEY = @as(u32, 32);
pub const DCI_1632_ACCESS = @as(u32, 64);
pub const DCI_DWORDSIZE = @as(u32, 128);
pub const DCI_DWORDALIGN = @as(u32, 256);
pub const DCI_WRITEONLY = @as(u32, 512);
pub const DCI_ASYNC = @as(u32, 1024);
pub const DCI_CAN_STRETCHX = @as(u32, 4096);
pub const DCI_CAN_STRETCHY = @as(u32, 8192);
pub const DCI_CAN_STRETCHXN = @as(u32, 16384);
pub const DCI_CAN_STRETCHYN = @as(u32, 32768);
pub const DCI_CANOVERLAY = @as(u32, 65536);

//----------------------------------------------------------------------------
// File-flag, progress-routine (PROGRESS_*) and COPY_FILE_* bit flags.
//----------------------------------------------------------------------------
pub const FILE_FLAG_OPEN_REQUIRING_OPLOCK = @as(u32, 262144);
pub const PROGRESS_CONTINUE = @as(u32, 0);
pub const PROGRESS_CANCEL = @as(u32, 1);
pub const PROGRESS_STOP = @as(u32, 2);
pub const PROGRESS_QUIET = @as(u32, 3);
pub const COPY_FILE_FAIL_IF_EXISTS = @as(u32, 1);
pub const COPY_FILE_RESTARTABLE = @as(u32, 2);
pub const COPY_FILE_OPEN_SOURCE_FOR_WRITE = @as(u32, 4);
pub const COPY_FILE_ALLOW_DECRYPTED_DESTINATION = @as(u32, 8);
pub const COPY_FILE_COPY_SYMLINK = @as(u32, 2048);
pub const COPY_FILE_NO_BUFFERING = @as(u32, 4096);
pub const COPY_FILE_REQUEST_SECURITY_PRIVILEGES = @as(u32, 8192);
pub const COPY_FILE_RESUME_FROM_PAUSE = @as(u32, 16384);
pub const COPY_FILE_NO_OFFLOAD = @as(u32, 262144);
pub const COPY_FILE_IGNORE_EDP_BLOCK = @as(u32, 4194304);
pub const COPY_FILE_IGNORE_SOURCE_ENCRYPTION = @as(u32, 8388608);
pub const COPY_FILE_DONT_REQUEST_DEST_WRITE_DAC = @as(u32, 33554432);
pub const COPY_FILE_REQUEST_COMPRESSED_TRAFFIC = @as(u32, 268435456);
pub const COPY_FILE_OPEN_AND_COPY_REPARSE_POINT = @as(u32, 2097152);
pub const COPY_FILE_DIRECTORY = @as(u32, 128);
pub const COPY_FILE_SKIP_ALTERNATE_STREAMS = @as(u32, 32768);
pub const COPY_FILE_DISABLE_PRE_ALLOCATION = @as(u32, 67108864);
pub const COPY_FILE_ENABLE_LOW_FREE_SPACE_MODE = @as(u32, 134217728);

//----------------------------------------------------------------------------
// Named-pipe, fail-fast, and serial DTR/RTS line-control constants.
//----------------------------------------------------------------------------
pub const PIPE_UNLIMITED_INSTANCES = @as(u32, 255);
pub const FAIL_FAST_GENERATE_EXCEPTION_ADDRESS = @as(u32, 1);
pub const FAIL_FAST_NO_HARD_ERROR_DLG = @as(u32, 2);
pub const DTR_CONTROL_DISABLE = @as(u32, 0);
pub const DTR_CONTROL_ENABLE = @as(u32, 1);
pub const DTR_CONTROL_HANDSHAKE = @as(u32, 2);
pub const RTS_CONTROL_DISABLE = @as(u32, 0);
pub const RTS_CONTROL_ENABLE = @as(u32, 1);
pub const RTS_CONTROL_HANDSHAKE = @as(u32, 2);
pub const RTS_CONTROL_TOGGLE = @as(u32, 3);

//----------------------------------------------------------------------------
// Legacy global-memory (GMEM_*) allocation flags.
// Note GMEM_SHARE and GMEM_DDESHARE share the value 8192 in the SDK headers.
//----------------------------------------------------------------------------
pub const GMEM_NOCOMPACT = @as(u32, 16);
pub const GMEM_NODISCARD = @as(u32, 32);
pub const GMEM_MODIFY = @as(u32, 128);
pub const GMEM_DISCARDABLE = @as(u32, 256);
pub const GMEM_NOT_BANKED = @as(u32, 4096);
pub const GMEM_SHARE = @as(u32, 8192);
pub const GMEM_DDESHARE = @as(u32, 8192);
pub const GMEM_NOTIFY = @as(u32, 16384);
pub const GMEM_VALID_FLAGS = @as(u32, 32626);
//----------------------------------------------------------------------------
// Global-memory handle state and mask values.
//----------------------------------------------------------------------------
pub const GMEM_INVALID_HANDLE = @as(u32, 32768);
pub const GMEM_DISCARDED = @as(u32, 16384);
pub const GMEM_LOCKCOUNT = @as(u32, 255);

//----------------------------------------------------------------------------
// VOLUME_NAME_* path-format selectors.
//----------------------------------------------------------------------------
pub const VOLUME_NAME_DOS = @as(u32, 0);
pub const VOLUME_NAME_GUID = @as(u32, 1);
pub const VOLUME_NAME_NT = @as(u32, 2);
pub const VOLUME_NAME_NONE = @as(u32, 4);

//----------------------------------------------------------------------------
// DRIVE_* drive-type codes.
//----------------------------------------------------------------------------
pub const DRIVE_UNKNOWN = @as(u32, 0);
pub const DRIVE_NO_ROOT_DIR = @as(u32, 1);
pub const DRIVE_REMOVABLE = @as(u32, 2);
pub const DRIVE_FIXED = @as(u32, 3);
pub const DRIVE_REMOTE = @as(u32, 4);
pub const DRIVE_CDROM = @as(u32, 5);
pub const DRIVE_RAMDISK = @as(u32, 6);

//----------------------------------------------------------------------------
// FILE_TYPE_* device-class codes.
//----------------------------------------------------------------------------
pub const FILE_TYPE_UNKNOWN = @as(u32, 0);
pub const FILE_TYPE_DISK = @as(u32, 1);
pub const FILE_TYPE_CHAR = @as(u32, 2);
pub const FILE_TYPE_PIPE = @as(u32, 3);
pub const FILE_TYPE_REMOTE = @as(u32, 32768);

//----------------------------------------------------------------------------
// Serial-port parity and stop-bit settings.
//----------------------------------------------------------------------------
pub const NOPARITY = @as(u32, 0);
pub const ODDPARITY = @as(u32, 1);
pub const EVENPARITY = @as(u32, 2);
pub const MARKPARITY = @as(u32, 3);
pub const SPACEPARITY = @as(u32, 4);
pub const ONESTOPBIT = @as(u32, 0);
pub const ONE5STOPBITS = @as(u32, 1);
pub const TWOSTOPBITS = @as(u32, 2);

//----------------------------------------------------------------------------
// Wait/timeout sentinels.
//----------------------------------------------------------------------------
pub const IGNORE = @as(u32, 0);
pub const INFINITE = @as(u32, 4294967295);

//----------------------------------------------------------------------------
// Common baud-rate (CBR_*) values; each constant equals its numeric suffix.
//----------------------------------------------------------------------------
pub const CBR_110 = @as(u32, 110);
pub const CBR_300 = @as(u32, 300);
pub const CBR_600 = @as(u32, 600);
pub const CBR_1200 = @as(u32, 1200);
pub const CBR_2400 = @as(u32, 2400);
pub const CBR_4800 = @as(u32, 4800);
pub const CBR_9600 = @as(u32, 9600);
pub const CBR_14400 = @as(u32, 14400);
pub const CBR_19200 = @as(u32, 19200);
pub const CBR_38400 = @as(u32, 38400);
pub const CBR_56000 = @as(u32, 56000);
pub const CBR_57600 = @as(u32, 57600);
pub const CBR_115200 = @as(u32, 115200);
pub const CBR_128000 = @as(u32, 128000);
pub const CBR_256000 = @as(u32, 256000);

//----------------------------------------------------------------------------
// Communications-error (CE_*) flags and IE_* error codes (negative i32).
//----------------------------------------------------------------------------
pub const CE_TXFULL = @as(u32, 256);
pub const CE_PTO = @as(u32, 512);
pub const CE_IOE = @as(u32, 1024);
pub const CE_DNS = @as(u32, 2048);
pub const CE_OOP = @as(u32, 4096);
pub const CE_MODE = @as(u32, 32768);
pub const IE_BADID = @as(i32, -1);
pub const IE_OPEN = @as(i32, -2);
//----------------------------------------------------------------------------
// Remaining IE_* communications error codes (negative i32).
//----------------------------------------------------------------------------
pub const IE_NOPEN = @as(i32, -3);
pub const IE_MEMORY = @as(i32, -4);
pub const IE_DEFAULT = @as(i32, -5);
pub const IE_HARDWARE = @as(i32, -10);
pub const IE_BYTESIZE = @as(i32, -11);
pub const IE_BAUDRATE = @as(i32, -12);
pub const RESETDEV = @as(u32, 7);
pub const LPTx = @as(u32, 128);

//----------------------------------------------------------------------------
// Legacy sound-driver S_* queue/articulation/noise constants and the
// negative S_SER* error codes.
//----------------------------------------------------------------------------
pub const S_QUEUEEMPTY = @as(u32, 0);
pub const S_THRESHOLD = @as(u32, 1);
pub const S_ALLTHRESHOLD = @as(u32, 2);
pub const S_NORMAL = @as(u32, 0);
pub const S_LEGATO = @as(u32, 1);
pub const S_STACCATO = @as(u32, 2);
pub const S_PERIOD512 = @as(u32, 0);
pub const S_PERIOD1024 = @as(u32, 1);
pub const S_PERIOD2048 = @as(u32, 2);
pub const S_PERIODVOICE = @as(u32, 3);
pub const S_WHITE512 = @as(u32, 4);
pub const S_WHITE1024 = @as(u32, 5);
pub const S_WHITE2048 = @as(u32, 6);
pub const S_WHITEVOICE = @as(u32, 7);
pub const S_SERDVNA = @as(i32, -1);
pub const S_SEROFM = @as(i32, -2);
pub const S_SERMACT = @as(i32, -3);
pub const S_SERQFUL = @as(i32, -4);
pub const S_SERBDNT = @as(i32, -5);
pub const S_SERDLN = @as(i32, -6);
pub const S_SERDCC = @as(i32, -7);
pub const S_SERDTP = @as(i32, -8);
pub const S_SERDVL = @as(i32, -9);
pub const S_SERDMD = @as(i32, -10);
pub const S_SERDSH = @as(i32, -11);
pub const S_SERDPT = @as(i32, -12);
pub const S_SERDFQ = @as(i32, -13);
pub const S_SERDDR = @as(i32, -14);
pub const S_SERDSR = @as(i32, -15);
pub const S_SERDST = @as(i32, -16);

//----------------------------------------------------------------------------
// Pipe-wait, OpenFile, atom, SCS_* binary-type, fiber and completion flags.
//----------------------------------------------------------------------------
pub const NMPWAIT_NOWAIT = @as(u32, 1);
pub const OFS_MAXPATHNAME = @as(u32, 128);
pub const MAXINTATOM = @as(u32, 49152);
pub const SCS_32BIT_BINARY = @as(u32, 0);
pub const SCS_DOS_BINARY = @as(u32, 1);
pub const SCS_WOW_BINARY = @as(u32, 2);
pub const SCS_PIF_BINARY = @as(u32, 3);
pub const SCS_POSIX_BINARY = @as(u32, 4);
pub const SCS_OS216_BINARY = @as(u32, 5);
pub const SCS_64BIT_BINARY = @as(u32, 6);
pub const FIBER_FLAG_FLOAT_SWITCH = @as(u32, 1);
pub const FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = @as(u32, 1);
pub const FILE_SKIP_SET_EVENT_ON_HANDLE = @as(u32, 2);
pub const HINSTANCE_ERROR = @as(u32, 32);
//----------------------------------------------------------------------------
// Message-formatting mask and FILE_* encryption-status codes.
//----------------------------------------------------------------------------
pub const FORMAT_MESSAGE_MAX_WIDTH_MASK = @as(u32, 255);
pub const FILE_ENCRYPTABLE = @as(u32, 0);
pub const FILE_IS_ENCRYPTED = @as(u32, 1);
pub const FILE_SYSTEM_ATTR = @as(u32, 2);
pub const FILE_ROOT_DIR = @as(u32, 3);
pub const FILE_SYSTEM_DIR = @as(u32, 4);
pub const FILE_UNKNOWN = @as(u32, 5);
pub const FILE_SYSTEM_NOT_SUPPORT = @as(u32, 6);
pub const FILE_USER_DISALLOWED = @as(u32, 7);
pub const FILE_READ_ONLY = @as(u32, 8);
pub const FILE_DIR_DISALLOWED = @as(u32, 9);

//----------------------------------------------------------------------------
// EFS (Encrypting File System) backup/restore flags.
//----------------------------------------------------------------------------
pub const EFS_USE_RECOVERY_KEYS = @as(u32, 1);
pub const CREATE_FOR_IMPORT = @as(u32, 1);
pub const CREATE_FOR_DIR = @as(u32, 2);
pub const OVERWRITE_HIDDEN = @as(u32, 4);
pub const EFSRPC_SECURE_ONLY = @as(u32, 8);
pub const EFS_DROP_ALTERNATE_STREAMS = @as(u32, 16);

//----------------------------------------------------------------------------
// Backup-stream (BACKUP_*/STREAM_*) identifiers and attribute flags.
//----------------------------------------------------------------------------
pub const BACKUP_INVALID = @as(u32, 0);
pub const BACKUP_GHOSTED_FILE_EXTENTS = @as(u32, 11);
pub const STREAM_NORMAL_ATTRIBUTE = @as(u32, 0);
pub const STREAM_MODIFIED_WHEN_READ = @as(u32, 1);
pub const STREAM_CONTAINS_SECURITY = @as(u32, 2);
pub const STREAM_CONTAINS_PROPERTIES = @as(u32, 4);
pub const STREAM_SPARSE_ATTRIBUTE = @as(u32, 8);
pub const STREAM_CONTAINS_GHOSTED_FILE_EXTENTS = @as(u32, 16);
pub const SHUTDOWN_NORETRY = @as(u32, 1);

//----------------------------------------------------------------------------
// Process/thread attribute-list (PROC_THREAD_ATTRIBUTE_*) field masks and
// PROCESS_CREATION_* mitigation/child-process policy bits.
//----------------------------------------------------------------------------
pub const PROTECTION_LEVEL_SAME = @as(u32, 4294967295);
pub const PROC_THREAD_ATTRIBUTE_NUMBER = @as(u32, 65535);
pub const PROC_THREAD_ATTRIBUTE_THREAD = @as(u32, 65536);
pub const PROC_THREAD_ATTRIBUTE_INPUT = @as(u32, 131072);
pub const PROC_THREAD_ATTRIBUTE_ADDITIVE = @as(u32, 262144);
pub const PROCESS_CREATION_MITIGATION_POLICY_DEP_ENABLE = @as(u32, 1);
pub const PROCESS_CREATION_MITIGATION_POLICY_DEP_ATL_THUNK_ENABLE = @as(u32, 2);
pub const PROCESS_CREATION_MITIGATION_POLICY_SEHOP_ENABLE = @as(u32, 4);
pub const PROCESS_CREATION_CHILD_PROCESS_RESTRICTED = @as(u32, 1);
pub const PROCESS_CREATION_CHILD_PROCESS_OVERRIDE = @as(u32, 2);
pub const PROCESS_CREATION_CHILD_PROCESS_RESTRICTED_UNLESS_SECURE = @as(u32, 4);
pub const PROCESS_CREATION_ALL_APPLICATION_PACKAGES_OPT_OUT = @as(u32, 1);
pub const PROCESS_CREATION_DESKTOP_APP_BREAKAWAY_ENABLE_PROCESS_TREE = @as(u32, 1);
pub const PROCESS_CREATION_DESKTOP_APP_BREAKAWAY_DISABLE_PROCESS_TREE = @as(u32, 2);
pub const PROCESS_CREATION_DESKTOP_APP_BREAKAWAY_OVERRIDE = @as(u32, 4);

//----------------------------------------------------------------------------
// Atom, DLL search-path, CopyFile2, event-log and computer-name constants.
//----------------------------------------------------------------------------
pub const ATOM_FLAG_GLOBAL = @as(u32, 2);
pub const BASE_SEARCH_PATH_ENABLE_SAFE_SEARCHMODE = @as(u32, 1);
pub const BASE_SEARCH_PATH_DISABLE_SAFE_SEARCHMODE = @as(u32, 65536);
pub const BASE_SEARCH_PATH_PERMANENT = @as(u32, 32768);
pub const COPYFILE2_MESSAGE_COPY_OFFLOAD = @as(i32, 1);
pub const EVENTLOG_FULL_INFO = @as(u32, 0);
pub const OPERATION_API_VERSION = @as(u32, 1);
pub const MAX_COMPUTERNAME_LENGTH = @as(u32, 15);

//----------------------------------------------------------------------------
// Logon-provider, hardware-profile and docking-state constants.
//----------------------------------------------------------------------------
pub const LOGON32_PROVIDER_WINNT35 = @as(u32, 1);
pub const LOGON32_PROVIDER_VIRTUAL = @as(u32, 4);
pub const LOGON_ZERO_PASSWORD_BUFFER = @as(u32, 2147483648);
pub const HW_PROFILE_GUIDLEN = @as(u32, 39);
pub const DOCKINFO_UNDOCKED = @as(u32, 1);
pub const DOCKINFO_DOCKED = @as(u32, 2);
pub const DOCKINFO_USER_SUPPLIED = @as(u32, 4);

//----------------------------------------------------------------------------
// Termination-cause (TC_*) and power-status (AC_LINE_*/BATTERY_*) values.
//----------------------------------------------------------------------------
pub const TC_NORMAL = @as(u32, 0);
pub const TC_HARDERR = @as(u32, 1);
pub const TC_GP_TRAP = @as(u32, 2);
pub const TC_SIGNAL = @as(u32, 3);
pub const AC_LINE_OFFLINE = @as(u32, 0);
pub const AC_LINE_ONLINE = @as(u32, 1);
pub const AC_LINE_BACKUP_POWER = @as(u32, 2);
pub const AC_LINE_UNKNOWN = @as(u32, 255);
pub const BATTERY_FLAG_HIGH = @as(u32, 1);
pub const BATTERY_FLAG_LOW = @as(u32, 2);
pub const BATTERY_FLAG_CRITICAL = @as(u32, 4);
pub const BATTERY_FLAG_CHARGING = @as(u32, 8);
pub const BATTERY_FLAG_NO_BATTERY = @as(u32, 128);
pub const BATTERY_FLAG_UNKNOWN = @as(u32, 255);
pub const BATTERY_PERCENTAGE_UNKNOWN = @as(u32, 255);
pub const SYSTEM_STATUS_FLAG_POWER_SAVING_ON = @as(u32, 1);
pub const BATTERY_LIFE_UNKNOWN = @as(u32, 4294967295);

//----------------------------------------------------------------------------
// Activation-context (ACTCTX_* / QUERY_ACTCTX_*) flags.
//----------------------------------------------------------------------------
pub const ACTCTX_FLAG_PROCESSOR_ARCHITECTURE_VALID = @as(u32, 1);
pub const ACTCTX_FLAG_LANGID_VALID = @as(u32, 2);
pub const ACTCTX_FLAG_ASSEMBLY_DIRECTORY_VALID = @as(u32, 4);
pub const ACTCTX_FLAG_RESOURCE_NAME_VALID = @as(u32, 8);
pub const ACTCTX_FLAG_SET_PROCESS_DEFAULT = @as(u32, 16);
pub const ACTCTX_FLAG_APPLICATION_NAME_VALID = @as(u32, 32);
pub const ACTCTX_FLAG_SOURCE_IS_ASSEMBLYREF = @as(u32, 64);
pub const ACTCTX_FLAG_HMODULE_VALID = @as(u32, 128);
pub const DEACTIVATE_ACTCTX_FLAG_FORCE_EARLY_DEACTIVATION = @as(u32, 1);
pub const FIND_ACTCTX_SECTION_KEY_RETURN_HACTCTX = @as(u32, 1);
pub const FIND_ACTCTX_SECTION_KEY_RETURN_FLAGS = @as(u32, 2);
pub const FIND_ACTCTX_SECTION_KEY_RETURN_ASSEMBLY_METADATA = @as(u32, 4);
pub const ACTIVATION_CONTEXT_BASIC_INFORMATION_DEFINED = @as(u32, 1);
pub const QUERY_ACTCTX_FLAG_USE_ACTIVE_ACTCTX = @as(u32, 4);
pub const QUERY_ACTCTX_FLAG_ACTCTX_IS_HMODULE = @as(u32, 8);
pub const QUERY_ACTCTX_FLAG_ACTCTX_IS_ADDRESS = @as(u32, 16);
pub const QUERY_ACTCTX_FLAG_NO_ADDREF = @as(u32, 2147483648);

//----------------------------------------------------------------------------
// Application-recovery and file rename/disposition flags.
//----------------------------------------------------------------------------
pub const RESTART_MAX_CMD_LINE = @as(u32, 1024);
pub const RECOVERY_DEFAULT_PING_INTERVAL = @as(u32, 5000);
pub const FILE_RENAME_FLAG_REPLACE_IF_EXISTS = @as(u32, 1);
pub const FILE_RENAME_FLAG_POSIX_SEMANTICS = @as(u32, 2);
pub const FILE_RENAME_FLAG_SUPPRESS_PIN_STATE_INHERITANCE = @as(u32, 4);
pub const FILE_DISPOSITION_FLAG_DO_NOT_DELETE = @as(u32, 0);
pub const FILE_DISPOSITION_FLAG_DELETE = @as(u32, 1);
pub const FILE_DISPOSITION_FLAG_POSIX_SEMANTICS = @as(u32, 2);
pub const FILE_DISPOSITION_FLAG_FORCE_IMAGE_SECTION_CHECK = @as(u32, 4);
pub const FILE_DISPOSITION_FLAG_ON_CLOSE = @as(u32, 8);
pub const FILE_DISPOSITION_FLAG_IGNORE_READONLY_ATTRIBUTE = @as(u32, 16);

//----------------------------------------------------------------------------
// Storage-alignment and remote-protocol (RPI_*) capability flags.
//----------------------------------------------------------------------------
pub const STORAGE_INFO_FLAGS_ALIGNED_DEVICE = @as(u32, 1);
pub const STORAGE_INFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE = @as(u32, 2);
pub const STORAGE_INFO_OFFSET_UNKNOWN = @as(u32, 4294967295);
pub const REMOTE_PROTOCOL_INFO_FLAG_LOOPBACK = @as(u32, 1);
pub const REMOTE_PROTOCOL_INFO_FLAG_OFFLINE = @as(u32, 2);
pub const REMOTE_PROTOCOL_INFO_FLAG_PERSISTENT_HANDLE = @as(u32, 4);
pub const RPI_FLAG_SMB2_SHARECAP_TIMEWARP = @as(u32, 2);
pub const RPI_FLAG_SMB2_SHARECAP_DFS = @as(u32, 8);
pub const RPI_FLAG_SMB2_SHARECAP_CONTINUOUS_AVAILABILITY = @as(u32, 16);
pub const RPI_FLAG_SMB2_SHARECAP_SCALEOUT = @as(u32, 32);
pub const RPI_FLAG_SMB2_SHARECAP_CLUSTER = @as(u32, 64);
pub const RPI_SMB2_FLAG_SERVERCAP_DFS = @as(u32, 1);
pub const RPI_SMB2_FLAG_SERVERCAP_LEASING = @as(u32, 2);
pub const RPI_SMB2_FLAG_SERVERCAP_LARGEMTU = @as(u32, 4);
pub const RPI_SMB2_FLAG_SERVERCAP_MULTICHANNEL = @as(u32, 8);
pub const RPI_SMB2_FLAG_SERVERCAP_PERSISTENT_HANDLES = @as(u32, 16);
pub const RPI_SMB2_FLAG_SERVERCAP_DIRECTORY_LEASING = @as(u32, 32);
pub const MICROSOFT_WINBASE_H_DEFINE_INTERLOCKED_CPLUSPLUS_OVERLOADS = @as(u32, 0);

//----------------------------------------------------------------------------
// Code-integrity (CODEINTEGRITY_OPTION_*) state bits.
//----------------------------------------------------------------------------
pub const CODEINTEGRITY_OPTION_ENABLED = @as(u32, 1);
pub const CODEINTEGRITY_OPTION_TESTSIGN = @as(u32, 2);
pub const CODEINTEGRITY_OPTION_UMCI_ENABLED = @as(u32, 4);
pub const CODEINTEGRITY_OPTION_UMCI_AUDITMODE_ENABLED = @as(u32, 8);
pub const CODEINTEGRITY_OPTION_UMCI_EXCLUSIONPATHS_ENABLED = @as(u32, 16);
pub const CODEINTEGRITY_OPTION_TEST_BUILD = @as(u32, 32);
pub const CODEINTEGRITY_OPTION_PREPRODUCTION_BUILD = @as(u32, 64);
pub const CODEINTEGRITY_OPTION_DEBUGMODE_ENABLED = @as(u32, 128);
pub const CODEINTEGRITY_OPTION_FLIGHT_BUILD = @as(u32, 256);
pub const CODEINTEGRITY_OPTION_FLIGHTING_ENABLED = @as(u32, 512);
pub const CODEINTEGRITY_OPTION_HVCI_KMCI_ENABLED = @as(u32, 1024);
pub const CODEINTEGRITY_OPTION_HVCI_KMCI_AUDITMODE_ENABLED = @as(u32, 2048);
pub const CODEINTEGRITY_OPTION_HVCI_KMCI_STRICTMODE_ENABLED = @as(u32, 4096);
pub const CODEINTEGRITY_OPTION_HVCI_IUM_ENABLED = @as(u32, 8192);

//----------------------------------------------------------------------------
// NtCreateFile disposition limit and CreateOptions (FILE_*) bits, part 1.
//----------------------------------------------------------------------------
pub const FILE_MAXIMUM_DISPOSITION = @as(u32, 5);
pub const FILE_DIRECTORY_FILE = @as(u32, 1);
pub const FILE_WRITE_THROUGH = @as(u32, 2);
pub const FILE_SEQUENTIAL_ONLY = @as(u32, 4);
pub const FILE_NO_INTERMEDIATE_BUFFERING = @as(u32, 8);
pub const FILE_SYNCHRONOUS_IO_ALERT = @as(u32, 16);
pub const FILE_SYNCHRONOUS_IO_NONALERT = @as(u32, 32);
pub const FILE_NON_DIRECTORY_FILE = @as(u32, 64);
//----------------------------------------------------------------------------
// NtCreateFile CreateOptions (FILE_*) bits, part 2, plus valid-flag masks
// and information (create-result) codes.
//----------------------------------------------------------------------------
pub const FILE_CREATE_TREE_CONNECTION = @as(u32, 128);
pub const FILE_COMPLETE_IF_OPLOCKED = @as(u32, 256);
pub const FILE_NO_EA_KNOWLEDGE = @as(u32, 512);
pub const FILE_OPEN_REMOTE_INSTANCE = @as(u32, 1024);
pub const FILE_RANDOM_ACCESS = @as(u32, 2048);
pub const FILE_DELETE_ON_CLOSE = @as(u32, 4096);
pub const FILE_OPEN_BY_FILE_ID = @as(u32, 8192);
pub const FILE_OPEN_FOR_BACKUP_INTENT = @as(u32, 16384);
pub const FILE_NO_COMPRESSION = @as(u32, 32768);
pub const FILE_OPEN_REQUIRING_OPLOCK = @as(u32, 65536);
pub const FILE_RESERVE_OPFILTER = @as(u32, 1048576);
pub const FILE_OPEN_REPARSE_POINT = @as(u32, 2097152);
pub const FILE_OPEN_NO_RECALL = @as(u32, 4194304);
pub const FILE_OPEN_FOR_FREE_SPACE_QUERY = @as(u32, 8388608);
pub const FILE_VALID_OPTION_FLAGS = @as(u32, 16777215);
pub const FILE_VALID_PIPE_OPTION_FLAGS = @as(u32, 50);
pub const FILE_VALID_MAILSLOT_OPTION_FLAGS = @as(u32, 50);
pub const FILE_VALID_SET_FLAGS = @as(u32, 54);
pub const FILE_SUPERSEDED = @as(u32, 0);
pub const FILE_OPENED = @as(u32, 1);
pub const FILE_CREATED = @as(u32, 2);
pub const FILE_OVERWRITTEN = @as(u32, 3);
pub const FILE_EXISTS = @as(u32, 4);
pub const FILE_DOES_NOT_EXIST = @as(u32, 5);

//----------------------------------------------------------------------------
// WinWatch notification codes and an HRESULT status value.
//----------------------------------------------------------------------------
pub const WINWATCHNOTIFY_START = @as(u32, 0);
pub const WINWATCHNOTIFY_STOP = @as(u32, 1);
pub const WINWATCHNOTIFY_DESTROY = @as(u32, 2);
pub const WINWATCHNOTIFY_CHANGING = @as(u32, 3);
pub const WINWATCHNOTIFY_CHANGED = @as(u32, 4);
pub const S_ASYNCHRONOUS = @import("../zig.zig").typedConst(HRESULT, @as(i32, 262632));

//----------------------------------------------------------------------------
// Advpack installer flags: RSC_FLAG_*, ALINF_*, AIF_*, IE4_*, AADBE_*,
// ADN_*, LIS_*, RUNCMDS_*.
//----------------------------------------------------------------------------
pub const RSC_FLAG_INF = @as(u32, 1);
pub const RSC_FLAG_SKIPDISKSPACECHECK = @as(u32, 2);
pub const RSC_FLAG_QUIET = @as(u32, 4);
pub const RSC_FLAG_NGCONV = @as(u32, 8);
pub const RSC_FLAG_UPDHLPDLLS = @as(u32, 16);
pub const RSC_FLAG_DELAYREGISTEROCX = @as(u32, 512);
pub const RSC_FLAG_SETUPAPI = @as(u32, 1024);
pub const ALINF_QUIET = @as(u32, 4);
pub const ALINF_NGCONV = @as(u32, 8);
pub const ALINF_UPDHLPDLLS = @as(u32, 16);
pub const ALINF_BKINSTALL = @as(u32, 32);
pub const ALINF_ROLLBACK = @as(u32, 64);
pub const ALINF_CHECKBKDATA = @as(u32, 128);
pub const ALINF_ROLLBKDOALL = @as(u32, 256);
pub const ALINF_DELAYREGISTEROCX = @as(u32, 512);
pub const AIF_WARNIFSKIP = @as(u32, 1);
pub const AIF_NOSKIP = @as(u32, 2);
pub const AIF_NOVERSIONCHECK = @as(u32, 4);
pub const AIF_FORCE_FILE_IN_USE = @as(u32, 8);
pub const AIF_NOOVERWRITE = @as(u32, 16);
pub const AIF_NO_VERSION_DIALOG = @as(u32, 32);
pub const AIF_REPLACEONLY = @as(u32, 1024);
pub const AIF_NOLANGUAGECHECK = @as(u32, 268435456);
pub const AIF_QUIET = @as(u32, 536870912);
pub const IE4_RESTORE = @as(u32, 1);
pub const IE4_BACKNEW = @as(u32, 2);
pub const IE4_NODELETENEW = @as(u32, 4);
pub const IE4_NOMESSAGES = @as(u32, 8);
pub const IE4_NOPROGRESS = @as(u32, 16);
pub const IE4_NOENUMKEY = @as(u32, 32);
pub const IE4_NO_CRC_MAPPING = @as(u32, 64);
pub const IE4_REGSECTION = @as(u32, 128);
pub const IE4_FRDOALL = @as(u32, 256);
pub const IE4_UPDREFCNT = @as(u32, 512);
pub const IE4_USEREFCNT = @as(u32, 1024);
pub const IE4_EXTRAINCREFCNT = @as(u32, 2048);
pub const IE4_REMOVREGBKDATA = @as(u32, 4096);
pub const AADBE_ADD_ENTRY = @as(u32, 1);
pub const AADBE_DEL_ENTRY = @as(u32, 2);
pub const ADN_DEL_IF_EMPTY = @as(u32, 1);
pub const ADN_DONT_DEL_SUBDIRS = @as(u32, 2);
pub const ADN_DONT_DEL_DIR = @as(u32, 4);
pub const ADN_DEL_UNC_PATHS = @as(u32, 8);
pub const LIS_QUIET = @as(u32, 1);
pub const LIS_NOGRPCONV = @as(u32, 2);
pub const RUNCMDS_QUIET = @as(u32, 1);
pub const RUNCMDS_NOWAIT = @as(u32, 2);
pub const RUNCMDS_DELAYPOSTCMD = @as(u32, 4);

//----------------------------------------------------------------------------
// IME (Input Method Editor) constants: virtual keys (VK_DBE_*), window
// placement (MCW_*), conversion modes (IME_MODE_*), commands, result codes
// (IME_RS_*), and IR_* report messages.
//----------------------------------------------------------------------------
pub const IME_MAXPROCESS = @as(u32, 32);
pub const CP_HWND = @as(u32, 0);
pub const CP_OPEN = @as(u32, 1);
pub const CP_DIRECT = @as(u32, 2);
pub const CP_LEVEL = @as(u32, 3);
pub const VK_DBE_ALPHANUMERIC = @as(u32, 240);
pub const VK_DBE_KATAKANA = @as(u32, 241);
pub const VK_DBE_HIRAGANA = @as(u32, 242);
pub const VK_DBE_SBCSCHAR = @as(u32, 243);
pub const VK_DBE_DBCSCHAR = @as(u32, 244);
pub const VK_DBE_ROMAN = @as(u32, 245);
pub const VK_DBE_NOROMAN = @as(u32, 246);
pub const VK_DBE_ENTERWORDREGISTERMODE = @as(u32, 247);
pub const VK_DBE_ENTERIMECONFIGMODE = @as(u32, 248);
pub const VK_DBE_FLUSHSTRING = @as(u32, 249);
pub const VK_DBE_CODEINPUT = @as(u32, 250);
pub const VK_DBE_NOCODEINPUT = @as(u32, 251);
pub const VK_DBE_DETERMINESTRING = @as(u32, 252);
pub const VK_DBE_ENTERDLGCONVERSIONMODE = @as(u32, 253);
pub const MCW_DEFAULT = @as(u32, 0);
pub const MCW_RECT = @as(u32, 1);
pub const MCW_WINDOW = @as(u32, 2);
pub const MCW_SCREEN = @as(u32, 4);
pub const MCW_VERTICAL = @as(u32, 8);
pub const MCW_HIDDEN = @as(u32, 16);
pub const IME_MODE_ALPHANUMERIC = @as(u32, 1);
pub const IME_MODE_SBCSCHAR = @as(u32, 2);
pub const IME_MODE_KATAKANA = @as(u32, 2);
pub const IME_MODE_HIRAGANA = @as(u32, 4);
pub const IME_MODE_HANJACONVERT = @as(u32, 4);
pub const IME_MODE_DBCSCHAR = @as(u32, 16);
pub const IME_MODE_ROMAN = @as(u32, 32);
pub const IME_MODE_NOROMAN = @as(u32, 64);
pub const IME_MODE_CODEINPUT = @as(u32, 128);
pub const IME_MODE_NOCODEINPUT = @as(u32, 256);
pub const IME_GETIMECAPS = @as(u32, 3);
pub const IME_SETOPEN = @as(u32, 4);
pub const IME_GETOPEN = @as(u32, 5);
pub const IME_GETVERSION = @as(u32, 7);
pub const IME_SETCONVERSIONWINDOW = @as(u32, 8);
pub const IME_SETCONVERSIONMODE = @as(u32, 16);
pub const IME_GETCONVERSIONMODE = @as(u32, 17);
pub const IME_SET_MODE = @as(u32, 18);
pub const IME_SENDVKEY = @as(u32, 19);
pub const IME_ENTERWORDREGISTERMODE = @as(u32, 24);
pub const IME_SETCONVERSIONFONTEX = @as(u32, 25);
pub const IME_BANJAtoJUNJA = @as(u32, 19);
pub const IME_JUNJAtoBANJA = @as(u32, 20);
pub const IME_JOHABtoKS = @as(u32, 21);
pub const IME_KStoJOHAB = @as(u32, 22);
pub const IMEA_INIT = @as(u32, 1);
pub const IMEA_NEXT = @as(u32, 2);
pub const IMEA_PREV = @as(u32, 3);
pub const IME_REQUEST_CONVERT = @as(u32, 1);
pub const IME_ENABLE_CONVERT = @as(u32, 2);
pub const INTERIM_WINDOW = @as(u32, 0);
pub const MODE_WINDOW = @as(u32, 1);
pub const HANJA_WINDOW = @as(u32, 2);
pub const IME_RS_ERROR = @as(u32, 1);
pub const IME_RS_NOIME = @as(u32, 2);
pub const IME_RS_TOOLONG = @as(u32, 5);
pub const IME_RS_ILLEGAL = @as(u32, 6);
pub const IME_RS_NOTFOUND = @as(u32, 7);
pub const IME_RS_NOROOM = @as(u32, 10);
pub const IME_RS_DISKERROR = @as(u32, 14);
pub const IME_RS_INVALID = @as(u32, 17);
pub const IME_RS_NEST = @as(u32, 18);
pub const IME_RS_SYSTEMMODAL = @as(u32, 19);
pub const WM_IME_REPORT = @as(u32, 640);
pub const IR_STRINGSTART = @as(u32, 256);
pub const IR_STRINGEND = @as(u32, 257);
pub const IR_OPENCONVERT = @as(u32, 288);
pub const IR_CHANGECONVERT = @as(u32, 289);
pub const IR_CLOSECONVERT = @as(u32, 290);
pub const IR_FULLCONVERT = @as(u32, 291);
pub const IR_IMESELECT = @as(u32, 304);
pub const IR_STRING = @as(u32, 320);
pub const IR_DBCSCHAR = @as(u32, 352);
pub const IR_UNDETERMINE = @as(u32, 368);
pub const IR_STRINGEX = @as(u32, 384);
pub const IR_MODEINFO = @as(u32, 400);
pub const WM_WNT_CONVERTREQUESTEX = @as(u32, 265);
pub const WM_CONVERTREQUEST = @as(u32, 266);
pub const WM_CONVERTRESULT = @as(u32, 267);
pub const WM_INTERIM = @as(u32, 268);
pub const WM_IMEKEYDOWN = @as(u32, 656);
pub const WM_IMEKEYUP = @as(u32, 657);

//----------------------------------------------------------------------------
// DirectDraw kernel-mode capability (DDKERNELCAPS_*) and IRQ source
// (DDIRQ_*) bit flags, part 1 (through video port 1).
//----------------------------------------------------------------------------
pub const DDKERNELCAPS_SKIPFIELDS = @as(i32, 1);
pub const DDKERNELCAPS_AUTOFLIP = @as(i32, 2);
pub const DDKERNELCAPS_SETSTATE = @as(i32, 4);
pub const DDKERNELCAPS_LOCK = @as(i32, 8);
pub const DDKERNELCAPS_FLIPVIDEOPORT = @as(i32, 16);
pub const DDKERNELCAPS_FLIPOVERLAY = @as(i32, 32);
pub const DDKERNELCAPS_CAPTURE_SYSMEM = @as(i32, 64);
pub const DDKERNELCAPS_CAPTURE_NONLOCALVIDMEM = @as(i32, 128);
pub const DDKERNELCAPS_FIELDPOLARITY = @as(i32, 256);
pub const DDKERNELCAPS_CAPTURE_INVERTED = @as(i32, 512);
pub const DDIRQ_DISPLAY_VSYNC = @as(i32, 1);
pub const DDIRQ_RESERVED1 = @as(i32, 2);
pub const DDIRQ_VPORT0_VSYNC = @as(i32, 4);
pub const DDIRQ_VPORT0_LINE = @as(i32, 8);
pub const DDIRQ_VPORT1_VSYNC = @as(i32, 16);
pub const DDIRQ_VPORT1_LINE = @as(i32, 32);
pub const DDIRQ_VPORT2_VSYNC = @as(i32, 64); pub const DDIRQ_VPORT2_LINE = @as(i32, 128); pub const DDIRQ_VPORT3_VSYNC = @as(i32, 256); pub const DDIRQ_VPORT3_LINE = @as(i32, 512); pub const DDIRQ_VPORT4_VSYNC = @as(i32, 1024); pub const DDIRQ_VPORT4_LINE = @as(i32, 2048); pub const DDIRQ_VPORT5_VSYNC = @as(i32, 4096); pub const DDIRQ_VPORT5_LINE = @as(i32, 8192); pub const DDIRQ_VPORT6_VSYNC = @as(i32, 16384); pub const DDIRQ_VPORT6_LINE = @as(i32, 32768); pub const DDIRQ_VPORT7_VSYNC = @as(i32, 65536); pub const DDIRQ_VPORT7_LINE = @as(i32, 131072); pub const DDIRQ_VPORT8_VSYNC = @as(i32, 262144); pub const DDIRQ_VPORT8_LINE = @as(i32, 524288); pub const DDIRQ_VPORT9_VSYNC = @as(i32, 65536); pub const DDIRQ_VPORT9_LINE = @as(i32, 131072); //-------------------------------------------------------------------------------- // Section: Types (148) //-------------------------------------------------------------------------------- pub const NT_CREATE_FILE_DISPOSITION = enum(u32) { SUPERSEDE = 0, CREATE = 2, OPEN = 1, OPEN_IF = 3, OVERWRITE = 4, OVERWRITE_IF = 5, }; pub const FILE_SUPERSEDE = NT_CREATE_FILE_DISPOSITION.SUPERSEDE; pub const FILE_CREATE = NT_CREATE_FILE_DISPOSITION.CREATE; pub const FILE_OPEN = NT_CREATE_FILE_DISPOSITION.OPEN; pub const FILE_OPEN_IF = NT_CREATE_FILE_DISPOSITION.OPEN_IF; pub const FILE_OVERWRITE = NT_CREATE_FILE_DISPOSITION.OVERWRITE; pub const FILE_OVERWRITE_IF = NT_CREATE_FILE_DISPOSITION.OVERWRITE_IF; pub const TDIENTITY_ENTITY_TYPE = enum(u32) { GENERIC_ENTITY = 0, AT_ENTITY = 640, CL_NL_ENTITY = 769, CO_NL_ENTITY = 768, CL_TL_ENTITY = 1025, CO_TL_ENTITY = 1024, ER_ENTITY = 896, IF_ENTITY = 512, }; pub const GENERIC_ENTITY = TDIENTITY_ENTITY_TYPE.GENERIC_ENTITY; pub const AT_ENTITY = TDIENTITY_ENTITY_TYPE.AT_ENTITY; pub const CL_NL_ENTITY = TDIENTITY_ENTITY_TYPE.CL_NL_ENTITY; pub const CO_NL_ENTITY = TDIENTITY_ENTITY_TYPE.CO_NL_ENTITY; pub const CL_TL_ENTITY = TDIENTITY_ENTITY_TYPE.CL_TL_ENTITY; pub const CO_TL_ENTITY = 
TDIENTITY_ENTITY_TYPE.CO_TL_ENTITY; pub const ER_ENTITY = TDIENTITY_ENTITY_TYPE.ER_ENTITY; pub const IF_ENTITY = TDIENTITY_ENTITY_TYPE.IF_ENTITY; pub const _D3DHAL_CALLBACKS = extern struct { placeholder: usize, // TODO: why is this type empty? }; pub const _D3DHAL_GLOBALDRIVERDATA = extern struct { placeholder: usize, // TODO: why is this type empty? }; pub const HWINWATCH = *opaque{}; pub const FEATURE_STATE_CHANGE_SUBSCRIPTION = isize; pub const FH_SERVICE_PIPE_HANDLE = isize; pub const LPFIBER_START_ROUTINE = fn( lpFiberParameter: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) void; pub const PFIBER_CALLOUT_ROUTINE = fn( lpParameter: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) ?*c_void; pub const JIT_DEBUG_INFO = extern struct { dwSize: u32, dwProcessorArchitecture: u32, dwThreadID: u32, dwReserved0: u32, lpExceptionAddress: u64, lpExceptionRecord: u64, lpContextRecord: u64, }; pub const PROC_THREAD_ATTRIBUTE_NUM = enum(i32) { ParentProcess = 0, HandleList = 2, GroupAffinity = 3, PreferredNode = 4, IdealProcessor = 5, UmsThread = 6, MitigationPolicy = 7, SecurityCapabilities = 9, ProtectionLevel = 11, JobList = 13, ChildProcessPolicy = 14, AllApplicationPackagesPolicy = 15, Win32kFilter = 16, SafeOpenPromptOriginClaim = 17, DesktopAppPolicy = 18, PseudoConsole = 22, }; pub const ProcThreadAttributeParentProcess = PROC_THREAD_ATTRIBUTE_NUM.ParentProcess; pub const ProcThreadAttributeHandleList = PROC_THREAD_ATTRIBUTE_NUM.HandleList; pub const ProcThreadAttributeGroupAffinity = PROC_THREAD_ATTRIBUTE_NUM.GroupAffinity; pub const ProcThreadAttributePreferredNode = PROC_THREAD_ATTRIBUTE_NUM.PreferredNode; pub const ProcThreadAttributeIdealProcessor = PROC_THREAD_ATTRIBUTE_NUM.IdealProcessor; pub const ProcThreadAttributeUmsThread = PROC_THREAD_ATTRIBUTE_NUM.UmsThread; pub const ProcThreadAttributeMitigationPolicy = PROC_THREAD_ATTRIBUTE_NUM.MitigationPolicy; pub const ProcThreadAttributeSecurityCapabilities = 
PROC_THREAD_ATTRIBUTE_NUM.SecurityCapabilities; pub const ProcThreadAttributeProtectionLevel = PROC_THREAD_ATTRIBUTE_NUM.ProtectionLevel; pub const ProcThreadAttributeJobList = PROC_THREAD_ATTRIBUTE_NUM.JobList; pub const ProcThreadAttributeChildProcessPolicy = PROC_THREAD_ATTRIBUTE_NUM.ChildProcessPolicy; pub const ProcThreadAttributeAllApplicationPackagesPolicy = PROC_THREAD_ATTRIBUTE_NUM.AllApplicationPackagesPolicy; pub const ProcThreadAttributeWin32kFilter = PROC_THREAD_ATTRIBUTE_NUM.Win32kFilter; pub const ProcThreadAttributeSafeOpenPromptOriginClaim = PROC_THREAD_ATTRIBUTE_NUM.SafeOpenPromptOriginClaim; pub const ProcThreadAttributeDesktopAppPolicy = PROC_THREAD_ATTRIBUTE_NUM.DesktopAppPolicy; pub const ProcThreadAttributePseudoConsole = PROC_THREAD_ATTRIBUTE_NUM.PseudoConsole; pub const HW_PROFILE_INFOA = extern struct { dwDockInfo: u32, szHwProfileGuid: [39]CHAR, szHwProfileName: [80]CHAR, }; pub const HW_PROFILE_INFOW = extern struct { dwDockInfo: u32, szHwProfileGuid: [39]u16, szHwProfileName: [80]u16, }; pub const ACTCTX_SECTION_KEYED_DATA_2600 = extern struct { cbSize: u32, ulDataFormatVersion: u32, lpData: ?*c_void, ulLength: u32, lpSectionGlobalData: ?*c_void, ulSectionGlobalDataLength: u32, lpSectionBase: ?*c_void, ulSectionTotalLength: u32, hActCtx: ?HANDLE, ulAssemblyRosterIndex: u32, }; pub const ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA = extern struct { lpInformation: ?*c_void, lpSectionBase: ?*c_void, ulSectionLength: u32, lpSectionGlobalDataBase: ?*c_void, ulSectionGlobalDataLength: u32, }; pub const ACTIVATION_CONTEXT_BASIC_INFORMATION = extern struct { hActCtx: ?HANDLE, dwFlags: u32, }; pub const PQUERYACTCTXW_FUNC = fn( dwFlags: u32, hActCtx: ?HANDLE, pvSubInstance: ?*c_void, ulInfoClass: u32, // TODO: what to do with BytesParamIndex 5? 
pvBuffer: ?*c_void, cbBuffer: usize, pcbWrittenOrRequired: ?*usize, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const APPLICATION_RECOVERY_CALLBACK = fn( pvParameter: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) u32; pub const FILE_CASE_SENSITIVE_INFO = extern struct { Flags: u32, }; pub const FILE_DISPOSITION_INFO_EX = extern struct { Flags: u32, }; pub const CLIENT_ID = extern struct { UniqueProcess: ?HANDLE, UniqueThread: ?HANDLE, }; pub const LDR_DATA_TABLE_ENTRY = extern struct { Reserved1: [2]?*c_void, InMemoryOrderLinks: LIST_ENTRY, Reserved2: [2]?*c_void, DllBase: ?*c_void, Reserved3: [2]?*c_void, FullDllName: UNICODE_STRING, Reserved4: [8]u8, Reserved5: [3]?*c_void, Anonymous: extern union { CheckSum: u32, Reserved6: ?*c_void, }, TimeDateStamp: u32, }; pub const PPS_POST_PROCESS_INIT_ROUTINE = fn( ) callconv(@import("std").os.windows.WINAPI) void; pub const OBJECT_ATTRIBUTES = extern struct { Length: u32, RootDirectory: ?HANDLE, ObjectName: ?*UNICODE_STRING, Attributes: u32, SecurityDescriptor: ?*c_void, SecurityQualityOfService: ?*c_void, }; pub const IO_STATUS_BLOCK = extern struct { Anonymous: extern union { Status: NTSTATUS, Pointer: ?*c_void, }, Information: usize, }; pub const PIO_APC_ROUTINE = fn( ApcContext: ?*c_void, IoStatusBlock: ?*IO_STATUS_BLOCK, Reserved: u32, ) callconv(@import("std").os.windows.WINAPI) void; pub const SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION = extern struct { IdleTime: LARGE_INTEGER, KernelTime: LARGE_INTEGER, UserTime: LARGE_INTEGER, Reserved1: [2]LARGE_INTEGER, Reserved2: u32, }; pub const SYSTEM_PROCESS_INFORMATION = extern struct { NextEntryOffset: u32, NumberOfThreads: u32, Reserved1: [48]u8, ImageName: UNICODE_STRING, BasePriority: i32, UniqueProcessId: ?HANDLE, Reserved2: ?*c_void, HandleCount: u32, SessionId: u32, Reserved3: ?*c_void, PeakVirtualSize: usize, VirtualSize: usize, Reserved4: u32, PeakWorkingSetSize: usize, WorkingSetSize: usize, Reserved5: ?*c_void, QuotaPagedPoolUsage: usize, 
Reserved6: ?*c_void, QuotaNonPagedPoolUsage: usize, PagefileUsage: usize, PeakPagefileUsage: usize, PrivatePageCount: usize, Reserved7: [6]LARGE_INTEGER, }; pub const SYSTEM_THREAD_INFORMATION = extern struct { Reserved1: [3]LARGE_INTEGER, Reserved2: u32, StartAddress: ?*c_void, ClientId: CLIENT_ID, Priority: i32, BasePriority: i32, Reserved3: u32, ThreadState: u32, WaitReason: u32, }; pub const SYSTEM_REGISTRY_QUOTA_INFORMATION = extern struct { RegistryQuotaAllowed: u32, RegistryQuotaUsed: u32, Reserved1: ?*c_void, }; pub const SYSTEM_BASIC_INFORMATION = extern struct { Reserved1: [24]u8, Reserved2: [4]?*c_void, NumberOfProcessors: i8, }; pub const SYSTEM_TIMEOFDAY_INFORMATION = extern struct { Reserved1: [48]u8, }; pub const SYSTEM_PERFORMANCE_INFORMATION = extern struct { Reserved1: [312]u8, }; pub const SYSTEM_EXCEPTION_INFORMATION = extern struct { Reserved1: [16]u8, }; pub const SYSTEM_LOOKASIDE_INFORMATION = extern struct { Reserved1: [32]u8, }; pub const SYSTEM_INTERRUPT_INFORMATION = extern struct { Reserved1: [24]u8, }; pub const SYSTEM_POLICY_INFORMATION = extern struct { Reserved1: [2]?*c_void, Reserved2: [3]u32, }; pub const FILE_INFORMATION_CLASS = enum(i32) { n = 1, }; pub const FileDirectoryInformation = FILE_INFORMATION_CLASS.n; pub const SYSTEM_CODEINTEGRITY_INFORMATION = extern struct { Length: u32, CodeIntegrityOptions: u32, }; pub const SYSTEM_INFORMATION_CLASS = enum(i32) { BasicInformation = 0, PerformanceInformation = 2, TimeOfDayInformation = 3, ProcessInformation = 5, ProcessorPerformanceInformation = 8, InterruptInformation = 23, ExceptionInformation = 33, RegistryQuotaInformation = 37, LookasideInformation = 45, CodeIntegrityInformation = 103, PolicyInformation = 134, }; pub const SystemBasicInformation = SYSTEM_INFORMATION_CLASS.BasicInformation; pub const SystemPerformanceInformation = SYSTEM_INFORMATION_CLASS.PerformanceInformation; pub const SystemTimeOfDayInformation = SYSTEM_INFORMATION_CLASS.TimeOfDayInformation; pub const 
// Unscoped aliases for the SYSTEM_INFORMATION_CLASS values declared above.
SystemProcessInformation = SYSTEM_INFORMATION_CLASS.ProcessInformation;
pub const SystemProcessorPerformanceInformation = SYSTEM_INFORMATION_CLASS.ProcessorPerformanceInformation;
pub const SystemInterruptInformation = SYSTEM_INFORMATION_CLASS.InterruptInformation;
pub const SystemExceptionInformation = SYSTEM_INFORMATION_CLASS.ExceptionInformation;
pub const SystemRegistryQuotaInformation = SYSTEM_INFORMATION_CLASS.RegistryQuotaInformation;
pub const SystemLookasideInformation = SYSTEM_INFORMATION_CLASS.LookasideInformation;
pub const SystemCodeIntegrityInformation = SYSTEM_INFORMATION_CLASS.CodeIntegrityInformation;
pub const SystemPolicyInformation = SYSTEM_INFORMATION_CLASS.PolicyInformation;
pub const OBJECT_INFORMATION_CLASS = enum(i32) { BasicInformation = 0, TypeInformation = 2, };
pub const ObjectBasicInformation = OBJECT_INFORMATION_CLASS.BasicInformation;
pub const ObjectTypeInformation = OBJECT_INFORMATION_CLASS.TypeInformation;
pub const PUBLIC_OBJECT_BASIC_INFORMATION = extern struct { Attributes: u32, GrantedAccess: u32, HandleCount: u32, PointerCount: u32, Reserved: [10]u32, };
pub const PUBLIC_OBJECT_TYPE_INFORMATION = extern struct { TypeName: UNICODE_STRING, Reserved: [22]u32, };
pub const KEY_VALUE_ENTRY = extern struct { ValueName: ?*UNICODE_STRING, DataLength: u32, DataOffset: u32, Type: u32, };
// Registry key set-information selectors plus their unscoped aliases.
pub const KEY_SET_INFORMATION_CLASS = enum(i32) { KeyWriteTimeInformation = 0, KeyWow64FlagsInformation = 1, KeyControlFlagsInformation = 2, KeySetVirtualizationInformation = 3, KeySetDebugInformation = 4, KeySetHandleTagsInformation = 5, MaxKeySetInfoClass = 6, };
pub const KeyWriteTimeInformation = KEY_SET_INFORMATION_CLASS.KeyWriteTimeInformation;
pub const KeyWow64FlagsInformation = KEY_SET_INFORMATION_CLASS.KeyWow64FlagsInformation;
pub const KeyControlFlagsInformation = KEY_SET_INFORMATION_CLASS.KeyControlFlagsInformation;
pub const KeySetVirtualizationInformation = KEY_SET_INFORMATION_CLASS.KeySetVirtualizationInformation;
pub const KeySetDebugInformation = KEY_SET_INFORMATION_CLASS.KeySetDebugInformation;
pub const KeySetHandleTagsInformation = KEY_SET_INFORMATION_CLASS.KeySetHandleTagsInformation;
pub const MaxKeySetInfoClass = KEY_SET_INFORMATION_CLASS.MaxKeySetInfoClass;
pub const WINSTATIONINFOCLASS = enum(i32) { n = 8, };
pub const WinStationInformation = WINSTATIONINFOCLASS.n;
pub const WINSTATIONINFORMATIONW = extern struct { Reserved2: [70]u8, LogonId: u32, Reserved3: [1140]u8, };
pub const PWINSTATIONQUERYINFORMATIONW = fn( param0: ?HANDLE, param1: u32, param2: WINSTATIONINFOCLASS, param3: ?*c_void, param4: u32, param5: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// Camera UI COM control: class ID plus its scoped configuration enums.
const CLSID_CameraUIControl_Value = @import("../zig.zig").Guid.initString("16d5a2be-b1c5-47b3-8eae-ccbcf452c7e8");
pub const CLSID_CameraUIControl = &CLSID_CameraUIControl_Value;
pub const CameraUIControlMode = enum(i32) { Browse = 0, Linear = 1, };
// NOTE: not creating aliases because this enum is 'Scoped'
pub const CameraUIControlLinearSelectionMode = enum(i32) { Single = 0, Multiple = 1, };
// NOTE: not creating aliases because this enum is 'Scoped'
pub const CameraUIControlCaptureMode = enum(i32) { PhotoOrVideo = 0, Photo = 1, Video = 2, };
// NOTE: not creating aliases because this enum is 'Scoped'
pub const CameraUIControlPhotoFormat = enum(i32) { Jpeg = 0, Png = 1, JpegXR = 2, };
// NOTE: not creating aliases because this enum is 'Scoped'
pub const CameraUIControlVideoFormat = enum(i32) { Mp4 = 0, Wmv = 1, };
// NOTE: not creating aliases because this enum is 'Scoped'
pub const CameraUIControlViewType = enum(i32) { SingleItem = 0, ItemList = 1, };
// NOTE: not creating aliases because this enum is 'Scoped'
// TODO: this type is limited to platform 'windows8.0'
const IID_ICameraUIControlEventCallback_Value = @import("../zig.zig").Guid.initString("1bfa0c2c-fbcd-4776-bda4-88bf974e74f4");
pub const IID_ICameraUIControlEventCallback = &IID_ICameraUIControlEventCallback_Value;
pub const
// COM event-sink interface: a hand-laid-out vtable matching the Windows ABI.
// MethodMixin injects thin inline wrappers that forward through `vtable`.
ICameraUIControlEventCallback = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnStartupComplete: fn( self: *const ICameraUIControlEventCallback, ) callconv(@import("std").os.windows.WINAPI) void, OnSuspendComplete: fn( self: *const ICameraUIControlEventCallback, ) callconv(@import("std").os.windows.WINAPI) void, OnItemCaptured: fn( self: *const ICameraUIControlEventCallback, pszPath: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) void, OnItemDeleted: fn( self: *const ICameraUIControlEventCallback, pszPath: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) void, OnClosed: fn( self: *const ICameraUIControlEventCallback, ) callconv(@import("std").os.windows.WINAPI) void, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControlEventCallback_OnStartupComplete(self: *const T) callconv(.Inline) void { return @ptrCast(*const ICameraUIControlEventCallback.VTable, self.vtable).OnStartupComplete(@ptrCast(*const ICameraUIControlEventCallback, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControlEventCallback_OnSuspendComplete(self: *const T) callconv(.Inline) void { return @ptrCast(*const ICameraUIControlEventCallback.VTable, self.vtable).OnSuspendComplete(@ptrCast(*const ICameraUIControlEventCallback, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControlEventCallback_OnItemCaptured(self: *const T, pszPath: ?[*:0]const u16) callconv(.Inline) void { return @ptrCast(*const ICameraUIControlEventCallback.VTable, self.vtable).OnItemCaptured(@ptrCast(*const ICameraUIControlEventCallback, self), pszPath); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControlEventCallback_OnItemDeleted(self: *const T, pszPath: ?[*:0]const u16) callconv(.Inline) void { return @ptrCast(*const ICameraUIControlEventCallback.VTable, self.vtable).OnItemDeleted(@ptrCast(*const ICameraUIControlEventCallback, self), pszPath); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControlEventCallback_OnClosed(self: *const T) callconv(.Inline) void { return @ptrCast(*const ICameraUIControlEventCallback.VTable, self.vtable).OnClosed(@ptrCast(*const ICameraUIControlEventCallback, self)); }
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows8.0'
const IID_ICameraUIControl_Value = @import("../zig.zig").Guid.initString("b8733adf-3d68-4b8f-bb08-e28a0bed0376");
pub const IID_ICameraUIControl = &IID_ICameraUIControl_Value;
// Camera-capture UI control (Show/Close/Suspend/Resume plus item queries).
pub const ICameraUIControl = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Show: fn( self: *const ICameraUIControl, pWindow: ?*IUnknown, mode: CameraUIControlMode, selectionMode: CameraUIControlLinearSelectionMode, captureMode: CameraUIControlCaptureMode, photoFormat: CameraUIControlPhotoFormat, videoFormat: CameraUIControlVideoFormat, bHasCloseButton: BOOL, pEventCallback: ?*ICameraUIControlEventCallback, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Close: fn( self: *const ICameraUIControl, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Suspend: fn( self: *const ICameraUIControl, pbDeferralRequired: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Resume: fn( self: *const ICameraUIControl, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCurrentViewType: fn( self: *const ICameraUIControl, pViewType: ?*CameraUIControlViewType, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetActiveItem: fn( self: *const ICameraUIControl, pbstrActiveItemPath: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSelectedItems: fn( self: *const ICameraUIControl, ppSelectedItemPaths: ?*?*SAFEARRAY, ) callconv(@import("std").os.windows.WINAPI) HRESULT, RemoveCapturedItem: fn( self: *const ICameraUIControl, pszPath: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_Show(self: *const T, pWindow: ?*IUnknown, mode: CameraUIControlMode, selectionMode: CameraUIControlLinearSelectionMode, captureMode: CameraUIControlCaptureMode, photoFormat: CameraUIControlPhotoFormat, videoFormat: CameraUIControlVideoFormat, bHasCloseButton: BOOL, pEventCallback: ?*ICameraUIControlEventCallback) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).Show(@ptrCast(*const ICameraUIControl, self), pWindow, mode, selectionMode, captureMode, photoFormat, videoFormat, bHasCloseButton, pEventCallback); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_Close(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).Close(@ptrCast(*const ICameraUIControl, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_Suspend(self: *const T, pbDeferralRequired: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).Suspend(@ptrCast(*const ICameraUIControl, self), pbDeferralRequired); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_Resume(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).Resume(@ptrCast(*const ICameraUIControl, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_GetCurrentViewType(self: *const T, pViewType: ?*CameraUIControlViewType) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).GetCurrentViewType(@ptrCast(*const ICameraUIControl, self), pViewType); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_GetActiveItem(self: *const T, pbstrActiveItemPath: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).GetActiveItem(@ptrCast(*const ICameraUIControl, self), pbstrActiveItemPath); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_GetSelectedItems(self: *const T, ppSelectedItemPaths: ?*?*SAFEARRAY) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).GetSelectedItems(@ptrCast(*const ICameraUIControl, self), ppSelectedItemPaths); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICameraUIControl_RemoveCapturedItem(self: *const T, pszPath: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ICameraUIControl.VTable, self.vtable).RemoveCapturedItem(@ptrCast(*const ICameraUIControl, self), pszPath); }
};}
pub usingnamespace MethodMixin(@This());
};
// Edition-upgrade COM objects: class IDs for the helper and broker.
const CLSID_EditionUpgradeHelper_Value = @import("../zig.zig").Guid.initString("01776df3-b9af-4e50-9b1c-56e93116d704");
pub const CLSID_EditionUpgradeHelper = &CLSID_EditionUpgradeHelper_Value;
const CLSID_EditionUpgradeBroker_Value = @import("../zig.zig").Guid.initString("c4270827-4f39-45df-9288-12ff6b85a921");
pub const CLSID_EditionUpgradeBroker = &CLSID_EditionUpgradeBroker_Value;
// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IEditionUpgradeHelper_Value = @import("../zig.zig").Guid.initString("d3e9e342-5deb-43b6-849e-6913b85d503a");
pub const IID_IEditionUpgradeHelper = &IID_IEditionUpgradeHelper_Value;
pub const IEditionUpgradeHelper = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CanUpgrade: fn( self: *const
IEditionUpgradeHelper, isAllowed: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UpdateOperatingSystem: fn( self: *const IEditionUpgradeHelper, contentId: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ShowProductKeyUI: fn( self: *const IEditionUpgradeHelper, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetOsProductContentId: fn( self: *const IEditionUpgradeHelper, contentId: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetGenuineLocalStatus: fn( self: *const IEditionUpgradeHelper, isGenuine: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeHelper_CanUpgrade(self: *const T, isAllowed: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeHelper.VTable, self.vtable).CanUpgrade(@ptrCast(*const IEditionUpgradeHelper, self), isAllowed); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeHelper_UpdateOperatingSystem(self: *const T, contentId: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeHelper.VTable, self.vtable).UpdateOperatingSystem(@ptrCast(*const IEditionUpgradeHelper, self), contentId); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeHelper_ShowProductKeyUI(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeHelper.VTable, self.vtable).ShowProductKeyUI(@ptrCast(*const IEditionUpgradeHelper, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeHelper_GetOsProductContentId(self: *const T, contentId: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeHelper.VTable, self.vtable).GetOsProductContentId(@ptrCast(*const IEditionUpgradeHelper, self), contentId); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeHelper_GetGenuineLocalStatus(self: *const T, isGenuine: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeHelper.VTable, self.vtable).GetGenuineLocalStatus(@ptrCast(*const IEditionUpgradeHelper, self), isGenuine); }
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IWindowsLockModeHelper_Value = @import("../zig.zig").Guid.initString("f342d19e-cc22-4648-bb5d-03ccf75b47c5");
pub const IID_IWindowsLockModeHelper = &IID_IWindowsLockModeHelper_Value;
// Single-method interface reporting the Windows S-mode flag.
pub const IWindowsLockModeHelper = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSMode: fn( self: *const IWindowsLockModeHelper, isSmode: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWindowsLockModeHelper_GetSMode(self: *const T, isSmode: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IWindowsLockModeHelper.VTable, self.vtable).GetSMode(@ptrCast(*const IWindowsLockModeHelper, self), isSmode); }
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IEditionUpgradeBroker_Value = @import("../zig.zig").Guid.initString("ff19cbcf-9455-4937-b872-6b7929a460af");
pub const IID_IEditionUpgradeBroker = &IID_IEditionUpgradeBroker_Value;
// Broker variant of the edition-upgrade API; NOTE parentHandle is a u32, not ?HWND.
pub const IEditionUpgradeBroker = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, InitializeParentWindow: fn( self: *const IEditionUpgradeBroker, parentHandle: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UpdateOperatingSystem: fn( self: *const IEditionUpgradeBroker, parameter: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ShowProductKeyUI: fn( self: *const IEditionUpgradeBroker, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CanUpgrade: fn( self: *const IEditionUpgradeBroker, ) callconv(@import("std").os.windows.WINAPI) HRESULT, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeBroker_InitializeParentWindow(self: *const T, parentHandle: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeBroker.VTable, self.vtable).InitializeParentWindow(@ptrCast(*const IEditionUpgradeBroker, self), parentHandle); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeBroker_UpdateOperatingSystem(self: *const T, parameter: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeBroker.VTable, self.vtable).UpdateOperatingSystem(@ptrCast(*const IEditionUpgradeBroker, self), parameter); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeBroker_ShowProductKeyUI(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeBroker.VTable, self.vtable).ShowProductKeyUI(@ptrCast(*const IEditionUpgradeBroker, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEditionUpgradeBroker_CanUpgrade(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEditionUpgradeBroker.VTable, self.vtable).CanUpgrade(@ptrCast(*const IEditionUpgradeBroker, self)); }
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IContainerActivationHelper_Value = @import("../zig.zig").Guid.initString("b524f93f-80d5-4ec7-ae9e-d66e93ade1fa");
pub const IID_IContainerActivationHelper = &IID_IContainerActivationHelper_Value;
// NOTE(review): isAllowed here is ?*i16 (VARIANT_BOOL-sized), unlike the ?*BOOL used above.
pub const IContainerActivationHelper = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CanActivateClientVM: fn( self: *const IContainerActivationHelper, isAllowed: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContainerActivationHelper_CanActivateClientVM(self: *const T, isAllowed: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IContainerActivationHelper.VTable, self.vtable).CanActivateClientVM(@ptrCast(*const IContainerActivationHelper, self), isAllowed); }
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IClipServiceNotificationHelper_Value = @import("../zig.zig").Guid.initString("c39948f0-6142-44fd-98ca-e1681a8d68b5");
pub const IID_IClipServiceNotificationHelper = &IID_IClipServiceNotificationHelper_Value;
pub const IClipServiceNotificationHelper = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, ShowToast: fn( self: *const IClipServiceNotificationHelper, titleText: ?BSTR, bodyText: ?BSTR, packageName: ?BSTR, appId: ?BSTR, launchCommand: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IClipServiceNotificationHelper_ShowToast(self: *const T, titleText: ?BSTR, bodyText: ?BSTR, packageName: ?BSTR, appId: ?BSTR, launchCommand: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IClipServiceNotificationHelper.VTable, self.vtable).ShowToast(@ptrCast(*const IClipServiceNotificationHelper, self), titleText, bodyText, packageName, appId, launchCommand); }
};}
pub usingnamespace MethodMixin(@This());
};
// Feature-staging API: when a feature state change takes effect.
pub const FEATURE_CHANGE_TIME = enum(i32) { READ = 0, MODULE_RELOAD = 1, SESSION = 2, REBOOT = 3, };
pub const FEATURE_CHANGE_TIME_READ = FEATURE_CHANGE_TIME.READ;
pub const
// Unscoped aliases for FEATURE_CHANGE_TIME (continued) and FEATURE_ENABLED_STATE.
FEATURE_CHANGE_TIME_MODULE_RELOAD = FEATURE_CHANGE_TIME.MODULE_RELOAD;
pub const FEATURE_CHANGE_TIME_SESSION = FEATURE_CHANGE_TIME.SESSION;
pub const FEATURE_CHANGE_TIME_REBOOT = FEATURE_CHANGE_TIME.REBOOT;
pub const FEATURE_ENABLED_STATE = enum(i32) { DEFAULT = 0, DISABLED = 1, ENABLED = 2, };
pub const FEATURE_ENABLED_STATE_DEFAULT = FEATURE_ENABLED_STATE.DEFAULT;
pub const FEATURE_ENABLED_STATE_DISABLED = FEATURE_ENABLED_STATE.DISABLED;
pub const FEATURE_ENABLED_STATE_ENABLED = FEATURE_ENABLED_STATE.ENABLED;
// Diagnostic record locating where a feature error originated (file/module/offset).
pub const FEATURE_ERROR = extern struct { hr: HRESULT, lineNumber: u16, file: ?[*:0]const u8, process: ?[*:0]const u8, module: ?[*:0]const u8, callerReturnAddressOffset: u32, callerModule: ?[*:0]const u8, message: ?[*:0]const u8, originLineNumber: u16, originFile: ?[*:0]const u8, originModule: ?[*:0]const u8, originCallerReturnAddressOffset: u32, originCallerModule: ?[*:0]const u8, originName: ?[*:0]const u8, };
pub const PFEATURE_STATE_CHANGE_CALLBACK = fn( context: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) void;
// Legacy DCI (Display Control Interface) structures; the isize members carry
// function pointers/handles in the original C headers — TODO confirm.
pub const DCICMD = extern struct { dwCommand: u32, dwParam1: u32, dwParam2: u32, dwVersion: u32, dwReserved: u32, };
pub const DCICREATEINPUT = extern struct { cmd: DCICMD, dwCompression: u32, dwMask: [3]u32, dwWidth: u32, dwHeight: u32, dwDCICaps: u32, dwBitCount: u32, lpSurface: ?*c_void, };
pub const DCISURFACEINFO = extern struct { dwSize: u32, dwDCICaps: u32, dwCompression: u32, dwMask: [3]u32, dwWidth: u32, dwHeight: u32, lStride: i32, dwBitCount: u32, dwOffSurface: usize, wSelSurface: u16, wReserved: u16, dwReserved1: u32, dwReserved2: u32, dwReserved3: u32, BeginAccess: isize, EndAccess: isize, DestroySurface: isize, };
pub const ENUM_CALLBACK = fn( lpSurfaceInfo: ?*DCISURFACEINFO, lpContext: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) void;
pub const DCIENUMINPUT = extern struct { cmd: DCICMD, rSrc: RECT, rDst: RECT, EnumCallback: isize, lpContext: ?*c_void, };
pub const DCIOFFSCREEN = extern struct { dciInfo: DCISURFACEINFO, Draw: isize, SetClipList: isize, SetDestination: isize, };
pub const DCIOVERLAY = extern struct { dciInfo: DCISURFACEINFO, dwChromakeyValue: u32, dwChromakeyMask: u32, };
pub const WINWATCHNOTIFYPROC = fn( hww: ?HWINWATCH, hwnd: ?HWND, code: u32, lParam: LPARAM, ) callconv(@import("std").os.windows.WINAPI) void;
// ANSI (A) / wide (W) string-table pairs used by the Advpack-style setup entries below.
pub const STRENTRYA = extern struct { pszName: ?PSTR, pszValue: ?PSTR, };
pub const STRENTRYW = extern struct { pszName: ?PWSTR, pszValue: ?PWSTR, };
pub const STRTABLEA = extern struct { cEntries: u32, pse: ?*STRENTRYA, };
pub const STRTABLEW = extern struct { cEntries: u32, pse: ?*STRENTRYW, };
pub const REGINSTALLA = fn( hm: ?HINSTANCE, pszSection: ?[*:0]const u8, pstTable: ?*STRTABLEA, ) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub const CABINFOA = extern struct { pszCab: ?PSTR, pszInf: ?PSTR, pszSection: ?PSTR, szSrcPath: [260]CHAR, dwFlags: u32, };
pub const CABINFOW = extern struct { pszCab: ?PWSTR, pszInf: ?PWSTR, pszSection: ?PWSTR, szSrcPath: [260]u16, dwFlags: u32, };
pub const PERUSERSECTIONA = extern struct { szGUID: [59]CHAR, szDispName: [128]CHAR, szLocale: [10]CHAR, szStub: [1040]CHAR, szVersion: [32]CHAR, szCompID: [128]CHAR, dwIsInstalled: u32, bRollback: BOOL, };
pub const PERUSERSECTIONW = extern struct { szGUID: [59]u16, szDispName: [128]u16, szLocale: [10]u16, szStub: [1040]u16, szVersion: [32]u16, szCompID: [128]u16, dwIsInstalled: u32, bRollback: BOOL, };
// Legacy IME (input method editor) structures.
pub const IMESTRUCT = extern struct { fnc: u32, wParam: WPARAM, wCount: u32, dchSource: u32, dchDest: u32, lParam1: LPARAM, lParam2: LPARAM, lParam3: LPARAM, };
pub const UNDETERMINESTRUCT = extern struct { dwSize: u32, uDefIMESize: u32, uDefIMEPos: u32, uUndetTextLen: u32, uUndetTextPos: u32, uUndetAttrPos: u32, uCursorPos: u32, uDeltaStart: u32, uDetermineTextLen: u32, uDetermineTextPos: u32, uDetermineDelimPos: u32, uYomiTextLen: u32, uYomiTextPos: u32, uYomiDelimPos: u32, };
pub const STRINGEXSTRUCT = extern struct { dwSize: u32, uDeterminePos: u32, uDetermineDelimPos: u32, uYomiPos: u32, uYomiDelimPos: u32, };
pub const DATETIME = extern struct { year: u16, month: u16, day: u16, hour: u16, min: u16, sec: u16, };
pub const IMEPROA = extern struct { hWnd: ?HWND, InstDate: DATETIME, wVersion: u32, szDescription: [50]u8, szName: [80]u8, szOptions: [30]u8, };
pub const IMEPROW = extern struct { hWnd: ?HWND, InstDate: DATETIME, wVersion: u32, szDescription: [50]u16, szName: [80]u16, szOptions: [30]u16, };
// Shell / Internet Explorer automation class IDs.
const CLSID_WebBrowser_V1_Value = @import("../zig.zig").Guid.initString("eab22ac3-30c1-11cf-a7eb-0000c05bae0b");
pub const CLSID_WebBrowser_V1 = &CLSID_WebBrowser_V1_Value;
const CLSID_WebBrowser_Value = @import("../zig.zig").Guid.initString("8856f961-340a-11d0-a96b-00c04fd705a2");
pub const CLSID_WebBrowser = &CLSID_WebBrowser_Value;
const CLSID_InternetExplorer_Value = @import("../zig.zig").Guid.initString("0002df01-0000-0000-c000-000000000046");
pub const CLSID_InternetExplorer = &CLSID_InternetExplorer_Value;
const CLSID_InternetExplorerMedium_Value = @import("../zig.zig").Guid.initString("d5e8041d-920f-45e9-b8fb-b1deb82c6e5e");
pub const CLSID_InternetExplorerMedium = &CLSID_InternetExplorerMedium_Value;
const CLSID_ShellBrowserWindow_Value = @import("../zig.zig").Guid.initString("c08afd90-f2a1-11d1-8455-00a0c91f3880");
pub const CLSID_ShellBrowserWindow = &CLSID_ShellBrowserWindow_Value;
const CLSID_ShellWindows_Value = @import("../zig.zig").Guid.initString("9ba05972-f6a8-11cf-a442-00a0c90a8f39");
pub const CLSID_ShellWindows = &CLSID_ShellWindows_Value;
const CLSID_ShellUIHelper_Value = @import("../zig.zig").Guid.initString("64ab4bb7-111e-11d1-8f79-00c04fc2fbe1");
pub const CLSID_ShellUIHelper = &CLSID_ShellUIHelper_Value;
const CLSID_ShellNameSpace_Value = @import("../zig.zig").Guid.initString("55136805-b2de-11d1-b9f2-00a0c98bc547");
pub const CLSID_ShellNameSpace = &CLSID_ShellNameSpace_Value;
const CLSID_CScriptErrorList_Value = @import("../zig.zig").Guid.initString("efd01300-160f-11d2-bb2e-00805ff7efca");
pub const CLSID_CScriptErrorList = &CLSID_CScriptErrorList_Value;
pub const CommandStateChangeConstants = enum(i32) { UPDATECOMMANDS = -1, NAVIGATEFORWARD = 1, NAVIGATEBACK = 2, };
pub const CSC_UPDATECOMMANDS = CommandStateChangeConstants.UPDATECOMMANDS;
pub const CSC_NAVIGATEFORWARD = CommandStateChangeConstants.NAVIGATEFORWARD;
pub const CSC_NAVIGATEBACK = CommandStateChangeConstants.NAVIGATEBACK;
pub const SecureLockIconConstants = enum(i32) { Unsecure = 0, Mixed = 1, SecureUnknownBits = 2, Secure40Bit = 3, Secure56Bit = 4, SecureFortezza = 5, Secure128Bit = 6, };
pub const secureLockIconUnsecure = SecureLockIconConstants.Unsecure;
pub const secureLockIconMixed = SecureLockIconConstants.Mixed;
pub const secureLockIconSecureUnknownBits = SecureLockIconConstants.SecureUnknownBits;
pub const secureLockIconSecure40Bit = SecureLockIconConstants.Secure40Bit;
pub const secureLockIconSecure56Bit = SecureLockIconConstants.Secure56Bit;
pub const secureLockIconSecureFortezza = SecureLockIconConstants.SecureFortezza;
pub const secureLockIconSecure128Bit = SecureLockIconConstants.Secure128Bit;
pub const NewProcessCauseConstants = enum(i32) { t = 1, };
pub const ProtectedModeRedirect = NewProcessCauseConstants.t;
// Browser navigation flag values (powers of two; Reserved7 is the i32 sign bit).
// Presumably used with IWebBrowser.Navigate's Flags argument — TODO confirm.
pub const BrowserNavConstants = enum(i32) { OpenInNewWindow = 1, NoHistory = 2, NoReadFromCache = 4, NoWriteToCache = 8, AllowAutosearch = 16, BrowserBar = 32, Hyperlink = 64, EnforceRestricted = 128, NewWindowsManaged = 256, UntrustedForDownload = 512, TrustedForActiveX = 1024, OpenInNewTab = 2048, OpenInBackgroundTab = 4096, KeepWordWheelText = 8192, VirtualTab = 16384, BlockRedirectsXDomain = 32768, OpenNewForegroundTab = 65536, TravelLogScreenshot = 131072, DeferUnload = 262144, Speculative = 524288, SuggestNewWindow = 1048576, SuggestNewTab = 2097152, Reserved1 = 4194304, HomepageNavigate = 8388608, Refresh = 16777216, HostNavigation = 33554432, Reserved2 = 67108864, Reserved3 = 134217728, Reserved4 = 268435456, Reserved5 = 536870912, Reserved6 = 1073741824, Reserved7 = -2147483648, };
pub const navOpenInNewWindow = BrowserNavConstants.OpenInNewWindow;
pub const navNoHistory = BrowserNavConstants.NoHistory;
pub const navNoReadFromCache = BrowserNavConstants.NoReadFromCache;
pub const navNoWriteToCache = BrowserNavConstants.NoWriteToCache;
pub const navAllowAutosearch = BrowserNavConstants.AllowAutosearch;
pub const navBrowserBar = BrowserNavConstants.BrowserBar;
pub const navHyperlink = BrowserNavConstants.Hyperlink;
pub const navEnforceRestricted = BrowserNavConstants.EnforceRestricted;
pub const navNewWindowsManaged = BrowserNavConstants.NewWindowsManaged;
pub const navUntrustedForDownload = BrowserNavConstants.UntrustedForDownload;
pub const navTrustedForActiveX = BrowserNavConstants.TrustedForActiveX;
pub const navOpenInNewTab = BrowserNavConstants.OpenInNewTab;
pub const navOpenInBackgroundTab = BrowserNavConstants.OpenInBackgroundTab;
pub const navKeepWordWheelText = BrowserNavConstants.KeepWordWheelText;
pub const navVirtualTab = BrowserNavConstants.VirtualTab;
pub const navBlockRedirectsXDomain = BrowserNavConstants.BlockRedirectsXDomain;
pub const navOpenNewForegroundTab = BrowserNavConstants.OpenNewForegroundTab;
pub const navTravelLogScreenshot = BrowserNavConstants.TravelLogScreenshot;
pub const navDeferUnload = BrowserNavConstants.DeferUnload;
pub const navSpeculative = BrowserNavConstants.Speculative;
pub const navSuggestNewWindow = BrowserNavConstants.SuggestNewWindow;
pub const navSuggestNewTab = BrowserNavConstants.SuggestNewTab;
pub const navReserved1 = BrowserNavConstants.Reserved1;
pub const navHomepageNavigate = BrowserNavConstants.HomepageNavigate;
pub const navRefresh = BrowserNavConstants.Refresh;
pub const navHostNavigation = BrowserNavConstants.HostNavigation;
pub const navReserved2 = BrowserNavConstants.Reserved2;
pub const navReserved3 = BrowserNavConstants.Reserved3;
pub const navReserved4 =
BrowserNavConstants.Reserved4;
pub const navReserved5 = BrowserNavConstants.Reserved5;
pub const navReserved6 = BrowserNavConstants.Reserved6;
pub const navReserved7 = BrowserNavConstants.Reserved7;
pub const RefreshConstants = enum(i32) { NORMAL = 0, IFEXPIRED = 1, COMPLETELY = 3, };
pub const REFRESH_NORMAL = RefreshConstants.NORMAL;
pub const REFRESH_IFEXPIRED = RefreshConstants.IFEXPIRED;
pub const REFRESH_COMPLETELY = RefreshConstants.COMPLETELY;
const IID_IWebBrowser_Value = @import("../zig.zig").Guid.initString("eab22ac1-30c1-11cf-a7eb-0000c05bae0b");
pub const IID_IWebBrowser = &IID_IWebBrowser_Value;
// IDispatch-based web-browser automation interface. The get_/put_ entries are
// COM property accessors; vtable slot order must match the Windows ABI exactly.
pub const IWebBrowser = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, GoBack: fn( self: *const IWebBrowser, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GoForward: fn( self: *const IWebBrowser, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GoHome: fn( self: *const IWebBrowser, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GoSearch: fn( self: *const IWebBrowser, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Navigate: fn( self: *const IWebBrowser, URL: ?BSTR, Flags: ?*VARIANT, TargetFrameName: ?*VARIANT, PostData: ?*VARIANT, Headers: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IWebBrowser, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh2: fn( self: *const IWebBrowser, Level: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Stop: fn( self: *const IWebBrowser, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Application: fn( self: *const IWebBrowser, ppDisp: ?*?*IDispatch, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Parent: fn( self: *const IWebBrowser, ppDisp: ?*?*IDispatch, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Container: fn( self: *const IWebBrowser, ppDisp: ?*?*IDispatch, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Document: fn( self: *const IWebBrowser, ppDisp: ?*?*IDispatch, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_TopLevelContainer: fn( self: *const IWebBrowser, pBool: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Type: fn( self: *const IWebBrowser, Type: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Left: fn( self: *const IWebBrowser, pl: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Left: fn( self: *const IWebBrowser, Left: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Top: fn( self: *const IWebBrowser, pl: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Top: fn( self: *const IWebBrowser, Top: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Width: fn( self: *const IWebBrowser, pl: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Width: fn( self: *const IWebBrowser, Width: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Height: fn( self: *const IWebBrowser, pl: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Height: fn( self: *const IWebBrowser, Height: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_LocationName: fn( self: *const IWebBrowser, LocationName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_LocationURL: fn( self: *const IWebBrowser, LocationURL: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Busy: fn( self: *const IWebBrowser, pBool: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, };
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_GoBack(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).GoBack(@ptrCast(*const IWebBrowser, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_GoForward(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).GoForward(@ptrCast(*const IWebBrowser, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_GoHome(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).GoHome(@ptrCast(*const IWebBrowser, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_GoSearch(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).GoSearch(@ptrCast(*const IWebBrowser, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_Navigate(self: *const T, URL: ?BSTR, Flags: ?*VARIANT, TargetFrameName: ?*VARIANT, PostData: ?*VARIANT, Headers: ?*VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).Navigate(@ptrCast(*const IWebBrowser, self), URL, Flags, TargetFrameName, PostData, Headers); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).Refresh(@ptrCast(*const IWebBrowser, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_Refresh2(self: *const T, Level: ?*VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).Refresh2(@ptrCast(*const IWebBrowser, self), Level); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_Stop(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).Stop(@ptrCast(*const IWebBrowser, self)); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_get_Application(self: *const T, ppDisp: ?*?*IDispatch) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Application(@ptrCast(*const IWebBrowser, self), ppDisp); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWebBrowser_get_Parent(self: *const T, ppDisp: ?*?*IDispatch) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Parent(@ptrCast(*const IWebBrowser, self), ppDisp); }
// NOTE: method is namespaced
with interface name to avoid conflicts for now pub fn IWebBrowser_get_Container(self: *const T, ppDisp: ?*?*IDispatch) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Container(@ptrCast(*const IWebBrowser, self), ppDisp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_Document(self: *const T, ppDisp: ?*?*IDispatch) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Document(@ptrCast(*const IWebBrowser, self), ppDisp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_TopLevelContainer(self: *const T, pBool: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_TopLevelContainer(@ptrCast(*const IWebBrowser, self), pBool); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_Type(self: *const T, Type: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Type(@ptrCast(*const IWebBrowser, self), Type); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_Left(self: *const T, pl: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Left(@ptrCast(*const IWebBrowser, self), pl); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_put_Left(self: *const T, Left: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).put_Left(@ptrCast(*const IWebBrowser, self), Left); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_Top(self: *const T, pl: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Top(@ptrCast(*const IWebBrowser, self), pl); } // NOTE: method is namespaced with interface name to avoid 
conflicts for now pub fn IWebBrowser_put_Top(self: *const T, Top: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).put_Top(@ptrCast(*const IWebBrowser, self), Top); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_Width(self: *const T, pl: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Width(@ptrCast(*const IWebBrowser, self), pl); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_put_Width(self: *const T, Width: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).put_Width(@ptrCast(*const IWebBrowser, self), Width); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_Height(self: *const T, pl: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Height(@ptrCast(*const IWebBrowser, self), pl); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_put_Height(self: *const T, Height: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).put_Height(@ptrCast(*const IWebBrowser, self), Height); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_LocationName(self: *const T, LocationName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_LocationName(@ptrCast(*const IWebBrowser, self), LocationName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWebBrowser_get_LocationURL(self: *const T, LocationURL: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_LocationURL(@ptrCast(*const IWebBrowser, self), LocationURL); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IWebBrowser_get_Busy(self: *const T, pBool: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IWebBrowser.VTable, self.vtable).get_Busy(@ptrCast(*const IWebBrowser, self), pBool); } };} pub usingnamespace MethodMixin(@This()); }; const IID_DWebBrowserEvents_Value = @import("../zig.zig").Guid.initString("eab22ac2-30c1-11cf-a7eb-0000c05bae0b"); pub const IID_DWebBrowserEvents = &IID_DWebBrowserEvents_Value; pub const DWebBrowserEvents = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); };} pub usingnamespace MethodMixin(@This()); }; const IID_IWebBrowserApp_Value = @import("../zig.zig").Guid.initString("0002df05-0000-0000-c000-000000000046"); pub const IID_IWebBrowserApp = &IID_IWebBrowserApp_Value; pub const IWebBrowserApp = extern struct { pub const VTable = extern struct { base: IWebBrowser.VTable, Quit: fn( self: *const IWebBrowserApp, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ClientToWindow: fn( self: *const IWebBrowserApp, pcx: ?*i32, pcy: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, PutProperty: fn( self: *const IWebBrowserApp, Property: ?BSTR, vtValue: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetProperty: fn( self: *const IWebBrowserApp, Property: ?BSTR, pvtValue: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Name: fn( self: *const IWebBrowserApp, Name: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_HWND: fn( self: *const IWebBrowserApp, pHWND: ?*SHANDLE_PTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
        get_FullName: fn(
            self: *const IWebBrowserApp,
            FullName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Path: fn(
            self: *const IWebBrowserApp,
            Path: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Visible: fn(
            self: *const IWebBrowserApp,
            pBool: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Visible: fn(
            self: *const IWebBrowserApp,
            Value: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_StatusBar: fn(
            self: *const IWebBrowserApp,
            pBool: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_StatusBar: fn(
            self: *const IWebBrowserApp,
            Value: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_StatusText: fn(
            self: *const IWebBrowserApp,
            StatusText: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_StatusText: fn(
            self: *const IWebBrowserApp,
            StatusText: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ToolBar: fn(
            self: *const IWebBrowserApp,
            Value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ToolBar: fn(
            self: *const IWebBrowserApp,
            Value: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MenuBar: fn(
            self: *const IWebBrowserApp,
            Value: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_MenuBar: fn(
            self: *const IWebBrowserApp,
            Value: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FullScreen: fn(
            self: *const IWebBrowserApp,
            pbFullScreen: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_FullScreen: fn(
            self: *const IWebBrowserApp,
            bFullScreen: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline thunks; inherits all IWebBrowser (and transitively IDispatch) thunks.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWebBrowser.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_Quit(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).Quit(@ptrCast(*const IWebBrowserApp, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_ClientToWindow(self: *const T, pcx: ?*i32, pcy: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).ClientToWindow(@ptrCast(*const IWebBrowserApp, self), pcx, pcy);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_PutProperty(self: *const T, Property: ?BSTR, vtValue: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).PutProperty(@ptrCast(*const IWebBrowserApp, self), Property, vtValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_GetProperty(self: *const T, Property: ?BSTR, pvtValue: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).GetProperty(@ptrCast(*const IWebBrowserApp, self), Property, pvtValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_Name(self: *const T, Name: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_Name(@ptrCast(*const IWebBrowserApp, self), Name);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_HWND(self: *const T, pHWND: ?*SHANDLE_PTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_HWND(@ptrCast(*const IWebBrowserApp, self), pHWND);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_FullName(self: *const T, FullName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_FullName(@ptrCast(*const IWebBrowserApp, self), FullName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_Path(self: *const T, Path: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_Path(@ptrCast(*const IWebBrowserApp, self), Path);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_Visible(self: *const T, pBool: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_Visible(@ptrCast(*const IWebBrowserApp, self), pBool);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_put_Visible(self: *const T, Value: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).put_Visible(@ptrCast(*const IWebBrowserApp, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_StatusBar(self: *const T, pBool: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_StatusBar(@ptrCast(*const IWebBrowserApp, self), pBool);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_put_StatusBar(self: *const T, Value: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).put_StatusBar(@ptrCast(*const IWebBrowserApp, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_StatusText(self: *const T, StatusText: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_StatusText(@ptrCast(*const IWebBrowserApp, self), StatusText);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_put_StatusText(self: *const T, StatusText: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).put_StatusText(@ptrCast(*const IWebBrowserApp, self), StatusText);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_ToolBar(self: *const T, Value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_ToolBar(@ptrCast(*const IWebBrowserApp, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_put_ToolBar(self: *const T, Value: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).put_ToolBar(@ptrCast(*const IWebBrowserApp, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_MenuBar(self: *const T, Value: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_MenuBar(@ptrCast(*const IWebBrowserApp, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_put_MenuBar(self: *const T, Value: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).put_MenuBar(@ptrCast(*const IWebBrowserApp, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_get_FullScreen(self: *const T, pbFullScreen: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).get_FullScreen(@ptrCast(*const IWebBrowserApp, self), pbFullScreen);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowserApp_put_FullScreen(self: *const T, bFullScreen: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowserApp.VTable, self.vtable).put_FullScreen(@ptrCast(*const IWebBrowserApp, self), bFullScreen);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IWebBrowser2_Value = @import("../zig.zig").Guid.initString("d30c1661-cdaf-11d0-8a3e-00c04fc9e26e");
pub const IID_IWebBrowser2 = &IID_IWebBrowser2_Value;
// Current-generation browser interface; extends IWebBrowserApp.
pub const IWebBrowser2 = extern struct {
    pub const VTable = extern struct {
        base: IWebBrowserApp.VTable,
        Navigate2: fn(
            self: *const IWebBrowser2,
            URL: ?*VARIANT,
            Flags: ?*VARIANT,
            TargetFrameName: ?*VARIANT,
            PostData: ?*VARIANT,
            Headers: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        QueryStatusWB: fn(
            self: *const IWebBrowser2,
            cmdID: OLECMDID,
            pcmdf: ?*OLECMDF,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ExecWB: fn(
            self: *const IWebBrowser2,
            cmdID: OLECMDID,
            cmdexecopt: OLECMDEXECOPT,
            pvaIn: ?*VARIANT,
            pvaOut: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ShowBrowserBar: fn(
            self: *const IWebBrowser2,
            pvaClsid: ?*VARIANT,
            pvarShow: ?*VARIANT,
            pvarSize: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ReadyState: fn(
            self: *const IWebBrowser2,
            plReadyState: ?*READYSTATE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Offline: fn(
            self: *const IWebBrowser2,
            pbOffline: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Offline: fn(
            self: *const IWebBrowser2,
            bOffline: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Silent: fn(
            self: *const IWebBrowser2,
            pbSilent: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Silent: fn(
            self: *const IWebBrowser2,
            bSilent: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RegisterAsBrowser: fn(
            self: *const IWebBrowser2,
            pbRegister: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_RegisterAsBrowser: fn(
            self: *const IWebBrowser2,
            bRegister: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RegisterAsDropTarget: fn(
            self: *const IWebBrowser2,
            pbRegister: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_RegisterAsDropTarget: fn(
            self: *const IWebBrowser2,
            bRegister: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TheaterMode: fn(
            self: *const IWebBrowser2,
            pbRegister: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_TheaterMode: fn(
            self: *const IWebBrowser2,
            bRegister: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AddressBar: fn(
            self: *const IWebBrowser2,
            Value: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_AddressBar: fn(
            self: *const IWebBrowser2,
            Value: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Resizable: fn(
            self: *const IWebBrowser2,
            Value: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Resizable: fn(
            self: *const IWebBrowser2,
            Value: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline thunks; inherits the IWebBrowserApp mixin chain.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWebBrowserApp.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_Navigate2(self: *const T, URL: ?*VARIANT, Flags: ?*VARIANT, TargetFrameName: ?*VARIANT, PostData: ?*VARIANT, Headers: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).Navigate2(@ptrCast(*const IWebBrowser2, self), URL, Flags, TargetFrameName, PostData, Headers);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_QueryStatusWB(self: *const T, cmdID: OLECMDID, pcmdf: ?*OLECMDF) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).QueryStatusWB(@ptrCast(*const IWebBrowser2, self), cmdID, pcmdf);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_ExecWB(self: *const T, cmdID: OLECMDID, cmdexecopt: OLECMDEXECOPT, pvaIn: ?*VARIANT, pvaOut: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).ExecWB(@ptrCast(*const IWebBrowser2, self), cmdID, cmdexecopt, pvaIn, pvaOut);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_ShowBrowserBar(self: *const T, pvaClsid: ?*VARIANT, pvarShow: ?*VARIANT, pvarSize: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).ShowBrowserBar(@ptrCast(*const IWebBrowser2, self), pvaClsid, pvarShow, pvarSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_ReadyState(self: *const T, plReadyState: ?*READYSTATE) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_ReadyState(@ptrCast(*const IWebBrowser2, self), plReadyState);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_Offline(self: *const T, pbOffline: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_Offline(@ptrCast(*const IWebBrowser2, self), pbOffline);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_put_Offline(self: *const T, bOffline: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).put_Offline(@ptrCast(*const IWebBrowser2, self), bOffline);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_Silent(self: *const T, pbSilent: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_Silent(@ptrCast(*const IWebBrowser2, self), pbSilent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_put_Silent(self: *const T, bSilent: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).put_Silent(@ptrCast(*const IWebBrowser2, self), bSilent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_RegisterAsBrowser(self: *const T, pbRegister: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_RegisterAsBrowser(@ptrCast(*const IWebBrowser2, self), pbRegister);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_put_RegisterAsBrowser(self: *const T, bRegister: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).put_RegisterAsBrowser(@ptrCast(*const IWebBrowser2, self), bRegister);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_RegisterAsDropTarget(self: *const T, pbRegister: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_RegisterAsDropTarget(@ptrCast(*const IWebBrowser2, self), pbRegister);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_put_RegisterAsDropTarget(self: *const T, bRegister: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).put_RegisterAsDropTarget(@ptrCast(*const IWebBrowser2, self), bRegister);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_TheaterMode(self: *const T, pbRegister: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_TheaterMode(@ptrCast(*const IWebBrowser2, self), pbRegister);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_put_TheaterMode(self: *const T, bRegister: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).put_TheaterMode(@ptrCast(*const IWebBrowser2, self), bRegister);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_AddressBar(self: *const T, Value: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_AddressBar(@ptrCast(*const IWebBrowser2, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_put_AddressBar(self: *const T, Value: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).put_AddressBar(@ptrCast(*const IWebBrowser2, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_get_Resizable(self: *const T, Value: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).get_Resizable(@ptrCast(*const IWebBrowser2, self), Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWebBrowser2_put_Resizable(self: *const T, Value: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWebBrowser2.VTable, self.vtable).put_Resizable(@ptrCast(*const IWebBrowser2, self), Value);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_DWebBrowserEvents2_Value = @import("../zig.zig").Guid.initString("34a715a0-6587-11d0-924a-0020afc7ac4d");
pub const IID_DWebBrowserEvents2 = &IID_DWebBrowserEvents2_Value;
// Event dispinterface: reached via IDispatch.Invoke, so no extra vtable slots.
pub const DWebBrowserEvents2 = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_DShellWindowsEvents_Value = @import("../zig.zig").Guid.initString("fe4106e0-399a-11d0-a48c-00a0c90a8f39");
pub const IID_DShellWindowsEvents = &IID_DShellWindowsEvents_Value;
// Event dispinterface: reached via IDispatch.Invoke, so no extra vtable slots.
pub const DShellWindowsEvents = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IShellUIHelper_Value =
@import("../zig.zig").Guid.initString("729fe2f8-1ea8-11d1-8f85-00c04fc2fbe1");
pub const IID_IShellUIHelper = &IID_IShellUIHelper_Value;
pub const IShellUIHelper = extern struct {
    // COM vtable: base (IDispatch) first, then this interface's slots in
    // declaration order. Do not reorder fields — slot order is the ABI.
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        ResetFirstBootMode: fn( self: *const IShellUIHelper, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ResetSafeMode: fn( self: *const IShellUIHelper, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RefreshOfflineDesktop: fn( self: *const IShellUIHelper, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddFavorite: fn( self: *const IShellUIHelper, URL: ?BSTR, Title: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddChannel: fn( self: *const IShellUIHelper, URL: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddDesktopComponent: fn( self: *const IShellUIHelper, URL: ?BSTR, Type: ?BSTR, Left: ?*VARIANT, Top: ?*VARIANT, Width: ?*VARIANT, Height: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsSubscribed: fn( self: *const IShellUIHelper, URL: ?BSTR, pBool: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        NavigateAndFind: fn( self: *const IShellUIHelper, URL: ?BSTR, strQuery: ?BSTR, varTargetFrame: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ImportExportFavorites: fn( self: *const IShellUIHelper, fImport: i16, strImpExpPath: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AutoCompleteSaveForm: fn( self: *const IShellUIHelper, Form: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AutoScan: fn( self: *const IShellUIHelper, strSearch: ?BSTR, strFailureUrl: ?BSTR, pvarTargetFrame: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AutoCompleteAttach: fn( self: *const IShellUIHelper, Reserved: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ShowBrowserUI: fn( self: *const IShellUIHelper, bstrName: ?BSTR, pvarIn: ?*VARIANT, pvarOut: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Thin wrappers: each casts self's vtable to this interface's VTable and
    // forwards to the matching slot, returning the raw HRESULT.
    // NOTE: methods are namespaced with the interface name to avoid conflicts for now.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        pub fn IShellUIHelper_ResetFirstBootMode(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).ResetFirstBootMode(@ptrCast(*const IShellUIHelper, self));
        }
        pub fn IShellUIHelper_ResetSafeMode(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).ResetSafeMode(@ptrCast(*const IShellUIHelper, self));
        }
        pub fn IShellUIHelper_RefreshOfflineDesktop(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).RefreshOfflineDesktop(@ptrCast(*const IShellUIHelper, self));
        }
        pub fn IShellUIHelper_AddFavorite(self: *const T, URL: ?BSTR, Title: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).AddFavorite(@ptrCast(*const IShellUIHelper, self), URL, Title);
        }
        pub fn IShellUIHelper_AddChannel(self: *const T, URL: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).AddChannel(@ptrCast(*const IShellUIHelper, self), URL);
        }
        pub fn IShellUIHelper_AddDesktopComponent(self: *const T, URL: ?BSTR, Type: ?BSTR, Left: ?*VARIANT, Top: ?*VARIANT, Width: ?*VARIANT, Height: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).AddDesktopComponent(@ptrCast(*const IShellUIHelper, self), URL, Type, Left, Top, Width, Height);
        }
        pub fn IShellUIHelper_IsSubscribed(self: *const T, URL: ?BSTR, pBool: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).IsSubscribed(@ptrCast(*const IShellUIHelper, self), URL, pBool);
        }
        pub fn IShellUIHelper_NavigateAndFind(self: *const T, URL: ?BSTR, strQuery: ?BSTR, varTargetFrame: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).NavigateAndFind(@ptrCast(*const IShellUIHelper, self), URL, strQuery, varTargetFrame);
        }
        pub fn IShellUIHelper_ImportExportFavorites(self: *const T, fImport: i16, strImpExpPath: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).ImportExportFavorites(@ptrCast(*const IShellUIHelper, self), fImport, strImpExpPath);
        }
        pub fn IShellUIHelper_AutoCompleteSaveForm(self: *const T, Form: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).AutoCompleteSaveForm(@ptrCast(*const IShellUIHelper, self), Form);
        }
        pub fn IShellUIHelper_AutoScan(self: *const T, strSearch: ?BSTR, strFailureUrl: ?BSTR, pvarTargetFrame: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).AutoScan(@ptrCast(*const IShellUIHelper, self), strSearch, strFailureUrl, pvarTargetFrame);
        }
        pub fn IShellUIHelper_AutoCompleteAttach(self: *const T, Reserved: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).AutoCompleteAttach(@ptrCast(*const IShellUIHelper, self), Reserved);
        }
        pub fn IShellUIHelper_ShowBrowserUI(self: *const T, bstrName: ?BSTR, pvarIn: ?*VARIANT, pvarOut: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper.VTable, self.vtable).ShowBrowserUI(@ptrCast(*const IShellUIHelper, self), bstrName, pvarIn, pvarOut);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellUIHelper2: extends IShellUIHelper (its VTable embeds
// IShellUIHelper.VTable as `base`), adding search-provider and run-once slots.
const IID_IShellUIHelper2_Value = @import("../zig.zig").Guid.initString("a7fe6eda-1932-4281-b881-87b31b8bc52c");
pub const IID_IShellUIHelper2 = &IID_IShellUIHelper2_Value;
pub const IShellUIHelper2 = extern struct {
    pub const VTable = extern struct {
        base: IShellUIHelper.VTable,
        AddSearchProvider: fn( self: *const IShellUIHelper2, URL: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RunOnceShown: fn( self: *const IShellUIHelper2, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SkipRunOnce: fn( self: *const IShellUIHelper2, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CustomizeSettings: fn( self: *const IShellUIHelper2, fSQM: i16, fPhishing: i16, bstrLocale: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SqmEnabled: fn( self: *const IShellUIHelper2, pfEnabled: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        PhishingEnabled: fn( self: *const IShellUIHelper2, pfEnabled: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        BrandImageUri: fn( self: *const IShellUIHelper2, pbstrUri: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SkipTabsWelcome: fn( self: *const IShellUIHelper2, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DiagnoseConnection: fn( self: *const IShellUIHelper2, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CustomizeClearType: fn( self: *const IShellUIHelper2, fSet: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsSearchProviderInstalled: fn( self: *const IShellUIHelper2,
        URL: ?BSTR, pdwResult: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsSearchMigrated: fn( self: *const IShellUIHelper2, pfMigrated: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DefaultSearchProvider: fn( self: *const IShellUIHelper2, pbstrName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RunOnceRequiredSettingsComplete: fn( self: *const IShellUIHelper2, fComplete: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RunOnceHasShown: fn( self: *const IShellUIHelper2, pfShown: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SearchGuideUrl: fn( self: *const IShellUIHelper2, pbstrUrl: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Wrappers cast to this interface's VTable and forward to the slot;
    // parent methods come in via IShellUIHelper.MethodMixin.
    // NOTE: methods are namespaced with the interface name to avoid conflicts for now.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellUIHelper.MethodMixin(T);
        pub fn IShellUIHelper2_AddSearchProvider(self: *const T, URL: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).AddSearchProvider(@ptrCast(*const IShellUIHelper2, self), URL);
        }
        pub fn IShellUIHelper2_RunOnceShown(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).RunOnceShown(@ptrCast(*const IShellUIHelper2, self));
        }
        pub fn IShellUIHelper2_SkipRunOnce(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).SkipRunOnce(@ptrCast(*const IShellUIHelper2, self));
        }
        pub fn IShellUIHelper2_CustomizeSettings(self: *const T, fSQM: i16, fPhishing: i16, bstrLocale: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).CustomizeSettings(@ptrCast(*const IShellUIHelper2, self), fSQM, fPhishing, bstrLocale);
        }
        pub fn IShellUIHelper2_SqmEnabled(self: *const T, pfEnabled: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).SqmEnabled(@ptrCast(*const IShellUIHelper2, self), pfEnabled);
        }
        pub fn IShellUIHelper2_PhishingEnabled(self: *const T, pfEnabled: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).PhishingEnabled(@ptrCast(*const IShellUIHelper2, self), pfEnabled);
        }
        pub fn IShellUIHelper2_BrandImageUri(self: *const T, pbstrUri: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).BrandImageUri(@ptrCast(*const IShellUIHelper2, self), pbstrUri);
        }
        pub fn IShellUIHelper2_SkipTabsWelcome(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).SkipTabsWelcome(@ptrCast(*const IShellUIHelper2, self));
        }
        pub fn IShellUIHelper2_DiagnoseConnection(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).DiagnoseConnection(@ptrCast(*const IShellUIHelper2, self));
        }
        pub fn IShellUIHelper2_CustomizeClearType(self: *const T, fSet: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).CustomizeClearType(@ptrCast(*const IShellUIHelper2, self), fSet);
        }
        pub fn IShellUIHelper2_IsSearchProviderInstalled(self: *const T, URL: ?BSTR, pdwResult: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).IsSearchProviderInstalled(@ptrCast(*const IShellUIHelper2, self), URL, pdwResult);
        }
        pub fn IShellUIHelper2_IsSearchMigrated(self: *const T, pfMigrated: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).IsSearchMigrated(@ptrCast(*const IShellUIHelper2, self), pfMigrated);
        }
        pub fn IShellUIHelper2_DefaultSearchProvider(self: *const T, pbstrName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).DefaultSearchProvider(@ptrCast(*const IShellUIHelper2, self), pbstrName);
        }
        pub fn IShellUIHelper2_RunOnceRequiredSettingsComplete(self: *const T, fComplete: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).RunOnceRequiredSettingsComplete(@ptrCast(*const IShellUIHelper2, self), fComplete);
        }
        pub fn IShellUIHelper2_RunOnceHasShown(self: *const T, pfShown: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).RunOnceHasShown(@ptrCast(*const IShellUIHelper2, self), pfShown);
        }
        pub fn IShellUIHelper2_SearchGuideUrl(self: *const T, pbstrUrl: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper2.VTable, self.vtable).SearchGuideUrl(@ptrCast(*const IShellUIHelper2, self), pbstrUrl);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellUIHelper3: extends IShellUIHelper2; adds service/favorites-bar and
// suggested-sites slots.
const IID_IShellUIHelper3_Value =
@import("../zig.zig").Guid.initString("528df2ec-d419-40bc-9b6d-dcdbf9c1b25d");
pub const IID_IShellUIHelper3 = &IID_IShellUIHelper3_Value;
pub const IShellUIHelper3 = extern struct {
    // Slot order is the COM ABI — do not reorder fields.
    pub const VTable = extern struct {
        base: IShellUIHelper2.VTable,
        AddService: fn( self: *const IShellUIHelper3, URL: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsServiceInstalled: fn( self: *const IShellUIHelper3, URL: ?BSTR, Verb: ?BSTR, pdwResult: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        InPrivateFilteringEnabled: fn( self: *const IShellUIHelper3, pfEnabled: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddToFavoritesBar: fn( self: *const IShellUIHelper3, URL: ?BSTR, Title: ?BSTR, Type: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        BuildNewTabPage: fn( self: *const IShellUIHelper3, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetRecentlyClosedVisible: fn( self: *const IShellUIHelper3, fVisible: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetActivitiesVisible: fn( self: *const IShellUIHelper3, fVisible: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ContentDiscoveryReset: fn( self: *const IShellUIHelper3, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsSuggestedSitesEnabled: fn( self: *const IShellUIHelper3, pfEnabled: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnableSuggestedSites: fn( self: *const IShellUIHelper3, fEnable: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        NavigateToSuggestedSites: fn( self: *const IShellUIHelper3, bstrRelativeUrl: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ShowTabsHelp: fn( self: *const IShellUIHelper3, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ShowInPrivateHelp: fn( self: *const IShellUIHelper3, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Wrappers cast to this interface's VTable and forward to the slot;
    // parent methods come in via IShellUIHelper2.MethodMixin.
    // NOTE: methods are namespaced with the interface name to avoid conflicts for now.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellUIHelper2.MethodMixin(T);
        pub fn IShellUIHelper3_AddService(self: *const T, URL: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).AddService(@ptrCast(*const IShellUIHelper3, self), URL);
        }
        pub fn IShellUIHelper3_IsServiceInstalled(self: *const T, URL: ?BSTR, Verb: ?BSTR, pdwResult: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).IsServiceInstalled(@ptrCast(*const IShellUIHelper3, self), URL, Verb, pdwResult);
        }
        pub fn IShellUIHelper3_InPrivateFilteringEnabled(self: *const T, pfEnabled: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).InPrivateFilteringEnabled(@ptrCast(*const IShellUIHelper3, self), pfEnabled);
        }
        pub fn IShellUIHelper3_AddToFavoritesBar(self: *const T, URL: ?BSTR, Title: ?BSTR, Type: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).AddToFavoritesBar(@ptrCast(*const IShellUIHelper3, self), URL, Title, Type);
        }
        pub fn IShellUIHelper3_BuildNewTabPage(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).BuildNewTabPage(@ptrCast(*const IShellUIHelper3, self));
        }
        pub fn IShellUIHelper3_SetRecentlyClosedVisible(self: *const T, fVisible: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).SetRecentlyClosedVisible(@ptrCast(*const IShellUIHelper3, self), fVisible);
        }
        pub fn IShellUIHelper3_SetActivitiesVisible(self: *const T, fVisible: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).SetActivitiesVisible(@ptrCast(*const IShellUIHelper3, self), fVisible);
        }
        pub fn IShellUIHelper3_ContentDiscoveryReset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).ContentDiscoveryReset(@ptrCast(*const IShellUIHelper3, self));
        }
        pub fn IShellUIHelper3_IsSuggestedSitesEnabled(self: *const T, pfEnabled: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).IsSuggestedSitesEnabled(@ptrCast(*const IShellUIHelper3, self), pfEnabled);
        }
        pub fn IShellUIHelper3_EnableSuggestedSites(self: *const T, fEnable: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).EnableSuggestedSites(@ptrCast(*const IShellUIHelper3, self), fEnable);
        }
        pub fn IShellUIHelper3_NavigateToSuggestedSites(self: *const T, bstrRelativeUrl: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).NavigateToSuggestedSites(@ptrCast(*const IShellUIHelper3, self), bstrRelativeUrl);
        }
        pub fn IShellUIHelper3_ShowTabsHelp(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).ShowTabsHelp(@ptrCast(*const IShellUIHelper3, self));
        }
        pub fn IShellUIHelper3_ShowInPrivateHelp(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper3.VTable, self.vtable).ShowInPrivateHelp(@ptrCast(*const IShellUIHelper3, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellUIHelper4: extends IShellUIHelper3; adds pinned-site ("ms*" site
// mode) slots: thumb-bar buttons, icon overlays, jump lists, tracking
// protection and ActiveX filtering queries.
const IID_IShellUIHelper4_Value = @import("../zig.zig").Guid.initString("b36e6a53-8073-499e-824c-d776330a333e");
pub const IID_IShellUIHelper4 = &IID_IShellUIHelper4_Value;
pub const IShellUIHelper4 = extern struct {
    pub const VTable = extern struct {
        base: IShellUIHelper3.VTable,
        msIsSiteMode: fn( self: *const IShellUIHelper4, pfSiteMode: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeShowThumbBar: fn( self: *const IShellUIHelper4, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeAddThumbBarButton: fn( self: *const IShellUIHelper4, bstrIconURL: ?BSTR, bstrTooltip: ?BSTR, pvarButtonID: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeUpdateThumbBarButton: fn( self: *const IShellUIHelper4, ButtonID: VARIANT, fEnabled: i16, fVisible: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeSetIconOverlay: fn( self: *const IShellUIHelper4, IconUrl: ?BSTR, pvarDescription: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeClearIconOverlay: fn( self: *const IShellUIHelper4, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msAddSiteMode: fn( self: *const IShellUIHelper4, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeCreateJumpList: fn( self: *const IShellUIHelper4, bstrHeader: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeAddJumpListItem: fn( self: *const IShellUIHelper4, bstrName: ?BSTR, bstrActionUri: ?BSTR, bstrIconUri: ?BSTR, pvarWindowType: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeClearJumpList: fn( self: *const IShellUIHelper4, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeShowJumpList: fn( self: *const IShellUIHelper4, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeAddButtonStyle: fn( self: *const
        IShellUIHelper4, uiButtonID: VARIANT, bstrIconUrl: ?BSTR, bstrTooltip: ?BSTR, pvarStyleID: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeShowButtonStyle: fn( self: *const IShellUIHelper4, uiButtonID: VARIANT, uiStyleID: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeActivate: fn( self: *const IShellUIHelper4, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msIsSiteModeFirstRun: fn( self: *const IShellUIHelper4, fPreserveState: i16, puiFirstRun: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msAddTrackingProtectionList: fn( self: *const IShellUIHelper4, URL: ?BSTR, bstrFilterName: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msTrackingProtectionEnabled: fn( self: *const IShellUIHelper4, pfEnabled: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msActiveXFilteringEnabled: fn( self: *const IShellUIHelper4, pfEnabled: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Wrappers cast to this interface's VTable and forward to the slot;
    // parent methods come in via IShellUIHelper3.MethodMixin.
    // NOTE: methods are namespaced with the interface name to avoid conflicts for now.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellUIHelper3.MethodMixin(T);
        pub fn IShellUIHelper4_msIsSiteMode(self: *const T, pfSiteMode: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msIsSiteMode(@ptrCast(*const IShellUIHelper4, self), pfSiteMode);
        }
        pub fn IShellUIHelper4_msSiteModeShowThumbBar(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeShowThumbBar(@ptrCast(*const IShellUIHelper4, self));
        }
        pub fn IShellUIHelper4_msSiteModeAddThumbBarButton(self: *const T, bstrIconURL: ?BSTR, bstrTooltip: ?BSTR, pvarButtonID: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeAddThumbBarButton(@ptrCast(*const IShellUIHelper4, self), bstrIconURL, bstrTooltip, pvarButtonID);
        }
        pub fn IShellUIHelper4_msSiteModeUpdateThumbBarButton(self: *const T, ButtonID: VARIANT, fEnabled: i16, fVisible: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeUpdateThumbBarButton(@ptrCast(*const IShellUIHelper4, self), ButtonID, fEnabled, fVisible);
        }
        pub fn IShellUIHelper4_msSiteModeSetIconOverlay(self: *const T, IconUrl: ?BSTR, pvarDescription: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeSetIconOverlay(@ptrCast(*const IShellUIHelper4, self), IconUrl, pvarDescription);
        }
        pub fn IShellUIHelper4_msSiteModeClearIconOverlay(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeClearIconOverlay(@ptrCast(*const IShellUIHelper4, self));
        }
        pub fn IShellUIHelper4_msAddSiteMode(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msAddSiteMode(@ptrCast(*const IShellUIHelper4, self));
        }
        pub fn IShellUIHelper4_msSiteModeCreateJumpList(self: *const T, bstrHeader: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeCreateJumpList(@ptrCast(*const IShellUIHelper4, self), bstrHeader);
        }
        pub fn IShellUIHelper4_msSiteModeAddJumpListItem(self: *const T, bstrName: ?BSTR, bstrActionUri: ?BSTR, bstrIconUri: ?BSTR, pvarWindowType: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeAddJumpListItem(@ptrCast(*const IShellUIHelper4, self), bstrName, bstrActionUri, bstrIconUri, pvarWindowType);
        }
        pub fn IShellUIHelper4_msSiteModeClearJumpList(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeClearJumpList(@ptrCast(*const IShellUIHelper4, self));
        }
        pub fn IShellUIHelper4_msSiteModeShowJumpList(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeShowJumpList(@ptrCast(*const IShellUIHelper4, self));
        }
        pub fn IShellUIHelper4_msSiteModeAddButtonStyle(self: *const T, uiButtonID: VARIANT, bstrIconUrl: ?BSTR, bstrTooltip: ?BSTR, pvarStyleID: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeAddButtonStyle(@ptrCast(*const IShellUIHelper4, self), uiButtonID, bstrIconUrl, bstrTooltip, pvarStyleID);
        }
        pub fn IShellUIHelper4_msSiteModeShowButtonStyle(self: *const T, uiButtonID: VARIANT, uiStyleID: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeShowButtonStyle(@ptrCast(*const IShellUIHelper4, self), uiButtonID, uiStyleID);
        }
        pub fn IShellUIHelper4_msSiteModeActivate(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msSiteModeActivate(@ptrCast(*const IShellUIHelper4, self));
        }
        pub fn IShellUIHelper4_msIsSiteModeFirstRun(self: *const T, fPreserveState: i16, puiFirstRun: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msIsSiteModeFirstRun(@ptrCast(*const IShellUIHelper4, self), fPreserveState, puiFirstRun);
        }
        pub fn IShellUIHelper4_msAddTrackingProtectionList(self: *const T, URL: ?BSTR, bstrFilterName: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msAddTrackingProtectionList(@ptrCast(*const IShellUIHelper4, self), URL, bstrFilterName);
        }
        pub fn IShellUIHelper4_msTrackingProtectionEnabled(self: *const T, pfEnabled: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msTrackingProtectionEnabled(@ptrCast(*const IShellUIHelper4, self), pfEnabled);
        }
        pub fn IShellUIHelper4_msActiveXFilteringEnabled(self: *const T, pfEnabled: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper4.VTable, self.vtable).msActiveXFilteringEnabled(@ptrCast(*const IShellUIHelper4, self), pfEnabled);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellUIHelper5: extends IShellUIHelper4; adds network-provisioning,
// badge-refresh and default-browser slots.
const IID_IShellUIHelper5_Value = @import("../zig.zig").Guid.initString("a2a08b09-103d-4d3f-b91c-ea455ca82efa");
pub const IID_IShellUIHelper5 = &IID_IShellUIHelper5_Value;
pub const IShellUIHelper5 = extern struct {
    pub const VTable = extern struct {
        base: IShellUIHelper4.VTable,
        msProvisionNetworks: fn( self: *const IShellUIHelper5, bstrProvisioningXml: ?BSTR, puiResult: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msReportSafeUrl: fn( self: *const IShellUIHelper5, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeRefreshBadge: fn( self: *const IShellUIHelper5, )
        callconv(@import("std").os.windows.WINAPI) HRESULT,
        msSiteModeClearBadge: fn( self: *const IShellUIHelper5, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msDiagnoseConnectionUILess: fn( self: *const IShellUIHelper5, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msLaunchNetworkClientHelp: fn( self: *const IShellUIHelper5, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msChangeDefaultBrowser: fn( self: *const IShellUIHelper5, fChange: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Wrappers cast to this interface's VTable and forward to the slot;
    // parent methods come in via IShellUIHelper4.MethodMixin.
    // NOTE: methods are namespaced with the interface name to avoid conflicts for now.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellUIHelper4.MethodMixin(T);
        pub fn IShellUIHelper5_msProvisionNetworks(self: *const T, bstrProvisioningXml: ?BSTR, puiResult: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper5.VTable, self.vtable).msProvisionNetworks(@ptrCast(*const IShellUIHelper5, self), bstrProvisioningXml, puiResult);
        }
        pub fn IShellUIHelper5_msReportSafeUrl(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper5.VTable, self.vtable).msReportSafeUrl(@ptrCast(*const IShellUIHelper5, self));
        }
        pub fn IShellUIHelper5_msSiteModeRefreshBadge(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper5.VTable, self.vtable).msSiteModeRefreshBadge(@ptrCast(*const IShellUIHelper5, self));
        }
        pub fn IShellUIHelper5_msSiteModeClearBadge(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper5.VTable, self.vtable).msSiteModeClearBadge(@ptrCast(*const IShellUIHelper5, self));
        }
        pub fn IShellUIHelper5_msDiagnoseConnectionUILess(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper5.VTable, self.vtable).msDiagnoseConnectionUILess(@ptrCast(*const IShellUIHelper5, self));
        }
        pub fn IShellUIHelper5_msLaunchNetworkClientHelp(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper5.VTable, self.vtable).msLaunchNetworkClientHelp(@ptrCast(*const IShellUIHelper5, self));
        }
        pub fn IShellUIHelper5_msChangeDefaultBrowser(self: *const T, fChange: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IShellUIHelper5.VTable, self.vtable).msChangeDefaultBrowser(@ptrCast(*const IShellUIHelper5, self), fChange);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellUIHelper6: extends IShellUIHelper5; adds periodic tile/badge update
// slots (the remainder of this declaration continues past this chunk).
const IID_IShellUIHelper6_Value = @import("../zig.zig").Guid.initString("987a573e-46ee-4e89-96ab-ddf7f8fdc98c");
pub const IID_IShellUIHelper6 = &IID_IShellUIHelper6_Value;
pub const IShellUIHelper6 = extern struct {
    pub const VTable = extern struct {
        base: IShellUIHelper5.VTable,
        msStopPeriodicTileUpdate: fn( self: *const IShellUIHelper6, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msStartPeriodicTileUpdate: fn( self: *const IShellUIHelper6, pollingUris: VARIANT, startTime: VARIANT, uiUpdateRecurrence: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msStartPeriodicTileUpdateBatch: fn( self: *const IShellUIHelper6, pollingUris: VARIANT, startTime: VARIANT, uiUpdateRecurrence: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msClearTile: fn( self: *const IShellUIHelper6, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msEnableTileNotificationQueue: fn( self: *const IShellUIHelper6, fChange: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        msPinnedSiteState: fn( self: *const IShellUIHelper6, pvarSiteState: ?*VARIANT, )
callconv(@import("std").os.windows.WINAPI) HRESULT, msEnableTileNotificationQueueForSquare150x150: fn( self: *const IShellUIHelper6, fChange: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, msEnableTileNotificationQueueForWide310x150: fn( self: *const IShellUIHelper6, fChange: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, msEnableTileNotificationQueueForSquare310x310: fn( self: *const IShellUIHelper6, fChange: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, msScheduledTileNotification: fn( self: *const IShellUIHelper6, bstrNotificationXml: ?BSTR, bstrNotificationId: ?BSTR, bstrNotificationTag: ?BSTR, startTime: VARIANT, expirationTime: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, msRemoveScheduledTileNotification: fn( self: *const IShellUIHelper6, bstrNotificationId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, msStartPeriodicBadgeUpdate: fn( self: *const IShellUIHelper6, pollingUri: ?BSTR, startTime: VARIANT, uiUpdateRecurrence: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, msStopPeriodicBadgeUpdate: fn( self: *const IShellUIHelper6, ) callconv(@import("std").os.windows.WINAPI) HRESULT, msLaunchInternetOptions: fn( self: *const IShellUIHelper6, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IShellUIHelper5.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msStopPeriodicTileUpdate(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msStopPeriodicTileUpdate(@ptrCast(*const IShellUIHelper6, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msStartPeriodicTileUpdate(self: *const T, pollingUris: VARIANT, startTime: VARIANT, uiUpdateRecurrence: VARIANT) callconv(.Inline) HRESULT { return 
@ptrCast(*const IShellUIHelper6.VTable, self.vtable).msStartPeriodicTileUpdate(@ptrCast(*const IShellUIHelper6, self), pollingUris, startTime, uiUpdateRecurrence); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msStartPeriodicTileUpdateBatch(self: *const T, pollingUris: VARIANT, startTime: VARIANT, uiUpdateRecurrence: VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msStartPeriodicTileUpdateBatch(@ptrCast(*const IShellUIHelper6, self), pollingUris, startTime, uiUpdateRecurrence); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msClearTile(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msClearTile(@ptrCast(*const IShellUIHelper6, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msEnableTileNotificationQueue(self: *const T, fChange: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msEnableTileNotificationQueue(@ptrCast(*const IShellUIHelper6, self), fChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msPinnedSiteState(self: *const T, pvarSiteState: ?*VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msPinnedSiteState(@ptrCast(*const IShellUIHelper6, self), pvarSiteState); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msEnableTileNotificationQueueForSquare150x150(self: *const T, fChange: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msEnableTileNotificationQueueForSquare150x150(@ptrCast(*const IShellUIHelper6, self), fChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IShellUIHelper6_msEnableTileNotificationQueueForWide310x150(self: *const T, fChange: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msEnableTileNotificationQueueForWide310x150(@ptrCast(*const IShellUIHelper6, self), fChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msEnableTileNotificationQueueForSquare310x310(self: *const T, fChange: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msEnableTileNotificationQueueForSquare310x310(@ptrCast(*const IShellUIHelper6, self), fChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msScheduledTileNotification(self: *const T, bstrNotificationXml: ?BSTR, bstrNotificationId: ?BSTR, bstrNotificationTag: ?BSTR, startTime: VARIANT, expirationTime: VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msScheduledTileNotification(@ptrCast(*const IShellUIHelper6, self), bstrNotificationXml, bstrNotificationId, bstrNotificationTag, startTime, expirationTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msRemoveScheduledTileNotification(self: *const T, bstrNotificationId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msRemoveScheduledTileNotification(@ptrCast(*const IShellUIHelper6, self), bstrNotificationId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msStartPeriodicBadgeUpdate(self: *const T, pollingUri: ?BSTR, startTime: VARIANT, uiUpdateRecurrence: VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msStartPeriodicBadgeUpdate(@ptrCast(*const IShellUIHelper6, self), pollingUri, startTime, uiUpdateRecurrence); } // NOTE: method is namespaced with interface name to avoid conflicts 
for now pub fn IShellUIHelper6_msStopPeriodicBadgeUpdate(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msStopPeriodicBadgeUpdate(@ptrCast(*const IShellUIHelper6, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IShellUIHelper6_msLaunchInternetOptions(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper6.VTable, self.vtable).msLaunchInternetOptions(@ptrCast(*const IShellUIHelper6, self)); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IShellUIHelper7_Value = @import("../zig.zig").Guid.initString("60e567c8-9573-4ab2-a264-637c6c161cb1"); pub const IID_IShellUIHelper7 = &IID_IShellUIHelper7_Value; pub const IShellUIHelper7 = extern struct { pub const VTable = extern struct { base: IShellUIHelper6.VTable, SetExperimentalFlag: fn( self: *const IShellUIHelper7, bstrFlagString: ?BSTR, vfFlag: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetExperimentalFlag: fn( self: *const IShellUIHelper7, bstrFlagString: ?BSTR, vfFlag: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetExperimentalValue: fn( self: *const IShellUIHelper7, bstrValueString: ?BSTR, dwValue: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetExperimentalValue: fn( self: *const IShellUIHelper7, bstrValueString: ?BSTR, pdwValue: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ResetAllExperimentalFlagsAndValues: fn( self: *const IShellUIHelper7, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetNeedIEAutoLaunchFlag: fn( self: *const IShellUIHelper7, bstrUrl: ?BSTR, flag: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetNeedIEAutoLaunchFlag: fn( self: *const IShellUIHelper7, bstrUrl: ?BSTR, flag: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, HasNeedIEAutoLaunchFlag: fn( self: *const IShellUIHelper7, bstrUrl: ?BSTR, exists: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, 
LaunchIE: fn(self: *const IShellUIHelper7, bstrUrl: ?BSTR, automated: i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline forwarders for IShellUIHelper7 plus every inherited interface.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellUIHelper6.MethodMixin(T);
        // NOTE: methods are namespaced with the interface name to avoid conflicts for now
        pub fn IShellUIHelper7_SetExperimentalFlag(self: *const T, bstrFlagString: ?BSTR, vfFlag: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).SetExperimentalFlag(@ptrCast(*const IShellUIHelper7, self), bstrFlagString, vfFlag); }
        pub fn IShellUIHelper7_GetExperimentalFlag(self: *const T, bstrFlagString: ?BSTR, vfFlag: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).GetExperimentalFlag(@ptrCast(*const IShellUIHelper7, self), bstrFlagString, vfFlag); }
        pub fn IShellUIHelper7_SetExperimentalValue(self: *const T, bstrValueString: ?BSTR, dwValue: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).SetExperimentalValue(@ptrCast(*const IShellUIHelper7, self), bstrValueString, dwValue); }
        pub fn IShellUIHelper7_GetExperimentalValue(self: *const T, bstrValueString: ?BSTR, pdwValue: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).GetExperimentalValue(@ptrCast(*const IShellUIHelper7, self), bstrValueString, pdwValue); }
        pub fn IShellUIHelper7_ResetAllExperimentalFlagsAndValues(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).ResetAllExperimentalFlagsAndValues(@ptrCast(*const IShellUIHelper7, self)); }
        pub fn IShellUIHelper7_GetNeedIEAutoLaunchFlag(self: *const T, bstrUrl: ?BSTR, flag: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).GetNeedIEAutoLaunchFlag(@ptrCast(*const IShellUIHelper7, self), bstrUrl, flag); }
        pub fn IShellUIHelper7_SetNeedIEAutoLaunchFlag(self: *const T, bstrUrl: ?BSTR, flag: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).SetNeedIEAutoLaunchFlag(@ptrCast(*const IShellUIHelper7, self), bstrUrl, flag); }
        pub fn IShellUIHelper7_HasNeedIEAutoLaunchFlag(self: *const T, bstrUrl: ?BSTR, exists: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).HasNeedIEAutoLaunchFlag(@ptrCast(*const IShellUIHelper7, self), bstrUrl, exists); }
        pub fn IShellUIHelper7_LaunchIE(self: *const T, bstrUrl: ?BSTR, automated: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper7.VTable, self.vtable).LaunchIE(@ptrCast(*const IShellUIHelper7, self), bstrUrl, automated); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellUIHelper8: extends IShellUIHelper7. "CVList"/"EMIEList" getters presumably
// return the Compatibility-View and Enterprise-Mode IE site lists (verify against the
// Windows SDK); also favorites-pane openers and an HVSI (isolated container) launch.
const IID_IShellUIHelper8_Value = @import("../zig.zig").Guid.initString("66debcf2-05b0-4f07-b49b-b96241a65db2");
pub const IID_IShellUIHelper8 = &IID_IShellUIHelper8_Value;
pub const IShellUIHelper8 = extern struct {
    pub const VTable = extern struct {
        base: IShellUIHelper7.VTable,
        GetCVListData: fn(self: *const IShellUIHelper8, pbstrResult: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCVListLocalData: fn(self: *const IShellUIHelper8, pbstrResult: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetEMIEListData: fn(self: *const IShellUIHelper8, pbstrResult: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetEMIEListLocalData: fn(self: *const IShellUIHelper8, pbstrResult: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OpenFavoritesPane: fn(self: *const IShellUIHelper8,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OpenFavoritesSettings: fn(self: *const IShellUIHelper8,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LaunchInHVSI: fn(self: *const IShellUIHelper8, bstrUrl: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellUIHelper7.MethodMixin(T);
        // NOTE: methods are namespaced with the interface name to avoid conflicts for now
        pub fn IShellUIHelper8_GetCVListData(self: *const T, pbstrResult: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper8.VTable, self.vtable).GetCVListData(@ptrCast(*const IShellUIHelper8, self), pbstrResult); }
        pub fn IShellUIHelper8_GetCVListLocalData(self: *const T, pbstrResult: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper8.VTable, self.vtable).GetCVListLocalData(@ptrCast(*const IShellUIHelper8, self), pbstrResult); }
        pub fn IShellUIHelper8_GetEMIEListData(self: *const T, pbstrResult: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper8.VTable, self.vtable).GetEMIEListData(@ptrCast(*const IShellUIHelper8, self), pbstrResult); }
        pub fn IShellUIHelper8_GetEMIEListLocalData(self: *const T, pbstrResult: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper8.VTable, self.vtable).GetEMIEListLocalData(@ptrCast(*const IShellUIHelper8, self), pbstrResult); }
        pub fn IShellUIHelper8_OpenFavoritesPane(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper8.VTable, self.vtable).OpenFavoritesPane(@ptrCast(*const IShellUIHelper8, self)); }
        pub fn IShellUIHelper8_OpenFavoritesSettings(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper8.VTable, self.vtable).OpenFavoritesSettings(@ptrCast(*const IShellUIHelper8, self)); }
        pub fn IShellUIHelper8_LaunchInHVSI(self: *const T, bstrUrl: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper8.VTable, self.vtable).LaunchInHVSI(@ptrCast(*const IShellUIHelper8, self), bstrUrl); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellUIHelper9: extends IShellUIHelper8 with a single query, GetOSSku, which writes
// a u32 through pdwResult (an OS SKU code, judging by the name -- TODO confirm).
const IID_IShellUIHelper9_Value = @import("../zig.zig").Guid.initString("6cdf73b0-7f2f-451f-bc0f-63e0f3284e54");
pub const IID_IShellUIHelper9 = &IID_IShellUIHelper9_Value;
pub const IShellUIHelper9 = extern struct {
    pub const VTable = extern struct {
        base: IShellUIHelper8.VTable,
        GetOSSku: fn(self: *const IShellUIHelper9, pdwResult: ?*u32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellUIHelper8.MethodMixin(T);
        // NOTE: method is namespaced with the interface name to avoid conflicts for now
        pub fn IShellUIHelper9_GetOSSku(self: *const T, pdwResult: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellUIHelper9.VTable, self.vtable).GetOSSku(@ptrCast(*const IShellUIHelper9, self), pdwResult); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// DShellNameSpaceEvents: event dispinterface -- no vtable slots of its own beyond
// IDispatch (events are fired via IDispatch::Invoke).
const IID_DShellNameSpaceEvents_Value = @import("../zig.zig").Guid.initString("55136806-b2de-11d1-b9f2-00a0c98bc547");
pub const IID_DShellNameSpaceEvents = &IID_DShellNameSpaceEvents_Value;
pub const DShellNameSpaceEvents = extern struct {
    pub const VTable = extern struct {
base: IDispatch.VTable,
    };
    vtable: *const VTable,
    // No methods of its own -- only the inherited IDispatch surface.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellFavoritesNameSpace: IDispatch-based operations on the favorites (bookmarks)
// pane -- selection movement, sorting, folder creation, sync, import/export,
// subscriptions, and setting the root path.
const IID_IShellFavoritesNameSpace_Value = @import("../zig.zig").Guid.initString("55136804-b2de-11d1-b9f2-00a0c98bc547");
pub const IID_IShellFavoritesNameSpace = &IID_IShellFavoritesNameSpace_Value;
pub const IShellFavoritesNameSpace = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        MoveSelectionUp: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveSelectionDown: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ResetSort: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        NewFolder: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Synchronize: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Import: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Export: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        InvokeContextMenuCommand: fn(self: *const IShellFavoritesNameSpace, strCommand: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveSelectionTo: fn(self: *const IShellFavoritesNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SubscriptionsEnabled: fn(self: *const IShellFavoritesNameSpace, pBool: ?*i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateSubscriptionForSelection: fn(self: *const IShellFavoritesNameSpace, pBool: ?*i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DeleteSubscriptionForSelection: fn(self: *const IShellFavoritesNameSpace, pBool: ?*i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetRoot: fn(self: *const IShellFavoritesNameSpace, bstrFullPath: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: methods are namespaced with the interface name to avoid conflicts for now
        pub fn IShellFavoritesNameSpace_MoveSelectionUp(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).MoveSelectionUp(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_MoveSelectionDown(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).MoveSelectionDown(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_ResetSort(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).ResetSort(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_NewFolder(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).NewFolder(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_Synchronize(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).Synchronize(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_Import(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).Import(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_Export(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).Export(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_InvokeContextMenuCommand(self: *const T, strCommand: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).InvokeContextMenuCommand(@ptrCast(*const IShellFavoritesNameSpace, self), strCommand); }
        pub fn IShellFavoritesNameSpace_MoveSelectionTo(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).MoveSelectionTo(@ptrCast(*const IShellFavoritesNameSpace, self)); }
        pub fn IShellFavoritesNameSpace_get_SubscriptionsEnabled(self: *const T, pBool: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).get_SubscriptionsEnabled(@ptrCast(*const IShellFavoritesNameSpace, self), pBool); }
        pub fn IShellFavoritesNameSpace_CreateSubscriptionForSelection(self: *const T, pBool: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).CreateSubscriptionForSelection(@ptrCast(*const IShellFavoritesNameSpace, self), pBool); }
        pub fn IShellFavoritesNameSpace_DeleteSubscriptionForSelection(self: *const T, pBool: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).DeleteSubscriptionForSelection(@ptrCast(*const IShellFavoritesNameSpace, self), pBool); }
        pub fn IShellFavoritesNameSpace_SetRoot(self: *const T, bstrFullPath: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellFavoritesNameSpace.VTable, self.vtable).SetRoot(@ptrCast(*const IShellFavoritesNameSpace, self), bstrFullPath); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IShellNameSpace: extends IShellFavoritesNameSpace with a generic namespace-tree
// control surface (enum options, selected item, root, depth, mode, flags, columns,
// view types, expansion). The get_/put_ pairs are COM property accessors.
const IID_IShellNameSpace_Value = @import("../zig.zig").Guid.initString("e572d3c9-37be-4ae2-825d-d521763e3108");
pub const IID_IShellNameSpace = &IID_IShellNameSpace_Value;
pub const IShellNameSpace = extern struct {
    pub const VTable = extern struct {
        base: IShellFavoritesNameSpace.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_EnumOptions: fn(self: *const IShellNameSpace, pgrfEnumFlags: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_EnumOptions: fn(self: *const IShellNameSpace, lVal: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SelectedItem: fn(self: *const IShellNameSpace, pItem: ?*?*IDispatch,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
put_SelectedItem: fn(self: *const IShellNameSpace, pItem: ?*IDispatch,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Root: fn(self: *const IShellNameSpace, pvar: ?*VARIANT,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Root: fn(self: *const IShellNameSpace, @"var": VARIANT,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Depth: fn(self: *const IShellNameSpace, piDepth: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Depth: fn(self: *const IShellNameSpace, iDepth: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Mode: fn(self: *const IShellNameSpace, puMode: ?*u32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Mode: fn(self: *const IShellNameSpace, uMode: u32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Flags: fn(self: *const IShellNameSpace, pdwFlags: ?*u32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Flags: fn(self: *const IShellNameSpace, dwFlags: u32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_TVFlags: fn(self: *const IShellNameSpace, dwFlags: u32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TVFlags: fn(self: *const IShellNameSpace, dwFlags: ?*u32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Columns: fn(self: *const IShellNameSpace, bstrColumns: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Columns: fn(self: *const IShellNameSpace, bstrColumns: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CountViewTypes: fn(self: *const IShellNameSpace, piTypes: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetViewType: fn(self: *const IShellNameSpace, iType: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SelectedItems: fn(self: *const IShellNameSpace, ppid: ?*?*IDispatch,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Expand: fn(self: *const IShellNameSpace, @"var": VARIANT, iDepth: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnselectAll: fn(self: *const IShellNameSpace,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IShellFavoritesNameSpace.MethodMixin(T);
        // NOTE: methods are namespaced with the interface name to avoid conflicts for now
        pub fn IShellNameSpace_get_EnumOptions(self: *const T, pgrfEnumFlags: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_EnumOptions(@ptrCast(*const IShellNameSpace, self), pgrfEnumFlags); }
        pub fn IShellNameSpace_put_EnumOptions(self: *const T, lVal: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_EnumOptions(@ptrCast(*const IShellNameSpace, self), lVal); }
        pub fn IShellNameSpace_get_SelectedItem(self: *const T, pItem: ?*?*IDispatch) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_SelectedItem(@ptrCast(*const IShellNameSpace, self), pItem); }
        pub fn IShellNameSpace_put_SelectedItem(self: *const T, pItem: ?*IDispatch) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_SelectedItem(@ptrCast(*const IShellNameSpace, self), pItem); }
        pub fn IShellNameSpace_get_Root(self: *const T, pvar: ?*VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_Root(@ptrCast(*const IShellNameSpace, self), pvar); }
        pub fn IShellNameSpace_put_Root(self: *const T, @"var": VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_Root(@ptrCast(*const IShellNameSpace, self), @"var"); }
        pub fn IShellNameSpace_get_Depth(self: *const T, piDepth: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_Depth(@ptrCast(*const IShellNameSpace, self), piDepth); }
        pub fn IShellNameSpace_put_Depth(self: *const T, iDepth: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_Depth(@ptrCast(*const IShellNameSpace, self), iDepth); }
        pub fn IShellNameSpace_get_Mode(self: *const T, puMode: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_Mode(@ptrCast(*const IShellNameSpace, self), puMode); }
        pub fn IShellNameSpace_put_Mode(self: *const T, uMode: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_Mode(@ptrCast(*const IShellNameSpace, self), uMode); }
        pub fn IShellNameSpace_get_Flags(self: *const T, pdwFlags: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_Flags(@ptrCast(*const IShellNameSpace, self), pdwFlags); }
        pub fn IShellNameSpace_put_Flags(self: *const T, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_Flags(@ptrCast(*const IShellNameSpace, self), dwFlags); }
        pub fn IShellNameSpace_put_TVFlags(self: *const T, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_TVFlags(@ptrCast(*const IShellNameSpace, self), dwFlags); }
        pub fn IShellNameSpace_get_TVFlags(self: *const T, dwFlags: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_TVFlags(@ptrCast(*const IShellNameSpace, self), dwFlags); }
        pub fn IShellNameSpace_get_Columns(self: *const T, bstrColumns: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_Columns(@ptrCast(*const IShellNameSpace, self), bstrColumns); }
        pub fn IShellNameSpace_put_Columns(self: *const T, bstrColumns: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).put_Columns(@ptrCast(*const IShellNameSpace, self), bstrColumns); }
        pub fn IShellNameSpace_get_CountViewTypes(self: *const T, piTypes: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).get_CountViewTypes(@ptrCast(*const IShellNameSpace, self), piTypes); }
        pub fn IShellNameSpace_SetViewType(self: *const T, iType: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).SetViewType(@ptrCast(*const IShellNameSpace, self), iType); }
        pub fn IShellNameSpace_SelectedItems(self: *const T, ppid: ?*?*IDispatch) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).SelectedItems(@ptrCast(*const IShellNameSpace, self), ppid); }
        pub fn IShellNameSpace_Expand(self: *const T, @"var": VARIANT, iDepth: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).Expand(@ptrCast(*const IShellNameSpace, self), @"var", iDepth); }
        pub fn IShellNameSpace_UnselectAll(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IShellNameSpace.VTable, self.vtable).UnselectAll(@ptrCast(*const IShellNameSpace, self)); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IScriptErrorList: IDispatch-based navigation over the script-error dialog's error
// list (advance/retreat, query line/char/code/message/url, details-pane state).
const IID_IScriptErrorList_Value = @import("../zig.zig").Guid.initString("f3470f24-15fd-11d2-bb2e-00805ff7efca");
pub const IID_IScriptErrorList = &IID_IScriptErrorList_Value;
pub const IScriptErrorList = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        advanceError: fn(self: *const IScriptErrorList,) callconv(@import("std").os.windows.WINAPI) HRESULT,
retreatError: fn(
            self: *const IScriptErrorList,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        canAdvanceError: fn(
            self: *const IScriptErrorList,
            pfCanAdvance: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        canRetreatError: fn(
            self: *const IScriptErrorList,
            pfCanRetreat: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getErrorLine: fn(
            self: *const IScriptErrorList,
            plLine: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getErrorChar: fn(
            self: *const IScriptErrorList,
            plChar: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getErrorCode: fn(
            self: *const IScriptErrorList,
            plCode: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getErrorMsg: fn(
            self: *const IScriptErrorList,
            pstr: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getErrorUrl: fn(
            self: *const IScriptErrorList,
            pstr: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getAlwaysShowLockState: fn(
            self: *const IScriptErrorList,
            pfAlwaysShowLocked: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getDetailsPaneOpen: fn(
            self: *const IScriptErrorList,
            pfDetailsPaneOpen: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        setDetailsPaneOpen: fn(
            self: *const IScriptErrorList,
            fDetailsPaneOpen: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        getPerErrorDisplay: fn(
            self: *const IScriptErrorList,
            pfPerErrorDisplay: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        setPerErrorDisplay: fn(
            self: *const IScriptErrorList,
            fPerErrorDisplay: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_advanceError(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).advanceError(@ptrCast(*const IScriptErrorList, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_retreatError(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).retreatError(@ptrCast(*const IScriptErrorList, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_canAdvanceError(self: *const T, pfCanAdvance: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).canAdvanceError(@ptrCast(*const IScriptErrorList, self), pfCanAdvance);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_canRetreatError(self: *const T, pfCanRetreat: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).canRetreatError(@ptrCast(*const IScriptErrorList, self), pfCanRetreat);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getErrorLine(self: *const T, plLine: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getErrorLine(@ptrCast(*const IScriptErrorList, self), plLine);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getErrorChar(self: *const T, plChar: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getErrorChar(@ptrCast(*const IScriptErrorList, self), plChar);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getErrorCode(self: *const T, plCode: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getErrorCode(@ptrCast(*const IScriptErrorList, self), plCode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getErrorMsg(self: *const T, pstr: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getErrorMsg(@ptrCast(*const IScriptErrorList, self), pstr);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getErrorUrl(self: *const T, pstr: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getErrorUrl(@ptrCast(*const IScriptErrorList, self), pstr);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getAlwaysShowLockState(self: *const T, pfAlwaysShowLocked: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getAlwaysShowLockState(@ptrCast(*const IScriptErrorList, self), pfAlwaysShowLocked);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getDetailsPaneOpen(self: *const T, pfDetailsPaneOpen: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getDetailsPaneOpen(@ptrCast(*const IScriptErrorList, self), pfDetailsPaneOpen);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_setDetailsPaneOpen(self: *const T, fDetailsPaneOpen: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).setDetailsPaneOpen(@ptrCast(*const IScriptErrorList, self), fDetailsPaneOpen);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_getPerErrorDisplay(self: *const T, pfPerErrorDisplay: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).getPerErrorDisplay(@ptrCast(*const IScriptErrorList, self), pfPerErrorDisplay);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IScriptErrorList_setPerErrorDisplay(self: *const T, fPerErrorDisplay: BOOL) callconv(.Inline) HRESULT
{
            return @ptrCast(*const IScriptErrorList.VTable, self.vtable).setPerErrorDisplay(@ptrCast(*const IScriptErrorList, self), fPerErrorDisplay);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

pub const JAVA_TRUST = extern struct {
    cbSize: u32,
    flag: u32,
    fAllActiveXPermissions: BOOL,
    fAllPermissions: BOOL,
    dwEncodingType: u32,
    pbJavaPermissions: ?*u8,
    cbJavaPermissions: u32,
    pbSigner: ?*u8,
    cbSigner: u32,
    pwszZone: ?[*:0]const u16,
    guidZone: Guid,
    hVerify: HRESULT,
};

pub const TDIEntityID = extern struct {
    tei_entity: TDIENTITY_ENTITY_TYPE,
    tei_instance: u32,
};

pub const TDIObjectID = extern struct {
    toi_entity: TDIEntityID,
    toi_class: u32,
    toi_type: u32,
    toi_id: u32,
};

pub const tcp_request_query_information_ex_xp = extern struct {
    ID: TDIObjectID,
    Context: [2]usize,
};

pub const tcp_request_query_information_ex_w2k = extern struct {
    ID: TDIObjectID,
    Context: [16]u8,
};

pub const tcp_request_set_information_ex = extern struct {
    ID: TDIObjectID,
    BufferSize: u32,
    Buffer: [1]u8,
};

pub const TDI_TL_IO_CONTROL_TYPE = enum(i32) {
    EndpointIoControlType = 0,
    SetSockOptIoControlType = 1,
    GetSockOptIoControlType = 2,
    SocketIoControlType = 3,
};
pub const EndpointIoControlType = TDI_TL_IO_CONTROL_TYPE.EndpointIoControlType;
pub const SetSockOptIoControlType = TDI_TL_IO_CONTROL_TYPE.SetSockOptIoControlType;
pub const GetSockOptIoControlType = TDI_TL_IO_CONTROL_TYPE.GetSockOptIoControlType;
pub const SocketIoControlType = TDI_TL_IO_CONTROL_TYPE.SocketIoControlType;

pub const TDI_TL_IO_CONTROL_ENDPOINT = extern struct {
    Type: TDI_TL_IO_CONTROL_TYPE,
    Level: u32,
    Anonymous: extern union {
        IoControlCode: u32,
        OptionName: u32,
    },
    InputBuffer: ?*c_void,
    InputBufferLength: u32,
    OutputBuffer: ?*c_void,
    OutputBufferLength: u32,
};

pub const WLDP_HOST = enum(i32) {
    RUNDLL32 = 0,
    SVCHOST = 1,
    MAX = 2,
};
pub const WLDP_HOST_RUNDLL32 = WLDP_HOST.RUNDLL32;
pub const WLDP_HOST_SVCHOST = WLDP_HOST.SVCHOST;
pub const WLDP_HOST_MAX = WLDP_HOST.MAX;

pub const WLDP_HOST_ID = enum(i32) {
    UNKNOWN = 0,
    GLOBAL = 1,
    VBA = 2,
    WSH = 3,
    POWERSHELL = 4,
    IE = 5,
    MSI = 6,
    ALL = 7,
    MAX = 8,
};
pub const WLDP_HOST_ID_UNKNOWN = WLDP_HOST_ID.UNKNOWN;
pub const WLDP_HOST_ID_GLOBAL = WLDP_HOST_ID.GLOBAL;
pub const WLDP_HOST_ID_VBA = WLDP_HOST_ID.VBA;
pub const WLDP_HOST_ID_WSH = WLDP_HOST_ID.WSH;
pub const WLDP_HOST_ID_POWERSHELL = WLDP_HOST_ID.POWERSHELL;
pub const WLDP_HOST_ID_IE = WLDP_HOST_ID.IE;
pub const WLDP_HOST_ID_MSI = WLDP_HOST_ID.MSI;
pub const WLDP_HOST_ID_ALL = WLDP_HOST_ID.ALL;
pub const WLDP_HOST_ID_MAX = WLDP_HOST_ID.MAX;

pub const DECISION_LOCATION = enum(i32) {
    REFRESH_GLOBAL_DATA = 0,
    PARAMETER_VALIDATION = 1,
    AUDIT = 2,
    FAILED_CONVERT_GUID = 3,
    ENTERPRISE_DEFINED_CLASS_ID = 4,
    GLOBAL_BUILT_IN_LIST = 5,
    PROVIDER_BUILT_IN_LIST = 6,
    ENFORCE_STATE_LIST = 7,
    NOT_FOUND = 8,
    UNKNOWN = 9,
};
pub const DECISION_LOCATION_REFRESH_GLOBAL_DATA = DECISION_LOCATION.REFRESH_GLOBAL_DATA;
pub const DECISION_LOCATION_PARAMETER_VALIDATION = DECISION_LOCATION.PARAMETER_VALIDATION;
pub const DECISION_LOCATION_AUDIT = DECISION_LOCATION.AUDIT;
pub const DECISION_LOCATION_FAILED_CONVERT_GUID = DECISION_LOCATION.FAILED_CONVERT_GUID;
pub const DECISION_LOCATION_ENTERPRISE_DEFINED_CLASS_ID = DECISION_LOCATION.ENTERPRISE_DEFINED_CLASS_ID;
pub const DECISION_LOCATION_GLOBAL_BUILT_IN_LIST = DECISION_LOCATION.GLOBAL_BUILT_IN_LIST;
pub const DECISION_LOCATION_PROVIDER_BUILT_IN_LIST = DECISION_LOCATION.PROVIDER_BUILT_IN_LIST;
pub const DECISION_LOCATION_ENFORCE_STATE_LIST = DECISION_LOCATION.ENFORCE_STATE_LIST;
pub const DECISION_LOCATION_NOT_FOUND = DECISION_LOCATION.NOT_FOUND;
pub const DECISION_LOCATION_UNKNOWN = DECISION_LOCATION.UNKNOWN;

pub const WLDP_KEY = enum(i32) {
    UNKNOWN = 0,
    OVERRIDE = 1,
    ALL_KEYS = 2,
};
pub const KEY_UNKNOWN = WLDP_KEY.UNKNOWN;
pub const KEY_OVERRIDE = WLDP_KEY.OVERRIDE;
pub const KEY_ALL_KEYS = WLDP_KEY.ALL_KEYS;

pub const VALUENAME = enum(i32) {
    UNKNOWN = 0,
    ENTERPRISE_DEFINED_CLASS_ID = 1,
    BUILT_IN_LIST = 2,
};
pub const VALUENAME_UNKNOWN = VALUENAME.UNKNOWN;
pub const VALUENAME_ENTERPRISE_DEFINED_CLASS_ID = VALUENAME.ENTERPRISE_DEFINED_CLASS_ID;
pub const VALUENAME_BUILT_IN_LIST = VALUENAME.BUILT_IN_LIST;

pub const WLDP_WINDOWS_LOCKDOWN_MODE = enum(i32) {
    UNLOCKED = 0,
    TRIAL = 1,
    LOCKED = 2,
    MAX = 3,
};
pub const WLDP_WINDOWS_LOCKDOWN_MODE_UNLOCKED = WLDP_WINDOWS_LOCKDOWN_MODE.UNLOCKED;
pub const WLDP_WINDOWS_LOCKDOWN_MODE_TRIAL = WLDP_WINDOWS_LOCKDOWN_MODE.TRIAL;
pub const WLDP_WINDOWS_LOCKDOWN_MODE_LOCKED = WLDP_WINDOWS_LOCKDOWN_MODE.LOCKED;
pub const WLDP_WINDOWS_LOCKDOWN_MODE_MAX = WLDP_WINDOWS_LOCKDOWN_MODE.MAX;

pub const WLDP_WINDOWS_LOCKDOWN_RESTRICTION = enum(i32) {
    NONE = 0,
    NOUNLOCK = 1,
    NOUNLOCK_PERMANENT = 2,
    MAX = 3,
};
pub const WLDP_WINDOWS_LOCKDOWN_RESTRICTION_NONE = WLDP_WINDOWS_LOCKDOWN_RESTRICTION.NONE;
pub const WLDP_WINDOWS_LOCKDOWN_RESTRICTION_NOUNLOCK = WLDP_WINDOWS_LOCKDOWN_RESTRICTION.NOUNLOCK;
pub const WLDP_WINDOWS_LOCKDOWN_RESTRICTION_NOUNLOCK_PERMANENT = WLDP_WINDOWS_LOCKDOWN_RESTRICTION.NOUNLOCK_PERMANENT;
pub const WLDP_WINDOWS_LOCKDOWN_RESTRICTION_MAX = WLDP_WINDOWS_LOCKDOWN_RESTRICTION.MAX;

pub const WLDP_HOST_INFORMATION = extern struct {
    dwRevision: u32,
    dwHostId: WLDP_HOST_ID,
    szSource: ?[*:0]const u16,
    hSource: ?HANDLE,
};

pub const PWLDP_SETDYNAMICCODETRUST_API = fn(
    hFileHandle: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub const PWLDP_ISDYNAMICCODEPOLICYENABLED_API = fn(
    pbEnabled: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

// NOTE: name typo ("QUERYDYNAMICODETRUST") comes from the upstream metadata; kept for compatibility.
pub const PWLDP_QUERYDYNAMICODETRUST_API = fn(
    fileHandle: ?HANDLE,
    // TODO: what to do with BytesParamIndex 2?
baseImage: ?*c_void,
    imageSize: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub const PWLDP_QUERYWINDOWSLOCKDOWNMODE_API = fn(
    lockdownMode: ?*WLDP_WINDOWS_LOCKDOWN_MODE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub const PWLDP_QUERYWINDOWSLOCKDOWNRESTRICTION_API = fn(
    LockdownRestriction: ?*WLDP_WINDOWS_LOCKDOWN_RESTRICTION,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub const PWLDP_SETWINDOWSLOCKDOWNRESTRICTION_API = fn(
    LockdownRestriction: WLDP_WINDOWS_LOCKDOWN_RESTRICTION,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub const PWLDP_WLDPISAPPAPPROVEDBYPOLICY_API = fn(
    PackageFamilyName: ?[*:0]const u16,
    PackageVersion: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub const tcp_request_query_information_ex32_xp = switch(@import("../zig.zig").arch) {
    .X64, .Arm64 => extern struct {
        ID: TDIObjectID,
        Context: [4]u32,
    },
    else => usize, // NOTE: this should be a @compileError but can't because of https://github.com/ziglang/zig/issues/9682
};

//--------------------------------------------------------------------------------
// Section: Functions (226)
//--------------------------------------------------------------------------------
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_lstrcmpW(
            String1: ?*u16,
            String2: ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) i32;
    },
    else => struct { },
};
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_lstrcmpiW(
            String1: ?*u16,
            String2: ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) i32;
    },
    else => struct { },
};
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_lstrlenW(
            String: ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) i32;
    },
    else => struct { },
};
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_wcschr(
            String: ?*u16,
            Character: u16,
        ) callconv(@import("std").os.windows.WINAPI) ?*u16;
    },
    else => struct { },
};
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_wcscpy(
            Destination: ?*u16,
            Source: ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) ?*u16;
    },
    else => struct { },
};
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_wcsicmp(
            String1: ?*u16,
            String2: ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) i32;
    },
    else => struct { },
};
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_wcslen(
            String: ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) usize;
    },
    else => struct { },
};
pub usingnamespace switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => struct {
        pub extern "KERNEL32" fn uaw_wcsrchr(
            String: ?*u16,
            Character: u16,
        ) callconv(@import("std").os.windows.WINAPI) ?*u16;
    },
    else => struct { },
};
pub extern "api-ms-win-core-apiquery-l2-1-0" fn IsApiSetImplemented(
    Contract: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn QueryThreadCycleTime(
    ThreadHandle: ?HANDLE,
    CycleTime: ?*u64,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn QueryProcessCycleTime(
    ProcessHandle: ?HANDLE,
    CycleTime: ?*u64,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn QueryIdleProcessorCycleTime(
    BufferLength: ?*u32,
    // TODO: what to do with BytesParamIndex 0?
    ProcessorIdleCycleTime: ?*u64,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.1'
pub extern "KERNEL32" fn QueryIdleProcessorCycleTimeEx(
    Group: u16,
    BufferLength: ?*u32,
    // TODO: what to do with BytesParamIndex 1?
ProcessorIdleCycleTime: ?*u64,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "api-ms-win-core-realtime-l1-1-1" fn QueryInterruptTimePrecise(
    lpInterruptTimePrecise: ?*u64,
) callconv(@import("std").os.windows.WINAPI) void;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "api-ms-win-core-realtime-l1-1-1" fn QueryUnbiasedInterruptTimePrecise(
    lpUnbiasedInterruptTimePrecise: ?*u64,
) callconv(@import("std").os.windows.WINAPI) void;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "api-ms-win-core-realtime-l1-1-1" fn QueryInterruptTime(
    lpInterruptTime: ?*u64,
) callconv(@import("std").os.windows.WINAPI) void;

// TODO: this type is limited to platform 'windows6.1'
pub extern "KERNEL32" fn QueryUnbiasedInterruptTime(
    UnbiasedTime: ?*u64,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows10.0.15063'
pub extern "api-ms-win-core-realtime-l1-1-2" fn QueryAuxiliaryCounterFrequency(
    lpAuxiliaryCounterFrequency: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

// TODO: this type is limited to platform 'windows10.0.15063'
pub extern "api-ms-win-core-realtime-l1-1-2" fn ConvertAuxiliaryCounterToPerformanceCounter(
    ullAuxiliaryCounterValue: u64,
    lpPerformanceCounterValue: ?*u64,
    lpConversionError: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

// TODO: this type is limited to platform 'windows10.0.15063'
pub extern "api-ms-win-core-realtime-l1-1-2" fn ConvertPerformanceCounterToAuxiliaryCounter(
    ullPerformanceCounterValue: u64,
    lpAuxiliaryCounterValue: ?*u64,
    lpConversionError: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "KERNEL32" fn GlobalCompact(
    dwMinFree: u32,
) callconv(@import("std").os.windows.WINAPI) usize;

pub extern "KERNEL32" fn GlobalFix(
    hMem: isize,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "KERNEL32" fn GlobalUnfix(
    hMem: isize,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "KERNEL32" fn GlobalWire(
    hMem: isize,
) callconv(@import("std").os.windows.WINAPI) ?*c_void;

pub extern "KERNEL32" fn GlobalUnWire(
    hMem: isize,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn LocalShrink(
    hMem: isize,
    cbNewSize: u32,
) callconv(@import("std").os.windows.WINAPI) usize;

pub extern "KERNEL32" fn LocalCompact(
    uMinFree: u32,
) callconv(@import("std").os.windows.WINAPI) usize;

pub extern "KERNEL32" fn SetEnvironmentStringsA(
    NewEnvironment: ?[*]u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn SetHandleCount(
    uNumber: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "KERNEL32" fn RequestDeviceWakeup(
    hDevice: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn CancelDeviceWakeupRequest(
    hDevice: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn SetMessageWaitingIndicator(
    hMsgIndicator: ?HANDLE,
    ulMsgCount: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "KERNEL32" fn MulDiv(
    nNumber: i32,
    nNumerator: i32,
    nDenominator: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetSystemRegistryQuota(
    pdwQuotaAllowed: ?*u32,
    pdwQuotaUsed: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn FileTimeToDosDateTime(
    lpFileTime: ?*const FILETIME,
    lpFatDate: ?*u16,
    lpFatTime: ?*u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn DosDateTimeToFileTime(
    wFatDate: u16,
    wFatTime: u16,
    lpFileTime: ?*FILETIME,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn _lopen(
    lpPathName: ?[*:0]const u8,
    iReadWrite: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "KERNEL32" fn _lcreat(
    lpPathName: ?[*:0]const u8,
    iAttribute: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "KERNEL32" fn _lread(
    hFile: i32,
    // TODO: what to do with BytesParamIndex 2?
    lpBuffer: ?*c_void,
    uBytes: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "KERNEL32" fn _lwrite(
    hFile: i32,
    // TODO: what to do with BytesParamIndex 2?
    lpBuffer: ?[*]const u8,
    uBytes: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "KERNEL32" fn _hread(
    hFile: i32,
    // TODO: what to do with BytesParamIndex 2?
    lpBuffer: ?*c_void,
    lBytes: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "KERNEL32" fn _hwrite(
    hFile: i32,
    // TODO: what to do with BytesParamIndex 2?
    lpBuffer: ?[*]const u8,
    lBytes: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "KERNEL32" fn _lclose(
    hFile: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "KERNEL32" fn _llseek(
    hFile: i32,
    lOffset: i32,
    iOrigin: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "KERNEL32" fn SignalObjectAndWait(
    hObjectToSignal: ?HANDLE,
    hObjectToWaitOn: ?HANDLE,
    dwMilliseconds: u32,
    bAlertable: BOOL,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "KERNEL32" fn OpenMutexA(
    dwDesiredAccess: u32,
    bInheritHandle: BOOL,
    lpName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;

pub extern "KERNEL32" fn OpenSemaphoreA(
    dwDesiredAccess: u32,
    bInheritHandle: BOOL,
    lpName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;

pub extern "KERNEL32" fn CreateWaitableTimerA(
    lpTimerAttributes: ?*SECURITY_ATTRIBUTES,
    bManualReset: BOOL,
    lpTimerName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;

pub extern "KERNEL32" fn OpenWaitableTimerA(
    dwDesiredAccess: u32,
    bInheritHandle: BOOL,
    lpTimerName: ?[*:0]const u8,
)
callconv(@import("std").os.windows.WINAPI) ?HANDLE;

pub extern "KERNEL32" fn CreateWaitableTimerExA(
    lpTimerAttributes: ?*SECURITY_ATTRIBUTES,
    lpTimerName: ?[*:0]const u8,
    dwFlags: u32,
    dwDesiredAccess: u32,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;

pub extern "KERNEL32" fn GetStartupInfoA(
    lpStartupInfo: ?*STARTUPINFOA,
) callconv(@import("std").os.windows.WINAPI) void;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetFirmwareEnvironmentVariableA(
    lpName: ?[*:0]const u8,
    lpGuid: ?[*:0]const u8,
    // TODO: what to do with BytesParamIndex 3?
    pBuffer: ?*c_void,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetFirmwareEnvironmentVariableW(
    lpName: ?[*:0]const u16,
    lpGuid: ?[*:0]const u16,
    // TODO: what to do with BytesParamIndex 3?
    pBuffer: ?*c_void,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows8.0'
// NOTE: "pdwAttribubutes" typo matches the Windows SDK header; kept for fidelity.
pub extern "KERNEL32" fn GetFirmwareEnvironmentVariableExA(
    lpName: ?[*:0]const u8,
    lpGuid: ?[*:0]const u8,
    // TODO: what to do with BytesParamIndex 3?
    pBuffer: ?*c_void,
    nSize: u32,
    pdwAttribubutes: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetFirmwareEnvironmentVariableExW(
    lpName: ?[*:0]const u16,
    lpGuid: ?[*:0]const u16,
    // TODO: what to do with BytesParamIndex 3?
    pBuffer: ?*c_void,
    nSize: u32,
    pdwAttribubutes: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn SetFirmwareEnvironmentVariableA(
    lpName: ?[*:0]const u8,
    lpGuid: ?[*:0]const u8,
    // TODO: what to do with BytesParamIndex 3?
    pValue: ?*c_void,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn SetFirmwareEnvironmentVariableW(
    lpName: ?[*:0]const u16,
    lpGuid: ?[*:0]const u16,
    // TODO: what to do with BytesParamIndex 3?
    pValue: ?*c_void,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn SetFirmwareEnvironmentVariableExA(
    lpName: ?[*:0]const u8,
    lpGuid: ?[*:0]const u8,
    // TODO: what to do with BytesParamIndex 3?
    pValue: ?*c_void,
    nSize: u32,
    dwAttributes: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn SetFirmwareEnvironmentVariableExW(
    lpName: ?[*:0]const u16,
    lpGuid: ?[*:0]const u16,
    // TODO: what to do with BytesParamIndex 3?
    pValue: ?*c_void,
    nSize: u32,
    dwAttributes: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn IsNativeVhdBoot(
    NativeVhdBoot: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetProfileIntA(
    lpAppName: ?[*:0]const u8,
    lpKeyName: ?[*:0]const u8,
    nDefault: i32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetProfileIntW(
    lpAppName: ?[*:0]const u16,
    lpKeyName: ?[*:0]const u16,
    nDefault: i32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetProfileStringA(
    lpAppName: ?[*:0]const u8,
    lpKeyName: ?[*:0]const u8,
    lpDefault: ?[*:0]const u8,
    lpReturnedString: ?[*:0]u8,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetProfileStringW(
    lpAppName: ?[*:0]const u16,
    lpKeyName: ?[*:0]const u16,
    lpDefault: ?[*:0]const u16,
    lpReturnedString: ?[*:0]u16,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WriteProfileStringA(
    lpAppName: ?[*:0]const u8,
    lpKeyName: ?[*:0]const u8,
    lpString: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WriteProfileStringW(
    lpAppName: ?[*:0]const u16,
    lpKeyName: ?[*:0]const u16,
    lpString: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetProfileSectionA(
    lpAppName: ?[*:0]const u8,
    lpReturnedString: ?[*:0]u8,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetProfileSectionW(
    lpAppName: ?[*:0]const u16,
    lpReturnedString: ?[*:0]u16,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WriteProfileSectionA(
    lpAppName: ?[*:0]const u8,
    lpString: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WriteProfileSectionW(
    lpAppName: ?[*:0]const u16,
    lpString: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileIntA(
    lpAppName: ?[*:0]const u8,
    lpKeyName: ?[*:0]const u8,
    nDefault: i32,
    lpFileName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileIntW(
    lpAppName: ?[*:0]const u16,
    lpKeyName: ?[*:0]const u16,
    nDefault: i32,
    lpFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileStringA(
    lpAppName: ?[*:0]const u8,
    lpKeyName: ?[*:0]const u8,
    lpDefault: ?[*:0]const u8,
    lpReturnedString: ?[*:0]u8,
    nSize: u32,
    lpFileName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileStringW(
    lpAppName: ?[*:0]const u16,
    lpKeyName: ?[*:0]const u16,
    lpDefault: ?[*:0]const u16,
    lpReturnedString: ?[*:0]u16,
    nSize: u32,
    lpFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WritePrivateProfileStringA(
    lpAppName: ?[*:0]const u8,
    lpKeyName: ?[*:0]const u8,
    lpString: ?[*:0]const u8,
    lpFileName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WritePrivateProfileStringW(
    lpAppName: ?[*:0]const u16,
    lpKeyName: ?[*:0]const u16,
    lpString: ?[*:0]const u16,
    lpFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileSectionA(
    lpAppName: ?[*:0]const u8,
    lpReturnedString: ?[*:0]u8,
    nSize: u32,
    lpFileName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileSectionW(
    lpAppName: ?[*:0]const u16,
    lpReturnedString: ?[*:0]u16,
    nSize: u32,
    lpFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WritePrivateProfileSectionA(
    lpAppName: ?[*:0]const u8,
    lpString: ?[*:0]const u8,
    lpFileName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WritePrivateProfileSectionW(
    lpAppName: ?[*:0]const
u16,
    lpString: ?[*:0]const u16,
    lpFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileSectionNamesA(
    lpszReturnBuffer: ?[*:0]u8,
    nSize: u32,
    lpFileName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileSectionNamesW(
    lpszReturnBuffer: ?[*:0]u16,
    nSize: u32,
    lpFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileStructA(
    lpszSection: ?[*:0]const u8,
    lpszKey: ?[*:0]const u8,
    // TODO: what to do with BytesParamIndex 3?
    lpStruct: ?*c_void,
    uSizeStruct: u32,
    szFile: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetPrivateProfileStructW(
    lpszSection: ?[*:0]const u16,
    lpszKey: ?[*:0]const u16,
    // TODO: what to do with BytesParamIndex 3?
    lpStruct: ?*c_void,
    uSizeStruct: u32,
    szFile: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WritePrivateProfileStructA(
    lpszSection: ?[*:0]const u8,
    lpszKey: ?[*:0]const u8,
    // TODO: what to do with BytesParamIndex 3?
    lpStruct: ?*c_void,
    uSizeStruct: u32,
    szFile: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WritePrivateProfileStructW(
    lpszSection: ?[*:0]const u16,
    lpszKey: ?[*:0]const u16,
    // TODO: what to do with BytesParamIndex 3?
    lpStruct: ?*c_void,
    uSizeStruct: u32,
    szFile: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn IsBadHugeReadPtr(
    lp: ?*const c_void,
    ucb: usize,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn IsBadHugeWritePtr(
    lp: ?*c_void,
    ucb: usize,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetComputerNameA(
    lpBuffer: ?[*:0]u8,
    nSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetComputerNameW(
    lpBuffer: ?[*:0]u16,
    nSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn DnsHostnameToComputerNameA(
    Hostname: ?[*:0]const u8,
    ComputerName: ?[*:0]u8,
    nSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn DnsHostnameToComputerNameW(
    Hostname: ?[*:0]const u16,
    ComputerName: ?[*:0]u16,
    nSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "ADVAPI32" fn GetUserNameA(
    lpBuffer: ?[*:0]u8,
    pcbBuffer: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "ADVAPI32" fn GetUserNameW(
    lpBuffer: ?[*:0]u16,
    pcbBuffer: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "ADVAPI32" fn IsTokenUntrusted(
    TokenHandle: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn SetTimerQueueTimer(
    TimerQueue: ?HANDLE,
    Callback: ?WAITORTIMERCALLBACK,
    Parameter: ?*c_void,
    DueTime: u32,
    Period: u32,
    PreferIo: BOOL,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;

pub extern "KERNEL32" fn CancelTimerQueueTimer(
    TimerQueue: ?HANDLE,
    Timer: ?HANDLE,
)
callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "ADVAPI32" fn GetCurrentHwProfileA(
    lpHwProfileInfo: ?*HW_PROFILE_INFOA,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "ADVAPI32" fn GetCurrentHwProfileW(
    lpHwProfileInfo: ?*HW_PROFILE_INFOW,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn CreateJobSet(
    NumJob: u32,
    UserJobSet: [*]JOB_SET_ARRAY,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn ReplacePartitionUnit(
    TargetPartition: ?PWSTR,
    SparePartition: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn InitializeContext2(
    // TODO: what to do with BytesParamIndex 3?
    Buffer: ?*c_void,
    ContextFlags: u32,
    Context: ?*?*CONTEXT,
    ContextLength: ?*u32,
    XStateCompactionMask: u64,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "api-ms-win-core-backgroundtask-l1-1-0" fn RaiseCustomSystemEventTrigger(
    CustomSystemEventTriggerConfig: ?*CUSTOM_SYSTEM_EVENT_TRIGGER_CONFIG,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "ntdll" fn NtClose(
    Handle: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn NtCreateFile(
    FileHandle: ?*?HANDLE,
    DesiredAccess: u32,
    ObjectAttributes: ?*OBJECT_ATTRIBUTES,
    IoStatusBlock: ?*IO_STATUS_BLOCK,
    AllocationSize: ?*LARGE_INTEGER,
    FileAttributes: u32,
    ShareAccess: FILE_SHARE_MODE,
    CreateDisposition: NT_CREATE_FILE_DISPOSITION,
    CreateOptions: u32,
    EaBuffer: ?*c_void,
    EaLength: u32,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn NtOpenFile(
    FileHandle: ?*?HANDLE,
    DesiredAccess: u32,
    ObjectAttributes: ?*OBJECT_ATTRIBUTES,
    IoStatusBlock: ?*IO_STATUS_BLOCK,
    ShareAccess: u32,
    OpenOptions: u32,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn NtRenameKey(
    KeyHandle: ?HANDLE,
    NewName: ?*UNICODE_STRING,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn NtNotifyChangeMultipleKeys(
    MasterKeyHandle: ?HANDLE,
    Count: u32,
    SubordinateObjects: ?[*]OBJECT_ATTRIBUTES,
    Event: ?HANDLE,
    ApcRoutine: ?PIO_APC_ROUTINE,
    ApcContext: ?*c_void,
    IoStatusBlock: ?*IO_STATUS_BLOCK,
    CompletionFilter: u32,
    WatchTree: BOOLEAN,
    // TODO: what to do with BytesParamIndex 10?
    Buffer: ?*c_void,
    BufferSize: u32,
    Asynchronous: BOOLEAN,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn NtQueryMultipleValueKey(
    KeyHandle: ?HANDLE,
    ValueEntries: [*]KEY_VALUE_ENTRY,
    EntryCount: u32,
    // TODO: what to do with BytesParamIndex 4?
    ValueBuffer: ?*c_void,
    BufferLength: ?*u32,
    RequiredBufferLength: ?*u32,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn NtSetInformationKey(
    KeyHandle: ?HANDLE,
    KeySetInformationClass: KEY_SET_INFORMATION_CLASS,
    // TODO: what to do with BytesParamIndex 3?
    KeySetInformation: ?*c_void,
    KeySetInformationLength: u32,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

// TODO: this type is limited to platform 'windows5.0'
pub extern "ntdll" fn NtDeviceIoControlFile(
    FileHandle: ?HANDLE,
    Event: ?HANDLE,
    ApcRoutine: ?PIO_APC_ROUTINE,
    ApcContext: ?*c_void,
    IoStatusBlock: ?*IO_STATUS_BLOCK,
    IoControlCode: u32,
    InputBuffer: ?*c_void,
    InputBufferLength: u32,
    OutputBuffer: ?*c_void,
    OutputBufferLength: u32,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

// TODO: this type is limited to platform 'windows5.0'
pub extern "ntdll" fn NtWaitForSingleObject(
    Handle: ?HANDLE,
    Alertable: BOOLEAN,
    Timeout: ?*LARGE_INTEGER,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn RtlIsNameLegalDOS8Dot3(
    Name: ?*UNICODE_STRING,
    OemName: ?*STRING,
    NameContainsSpaces: ?*BOOLEAN,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;

pub extern "ntdll" fn NtQueryObject(
    Handle: ?HANDLE,
    ObjectInformationClass:
OBJECT_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? ObjectInformation: ?*c_void, ObjectInformationLength: u32, ReturnLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; pub extern "ntdll" fn NtQuerySystemInformation( SystemInformationClass: SYSTEM_INFORMATION_CLASS, SystemInformation: ?*c_void, SystemInformationLength: u32, ReturnLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; pub extern "ntdll" fn NtQuerySystemTime( SystemTime: ?*LARGE_INTEGER, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; pub extern "ntdll" fn RtlLocalTimeToSystemTime( LocalTime: ?*LARGE_INTEGER, SystemTime: ?*LARGE_INTEGER, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; pub extern "ntdll" fn RtlTimeToSecondsSince1970( Time: ?*LARGE_INTEGER, ElapsedSeconds: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOLEAN; // TODO: this type is limited to platform 'windows5.0' pub extern "ntdll" fn RtlFreeAnsiString( AnsiString: ?*STRING, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "ntdll" fn RtlFreeUnicodeString( UnicodeString: ?*UNICODE_STRING, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "ntdll" fn RtlFreeOemString( OemString: ?*STRING, ) callconv(@import("std").os.windows.WINAPI) void; pub extern "ntdll" fn RtlInitString( DestinationString: ?*STRING, SourceString: ?*i8, ) callconv(@import("std").os.windows.WINAPI) void; pub extern "ntdll" fn RtlInitStringEx( DestinationString: ?*STRING, SourceString: ?*i8, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; pub extern "ntdll" fn RtlInitAnsiString( DestinationString: ?*STRING, SourceString: ?*i8, ) callconv(@import("std").os.windows.WINAPI) void; pub extern "ntdll" fn RtlInitAnsiStringEx( DestinationString: ?*STRING, SourceString: ?*i8, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; // TODO: this type is limited to 
platform 'windows5.0' pub extern "ntdll" fn RtlInitUnicodeString( DestinationString: ?*UNICODE_STRING, SourceString: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "ntdll" fn RtlAnsiStringToUnicodeString( DestinationString: ?*UNICODE_STRING, SourceString: ?*STRING, AllocateDestinationString: BOOLEAN, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; // TODO: this type is limited to platform 'windows5.0' pub extern "ntdll" fn RtlUnicodeStringToAnsiString( DestinationString: ?*STRING, SourceString: ?*UNICODE_STRING, AllocateDestinationString: BOOLEAN, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; // TODO: this type is limited to platform 'windows5.0' pub extern "ntdll" fn RtlUnicodeStringToOemString( DestinationString: ?*STRING, SourceString: ?*UNICODE_STRING, AllocateDestinationString: BOOLEAN, ) callconv(@import("std").os.windows.WINAPI) NTSTATUS; // TODO: this type is limited to platform 'windows5.0' pub extern "ntdll" fn RtlUnicodeToMultiByteSize( BytesInMultiByteString: ?*u32, // TODO: what to do with BytesParamIndex 2? 
    UnicodeString: ?[*]u16,
    BytesInUnicodeString: u32,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

// NOTE(review): machine-generated bindings; reformatted for readability only,
// declarations unchanged.

// TODO: this type is limited to platform 'windows5.0'
pub extern "ntdll" fn RtlCharToInteger(
    String: ?*i8,
    Base: u32,
    Value: ?*u32,
) callconv(@import("std").os.windows.WINAPI) NTSTATUS;

pub extern "ntdll" fn RtlUniform(
    Seed: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "api-ms-win-core-featurestaging-l1-1-0" fn GetFeatureEnabledState(
    featureId: u32,
    changeTime: FEATURE_CHANGE_TIME,
) callconv(@import("std").os.windows.WINAPI) FEATURE_ENABLED_STATE;

pub extern "api-ms-win-core-featurestaging-l1-1-0" fn RecordFeatureUsage(
    featureId: u32,
    kind: u32,
    addend: u32,
    originName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "api-ms-win-core-featurestaging-l1-1-0" fn RecordFeatureError(
    featureId: u32,
    @"error": ?*const FEATURE_ERROR,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "api-ms-win-core-featurestaging-l1-1-0" fn SubscribeFeatureStateChangeNotification(
    subscription: ?*FEATURE_STATE_CHANGE_SUBSCRIPTION,
    callback: ?PFEATURE_STATE_CHANGE_CALLBACK,
    context: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "api-ms-win-core-featurestaging-l1-1-0" fn UnsubscribeFeatureStateChangeNotification(
    subscription: FEATURE_STATE_CHANGE_SUBSCRIPTION,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "api-ms-win-core-featurestaging-l1-1-1" fn GetFeatureVariant(
    featureId: u32,
    changeTime: FEATURE_CHANGE_TIME,
    payloadId: ?*u32,
    hasNotification: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "DCIMAN32" fn DCIOpenProvider(
) callconv(@import("std").os.windows.WINAPI) ?HDC;

// TODO: this type is limited to platform 'windows5.0'
pub extern "DCIMAN32" fn DCICloseProvider(
    hdc: ?HDC,
) callconv(@import("std").os.windows.WINAPI) void;

// TODO: this type is limited to platform 'windows5.0'
pub extern "DCIMAN32" fn DCICreatePrimary(
    hdc: ?HDC,
    lplpSurface: ?*?*DCISURFACEINFO,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "DCIMAN32" fn DCICreateOffscreen(
    hdc: ?HDC,
    dwCompression: u32,
    dwRedMask: u32,
    dwGreenMask: u32,
    dwBlueMask: u32,
    dwWidth: u32,
    dwHeight: u32,
    dwDCICaps: u32,
    dwBitCount: u32,
    lplpSurface: ?*?*DCIOFFSCREEN,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "DCIMAN32" fn DCICreateOverlay(
    hdc: ?HDC,
    lpOffscreenSurf: ?*c_void,
    lplpSurface: ?*?*DCIOVERLAY,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "DCIMAN32" fn DCIEnum(
    hdc: ?HDC,
    lprDst: ?*RECT,
    lprSrc: ?*RECT,
    lpFnCallback: ?*c_void,
    lpContext: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "DCIMAN32" fn DCISetSrcDestClip(
    pdci: ?*DCIOFFSCREEN,
    srcrc: ?*RECT,
    destrc: ?*RECT,
    prd: ?*RGNDATA,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "DCIMAN32" fn WinWatchOpen(
    hwnd: ?HWND,
) callconv(@import("std").os.windows.WINAPI) ?HWINWATCH;

pub extern "DCIMAN32" fn WinWatchClose(
    hWW: ?HWINWATCH,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "DCIMAN32" fn WinWatchGetClipList(
    hWW: ?HWINWATCH,
    prc: ?*RECT,
    size: u32,
    prd: ?*RGNDATA,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "DCIMAN32" fn WinWatchDidStatusChange(
    hWW: ?HWINWATCH,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "DCIMAN32" fn GetWindowRegionData(
    hwnd: ?HWND,
    size: u32,
    prd: ?*RGNDATA,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "DCIMAN32" fn GetDCRegionData(
    hdc: ?HDC,
    size: u32,
    prd: ?*RGNDATA,
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "DCIMAN32" fn WinWatchNotify(
    hWW: ?HWINWATCH,
    NotifyCallback: ?WINWATCHNOTIFYPROC,
    NotifyParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "DCIMAN32" fn DCIEndAccess(
    pdci: ?*DCISURFACEINFO,
) callconv(@import("std").os.windows.WINAPI) void;

// TODO: this type is limited to platform 'windows5.0'
pub extern "DCIMAN32" fn DCIBeginAccess(
    pdci: ?*DCISURFACEINFO,
    x: i32,
    y: i32,
    dx: i32,
    dy: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "DCIMAN32" fn DCIDestroy(
    pdci: ?*DCISURFACEINFO,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "DCIMAN32" fn DCIDraw(
    pdci: ?*DCIOFFSCREEN,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "DCIMAN32" fn DCISetClipList(
    pdci: ?*DCIOFFSCREEN,
    prd: ?*RGNDATA,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "DCIMAN32" fn DCISetDestination(
    pdci: ?*DCIOFFSCREEN,
    dst: ?*RECT,
    src: ?*RECT,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "api-ms-win-dx-d3dkmt-l1-1-0" fn GdiEntry13(
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "ADVPACK" fn RunSetupCommandA(
    hWnd: ?HWND,
    szCmdName: ?[*:0]const u8,
    szInfSection: ?[*:0]const u8,
    szDir: ?[*:0]const u8,
    lpszTitle: ?[*:0]const u8,
    phEXE: ?*?HANDLE,
    dwFlags: u32,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RunSetupCommandW(
    hWnd: ?HWND,
    szCmdName: ?[*:0]const u16,
    szInfSection: ?[*:0]const u16,
    szDir: ?[*:0]const u16,
    lpszTitle: ?[*:0]const u16,
    phEXE: ?*?HANDLE,
    dwFlags: u32,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn NeedRebootInit(
) callconv(@import("std").os.windows.WINAPI) u32;

pub extern "ADVPACK" fn NeedReboot(
    dwRebootCheck: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "ADVPACK" fn RebootCheckOnInstallA(
    hwnd: ?HWND,
    pszINF: ?[*:0]const u8,
    pszSec: ?[*:0]const u8,
    dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RebootCheckOnInstallW(
    hwnd: ?HWND,
    pszINF: ?[*:0]const u16,
    pszSec: ?[*:0]const u16,
    dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn TranslateInfStringA(
    pszInfFilename:
    ?[*:0]const u8,
    pszInstallSection: ?[*:0]const u8,
    pszTranslateSection: ?[*:0]const u8,
    pszTranslateKey: ?[*:0]const u8,
    pszBuffer: ?[*:0]u8,
    cchBuffer: u32,
    pdwRequiredSize: ?*u32,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

// NOTE(review): machine-generated ADVPACK bindings; reformatted for
// readability only, declarations unchanged.

pub extern "ADVPACK" fn TranslateInfStringW(
    pszInfFilename: ?[*:0]const u16,
    pszInstallSection: ?[*:0]const u16,
    pszTranslateSection: ?[*:0]const u16,
    pszTranslateKey: ?[*:0]const u16,
    pszBuffer: ?[*:0]u16,
    cchBuffer: u32,
    pdwRequiredSize: ?*u32,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "ADVPACK" fn RegInstallA(
    hmod: ?HINSTANCE,
    pszSection: ?[*:0]const u8,
    pstTable: ?*const STRTABLEA,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "ADVPACK" fn RegInstallW(
    hmod: ?HINSTANCE,
    pszSection: ?[*:0]const u16,
    pstTable: ?*const STRTABLEW,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn LaunchINFSectionExW(
    hwnd: ?HWND,
    hInstance: ?HINSTANCE,
    pszParms: ?PWSTR,
    nShow: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn ExecuteCabA(
    hwnd: ?HWND,
    pCab: ?*CABINFOA,
    pReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn ExecuteCabW(
    hwnd: ?HWND,
    pCab: ?*CABINFOW,
    pReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn AdvInstallFileA(
    hwnd: ?HWND,
    lpszSourceDir: ?[*:0]const u8,
    lpszSourceFile: ?[*:0]const u8,
    lpszDestDir: ?[*:0]const u8,
    lpszDestFile: ?[*:0]const u8,
    dwFlags: u32,
    dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn AdvInstallFileW(
    hwnd: ?HWND,
    lpszSourceDir: ?[*:0]const u16,
    lpszSourceFile: ?[*:0]const u16,
    lpszDestDir: ?[*:0]const u16,
    lpszDestFile: ?[*:0]const u16,
    dwFlags: u32,
    dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RegSaveRestoreA(
    hWnd: ?HWND,
    pszTitleString: ?[*:0]const u8,
    hkBckupKey: ?HKEY,
    pcszRootKey: ?[*:0]const u8,
    pcszSubKey: ?[*:0]const u8,
    pcszValueName: ?[*:0]const u8,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RegSaveRestoreW(
    hWnd: ?HWND,
    pszTitleString: ?[*:0]const u16,
    hkBckupKey: ?HKEY,
    pcszRootKey: ?[*:0]const u16,
    pcszSubKey: ?[*:0]const u16,
    pcszValueName: ?[*:0]const u16,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RegSaveRestoreOnINFA(
    hWnd: ?HWND,
    pszTitle: ?[*:0]const u8,
    pszINF: ?[*:0]const u8,
    pszSection: ?[*:0]const u8,
    hHKLMBackKey: ?HKEY,
    hHKCUBackKey: ?HKEY,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RegSaveRestoreOnINFW(
    hWnd: ?HWND,
    pszTitle: ?[*:0]const u16,
    pszINF: ?[*:0]const u16,
    pszSection: ?[*:0]const u16,
    hHKLMBackKey: ?HKEY,
    hHKCUBackKey: ?HKEY,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RegRestoreAllA(
    hWnd: ?HWND,
    pszTitleString: ?[*:0]const u8,
    hkBckupKey: ?HKEY,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn RegRestoreAllW(
    hWnd: ?HWND,
    pszTitleString: ?[*:0]const u16,
    hkBckupKey: ?HKEY,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn FileSaveRestoreW(
    hDlg: ?HWND,
    lpFileList: ?PWSTR,
    lpDir: ?[*:0]const u16,
    lpBaseName: ?[*:0]const u16,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn FileSaveRestoreOnINFA(
    hWnd: ?HWND,
    pszTitle: ?[*:0]const u8,
    pszINF: ?[*:0]const u8,
    pszSection: ?[*:0]const u8,
    pszBackupDir: ?[*:0]const u8,
    pszBaseBackupFile: ?[*:0]const u8,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn FileSaveRestoreOnINFW(
    hWnd: ?HWND,
    pszTitle: ?[*:0]const u16,
    pszINF: ?[*:0]const u16,
    pszSection: ?[*:0]const u16,
    pszBackupDir: ?[*:0]const u16,
    pszBaseBackupFile: ?[*:0]const u16,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn AddDelBackupEntryA(
    lpcszFileList: ?[*:0]const u8,
    lpcszBackupDir: ?[*:0]const u8,
    lpcszBaseName: ?[*:0]const u8,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn AddDelBackupEntryW(
    lpcszFileList: ?[*:0]const u16,
    lpcszBackupDir: ?[*:0]const u16,
    lpcszBaseName: ?[*:0]const u16,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn FileSaveMarkNotExistA(
    lpFileList: ?[*:0]const u8,
    lpDir: ?[*:0]const u8,
    lpBaseName: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn FileSaveMarkNotExistW(
    lpFileList: ?[*:0]const u16,
    lpDir: ?[*:0]const u16,
    lpBaseName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn GetVersionFromFileA(
    lpszFilename: ?[*:0]const u8,
    pdwMSVer: ?*u32,
    pdwLSVer: ?*u32,
    bVersion: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn GetVersionFromFileW(
    lpszFilename: ?[*:0]const u16,
    pdwMSVer: ?*u32,
    pdwLSVer: ?*u32,
    bVersion: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn GetVersionFromFileExA(
    lpszFilename: ?[*:0]const u8,
    pdwMSVer: ?*u32,
    pdwLSVer: ?*u32,
    bVersion: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn GetVersionFromFileExW(
    lpszFilename: ?[*:0]const u16,
    pdwMSVer: ?*u32,
    pdwLSVer: ?*u32,
    bVersion: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn IsNTAdmin(
    dwReserved: u32,
    lpdwReserved: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "ADVPACK" fn DelNodeA(
    pszFileOrDirName: ?[*:0]const u8,
    dwFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn DelNodeW(
    pszFileOrDirName: ?[*:0]const u16,
    dwFlags: u32,
)
callconv(@import("std").os.windows.WINAPI) HRESULT;

// NOTE(review): machine-generated bindings; reformatted for readability only,
// declarations unchanged.

pub extern "ADVPACK" fn DelNodeRunDLL32W(
    hwnd: ?HWND,
    hInstance: ?HINSTANCE,
    pszParms: ?PWSTR,
    nShow: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn OpenINFEngineA(
    pszInfFilename: ?[*:0]const u8,
    pszInstallSection: ?[*:0]const u8,
    dwFlags: u32,
    phInf: ?*?*c_void,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn OpenINFEngineW(
    pszInfFilename: ?[*:0]const u16,
    pszInstallSection: ?[*:0]const u16,
    dwFlags: u32,
    phInf: ?*?*c_void,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn TranslateInfStringExA(
    hInf: ?*c_void,
    pszInfFilename: ?[*:0]const u8,
    pszTranslateSection: ?[*:0]const u8,
    pszTranslateKey: ?[*:0]const u8,
    pszBuffer: [*:0]u8,
    dwBufferSize: u32,
    pdwRequiredSize: ?*u32,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn TranslateInfStringExW(
    hInf: ?*c_void,
    pszInfFilename: ?[*:0]const u16,
    pszTranslateSection: ?[*:0]const u16,
    pszTranslateKey: ?[*:0]const u16,
    pszBuffer: [*:0]u16,
    dwBufferSize: u32,
    pdwRequiredSize: ?*u32,
    pvReserved: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn CloseINFEngine(
    hInf: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn ExtractFilesA(
    pszCabName: ?[*:0]const u8,
    pszExpandDir: ?[*:0]const u8,
    dwFlags: u32,
    pszFileList: ?[*:0]const u8,
    lpReserved: ?*c_void,
    dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn ExtractFilesW(
    pszCabName: ?[*:0]const u16,
    pszExpandDir: ?[*:0]const u16,
    dwFlags: u32,
    pszFileList: ?[*:0]const u16,
    lpReserved: ?*c_void,
    dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn LaunchINFSectionW(
    hwndOwner: ?HWND,
    hInstance: ?HINSTANCE,
    pszParams: ?PWSTR,
    nShow: i32,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "ADVPACK" fn UserInstStubWrapperA(
    hwnd: ?HWND,
    hInstance: ?HINSTANCE,
    pszParms: ?[*:0]const u8,
    nShow: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn UserInstStubWrapperW(
    hwnd: ?HWND,
    hInstance: ?HINSTANCE,
    pszParms: ?[*:0]const u16,
    nShow: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn UserUnInstStubWrapperA(
    hwnd: ?HWND,
    hInstance: ?HINSTANCE,
    pszParms: ?[*:0]const u8,
    nShow: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn UserUnInstStubWrapperW(
    hwnd: ?HWND,
    hInstance: ?HINSTANCE,
    pszParms: ?[*:0]const u16,
    nShow: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn SetPerUserSecValuesA(
    pPerUser: ?*PERUSERSECTIONA,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "ADVPACK" fn SetPerUserSecValuesW(
    pPerUser: ?*PERUSERSECTIONW,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn SendIMEMessageExA(
    param0: ?HWND,
    param1: LPARAM,
) callconv(@import("std").os.windows.WINAPI) LRESULT;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn SendIMEMessageExW(
    param0: ?HWND,
    param1: LPARAM,
) callconv(@import("std").os.windows.WINAPI) LRESULT;

pub extern "USER32" fn IMPGetIMEA(
    param0: ?HWND,
    param1: ?*IMEPROA,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "USER32" fn IMPGetIMEW(
    param0: ?HWND,
    param1: ?*IMEPROW,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "USER32" fn IMPQueryIMEA(
    param0: ?*IMEPROA,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "USER32" fn IMPQueryIMEW(
    param0: ?*IMEPROW,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "USER32" fn IMPSetIMEA(
    param0: ?HWND,
    param1: ?*IMEPROA,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "USER32" fn IMPSetIMEW(
    param0: ?HWND,
    param1: ?*IMEPROW,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "USER32" fn WINNLSGetIMEHotkey(
    param0: ?HWND,
) callconv(@import("std").os.windows.WINAPI) u32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn WINNLSEnableIME(
    param0: ?HWND,
    param1: BOOL,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "USER32" fn WINNLSGetEnableStatus(
    param0: ?HWND,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "APPHELP" fn ApphelpCheckShellObject(
    ObjectCLSID: ?*const Guid,
    bShimIfNecessary: BOOL,
    pullFlags: ?*u64,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "Wldp" fn WldpGetLockdownPolicy(
    hostInformation: ?*WLDP_HOST_INFORMATION,
    lockdownState: ?*u32,
    lockdownFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "Wldp" fn WldpIsClassInApprovedList(
    classID: ?*const Guid,
    hostInformation: ?*WLDP_HOST_INFORMATION,
    isApproved: ?*BOOL,
    optionalFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "Wldp" fn WldpSetDynamicCodeTrust(
    fileHandle: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "Wldp" fn WldpIsDynamicCodePolicyEnabled(
    isEnabled: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

pub extern "Wldp" fn WldpQueryDynamicCodeTrust(
    fileHandle: ?HANDLE,
    // TODO: what to do with BytesParamIndex 2?
baseImage: ?*c_void, imageSize: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (52) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { pub const HW_PROFILE_INFO = thismodule.HW_PROFILE_INFOA; pub const STRENTRY = thismodule.STRENTRYA; pub const STRTABLE = thismodule.STRTABLEA; pub const CABINFO = thismodule.CABINFOA; pub const PERUSERSECTION = thismodule.PERUSERSECTIONA; pub const IMEPRO = thismodule.IMEPROA; pub const GetFirmwareEnvironmentVariable = thismodule.GetFirmwareEnvironmentVariableA; pub const GetFirmwareEnvironmentVariableEx = thismodule.GetFirmwareEnvironmentVariableExA; pub const SetFirmwareEnvironmentVariable = thismodule.SetFirmwareEnvironmentVariableA; pub const SetFirmwareEnvironmentVariableEx = thismodule.SetFirmwareEnvironmentVariableExA; pub const GetProfileInt = thismodule.GetProfileIntA; pub const GetProfileString = thismodule.GetProfileStringA; pub const WriteProfileString = thismodule.WriteProfileStringA; pub const GetProfileSection = thismodule.GetProfileSectionA; pub const WriteProfileSection = thismodule.WriteProfileSectionA; pub const GetPrivateProfileInt = thismodule.GetPrivateProfileIntA; pub const GetPrivateProfileString = thismodule.GetPrivateProfileStringA; pub const WritePrivateProfileString = thismodule.WritePrivateProfileStringA; pub const GetPrivateProfileSection = thismodule.GetPrivateProfileSectionA; pub const WritePrivateProfileSection = thismodule.WritePrivateProfileSectionA; pub const GetPrivateProfileSectionNames = thismodule.GetPrivateProfileSectionNamesA; pub const GetPrivateProfileStruct = thismodule.GetPrivateProfileStructA; pub const WritePrivateProfileStruct = thismodule.WritePrivateProfileStructA; pub const GetComputerName = thismodule.GetComputerNameA; 
pub const DnsHostnameToComputerName = thismodule.DnsHostnameToComputerNameA; pub const GetUserName = thismodule.GetUserNameA; pub const GetCurrentHwProfile = thismodule.GetCurrentHwProfileA; pub const RunSetupCommand = thismodule.RunSetupCommandA; pub const RebootCheckOnInstall = thismodule.RebootCheckOnInstallA; pub const TranslateInfString = thismodule.TranslateInfStringA; pub const RegInstall = thismodule.RegInstallA; pub const ExecuteCab = thismodule.ExecuteCabA; pub const AdvInstallFile = thismodule.AdvInstallFileA; pub const RegSaveRestore = thismodule.RegSaveRestoreA; pub const RegSaveRestoreOnINF = thismodule.RegSaveRestoreOnINFA; pub const RegRestoreAll = thismodule.RegRestoreAllA; pub const FileSaveRestoreOnINF = thismodule.FileSaveRestoreOnINFA; pub const AddDelBackupEntry = thismodule.AddDelBackupEntryA; pub const FileSaveMarkNotExist = thismodule.FileSaveMarkNotExistA; pub const GetVersionFromFile = thismodule.GetVersionFromFileA; pub const GetVersionFromFileEx = thismodule.GetVersionFromFileExA; pub const DelNode = thismodule.DelNodeA; pub const OpenINFEngine = thismodule.OpenINFEngineA; pub const TranslateInfStringEx = thismodule.TranslateInfStringExA; pub const ExtractFiles = thismodule.ExtractFilesA; pub const UserInstStubWrapper = thismodule.UserInstStubWrapperA; pub const UserUnInstStubWrapper = thismodule.UserUnInstStubWrapperA; pub const SetPerUserSecValues = thismodule.SetPerUserSecValuesA; pub const SendIMEMessageEx = thismodule.SendIMEMessageExA; pub const IMPGetIME = thismodule.IMPGetIMEA; pub const IMPQueryIME = thismodule.IMPQueryIMEA; pub const IMPSetIME = thismodule.IMPSetIMEA; }, .wide => struct { pub const HW_PROFILE_INFO = thismodule.HW_PROFILE_INFOW; pub const STRENTRY = thismodule.STRENTRYW; pub const STRTABLE = thismodule.STRTABLEW; pub const CABINFO = thismodule.CABINFOW; pub const PERUSERSECTION = thismodule.PERUSERSECTIONW; pub const IMEPRO = thismodule.IMEPROW; pub const GetFirmwareEnvironmentVariable = 
thismodule.GetFirmwareEnvironmentVariableW; pub const GetFirmwareEnvironmentVariableEx = thismodule.GetFirmwareEnvironmentVariableExW; pub const SetFirmwareEnvironmentVariable = thismodule.SetFirmwareEnvironmentVariableW; pub const SetFirmwareEnvironmentVariableEx = thismodule.SetFirmwareEnvironmentVariableExW; pub const GetProfileInt = thismodule.GetProfileIntW; pub const GetProfileString = thismodule.GetProfileStringW; pub const WriteProfileString = thismodule.WriteProfileStringW; pub const GetProfileSection = thismodule.GetProfileSectionW; pub const WriteProfileSection = thismodule.WriteProfileSectionW; pub const GetPrivateProfileInt = thismodule.GetPrivateProfileIntW; pub const GetPrivateProfileString = thismodule.GetPrivateProfileStringW; pub const WritePrivateProfileString = thismodule.WritePrivateProfileStringW; pub const GetPrivateProfileSection = thismodule.GetPrivateProfileSectionW; pub const WritePrivateProfileSection = thismodule.WritePrivateProfileSectionW; pub const GetPrivateProfileSectionNames = thismodule.GetPrivateProfileSectionNamesW; pub const GetPrivateProfileStruct = thismodule.GetPrivateProfileStructW; pub const WritePrivateProfileStruct = thismodule.WritePrivateProfileStructW; pub const GetComputerName = thismodule.GetComputerNameW; pub const DnsHostnameToComputerName = thismodule.DnsHostnameToComputerNameW; pub const GetUserName = thismodule.GetUserNameW; pub const GetCurrentHwProfile = thismodule.GetCurrentHwProfileW; pub const RunSetupCommand = thismodule.RunSetupCommandW; pub const RebootCheckOnInstall = thismodule.RebootCheckOnInstallW; pub const TranslateInfString = thismodule.TranslateInfStringW; pub const RegInstall = thismodule.RegInstallW; pub const ExecuteCab = thismodule.ExecuteCabW; pub const AdvInstallFile = thismodule.AdvInstallFileW; pub const RegSaveRestore = thismodule.RegSaveRestoreW; pub const RegSaveRestoreOnINF = thismodule.RegSaveRestoreOnINFW; pub const RegRestoreAll = thismodule.RegRestoreAllW; pub const 
FileSaveRestoreOnINF = thismodule.FileSaveRestoreOnINFW; pub const AddDelBackupEntry = thismodule.AddDelBackupEntryW; pub const FileSaveMarkNotExist = thismodule.FileSaveMarkNotExistW; pub const GetVersionFromFile = thismodule.GetVersionFromFileW; pub const GetVersionFromFileEx = thismodule.GetVersionFromFileExW; pub const DelNode = thismodule.DelNodeW; pub const OpenINFEngine = thismodule.OpenINFEngineW; pub const TranslateInfStringEx = thismodule.TranslateInfStringExW; pub const ExtractFiles = thismodule.ExtractFilesW; pub const UserInstStubWrapper = thismodule.UserInstStubWrapperW; pub const UserUnInstStubWrapper = thismodule.UserUnInstStubWrapperW; pub const SetPerUserSecValues = thismodule.SetPerUserSecValuesW; pub const SendIMEMessageEx = thismodule.SendIMEMessageExW; pub const IMPGetIME = thismodule.IMPGetIMEW; pub const IMPQueryIME = thismodule.IMPQueryIMEW; pub const IMPSetIME = thismodule.IMPSetIMEW; }, .unspecified => if (@import("builtin").is_test) struct { pub const HW_PROFILE_INFO = *opaque{}; pub const STRENTRY = *opaque{}; pub const STRTABLE = *opaque{}; pub const CABINFO = *opaque{}; pub const PERUSERSECTION = *opaque{}; pub const IMEPRO = *opaque{}; pub const GetFirmwareEnvironmentVariable = *opaque{}; pub const GetFirmwareEnvironmentVariableEx = *opaque{}; pub const SetFirmwareEnvironmentVariable = *opaque{}; pub const SetFirmwareEnvironmentVariableEx = *opaque{}; pub const GetProfileInt = *opaque{}; pub const GetProfileString = *opaque{}; pub const WriteProfileString = *opaque{}; pub const GetProfileSection = *opaque{}; pub const WriteProfileSection = *opaque{}; pub const GetPrivateProfileInt = *opaque{}; pub const GetPrivateProfileString = *opaque{}; pub const WritePrivateProfileString = *opaque{}; pub const GetPrivateProfileSection = *opaque{}; pub const WritePrivateProfileSection = *opaque{}; pub const GetPrivateProfileSectionNames = *opaque{}; pub const GetPrivateProfileStruct = *opaque{}; pub const WritePrivateProfileStruct = *opaque{}; pub 
const GetComputerName = *opaque{}; pub const DnsHostnameToComputerName = *opaque{}; pub const GetUserName = *opaque{}; pub const GetCurrentHwProfile = *opaque{}; pub const RunSetupCommand = *opaque{}; pub const RebootCheckOnInstall = *opaque{}; pub const TranslateInfString = *opaque{}; pub const RegInstall = *opaque{}; pub const ExecuteCab = *opaque{}; pub const AdvInstallFile = *opaque{}; pub const RegSaveRestore = *opaque{}; pub const RegSaveRestoreOnINF = *opaque{}; pub const RegRestoreAll = *opaque{}; pub const FileSaveRestoreOnINF = *opaque{}; pub const AddDelBackupEntry = *opaque{}; pub const FileSaveMarkNotExist = *opaque{}; pub const GetVersionFromFile = *opaque{}; pub const GetVersionFromFileEx = *opaque{}; pub const DelNode = *opaque{}; pub const OpenINFEngine = *opaque{}; pub const TranslateInfStringEx = *opaque{}; pub const ExtractFiles = *opaque{}; pub const UserInstStubWrapper = *opaque{}; pub const UserUnInstStubWrapper = *opaque{}; pub const SetPerUserSecValues = *opaque{}; pub const SendIMEMessageEx = *opaque{}; pub const IMPGetIME = *opaque{}; pub const IMPQueryIME = *opaque{}; pub const IMPSetIME = *opaque{}; } else struct { pub const HW_PROFILE_INFO = @compileError("'HW_PROFILE_INFO' requires that UNICODE be set to true or false in the root module"); pub const STRENTRY = @compileError("'STRENTRY' requires that UNICODE be set to true or false in the root module"); pub const STRTABLE = @compileError("'STRTABLE' requires that UNICODE be set to true or false in the root module"); pub const CABINFO = @compileError("'CABINFO' requires that UNICODE be set to true or false in the root module"); pub const PERUSERSECTION = @compileError("'PERUSERSECTION' requires that UNICODE be set to true or false in the root module"); pub const IMEPRO = @compileError("'IMEPRO' requires that UNICODE be set to true or false in the root module"); pub const GetFirmwareEnvironmentVariable = @compileError("'GetFirmwareEnvironmentVariable' requires that UNICODE be set to true 
or false in the root module"); pub const GetFirmwareEnvironmentVariableEx = @compileError("'GetFirmwareEnvironmentVariableEx' requires that UNICODE be set to true or false in the root module"); pub const SetFirmwareEnvironmentVariable = @compileError("'SetFirmwareEnvironmentVariable' requires that UNICODE be set to true or false in the root module"); pub const SetFirmwareEnvironmentVariableEx = @compileError("'SetFirmwareEnvironmentVariableEx' requires that UNICODE be set to true or false in the root module"); pub const GetProfileInt = @compileError("'GetProfileInt' requires that UNICODE be set to true or false in the root module"); pub const GetProfileString = @compileError("'GetProfileString' requires that UNICODE be set to true or false in the root module"); pub const WriteProfileString = @compileError("'WriteProfileString' requires that UNICODE be set to true or false in the root module"); pub const GetProfileSection = @compileError("'GetProfileSection' requires that UNICODE be set to true or false in the root module"); pub const WriteProfileSection = @compileError("'WriteProfileSection' requires that UNICODE be set to true or false in the root module"); pub const GetPrivateProfileInt = @compileError("'GetPrivateProfileInt' requires that UNICODE be set to true or false in the root module"); pub const GetPrivateProfileString = @compileError("'GetPrivateProfileString' requires that UNICODE be set to true or false in the root module"); pub const WritePrivateProfileString = @compileError("'WritePrivateProfileString' requires that UNICODE be set to true or false in the root module"); pub const GetPrivateProfileSection = @compileError("'GetPrivateProfileSection' requires that UNICODE be set to true or false in the root module"); pub const WritePrivateProfileSection = @compileError("'WritePrivateProfileSection' requires that UNICODE be set to true or false in the root module"); pub const GetPrivateProfileSectionNames = @compileError("'GetPrivateProfileSectionNames' 
requires that UNICODE be set to true or false in the root module"); pub const GetPrivateProfileStruct = @compileError("'GetPrivateProfileStruct' requires that UNICODE be set to true or false in the root module"); pub const WritePrivateProfileStruct = @compileError("'WritePrivateProfileStruct' requires that UNICODE be set to true or false in the root module"); pub const GetComputerName = @compileError("'GetComputerName' requires that UNICODE be set to true or false in the root module"); pub const DnsHostnameToComputerName = @compileError("'DnsHostnameToComputerName' requires that UNICODE be set to true or false in the root module"); pub const GetUserName = @compileError("'GetUserName' requires that UNICODE be set to true or false in the root module"); pub const GetCurrentHwProfile = @compileError("'GetCurrentHwProfile' requires that UNICODE be set to true or false in the root module"); pub const RunSetupCommand = @compileError("'RunSetupCommand' requires that UNICODE be set to true or false in the root module"); pub const RebootCheckOnInstall = @compileError("'RebootCheckOnInstall' requires that UNICODE be set to true or false in the root module"); pub const TranslateInfString = @compileError("'TranslateInfString' requires that UNICODE be set to true or false in the root module"); pub const RegInstall = @compileError("'RegInstall' requires that UNICODE be set to true or false in the root module"); pub const ExecuteCab = @compileError("'ExecuteCab' requires that UNICODE be set to true or false in the root module"); pub const AdvInstallFile = @compileError("'AdvInstallFile' requires that UNICODE be set to true or false in the root module"); pub const RegSaveRestore = @compileError("'RegSaveRestore' requires that UNICODE be set to true or false in the root module"); pub const RegSaveRestoreOnINF = @compileError("'RegSaveRestoreOnINF' requires that UNICODE be set to true or false in the root module"); pub const RegRestoreAll = @compileError("'RegRestoreAll' requires 
that UNICODE be set to true or false in the root module"); pub const FileSaveRestoreOnINF = @compileError("'FileSaveRestoreOnINF' requires that UNICODE be set to true or false in the root module"); pub const AddDelBackupEntry = @compileError("'AddDelBackupEntry' requires that UNICODE be set to true or false in the root module"); pub const FileSaveMarkNotExist = @compileError("'FileSaveMarkNotExist' requires that UNICODE be set to true or false in the root module"); pub const GetVersionFromFile = @compileError("'GetVersionFromFile' requires that UNICODE be set to true or false in the root module"); pub const GetVersionFromFileEx = @compileError("'GetVersionFromFileEx' requires that UNICODE be set to true or false in the root module"); pub const DelNode = @compileError("'DelNode' requires that UNICODE be set to true or false in the root module"); pub const OpenINFEngine = @compileError("'OpenINFEngine' requires that UNICODE be set to true or false in the root module"); pub const TranslateInfStringEx = @compileError("'TranslateInfStringEx' requires that UNICODE be set to true or false in the root module"); pub const ExtractFiles = @compileError("'ExtractFiles' requires that UNICODE be set to true or false in the root module"); pub const UserInstStubWrapper = @compileError("'UserInstStubWrapper' requires that UNICODE be set to true or false in the root module"); pub const UserUnInstStubWrapper = @compileError("'UserUnInstStubWrapper' requires that UNICODE be set to true or false in the root module"); pub const SetPerUserSecValues = @compileError("'SetPerUserSecValues' requires that UNICODE be set to true or false in the root module"); pub const SendIMEMessageEx = @compileError("'SendIMEMessageEx' requires that UNICODE be set to true or false in the root module"); pub const IMPGetIME = @compileError("'IMPGetIME' requires that UNICODE be set to true or false in the root module"); pub const IMPQueryIME = @compileError("'IMPQueryIME' requires that UNICODE be set to true or 
false in the root module"); pub const IMPSetIME = @compileError("'IMPSetIME' requires that UNICODE be set to true or false in the root module"); }, }; //-------------------------------------------------------------------------------- // Section: Imports (40) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const BOOL = @import("../foundation.zig").BOOL; const BOOLEAN = @import("../foundation.zig").BOOLEAN; const BSTR = @import("../foundation.zig").BSTR; const CHAR = @import("../system/system_services.zig").CHAR; const CONTEXT = @import("../system/diagnostics/debug.zig").CONTEXT; const CUSTOM_SYSTEM_EVENT_TRIGGER_CONFIG = @import("../system/system_services.zig").CUSTOM_SYSTEM_EVENT_TRIGGER_CONFIG; const FILE_SHARE_MODE = @import("../storage/file_system.zig").FILE_SHARE_MODE; const FILETIME = @import("../foundation.zig").FILETIME; const HANDLE = @import("../foundation.zig").HANDLE; const HDC = @import("../graphics/gdi.zig").HDC; const HINSTANCE = @import("../foundation.zig").HINSTANCE; const HKEY = @import("../system/registry.zig").HKEY; const HRESULT = @import("../foundation.zig").HRESULT; const HWND = @import("../foundation.zig").HWND; const IDispatch = @import("../system/ole_automation.zig").IDispatch; const IUnknown = @import("../system/com.zig").IUnknown; const JOB_SET_ARRAY = @import("../system/system_services.zig").JOB_SET_ARRAY; const LARGE_INTEGER = @import("../system/system_services.zig").LARGE_INTEGER; const LIST_ENTRY = @import("../system/kernel.zig").LIST_ENTRY; const LPARAM = @import("../foundation.zig").LPARAM; const LRESULT = @import("../foundation.zig").LRESULT; const NTSTATUS = @import("../foundation.zig").NTSTATUS; const OLECMDEXECOPT = @import("../system/com.zig").OLECMDEXECOPT; const OLECMDF = @import("../system/com.zig").OLECMDF; const OLECMDID = @import("../system/com.zig").OLECMDID; const PSTR = @import("../foundation.zig").PSTR; const PWSTR = 
@import("../foundation.zig").PWSTR; const READYSTATE = @import("../system/com.zig").READYSTATE; const RECT = @import("../foundation.zig").RECT; const RGNDATA = @import("../graphics/gdi.zig").RGNDATA; const SAFEARRAY = @import("../system/ole_automation.zig").SAFEARRAY; const SECURITY_ATTRIBUTES = @import("../security.zig").SECURITY_ATTRIBUTES; const SHANDLE_PTR = @import("../system/system_services.zig").SHANDLE_PTR; const STARTUPINFOA = @import("../system/threading.zig").STARTUPINFOA; const STRING = @import("../system/kernel.zig").STRING; const UNICODE_STRING = @import("../system/kernel.zig").UNICODE_STRING; const VARIANT = @import("../system/ole_automation.zig").VARIANT; const WAITORTIMERCALLBACK = @import("../system/system_services.zig").WAITORTIMERCALLBACK; const WPARAM = @import("../foundation.zig").WPARAM; test { // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476 if (@hasDecl(@This(), "LPFIBER_START_ROUTINE")) { _ = LPFIBER_START_ROUTINE; } if (@hasDecl(@This(), "PFIBER_CALLOUT_ROUTINE")) { _ = PFIBER_CALLOUT_ROUTINE; } if (@hasDecl(@This(), "PQUERYACTCTXW_FUNC")) { _ = PQUERYACTCTXW_FUNC; } if (@hasDecl(@This(), "APPLICATION_RECOVERY_CALLBACK")) { _ = APPLICATION_RECOVERY_CALLBACK; } if (@hasDecl(@This(), "PPS_POST_PROCESS_INIT_ROUTINE")) { _ = PPS_POST_PROCESS_INIT_ROUTINE; } if (@hasDecl(@This(), "PIO_APC_ROUTINE")) { _ = PIO_APC_ROUTINE; } if (@hasDecl(@This(), "PWINSTATIONQUERYINFORMATIONW")) { _ = PWINSTATIONQUERYINFORMATIONW; } if (@hasDecl(@This(), "PFEATURE_STATE_CHANGE_CALLBACK")) { _ = PFEATURE_STATE_CHANGE_CALLBACK; } if (@hasDecl(@This(), "ENUM_CALLBACK")) { _ = ENUM_CALLBACK; } if (@hasDecl(@This(), "WINWATCHNOTIFYPROC")) { _ = WINWATCHNOTIFYPROC; } if (@hasDecl(@This(), "REGINSTALLA")) { _ = REGINSTALLA; } if (@hasDecl(@This(), "PWLDP_SETDYNAMICCODETRUST_API")) { _ = PWLDP_SETDYNAMICCODETRUST_API; } if (@hasDecl(@This(), "PWLDP_ISDYNAMICCODEPOLICYENABLED_API")) { _ = 
PWLDP_ISDYNAMICCODEPOLICYENABLED_API; } if (@hasDecl(@This(), "PWLDP_QUERYDYNAMICODETRUST_API")) { _ = PWLDP_QUERYDYNAMICODETRUST_API; } if (@hasDecl(@This(), "PWLDP_QUERYWINDOWSLOCKDOWNMODE_API")) { _ = PWLDP_QUERYWINDOWSLOCKDOWNMODE_API; } if (@hasDecl(@This(), "PWLDP_QUERYWINDOWSLOCKDOWNRESTRICTION_API")) { _ = PWLDP_QUERYWINDOWSLOCKDOWNRESTRICTION_API; } if (@hasDecl(@This(), "PWLDP_SETWINDOWSLOCKDOWNRESTRICTION_API")) { _ = PWLDP_SETWINDOWSLOCKDOWNRESTRICTION_API; } if (@hasDecl(@This(), "PWLDP_WLDPISAPPAPPROVEDBYPOLICY_API")) { _ = PWLDP_WLDPISAPPAPPROVEDBYPOLICY_API; } @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/system/windows_programming.zig
//! Build logic for the vendored h2o HTTP server library.
//! Compiles a curated subset of h2o (plus its bundled deps: picohttpparser,
//! picotls, cifra, libgkc) into a static library and exposes the "h2o" Zig
//! package. Written against the pre-0.11 `std.build` API.
const std = @import("std");
const builtin = @import("builtin");
const uv = @import("../uv/lib.zig");
const ssl = @import("../openssl/lib.zig");

/// Include-path configuration supplied by the caller of `create`.
/// Each field is a list of directories added via `addIncludeDir`.
const Options = struct {
    openssl_includes: []const []const u8,
    libuv_includes: []const []const u8,
    zlib_includes: []const []const u8,
};

/// The "h2o" Zig package rooted at h2o.zig next to this file.
pub const pkg = std.build.Pkg{
    .name = "h2o",
    .source = .{ .path = srcPath() ++ "/h2o.zig" },
};

/// Registers the "h2o" package on `step`, wiring in the `uv` and `ssl`
/// packages as dependencies, and adds the include directories needed to
/// translate the C headers. On Windows targets, also adds the mingw
/// POSIX-compatibility and winpthreads headers.
pub fn addPackage(step: *std.build.LibExeObjStep) void {
    var new_pkg = pkg;
    new_pkg.dependencies = &.{ uv.pkg, ssl.pkg };
    step.addPackage(new_pkg);
    step.addIncludeDir(srcPath() ++ "/");
    step.addIncludeDir(srcPath() ++ "/vendor/include");
    step.addIncludeDir(srcPath() ++ "/vendor/deps/picotls/include");
    step.addIncludeDir(srcPath() ++ "/vendor/deps/quicly/include");
    step.addIncludeDir(srcPath() ++ "/../openssl/vendor/include");
    if (step.target.getOsTag() == .windows) {
        step.addIncludeDir(srcPath() ++ "/../mingw/win_posix/include");
        step.addIncludeDir(srcPath() ++ "/../mingw/winpthreads/include");
    }
}

/// Creates the "h2o" static-library build step: collects C flags per target
/// OS, compiles the vendored C source list below, and adds every include
/// directory the h2o sources expect. Returns the configured step; errors
/// come from the ArrayList appends (allocation failure).
/// NOTE(review): the commented-out entries in `c_files` (hiredis, libyrmcds,
/// quicly, http3, ...) appear to be deliberately excluded features — confirm
/// before re-enabling any of them.
pub fn create(
    b: *std.build.Builder,
    target: std.zig.CrossTarget,
    mode: std.builtin.Mode,
    opts: Options,
) !*std.build.LibExeObjStep {
    const lib = b.addStaticLibrary("h2o", null);
    lib.setTarget(target);
    lib.setBuildMode(mode);
    // lib.c_std = .C99;

    const alloc = b.allocator;

    // Unused defines:
    // -DH2O_ROOT="/usr/local" -DH2O_CONFIG_PATH="/usr/local/etc/h2o.conf" -DH2O_HAS_PTHREAD_SETAFFINITY_NP
    var c_flags = std.ArrayList([]const u8).init(alloc);
    // Move args into response file to avoid cli limit.
    try c_flags.appendSlice(&.{
        "@lib/h2o/cflags",
    });
    if (target.getOsTag() == .linux) {
        try c_flags.appendSlice(&.{
            "-D_GNU_SOURCE", // This lets it find in6_pktinfo for some reason.
        });
    } else if (target.getOsTag() == .windows) {
        try c_flags.appendSlice(&.{
            "-D_WINDOWS=1", // Need this when using C99.
            "-D_POSIX_C_SOURCE=200809L",
            "-D_POSIX",
        });
    }

    // Vendored C translation units, relative to vendor/. Commented-out
    // entries are parts of h2o that this build intentionally leaves out.
    var c_files = std.ArrayList([]const u8).init(alloc);
    try c_files.appendSlice(&.{
        // deps
        "deps/picohttpparser/picohttpparser.c",
        //"deps/cloexec/cloexec.c",
        //"deps/hiredis/async.c",
        // "deps/hiredis/hiredis.c",
        // "deps/hiredis/net.c",
        // "deps/hiredis/read.c",
        // "deps/hiredis/sds.c",
        "deps/libgkc/gkc.c",
        //"deps/libyrmcds/close.c",
        //"deps/libyrmcds/connect.c",
        //"deps/libyrmcds/recv.c",
        //"deps/libyrmcds/send.c",
        //"deps/libyrmcds/send_text.c",
        //"deps/libyrmcds/socket.c",
        //"deps/libyrmcds/strerror.c",
        //"deps/libyrmcds/text_mode.c",
        "deps/picotls/deps/cifra/src/blockwise.c",
        "deps/picotls/deps/cifra/src/chash.c",
        "deps/picotls/deps/cifra/src/curve25519.c",
        "deps/picotls/deps/cifra/src/drbg.c",
        "deps/picotls/deps/cifra/src/hmac.c",
        "deps/picotls/deps/cifra/src/sha256.c",
        "deps/picotls/lib/certificate_compression.c",
        "deps/picotls/lib/pembase64.c",
        "deps/picotls/lib/picotls.c",
        "deps/picotls/lib/openssl.c",
        "deps/picotls/lib/cifra/random.c",
        "deps/picotls/lib/cifra/x25519.c",
        // "deps/quicly/lib/cc-cubic.c",
        // "deps/quicly/lib/cc-pico.c",
        // "deps/quicly/lib/cc-reno.c",
        // "deps/quicly/lib/defaults.c",
        // "deps/quicly/lib/frame.c",
        // "deps/quicly/lib/local_cid.c",
        // "deps/quicly/lib/loss.c",
        // "deps/quicly/lib/quicly.c",
        // "deps/quicly/lib/ranges.c",
        // "deps/quicly/lib/rate.c",
        // "deps/quicly/lib/recvstate.c",
        // "deps/quicly/lib/remote_cid.c",
        // "deps/quicly/lib/retire_cid.c",
        // "deps/quicly/lib/sendstate.c",
        // "deps/quicly/lib/sentmap.c",
        // "deps/quicly/lib/streambuf.c",
        // common
        "lib/common/cache.c",
        "lib/common/file.c",
        "lib/common/filecache.c",
        "lib/common/hostinfo.c",
        // "lib/common/http1client.c",
        // "lib/common/http2client.c",
        // "lib/common/http3client.c",
        // "lib/common/httpclient.c",
        // "lib/common/memcached.c",
        "lib/common/memory.c",
        "lib/common/multithread.c",
        // "lib/common/redis.c",
        // "lib/common/serverutil.c",
        "lib/common/socket.c",
        "lib/common/socketpool.c",
        "lib/common/string.c",
        "lib/common/rand.c",
        "lib/common/time.c",
        "lib/common/timerwheel.c",
        "lib/common/token.c",
        "lib/common/url.c",
        "lib/common/balancer/roundrobin.c",
        "lib/common/balancer/least_conn.c",
        "lib/common/absprio.c",
        "lib/core/config.c",
        "lib/core/configurator.c",
        "lib/core/context.c",
        "lib/core/headers.c",
        // "lib/core/logconf.c",
        // "lib/core/proxy.c",
        "lib/core/request.c",
        "lib/core/util.c",
        // "lib/handler/access_log.c",
        "lib/handler/compress.c",
        "lib/handler/compress/gzip.c",
        "lib/handler/errordoc.c",
        "lib/handler/expires.c",
        "lib/handler/fastcgi.c",
        // "lib/handler/file.c",
        "lib/handler/headers.c",
        "lib/handler/mimemap.c",
        "lib/handler/proxy.c",
        // "lib/handler/connect.c",
        "lib/handler/redirect.c",
        "lib/handler/reproxy.c",
        "lib/handler/throttle_resp.c",
        "lib/handler/self_trace.c",
        "lib/handler/server_timing.c",
        "lib/handler/status.c",
        "lib/handler/headers_util.c",
        "lib/handler/status/events.c",
        "lib/handler/status/requests.c",
        "lib/handler/status/ssl.c",
        "lib/handler/http2_debug_state.c",
        "lib/handler/status/durations.c",
        // "lib/handler/configurator/access_log.c",
        "lib/handler/configurator/compress.c",
        "lib/handler/configurator/errordoc.c",
        "lib/handler/configurator/expires.c",
        // "lib/handler/configurator/fastcgi.c",
        "lib/handler/configurator/file.c",
        "lib/handler/configurator/headers.c",
        "lib/handler/configurator/proxy.c",
        "lib/handler/configurator/redirect.c",
        "lib/handler/configurator/reproxy.c",
        "lib/handler/configurator/throttle_resp.c",
        "lib/handler/configurator/self_trace.c",
        "lib/handler/configurator/server_timing.c",
        "lib/handler/configurator/status.c",
        "lib/handler/configurator/http2_debug_state.c",
        "lib/handler/configurator/headers_util.c",
        "lib/http1.c",
        "lib/tunnel.c",
        "lib/http2/cache_digests.c",
        "lib/http2/casper.c",
        "lib/http2/connection.c",
        "lib/http2/frame.c",
        "lib/http2/hpack.c",
        "lib/http2/scheduler.c",
        "lib/http2/stream.c",
        "lib/http2/http2_debug_state.c",
        // "lib/http3/frame.c",
        // "lib/http3/qpack.c",
        // "lib/http3/common.c",
        // "lib/http3/server.c",
    });

    for (c_files.items) |file| {
        const path = b.fmt("{s}/vendor/{s}", .{ srcPath(), file });
        lib.addCSourceFile(path, c_flags.items);
    }

    // Local glue code living next to this build file.
    lib.addCSourceFile(fromRoot(b, "utils.c"), c_flags.items);

    // picohttpparser has intentional UB code in
    // findchar_fast when SSE4_2 is enabled: _mm_loadu_si128 can be given ranges pointer with less than 16 bytes.
    // Can't seem to turn off sanitize for just the one source file. Tried to separate picohttpparser into its own lib too.
    // For now, disable sanitize c for entire h2o lib.
    lib.disable_sanitize_c = true;

    if (builtin.os.tag == .macos and target.getOsTag() == .macos) {
        if (target.isNativeOs()) {
            // Force using native headers or it won't find netinet/udp.h
            lib.linkFramework("CoreServices");
        } else {
            lib.addSystemIncludeDir("/usr/include");
        }
    }

    lib.linkLibC();
    // Load user_config.h here. include/h2o.h was patched to include user_config.h
    lib.addIncludeDir(srcPath());
    for (opts.openssl_includes) |path| {
        lib.addIncludeDir(path);
    }
    for (opts.libuv_includes) |path| {
        lib.addIncludeDir(path);
    }
    lib.addIncludeDir(fromRoot(b, "vendor/include"));
    for (opts.zlib_includes) |path| {
        lib.addIncludeDir(path);
    }
    // Headers for the bundled dependencies compiled above.
    lib.addIncludeDir(fromRoot(b, "vendor/deps/quicly/include"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/picohttpparser"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/picotls/include"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/klib"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/cloexec"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/brotli/c/include"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/yoml"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/hiredis"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/golombset"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/libgkc"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/libyrmcds"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/picotls/deps/cifra/src/ext"));
    lib.addIncludeDir(fromRoot(b, "vendor/deps/picotls/deps/cifra/src"));

    if (target.getOsTag() == .windows and target.getAbi() == .gnu) {
        // Since H2O source relies on posix only, provide an interface to windows API.
        // NOTE(review): these paths are relative to the build cwd ("./lib/mingw/..."),
        // unlike the srcPath()-based paths above — presumably intentional; verify.
        lib.addSystemIncludeDir("./lib/mingw/win_posix/include");
        if (builtin.os.tag == .linux or builtin.os.tag == .macos) {
            lib.addSystemIncludeDir("./lib/mingw/win_posix/include-posix");
        }
        lib.addSystemIncludeDir("./lib/mingw/winpthreads/include");
    }

    return lib;
}

/// Options for `buildAndLink`: when `lib_path` is set, a prebuilt archive is
/// linked instead of building h2o from source.
pub const LinkOptions = struct {
    lib_path: ?[]const u8 = null,
};

/// Links h2o into `step`: either a prebuilt library at `opts.lib_path`, or a
/// fresh static-library step from `create` using default sibling include dirs
/// (openssl, uv, zlib). Build errors abort via `catch unreachable`.
pub fn buildAndLink(step: *std.build.LibExeObjStep, opts: LinkOptions) void {
    if (opts.lib_path) |path| {
        linkLibPath(step, path);
    } else {
        const b = step.builder;
        const lib = create(b, step.target, step.build_mode, .{
            .openssl_includes = &.{
                srcPath() ++ "/../openssl/vendor/include",
            },
            .libuv_includes = &.{
                srcPath() ++ "/../uv/vendor/include",
            },
            .zlib_includes = &.{
                srcPath() ++ "/../zlib/vendor",
            },
        }) catch unreachable;
        linkLib(step, lib);
    }
}

/// Links an in-tree library step into `step`.
pub fn linkLib(step: *std.build.LibExeObjStep, lib: *std.build.LibExeObjStep) void {
    step.linkLibrary(lib);
}

/// Links a prebuilt library file into `step` by path.
pub fn linkLibPath(step: *std.build.LibExeObjStep, path: []const u8) void {
    step.addAssemblyFile(path);
}

/// Directory containing this build file, derived at comptime from @src().
/// `unreachable` assumes the file always lives in a subdirectory (dirname
/// never null) — holds as long as this file is not at the filesystem root.
fn srcPath() []const u8 {
    return (std.fs.path.dirname(@src().file) orelse unreachable);
}

/// Resolves `rel_path` against this file's directory using the builder's
/// allocator; allocation failure aborts via `catch unreachable`.
fn fromRoot(b: *std.build.Builder, rel_path: []const u8) []const u8 {
    return std.fs.path.resolve(b.allocator, &.{ srcPath(), rel_path }) catch unreachable;
}
lib/h2o/lib.zig
const gfx = @import("gfx.zig"); const std = @import("std"); const js = struct { const GPUSize32 = u32; const GPUSize64 = usize; const GPUIndex32 = u32; const GPUSignedOffset32 = i32; const GPUIntegerCoordinate = u32; const ObjectId = u32; const DescId = ObjectId; const ContextId = ObjectId; const AdapterId = ObjectId; const DeviceId = ObjectId; const ShaderId = ObjectId; const BindGroupLayoutId = ObjectId; const BindGroupId = ObjectId; const PipelineLayoutId = ObjectId; const RenderPipelineId = ObjectId; const RenderPassId = ObjectId; const CommandEncoderId = ObjectId; const CommandBufferId = ObjectId; const TextureId = ObjectId; const TextureViewId = ObjectId; const SamplerId = ObjectId; const QuerySetId = ObjectId; const BufferId = ObjectId; const invalid_id: ObjectId = 0; const default_desc_id: DescId = 0; extern fn initDesc() DescId; extern fn deinitDesc(desc_id: DescId) void; extern fn setDescField(desc_id: DescId, field_ptr: [*]const u8, field_len: usize) void; extern fn setDescString(desc_id: DescId, value_ptr: [*]const u8, value_len: usize) void; extern fn setDescBool(desc_id: DescId, value: bool) void; extern fn setDescU32(desc_id: DescId, value: u32) void; extern fn setDescI32(desc_id: DescId, value: i32) void; extern fn setDescF32(desc_id: DescId, value: f32) void; extern fn beginDescArray(desc_id: DescId) void; extern fn endDescArray(desc_id: DescId) void; extern fn beginDescChild(desc_id: DescId) void; extern fn endDescChild(desc_id: DescId) void; extern fn createContext(canvas_id_ptr: [*]const u8, canvas_id_len: usize) ContextId; extern fn destroyContext(context_id: ContextId) void; extern fn getContextCurrentTexture(context_id: ContextId) TextureId; extern fn configure(device_id: DeviceId, context_id: ContextId, desc_id: DescId) void; extern fn getPreferredFormat() usize; extern fn requestAdapter(desc_id: DescId) void; extern fn destroyAdapter(adapter_id: AdapterId) void; extern fn requestDevice(adapter_id: AdapterId, desc_id: DescId) void; extern fn 
destroyDevice(device_id: DeviceId) void; extern fn createShader(device_id: DeviceId, code_ptr: [*]const u8, code_len: usize) ShaderId; extern fn destroyShader(shader_id: ShaderId) void; extern fn checkShaderCompile(shader_id: ShaderId) void; extern fn createBuffer( device_id: DeviceId, desc_id: DescId, init_data_ptr: [*]const u8, init_data_len: usize, ) BufferId; extern fn destroyBuffer(buffer_id: BufferId) void; extern fn createTexture(device_id: DeviceId, desc_id: DescId) TextureId; extern fn destroyTexture(texture_id: TextureId) void; extern fn createTextureView(desc_id: DescId) TextureViewId; extern fn destroyTextureView(texture_view_id: TextureViewId) void; extern fn createSampler(device_id: DeviceId, desc_id: DescId) SamplerId; extern fn destroySampler(sampler_id: SamplerId) void; extern fn createBindGroupLayout(device_id: DeviceId, desc_id: DescId) BindGroupLayoutId; extern fn destroyBindGroupLayout(bind_group_layout_id: BindGroupLayoutId) void; extern fn createBindGroup(device_id: DeviceId, desc_id: DescId) BindGroupId; extern fn destroyBindGroup(bind_group_id: BindGroupId) void; extern fn createPipelineLayout(device_id: DeviceId, desc_id: DescId) PipelineLayoutId; extern fn destroyPipelineLayout(pipeline_layout_id: PipelineLayoutId) void; extern fn createRenderPipeline(device_id: DeviceId, desc_id: DescId) RenderPipelineId; extern fn destroyRenderPipeline(render_pipeline_id: RenderPipelineId) void; extern fn createCommandEncoder(device_id: DeviceId) CommandEncoderId; extern fn finishCommandEncoder(command_encoder_id: CommandEncoderId) CommandBufferId; extern fn beginRenderPass(command_encoder_id: CommandEncoderId, desc_id: DescId) RenderPassId; extern fn setPipeline(render_pass_id: RenderPassId, render_pipeline_id: RenderPipelineId) void; extern fn setBindGroup( render_pass_id: RenderPassId, group_index: GPUIndex32, bind_group_id: BindGroupId, dynamic_offsets_ptr: [*]const u8, dynamic_offsets_len: usize, ) void; extern fn setVertexBuffer( render_pass_id: 
RenderPassId, slot: GPUIndex32, buffer: BufferId, offset: GPUSize64, size: GPUSize64, ) void; extern fn setIndexBuffer( render_pass_id: RenderPassId, buffer_id: BufferId, index_format_ptr: [*]const u8, index_format_len: usize, offset: usize, size: usize, ) void; extern fn draw( render_pass_id: RenderPassId, vertex_count: GPUSize32, instance_count: GPUSize32, first_vertex: GPUSize32, first_instance: GPUSize32, ) void; extern fn drawIndexed( render_pass_id: RenderPassId, index_count: GPUSize32, instance_count: GPUSize32, first_index: GPUSize32, base_vertex: GPUSignedOffset32, first_instance: GPUSize32, ) void; extern fn endRenderPass(render_pass_id: RenderPassId) void; extern fn queueSubmit(device_id: DeviceId, command_buffer_id: CommandBufferId) void; extern fn queueWriteBuffer( device_id: DeviceId, buffer_id: BufferId, buffer_offset: GPUSize64, data_ptr: [*]const u8, data_len: usize, data_offset: GPUSize64, ) void; extern fn queueWriteTexture( device_id: DeviceId, destination_id: DescId, data_ptr: [*]const u8, data_len: usize, data_layout_id: DescId, size_width: GPUIntegerCoordinate, size_height: GPUIntegerCoordinate, size_depth_or_array_layers: GPUIntegerCoordinate, ) void; }; fn getEnumName(value: anytype) []const u8 { @setEvalBranchQuota(10000); comptime var enum_names: []const []const u8 = &.{}; inline for (@typeInfo(@TypeOf(value)).Enum.fields) |field| { comptime var enum_name: []const u8 = &.{}; inline for (field.name) |char| { enum_name = enum_name ++ &[_]u8{if (char == '_') '-' else char}; } enum_names = enum_names ++ &[_][]const u8{enum_name}; } return enum_names[@enumToInt(value)]; } fn getFieldName(comptime name: []const u8) []const u8 { comptime var field_name: []const u8 = &.{}; comptime var next_upper = false; inline for (name) |char| { if (char == '_') { next_upper = true; continue; } field_name = field_name ++ &[_]u8{if (next_upper) std.ascii.toUpper(char) else char}; next_upper = false; } return field_name; } fn setDescField(desc_id: js.DescId, 
field: []const u8) void { js.setDescField(desc_id, field.ptr, field.len); } fn setDescValue(desc_id: js.DescId, value: anytype) void { switch (@typeInfo(@TypeOf(value))) { .Bool => js.setDescBool(desc_id, value), .Int => |I| { if (I.bits != 32) { @compileError("Desc ints must be 32 bits!"); } switch (I.signedness) { .signed => js.setDescI32(desc_id, value), .unsigned => js.setDescU32(desc_id, value), } }, .Float => |F| { if (F.bits != 32) { @compileError("Desc floats must be 32 bits!"); } js.setDescF32(desc_id, value); }, .Enum => { const enum_name = getEnumName(value); js.setDescString(desc_id, enum_name.ptr, enum_name.len); }, .Optional => { if (value) |v| { setDescValue(desc_id, v); } }, .Pointer => |P| { switch (P.size) { .One => { setDescValue(desc_id, value.*); }, .Slice => { if (P.child == u8) { js.setDescString(desc_id, value.ptr, value.len); } else { js.beginDescArray(desc_id); for (value) |v| { setDescValue(desc_id, v); } js.endDescArray(desc_id); } }, else => @compileError("Invalid desc pointer size!"), } }, .Struct => |S| { if (S.layout == .Packed) { const BitType = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(@TypeOf(value)) }, }); js.setDescU32(desc_id, @intCast(u32, @bitCast(BitType, value))); } else if (typeIsArrayDesc(@TypeOf(value))) { js.beginDescArray(desc_id); inline for (S.fields) |field| { setDescValue(desc_id, @field(value, field.name)); } js.endDescArray(desc_id); } else if (S.fields.len == 1 and @hasField(@TypeOf(value), "impl")) { setDescValue(desc_id, value.impl.id); } else { js.beginDescChild(desc_id); inline for (S.fields) |field| { const field_name = comptime getFieldName(field.name); setDescFieldValue(desc_id, field_name, @field(value, field.name)); } js.endDescChild(desc_id); } }, .Union => |U| { inline for (U.fields) |field, i| { const Tag = U.tag_type orelse @compileError("Desc union must be tagged!"); const tag = std.meta.activeTag(value); const type_name = @typeName(@TypeOf(value)) ++ "Type"; if (@field(Tag, 
field.name) == tag) { setDescValue(desc_id, @field(value, field.name)); setDescFieldValue(desc_id, type_name, @as(u32, i)); break; } } }, else => @compileError("Invalid desc type!"), } } fn typeIsArrayDesc(comptime Type: type) bool { return Type == gfx.Extent3d or Type == gfx.Color; } fn setDescFieldValue(desc_id: js.DescId, field: []const u8, value: anytype) void { setDescField(desc_id, field); setDescValue(desc_id, value); } fn setDesc(desc_id: js.DescId, desc: anytype) void { inline for (@typeInfo(@TypeOf(desc)).Struct.fields) |field| { const field_name = comptime getFieldName(field.name); setDescFieldValue(desc_id, field_name, @field(desc, field.name)); } } pub const Instance = struct { pub fn init() !Instance { return Instance{}; } pub fn deinit(_: *Instance) void {} pub fn initSurface(_: *Instance, desc: gfx.SurfaceDesc) !Surface { return Surface{ .context_id = js.createContext( desc.window_info.canvas_id.ptr, desc.window_info.canvas_id.len, ), }; } pub fn deinitSurface(_: *Instance, _: *Surface) void {} pub fn initAdapter(_: *Instance, desc: gfx.AdapterDesc) !Adapter { return try await async requestAdapterAsync(desc); } pub fn deinitAdapter(_: *Instance, adapter: *Adapter) void { js.destroyAdapter(adapter.id); } var request_adapter_frame: anyframe = undefined; var request_adapter_id: anyerror!js.AdapterId = undefined; fn requestAdapterAsync(desc: gfx.AdapterDesc) !Adapter { defer js.deinitDesc(desc.impl.id); js.requestAdapter(desc.impl.id); suspend { request_adapter_frame = @frame(); } return Adapter{ .id = try request_adapter_id }; } export fn requestAdapterComplete(adapter_id: js.AdapterId) void { request_adapter_id = if (adapter_id == js.invalid_id) error.RequestAdapterFailed else adapter_id; resume request_adapter_frame; } }; pub const Surface = struct { context_id: js.ContextId, pub fn getPreferredFormat(_: Surface) !gfx.TextureFormat { const format = js.getPreferredFormat(); return @intToEnum(gfx.TextureFormat, format); } }; pub const AdapterDesc = 
struct {
    // Descriptor handle; stays at the shared default until a setter is called.
    id: js.DescId = js.default_desc_id,

    /// Records the power-preference hint, lazily allocating the descriptor.
    pub fn setPowerPreference(desc: *AdapterDesc, power_preference: gfx.PowerPreference) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "powerPreference", power_preference);
    }

    pub fn setForceFallbackAdapter(desc: *AdapterDesc, force_fallback_adapter: bool) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "forceFallbackAdapter", force_fallback_adapter);
    }
};

pub const Adapter = struct {
    id: js.AdapterId,

    /// Requests a device from this adapter; suspends until JS calls
    /// `requestDeviceComplete`.
    pub fn initDevice(adapter: *Adapter, desc: gfx.DeviceDesc) !Device {
        return try await async adapter.requestDeviceAsync(desc);
    }

    pub fn deinitDevice(_: *Adapter, device: *Device) void {
        js.destroyDevice(device.id);
    }

    // Frame and result slot for the (single outstanding) device request.
    var request_device_frame: anyframe = undefined;
    var request_device_id: anyerror!js.DeviceId = undefined;

    fn requestDeviceAsync(adapter: *Adapter, desc: gfx.DeviceDesc) !Device {
        // The descriptor is consumed by this call regardless of outcome.
        defer js.deinitDesc(desc.impl.id);
        js.requestDevice(adapter.id, desc.impl.id);
        // Park this frame until the JS callback resumes it.
        suspend {
            request_device_frame = @frame();
        }
        return Device{ .id = try request_device_id };
    }

    /// Called from JS when the device request finishes.
    export fn requestDeviceComplete(device_id: js.DeviceId) void {
        request_device_id = if (device_id == js.invalid_id)
            error.RequestDeviceFailed
        else
            device_id;
        resume request_device_frame;
    }
};

pub const DeviceDesc = struct {
    id: js.DescId = js.default_desc_id,

    pub fn setRequiredFeatures(
        desc: *DeviceDesc,
        required_features: []const gfx.FeatureName,
    ) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "requiredFeatures", required_features);
    }

    pub fn setRequiredLimits(desc: *DeviceDesc, required_limits: gfx.Limits) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "requiredLimits", required_limits);
    }
};

pub const Device = struct {
    // NOTE(review): declared as js.AdapterId but it stores a device id (see
    // Adapter.requestDeviceAsync and getQueue below) — likely should be
    // js.DeviceId unless the id types alias each other; confirm in the js
    // bindings before changing.
    id: js.AdapterId,

    pub fn initSwapchain(
        device: *Device,
        surface: *Surface,
        desc: gfx.SwapchainDesc,
    ) !Swapchain {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        setDescFieldValue(js_desc, "compositingAlphaMode", @as([]const u8, "opaque"));
        // The swapchain reuses the surface's canvas context id and keeps its
        // own long-lived descriptor for per-frame texture views.
        const swapchain = Swapchain{ .id = surface.context_id, .view_desc = js.initDesc() };
        js.configure(device.id, swapchain.id, js_desc);
        return swapchain;
    }

    pub fn deinitSwapchain(_: *Device, swapchain: *Swapchain) void {
        js.deinitDesc(swapchain.view_desc);
        js.destroyContext(swapchain.id);
    }

    pub fn initShader(device: *Device, data: []const u8) !Shader {
        const shader = Shader{
            .id = js.createShader(device.id, data.ptr, data.len),
        };
        return shader;
    }

    pub fn deinitShader(_: *Device, shader: *Shader) void {
        js.destroyShader(shader.id);
    }

    pub fn initBuffer(device: *Device, desc: gfx.BufferDesc) !Buffer {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        // Absent initial data is passed to JS as an empty byte slice.
        const data = desc.data orelse &[_]u8{};
        return Buffer{
            .id = js.createBuffer(device.id, js_desc, data.ptr, data.len),
        };
    }

    pub fn deinitBuffer(_: *Device, buffer: *Buffer) void {
        js.destroyBuffer(buffer.id);
    }

    pub fn initTexture(device: *Device, desc: gfx.TextureDesc) !Texture {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        return Texture{ .id = js.createTexture(device.id, js_desc) };
    }

    pub fn deinitTexture(_: *Device, texture: *Texture) void {
        js.destroyTexture(texture.id);
    }

    pub fn initTextureView(_: *Device, desc: gfx.TextureViewDesc) !TextureView {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        return TextureView{ .id = js.createTextureView(js_desc) };
    }

    pub fn deinitTextureView(_: *Device, texture_view: *TextureView) void {
        js.destroyTextureView(texture_view.id);
    }

    pub fn initSampler(device: *Device, desc: gfx.SamplerDesc) !Sampler {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        return Sampler{ .id = js.createSampler(device.id, js_desc) };
    }

    pub fn deinitSampler(_: *Device, sampler: *Sampler) void {
        js.destroySampler(sampler.id);
    }

    pub fn initBindGroupLayout(
        device: *Device,
        desc: gfx.BindGroupLayoutDesc,
    ) !BindGroupLayout {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        return BindGroupLayout{
            .id = js.createBindGroupLayout(device.id, js_desc),
        };
    }

    pub fn deinitBindGroupLayout(_: *Device, bind_group_layout: *BindGroupLayout) void {
        js.destroyBindGroupLayout(bind_group_layout.id);
    }

    pub fn initBindGroup(device: *Device, desc: gfx.BindGroupDesc) !BindGroup {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        return BindGroup{ .id = js.createBindGroup(device.id, js_desc) };
    }

    pub fn deinitBindGroup(_: *Device, bind_group: *BindGroup) void {
        js.destroyBindGroup(bind_group.id);
    }

    pub fn initPipelineLayout(device: *Device, desc: gfx.PipelineLayoutDesc) !PipelineLayout {
        var js_desc = js.initDesc();
        defer js.deinitDesc(js_desc);
        setDesc(js_desc, desc);
        return PipelineLayout{ .id = js.createPipelineLayout(device.id, js_desc) };
    }

    pub fn deinitPipelineLayout(_: *Device, pipeline_layout: *PipelineLayout) void {
        js.destroyPipelineLayout(pipeline_layout.id);
    }

    pub fn initRenderPipeline(device: *Device, desc: gfx.RenderPipelineDesc) !RenderPipeline {
        // Unlike the other init* methods, the descriptor was built by the
        // caller (RenderPipelineDesc setters) and is consumed here.
        defer js.deinitDesc(desc.impl.id);
        return RenderPipeline{
            .id = js.createRenderPipeline(device.id, desc.impl.id),
        };
    }

    pub fn deinitRenderPipeline(_: *Device, render_pipeline: *RenderPipeline) void {
        js.destroyRenderPipeline(render_pipeline.id);
    }

    pub fn initCommandEncoder(device: *Device) !CommandEncoder {
        return CommandEncoder{ .id = js.createCommandEncoder(device.id) };
    }

    /// The queue shares the device's id on the JS side.
    pub fn getQueue(device: *Device) Queue {
        return Queue{ .id = device.id };
    }
};

pub const Swapchain = struct {
    id: js.ContextId,
    // Reusable descriptor for creating the per-frame texture view.
    view_desc: js.DescId,

    pub fn getCurrentTextureView(swapchain: *Swapchain) !TextureView {
        const tex_id = js.getContextCurrentTexture(swapchain.id);
        setDescFieldValue(swapchain.view_desc, "texture", tex_id);
        return TextureView{ .id = js.createTextureView(swapchain.view_desc) };
    }

    pub fn present(_:
*Swapchain) !void {}
};

pub const Shader = struct {
    id: js.ShaderId,
};

pub const Buffer = struct {
    id: js.BufferId,
};

pub const Texture = struct {
    id: js.TextureId,

    pub fn createView(texture: *Texture) !TextureView {
        return TextureView{ .id = js.createTextureView(texture.id) };
    }

    pub fn destroy(texture: *Texture) void {
        js.destroyTexture(texture.id);
    }
};

pub const TextureView = struct {
    id: js.TextureViewId,
};

pub const Sampler = struct {
    id: js.SamplerId,

    pub fn destroy(sampler: *Sampler) void {
        js.destroySampler(sampler.id);
    }
};

pub const BindGroupLayout = struct {
    id: js.BindGroupLayoutId,

    pub fn destroy(bind_group_layout: *BindGroupLayout) void {
        js.destroyBindGroupLayout(bind_group_layout.id);
    }
};

pub const BindGroup = struct {
    id: js.BindGroupId,

    pub fn destroy(bind_group: *BindGroup) void {
        js.destroyBindGroup(bind_group.id);
    }
};

pub const PipelineLayout = struct {
    id: js.PipelineLayoutId,

    pub fn destroy(pipeline_layout: *PipelineLayout) void {
        js.destroyPipelineLayout(pipeline_layout.id);
    }
};

/// Builder for a render-pipeline descriptor; each setter lazily allocates
/// the JS-side descriptor on first use.
pub const RenderPipelineDesc = struct {
    id: js.DescId = js.default_desc_id,

    pub fn setPipelineLayout(
        desc: *RenderPipelineDesc,
        pipeline_layout: *const gfx.PipelineLayout,
    ) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "layout", pipeline_layout);
    }

    pub fn setVertexState(desc: *RenderPipelineDesc, vertex_state: gfx.VertexState) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "vertex", vertex_state);
    }

    pub fn setPrimitiveState(
        desc: *RenderPipelineDesc,
        primitive_state: gfx.PrimitiveState,
    ) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "primitive", primitive_state);
    }

    pub fn setDepthStencilState(
        desc: *RenderPipelineDesc,
        depth_stencil_state: gfx.DepthStencilState,
    ) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "depthStencil", depth_stencil_state);
    }

    pub fn setMultisampleState(
        desc: *RenderPipelineDesc,
        multisample_state: gfx.MultisampleState,
    ) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "multisample", multisample_state);
    }

    pub fn setFragmentState(desc: *RenderPipelineDesc, fragment_state: gfx.FragmentState) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "fragment", fragment_state);
    }
};

pub const RenderPipeline = struct {
    id: js.RenderPipelineId,

    pub fn destroy(render_pipeline: *RenderPipeline) void {
        js.destroyRenderPipeline(render_pipeline.id);
    }
};

pub const CommandEncoder = struct {
    id: js.CommandEncoderId,

    pub fn beginRenderPass(encoder: *CommandEncoder, desc: gfx.RenderPassDesc) !RenderPass {
        // The caller-built render-pass descriptor is consumed here.
        defer js.deinitDesc(desc.impl.id);
        return RenderPass{ .id = js.beginRenderPass(encoder.id, desc.impl.id) };
    }

    pub fn finish(encoder: *CommandEncoder) !CommandBuffer {
        return CommandBuffer{ .id = js.finishCommandEncoder(encoder.id) };
    }
};

pub const CommandBuffer = struct {
    id: js.CommandBufferId,
};

/// Builder for a render-pass descriptor; setters lazily allocate the
/// JS-side descriptor on first use.
pub const RenderPassDesc = struct {
    id: js.DescId = js.default_desc_id,

    pub fn setColorAttachments(
        desc: *RenderPassDesc,
        color_attachments: []const gfx.ColorAttachment,
    ) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "colorAttachments", color_attachments);
    }

    pub fn setDepthStencilAttachment(
        desc: *RenderPassDesc,
        depth_stencil_attachment: gfx.DepthStencilAttachment,
    ) void {
        if (desc.id == js.default_desc_id) {
            desc.id = js.initDesc();
        }
        setDescFieldValue(desc.id, "depthStencilAttachment", depth_stencil_attachment);
    }
};

pub const RenderPass = struct {
    id: js.RenderPassId,

    pub fn setPipeline(render_pass: *RenderPass, render_pipeline: *const RenderPipeline) !void {
        js.setPipeline(render_pass.id, render_pipeline.id);
    }

    pub fn setBindGroup(
        render_pass: *RenderPass,
        group_index: u32,
        group: *const BindGroup,
        dynamic_offsets: ?[]const u32,
    ) !void {
        // Dynamic offsets cross the boundary as raw bytes; null means none.
        const offsets = if (dynamic_offsets) |offsets|
            std.mem.sliceAsBytes(offsets)
        else
            &[_]u8{};
        js.setBindGroup(render_pass.id, group_index, group.id, offsets.ptr, offsets.len);
    }

    pub fn setVertexBuffer(
        render_pass: *RenderPass,
        slot: u32,
        buffer: *const Buffer,
        offset: u32,
        size: usize,
    ) !void {
        js.setVertexBuffer(render_pass.id, slot, buffer.id, offset, size);
    }

    pub fn setIndexBuffer(
        render_pass: *RenderPass,
        buffer: *const Buffer,
        index_format: gfx.IndexFormat,
        offset: u32,
        size: usize,
    ) !void {
        // The index format crosses the boundary as its enum name string.
        const fmt_name = getEnumName(index_format);
        js.setIndexBuffer(render_pass.id, buffer.id, fmt_name.ptr, fmt_name.len, offset, size);
    }

    pub fn draw(
        render_pass: *RenderPass,
        vertex_count: usize,
        instance_count: usize,
        first_vertex: usize,
        first_instance: usize,
    ) !void {
        js.draw(render_pass.id, vertex_count, instance_count, first_vertex, first_instance);
    }

    pub fn drawIndexed(
        render_pass: *RenderPass,
        index_count: usize,
        instance_count: usize,
        first_index: usize,
        base_vertex: i32,
        first_instance: usize,
    ) !void {
        js.drawIndexed(
            render_pass.id,
            index_count,
            instance_count,
            first_index,
            base_vertex,
            first_instance,
        );
    }

    pub fn end(render_pass: *RenderPass) !void {
        js.endRenderPass(render_pass.id);
    }
};

pub const Queue = struct {
    id: js.DeviceId,

    pub fn writeBuffer(
        queue: *Queue,
        buffer: *const Buffer,
        buffer_offset: usize,
        data: []const u8,
        data_offset: usize,
    ) !void {
        js.queueWriteBuffer(queue.id, buffer.id, buffer_offset, data.ptr, data.len, data_offset);
    }

    pub fn submit(queue: *Queue, command_buffers: []const gfx.CommandBuffer) !void {
        // Command buffers are submitted one at a time on the JS side.
        for (command_buffers) |command_buffer| {
            js.queueSubmit(queue.id, command_buffer.impl.id);
        }
    }
};
// file: src/gfx_web.zig
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;

/// Result type pairing a normalized significand with its binary exponent.
fn frexp_result(comptime T: type) type {
    return struct {
        significand: T,
        exponent: i32,
    };
}
pub const frexp32_result = frexp_result(f32);
pub const frexp64_result = frexp_result(f64);

/// Breaks x into a normalized fraction and an integral power of two.
/// f == frac * 2^exp, with |frac| in the interval [0.5, 1).
///
/// Special Cases:
///  - frexp(+-0)   = +-0, 0
///  - frexp(+-inf) = +-inf, 0
///  - frexp(nan)   = nan, undefined
pub fn frexp(x: anytype) frexp_result(@TypeOf(x)) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => frexp32(x),
        f64 => frexp64(x),
        else => @compileError("frexp not implemented for " ++ @typeName(T)),
    };
}

fn frexp32(x: f32) frexp32_result {
    var result: frexp32_result = undefined;

    var y = @bitCast(u32, x);
    // Extract the 8-bit biased exponent field.
    const e = @intCast(i32, y >> 23) & 0xFF;

    if (e == 0) {
        if (x != 0) {
            // Subnormal: scale into the normal range by 2^64, recurse, then
            // undo the scaling in the exponent.
            result = frexp32(x * 0x1.0p64);
            result.exponent -= 64;
        } else {
            // frexp(+-0) = (+-0, 0)
            result.significand = x;
            result.exponent = 0;
        }
        return result;
    } else if (e == 0xFF) {
        // frexp(nan) = (nan, undefined)
        result.significand = x;
        result.exponent = undefined;

        // frexp(+-inf) = (+-inf, 0)
        if (math.isInf(x)) {
            result.exponent = 0;
        }

        return result;
    }

    // Unbias with 126 (not 127) so the significand lands in [0.5, 1).
    result.exponent = e - 0x7E;
    // Keep sign + mantissa, then force a biased exponent of 126 (0x3F000000),
    // i.e. an effective exponent of -1.
    y &= 0x807FFFFF;
    y |= 0x3F000000;
    result.significand = @bitCast(f32, y);
    return result;
}

fn frexp64(x: f64) frexp64_result {
    var result: frexp64_result = undefined;

    var y = @bitCast(u64, x);
    // Extract the 11-bit biased exponent field.
    const e = @intCast(i32, y >> 52) & 0x7FF;

    if (e == 0) {
        if (x != 0) {
            // Subnormal: scale into the normal range by 2^64, recurse, then
            // undo the scaling in the exponent.
            result = frexp64(x * 0x1.0p64);
            result.exponent -= 64;
        } else {
            // frexp(+-0) = (+-0, 0)
            result.significand = x;
            result.exponent = 0;
        }
        return result;
    } else if (e == 0x7FF) {
        // frexp(nan) = (nan, undefined)
        result.significand = x;
        result.exponent = undefined;

        // frexp(+-inf) = (+-inf, 0)
        if (math.isInf(x)) {
            result.exponent = 0;
        }

        return result;
    }

    // Unbias with 1022 (not 1023) so the significand lands in [0.5, 1).
    result.exponent = e - 0x3FE;
    // Keep sign + mantissa, then force a biased exponent of 1022
    // (0x3FE0000000000000), i.e. an effective exponent of -1.
    y &= 0x800FFFFFFFFFFFFF;
    y |= 0x3FE0000000000000;
    result.significand = @bitCast(f64, y);
    return result;
}

// NOTE(review): these tests call `expect` without `try`; that is only valid
// on Zig versions where std.testing.expect does not return an error union —
// consistent with the old-style builtins used above. Confirm before upgrading.
test "math.frexp" {
    // frexp must dispatch to the width-specific implementations.
    const a = frexp(@as(f32, 1.3));
    const b = frexp32(1.3);
    expect(a.significand == b.significand and a.exponent == b.exponent);

    const c = frexp(@as(f64, 1.3));
    const d = frexp64(1.3);
    expect(c.significand == d.significand and c.exponent == d.exponent);
}

test "math.frexp32" {
    const epsilon = 0.000001;
    var r: frexp32_result = undefined;

    // 1.3 == 0.65 * 2^1
    r = frexp32(1.3);
    expect(math.approxEqAbs(f32, r.significand, 0.65, epsilon) and r.exponent == 1);

    // 78.0234 == ~0.609558 * 2^7
    r = frexp32(78.0234);
    expect(math.approxEqAbs(f32, r.significand, 0.609558, epsilon) and r.exponent == 7);
}

test "math.frexp64" {
    const epsilon = 0.000001;
    var r: frexp64_result = undefined;

    r = frexp64(1.3);
    expect(math.approxEqAbs(f64, r.significand, 0.65, epsilon) and r.exponent == 1);

    r = frexp64(78.0234);
    expect(math.approxEqAbs(f64, r.significand, 0.609558, epsilon) and r.exponent == 7);
}

test "math.frexp32.special" {
    var r: frexp32_result = undefined;

    r = frexp32(0.0);
    expect(r.significand == 0.0 and r.exponent == 0);

    r = frexp32(-0.0);
    expect(r.significand == -0.0 and r.exponent == 0);

    r = frexp32(math.inf(f32));
    expect(math.isPositiveInf(r.significand) and r.exponent == 0);

    r = frexp32(-math.inf(f32));
    expect(math.isNegativeInf(r.significand) and r.exponent == 0);

    r = frexp32(math.nan(f32));
    expect(math.isNan(r.significand));
}

test "math.frexp64.special" {
    var r: frexp64_result = undefined;

    r = frexp64(0.0);
    expect(r.significand == 0.0 and r.exponent == 0);

    r = frexp64(-0.0);
    expect(r.significand == -0.0 and r.exponent == 0);

    r = frexp64(math.inf(f64));
    expect(math.isPositiveInf(r.significand) and r.exponent == 0);

    r = frexp64(-math.inf(f64));
    expect(math.isNegativeInf(r.significand) and r.exponent == 0);

    r = frexp64(math.nan(f64));
    expect(math.isNan(r.significand));
}
// file: lib/std/math/frexp.zig
mod: *Module,
/// Alias to `mod.gpa`.
gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
arena: Allocator,
/// Points to the arena allocator for the owner_decl.
/// This arena will persist until the decl is invalidated.
perm_arena: Allocator,
/// The ZIR code being analyzed.
code: Zir,
// AIR output buffers, built up incrementally during analysis.
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
air_values: std.ArrayListUnmanaged(Value) = .{},
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// When analyzing an inline function call, owner_decl is the Decl of the caller
/// and `src_decl` of `Block` is the `Decl` of the callee.
/// This `Decl` owns the arena memory of this `Sema`.
owner_decl: *Decl,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
owner_func: ?*Module.Fn,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func` and then diverges in the case of
/// an inline or comptime function call.
func: ?*Module.Fn,
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func`.
/// This will correctly handle the case of a comptime/inline function call of a
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
// NOTE(review): branch budget and counter; the enforcement logic lives
// outside this chunk — confirm semantics there before relying on them.
branch_quota: u32 = 1000,
branch_count: u32 = 0,
/// This field is updated when a new source location becomes active, so that
/// instructions which do not have explicitly mapped source locations still have
/// access to the source location set by the previous instruction which did
/// contain a mapped source location.
src: LazySrcLoc = .{ .token_offset = 0 },
/// Memoizes Decl-to-AIR-ref lookups performed during this analysis.
decl_val_table: std.AutoHashMapUnmanaged(*Decl, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a
/// `Value` object for each parameter that is comptime known and thus elided
/// from the generated function. This memory is allocated by a parent `Sema` and
/// owned by the values arena of the Sema owner_decl.
comptime_args: []TypedValue = &.{},
/// Marks the function instruction that `comptime_args` applies to so that we
/// don't accidentally apply it to a function prototype which is used in the
/// type expression of a generic function parameter.
comptime_args_fn_inst: Zir.Inst.Index = 0,
/// When `comptime_args` is provided, this field is also provided. It was used as
/// the key in the `monomorphed_funcs` set. The `func` instruction is supposed
/// to use this instead of allocating a fresh one. This avoids an unnecessary
/// extra hash table lookup in the `monomorphed_funcs` set.
/// Sema will set this to null when it takes ownership.
preallocated_new_func: ?*Module.Fn = null,

const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);

const Sema = @This();
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Air = @import("Air.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const Decl = Module.Decl;
const CaptureScope = Module.CaptureScope;
const WipCaptureScope = Module.WipCaptureScope;
const LazySrcLoc = Module.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");

pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref);

/// This is the context needed to semantically analyze ZIR instructions and
/// produce AIR instructions.
/// This is a temporary structure stored on the stack; references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
    /// for the one that will be the same for all Block instances.
    src_decl: *Decl,
    /// The namespace to use for lookups from this source block.
    /// When analyzing fields, this is different from src_decl.src_namespace.
    namespace: *Namespace,
    /// The AIR instructions generated for this block.
    instructions: std.ArrayListUnmanaged(Air.Inst.Index),
    // `param` instructions are collected here to be used by the `func` instruction.
    params: std.ArrayListUnmanaged(Param) = .{},
    wip_capture_scope: *CaptureScope,
    label: ?*Label = null,
    inlining: ?*Inlining,
    /// If runtime_index is not 0 then one of these is guaranteed to be non null.
    runtime_cond: ?LazySrcLoc = null,
    runtime_loop: ?LazySrcLoc = null,
    /// Non zero if a non-inline loop or a runtime conditional have been encountered.
    /// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index.
    runtime_index: u32 = 0,
    is_comptime: bool,
    /// when null, it is determined by build mode, changed by @setRuntimeSafety
    want_safety: ?bool = null,
    c_import_buf: ?*std.ArrayList(u8) = null,

    const Param = struct {
        /// `noreturn` means `anytype`.
        ty: Type,
        is_comptime: bool,
    };

    /// This `Block` maps a block ZIR instruction to the corresponding
    /// AIR instruction for break instruction analysis.
    pub const Label = struct {
        zir_block: Zir.Inst.Index,
        merges: Merges,
    };

    /// This `Block` indicates that an inline function call is happening
    /// and return instructions should be analyzed as a break instruction
    /// to this AIR block instruction.
    /// It is shared among all the blocks in an inline or comptime called
    /// function.
    pub const Inlining = struct {
        comptime_result: Air.Inst.Ref,
        merges: Merges,
    };

    pub const Merges = struct {
        block_inst: Air.Inst.Index,
        /// Separate array list from break_inst_list so that it can be passed directly
        /// to resolvePeerTypes.
        results: std.ArrayListUnmanaged(Air.Inst.Ref),
        /// Keeps track of the break instructions so that the operand can be replaced
        /// if we need to add type coercion at the end of block analysis.
        /// Same indexes, capacity, length as `results`.
        br_list: std.ArrayListUnmanaged(Air.Inst.Index),
    };

    /// For debugging purposes.
    pub fn dump(block: *Block, mod: Module) void {
        Zir.dumpBlock(mod, block);
    }

    /// Creates a child block inheriting everything from `parent` except the
    /// label and the (fresh, empty) instruction list.
    pub fn makeSubBlock(parent: *Block) Block {
        return .{
            .parent = parent,
            .sema = parent.sema,
            .src_decl = parent.src_decl,
            .namespace = parent.namespace,
            .instructions = .{},
            .wip_capture_scope = parent.wip_capture_scope,
            .label = null,
            .inlining = parent.inlining,
            .is_comptime = parent.is_comptime,
            .runtime_cond = parent.runtime_cond,
            .runtime_loop = parent.runtime_loop,
            .runtime_index = parent.runtime_index,
            .want_safety = parent.want_safety,
            .c_import_buf = parent.c_import_buf,
        };
    }

    /// Whether runtime safety checks should be emitted for this block:
    /// the explicit override wins, otherwise the build mode decides.
    pub fn wantSafety(block: *const Block) bool {
        return block.want_safety orelse switch (block.sema.mod.optimizeMode()) {
            .Debug => true,
            .ReleaseSafe => true,
            .ReleaseFast => false,
            .ReleaseSmall => false,
        };
    }

    pub fn getFileScope(block: *Block) *Module.File {
        return block.namespace.file_scope;
    }

    // Each add* helper below appends one AIR instruction with the matching
    // data payload and returns a Ref to it.

    pub fn addTy(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty = ty },
        });
    }

    pub fn addTyOp(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty_op = .{
                .ty = try block.sema.addType(ty),
                .operand = operand,
            } },
        });
    }

    pub fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .bitcast,
            .data = .{ .ty_op = .{
                .ty = try block.sema.addType(ty),
                .operand = operand,
            } },
        });
    }

    pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .no_op = {} },
        });
    }

    pub fn addUnOp(
        block: *Block,
        tag: Air.Inst.Tag,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .un_op = operand },
        });
    }

    pub fn addBr(
        block: *Block,
        target_block: Air.Inst.Index,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .br,
            .data = .{ .br = .{
                .block_inst = target_block,
                .operand = operand,
            } },
        });
    }

    pub fn addBinOp(
        block: *Block,
        tag: Air.Inst.Tag,
        lhs: Air.Inst.Ref,
        rhs: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .bin_op = .{
                .lhs = lhs,
                .rhs = rhs,
            } },
        });
    }

    pub fn addArg(block: *Block, ty: Type, name: u32) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .arg,
            .data = .{ .ty_str = .{
                .ty = try block.sema.addType(ty),
                .str = name,
            } },
        });
    }

    pub fn addStructFieldPtr(
        block: *Block,
        struct_ptr: Air.Inst.Ref,
        field_index: u32,
        ptr_field_ty: Type,
    ) !Air.Inst.Ref {
        const ty = try block.sema.addType(ptr_field_ty);
        // Field indexes 0-3 have dedicated compact tags; higher indexes fall
        // back to the generic extra-payload form and return early.
        const tag: Air.Inst.Tag = switch (field_index) {
            0 => .struct_field_ptr_index_0,
            1 => .struct_field_ptr_index_1,
            2 => .struct_field_ptr_index_2,
            3 => .struct_field_ptr_index_3,
            else => {
                return block.addInst(.{
                    .tag = .struct_field_ptr,
                    .data = .{ .ty_pl = .{
                        .ty = ty,
                        .payload = try block.sema.addExtra(Air.StructField{
                            .struct_operand = struct_ptr,
                            .field_index = field_index,
                        }),
                    } },
                });
            },
        };
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty_op = .{
                .ty = ty,
                .operand = struct_ptr,
            } },
        });
    }

    pub fn addStructFieldVal(
        block: *Block,
        struct_val: Air.Inst.Ref,
        field_index: u32,
        field_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .struct_field_val,
            .data = .{ .ty_pl = .{
                .ty = try block.sema.addType(field_ty),
                .payload = try block.sema.addExtra(Air.StructField{
                    .struct_operand = struct_val,
                    .field_index = field_index,
                }),
            } },
        });
    }

    pub fn addSliceElemPtr(
        block: *Block,
        slice: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .slice_elem_ptr,
            .data = .{ .ty_pl = .{
                .ty = try block.sema.addType(elem_ptr_ty),
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = slice,
                    .rhs = elem_index,
                }),
            } },
        });
    }

    pub fn addPtrElemPtr(
        block: *Block,
        array_ptr: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{ .tag =
        // (continuation of addPtrElemPtr, whose head is on the previous chunk)
        .ptr_elem_ptr, .data = .{ .ty_pl = .{
            .ty = try block.sema.addType(elem_ptr_ty),
            .payload = try block.sema.addExtra(Air.Bin{
                .lhs = array_ptr,
                .rhs = elem_index,
            }),
        } } });
    }

    /// Appends `inst` and returns it as a Ref.
    pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
        return Air.indexToRef(try block.addInstAsIndex(inst));
    }

    /// Appends `inst` to both the Sema-wide AIR list and this block's own
    /// instruction list, returning its global index.
    pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        const sema = block.sema;
        const gpa = sema.gpa;

        // Reserve capacity in both lists first so the two appends below
        // cannot partially fail.
        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
        try block.instructions.ensureUnusedCapacity(gpa, 1);

        const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
        sema.air_instructions.appendAssumeCapacity(inst);
        block.instructions.appendAssumeCapacity(result_index);
        return result_index;
    }

    /// Emits either a safety panic or a plain `unreach`, depending on
    /// whether safety checks are wanted here.
    fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void {
        if (safety_check and block.wantSafety()) {
            _ = try block.sema.safetyPanic(block, src, .unreach);
        } else {
            _ = try block.addNoOp(.unreach);
        }
    }

    pub fn startAnonDecl(block: *Block) !WipAnonDecl {
        return WipAnonDecl{
            .block = block,
            .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa),
            .finished = false,
        };
    }

    /// Work-in-progress anonymous Decl: owns an arena until `finish`
    /// transfers it to the new Decl; `deinit` frees it if `finish` was
    /// never reached.
    pub const WipAnonDecl = struct {
        block: *Block,
        new_decl_arena: std.heap.ArenaAllocator,
        finished: bool,

        pub fn arena(wad: *WipAnonDecl) Allocator {
            return wad.new_decl_arena.allocator();
        }

        pub fn deinit(wad: *WipAnonDecl) void {
            if (!wad.finished) {
                wad.new_decl_arena.deinit();
            }
            wad.* = undefined;
        }

        pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value) !*Decl {
            const new_decl = try wad.block.sema.mod.createAnonymousDecl(wad.block, .{
                .ty = ty,
                .val = val,
            });
            errdefer wad.block.sema.mod.abortAnonDecl(new_decl);
            // Ownership of the arena moves to the new Decl on success.
            try new_decl.finalizeNewArena(&wad.new_decl_arena);
            wad.finished = true;
            return new_decl;
        }
    };
};

/// Frees all Sema-owned lists and tables; the arenas are owned elsewhere.
pub fn deinit(sema: *Sema) void {
    const gpa = sema.gpa;
    sema.air_instructions.deinit(gpa);
    sema.air_extra.deinit(gpa);
    sema.air_values.deinit(gpa);
    sema.inst_map.deinit(gpa);
    sema.decl_val_table.deinit(gpa);
    sema.* =
undefined; } /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. fn resolveBody(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) CompileError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); } /// ZIR instructions which are always `noreturn` return this. This matches the /// return type of `analyzeBody` so that we can tail call them. /// Only appropriate to return when the instruction is known to be NoReturn /// solely based on the ZIR tag. const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); /// This function is the main loop of `Sema` and it can be used in two different ways: /// * The traditional way where there are N breaks out of the block and peer type /// resolution is done on the break operands. In this case, the `Zir.Inst.Index` /// part of the return value will be `undefined`, and callsites should ignore it, /// finding the block result value via the block scope. /// * The "flat" way. There is only 1 break out of the block, and it is with a `break_inline` /// instruction. In this case, the `Zir.Inst.Index` part of the return value will be /// the break instruction. This communicates both which block the break applies to, as /// well as the operand. No block scope needs to be created for this strategy. pub fn analyzeBody( sema: *Sema, block: *Block, body: []const Zir.Inst.Index, ) CompileError!Zir.Inst.Index { // No tracy calls here, to avoid interfering with the tail call mechanism. 
const parent_capture_scope = block.wip_capture_scope;

    // Starts out finalized; only replaced (via `reset`) when a comptime
    // `repeat` needs a fresh capture scope.
    var wip_captures = WipCaptureScope{
        .finalized = true,
        .scope = parent_capture_scope,
        .perm_arena = sema.perm_arena,
        .gpa = sema.gpa,
    };
    defer if (wip_captures.scope != parent_capture_scope) {
        wip_captures.deinit();
    };

    const map = &sema.inst_map;
    const tags = sema.code.instructions.items(.tag);
    const datas = sema.code.instructions.items(.data);
    var orig_captures: usize = parent_capture_scope.captures.count();

    var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
    crash_info.push();
    defer crash_info.pop();

    // We use a while(true) loop here to avoid a redundant way of breaking out of
    // the loop. The only way to break out of the loop is with a `noreturn`
    // instruction.
    var i: usize = 0;
    const result = while (true) {
        crash_info.setBodyIndex(i);
        const inst = body[i];
        const air_inst: Air.Inst.Ref = switch (tags[inst]) {
            // zig fmt: off
            .alloc => try sema.zirAlloc(block, inst),
            .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)),
            .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)),
            .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst),
            .alloc_mut => try sema.zirAllocMut(block, inst),
            .alloc_comptime => try sema.zirAllocComptime(block, inst),
            .anyframe_type => try sema.zirAnyframeType(block, inst),
            .array_cat => try sema.zirArrayCat(block, inst),
            .array_mul => try sema.zirArrayMul(block, inst),
            .array_type => try sema.zirArrayType(block, inst),
            .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst),
            .vector_type => try sema.zirVectorType(block, inst),
            .as => try sema.zirAs(block, inst),
            .as_node => try sema.zirAsNode(block, inst),
            .bit_and => try sema.zirBitwise(block, inst, .bit_and),
            .bit_not => try sema.zirBitNot(block, inst),
            .bit_or => try sema.zirBitwise(block, inst, .bit_or),
            .bitcast => try sema.zirBitcast(block, inst),
            .suspend_block => try sema.zirSuspendBlock(block, inst),
            .bool_not => try sema.zirBoolNot(block, inst),
            .bool_br_and => try sema.zirBoolBr(block, inst, false),
            .bool_br_or => try sema.zirBoolBr(block, inst, true),
            .c_import => try sema.zirCImport(block, inst),
            .call => try sema.zirCall(block, inst),
            .closure_get => try sema.zirClosureGet(block, inst),
            .cmp_lt => try sema.zirCmp(block, inst, .lt),
            .cmp_lte => try sema.zirCmp(block, inst, .lte),
            .cmp_eq => try sema.zirCmpEq(block, inst, .eq, .cmp_eq),
            .cmp_gte => try sema.zirCmp(block, inst, .gte),
            .cmp_gt => try sema.zirCmp(block, inst, .gt),
            .cmp_neq => try sema.zirCmpEq(block, inst, .neq, .cmp_neq),
            .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst),
            .decl_ref => try sema.zirDeclRef(block, inst),
            .decl_val => try sema.zirDeclVal(block, inst),
            .load => try sema.zirLoad(block, inst),
            .elem_ptr => try sema.zirElemPtr(block, inst),
            .elem_ptr_node => try sema.zirElemPtrNode(block, inst),
            .elem_ptr_imm => try sema.zirElemPtrImm(block, inst),
            .elem_val => try sema.zirElemVal(block, inst),
            .elem_val_node => try sema.zirElemValNode(block, inst),
            .elem_type => try sema.zirElemType(block, inst),
            .enum_literal => try sema.zirEnumLiteral(block, inst),
            .enum_to_int => try sema.zirEnumToInt(block, inst),
            .int_to_enum => try sema.zirIntToEnum(block, inst),
            .err_union_code => try sema.zirErrUnionCode(block, inst),
            .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst),
            .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true),
            .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true),
            .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false),
            .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false),
            .error_union_type => try sema.zirErrorUnionType(block, inst),
            .error_value => try sema.zirErrorValue(block, inst),
            .error_to_int => try sema.zirErrorToInt(block, inst),
            .int_to_error => try sema.zirIntToError(block, inst),
            .field_ptr => try sema.zirFieldPtr(block, inst),
            .field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
            .field_val => try sema.zirFieldVal(block, inst),
            .field_val_named => try sema.zirFieldValNamed(block, inst),
            .field_call_bind => try sema.zirFieldCallBind(block, inst),
            .field_call_bind_named => try sema.zirFieldCallBindNamed(block, inst),
            .func => try sema.zirFunc(block, inst, false),
            .func_inferred => try sema.zirFunc(block, inst, true),
            .import => try sema.zirImport(block, inst),
            .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst),
            .int => try sema.zirInt(block, inst),
            .int_big => try sema.zirIntBig(block, inst),
            .float => try sema.zirFloat(block, inst),
            .float128 => try sema.zirFloat128(block, inst),
            .int_type => try sema.zirIntType(block, inst),
            .is_non_err => try sema.zirIsNonErr(block, inst),
            .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst),
            .is_non_null => try sema.zirIsNonNull(block, inst),
            .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst),
            .merge_error_sets => try sema.zirMergeErrorSets(block, inst),
            .negate => try sema.zirNegate(block, inst, .sub),
            .negate_wrap => try sema.zirNegate(block, inst, .subwrap),
            .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true),
            .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true),
            .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
            .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
            .optional_type => try sema.zirOptionalType(block, inst),
            .ptr_type => try sema.zirPtrType(block, inst),
            .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst),
            .ref => try sema.zirRef(block, inst),
            .ret_err_value_code => try sema.zirRetErrValueCode(block, inst),
            .shr => try sema.zirShr(block, inst),
            .slice_end => try sema.zirSliceEnd(block, inst),
            .slice_sentinel => try sema.zirSliceSentinel(block, inst),
            .slice_start => try sema.zirSliceStart(block, inst),
            .str => try sema.zirStr(block, inst),
            .switch_block => try sema.zirSwitchBlock(block, inst),
            .switch_cond => try sema.zirSwitchCond(block, inst, false),
            .switch_cond_ref => try sema.zirSwitchCond(block, inst, true),
            .switch_capture => try sema.zirSwitchCapture(block, inst, false, false),
            .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true),
            .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false),
            .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true),
            .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false),
            .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true),
            .type_info => try sema.zirTypeInfo(block, inst),
            .size_of => try sema.zirSizeOf(block, inst),
            .bit_size_of => try sema.zirBitSizeOf(block, inst),
            .typeof => try sema.zirTypeof(block, inst),
            .log2_int_type => try sema.zirLog2IntType(block, inst),
            .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
            .xor => try sema.zirBitwise(block, inst, .xor),
            .struct_init_empty => try sema.zirStructInitEmpty(block, inst),
            .struct_init => try sema.zirStructInit(block, inst, false),
            .struct_init_ref => try sema.zirStructInit(block, inst, true),
            .struct_init_anon => try sema.zirStructInitAnon(block, inst, false),
            .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true),
            .array_init => try sema.zirArrayInit(block, inst, false),
            .array_init_ref => try sema.zirArrayInit(block, inst, true),
            .array_init_anon => try sema.zirArrayInitAnon(block, inst, false),
            .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true),
            .union_init_ptr => try sema.zirUnionInitPtr(block, inst),
            .field_type => try sema.zirFieldType(block, inst),
            .field_type_ref => try sema.zirFieldTypeRef(block, inst),
            .ptr_to_int => try sema.zirPtrToInt(block, inst),
            .align_of => try sema.zirAlignOf(block, inst),
            .bool_to_int => try sema.zirBoolToInt(block, inst),
            .embed_file => try sema.zirEmbedFile(block, inst),
            .error_name => try sema.zirErrorName(block, inst),
            .tag_name => try sema.zirTagName(block, inst),
            .reify => try sema.zirReify(block, inst),
            .type_name => try sema.zirTypeName(block, inst),
            .frame_type => try sema.zirFrameType(block, inst),
            .frame_size => try sema.zirFrameSize(block, inst),
            .float_to_int => try sema.zirFloatToInt(block, inst),
            .int_to_float => try sema.zirIntToFloat(block, inst),
            .int_to_ptr => try sema.zirIntToPtr(block, inst),
            .float_cast => try sema.zirFloatCast(block, inst),
            .int_cast => try sema.zirIntCast(block, inst),
            .err_set_cast => try sema.zirErrSetCast(block, inst),
            .ptr_cast => try sema.zirPtrCast(block, inst),
            .truncate => try sema.zirTruncate(block, inst),
            .align_cast => try sema.zirAlignCast(block, inst),
            .has_decl => try sema.zirHasDecl(block, inst),
            .has_field => try sema.zirHasField(block, inst),
            .clz => try sema.zirClz(block, inst),
            .ctz => try sema.zirCtz(block, inst),
            .pop_count => try sema.zirPopCount(block, inst),
            .byte_swap => try sema.zirByteSwap(block, inst),
            .bit_reverse => try sema.zirBitReverse(block, inst),
            .shr_exact => try sema.zirShrExact(block, inst),
            .bit_offset_of => try sema.zirBitOffsetOf(block, inst),
            .offset_of => try sema.zirOffsetOf(block, inst),
            .cmpxchg_strong => try sema.zirCmpxchg(block, inst, .cmpxchg_strong),
            .cmpxchg_weak => try sema.zirCmpxchg(block, inst, .cmpxchg_weak),
            .splat => try sema.zirSplat(block, inst),
            .reduce => try sema.zirReduce(block, inst),
            .shuffle => try sema.zirShuffle(block, inst),
            .select => try sema.zirSelect(block, inst),
            .atomic_load => try sema.zirAtomicLoad(block, inst),
            .atomic_rmw => try sema.zirAtomicRmw(block, inst),
            .mul_add => try sema.zirMulAdd(block, inst),
            .builtin_call => try sema.zirBuiltinCall(block, inst),
            .field_ptr_type => try sema.zirFieldPtrType(block, inst),
            .field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
            .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst),
            .@"resume" => try sema.zirResume(block, inst),
            .@"await" => try sema.zirAwait(block, inst, false),
            .await_nosuspend => try sema.zirAwait(block, inst, true),
            .extended => try sema.zirExtended(block, inst),
            .sqrt => try sema.zirUnaryMath(block, inst),
            .sin => try sema.zirUnaryMath(block, inst),
            .cos => try sema.zirUnaryMath(block, inst),
            .exp => try sema.zirUnaryMath(block, inst),
            .exp2 => try sema.zirUnaryMath(block, inst),
            .log => try sema.zirUnaryMath(block, inst),
            .log2 => try sema.zirUnaryMath(block, inst),
            .log10 => try sema.zirUnaryMath(block, inst),
            .fabs => try sema.zirUnaryMath(block, inst),
            .floor => try sema.zirUnaryMath(block, inst),
            .ceil => try sema.zirUnaryMath(block, inst),
            .trunc => try sema.zirUnaryMath(block, inst),
            .round => try sema.zirUnaryMath(block, inst),
            .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
            .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
            .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
            .add => try sema.zirArithmetic(block, inst, .add),
            .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
            .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
            .div => try sema.zirArithmetic(block, inst, .div),
            .div_exact => try sema.zirArithmetic(block, inst, .div_exact),
            .div_floor => try sema.zirArithmetic(block, inst, .div_floor),
            .div_trunc => try sema.zirArithmetic(block, inst, .div_trunc),
            .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem),
            .mod => try sema.zirArithmetic(block, inst, .mod),
            .rem => try sema.zirArithmetic(block, inst, .rem),
            .mul => try sema.zirArithmetic(block, inst, .mul),
            .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
            .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
            .sub => try sema.zirArithmetic(block, inst, .sub),
            .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
            .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
            .maximum => try sema.zirMinMax(block, inst, .max),
            .minimum => try sema.zirMinMax(block, inst, .min),
            .shl => try sema.zirShl(block, inst, .shl),
            .shl_exact => try sema.zirShl(block, inst, .shl_exact),
            .shl_sat => try sema.zirShl(block, inst, .shl_sat),

            // Instructions that we know to *always* be noreturn based solely on their tag.
            // These functions match the return type of analyzeBody so that we can
            // tail call them here.
            .compile_error => break sema.zirCompileError(block, inst),
            .ret_coerce => break sema.zirRetCoerce(block, inst),
            .ret_node => break sema.zirRetNode(block, inst),
            .ret_load => break sema.zirRetLoad(block, inst),
            .ret_err_value => break sema.zirRetErrValue(block, inst),
            .@"unreachable" => break sema.zirUnreachable(block, inst),
            .panic => break sema.zirPanic(block, inst),
            // zig fmt: on

            // Instructions that we know can *never* be noreturn based solely on
            // their tag. We avoid needlessly checking if they are noreturn and
            // continue the loop.
            // We also know that they cannot be referenced later, so we avoid
            // putting them into the map.
            .breakpoint => {
                if (!block.is_comptime) {
                    _ = try block.addNoOp(.breakpoint);
                }
                i += 1;
                continue;
            },
            .fence => {
                try sema.zirFence(block, inst);
                i += 1;
                continue;
            },
            .dbg_stmt => {
                try sema.zirDbgStmt(block, inst);
                i += 1;
                continue;
            },
            .ensure_err_payload_void => {
                try sema.zirEnsureErrPayloadVoid(block, inst);
                i += 1;
                continue;
            },
            .ensure_result_non_error => {
                try sema.zirEnsureResultNonError(block, inst);
                i += 1;
                continue;
            },
            .ensure_result_used => {
                try sema.zirEnsureResultUsed(block, inst);
                i += 1;
                continue;
            },
            .set_eval_branch_quota => {
                try sema.zirSetEvalBranchQuota(block, inst);
                i += 1;
                continue;
            },
            .atomic_store => {
                try sema.zirAtomicStore(block, inst);
                i += 1;
                continue;
            },
            .store => {
                try sema.zirStore(block, inst);
                i += 1;
                continue;
            },
            .store_node => {
                try sema.zirStoreNode(block, inst);
                i += 1;
                continue;
            },
            .store_to_block_ptr => {
                try sema.zirStoreToBlockPtr(block, inst);
                i += 1;
                continue;
            },
            .store_to_inferred_ptr => {
                try sema.zirStoreToInferredPtr(block, inst);
                i += 1;
                continue;
            },
            .resolve_inferred_alloc => {
                try sema.zirResolveInferredAlloc(block, inst);
                i += 1;
                continue;
            },
            .validate_struct_init => {
                try sema.zirValidateStructInit(block, inst);
                i += 1;
                continue;
            },
            .validate_array_init => {
                try sema.zirValidateArrayInit(block, inst);
                i += 1;
                continue;
            },
            .@"export" => {
                try sema.zirExport(block, inst);
                i += 1;
                continue;
            },
            .export_value => {
                try sema.zirExportValue(block, inst);
                i += 1;
                continue;
            },
            .set_align_stack => {
                try sema.zirSetAlignStack(block, inst);
                i += 1;
                continue;
            },
            .set_cold => {
                try sema.zirSetCold(block, inst);
                i += 1;
                continue;
            },
            .set_float_mode => {
                try sema.zirSetFloatMode(block, inst);
                i += 1;
                continue;
            },
            .set_runtime_safety => {
                try sema.zirSetRuntimeSafety(block, inst);
                i += 1;
                continue;
            },
            .param => {
                try sema.zirParam(block, inst, false);
                i += 1;
                continue;
            },
            .param_comptime => {
                try sema.zirParam(block, inst, true);
                i += 1;
                continue;
            },
            .param_anytype => {
                try sema.zirParamAnytype(block, inst, false);
                i += 1;
                continue;
            },
            .param_anytype_comptime => {
                try sema.zirParamAnytype(block, inst, true);
                i += 1;
                continue;
            },
            .closure_capture => {
                try sema.zirClosureCapture(block, inst);
                i += 1;
                continue;
            },
            .memcpy => {
                try sema.zirMemcpy(block, inst);
                i += 1;
                continue;
            },
            .memset => {
                try sema.zirMemset(block, inst);
                i += 1;
                continue;
            },

            // Special case instructions to handle comptime control flow.
            .@"break" => {
                if (block.is_comptime) {
                    break inst; // same as break_inline
                } else {
                    break sema.zirBreak(block, inst);
                }
            },
            .break_inline => break inst,
            .repeat => {
                if (block.is_comptime) {
                    // Send comptime control flow back to the beginning of this block.
                    const src: LazySrcLoc = .{ .node_offset = datas[inst].node };
                    try sema.emitBackwardBranch(block, src);
                    // If new captures were added this iteration, start over
                    // with a fresh capture scope for the next pass.
                    if (wip_captures.scope.captures.count() != orig_captures) {
                        try wip_captures.reset(parent_capture_scope);
                        block.wip_capture_scope = wip_captures.scope;
                        orig_captures = 0;
                    }
                    i = 0;
                    continue;
                } else {
                    const src_node = sema.code.instructions.items(.data)[inst].node;
                    const src: LazySrcLoc = .{ .node_offset = src_node };
                    try sema.requireRuntimeBlock(block, src);
                    break always_noreturn;
                }
            },
            .repeat_inline => {
                // Send comptime control flow back to the beginning of this block.
                const src: LazySrcLoc = .{ .node_offset = datas[inst].node };
                try sema.emitBackwardBranch(block, src);
                if (wip_captures.scope.captures.count() != orig_captures) {
                    try wip_captures.reset(parent_capture_scope);
                    block.wip_capture_scope = wip_captures.scope;
                    orig_captures = 0;
                }
                i = 0;
                continue;
            },
            .loop => blk: {
                if (!block.is_comptime) break :blk try sema.zirLoop(block, inst);
                // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[inst].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
                const break_inst = try sema.analyzeBody(block, inline_body);
                const break_data = datas[break_inst].@"break";
                if (inst == break_data.block_inst) {
                    break :blk sema.resolveInst(break_data.operand);
                } else {
                    break break_inst;
                }
            },
            .block => blk: {
                if (!block.is_comptime) break :blk try sema.zirBlock(block, inst);
                // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[inst].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
                // If this block contains a function prototype, we need to reset the
                // current list of parameters and restore it later.
                // Note: this probably needs to be resolved in a more general manner.
                const prev_params = block.params;
                block.params = .{};
                defer {
                    block.params.deinit(sema.gpa);
                    block.params = prev_params;
                }
                const break_inst = try sema.analyzeBody(block, inline_body);
                const break_data = datas[break_inst].@"break";
                if (inst == break_data.block_inst) {
                    break :blk sema.resolveInst(break_data.operand);
                } else {
                    break break_inst;
                }
            },
            .block_inline => blk: {
                // Directly analyze the block body without introducing a new block.
                const inst_data = datas[inst].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
                // If this block contains a function prototype, we need to reset the
                // current list of parameters and restore it later.
                // Note: this probably needs to be resolved in a more general manner.
                const prev_params = block.params;
                block.params = .{};
                defer {
                    block.params.deinit(sema.gpa);
                    block.params = prev_params;
                }
                const break_inst = try sema.analyzeBody(block, inline_body);
                const break_data = datas[break_inst].@"break";
                if (inst == break_data.block_inst) {
                    break :blk sema.resolveInst(break_data.operand);
                } else {
                    break break_inst;
                }
            },
            .condbr => blk: {
                if (!block.is_comptime) break sema.zirCondbr(block, inst);
                // Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[inst].pl_node;
                const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
                const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
                const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
                const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
                // The condition is comptime-known here, so analyze only the
                // taken branch inline.
                const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition);
                const inline_body = if (cond.val.toBool()) then_body else else_body;
                const break_inst = try sema.analyzeBody(block, inline_body);
                const break_data = datas[break_inst].@"break";
                if (inst == break_data.block_inst) {
                    break :blk sema.resolveInst(break_data.operand);
                } else {
                    break break_inst;
                }
            },
            .condbr_inline => blk: {
                const inst_data = datas[inst].pl_node;
                const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
                const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
                const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
                const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
                const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition);
                const inline_body = if (cond.val.toBool()) then_body else else_body;
                const break_inst = try sema.analyzeBody(block, inline_body);
                const break_data = datas[break_inst].@"break";
                if (inst == break_data.block_inst) {
                    break :blk sema.resolveInst(break_data.operand);
                } else {
                    break break_inst;
                }
            },
        };
        // A handler that produced a noreturn result terminates this body.
        if (sema.typeOf(air_inst).isNoReturn()) break always_noreturn;
        try map.put(sema.gpa, inst, air_inst);
        i += 1;
    } else unreachable;

    if (!wip_captures.finalized) {
        try wip_captures.finalize();
        block.wip_capture_scope = parent_capture_scope;
    }

    return result;
}

/// Dispatches a ZIR `extended` instruction to its handler based on the
/// extended opcode.
fn zirExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const extended = sema.code.instructions.items(.data)[inst].extended;
    switch (extended.opcode) {
// zig fmt: off
        .func               => return sema.zirFuncExtended(      block, extended, inst),
        .variable           => return sema.zirVarExtended(       block, extended),
        .struct_decl        => return sema.zirStructDecl(        block, extended, inst),
        .enum_decl          => return sema.zirEnumDecl(          block, extended),
        .union_decl         => return sema.zirUnionDecl(         block, extended, inst),
        .opaque_decl        => return sema.zirOpaqueDecl(        block, extended),
        .ret_ptr            => return sema.zirRetPtr(            block, extended),
        .ret_type           => return sema.zirRetType(           block, extended),
        .this               => return sema.zirThis(              block, extended),
        .ret_addr           => return sema.zirRetAddr(           block, extended),
        .builtin_src        => return sema.zirBuiltinSrc(        block, extended),
        .error_return_trace => return sema.zirErrorReturnTrace(  block, extended),
        .frame              => return sema.zirFrame(             block, extended),
        .frame_address      => return sema.zirFrameAddress(      block, extended),
        .alloc              => return sema.zirAllocExtended(     block, extended),
        .builtin_extern     => return sema.zirBuiltinExtern(     block, extended),
        .@"asm"             => return sema.zirAsm(               block, extended, inst),
        .typeof_peer        => return sema.zirTypeofPeer(        block, extended),
        .compile_log        => return sema.zirCompileLog(        block, extended),
        // The four overflow builtins share one handler, discriminated by opcode.
        .add_with_overflow  => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
        .sub_with_overflow  => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
        .mul_with_overflow  => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
        .shl_with_overflow  => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
        .c_undef            => return sema.zirCUndef(            block, extended),
        .c_include          => return sema.zirCInclude(          block, extended),
        .c_define           => return sema.zirCDefine(           block, extended),
        .wasm_memory_size   => return sema.zirWasmMemorySize(    block, extended),
        .wasm_memory_grow   => return sema.zirWasmMemoryGrow(    block, extended),
        .prefetch           => return sema.zirPrefetch(          block, extended),
        // zig fmt: on
    }
}

/// Maps a ZIR reference to its AIR counterpart. Constant references map
/// one-to-one; everything else is looked up in `sema.inst_map`.
pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref {
    var i: usize = @enumToInt(zir_ref);
// First section of indexes correspond to a set number of constant values.
    if (i < Zir.Inst.Ref.typed_value_map.len) {
        // We intentionally map the same indexes to the same values between ZIR and AIR.
        return zir_ref;
    }
    i -= Zir.Inst.Ref.typed_value_map.len;

    // Finally, the last section of indexes refers to the map of ZIR=>AIR.
    return sema.inst_map.get(@intCast(u32, i)).?;
}

/// Resolves `zir_ref`, coerces it to `bool`, and returns its comptime-known
/// boolean value. Emits a compile error (via `resolveConstValue`) when the
/// value is not comptime-known.
fn resolveConstBool(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) !bool {
    const air_inst = sema.resolveInst(zir_ref);
    const wanted_type = Type.initTag(.bool);
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst);
    return val.toBool();
}

/// Resolves `zir_ref` as a comptime-known string, coerced to
/// `[]const u8` and allocated into `sema.arena`.
fn resolveConstString(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) ![]u8 {
    const air_inst = sema.resolveInst(zir_ref);
    const wanted_type = Type.initTag(.const_slice_u8);
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst);
    return val.toAllocatedBytes(wanted_type, sema.arena);
}

/// Resolves `zir_ref` as a `Type`. The generic poison type is surfaced as
/// `error.GenericPoison` so callers in generic contexts can handle it.
pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
    const air_inst = sema.resolveInst(zir_ref);
    const ty = try sema.analyzeAsType(block, src, air_inst);
    if (ty.tag() == .generic_poison) return error.GenericPoison;
    return ty;
}

/// Coerces `air_inst` to `type`, requires it to be comptime-known, and
/// returns the resulting `Type` copied into `sema.arena`.
fn analyzeAsType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_inst: Air.Inst.Ref,
) !Type {
    const wanted_type = Type.initTag(.@"type");
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst);
    var buffer: Value.ToTypeBuffer = undefined;
    const ty = val.toType(&buffer);
    return ty.copy(sema.arena);
}

/// May return Value Tags: `variable`, `undef`.
/// See `resolveConstValue` for an alternative.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
fn resolveValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) CompileError!Value {
    // Not comptime-known at all: hard error.
    const val = (try sema.resolveMaybeUndefValAllowVariables(block, src, air_ref)) orelse
        return sema.failWithNeededComptime(block, src);
    // Generic poison propagates as an error so generic instantiation can retry.
    if (val.tag() == .generic_poison) return error.GenericPoison;
    return val;
}

/// Value Tag `variable` will cause a compile error.
/// Value Tag `undef` may be returned.
fn resolveConstMaybeUndefVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    inst: Air.Inst.Ref,
) CompileError!Value {
    const val = (try sema.resolveMaybeUndefValAllowVariables(block, src, inst)) orelse
        return sema.failWithNeededComptime(block, src);
    switch (val.tag()) {
        .variable => return sema.failWithNeededComptime(block, src),
        .generic_poison => return error.GenericPoison,
        else => return val,
    }
}

/// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors.
/// See `resolveValue` for an alternative.
fn resolveConstValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) CompileError!Value {
    const val = (try sema.resolveMaybeUndefValAllowVariables(block, src, air_ref)) orelse
        return sema.failWithNeededComptime(block, src);
    switch (val.tag()) {
        .undef => return sema.failWithUseOfUndef(block, src),
        .variable => return sema.failWithNeededComptime(block, src),
        .generic_poison => return error.GenericPoison,
        else => return val,
    }
}

/// Value Tag `variable` causes this function to return `null`.
/// Value Tag `undef` causes this function to return a compile error.
fn resolveDefinedValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) CompileError!?Value {
    const val = (try sema.resolveMaybeUndefVal(block, src, air_ref)) orelse return null;
    if (val.isUndef()) return sema.failWithUseOfUndef(block, src);
    return val;
}

/// Value Tag `variable` causes this function to return `null`.
/// Value Tag `undef` causes this function to return the Value.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
fn resolveMaybeUndefVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    inst: Air.Inst.Ref,
) CompileError!?Value {
    const val = (try sema.resolveMaybeUndefValAllowVariables(block, src, inst)) orelse return null;
    switch (val.tag()) {
        .variable => return null,
        .generic_poison => return error.GenericPoison,
        else => return val,
    }
}

/// Returns all Value tags including `variable` and `undef`.
fn resolveMaybeUndefValAllowVariables(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    inst: Air.Inst.Ref,
) CompileError!?Value {
    // First section of indexes correspond to a set number of constant values.
    var i: usize = @enumToInt(inst);
    if (i < Air.Inst.Ref.typed_value_map.len) {
        return Air.Inst.Ref.typed_value_map[i].val;
    }
    i -= Air.Inst.Ref.typed_value_map.len;

    // A type with only one possible value is comptime-known regardless of
    // which instruction produced it.
    if (try sema.typeHasOnePossibleValue(block, src, sema.typeOf(inst))) |opv| {
        return opv;
    }
    const air_tags = sema.air_instructions.items(.tag);
    switch (air_tags[i]) {
        .constant => {
            const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
            return sema.air_values.items[ty_pl.payload];
        },
        .const_ty => {
            return try sema.air_instructions.items(.data)[i].ty.toValue(sema.arena);
        },
        // Any other instruction is a runtime value.
        else => return null,
    }
}

/// Emits the "unable to resolve comptime value" error.
fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "unable to resolve comptime value", .{});
}

/// Emits the error for use of an `undefined` value.
fn failWithUseOfUndef(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "use of undefined value here causes undefined behavior", .{});
}

/// Emits the division-by-zero error.
fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "division by zero here causes undefined behavior", .{});
}

/// Emits the error for `%` on signed integers/floats, which require
/// `@rem` or `@mod` to disambiguate.
fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError {
    return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty });
}

/// Emits the "expected optional type" error.
fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc,
optional_ty: Type) CompileError {
    return sema.fail(block, src, "expected optional type, found {}", .{optional_ty});
}

/// Emits the type-mismatch error for an error set whose code is missing
/// from the destination set.
fn failWithErrorSetCodeMissing(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    dest_err_set_ty: Type,
    src_err_set_ty: Type,
) CompileError {
    return sema.fail(block, src, "expected type '{}', found type '{}'", .{
        dest_err_set_ty, src_err_set_ty,
    });
}

/// We don't return a pointer to the new error note because the pointer
/// becomes invalid when you add another one.
fn errNote(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    parent: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    return sema.mod.errNoteNonLazy(src.toSrcLoc(block.src_decl), parent, format, args);
}

/// Allocates a new `Module.ErrorMsg` for the given source location.
/// The caller owns the message (typically handed to `failWithOwnedErrorMsg`).
fn errMsg(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!*Module.ErrorMsg {
    return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(block.src_decl), format, args);
}

/// Creates a compile error message and registers the failure, returning
/// `error.AnalysisFail` (see `failWithOwnedErrorMsg`).
pub fn fail(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) CompileError {
    const err_msg = try sema.errMsg(block, src, format, args);
    return sema.failWithOwnedErrorMsg(err_msg);
}

/// Takes ownership of `err_msg`, records the failure on the owner func/decl,
/// and returns `error.AnalysisFail` (or `error.NeededSourceLocation` when the
/// lazy source location is `.unneeded`).
fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
    @setCold(true);

    // Debug aid: surface the error immediately and crash when requested.
    if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) {
        std.debug.print("compile error during Sema: {s}, src: {s}:{}\n", .{
            err_msg.msg,
            err_msg.src_loc.file_scope.sub_file_path,
            err_msg.src_loc.lazy,
        });
        crash_report.compilerPanic("unexpected compile error occurred", null);
    }

    const mod = sema.mod;
    {
        // If we bail out in this scope, the message must be destroyed since
        // it never gets stored in `failed_decls`.
        errdefer err_msg.destroy(mod.gpa);
        if (err_msg.src_loc.lazy == .unneeded) {
            return error.NeededSourceLocation;
        }
        // Reserve capacity so the insertion below cannot fail.
        try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1);
        try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1);
    }
    if (sema.owner_func) |func| {
        func.state = .sema_failure;
    } else {
        sema.owner_decl.analysis = .sema_failure;
        sema.owner_decl.generation = mod.generation;
    }
mod.failed_decls.putAssumeCapacityNoClobber(sema.owner_decl, err_msg);
    return error.AnalysisFail;
}

/// Appropriate to call when the coercion has already been done by result
/// location semantics. Asserts the value fits in the provided `Int` type.
/// Only supports `Int` types 64 bits or less.
/// TODO don't ever call this since we're migrating towards ResultLoc.coerced_ty.
fn resolveAlreadyCoercedInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    comptime Int: type,
) !Int {
    comptime assert(@typeInfo(Int).Int.bits <= 64);
    const air_inst = sema.resolveInst(zir_ref);
    const val = try sema.resolveConstValue(block, src, air_inst);
    switch (@typeInfo(Int).Int.signedness) {
        .signed => return @intCast(Int, val.toSignedInt()),
        .unsigned => return @intCast(Int, val.toUnsignedInt()),
    }
}

/// Resolves `zir_ref` as an alignment: a comptime-known `u16` that must be a
/// nonzero power of two; otherwise a compile error is emitted.
fn resolveAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) !u16 {
    const alignment_big = try sema.resolveInt(block, src, zir_ref, Type.initTag(.u16));
    const alignment = @intCast(u16, alignment_big); // We coerce to u16 in the prev line.
    if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{});
    if (!std.math.isPowerOfTwo(alignment)) {
        return sema.fail(block, src, "alignment value {d} is not a power of two", .{
            alignment,
        });
    }
    return alignment;
}

/// Resolves `zir_ref`, coerces it to `dest_ty`, and returns the
/// comptime-known value as an unsigned 64-bit integer.
fn resolveInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    dest_ty: Type,
) !u64 {
    const air_inst = sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, dest_ty, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced);
    return val.toUnsignedInt();
}

// Returns a compile error if the value has tag `variable`. See `resolveInstValue` for
// a function that does not.
pub fn resolveInstConst(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!TypedValue {
    const air_ref = sema.resolveInst(zir_ref);
    const val = try sema.resolveConstValue(block, src, air_ref);
    return TypedValue{
        .ty = sema.typeOf(air_ref),
        .val = val,
    };
}

// Value Tag may be `undef` or `variable`.
// See `resolveInstConst` for an alternative.
pub fn resolveInstValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!TypedValue {
    const air_ref = sema.resolveInst(zir_ref);
    const val = try sema.resolveValue(block, src, air_ref);
    return TypedValue{
        .ty = sema.typeOf(air_ref),
        .val = val,
    };
}

/// Implements the ZIR `coerce_result_ptr` instruction: adjusts a result
/// pointer so that stores through it coerce to `pointee_ty`.
fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const src: LazySrcLoc = sema.src;
    const bin_inst = sema.code.instructions.items(.data)[inst].bin;
    const pointee_ty = try sema.resolveType(block, src, bin_inst.lhs);
    const ptr = sema.resolveInst(bin_inst.rhs);
    const addr_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local);

    // Needed for the call to `anon_decl.finish()` below which checks `ty.hasCodeGenBits()`.
    _ = try sema.typeHasOnePossibleValue(block, src, pointee_ty);

    if (Air.refToIndex(ptr)) |ptr_inst| {
        if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) {
            const air_datas = sema.air_instructions.items(.data);
            const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
            switch (ptr_val.tag()) {
                .inferred_alloc => {
                    const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data;
                    // Add the stored instruction to the set we will use to resolve peer types
                    // for the inferred allocation.
                    // This instruction will not make it to codegen; it is only to participate
                    // in the `stored_inst_list` of the `inferred_alloc`.
// Interior of `zirCoerceResultPtr` (continues from the previous chunk):
// handling of constant result pointers (`inferred_alloc`,
// `inferred_alloc_comptime`, `decl_ref_mut`).
var trash_block = block.makeSubBlock();
                    defer trash_block.instructions.deinit(sema.gpa);
                    // Throwaway bitcast: only its typed operand participates in
                    // peer type resolution; the trash block is discarded.
                    const operand = try trash_block.addBitCast(pointee_ty, .void_value);

                    try inferred_alloc.stored_inst_list.append(sema.arena, operand);

                    try sema.requireRuntimeBlock(block, src);
                    const ptr_ty = try Type.ptr(sema.arena, .{
                        .pointee_type = pointee_ty,
                        .@"align" = inferred_alloc.alignment,
                        .@"addrspace" = addr_space,
                    });
                    const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
                    return bitcasted_ptr;
                },
                .inferred_alloc_comptime => {
                    const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
                    // There will be only one coerce_result_ptr because we are running at comptime.
                    // The alloc will turn into a Decl.
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    iac.data.decl = try anon_decl.finish(
                        try pointee_ty.copy(anon_decl.arena()),
                        Value.undef,
                    );
                    const ptr_ty = try Type.ptr(sema.arena, .{
                        .pointee_type = pointee_ty,
                        .@"align" = iac.data.alignment,
                        .@"addrspace" = addr_space,
                    });
                    return sema.addConstant(
                        ptr_ty,
                        try Value.Tag.decl_ref_mut.create(sema.arena, .{
                            .decl = iac.data.decl,
                            .runtime_index = block.runtime_index,
                        }),
                    );
                },
                .decl_ref_mut => {
                    const ptr_ty = try Type.ptr(sema.arena, .{
                        .pointee_type = pointee_ty,
                        .@"addrspace" = addr_space,
                    });
                    return sema.addConstant(ptr_ty, ptr_val);
                },
                else => {},
            }
        }
    }

    try sema.requireRuntimeBlock(block, src);

    // Make a dummy store through the pointer to test the coercion.
    // We will then use the generated instructions to decide what
    // kind of transformations to make on the result pointer.
// Interior/tail of `zirCoerceResultPtr`: replay the instructions generated by a
// dummy store (in reverse) to build the transformed result pointer.
var trash_block = block.makeSubBlock();
    defer trash_block.instructions.deinit(sema.gpa);
    const dummy_operand = try trash_block.addBitCast(pointee_ty, .void_value);
    try sema.storePtr(&trash_block, src, ptr, dummy_operand);

    {
        const air_tags = sema.air_instructions.items(.tag);

        //std.debug.print("dummy storePtr instructions:\n", .{});
        //for (trash_block.instructions.items) |item| {
        //    std.debug.print("  {s}\n", .{@tagName(air_tags[item])});
        //}

        // The last one is always `store`.
        const trash_inst = trash_block.instructions.pop();
        assert(air_tags[trash_inst] == .store);
        // The store must be the most recently appended instruction so it can be
        // dropped by truncating the global AIR instruction list.
        assert(trash_inst == sema.air_instructions.len - 1);
        sema.air_instructions.len -= 1;
    }

    const ptr_ty = try Type.ptr(sema.arena, .{
        .pointee_type = pointee_ty,
        .@"addrspace" = addr_space,
    });

    // Walk the remaining trash instructions from last to first, mirroring each
    // value transformation as a pointer transformation on `new_ptr`.
    var new_ptr = ptr;

    while (true) {
        const air_tags = sema.air_instructions.items(.tag);
        const air_datas = sema.air_instructions.items(.data);
        const trash_inst = trash_block.instructions.pop();
        switch (air_tags[trash_inst]) {
            .bitcast => {
                // Reaching the dummy operand means the chain is complete.
                if (Air.indexToRef(trash_inst) == dummy_operand) {
                    return block.addBitCast(ptr_ty, new_ptr);
                }
                const ty_op = air_datas[trash_inst].ty_op;
                const operand_ty = sema.getTmpAir().typeOf(ty_op.operand);
                const ptr_operand_ty = try Type.ptr(sema.arena, .{
                    .pointee_type = operand_ty,
                    .@"addrspace" = addr_space,
                });
                new_ptr = try block.addBitCast(ptr_operand_ty, new_ptr);
            },
            .wrap_optional => {
                const ty_op = air_datas[trash_inst].ty_op;
                const payload_ty = sema.getTmpAir().typeOf(ty_op.operand);
                const ptr_payload_ty = try Type.ptr(sema.arena, .{
                    .pointee_type = payload_ty,
                    .@"addrspace" = addr_space,
                });
                new_ptr = try block.addTyOp(.optional_payload_ptr_set, ptr_payload_ty, new_ptr);
            },
            .wrap_errunion_err => {
                return sema.fail(block, src, "TODO coerce_result_ptr wrap_errunion_err", .{});
            },
            .wrap_errunion_payload => {
                return sema.fail(block, src, "TODO coerce_result_ptr wrap_errunion_payload", .{});
            },
            else => {
                if (std.debug.runtime_safety) {
                    std.debug.panic("unexpected AIR tag for coerce_result_ptr: {s}", .{
                        air_tags[trash_inst],
                    });
                } else {
                    unreachable;
                }
            },
        }
    } else unreachable; // TODO should not need else unreachable
}

/// Scans the namespace of a `struct_decl` ZIR instruction into `struct_obj`,
/// skipping past the optional trailing operands to find the decls.
pub fn analyzeStructDecl(
    sema: *Sema,
    new_decl: *Decl,
    inst: Zir.Inst.Index,
    struct_obj: *Module.Struct,
) SemaError!void {
    const extended = sema.code.instructions.items(.data)[inst].extended;
    assert(extended.opcode == .struct_decl);
    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);

    struct_obj.known_has_bits = small.known_has_bits;

    // Skip the optional operands that precede the decls length.
    var extra_index: usize = extended.operand;
    extra_index += @boolToInt(small.has_src_node);
    extra_index += @boolToInt(small.has_body_len);
    extra_index += @boolToInt(small.has_fields_len);
    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl);
}

/// Creates a new anonymous Decl holding a struct type for a `struct_decl`
/// ZIR instruction. Body continues past this chunk.
fn zirStructDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @bitCast(i32, sema.code.extra[extended.operand]);
        break :blk .{ .node_offset = node_offset };
    } else sema.src;

    // The struct object and its type/value live in the new Decl's arena.
    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
    errdefer new_decl_arena.deinit();
    const new_decl_arena_allocator = new_decl_arena.allocator();

    const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
    const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
    const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
    const type_name = try sema.createTypeName(block, small.name_strategy);
    const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
        .ty = Type.type,
        .val = struct_val,
    }, type_name);
    new_decl.owns_tv = true;
    errdefer sema.mod.abortAnonDecl(new_decl);
    struct_obj.* = .{
        .owner_decl =
// Tail of `zirStructDecl` (continues from the previous chunk): finish
// initializing the struct object, then analyze its namespace.
new_decl,
        .fields = .{},
        .node_offset = src.node_offset,
        .zir_index = inst,
        .layout = small.layout,
        .status = .none,
        .known_has_bits = undefined, // set by analyzeStructDecl below
        .namespace = .{
            .parent = block.namespace,
            .ty = struct_ty,
            .file_scope = block.getFileScope(),
        },
    };
    std.log.scoped(.module).debug("create struct {*} owned by {*} ({s})", .{
        &struct_obj.namespace, new_decl, new_decl.name,
    });

    try sema.analyzeStructDecl(new_decl, inst, struct_obj);
    try new_decl.finalizeNewArena(&new_decl_arena);
    return sema.analyzeDeclVal(block, src, new_decl);
}

/// Produces the name for an anonymous type Decl according to `name_strategy`.
/// Caller owns the returned null-terminated string (allocated with sema.gpa).
fn createTypeName(sema: *Sema, block: *Block, name_strategy: Zir.Inst.NameStrategy) ![:0]u8 {
    switch (name_strategy) {
        .anon => {
            // It would be neat to have "struct:line:column" but this name has
            // to survive incremental updates, where it may have been shifted down
            // or up to a different line, but unchanged, and thus not unnecessarily
            // semantically analyzed.
            const name_index = sema.mod.getNextAnonNameIndex();
            return std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{
                block.src_decl.name, name_index,
            });
        },
        .parent => return sema.gpa.dupeZ(u8, mem.sliceTo(block.src_decl.name, 0)),
        .func => {
            const name_index = sema.mod.getNextAnonNameIndex();
            const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{
                block.src_decl.name, name_index,
            });
            log.warn("TODO: handle NameStrategy.func correctly instead of using anon name '{s}'", .{
                name,
            });
            return name;
        },
    }
}

/// Creates a new anonymous Decl holding an enum type for an `enum_decl` ZIR
/// instruction, analyzing its fields and tag values. Body continues past this chunk.
fn zirEnumDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small);
    // Decode the variable-length trailing operands of the instruction.
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk .{ .node_offset = node_offset };
    } else sema.src;

    const tag_type_ref = if (small.has_tag_type) blk: {
        const tag_type_ref =
// Interior of `zirEnumDecl` (continues from the previous chunk): finish
// decoding the trailing operands, then create the enum Decl.
@intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk tag_type_ref;
    } else .none;

    const body_len = if (small.has_body_len) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // The enum object and its type/value live in the new Decl's arena.
    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
    errdefer new_decl_arena.deinit();
    const new_decl_arena_allocator = new_decl_arena.allocator();

    const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
    const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
    enum_ty_payload.* = .{
        .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full },
        .data = enum_obj,
    };
    const enum_ty = Type.initPayload(&enum_ty_payload.base);
    const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
    const type_name = try sema.createTypeName(block, small.name_strategy);
    const new_decl = try mod.createAnonymousDeclNamed(block, .{
        .ty = Type.type,
        .val = enum_val,
    }, type_name);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl);

    enum_obj.* = .{
        .owner_decl = new_decl,
        .tag_ty = Type.initTag(.@"null"), // placeholder; resolved below
        .fields = .{},
        .values = .{},
        .node_offset = src.node_offset,
        .namespace = .{
            .parent = block.namespace,
            .ty = enum_ty,
            .file_scope = block.getFileScope(),
        },
    };
    std.log.scoped(.module).debug("create enum {*} owned by {*} ({s})", .{
        &enum_obj.namespace, new_decl, new_decl.name,
    });

    extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl);

    const body = sema.code.extra[extra_index..][0..body_len];
    if (fields_len == 0) {
        assert(body.len == 0);
        if (tag_type_ref != .none) {
            // TODO better source location
            const ty = try
// Interior of `zirEnumDecl` (continues from the previous chunk): fieldless
// early-return, then body analysis and tag type resolution.
sema.resolveType(block, src, tag_type_ref);
            enum_obj.tag_ty = try ty.copy(new_decl_arena_allocator);
        }
        try new_decl.finalizeNewArena(&new_decl_arena);
        return sema.analyzeDeclVal(block, src, new_decl);
    }
    extra_index += body.len;

    // One bit per field records whether the field has an explicit tag value.
    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
    const body_end = extra_index;
    extra_index += bit_bags_count;

    {
        // We create a block for the field type instructions because they
        // may need to reference Decls from inside the enum namespace.
        // Within the field type, default value, and alignment expressions, the "owner decl"
        // should be the enum itself.
        const prev_owner_decl = sema.owner_decl;
        sema.owner_decl = new_decl;
        defer sema.owner_decl = prev_owner_decl;

        const prev_owner_func = sema.owner_func;
        sema.owner_func = null;
        defer sema.owner_func = prev_owner_func;

        const prev_func = sema.func;
        sema.func = null;
        defer sema.func = prev_func;

        var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
        defer wip_captures.deinit();

        var enum_block: Block = .{
            .parent = null,
            .sema = sema,
            .src_decl = new_decl,
            .namespace = &enum_obj.namespace,
            .wip_capture_scope = wip_captures.scope,
            .instructions = .{},
            .inlining = null,
            .is_comptime = true,
        };
        defer assert(enum_block.instructions.items.len == 0); // should all be comptime instructions

        if (body.len != 0) {
            _ = try sema.analyzeBody(&enum_block, body);
        }

        try wip_captures.finalize();

        const tag_ty = blk: {
            if (tag_type_ref != .none) {
                // TODO better source location
                const ty = try sema.resolveType(block, src, tag_type_ref);
                break :blk try ty.copy(new_decl_arena_allocator);
            }
            // No explicit tag type: use the smallest unsigned int that can
            // represent all field indices.
            const bits = std.math.log2_int_ceil(usize, fields_len);
            break :blk try Type.Tag.int_unsigned.create(new_decl_arena_allocator, bits);
        };
        enum_obj.tag_ty = tag_ty;
    }

    try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
    // Any nonzero bit bag means at least one field has an explicit value.
    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
        if (bag != 0) break true;
    } else false;
    if
// Interior of `zirEnumDecl` (continues from the previous chunk): iterate the
// fields, consuming one bit per field from the bit bags.
(any_values) {
        try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
            .ty = enum_obj.tag_ty,
        });
    }

    var bit_bag_index: usize = body_end;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    while (field_i < fields_len) : (field_i += 1) {
        // Refill the bit bag every 32 fields.
        if (field_i % 32 == 0) {
            cur_bit_bag = sema.code.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_tag_value = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;

        const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
        extra_index += 1;

        // This string needs to outlive the ZIR code.
        const field_name = try new_decl_arena_allocator.dupe(u8, field_name_zir);

        const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name);
        if (gop.found_existing) {
            const tree = try sema.getAstTree(block);
            const field_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, field_i);
            const other_tag_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, gop.index);
            const msg = msg: {
                const msg = try sema.errMsg(block, field_src, "duplicate enum tag", .{});
                errdefer msg.destroy(gpa);
                try sema.errNote(block, other_tag_src, msg, "other tag here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }

        if (has_tag_value) {
            const tag_val_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
            extra_index += 1;
            // TODO: if we need to report an error here, use a source location
            // that points to this default value expression rather than the struct.
            // But only resolve the source location if we need to emit a compile error.
// Tail of `zirEnumDecl` (continues from the previous chunk): record each
// field's tag value, then finalize the Decl.
const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val;
            // The tag value must outlive the ZIR code, so copy it into the Decl arena.
            const copied_tag_val = try tag_val.copy(new_decl_arena_allocator);
            enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{
                .ty = enum_obj.tag_ty,
            });
        } else if (any_values) {
            // No explicit value, but some field has one, so the implicit
            // field-index value must be recorded too.
            const tag_val = try Value.Tag.int_u64.create(new_decl_arena_allocator, field_i);
            enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty });
        }
    }

    try new_decl.finalizeNewArena(&new_decl_arena);
    return sema.analyzeDeclVal(block, src, new_decl);
}

/// Creates a new anonymous Decl holding a union type for a `union_decl` ZIR
/// instruction. Body continues past this chunk.
fn zirUnionDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
    // Decode the variable-length trailing operands of the instruction.
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk .{ .node_offset = node_offset };
    } else sema.src;

    extra_index += @boolToInt(small.has_tag_type);
    extra_index += @boolToInt(small.has_body_len);
    extra_index += @boolToInt(small.has_fields_len);

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // The union object and its type/value live in the new Decl's arena.
    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
    errdefer new_decl_arena.deinit();
    const new_decl_arena_allocator = new_decl_arena.allocator();

    const union_obj = try new_decl_arena_allocator.create(Module.Union);
    const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union";
    const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
    union_payload.* = .{
        .base = .{ .tag = type_tag },
        .data = union_obj,
    };
    const union_ty = Type.initPayload(&union_payload.base);
    const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
    const type_name = try sema.createTypeName(block,
// Tail of `zirUnionDecl` (continues from the previous chunk): create the Decl,
// initialize the union object, and scan its namespace.
small.name_strategy);
    const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
        .ty = Type.type,
        .val = union_val,
    }, type_name);
    new_decl.owns_tv = true;
    errdefer sema.mod.abortAnonDecl(new_decl);
    union_obj.* = .{
        .owner_decl = new_decl,
        .tag_ty = Type.initTag(.@"null"), // placeholder; resolved later
        .fields = .{},
        .node_offset = src.node_offset,
        .zir_index = inst,
        .layout = small.layout,
        .status = .none,
        .namespace = .{
            .parent = block.namespace,
            .ty = union_ty,
            .file_scope = block.getFileScope(),
        },
    };
    std.log.scoped(.module).debug("create union {*} owned by {*} ({s})", .{
        &union_obj.namespace, new_decl, new_decl.name,
    });

    _ = try sema.mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl);

    try new_decl.finalizeNewArena(&new_decl_arena);
    return sema.analyzeDeclVal(block, src, new_decl);
}

/// Creates a new anonymous Decl holding an opaque type for an `opaque_decl`
/// ZIR instruction. Body continues past this chunk.
fn zirOpaqueDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small);
    // Decode the variable-length trailing operands of the instruction.
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk .{ .node_offset = node_offset };
    } else sema.src;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // The opaque object and its type/value live in the new Decl's arena.
    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
    errdefer new_decl_arena.deinit();
    const new_decl_arena_allocator = new_decl_arena.allocator();

    const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque);
    const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque);
    opaque_ty_payload.* = .{
        .base = .{ .tag = .@"opaque" },
        .data = opaque_obj,
    };
    const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
    const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
    const
// Tail of `zirOpaqueDecl` (continues from the previous chunk): create the Decl,
// initialize the opaque object, and scan its namespace.
type_name = try sema.createTypeName(block, small.name_strategy);
    const new_decl = try mod.createAnonymousDeclNamed(block, .{
        .ty = Type.type,
        .val = opaque_val,
    }, type_name);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl);

    opaque_obj.* = .{
        .owner_decl = new_decl,
        .node_offset = src.node_offset,
        .namespace = .{
            .parent = block.namespace,
            .ty = opaque_ty,
            .file_scope = block.getFileScope(),
        },
    };
    std.log.scoped(.module).debug("create opaque {*} owned by {*} ({s})", .{
        &opaque_obj.namespace, new_decl, new_decl.name,
    });

    extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl);

    try new_decl.finalizeNewArena(&new_decl_arena);
    return sema.analyzeDeclVal(block, src, new_decl);
}

/// Creates a new anonymous Decl holding an error set type for an
/// `error_set_decl` ZIR instruction. Body continues past this chunk.
fn zirErrorSetDecl(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
    const fields = sema.code.extra[extra.end..][0..extra.data.fields_len];

    // The error set object and its type/value live in the new Decl's arena.
    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
    errdefer new_decl_arena.deinit();
    const new_decl_arena_allocator = new_decl_arena.allocator();

    const error_set = try new_decl_arena_allocator.create(Module.ErrorSet);
    const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set);
    const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty);
    const type_name = try sema.createTypeName(block, name_strategy);
    const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
        .ty = Type.type,
        .val = error_set_val,
    }, type_name);
    new_decl.owns_tv = true;
    errdefer sema.mod.abortAnonDecl(new_decl);

    var names = Module.ErrorSet.NameMap{};
    try names.ensureUnusedCapacity(new_decl_arena_allocator, fields.len);
    for (fields) |str_index| {
        const name = try
// Tail of `zirErrorSetDecl` (continues from the previous chunk): collect the
// field names (rejecting duplicates), then finalize the Decl.
new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index));

        // TODO: This check should be performed in AstGen instead.
        const result = names.getOrPutAssumeCapacity(name);
        if (result.found_existing) {
            return sema.fail(block, src, "duplicate error set field {s}", .{name});
        }
    }

    error_set.* = .{
        .owner_decl = new_decl,
        .node_offset = inst_data.src_node,
        .names = names,
    };
    try new_decl.finalizeNewArena(&new_decl_arena);
    return sema.analyzeDeclVal(block, src, new_decl);
}

/// Produces the result pointer for a function's return value: a comptime
/// alloc in comptime blocks, a plain `alloc` when inlining, or a `ret_ptr`
/// instruction otherwise. Body continues past this chunk.
fn zirRetPtr(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    try sema.requireFunctionBlock(block, src);

    if (block.is_comptime) {
        const fn_ret_ty = try sema.resolveTypeFields(block, src, sema.fn_ret_ty);
        return sema.analyzeComptimeAlloc(block, fn_ret_ty, 0, src);
    }

    const ptr_type = try Type.ptr(sema.arena, .{
        .pointee_type = sema.fn_ret_ty,
        .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
    });

    if (block.inlining != null) {
        // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
        // TODO when functions gain result location support, the inlining struct in
        // Block should contain the return pointer, and we would pass that through here.
// Tail of `zirRetPtr` (continues from the previous chunk).
return block.addTy(.alloc, ptr_type);
    }

    return block.addTy(.ret_ptr, ptr_type);
}

/// Analyzes a ZIR `ref` instruction: takes the address of the operand.
fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
    const operand = sema.resolveInst(inst_data.operand);
    return sema.analyzeRef(block, inst_data.src(), operand);
}

/// Returns the return type of the function currently being analyzed.
fn zirRetType(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    try sema.requireFunctionBlock(block, src);
    return sema.addType(sema.fn_ret_ty);
}

/// ZIR wrapper around `ensureResultUsed`: resolves the operand and source
/// location from the instruction.
fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = sema.resolveInst(inst_data.operand);
    const src = inst_data.src();

    return sema.ensureResultUsed(block, operand, src);
}

/// Emits "expression value is ignored" unless the operand's type is void or
/// noreturn.
fn ensureResultUsed(
    sema: *Sema,
    block: *Block,
    operand: Air.Inst.Ref,
    src: LazySrcLoc,
) CompileError!void {
    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag()) {
        .Void, .NoReturn => return,
        else => return sema.fail(block, src, "expression value is ignored", .{}),
    }
}

/// Emits "error is discarded" if the operand is an error set or error union.
fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = sema.resolveInst(inst_data.operand);
    const src = inst_data.src();
    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag()) {
        .ErrorSet, .ErrorUnion => return sema.fail(block, src, "error is discarded", .{}),
        else => return,
    }
}

/// Resolves the `len` of an indexable operand (e.g. a for-loop operand).
/// Body continues past this chunk.
fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data =
// Tail of `zirIndexablePtrLen` (continues from the previous chunk).
sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const object = sema.resolveInst(inst_data.operand);
    const object_ty = sema.typeOf(object);

    // A single-item pointer to an indexable aggregate is also acceptable;
    // look through it to the pointee.
    const is_pointer_to = object_ty.isSinglePointer();

    const array_ty = if (is_pointer_to)
        object_ty.childType()
    else
        object_ty;

    if (!array_ty.isIndexable()) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                src,
                "type '{}' does not support indexing",
                .{array_ty},
            );
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(
                block,
                src,
                msg,
                "for loop operand must be an array, slice, tuple, or vector",
                .{},
            );
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    return sema.fieldVal(block, src, object, "len", src);
}

/// Analyzes an `alloc` ZIR instruction with extended options (explicit type,
/// alignment, const/comptime flags). Body continues past this chunk.
fn zirAllocExtended(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
    const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
    const ty_src = src; // TODO better source location
    const align_src = src; // TODO better source location
    const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);

    // Decode the variable-length trailing operands of the instruction.
    var extra_index: usize = extra.end;

    const var_ty: Type = if (small.has_type) blk: {
        const type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk try sema.resolveType(block, ty_src, type_ref);
    } else undefined;

    const alignment: u16 = if (small.has_align) blk: {
        const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
        extra_index += 1;
        const alignment = try sema.resolveAlign(block, align_src, align_ref);
        break :blk alignment;
    } else 0;

    const inferred_alloc_ty = if (small.is_const)
        Type.initTag(.inferred_alloc_const)
    else
        Type.initTag(.inferred_alloc_mut);

    if (small.is_comptime) {
        if (small.has_type) {
            return sema.analyzeComptimeAlloc(block, var_ty, alignment, ty_src);
        } else {
            return sema.addConstant(
                inferred_alloc_ty,
                try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
                    .decl = undefined,
// Tail of `zirAllocExtended` (continues from the previous chunk).
.alignment = alignment,
                }),
            );
        }
    }

    if (small.has_type) {
        if (!small.is_const) {
            try sema.validateVarType(block, ty_src, var_ty, false);
        }
        const ptr_type = try Type.ptr(sema.arena, .{
            .pointee_type = var_ty,
            .@"align" = alignment,
            .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
        });
        try sema.requireRuntimeBlock(block, src);
        try sema.resolveTypeLayout(block, src, var_ty);
        return block.addTy(.alloc, ptr_type);
    }

    // `Sema.addConstant` does not add the instruction to the block because it is
    // not needed in the case of constant values. However here, we plan to "downgrade"
    // to a normal instruction when we hit `resolve_inferred_alloc`. So we append
    // to the block even though it is currently a `.constant`.
    const result = try sema.addConstant(
        inferred_alloc_ty,
        try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }),
    );
    try sema.requireFunctionBlock(block, src);
    try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
    return result;
}

/// Analyzes an `alloc_comptime` ZIR instruction: a comptime-known allocation
/// with an explicit type.
fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src);
}

/// Analyzes an inferred-type comptime allocation; the element type will be
/// determined later from the stored value.
fn zirAllocInferredComptime(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const src_node = sema.code.instructions.items(.data)[inst].node;
    const src: LazySrcLoc = .{ .node_offset = src_node };
    sema.src = src;
    return sema.addConstant(
        Type.initTag(.inferred_alloc_mut),
        try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
    );
}

/// Analyzes an `alloc` ZIR instruction with an explicit, immutable result.
/// Body continues past this chunk.
fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
// Tail of `zirAlloc` (continues from the previous chunk).
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const var_decl_src = inst_data.src();
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    if (block.is_comptime) {
        return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src);
    }
    const ptr_type = try Type.ptr(sema.arena, .{
        .pointee_type = var_ty,
        .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
    });
    try sema.requireRuntimeBlock(block, var_decl_src);
    try sema.resolveTypeLayout(block, ty_src, var_ty);
    return block.addTy(.alloc, ptr_type);
}

/// Analyzes an `alloc_mut` ZIR instruction: like `zirAlloc`, but the result
/// is a mutable `var`, so the type must be valid for a variable.
fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const var_decl_src = inst_data.src();
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    if (block.is_comptime) {
        return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src);
    }
    try sema.validateVarType(block, ty_src, var_ty, false);
    const ptr_type = try Type.ptr(sema.arena, .{
        .pointee_type = var_ty,
        .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
    });
    try sema.requireRuntimeBlock(block, var_decl_src);
    try sema.resolveTypeLayout(block, ty_src, var_ty);
    return block.addTy(.alloc, ptr_type);
}

/// Analyzes an inferred-type allocation; the element type is resolved later
/// by `zirResolveInferredAlloc`. Body continues past this chunk.
fn zirAllocInferred(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    inferred_alloc_ty: Type,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const src_node = sema.code.instructions.items(.data)[inst].node;
    const src: LazySrcLoc = .{ .node_offset = src_node };
    sema.src = src;

    if (block.is_comptime) {
        return sema.addConstant(
            inferred_alloc_ty,
            try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
        );
    }

    // `Sema.addConstant` does not add the instruction to the block because it is
    // not needed in the case of constant values.
// Tail of `zirAllocInferred` (the sentence below continues the comment that
// ends the previous chunk).
// However here, we plan to "downgrade"
    // to a normal instruction when we hit `resolve_inferred_alloc`. So we append
    // to the block even though it is currently a `.constant`.
    const result = try sema.addConstant(
        inferred_alloc_ty,
        try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }),
    );
    try sema.requireFunctionBlock(block, src);
    try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
    return result;
}

/// Resolves an inferred allocation: now that all stores are known, determine
/// the element type and rewrite the placeholder constant into either a
/// decl_ref(_mut) (comptime case) or a normal `alloc` instruction.
/// Body continues past this chunk.
fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const ptr = sema.resolveInst(inst_data.operand);
    const ptr_inst = Air.refToIndex(ptr).?;
    // The operand must be the placeholder constant created by zirAllocInferred.
    assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
    const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload;
    const ptr_val = sema.air_values.items[value_index];
    const var_is_mut = switch (sema.typeOf(ptr).tag()) {
        .inferred_alloc_const => false,
        .inferred_alloc_mut => true,
        else => unreachable,
    };
    const target = sema.mod.getTarget();

    switch (ptr_val.tag()) {
        .inferred_alloc_comptime => {
            const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
            const decl = iac.data.decl;
            try sema.mod.declareDeclDependency(sema.owner_decl, decl);

            const final_elem_ty = try decl.ty.copy(sema.arena);
            const final_ptr_ty = try Type.ptr(sema.arena, .{
                .pointee_type = final_elem_ty,
                .@"align" = iac.data.alignment,
                .@"addrspace" = target_util.defaultAddressSpace(target, .local),
            });
            const final_ptr_ty_inst = try sema.addType(final_ptr_ty);
            // Patch the placeholder constant in place with the resolved type
            // and a reference to the underlying Decl.
            sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst;

            if (var_is_mut) {
                sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{
                    .decl = decl,
                    .runtime_index = block.runtime_index,
                });
            } else {
                sema.air_values.items[value_index] = try
// Tail of `zirResolveInferredAlloc` (continues from the previous chunk).
Value.Tag.decl_ref.create(sema.arena, decl);
            }
        },
        .inferred_alloc => {
            const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
            const peer_inst_list = inferred_alloc.data.stored_inst_list.items;
            // The element type is the peer-resolved type of everything stored
            // through this pointer.
            const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);

            try sema.requireRuntimeBlock(block, src);
            try sema.resolveTypeLayout(block, ty_src, final_elem_ty);

            if (var_is_mut) {
                try sema.validateVarType(block, ty_src, final_elem_ty, false);
            }

            // Change it to a normal alloc.
            const final_ptr_ty = try Type.ptr(sema.arena, .{
                .pointee_type = final_elem_ty,
                .@"align" = inferred_alloc.data.alignment,
                .@"addrspace" = target_util.defaultAddressSpace(target, .local),
            });
            sema.air_instructions.set(ptr_inst, .{
                .tag = .alloc,
                .data = .{ .ty = final_ptr_ty },
            });
        },
        else => unreachable,
    }
}

/// Dispatches validation of an aggregate initialization expression to the
/// struct or union variant based on the aggregate's type.
fn zirValidateStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
    const init_src = validate_inst.src();
    const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
    const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len];
    // Derive the aggregate type from the first field_ptr instruction's LHS.
    const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
    const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
    const object_ptr = sema.resolveInst(field_ptr_extra.lhs);
    const agg_ty = sema.typeOf(object_ptr).childType();
    switch (agg_ty.zigTypeTag()) {
        .Struct => return sema.validateStructInit(
            block,
            agg_ty.castTag(.@"struct").?.data,
            init_src,
            instrs,
        ),
        .Union => return sema.validateUnionInit(
            block,
            agg_ty.cast(Type.Payload.Union).?.data,
            init_src,
            instrs,
            object_ptr,
        ),
        else => unreachable,
    }
}

/// Validates a union initialization expression: exactly one field may be
/// initialized, and the union's tag is set accordingly.
/// Body continues past this chunk.
fn validateUnionInit(
    sema: *Sema,
    block: *Block,
    union_obj: *Module.Union,
    init_src: LazySrcLoc,
    instrs: []const Zir.Inst.Index,
    union_ptr: Air.Inst.Ref,
) CompileError!void {
    if (instrs.len
// Interior of `validateUnionInit` (continues from the previous chunk).
!= 1) {
        // TODO add note for other field
        // TODO add note for union declared here
        return sema.fail(block, init_src, "only one union field can be active at once", .{});
    }

    const field_ptr = instrs[0];
    const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
    const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node };
    const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
    const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
    const field_index_big = union_obj.fields.getIndex(field_name) orelse
        return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
    const field_index = @intCast(u32, field_index_big);

    // Handle the possibility of the union value being comptime-known.
    const union_ptr_inst = Air.refToIndex(sema.resolveInst(field_ptr_extra.lhs)).?;
    switch (sema.air_instructions.items(.tag)[union_ptr_inst]) {
        .constant => return, // In this case the tag has already been set. No validation to do.
        .bitcast => {
            // TODO here we need to go back and see if we need to convert the union
            // to a comptime-known value. In such case, we must delete all the instructions
            // added to the current block starting with the bitcast.
            // If the bitcast result ptr is an alloc, the alloc should be replaced with
            // a constant decl_ref.
            // Otherwise, the bitcast should be preserved and a store instruction should be
            // emitted to store the constant union value through the bitcast.
        },
        else => |t| {
            if (std.debug.runtime_safety) {
                std.debug.panic("unexpected AIR tag for union pointer: {s}", .{@tagName(t)});
            } else {
                unreachable;
            }
        },
    }

    // Otherwise, we set the new union tag now.
const new_tag = try sema.addConstant( union_obj.tag_ty, try Value.Tag.enum_field_index.create(sema.arena, field_index), ); try sema.requireRuntimeBlock(block, init_src); _ = try block.addBinOp(.set_union_tag, union_ptr, new_tag); } fn validateStructInit( sema: *Sema, block: *Block, struct_obj: *Module.Struct, init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { const gpa = sema.gpa; // Maps field index to field_ptr index of where it was already initialized. const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count()); defer gpa.free(found_fields); mem.set(Zir.Inst.Index, found_fields, 0); for (instrs) |field_ptr| { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); if (found_fields[field_index] != 0) { const other_field_ptr = found_fields[field_index]; const other_field_ptr_data = sema.code.instructions.items(.data)[other_field_ptr].pl_node; const other_field_src: LazySrcLoc = .{ .node_offset_back2tok = other_field_ptr_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other field here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } found_fields[field_index] = field_ptr; } var root_msg: ?*Module.ErrorMsg = null; // TODO handle default struct field values for (found_fields) |field_ptr, i| { if (field_ptr != 0) continue; const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; const args = .{field_name}; if 
(root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { root_msg = try sema.errMsg(block, init_src, template, args); } } if (root_msg) |msg| { const fqn = try struct_obj.getFullyQualifiedName(gpa); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( struct_obj.srcLoc(), msg, "struct '{s}' declared here", .{fqn}, ); return sema.failWithOwnedErrorMsg(msg); } } fn zirValidateArrayInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len]; const elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, elem_ptr_data.payload_index).data; const array_ptr = sema.resolveInst(elem_ptr_extra.ptr); const array_ty = sema.typeOf(array_ptr).childType(); const array_len = array_ty.arrayLen(); if (instrs.len != array_len) { return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{ array_len, instrs.len, }); } } fn failWithBadMemberAccess( sema: *Sema, block: *Block, agg_ty: Type, field_src: LazySrcLoc, field_name: []const u8, ) CompileError { const kw_name = switch (agg_ty.zigTypeTag()) { .Union => "union", .Struct => "struct", .Opaque => "opaque", .Enum => "enum", else => unreachable, }; const msg = msg: { const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{ kw_name, agg_ty, field_name, }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, agg_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn failWithBadStructFieldAccess( sema: *Sema, block: *Block, struct_obj: *Module.Struct, field_src: LazySrcLoc, field_name: []const u8, ) CompileError { const gpa = sema.gpa; const fqn = try 
struct_obj.getFullyQualifiedName(gpa); defer gpa.free(fqn); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in struct '{s}'", .{ field_name, fqn }, ); errdefer msg.destroy(gpa); try sema.mod.errNoteNonLazy(struct_obj.srcLoc(), msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn failWithBadUnionFieldAccess( sema: *Sema, block: *Block, union_obj: *Module.Union, field_src: LazySrcLoc, field_name: []const u8, ) CompileError { const gpa = sema.gpa; const fqn = try union_obj.getFullyQualifiedName(gpa); defer gpa.free(fqn); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in union '{s}'", .{ field_name, fqn }, ); errdefer msg.destroy(gpa); try sema.mod.errNoteNonLazy(union_obj.srcLoc(), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { const src_loc = decl_ty.declSrcLocOrNull() orelse return; const category = switch (decl_ty.zigTypeTag()) { .Union => "union", .Struct => "struct", .Enum => "enum", .Opaque => "opaque", .ErrorSet => "error set", else => unreachable, }; try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); } fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; if (bin_inst.lhs == .none) { // This is an elided instruction, but AstGen was not smart enough // to omit it. 
return; } const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); const ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = sema.typeOf(value), // TODO figure out which address space is appropriate here .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. const src: LazySrcLoc = .unneeded; try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); } fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = sema.src; const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const operand = sema.resolveInst(bin_inst.rhs); const operand_ty = sema.typeOf(operand); const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; if (ptr_val.castTag(.inferred_alloc_comptime)) |iac| { // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. 
if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| { if (operand_val.tag() == .variable) { return sema.failWithNeededComptime(block, src); } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl = try anon_decl.finish( try operand_ty.copy(anon_decl.arena()), try operand_val.copy(anon_decl.arena()), ); // TODO set the alignment on the decl return; } else { return sema.failWithNeededComptime(block, src); } } if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| { // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, operand); // Create a runtime bitcast instruction with exactly the type the pointer wants. const ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = operand_ty, .@"align" = inferred_alloc.data.alignment, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, operand); } unreachable; } fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const quota = try sema.resolveAlreadyCoercedInt(block, src, inst_data.operand, u32); if (sema.branch_quota < quota) sema.branch_quota = quota; } fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); return sema.storePtr(block, sema.src, ptr, value); } fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const 
src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ptr = sema.resolveInst(extra.lhs); const value = sema.resolveInst(extra.rhs); return sema.storePtr(block, src, ptr, value); } fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bytes = sema.code.instructions.items(.data)[inst].str.get(sema.code); return sema.addStrLit(block, bytes); } fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air.Inst.Ref { // `zir_bytes` references memory inside the ZIR module, which can get deallocated // after semantic analysis is complete, for example in the case of the initialization // expression of a variable declaration. We need the memory to be in the new // anonymous Decl's arena. var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, zir_bytes); const new_decl = try anon_decl.finish( try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), ); return sema.analyzeDeclRef(new_decl); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; return sema.addIntUnsigned(Type.initTag(.comptime_int), int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const arena = sema.arena; const int = sema.code.instructions.items(.data)[inst].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count]; const limbs = try arena.alloc(std.math.big.Limb, int.len); mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( Type.initTag(.comptime_int), try Value.Tag.int_big_positive.create(arena, limbs), ); } fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const number = sema.code.instructions.items(.data)[inst].float; return sema.addConstant( Type.initTag(.comptime_float), try Value.Tag.float_64.create(arena, number), ); } fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return sema.addConstant( Type.initTag(.comptime_float), try Value.Tag.float_128.create(arena, number), ); } fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const msg = try 
sema.resolveConstString(block, operand_src, inst_data.operand); return sema.fail(block, src, "{s}", .{msg}); } fn zirCompileLog( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); const src_node = extra.data.src_node; const src: LazySrcLoc = .{ .node_offset = src_node }; const args = sema.code.refSlice(extra.end, extended.small); for (args) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); const arg = sema.resolveInst(arg_ref); const arg_ty = sema.typeOf(arg); if (try sema.resolveMaybeUndefVal(block, src, arg)) |val| { try writer.print("@as({}, {})", .{ arg_ty, val }); } else { try writer.print("@as({}, [runtime value])", .{arg_ty}); } } try writer.print("\n", .{}); const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, sema.owner_decl); if (!gop.found_existing) { gop.value_ptr.* = src_node; } return Air.Inst.Ref.void_value; } fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); const msg_inst = sema.resolveInst(inst_data.operand); return sema.panicWithMsg(block, src, msg_inst); } fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const gpa = sema.gpa; // AIR expects a block outside the loop block too. 
// Reserve space for a Loop instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated. const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); const loop_inst = block_inst + 1; try sema.air_instructions.ensureUnusedCapacity(gpa, 2); sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = undefined, }); sema.air_instructions.appendAssumeCapacity(.{ .tag = .loop, .data = .{ .ty_pl = .{ .ty = .noreturn_type, .payload = undefined, } }, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block = parent_block.makeSubBlock(); child_block.label = &label; child_block.runtime_cond = null; child_block.runtime_loop = src; child_block.runtime_index += 1; const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); defer merges.results.deinit(gpa); defer merges.br_list.deinit(gpa); var loop_block = child_block.makeSubBlock(); defer loop_block.instructions.deinit(gpa); _ = try sema.analyzeBody(&loop_block, body); try child_block.instructions.append(gpa, loop_inst); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block.instructions.items.len); sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity( Air.Block{ .body_len = @intCast(u32, loop_block.instructions.items.len) }, ); sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items); return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pl_node = sema.code.instructions.items(.data)[inst].pl_node; const src = pl_node.src(); const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); const body = 
sema.code.extra[extra.end..][0..extra.data.body_len]; // we check this here to avoid undefined symbols if (!@import("build_options").have_llvm) return sema.fail(parent_block, src, "cannot do C import on Zig compiler not built with LLVM-extension", .{}); var c_import_buf = std.ArrayList(u8).init(sema.gpa); defer c_import_buf.deinit(); var child_block: Block = .{ .parent = parent_block, .sema = sema, .src_decl = parent_block.src_decl, .namespace = parent_block.namespace, .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime, .c_import_buf = &c_import_buf, }; defer child_block.instructions.deinit(sema.gpa); _ = try sema.analyzeBody(&child_block, body); const c_import_res = sema.mod.comp.cImport(c_import_buf.items) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); if (c_import_res.errors.len != 0) { const msg = msg: { const msg = try sema.errMsg(&child_block, src, "C import failed", .{}); errdefer msg.destroy(sema.gpa); if (!sema.mod.comp.bin_file.options.link_libc) try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{}); for (c_import_res.errors) |_| { // TODO integrate with LazySrcLoc // try sema.mod.errNoteNonLazy(.{}, msg, "{s}", .{clang_err.msg_ptr[0..clang_err.msg_len]}); // if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)", // clang_err.line + 1, // clang_err.column + 1, } @import("clang.zig").Stage2ErrorMsg.delete(c_import_res.errors.ptr, c_import_res.errors.len); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const c_import_pkg = Package.create( sema.gpa, null, c_import_res.out_zig_path, ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => unreachable, // we pass null for root_src_dir_path }; const std_pkg = sema.mod.main_pkg.table.get("std").?; const builtin_pkg = 
sema.mod.main_pkg.table.get("builtin").?; try c_import_pkg.add(sema.gpa, "builtin", builtin_pkg); try c_import_pkg.add(sema.gpa, "std", std_pkg); const result = sema.mod.importPkg(c_import_pkg) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); sema.mod.astGenFile(result.file) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); try sema.mod.semaFile(result.file); const file_root_decl = result.file.root_decl.?; try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl); return sema.addConstant(file_root_decl.ty, file_root_decl.val); } fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(parent_block, src, "TODO: implement Sema.zirSuspendBlock", .{}); } fn zirBlock( sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pl_node = sema.code.instructions.items(.data)[inst].pl_node; const src = pl_node.src(); const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const gpa = sema.gpa; // Reserve space for a Block instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated. 
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block: Block = .{ .parent = parent_block, .sema = sema, .src_decl = parent_block.src_decl, .namespace = parent_block.namespace, .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .label = &label, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime, }; const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); defer merges.results.deinit(gpa); defer merges.br_list.deinit(gpa); _ = try sema.analyzeBody(&child_block, body); return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } fn resolveBlockBody( sema: *Sema, parent_block: *Block, src: LazySrcLoc, child_block: *Block, body: []const Zir.Inst.Index, merges: *Block.Merges, ) CompileError!Air.Inst.Ref { if (child_block.is_comptime) { return sema.resolveBody(child_block, body); } else { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } } fn analyzeBlockBody( sema: *Sema, parent_block: *Block, src: LazySrcLoc, child_block: *Block, merges: *Block.Merges, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const gpa = sema.gpa; // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. 
try parent_block.instructions.appendSlice(gpa, child_block.instructions.items); return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); } if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; const last_inst = child_block.instructions.items[last_inst_index]; if (sema.getBreakBlock(last_inst)) |br_block| { if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. const without_break = child_block.instructions.items[0..last_inst_index]; try parent_block.instructions.appendSlice(gpa, without_break); return merges.results.items[0]; } } } // It is impossible to have the number of results be > 1 in a comptime scope. assert(!child_block.is_comptime); // Should already got a compile error in the condbr condition. // Need to set the type and emit the Block instruction. This allows machine code generation // to emit a jump instruction to after the block when it encounters the break. try parent_block.instructions.append(gpa, merges.block_inst); const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items, .none); const ty_inst = try sema.addType(resolved_ty); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len), }), } }; sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. 
for (merges.br_list.items) |br| { const br_operand = sema.air_instructions.items(.data)[br].br.operand; const br_operand_src = src; const br_operand_ty = sema.typeOf(br_operand); if (br_operand_ty.eql(resolved_ty)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); defer coerce_block.instructions.deinit(gpa); const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. if (coerce_block.instructions.items.len == 0) { sema.air_instructions.items(.data)[br].br.operand = coerced_operand; continue; } assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == Air.refToIndex(coerced_operand).?); // Convert the br instruction to a block instruction that has the coercion // and then a new br inside that returns the coerced instruction. const sub_block_len = @intCast(u32, coerce_block.instructions.items.len + 1); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + sub_block_len); try sema.air_instructions.ensureUnusedCapacity(gpa, 1); const sub_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); sema.air_instructions.items(.tag)[br] = .block; sema.air_instructions.items(.data)[br] = .{ .ty_pl = .{ .ty = Air.Inst.Ref.noreturn_type, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = sub_block_len, }), } }; sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); sema.air_extra.appendAssumeCapacity(sub_br_inst); sema.air_instructions.appendAssumeCapacity(.{ .tag = .br, .data = .{ .br = .{ .block_inst = merges.block_inst, .operand = coerced_operand, } }, }); } return Air.indexToRef(merges.block_inst); } fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const decl_name = sema.code.nullTerminatedString(extra.decl_name); if (extra.namespace != .none) { return sema.fail(block, src, "TODO: implement exporting with field access", .{}); } const decl = try sema.lookupIdentifier(block, operand_src, decl_name); const options = try sema.resolveExportOptions(block, options_src, extra.options); try sema.analyzeExport(block, src, options, decl); } fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand = try sema.resolveInstConst(block, operand_src, extra.operand); const options = try sema.resolveExportOptions(block, options_src, extra.options); const decl = switch (operand.val.tag()) { .function => operand.val.castTag(.function).?.data.owner_decl, else => return sema.fail(block, operand_src, "TODO implement exporting arbitrary Value objects", .{}), // TODO put this Value into an anonymous Decl and then export it. 
}; try sema.analyzeExport(block, src, options, decl); } pub fn analyzeExport( sema: *Sema, block: *Block, src: LazySrcLoc, borrowed_options: std.builtin.ExportOptions, exported_decl: *Decl, ) !void { const Export = Module.Export; const mod = sema.mod; try mod.ensureDeclAnalyzed(exported_decl); // TODO run the same checks as we do for C ABI struct fields switch (exported_decl.ty.zigTypeTag()) { .Fn, .Int, .Struct, .Array, .Float => {}, else => return sema.fail(block, src, "unable to export type '{}'", .{exported_decl.ty}), } const gpa = mod.gpa; try mod.decl_exports.ensureUnusedCapacity(gpa, 1); try mod.export_owners.ensureUnusedCapacity(gpa, 1); const new_export = try gpa.create(Export); errdefer gpa.destroy(new_export); const symbol_name = try gpa.dupe(u8, borrowed_options.name); errdefer gpa.free(symbol_name); const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; errdefer if (section) |s| gpa.free(s); const src_decl = block.src_decl; const owner_decl = sema.owner_decl; log.debug("exporting Decl '{s}' as symbol '{s}' from Decl '{s}'", .{ exported_decl.name, symbol_name, owner_decl.name, }); new_export.* = .{ .options = .{ .name = symbol_name, .linkage = borrowed_options.linkage, .section = section, }, .src = src, .link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = {} }, .elf => .{ .elf = .{} }, .macho => .{ .macho = .{} }, .plan9 => .{ .plan9 = null }, .c => .{ .c = {} }, .wasm => .{ .wasm = {} }, .spirv => .{ .spirv = {} }, }, .owner_decl = owner_decl, .src_decl = src_decl, .exported_decl = exported_decl, .status = .in_progress, }; // Add to export_owners table. 
const eo_gop = mod.export_owners.getOrPutAssumeCapacity(owner_decl);
    if (!eo_gop.found_existing) {
        eo_gop.value_ptr.* = &[0]*Export{};
    }
    eo_gop.value_ptr.* = try gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1);
    eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export;
    // NOTE(review): this errdefer (and the one below) sit at the end of the
    // function with no fallible code after them, so they only matter if more
    // `try` expressions are later added below.
    errdefer eo_gop.value_ptr.* = gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1);

    // Add to exported_decl table.
    const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl);
    if (!de_gop.found_existing) {
        de_gop.value_ptr.* = &[0]*Export{};
    }
    de_gop.value_ptr.* = try gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1);
    de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export;
    errdefer de_gop.value_ptr.* = gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
}

/// Implements `@setAlignStack`: validates the requested alignment (max 256),
/// requires a function body with a non-naked, non-inline calling convention,
/// and records the request in `mod.align_stack_fns`. A second occurrence in
/// the same function body is a compile error with a note at the first one.
fn zirSetAlignStack(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const src: LazySrcLoc = inst_data.src();
    const alignment = try sema.resolveAlign(block, operand_src, inst_data.operand);
    if (alignment > 256) {
        return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
            alignment,
        });
    }
    const func = sema.owner_func orelse
        return sema.fail(block, src, "@setAlignStack outside function body", .{});

    switch (func.owner_decl.ty.fnCallingConvention()) {
        .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
        .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
        else => {},
    }

    const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func);
    if (gop.found_existing) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "other instance here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    gop.value_ptr.* = .{ .alignment = alignment, .src = src };
}

/// Implements `@setCold`: resolves the operand as a comptime bool and sets
/// `is_cold` on the function currently being analyzed. Silently does nothing
/// outside a function body.
fn zirSetCold(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand);
    const func = sema.func orelse return; // does nothing outside a function
    func.is_cold = is_cold;
}

/// `@setFloatMode` — not yet implemented; always a compile error for now.
fn zirSetFloatMode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src: LazySrcLoc = inst_data.src();
    return sema.fail(block, src, "TODO: implement Sema.zirSetFloatMode", .{});
}

/// Implements `@setRuntimeSafety`: resolves the operand as a comptime bool
/// and toggles `want_safety` on the current block.
fn zirSetRuntimeSafety(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand);
}

/// Implements `@fence`: requires an atomic ordering of at least Acquire and
/// emits a `fence` AIR instruction. No-op in a comptime block.
fn zirFence(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    if (block.is_comptime) return;

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const order = try sema.resolveAtomicOrder(block, order_src, inst_data.operand);

    if (@enumToInt(order) < @enumToInt(std.builtin.AtomicOrder.Acquire)) {
        return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{});
    }

    _ = try block.addInst(.{
        .tag = .fence,
        .data = .{ .fence = order },
    });
}

/// Analyzes a ZIR `break`: walks up the block chain to the labeled block it
/// targets (continued below) and emits a `br` to that block's merge point.
fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].@"break";
    const operand = sema.resolveInst(inst_data.operand);
    const zir_block = inst_data.block_inst;

    var block = start_block;
while (true) {
        if (block.label) |label| {
            if (label.zir_block == zir_block) {
                // Found the target labeled block: record the break result and
                // the br instruction in its merge lists.
                const br_ref = try start_block.addBr(label.merges.block_inst, operand);
                try label.merges.results.append(sema.gpa, operand);
                try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?);
                return inst;
            }
        }
        // Walk outward; a missing parent here would mean AstGen produced a
        // break with no matching block.
        block = block.parent.?;
    }
}

/// Emits a `dbg_stmt` AIR instruction carrying line/column info, unless
/// analyzing at comptime.
fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    // We do not set sema.src here because dbg_stmt instructions are only emitted for
    // ZIR code that possibly will need to generate runtime code. So error messages
    // and other source locations must not rely on sema.src being set from dbg_stmt
    // instructions.
    if (block.is_comptime) return;

    const inst_data = sema.code.instructions.items(.data)[inst].dbg_stmt;
    _ = try block.addInst(.{
        .tag = .dbg_stmt,
        .data = .{ .dbg_stmt = .{
            .line = inst_data.line,
            .column = inst_data.column,
        } },
    });
}

/// Resolves an identifier to a Decl and analyzes a *reference* (pointer) to it.
fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const src = inst_data.src();
    const decl_name = inst_data.get(sema.code);
    const decl = try sema.lookupIdentifier(block, src, decl_name);
    return sema.analyzeDeclRef(decl);
}

/// Resolves an identifier to a Decl and analyzes its *value*.
fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const src = inst_data.src();
    const decl_name = inst_data.get(sema.code);
    const decl = try sema.lookupIdentifier(block, src, decl_name);
    return sema.analyzeDeclVal(block, src, decl);
}

/// Resolves `name` by searching the current namespace and then each parent
/// namespace in turn. `usingnamespace` is not observed here (last argument is
/// `false`); see `lookupInNamespace`.
fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !*Decl {
    var namespace = block.namespace;
    while (true) {
        if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl| {
            return decl;
        }
        namespace = namespace.parent orelse break;
    }
    unreachable; // AstGen detects use of undeclared identifier errors.
}

/// This looks up a member of a specific namespace. It is affected by `usingnamespace` but
/// only for ones in the specified namespace.
fn lookupInNamespace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: *Namespace,
    ident_name: []const u8,
    observe_usingnamespace: bool,
) CompileError!?*Decl {
    const mod = sema.mod;

    const namespace_decl = namespace.getDecl();
    if (namespace_decl.analysis == .file_failure) {
        try mod.declareDeclDependency(sema.owner_decl, namespace_decl);
        return error.AnalysisFail;
    }

    if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) {
        const src_file = block.namespace.file_scope;

        const gpa = sema.gpa;
        // `checked_namespaces` doubles as the visited set and the worklist:
        // new namespaces are appended while `check_i` walks the keys in order.
        var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{};
        defer checked_namespaces.deinit(gpa);

        // Keep track of name conflicts for error notes.
        var candidates: std.ArrayListUnmanaged(*Decl) = .{};
        defer candidates.deinit(gpa);

        try checked_namespaces.put(gpa, namespace, {});
        var check_i: usize = 0;

        while (check_i < checked_namespaces.count()) : (check_i += 1) {
            const check_ns = checked_namespaces.keys()[check_i];
            if (check_ns.decls.get(ident_name)) |decl| {
                // Skip decls which are not marked pub, which are in a different
                // file than the `a.b`/`@hasDecl` syntax.
                if (decl.is_pub or src_file == decl.getFileScope()) {
                    try candidates.append(gpa, decl);
                }
            }
            var it = check_ns.usingnamespace_set.iterator();
            while (it.next()) |entry| {
                const sub_usingnamespace_decl = entry.key_ptr.*;
                const sub_is_pub = entry.value_ptr.*;
                if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) {
                    // Skip usingnamespace decls which are not marked pub, which are in
                    // a different file than the `a.b`/`@hasDecl` syntax.
                    continue;
                }
                try sema.ensureDeclAnalyzed(sub_usingnamespace_decl);
                const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data;
                const sub_ns = ns_ty.getNamespace().?;
                try checked_namespaces.put(gpa, sub_ns, {});
            }
        }

        switch (candidates.items.len) {
            0 => {},
            1 => {
                const decl = candidates.items[0];
                try mod.declareDeclDependency(sema.owner_decl, decl);
                return decl;
            },
            else => {
                // More than one match across usingnamespaces: ambiguous.
                const msg = msg: {
                    const msg = try sema.errMsg(block, src, "ambiguous reference", .{});
                    errdefer msg.destroy(gpa);
                    for (candidates.items) |candidate| {
                        const src_loc = candidate.srcLoc();
                        try mod.errNoteNonLazy(src_loc, msg, "declared here", .{});
                    }
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            },
        }
    } else if (namespace.decls.get(ident_name)) |decl| {
        try mod.declareDeclDependency(sema.owner_decl, decl);
        return decl;
    }

    log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{
        sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name,
    });
    // TODO This dependency is too strong. Really, it should only be a dependency
    // on the non-existence of `ident_name` in the namespace. We can lessen the number of
    // outdated declarations by making this dependency more sophisticated.
try mod.declareDeclDependency(sema.owner_decl, namespace_decl);
    return null;
}

/// Analyzes a ZIR `call` instruction: decodes the callee, modifier, and
/// argument list, resolves each argument, and hands off to `analyzeCall`.
/// Bound functions (`a.b()` method syntax) are desugared here by prepending
/// the bound receiver as the first argument.
fn zirCall(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
    const call_src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index);
    const args = sema.code.refSlice(extra.end, extra.data.flags.args_len);

    const modifier = @intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier);
    const ensure_result_used = extra.data.flags.ensure_result_used;

    var func = sema.resolveInst(extra.data.callee);
    var resolved_args: []Air.Inst.Ref = undefined;

    const func_type = sema.typeOf(func);

    // Desugar bound functions here
    if (func_type.tag() == .bound_fn) {
        const bound_func = try sema.resolveValue(block, func_src, func);
        const bound_data = &bound_func.cast(Value.Payload.BoundFn).?.data;
        func = bound_data.func_inst;
        resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len + 1);
        // Bound receiver goes in slot 0; explicit args are shifted by one.
        resolved_args[0] = bound_data.arg0_inst;
        for (args) |zir_arg, i| {
            resolved_args[i + 1] = sema.resolveInst(zir_arg);
        }
    } else {
        resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len);
        for (args) |zir_arg, i| {
            resolved_args[i] = sema.resolveInst(zir_arg);
        }
    }

    return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args);
}

/// Adapter context for looking up an existing generic instantiation in
/// `mod.monomorphed_funcs` directly from callsite arguments (`comptime_tvs`),
/// without first constructing a candidate `Module.Fn`. The hash is
/// precomputed by the caller (see the `hash` doc comment below).
const GenericCallAdapter = struct {
    generic_fn: *Module.Fn,
    precomputed_hash: u64,
    func_ty_info: Type.Payload.Function.Data,
    comptime_tvs: []const TypedValue,

    pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool {
        _ = adapted_key;
        // The generic function Decl is guaranteed to be the first dependency
        // of each of its instantiations.
        const generic_owner_decl = other_key.owner_decl.dependencies.keys()[0];
        if (ctx.generic_fn.owner_decl != generic_owner_decl) return false;

        const other_comptime_args = other_key.comptime_args.?;
        for (other_comptime_args[0..ctx.func_ty_info.param_types.len]) |other_arg, i| {
            if (other_arg.ty.tag() != .generic_poison) {
                // anytype parameter
                if (!other_arg.ty.eql(ctx.comptime_tvs[i].ty)) {
                    return false;
                }
            }
            if (other_arg.val.tag() != .generic_poison) {
                // comptime parameter
                if (ctx.comptime_tvs[i].val.tag() == .generic_poison) {
                    // No match because the instantiation has a comptime parameter
                    // but the callsite does not.
                    return false;
                }
                if (!other_arg.val.eql(ctx.comptime_tvs[i].val, other_arg.ty)) {
                    return false;
                }
            }
        }
        return true;
    }

    /// The implementation of the hash is in semantic analysis of function calls, so
    /// that any errors when computing the hash can be properly reported.
    pub fn hash(ctx: @This(), adapted_key: void) u64 {
        _ = adapted_key;
        return ctx.precomputed_hash;
    }
};

/// Adapter used to remove a freshly inserted (but failed) instantiation from
/// `mod.monomorphed_funcs` by pointer identity, reusing the precomputed hash.
const GenericRemoveAdapter = struct {
    precomputed_hash: u64,

    pub fn eql(ctx: @This(), adapted_key: *Module.Fn, other_key: *Module.Fn) bool {
        _ = ctx;
        return adapted_key == other_key;
    }

    /// The implementation of the hash is in semantic analysis of function calls, so
    /// that any errors when computing the hash can be properly reported.
pub fn hash(ctx: @This(), adapted_key: *Module.Fn) u64 {
        _ = adapted_key;
        return ctx.precomputed_hash;
    }
};

/// Shared implementation behind every function call. Validates the callee
/// type, calling convention, argument count, and call modifier, then takes
/// one of three paths (visible in the if/else chain over `result`):
/// inline/comptime evaluation of the callee body, generic instantiation
/// (monomorphization), or emission of a runtime `call` AIR instruction.
fn analyzeCall(
    sema: *Sema,
    block: *Block,
    func: Air.Inst.Ref,
    func_src: LazySrcLoc,
    call_src: LazySrcLoc,
    modifier: std.builtin.CallOptions.Modifier,
    ensure_result_used: bool,
    uncasted_args: []const Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;

    const callee_ty = sema.typeOf(func);
    // Accept either a function or a single-item pointer to a function.
    const func_ty = func_ty: {
        switch (callee_ty.zigTypeTag()) {
            .Fn => break :func_ty callee_ty,
            .Pointer => {
                const ptr_info = callee_ty.ptrInfo().data;
                if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
                    break :func_ty ptr_info.pointee_type;
                }
            },
            else => {},
        }
        return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty});
    };

    const func_ty_info = func_ty.fnInfo();
    const cc = func_ty_info.cc;
    if (cc == .Naked) {
        // TODO add error note: declared here
        return sema.fail(
            block,
            func_src,
            "unable to call function with naked calling convention",
            .{},
        );
    }
    const fn_params_len = func_ty_info.param_types.len;
    if (func_ty_info.is_var_args) {
        assert(cc == .C);
        if (uncasted_args.len < fn_params_len) {
            // TODO add error note: declared here
            return sema.fail(
                block,
                func_src,
                "expected at least {d} argument(s), found {d}",
                .{ fn_params_len, uncasted_args.len },
            );
        }
    } else if (fn_params_len != uncasted_args.len) {
        // TODO add error note: declared here
        return sema.fail(
            block,
            func_src,
            "expected {d} argument(s), found {d}",
            .{ fn_params_len, uncasted_args.len },
        );
    }

    switch (modifier) {
        .auto,
        .always_inline,
        .compile_time,
        => {},

        .async_kw,
        .never_tail,
        .never_inline,
        .no_async,
        .always_tail,
        => return sema.fail(block, call_src, "TODO implement call with modifier {}", .{
            modifier,
        }),
    }

    const gpa = sema.gpa;

    const is_comptime_call = block.is_comptime or modifier == .compile_time or
        func_ty_info.return_type.requiresComptime();
    const is_inline_call = is_comptime_call or modifier == .always_inline or
        func_ty_info.cc == .Inline;
    const result: Air.Inst.Ref = if (is_inline_call) res: {
        // Inline/comptime path: the callee must be comptime-known.
        const func_val = try sema.resolveConstValue(block, func_src, func);
        const module_fn = switch (func_val.tag()) {
            .decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data,
            .function => func_val.castTag(.function).?.data,
            .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{
                @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
            }),
            else => unreachable,
        };

        // Analyze the ZIR. The same ZIR gets analyzed into a runtime function
        // or an inlined call depending on what union tag the `label` field is
        // set to in the `Block`.
        // This block instruction will be used to capture the return value from the
        // inlined function.
        const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
        try sema.air_instructions.append(gpa, .{
            .tag = .block,
            .data = undefined,
        });
        // This one is shared among sub-blocks within the same callee, but not
        // shared among the entire inline/comptime call stack.
        var inlining: Block.Inlining = .{
            .comptime_result = undefined,
            .merges = .{
                .results = .{},
                .br_list = .{},
                .block_inst = block_inst,
            },
        };
        // In order to save a bit of stack space, directly modify Sema rather
        // than create a child one.
        // Each mutation below is paired with a defer that restores the parent
        // state when the inline analysis finishes.
        const parent_zir = sema.code;
        sema.code = module_fn.owner_decl.getFileScope().zir;
        defer sema.code = parent_zir;

        const parent_inst_map = sema.inst_map;
        sema.inst_map = .{};
        defer {
            sema.inst_map.deinit(gpa);
            sema.inst_map = parent_inst_map;
        }

        const parent_func = sema.func;
        sema.func = module_fn;
        defer sema.func = parent_func;

        var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, module_fn.owner_decl.src_scope);
        defer wip_captures.deinit();

        var child_block: Block = .{
            .parent = null,
            .sema = sema,
            .src_decl = module_fn.owner_decl,
            .namespace = module_fn.owner_decl.src_namespace,
            .wip_capture_scope = wip_captures.scope,
            .instructions = .{},
            .label = null,
            .inlining = &inlining,
            .is_comptime = is_comptime_call,
        };

        const merges = &child_block.inlining.?.merges;

        defer child_block.instructions.deinit(gpa);
        defer merges.results.deinit(gpa);
        defer merges.br_list.deinit(gpa);

        // If it's a comptime function call, we need to memoize it as long as no external
        // comptime memory is mutated.
        var memoized_call_key: Module.MemoizedCall.Key = undefined;
        var delete_memoized_call_key = false;
        defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args);
        if (is_comptime_call) {
            memoized_call_key = .{
                .func = module_fn,
                .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len),
            };
            delete_memoized_call_key = true;
        }

        try sema.emitBackwardBranch(&child_block, call_src);

        // This will have return instructions analyzed as break instructions to
        // the block_inst above. Here we are performing "comptime/inline semantic analysis"
        // for a function body, which means we must map the parameter ZIR instructions to
        // the AIR instructions of the callsite. The callee could be a generic function
        // which means its parameter type expressions must be resolved in order and used
        // to successively coerce the arguments.
const fn_info = sema.code.getFnInfo(module_fn.zir_body_inst);
        const zir_tags = sema.code.instructions.items(.tag);
        var arg_i: usize = 0;
        for (fn_info.param_body) |inst| switch (zir_tags[inst]) {
            .param, .param_comptime => {
                // Evaluate the parameter type expression now that previous ones have
                // been mapped, and coerce the corresponding argument to it.
                const pl_tok = sema.code.instructions.items(.data)[inst].pl_tok;
                const param_src = pl_tok.src();
                const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index);
                const param_body = sema.code.extra[extra.end..][0..extra.data.body_len];
                const param_ty_inst = try sema.resolveBody(&child_block, param_body);
                const param_ty = try sema.analyzeAsType(&child_block, param_src, param_ty_inst);
                const arg_src = call_src; // TODO: better source location
                const casted_arg = try sema.coerce(&child_block, param_ty, uncasted_args[arg_i], arg_src);
                try sema.inst_map.putNoClobber(gpa, inst, casted_arg);

                if (is_comptime_call) {
                    const arg_val = try sema.resolveConstMaybeUndefVal(&child_block, arg_src, casted_arg);
                    memoized_call_key.args[arg_i] = .{
                        .ty = param_ty,
                        .val = arg_val,
                    };
                }

                arg_i += 1;
                continue;
            },
            .param_anytype, .param_anytype_comptime => {
                // No coercion needed.
                const uncasted_arg = uncasted_args[arg_i];
                try sema.inst_map.putNoClobber(gpa, inst, uncasted_arg);

                if (is_comptime_call) {
                    const arg_src = call_src; // TODO: better source location
                    const arg_val = try sema.resolveConstMaybeUndefVal(&child_block, arg_src, uncasted_arg);
                    memoized_call_key.args[arg_i] = .{
                        .ty = sema.typeOf(uncasted_arg),
                        .val = arg_val,
                    };
                }

                arg_i += 1;
                continue;
            },
            else => continue,
        };

        // In case it is a generic function with an expression for the return type that depends
        // on parameters, we must now do the same for the return type as we just did with
        // each of the parameters, resolving the return type and providing it to the child
        // `Sema` so that it can be used for the `ret_ptr` instruction.
        const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body);
        const ret_ty_src = func_src; // TODO better source location
        const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
        // Create a fresh inferred error set type for inline/comptime calls.
        const fn_ret_ty = blk: {
            if (func_ty_info.return_type.castTag(.error_union)) |payload| {
                if (payload.data.error_set.tag() == .error_set_inferred) {
                    const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode);
                    node.data = .{ .func = module_fn };
                    parent_func.?.inferred_error_sets.prepend(node);

                    const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data);
                    break :blk try Type.Tag.error_union.create(sema.arena, .{
                        .error_set = error_set_ty,
                        .payload = bare_return_type,
                    });
                }
            }
            break :blk bare_return_type;
        };
        const parent_fn_ret_ty = sema.fn_ret_ty;
        sema.fn_ret_ty = fn_ret_ty;
        defer sema.fn_ret_ty = parent_fn_ret_ty;

        // This `res2` is here instead of directly breaking from `res` due to a stage1
        // bug generating invalid LLVM IR.
        const res2: Air.Inst.Ref = res2: {
            if (is_comptime_call) {
                // Memoization hit: reuse the previously computed result value.
                if (mod.memoized_calls.get(memoized_call_key)) |result| {
                    const ty_inst = try sema.addType(fn_ret_ty);
                    try sema.air_values.append(gpa, result.val);
                    sema.air_instructions.set(block_inst, .{
                        .tag = .constant,
                        .data = .{ .ty_pl = .{
                            .ty = ty_inst,
                            .payload = @intCast(u32, sema.air_values.items.len - 1),
                        } },
                    });
                    break :res2 Air.indexToRef(block_inst);
                }
            }

            const result = result: {
                _ = sema.analyzeBody(&child_block, fn_info.body) catch |err| switch (err) {
                    error.ComptimeReturn => break :result inlining.comptime_result,
                    else => |e| return e,
                };
                break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges);
            };

            if (is_comptime_call) {
                const result_val = try sema.resolveConstMaybeUndefVal(block, call_src, result);
                // TODO: check whether any external comptime memory was mutated by the
                // comptime function call. If so, then do not memoize the call here.
                // TODO: re-evaluate whether memoized_calls needs its own arena. I think
                // it should be fine to use the Decl arena for the function.
                {
                    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
                    errdefer arena_allocator.deinit();
                    const arena = arena_allocator.allocator();

                    // Copy key args and the result into the memoization arena so
                    // they outlive this call's temporary allocations.
                    for (memoized_call_key.args) |*arg| {
                        arg.* = try arg.*.copy(arena);
                    }

                    try mod.memoized_calls.put(gpa, memoized_call_key, .{
                        .val = try result_val.copy(arena),
                        .arena = arena_allocator.state,
                    });
                    // Ownership of the key moved into the table; skip the defer-free.
                    delete_memoized_call_key = false;
                }
            }

            break :res2 result;
        };

        try wip_captures.finalize();

        break :res res2;
    } else if (func_ty_info.is_generic) res: {
        // Generic path: find or create a monomorphized instantiation.
        const func_val = try sema.resolveConstValue(block, func_src, func);
        const module_fn = switch (func_val.tag()) {
            .function => func_val.castTag(.function).?.data,
            .decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data,
            else => unreachable,
        };
        // Check the Module's generic function map with an adapted context, so that we
        // can match against `uncasted_args` rather than doing the work below to create a
        // generic Scope only to junk it if it matches an existing instantiation.
        const namespace = module_fn.owner_decl.src_namespace;
        const fn_zir = namespace.file_scope.zir;
        const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
        const zir_tags = fn_zir.instructions.items(.tag);

        // This hash must match `Module.MonomorphedFuncsContext.hash`.
        // For parameters explicitly marked comptime and simple parameter type expressions,
        // we know whether a parameter is elided from a monomorphed function, and can
        // use it in the hash here. However, for parameter type expressions that are not
        // explicitly marked comptime and rely on previous parameter comptime values, we
        // don't find out until after generating a monomorphed function whether the parameter
        // type ended up being a "must-be-comptime-known" type.
        var hasher = std.hash.Wyhash.init(0);
        std.hash.autoHash(&hasher, @ptrToInt(module_fn));

        const comptime_tvs = try sema.arena.alloc(TypedValue, func_ty_info.param_types.len);

        for (func_ty_info.param_types) |param_ty, i| {
            const is_comptime = func_ty_info.paramIsComptime(i);
            if (is_comptime) {
                const arg_src = call_src; // TODO better source location
                const casted_arg = try sema.coerce(block, param_ty, uncasted_args[i], arg_src);
                if (try sema.resolveMaybeUndefVal(block, arg_src, casted_arg)) |arg_val| {
                    if (param_ty.tag() != .generic_poison) {
                        arg_val.hash(param_ty, &hasher);
                    }
                    comptime_tvs[i] = .{
                        // This will be different than `param_ty` in the case of `generic_poison`.
                        .ty = sema.typeOf(casted_arg),
                        .val = arg_val,
                    };
                } else {
                    return sema.failWithNeededComptime(block, arg_src);
                }
            } else {
                comptime_tvs[i] = .{
                    .ty = sema.typeOf(uncasted_args[i]),
                    .val = Value.initTag(.generic_poison),
                };
            }
        }

        const precomputed_hash = hasher.final();

        const adapter: GenericCallAdapter = .{
            .generic_fn = module_fn,
            .precomputed_hash = precomputed_hash,
            .func_ty_info = func_ty_info,
            .comptime_tvs = comptime_tvs,
        };
        const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter);
        if (gop.found_existing) {
            // An instantiation with matching comptime args already exists.
            const callee_func = gop.key_ptr.*;
            break :res try sema.finishGenericCall(
                block,
                call_src,
                callee_func,
                func_src,
                uncasted_args,
                fn_info,
                zir_tags,
            );
        }
        const new_module_func = try gpa.create(Module.Fn);
        gop.key_ptr.* = new_module_func;
        {
            errdefer gpa.destroy(new_module_func);
            const remove_adapter: GenericRemoveAdapter = .{
                .precomputed_hash = precomputed_hash,
            };
            // On failure, back out the speculative map insertion above.
            errdefer assert(mod.monomorphed_funcs.removeAdapted(new_module_func, remove_adapter));

            try namespace.anon_decls.ensureUnusedCapacity(gpa, 1);

            // Create a Decl for the new function.
const src_decl = namespace.getDecl();
            // TODO better names for generic function instantiations
            const name_index = mod.getNextAnonNameIndex();
            const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{
                module_fn.owner_decl.name, name_index,
            });
            const new_decl = try mod.allocateNewDecl(decl_name, namespace, module_fn.owner_decl.src_node, src_decl.src_scope);
            // The instantiation inherits source metadata and flags from the
            // generic function's Decl.
            new_decl.src_line = module_fn.owner_decl.src_line;
            new_decl.is_pub = module_fn.owner_decl.is_pub;
            new_decl.is_exported = module_fn.owner_decl.is_exported;
            new_decl.has_align = module_fn.owner_decl.has_align;
            new_decl.has_linksection_or_addrspace = module_fn.owner_decl.has_linksection_or_addrspace;
            new_decl.@"addrspace" = module_fn.owner_decl.@"addrspace";
            new_decl.zir_decl_index = module_fn.owner_decl.zir_decl_index;
            new_decl.alive = true; // This Decl is called at runtime.
            new_decl.has_tv = true;
            new_decl.owns_tv = true;
            new_decl.analysis = .in_progress;
            new_decl.generation = mod.generation;

            namespace.anon_decls.putAssumeCapacityNoClobber(new_decl, {});

            // The generic function Decl is guaranteed to be the first dependency
            // of each of its instantiations.
            assert(new_decl.dependencies.keys().len == 0);
            try mod.declareDeclDependency(new_decl, module_fn.owner_decl);

            var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
            errdefer new_decl_arena.deinit();
            const new_decl_arena_allocator = new_decl_arena.allocator();

            // Re-run the block that creates the function, with the comptime parameters
            // pre-populated inside `inst_map`. This causes `param_comptime` and
            // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
            // new, monomorphized function, with the comptime parameters elided.
            var child_sema: Sema = .{
                .mod = mod,
                .gpa = gpa,
                .arena = sema.arena,
                .perm_arena = new_decl_arena_allocator,
                .code = fn_zir,
                .owner_decl = new_decl,
                .func = null,
                .fn_ret_ty = Type.void,
                .owner_func = null,
                .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
                .comptime_args_fn_inst = module_fn.zir_body_inst,
                .preallocated_new_func = new_module_func,
            };
            defer child_sema.deinit();

            var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
            defer wip_captures.deinit();

            var child_block: Block = .{
                .parent = null,
                .sema = &child_sema,
                .src_decl = new_decl,
                .namespace = namespace,
                .wip_capture_scope = wip_captures.scope,
                .instructions = .{},
                .inlining = null,
                .is_comptime = true,
            };
            defer {
                child_block.instructions.deinit(gpa);
                child_block.params.deinit(gpa);
            }

            try child_sema.inst_map.ensureUnusedCapacity(gpa, @intCast(u32, uncasted_args.len));
            var arg_i: usize = 0;
            for (fn_info.param_body) |inst| {
                // Classify each parameter instruction; non-param instructions
                // in the body are skipped via the `else => continue`.
                var is_comptime = false;
                var is_anytype = false;
                switch (zir_tags[inst]) {
                    .param => {
                        is_comptime = func_ty_info.paramIsComptime(arg_i);
                    },
                    .param_comptime => {
                        is_comptime = true;
                    },
                    .param_anytype => {
                        is_anytype = true;
                        is_comptime = func_ty_info.paramIsComptime(arg_i);
                    },
                    .param_anytype_comptime => {
                        is_anytype = true;
                        is_comptime = true;
                    },
                    else => continue,
                }
                const arg_src = call_src; // TODO: better source location
                const arg = uncasted_args[arg_i];
                if (is_comptime) {
                    if (try sema.resolveMaybeUndefVal(block, arg_src, arg)) |arg_val| {
                        const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
                        child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
                    } else {
                        return sema.failWithNeededComptime(block, arg_src);
                    }
                } else if (is_anytype) {
                    // We insert into the map an instruction which is runtime-known
                    // but has the type of the argument.
                    const child_arg = try child_block.addArg(sema.typeOf(arg), 0);
                    child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
                }
                arg_i += 1;
            }
            const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body) catch |err| {
                // TODO look up the compile error that happened here and attach a note to it
                // pointing here, at the generic instantiation callsite.
                if (sema.owner_func) |owner_func| {
                    owner_func.state = .dependency_failure;
                } else {
                    sema.owner_decl.analysis = .dependency_failure;
                }
                return err;
            };
            const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst) catch unreachable;
            const new_func = new_func_val.castTag(.function).?.data;
            assert(new_func == new_module_func);

            // Record the comptime args (or generic_poison for runtime ones) on
            // the child Sema, copying types/values into the new Decl's arena.
            arg_i = 0;
            for (fn_info.param_body) |inst| {
                switch (zir_tags[inst]) {
                    .param_comptime, .param_anytype_comptime, .param, .param_anytype => {},
                    else => continue,
                }
                const arg = child_sema.inst_map.get(inst).?;
                const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator);
                if (child_sema.resolveMaybeUndefValAllowVariables(
                    &child_block,
                    .unneeded,
                    arg,
                ) catch unreachable) |arg_val| {
                    child_sema.comptime_args[arg_i] = .{
                        .ty = copied_arg_ty,
                        .val = try arg_val.copy(new_decl_arena_allocator),
                    };
                } else {
                    child_sema.comptime_args[arg_i] = .{
                        .ty = copied_arg_ty,
                        .val = Value.initTag(.generic_poison),
                    };
                }
                arg_i += 1;
            }

            try wip_captures.finalize();

            // Populate the Decl ty/val with the function and its type.
            new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator);
            new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func);
            new_decl.analysis = .complete;

            log.debug("generic function '{s}' instantiated with type {}", .{
                new_decl.name, new_decl.ty,
            });
            assert(!new_decl.ty.fnInfo().is_generic);

            // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
            // will be populated, ensuring it will have `analyzeBody` called with the ZIR
            // parameters mapped appropriately.
            try mod.comp.bin_file.allocateDeclIndexes(new_decl);
            try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });

            try new_decl.finalizeNewArena(&new_decl_arena);
        }

        break :res try sema.finishGenericCall(
            block,
            call_src,
            new_module_func,
            func_src,
            uncasted_args,
            fn_info,
            zir_tags,
        );
    } else res: {
        // Runtime path: coerce args to parameter types and emit a `call` AIR
        // instruction.
        try sema.requireRuntimeBlock(block, call_src);

        const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
        for (uncasted_args) |uncasted_arg, i| {
            const arg_src = call_src; // TODO: better source location
            if (i < fn_params_len) {
                const param_ty = func_ty.fnParamType(i);
                try sema.resolveTypeForCodegen(block, arg_src, param_ty);
                args[i] = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
            } else {
                // Extra var-args arguments are passed through uncoerced.
                args[i] = uncasted_arg;
            }
        }

        try sema.resolveTypeForCodegen(block, call_src, func_ty_info.return_type);

        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + args.len);
        const func_inst = try block.addInst(.{
            .tag = .call,
            .data = .{ .pl_op = .{
                .operand = func,
                .payload = sema.addExtraAssumeCapacity(Air.Call{
                    .args_len = @intCast(u32, args.len),
                }),
            } },
        });
        sema.appendRefsAssumeCapacity(args);
        break :res func_inst;
    };

    if (ensure_result_used) {
        try sema.ensureResultUsed(block, result, call_src);
    }
    return result;
}

fn finishGenericCall(
    sema: *Sema,
    block: *Block,
    call_src: LazySrcLoc,
    callee: *Module.Fn,
    func_src: LazySrcLoc,
    uncasted_args: []const Air.Inst.Ref,
    fn_info: Zir.FnInfo,
    zir_tags: []const Zir.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl);

    // Make a runtime call to the new function, making sure to omit the comptime args.
try sema.requireRuntimeBlock(block, call_src); const comptime_args = callee.comptime_args.?; const runtime_args_len = count: { var count: u32 = 0; var arg_i: usize = 0; for (fn_info.param_body) |inst| { switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime, .param, .param_anytype => { if (comptime_args[arg_i].val.tag() == .generic_poison) { count += 1; } arg_i += 1; }, else => continue, } } break :count count; }; const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { const new_fn_ty = callee.owner_decl.ty; var runtime_i: u32 = 0; var total_i: u32 = 0; for (fn_info.param_body) |inst| { switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime, .param, .param_anytype => {}, else => continue, } const is_runtime = comptime_args[total_i].val.tag() == .generic_poison; if (is_runtime) { const param_ty = new_fn_ty.fnParamType(runtime_i); const arg_src = call_src; // TODO: better source location const uncasted_arg = uncasted_args[total_i]; try sema.resolveTypeForCodegen(block, arg_src, param_ty); const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); runtime_args[runtime_i] = casted_arg; runtime_i += 1; } total_i += 1; } try sema.resolveTypeForCodegen(block, call_src, new_fn_ty.fnReturnType()); } try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); const func_inst = try block.addInst(.{ .tag = .call, .data = .{ .pl_op = .{ .operand = callee_inst, .payload = sema.addExtraAssumeCapacity(Air.Call{ .args_len = runtime_args_len, }), } }, }); sema.appendRefsAssumeCapacity(runtime_args); return func_inst; } fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const int_type = sema.code.instructions.items(.data)[inst].int_type; const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); return sema.addType(ty); } fn zirOptionalType(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const child_type = try sema.resolveType(block, src, inst_data.operand); const opt_type = try Type.optional(sema.arena, child_type); return sema.addType(opt_type); } fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); const elem_type = array_type.elemType(); return sema.addType(elem_type); } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len = try sema.resolveAlreadyCoercedInt(block, len_src, extra.lhs, u32); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); const vector_type = try Type.Tag.vector.create(sema.arena, .{ .len = len, .elem_type = elem_type, }); return sema.addType(vector_type); } fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; const len = try sema.resolveInt(block, .unneeded, bin_inst.lhs, Type.usize); const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs); const array_ty = try Type.array(sema.arena, len, null, elem_type); return sema.addType(array_ty); } fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const 
tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data; const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node }; const sentinel_src: LazySrcLoc = .{ .node_offset_array_type_sentinel = inst_data.src_node }; const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node }; const len = try sema.resolveInt(block, len_src, extra.len, Type.usize); const elem_type = try sema.resolveType(block, elem_src, extra.elem_type); const uncasted_sentinel = sema.resolveInst(extra.sentinel); const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src); const sentinel_val = try sema.resolveConstValue(block, sentinel_src, sentinel); const array_ty = try Type.array(sema.arena, len, sentinel_val, elem_type); return sema.addType(array_ty); } fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; const return_type = try sema.resolveType(block, operand_src, inst_data.operand); const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); return sema.addType(anyframe_type); } fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const error_union = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try 
sema.resolveType(block, rhs_src, extra.rhs); if (error_union.zigTypeTag() != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found {}", .{ error_union.elemType(), }); } const err_union_ty = try Module.errorUnionType(sema.arena, error_union, payload); return sema.addType(err_union_ty); } fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; // Create an anonymous error set type with only this error value, and return the value. const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); return sema.addConstant( result_type, try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key, }), ); } fn zirErrorToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const op = sema.resolveInst(inst_data.operand); const op_coerced = try sema.coerce(block, Type.anyerror, op, operand_src); const result_ty = Type.initTag(.u16); if (try sema.resolveMaybeUndefVal(block, src, op_coerced)) |val| { if (val.isUndef()) { return sema.addConstUndef(result_ty); } const payload = try sema.arena.create(Value.Payload.U64); payload.* = .{ .base = .{ .tag = .int_u64 }, .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, }; return sema.addConstant(result_ty, Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); return block.addBitCast(result_ty, op_coerced); } fn zirIntToError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const 
inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const op = sema.resolveInst(inst_data.operand); if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { const int = value.toUnsignedInt(); if (int > sema.mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value {d} represents no error", .{int}); const payload = try sema.arena.create(Value.Payload.Error); payload.* = .{ .base = .{ .tag = .@"error" }, .data = .{ .name = sema.mod.error_name_list.items[@intCast(usize, int)] }, }; return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); if (block.wantSafety()) { return sema.fail(block, src, "TODO: get max errors in compilation", .{}); // const is_gt_max = @panic("TODO get max errors in compilation"); // try sema.addSafetyCheck(block, is_gt_max, .invalid_error_code); } return block.addInst(.{ .tag = .bitcast, .data = .{ .ty_op = .{ .ty = Air.Inst.Ref.anyerror_type, .operand = op, } }, }); } fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, src, 
msg, "'||' merges error sets; 'or' performs boolean OR", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_ty.zigTypeTag() != .ErrorSet) return sema.fail(block, lhs_src, "expected error set type, found {}", .{lhs_ty}); if (rhs_ty.zigTypeTag() != .ErrorSet) return sema.fail(block, rhs_src, "expected error set type, found {}", .{rhs_ty}); // Anything merged with anyerror is anyerror. if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { return Air.Inst.Ref.anyerror_type; } // Resolve both error sets now. const lhs_names = switch (lhs_ty.tag()) { .error_set_single => blk: { // Work around coercion problems const tmp: *const [1][]const u8 = &lhs_ty.castTag(.error_set_single).?.data; break :blk tmp; }, .error_set_merged => lhs_ty.castTag(.error_set_merged).?.data.keys(), .error_set => lhs_ty.castTag(.error_set).?.data.names.keys(), else => unreachable, }; const rhs_names = switch (rhs_ty.tag()) { .error_set_single => blk: { const tmp: *const [1][]const u8 = &rhs_ty.castTag(.error_set_single).?.data; break :blk tmp; }, .error_set_merged => rhs_ty.castTag(.error_set_merged).?.data.keys(), .error_set => rhs_ty.castTag(.error_set).?.data.names.keys(), else => unreachable, }; // TODO do we really want to create a Decl for this? // The reason we do it right now is for memory management. var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); var names = Module.ErrorSet.NameMap{}; // TODO: Guess is an upper bound, but maybe this needs to be reduced by computing the exact size first. 
try names.ensureUnusedCapacity(anon_decl.arena(), @intCast(u32, lhs_names.len + rhs_names.len)); for (lhs_names) |name| { names.putAssumeCapacityNoClobber(name, {}); } for (rhs_names) |name| { names.putAssumeCapacity(name, {}); } const err_set_ty = try Type.Tag.error_set_merged.create(anon_decl.arena(), names); const err_set_decl = try anon_decl.finish( Type.type, try Value.Tag.ty.create(anon_decl.arena(), err_set_ty), ); try sema.mod.declareDeclDependency(sema.owner_decl, err_set_decl); return sema.addType(err_set_ty); } fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); return sema.addConstant( Type.initTag(.enum_literal), try Value.Tag.enum_literal.create(sema.arena, duped_name), ); } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand_ty.unionHasTag()) { // return sema.fail( // block, // operand_src, // "untagged union '{}' cannot be converted to integer", // .{dest_ty_src}, // ); //} return sema.fail(block, operand_src, "TODO zirEnumToInt for tagged unions", .{}); }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found {}", .{ operand_ty, }); }, }; const enum_tag_ty = sema.typeOf(enum_tag); var int_tag_type_buffer: Type.Payload.Bits = undefined; const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); if (try 
sema.typeHasOnePossibleValue(block, src, enum_tag_ty)) |opv| { return sema.addConstant(int_tag_ty, opv); } if (try sema.resolveMaybeUndefVal(block, operand_src, enum_tag)) |enum_tag_val| { var buffer: Value.Payload.U64 = undefined; const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer); return sema.addConstant(int_tag_ty, try val.copy(sema.arena)); } try sema.requireRuntimeBlock(block, src); return block.addBitCast(int_tag_ty, enum_tag); } fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const target = sema.mod.getTarget(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { return sema.fail(block, dest_ty_src, "expected enum, found {}", .{dest_ty}); } if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum()) { return sema.addConstant(dest_ty, int_val); } if (int_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.enumHasInt(int_val, target)) { const msg = msg: { const msg = try sema.errMsg( block, src, "enum '{}' has no tag with value {}", .{ dest_ty, int_val }, ); errdefer msg.destroy(sema.gpa); try sema.mod.errNoteNonLazy( dest_ty.declSrcLoc(), msg, "enum declared here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } return sema.addConstant(dest_ty, int_val); } try sema.requireRuntimeBlock(block, src); // TODO insert safety check to make sure the value matches an enum value return block.addTyOp(.intcast, dest_ty, operand); } /// Pointer in, pointer out. 
fn zirOptionalPayloadPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const un_node = sema.code.instructions.items(.data)[inst].un_node;
    const src = un_node.src();

    // The operand is guaranteed by ZIR to be a pointer; its pointee must be
    // an optional for unwrapping to make sense.
    const opt_ptr = sema.resolveInst(un_node.operand);
    const opt_ptr_ty = sema.typeOf(opt_ptr);
    assert(opt_ptr_ty.zigTypeTag() == .Pointer);

    const opt_ty = opt_ptr_ty.elemType();
    if (opt_ty.zigTypeTag() != .Optional) {
        return sema.fail(block, src, "expected optional type, found {}", .{opt_ty});
    }

    // Result type: a pointer to the payload, inheriting mutability and
    // address space from the incoming pointer.
    const payload_ty = try opt_ty.optionalChildAlloc(sema.arena);
    const payload_ptr_ty = try Type.ptr(sema.arena, .{
        .pointee_type = payload_ty,
        .mutable = !opt_ptr_ty.isConstPtr(),
        .@"addrspace" = opt_ptr_ty.ptrAddressSpace(),
    });

    // Comptime path: when the pointer and its pointee are both known,
    // unwrap now; a known-null optional is a compile error.
    if (try sema.resolveDefinedValue(block, src, opt_ptr)) |ptr_val| {
        if (try sema.pointerDeref(block, src, ptr_val, opt_ptr_ty)) |pointee_val| {
            if (pointee_val.isNull()) {
                return sema.fail(block, src, "unable to unwrap null", .{});
            }
            // The same Value represents the pointer to the optional and the payload.
            return sema.addConstant(
                payload_ptr_ty,
                try Value.Tag.opt_payload_ptr.create(sema.arena, ptr_val),
            );
        }
    }

    // Runtime path, with an optional null check on the pointee.
    try sema.requireRuntimeBlock(block, src);
    if (safety_check and block.wantSafety()) {
        const ok = try block.addUnOp(.is_non_null_ptr, opt_ptr);
        try sema.addSafetyCheck(block, ok, .unwrap_null);
    }
    return block.addTyOp(.optional_payload_ptr, payload_ptr_ty, opt_ptr);
}
/// Value in, value out.
fn zirOptionalPayload(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    // Determine the payload type. C pointers may be used like optionals;
    // their "payload" is the same pointer retyped to a single-item pointer.
    const result_ty = switch (operand_ty.zigTypeTag()) {
        .Optional => try operand_ty.optionalChildAlloc(sema.arena),
        .Pointer => t: {
            if (operand_ty.ptrSize() != .C) {
                return sema.failWithExpectedOptionalType(block, src, operand_ty);
            }
            const ptr_info = operand_ty.ptrInfo().data;
            break :t try Type.ptr(sema.arena, .{
                .pointee_type = try ptr_info.pointee_type.copy(sema.arena),
                .@"align" = ptr_info.@"align",
                .@"addrspace" = ptr_info.@"addrspace",
                .mutable = ptr_info.mutable,
                .@"allowzero" = ptr_info.@"allowzero",
                .@"volatile" = ptr_info.@"volatile",
                .size = .One,
            });
        },
        else => return sema.failWithExpectedOptionalType(block, src, operand_ty),
    };
    // Comptime path: unwrap a known value now; a known null is a compile error.
    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        if (val.isNull()) {
            return sema.fail(block, src, "unable to unwrap null", .{});
        }
        if (val.castTag(.opt_payload)) |payload| {
            return sema.addConstant(result_ty, payload.data);
        }
        return sema.addConstant(result_ty, val);
    }
    // Runtime path, with an optional null check.
    try sema.requireRuntimeBlock(block, src);
    if (safety_check and block.wantSafety()) {
        const is_non_null = try block.addUnOp(.is_non_null, operand);
        try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
    }
    return block.addTyOp(.optional_payload, result_ty, operand);
}

/// Value in, value out
fn zirErrUnionPayload(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = sema.resolveInst(inst_data.operand);
    const operand_src = src;
    const operand_ty = sema.typeOf(operand);
    if (operand_ty.zigTypeTag() != .ErrorUnion)
        return sema.fail(block, operand_src, "expected error union type, found '{}'", .{operand_ty});

    // Comptime path: a known error is a compile error; otherwise unwrap.
    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        if (val.getError()) |name| {
            return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
        }
        const data = val.castTag(.eu_payload).?.data;
        const result_ty = operand_ty.errorUnionPayload();
        return sema.addConstant(result_ty, data);
    }

    // Runtime path, with an optional error check.
    try sema.requireRuntimeBlock(block, src);
    if (safety_check and block.wantSafety()) {
        // Fix: addSafetyCheck panics when the given condition is FALSE, so the
        // "ok" condition must be "is NOT an error". Previously `.is_err` was
        // used here, firing the panic on the success path; `.is_non_err`
        // mirrors the `.is_non_null` check in zirOptionalPayload.
        const is_non_err = try block.addUnOp(.is_non_err, operand);
        try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion);
    }
    const result_ty = operand_ty.errorUnionPayload();
    return block.addTyOp(.unwrap_errunion_payload, result_ty, operand);
}

/// Pointer in, pointer out.
fn zirErrUnionPayloadPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    assert(operand_ty.zigTypeTag() == .Pointer);

    if (operand_ty.elemType().zigTypeTag() != .ErrorUnion)
        return sema.fail(block, src, "expected error union type, found {}", .{operand_ty.elemType()});

    // Result type: a pointer to the payload, inheriting mutability and
    // address space from the incoming pointer.
    const payload_ty = operand_ty.elemType().errorUnionPayload();
    const operand_pointer_ty = try Type.ptr(sema.arena, .{
        .pointee_type = payload_ty,
        .mutable = !operand_ty.isConstPtr(),
        .@"addrspace" = operand_ty.ptrAddressSpace(),
    });

    // Comptime path: deref the known pointer; a known error is a compile error.
    if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
        if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
            if (val.getError()) |name| {
                return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
            }
            return sema.addConstant(
                operand_pointer_ty,
                try Value.Tag.eu_payload_ptr.create(sema.arena, pointer_val),
            );
        }
    }

    // Runtime path, with an optional error check.
    try sema.requireRuntimeBlock(block, src);
    if (safety_check and block.wantSafety()) {
        // Fix: as in zirErrUnionPayload, the "ok" condition was inverted
        // (`.is_err`); additionally the operand here is a POINTER to the error
        // union, so the pointer variant is required — mirroring
        // `.is_non_null_ptr` in zirOptionalPayloadPtr.
        const is_non_err = try block.addUnOp(.is_non_err_ptr, operand);
        try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion);
    }
    return block.addTyOp(.unwrap_errunion_payload_ptr, operand_pointer_ty, operand);
}

/// Value in, value out
fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    if (operand_ty.zigTypeTag() != .ErrorUnion)
        return sema.fail(block, src, "expected error union type, found '{}'", .{operand_ty});

    const result_ty = operand_ty.errorUnionSet();

    // Comptime path: ZIR only emits this when the operand holds an error.
    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        assert(val.getError() != null);
        return sema.addConstant(result_ty, val);
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addTyOp(.unwrap_errunion_err, result_ty, operand);
}

/// Pointer in, value out
fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    assert(operand_ty.zigTypeTag() == .Pointer);

    if (operand_ty.elemType().zigTypeTag() != .ErrorUnion)
        return sema.fail(block, src, "expected error union type, found {}", .{operand_ty.elemType()});

    const result_ty = operand_ty.elemType().errorUnionSet();

    // Comptime path: deref the known pointer; it must hold an error.
    if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
        if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
            assert(val.getError() != null);
            return sema.addConstant(result_ty, val);
        }
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand);
}

fn
zirEnsureErrPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) return sema.fail(block, src, "expected error union type, found '{}'", .{operand_ty}); if (operand_ty.errorUnionPayload().zigTypeTag() != .Void) { return sema.fail(block, src, "expression value is ignored", .{}); } } fn zirFunc( sema: *Sema, block: *Block, inst: Zir.Inst.Index, inferred_error_set: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index); var extra_index = extra.end; const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len]; extra_index += ret_ty_body.len; var body_inst: Zir.Inst.Index = 0; var src_locs: Zir.Inst.Func.SrcLocs = undefined; if (extra.data.body_len != 0) { body_inst = inst; extra_index += extra.data.body_len; src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; } const cc: std.builtin.CallingConvention = if (sema.owner_decl.is_exported) .C else .Unspecified; return sema.funcCommon( block, inst_data.src_node, body_inst, ret_ty_body, cc, Value.@"null", false, inferred_error_set, false, src_locs, null, ); } fn funcCommon( sema: *Sema, block: *Block, src_node_offset: i32, body_inst: Zir.Inst.Index, ret_ty_body: []const Zir.Inst.Index, cc: std.builtin.CallingConvention, align_val: Value, var_args: bool, inferred_error_set: bool, is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, ) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 
src_node_offset }; // The return type body might be a type expression that depends on generic parameters. // In such case we need to use a generic_poison value for the return type and mark // the function as generic. var is_generic = false; const bare_return_type: Type = ret_ty: { if (ret_ty_body.len == 0) break :ret_ty Type.void; const err = err: { // Make sure any nested param instructions don't clobber our work. const prev_params = block.params; block.params = .{}; defer { block.params.deinit(sema.gpa); block.params = prev_params; } if (sema.resolveBody(block, ret_ty_body)) |ret_ty_inst| { if (sema.analyzeAsType(block, ret_ty_src, ret_ty_inst)) |ret_ty| { break :ret_ty ret_ty; } else |err| break :err err; } else |err| break :err err; }; switch (err) { error.GenericPoison => { // The type is not available until the generic instantiation. is_generic = true; break :ret_ty Type.initTag(.generic_poison); }, else => |e| return e, } }; const mod = sema.mod; const new_func: *Module.Fn = new_func: { if (body_inst == 0) break :new_func undefined; if (sema.comptime_args_fn_inst == body_inst) { const new_func = sema.preallocated_new_func.?; sema.preallocated_new_func = null; // take ownership break :new_func new_func; } break :new_func try sema.gpa.create(Module.Fn); }; errdefer if (body_inst != 0) sema.gpa.destroy(new_func); var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null; errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node); // Note: no need to errdefer since this will still be in its default state at the end of the function. const fn_ty: Type = fn_ty: { // Hot path for some common function types. // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. 
if (!is_generic and block.params.items.len == 0 and !var_args and align_val.tag() == .null_value and !inferred_error_set) { if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) { break :fn_ty Type.initTag(.fn_noreturn_no_args); } if (bare_return_type.zigTypeTag() == .Void and cc == .Unspecified) { break :fn_ty Type.initTag(.fn_void_no_args); } if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Naked) { break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); } if (bare_return_type.zigTypeTag() == .Void and cc == .C) { break :fn_ty Type.initTag(.fn_ccc_void_no_args); } } const param_types = try sema.arena.alloc(Type, block.params.items.len); const comptime_params = try sema.arena.alloc(bool, block.params.items.len); for (block.params.items) |param, i| { param_types[i] = param.ty; comptime_params[i] = param.is_comptime; is_generic = is_generic or param.is_comptime or param.ty.tag() == .generic_poison or param.ty.requiresComptime(); } if (align_val.tag() != .null_value) { return sema.fail(block, src, "TODO implement support for function prototypes to have alignment specified", .{}); } is_generic = is_generic or bare_return_type.requiresComptime(); const return_type = if (!inferred_error_set or bare_return_type.tag() == .generic_poison) bare_return_type else blk: { const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); node.data = .{ .func = new_func }; maybe_inferred_error_set_node = node; const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); break :blk try Type.Tag.error_union.create(sema.arena, .{ .error_set = error_set_ty, .payload = bare_return_type, }); }; break :fn_ty try Type.Tag.function.create(sema.arena, .{ .param_types = param_types, .comptime_params = comptime_params.ptr, .return_type = return_type, .cc = cc, .is_var_args = var_args, .is_generic = is_generic, }); }; if (opt_lib_name) |lib_name| blk: { const lib_name_src: LazySrcLoc = .{ .node_offset_lib_name = src_node_offset }; 
log.debug("extern fn symbol expected in lib '{s}'", .{lib_name}); mod.comp.stage1AddLinkLib(lib_name) catch |err| { return sema.fail(block, lib_name_src, "unable to add link lib '{s}': {s}", .{ lib_name, @errorName(err), }); }; const target = mod.getTarget(); if (target_util.is_libc_lib_name(target, lib_name)) { if (!mod.comp.bin_file.options.link_libc) { return sema.fail( block, lib_name_src, "dependency on libc must be explicitly specified in the build command", .{}, ); } break :blk; } if (target_util.is_libcpp_lib_name(target, lib_name)) { if (!mod.comp.bin_file.options.link_libcpp) { return sema.fail( block, lib_name_src, "dependency on libc++ must be explicitly specified in the build command", .{}, ); } break :blk; } if (!target.isWasm() and !mod.comp.bin_file.options.pic) { return sema.fail( block, lib_name_src, "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.", .{ lib_name, lib_name }, ); } } if (is_extern) { return sema.addConstant( fn_ty, try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl), ); } if (body_inst == 0) { const fn_ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = fn_ty, .@"addrspace" = .generic, .mutable = false, }); return sema.addType(fn_ptr_ty); } const is_inline = fn_ty.fnCallingConvention() == .Inline; const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .queued; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == body_inst) blk: { break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; } else null; const fn_payload = try sema.arena.create(Value.Payload.Function); new_func.* = .{ .state = anal_state, .zir_body_inst = body_inst, .owner_decl = sema.owner_decl, .comptime_args = comptime_args, .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, .lbrace_column = @truncate(u16, src_locs.columns), .rbrace_column = @truncate(u16, src_locs.columns >> 16), }; if (maybe_inferred_error_set_node) 
|node| {
        new_func.inferred_error_sets.prepend(node);
    }
    maybe_inferred_error_set_node = null;

    fn_payload.* = .{
        .base = .{ .tag = .function },
        .data = new_func,
    };
    return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base));
}

/// Analyzes a ZIR `param` instruction (a typed function parameter).
/// Resolves the parameter's type body; on `error.GenericPoison` the type is
/// unknowable until instantiation, so a generic-poison anytype parameter is
/// recorded instead. Otherwise either coerces a provided comptime argument
/// in `sema.inst_map`, or appends a runtime parameter to `block.params`.
fn zirParam(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_comptime: bool,
) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_tok;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
    const param_name = sema.code.nullTerminatedString(extra.data.name);
    // Trailing extra data is the ZIR body that computes the parameter's type.
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];

    // TODO check if param_name shadows a Decl. This only needs to be done if
    // usingnamespace is implemented.
    _ = param_name;

    // We could be in a generic function instantiation, or we could be evaluating a generic
    // function without any comptime args provided.
    const param_ty = param_ty: {
        const err = err: {
            // Make sure any nested param instructions don't clobber our work.
            const prev_params = block.params;
            block.params = .{};
            defer {
                block.params.deinit(sema.gpa);
                block.params = prev_params;
            }

            if (sema.resolveBody(block, body)) |param_ty_inst| {
                if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| {
                    break :param_ty param_ty;
                } else |err| break :err err;
            } else |err| break :err err;
        };
        switch (err) {
            error.GenericPoison => {
                // The type is not available until the generic instantiation.
                // We result the param instruction with a poison value and
                // insert an anytype parameter.
                try block.params.append(sema.gpa, .{
                    .ty = Type.initTag(.generic_poison),
                    .is_comptime = is_comptime,
                });
                try sema.inst_map.putNoClobber(sema.gpa, inst, .generic_poison);
                return;
            },
            else => |e| return e,
        }
    };
    // A mapping for this instruction means a comptime argument was supplied
    // for this parameter (generic instantiation path).
    if (sema.inst_map.get(inst)) |arg| {
        if (is_comptime or param_ty.requiresComptime()) {
            // We have a comptime value for this parameter so it should be elided from the
            // function type of the function instruction in this block.
            const coerced_arg = try sema.coerce(block, param_ty, arg, src);
            sema.inst_map.putAssumeCapacity(inst, coerced_arg);
            return;
        }
        // Even though a comptime argument is provided, the generic function wants to treat
        // this as a runtime parameter.
        assert(sema.inst_map.remove(inst));
    }

    try block.params.append(sema.gpa, .{
        .ty = param_ty,
        .is_comptime = is_comptime or param_ty.requiresComptime(),
    });
    // The param's value is unknowable here; map it to a generic-poison constant.
    const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison));
    try sema.inst_map.putNoClobber(sema.gpa, inst, result);
}

/// Analyzes a ZIR `param_anytype` instruction (an `anytype` parameter).
/// If a comptime/instantiation argument is already mapped, its type drives
/// whether a runtime parameter is appended; otherwise a generic-poison
/// parameter is recorded.
fn zirParamAnytype(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_comptime: bool,
) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const param_name = inst_data.get(sema.code);

    // TODO check if param_name shadows a Decl. This only needs to be done if
    // usingnamespace is implemented.
    _ = param_name;

    if (sema.inst_map.get(inst)) |air_ref| {
        const param_ty = sema.typeOf(air_ref);
        if (is_comptime or param_ty.requiresComptime()) {
            // We have a comptime value for this parameter so it should be elided from the
            // function type of the function instruction in this block.
            return;
        }
        // The map is already populated but we do need to add a runtime parameter.
        try block.params.append(sema.gpa, .{
            .ty = param_ty,
            .is_comptime = false,
        });
        return;
    }

    // We are evaluating a generic function without any comptime args provided.
try block.params.append(sema.gpa, .{
        .ty = Type.initTag(.generic_poison),
        .is_comptime = is_comptime,
    });
    try sema.inst_map.put(sema.gpa, inst, .generic_poison);
}

/// ZIR `as` (no source node): coerce rhs to the type given by lhs.
fn zirAs(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const bin_inst = sema.code.instructions.items(.data)[inst].bin;
    return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs);
}

/// ZIR `as_node`: same as `zirAs` but carries a source node for diagnostics.
fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
    return sema.analyzeAs(block, src, extra.dest_type, extra.operand);
}

/// Shared implementation for the `as` family: resolve the destination type,
/// then coerce the operand to it.
fn analyzeAs(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_dest_type: Zir.Inst.Ref,
    zir_operand: Zir.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const dest_ty = try sema.resolveType(block, src, zir_dest_type);
    const operand = sema.resolveInst(zir_operand);
    return sema.coerce(block, dest_ty, operand, src);
}

/// ZIR `ptrtoint`: validates the operand is a runtime pointer and emits a
/// `ptrtoint` AIR instruction. Comptime-known pointer addresses are not yet
/// folded (see TODO below).
fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ptr = sema.resolveInst(inst_data.operand);
    const ptr_ty = sema.typeOf(ptr);
    if (!ptr_ty.isPtrAtRuntime()) {
        const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
        return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty});
    }
    // TODO handle known-pointer-address
    const src = inst_data.src();
    try sema.requireRuntimeBlock(block, src);
    return block.addUnOp(.ptrtoint, ptr);
}

/// ZIR `field_val`: loads the value of a field named by a ZIR string literal.
fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
    const field_name = sema.code.nullTerminatedString(extra.field_name_start);
    const object = sema.resolveInst(extra.lhs);
    return sema.fieldVal(block, src, object, field_name, field_name_src);
}

/// ZIR `field_ptr`: produces a pointer to a field named by a ZIR string literal.
fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
    const field_name = sema.code.nullTerminatedString(extra.field_name_start);
    const object_ptr = sema.resolveInst(extra.lhs);
    return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src);
}

/// ZIR `field_call_bind`: binds `object.field` for a method-call expression.
fn zirFieldCallBind(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
    const field_name = sema.code.nullTerminatedString(extra.field_name_start);
    const object_ptr = sema.resolveInst(extra.lhs);
    return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src);
}

/// ZIR `field_val_named` (`@field` value access): the field name is a
/// runtime ZIR ref that must resolve to a comptime-known string.
fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.FieldNamed,
inst_data.payload_index).data;
    const object = sema.resolveInst(extra.lhs);
    const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
    return sema.fieldVal(block, src, object, field_name, field_name_src);
}

/// ZIR `field_ptr_named` (`@field` pointer access): field name is resolved
/// as a comptime-known string before delegating to `fieldPtr`.
fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
    const object_ptr = sema.resolveInst(extra.lhs);
    const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
    return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src);
}

/// ZIR `field_call_bind_named`: method-call binding where the field name is
/// a comptime-resolved string.
fn zirFieldCallBindNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
    const object_ptr = sema.resolveInst(extra.lhs);
    const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
    return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src);
}

/// ZIR `intcast` (`@intCast`): both operand and destination must be integer
/// types. Comptime-known operands are coerced; a runtime operand targeting
/// `comptime_int` is an error; otherwise an `intcast` AIR op is emitted.
fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin,
inst_data.payload_index).data;

    const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
    const operand = sema.resolveInst(extra.rhs);

    const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty);
    _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));

    if (try sema.isComptimeKnown(block, operand_src, operand)) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    } else if (dest_is_comptime_int) {
        return sema.fail(block, src, "unable to cast runtime value to 'comptime_int'", .{});
    }

    try sema.requireRuntimeBlock(block, operand_src);
    // TODO insert safety check to make sure the value fits in the dest type
    return block.addTyOp(.intcast, dest_ty, operand);
}

/// ZIR `bitcast` (`@bitCast`): resolves the destination type and delegates
/// to `bitCast`.
fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
    const operand = sema.resolveInst(extra.rhs);
    return sema.bitCast(block, dest_ty, operand, operand_src);
}

/// ZIR `floatcast` (`@floatCast`): validates float types, folds
/// comptime-known operands via coercion, widening is a plain coercion, and
/// only a genuine narrowing emits an `fptrunc` AIR op.
fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
    const operand = sema.resolveInst(extra.rhs);

    const dest_is_comptime_float =
switch (dest_ty.zigTypeTag()) {
        .ComptimeFloat => true,
        .Float => false,
        else => return sema.fail(
            block,
            dest_ty_src,
            "expected float type, found '{}'",
            .{dest_ty},
        ),
    };

    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag()) {
        .ComptimeFloat, .Float, .ComptimeInt => {},
        else => return sema.fail(
            block,
            operand_src,
            "expected float type, found '{}'",
            .{operand_ty},
        ),
    }

    // Comptime-known values fold to a coercion regardless of widths.
    if (try sema.isComptimeKnown(block, operand_src, operand)) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    }

    if (dest_is_comptime_float) {
        return sema.fail(block, src, "unable to cast runtime value to 'comptime_float'", .{});
    }

    const target = sema.mod.getTarget();
    const src_bits = operand_ty.floatBits(target);
    const dst_bits = dest_ty.floatBits(target);
    // Widening (or same width) is an ordinary coercion; only narrowing
    // needs an explicit fptrunc instruction.
    if (dst_bits >= src_bits) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    }

    try sema.requireRuntimeBlock(block, operand_src);
    return block.addTyOp(.fptrunc, dest_ty, operand);
}

/// ZIR `elem_val` (no source node): value of `array[index]` using
/// `sema.src` for diagnostics.
fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const bin_inst = sema.code.instructions.items(.data)[inst].bin;
    const array = sema.resolveInst(bin_inst.lhs);
    const elem_index = sema.resolveInst(bin_inst.rhs);
    return sema.elemVal(block, sema.src, array, elem_index, sema.src);
}

/// ZIR `elem_val_node`: value of `array[index]` with precise source
/// locations for the access and the index.
fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array = sema.resolveInst(extra.lhs);
    const elem_index = sema.resolveInst(extra.rhs);
    return sema.elemVal(block, src, array, elem_index, elem_index_src);
}

/// ZIR `elem_ptr` (no source node): pointer to `array[index]`.
fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const
tracy = trace(@src());
    defer tracy.end();

    const bin_inst = sema.code.instructions.items(.data)[inst].bin;
    const array_ptr = sema.resolveInst(bin_inst.lhs);
    const elem_index = sema.resolveInst(bin_inst.rhs);
    return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src);
}

/// ZIR `elem_ptr_node`: pointer to `array[index]` with source locations.
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array_ptr = sema.resolveInst(extra.lhs);
    const elem_index = sema.resolveInst(extra.rhs);
    return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src);
}

/// ZIR `elem_ptr_imm`: pointer to an element at a comptime-known immediate
/// index; the index is materialized as an unsigned constant.
fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
    const array_ptr = sema.resolveInst(extra.ptr);
    const elem_index = try sema.addIntUnsigned(Type.usize, extra.index);
    return sema.elemPtr(block, src, array_ptr, elem_index, src);
}

/// ZIR `slice_start`: `ptr[start..]` — no end bound and no sentinel.
fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data;
    const array_ptr = sema.resolveInst(extra.lhs);
    const start = sema.resolveInst(extra.start);
    return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded);
}

/// ZIR `slice_end`: `ptr[start..end]` — explicit end bound, no sentinel.
fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer
tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data;
    const array_ptr = sema.resolveInst(extra.lhs);
    const start = sema.resolveInst(extra.start);
    const end = sema.resolveInst(extra.end);
    return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded);
}

/// ZIR `slice_sentinel`: `ptr[start..end :sentinel]` — slice with an
/// explicit sentinel value.
fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data;
    const array_ptr = sema.resolveInst(extra.lhs);
    const start = sema.resolveInst(extra.start);
    const end = sema.resolveInst(extra.end);
    const sentinel = sema.resolveInst(extra.sentinel);
    return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src);
}

/// Analyzes a ZIR switch prong capture (`=> |capture|`). Digs the switch
/// operand out of the referenced switch instruction; for a tagged-union
/// operand, produces the payload value (or a pointer to it when `is_ref`).
/// Multi-item captures and error-set captures are not implemented yet.
fn zirSwitchCapture(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_multi: bool,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const zir_datas = sema.code.instructions.items(.data);
    const capture_info = zir_datas[inst].switch_capture;
    const switch_info = zir_datas[capture_info.switch_inst].pl_node;
    const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index);
    const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_info.src_node };
    const switch_src = switch_info.src();
    const operand_is_ref = switch_extra.data.bits.is_ref;
    // The switch condition instruction holds the (possibly by-ref) operand.
    const cond_inst = Zir.refToIndex(switch_extra.data.operand).?;
    const cond_info = sema.code.instructions.items(.data)[cond_inst].un_node;
    const operand_ptr = sema.resolveInst(cond_info.operand);
    const operand_ptr_ty = sema.typeOf(operand_ptr);
    const operand_ty = if
(operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty;

    if (is_multi) {
        return sema.fail(block, switch_src, "TODO implement Sema for switch capture multi", .{});
    }
    const scalar_prong = switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index);
    const item = sema.resolveInst(scalar_prong.item);
    // Previous switch validation ensured this will succeed
    const item_val = sema.resolveConstValue(block, .unneeded, item) catch unreachable;

    switch (operand_ty.zigTypeTag()) {
        .Union => {
            const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
            const enum_ty = union_obj.tag_ty;

            // Map the prong item (a tag value) to its union field.
            const field_index_usize = enum_ty.enumTagFieldIndex(item_val).?;
            const field_index = @intCast(u32, field_index_usize);
            const field = union_obj.fields.values()[field_index];

            // TODO handle multiple union tags which have compatible types

            if (is_ref) {
                assert(operand_is_ref);

                // Pointer capture: the result is a pointer to the payload,
                // inheriting mutability from the operand pointer.
                const field_ty_ptr = try Type.ptr(sema.arena, .{
                    .pointee_type = field.ty,
                    .@"addrspace" = .generic,
                    .mutable = operand_ptr_ty.ptrIsMutable(),
                });

                if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| {
                    return sema.addConstant(
                        field_ty_ptr,
                        try Value.Tag.field_ptr.create(sema.arena, .{
                            .container_ptr = op_ptr_val,
                            .field_index = field_index,
                        }),
                    );
                }
                try sema.requireRuntimeBlock(block, operand_src);
                return block.addStructFieldPtr(operand_ptr, field_index, field_ty_ptr);
            }

            const operand = if (operand_is_ref)
                try sema.analyzeLoad(block, operand_src, operand_ptr, operand_src)
            else
                operand_ptr;

            if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| {
                return sema.addConstant(
                    field.ty,
                    operand_val.castTag(.@"union").?.data.val,
                );
            }
            try sema.requireRuntimeBlock(block, operand_src);
            return block.addStructFieldVal(operand, field_index, field.ty);
        },
        .ErrorSet => {
            return sema.fail(block, operand_src, "TODO implement Sema for zirSwitchCapture for error sets", .{});
        },
        else => {
            return sema.fail(block, operand_src, "switch on type '{}' provides no capture value", .{
                operand_ty,
            });
        },
    }
}

/// Analyzes a ZIR `else`/`_` prong capture. Currently only validates the
/// by-ref invariant and reports not-implemented.
fn zirSwitchCaptureElse(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const zir_datas = sema.code.instructions.items(.data);
    const capture_info = zir_datas[inst].switch_capture;
    const switch_info = zir_datas[capture_info.switch_inst].pl_node;
    const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index).data;
    const src = switch_info.src();
    const operand_is_ref = switch_extra.bits.is_ref;
    // A by-ref capture is only possible when the switch operand is by-ref.
    assert(!is_ref or operand_is_ref);

    return sema.fail(block, src, "TODO implement Sema for zirSwitchCaptureElse", .{});
}

/// Analyzes the condition operand of a switch. Loads it when by-ref, folds
/// one-possible-value types to a constant, converts a tagged union to its
/// tag, and rejects types that cannot be switched on.
fn zirSwitchCond(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_ptr = sema.resolveInst(inst_data.operand);
    const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, src) else operand_ptr;
    const operand_ty = sema.typeOf(operand);

    switch (operand_ty.zigTypeTag()) {
        .Type,
        .Void,
        .Bool,
        .Int,
        .Float,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .Pointer,
        .Fn,
        .ErrorSet,
        .Enum,
        => {
            if ((try sema.typeHasOnePossibleValue(block, src, operand_ty))) |opv| {
                return sema.addConstant(operand_ty, opv);
            }
            return operand;
        },

        .Union => {
            // Switching on a union actually switches on its tag; untagged
            // unions are a compile error with a "declared here" note.
            const enum_ty = operand_ty.unionTagType() orelse {
                const msg = msg: {
                    const msg = try sema.errMsg(block, src, "switch on untagged union", .{});
                    errdefer msg.destroy(sema.gpa);
                    try sema.addDeclaredHereNote(msg, operand_ty);
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            };
            return sema.unionToTag(block, enum_ty, operand, src);
        },

        .ErrorUnion,
        .NoReturn,
        .Array,
        .Struct,
        .Undefined,
        .Null,
        .Optional,
        .BoundFn,
        .Opaque,
        .Vector,
        .Frame,
        .AnyFrame,
        => return sema.fail(block, src, "switch on type '{}'", .{operand_ty}),
    }
}

fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index)
CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const src_node_offset = inst_data.src_node; const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset }; const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); const operand = sema.resolveInst(extra.data.operand); var header_extra_index: usize = extra.end; const scalar_cases_len = extra.data.bits.scalar_cases_len; const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { const multi_cases_len = sema.code.extra[header_extra_index]; header_extra_index += 1; break :blk multi_cases_len; } else 0; const special_prong = extra.data.bits.specialProng(); const special: struct { body: []const Zir.Inst.Index, end: usize } = switch (special_prong) { .none => .{ .body = &.{}, .end = header_extra_index }, .under, .@"else" => blk: { const body_len = sema.code.extra[header_extra_index]; const extra_body_start = header_extra_index + 1; break :blk .{ .body = sema.code.extra[extra_body_start..][0..body_len], .end = extra_body_start + body_len, }; }, }; const operand_ty = sema.typeOf(operand); // Validate usage of '_' prongs. if (special_prong == .under and !operand_ty.isNonexhaustiveEnum()) { const msg = msg: { const msg = try sema.errMsg( block, src, "'_' prong only allowed when switching on non-exhaustive enums", .{}, ); errdefer msg.destroy(gpa); try sema.errNote( block, special_prong_src, msg, "'_' prong here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } // Validate for duplicate items, missing else prong, and invalid range. 
switch (operand_ty.zigTypeTag()) { .Enum => { var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); defer gpa.free(seen_fields); mem.set(?Module.SwitchProngSrc, seen_fields, null); var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItemEnum( block, seen_fields, item_ref, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + body_len; for (items) |item_ref, item_i| { try sema.validateSwitchItemEnum( block, seen_fields, item_ref, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, ); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } const all_tags_handled = for (seen_fields) |seen_src| { if (seen_src == null) break false; } else !operand_ty.isNonexhaustiveEnum(); switch (special_prong) { .none => { if (!all_tags_handled) { const msg = msg: { const msg = try sema.errMsg( block, src, "switch must handle all possibilities", .{}, ); errdefer msg.destroy(sema.gpa); for (seen_fields) |seen_src, i| { if (seen_src != null) continue; const field_name = operand_ty.enumFieldName(i); // TODO have this point to the tag decl instead of here try sema.errNote( block, src, msg, "unhandled enumeration value: '{s}'", .{field_name}, ); } try sema.mod.errNoteNonLazy( operand_ty.declSrcLoc(), msg, "enum '{}' declared here", .{operand_ty}, ); break :msg msg; }; return 
sema.failWithOwnedErrorMsg(msg); } }, .under => { if (all_tags_handled) return sema.fail( block, special_prong_src, "unreachable '_' prong; all cases already handled", .{}, ); }, .@"else" => { if (all_tags_handled) return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", .{}, ); }, } }, .ErrorSet => return sema.fail(block, src, "TODO validate switch .ErrorSet", .{}), .Union => return sema.fail(block, src, "TODO validate switch .Union", .{}), .Int, .ComptimeInt => { var range_set = RangeSet.init(gpa); defer range_set.deinit(); var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItem( block, &range_set, item_ref, operand_ty, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; for (items) |item_ref, item_i| { try sema.validateSwitchItem( block, &range_set, item_ref, operand_ty, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, ); } var range_i: u32 = 0; while (range_i < ranges_len) : (range_i += 1) { const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; try sema.validateSwitchRange( block, &range_set, item_first, item_last, operand_ty, src_node_offset, .{ .range = .{ .prong = multi_i, .item = range_i } }, ); } extra_index += body_len; } } check_range: 
{ if (operand_ty.zigTypeTag() == .Int) { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); const target = sema.mod.getTarget(); const min_int = try operand_ty.minInt(arena.allocator(), target); const max_int = try operand_ty.maxInt(arena.allocator(), target); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", .{}, ); } break :check_range; } } if (special_prong != .@"else") { return sema.fail( block, src, "switch must handle all possibilities", .{}, ); } } }, .Bool => { var true_count: u8 = 0; var false_count: u8 = 0; var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItemBool( block, &true_count, &false_count, item_ref, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + body_len; for (items) |item_ref, item_i| { try sema.validateSwitchItemBool( block, &true_count, &false_count, item_ref, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, ); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } switch (special_prong) { .@"else" => { if (true_count + false_count == 2) { return sema.fail( block, src, "unreachable else prong; all cases already handled", .{}, ); } }, .under, .none => { if (true_count + false_count < 2) { return 
sema.fail( block, src, "switch must handle all possibilities", .{}, ); } }, } }, .EnumLiteral, .Void, .Fn, .Pointer, .Type => { if (special_prong != .@"else") { return sema.fail( block, src, "else prong required when switching on type '{}'", .{operand_ty}, ); } var seen_values = ValueSrcMap.initContext(gpa, .{ .ty = operand_ty }); defer seen_values.deinit(); var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItemSparse( block, &seen_values, item_ref, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + body_len; for (items) |item_ref, item_i| { try sema.validateSwitchItemSparse( block, &seen_values, item_ref, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, ); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } }, .ErrorUnion, .NoReturn, .Array, .Struct, .Undefined, .Null, .Optional, .BoundFn, .Opaque, .Vector, .Frame, .AnyFrame, .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ operand_ty, }), } const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block: Block = .{ .parent = block, .sema = sema, 
.src_decl = block.src_decl, .namespace = block.namespace, .wip_capture_scope = block.wip_capture_scope, .instructions = .{}, .label = &label, .inlining = block.inlining, .is_comptime = block.is_comptime, }; const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); defer merges.results.deinit(gpa); defer merges.br_list.deinit(gpa); if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { var extra_index: usize = special.end; { var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val, operand_ty)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } } } { var multi_i: usize = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..body_len]; for (items) |item_ref| { const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. 
const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val, operand_ty)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } } var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; // Validation above ensured these will succeed. const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first) catch unreachable; const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last) catch unreachable; if (Value.compare(operand_val, .gte, first_tv.val, operand_ty) and Value.compare(operand_val, .lte, last_tv.val, operand_ty)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } } extra_index += body_len; } } return sema.resolveBlockBody(block, src, &child_block, special.body, merges); } if (scalar_cases_len + multi_cases_len == 0) { return sema.resolveBlockBody(block, src, &child_block, special.body, merges); } try sema.requireRuntimeBlock(block, src); const estimated_cases_extra = (scalar_cases_len + multi_cases_len) * @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2; var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra); defer cases_extra.deinit(gpa); var case_block = child_block.makeSubBlock(); case_block.runtime_loop = null; case_block.runtime_cond = operand_src; case_block.runtime_index += 1; defer case_block.instructions.deinit(gpa); var extra_index: usize = special.end; var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; var wip_captures = try 
WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = wip_captures.scope; const item = sema.resolveInst(item_ref); // `item` is already guaranteed to be constant known. _ = try sema.analyzeBody(&case_block, body); try wip_captures.finalize(); try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@enumToInt(item)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } var is_first = true; var prev_cond_br: Air.Inst.Index = undefined; var first_else_body: []const Air.Inst.Index = &.{}; defer gpa.free(first_else_body); var prev_then_body: []const Air.Inst.Index = &.{}; defer gpa.free(prev_then_body); var cases_len = scalar_cases_len; var multi_i: usize = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = child_block.wip_capture_scope; var any_ok: Air.Inst.Ref = .none; // If there are any ranges, we have to put all the items into the // else prong. Otherwise, we can take advantage of multiple items // mapping to the same body. 
if (ranges_len == 0) { cases_len += 1; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(@intCast(u32, items.len)); cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); for (items) |item_ref| { const item = sema.resolveInst(item_ref); cases_extra.appendAssumeCapacity(@enumToInt(item)); } cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } else { for (items) |item_ref| { const item = sema.resolveInst(item_ref); const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); if (any_ok != .none) { any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok); } else { any_ok = cmp_ok; } } var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const item_first = sema.resolveInst(first_ref); const item_last = sema.resolveInst(last_ref); // operand >= first and operand <= last const range_first_ok = try case_block.addBinOp( .cmp_gte, operand, item_first, ); const range_last_ok = try case_block.addBinOp( .cmp_lte, operand, item_last, ); const range_ok = try case_block.addBinOp( .bool_and, range_first_ok, range_last_ok, ); if (any_ok != .none) { any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok); } else { any_ok = range_ok; } } const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = any_ok, .payload = undefined, }, } }); var cond_body = case_block.instructions.toOwnedSlice(gpa); defer gpa.free(cond_body); var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); defer wip_captures.deinit(); 
case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = wip_captures.scope; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); try wip_captures.finalize(); if (is_first) { is_first = false; first_else_body = cond_body; cond_body = &.{}; } else { try sema.air_extra.ensureUnusedCapacity( gpa, @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len, ); sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(u32, prev_then_body.len), .else_body_len = @intCast(u32, cond_body.len), }); sema.air_extra.appendSliceAssumeCapacity(prev_then_body); sema.air_extra.appendSliceAssumeCapacity(cond_body); } prev_then_body = case_block.instructions.toOwnedSlice(gpa); prev_cond_br = new_cond_br; } } var final_else_body: []const Air.Inst.Index = &.{}; if (special.body.len != 0 or !is_first) { var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = wip_captures.scope; if (special.body.len != 0) { _ = try sema.analyzeBody(&case_block, special.body); } else { // We still need a terminator in this block, but we have proven // that it is unreachable. 
// NOTE(review): the code below is the tail of a switch-lowering routine whose
// head lies above this excerpt; it patches the final cond_br of the range-prong
// chain and then emits the `switch_br` AIR instruction.
// TODO this should be a special safety panic other than unreachable, something
// like "panic: switch operand had corrupt value not allowed by the type"
try case_block.addUnreachable(src, true);
}
try wip_captures.finalize();

if (is_first) {
    // No cond_br chain was built (no range prongs): the else body stands alone.
    final_else_body = case_block.instructions.items;
} else {
    // Patch the last pending cond_br: its "then" is the previous prong body and
    // its "else" is the else/special prong body just analyzed.
    try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len +
        @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len);

    sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
        sema.addExtraAssumeCapacity(Air.CondBr{
        .then_body_len = @intCast(u32, prev_then_body.len),
        .else_body_len = @intCast(u32, case_block.instructions.items.len),
    });
    sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
    sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items);
    // The switch_br's overall else body is the head of the cond_br chain.
    final_else_body = first_else_body;
}
}

// Reserve room for the SwitchBr payload, the encoded cases, and the else body.
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
    cases_extra.items.len + final_else_body.len);

_ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{
    .operand = operand,
    .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
        .cases_len = @intCast(u32, cases_len),
        .else_body_len = @intCast(u32, final_else_body.len),
    }),
} } });
sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
sema.air_extra.appendSliceAssumeCapacity(final_else_body);
return sema.analyzeBlockBody(block, src, &child_block, merges);
}

/// Resolves a switch prong item to a comptime-known `TypedValue`.
/// Source locations are resolved lazily: resolution is first attempted with
/// `.unneeded`, and only when an error actually needs reporting is the costly
/// `SwitchProngSrc.resolve` performed and the resolution retried.
fn resolveSwitchItemVal(
    sema: *Sema,
    block: *Block,
    item_ref: Zir.Inst.Ref,
    switch_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
    range_expand: Module.SwitchProngSrc.RangeExpand,
) CompileError!TypedValue {
    const item = sema.resolveInst(item_ref);
    const item_ty = sema.typeOf(item);
    // Constructing a LazySrcLoc is costly because we only have the switch AST node.
    // Only if we know for sure we need to report a compile error do we resolve the
    // full source locations.
    if (sema.resolveConstValue(block, .unneeded, item)) |val| {
        return TypedValue{ .ty = item_ty, .val = val };
    } else |err| switch (err) {
        error.NeededSourceLocation => {
            // Retry with a real source location so the error points at the prong.
            const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand);
            return TypedValue{
                .ty = item_ty,
                .val = try sema.resolveConstValue(block, src, item),
            };
        },
        else => |e| return e,
    }
}

/// Validates one `first...last` range prong: both endpoints must be
/// comptime-known, and the range must not overlap any previously recorded
/// item or range (duplicate check via `range_set`).
fn validateSwitchRange(
    sema: *Sema,
    block: *Block,
    range_set: *RangeSet,
    first_ref: Zir.Inst.Ref,
    last_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
    const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
    const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
    // `add` yields the source of a previously added overlapping entry, if any.
    const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src);
    return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}

/// Validates a single scalar item prong against the range set (duplicate check).
fn validateSwitchItem(
    sema: *Sema,
    block: *Block,
    range_set: *RangeSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
    // A single item is recorded as the degenerate range [item, item].
    const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src);
    return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}

/// Validates an enum item prong: the value must correspond to a declared enum
/// tag and must not have been matched by an earlier prong. `seen_fields` is
/// indexed by enum field index and records the prong that first matched each tag.
fn validateSwitchItemEnum(
    sema: *Sema,
    block: *Block,
    seen_fields: []?Module.SwitchProngSrc,
    item_ref: Zir.Inst.Ref,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
    const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
    const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val) orelse {
        const msg = msg: {
            const src = switch_prong_src.resolve(sema.gpa, block.src_decl,
            src_node_offset, .none);
            const msg = try sema.errMsg(
                block,
                src,
                "enum '{}' has no tag with value '{}'",
                .{ item_tv.ty, item_tv.val },
            );
            errdefer msg.destroy(sema.gpa);
            // Attach a note pointing at the enum's declaration site.
            try sema.mod.errNoteNonLazy(
                item_tv.ty.declSrcLoc(),
                msg,
                "enum declared here",
                .{},
            );
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    };
    const maybe_prev_src = seen_fields[field_index];
    seen_fields[field_index] = switch_prong_src;
    return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}

/// Emits a "duplicate switch value" error (with a note pointing at the previous
/// occurrence) when `maybe_prev_src` is non-null; otherwise does nothing.
fn validateSwitchDupe(
    sema: *Sema,
    block: *Block,
    maybe_prev_src: ?Module.SwitchProngSrc,
    switch_prong_src: Module.SwitchProngSrc,
    src_node_offset: i32,
) CompileError!void {
    const prev_prong_src = maybe_prev_src orelse return;
    const gpa = sema.gpa;
    // Both locations are only resolved now that an error is certain.
    const src = switch_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none);
    const prev_src = prev_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none);
    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            src,
            "duplicate switch value",
            .{},
        );
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(
            block,
            prev_src,
            msg,
            "previous value here",
            .{},
        );
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

/// Validates one prong of a switch on `bool` by counting matches of `true` and
/// `false`. More than two total matches implies some value was matched twice.
fn validateSwitchItemBool(
    sema: *Sema,
    block: *Block,
    true_count: *u8,
    false_count: *u8,
    item_ref: Zir.Inst.Ref,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
    if (item_val.toBool()) {
        true_count.* += 1;
    } else {
        false_count.* += 1;
    }
    if (true_count.* + false_count.* > 2) {
        const src = switch_prong_src.resolve(sema.gpa, block.src_decl, src_node_offset, .none);
        return sema.fail(block, src, "duplicate switch value", .{});
    }
}

// Maps each comptime switch item value to the prong where it first appeared,
// for duplicate detection over sparse value spaces (see validateSwitchItemSparse).
const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage);

/// Validates a prong item for a switch over a sparse value space: the item must
/// not have been seen before. Records the item in `seen_values` either way.
fn validateSwitchItemSparse(
    sema: *Sema,
    block: *Block,
    seen_values: *ValueSrcMap,
    item_ref: Zir.Inst.Ref,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
    // fetchPut returns the previous entry when the key was already present;
    // no previous entry means no duplicate.
    const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
    return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
}

/// Errors out if a switch on `operand_ty` uses range prongs; the callers invoke
/// this for operand types where ranges are not permitted.
fn validateSwitchNoRange(
    sema: *Sema,
    block: *Block,
    ranges_len: u32,
    operand_ty: Type,
    src_node_offset: i32,
) CompileError!void {
    if (ranges_len == 0)
        return;

    const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
    const range_src: LazySrcLoc = .{ .node_offset_switch_range = src_node_offset };

    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            operand_src,
            "ranges not allowed when switching on type '{}'",
            .{operand_ty},
        );
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(
            block,
            range_src,
            msg,
            "range here",
            .{},
        );
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

/// Implements the `@hasField` builtin: returns whether type `ty` has a field
/// named `field_name`. Slices are special-cased for their `ptr`/`len`
/// pseudo-fields.
fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs);
    const field_name = try sema.resolveConstString(block, name_src, extra.rhs);
    // Field names are only known once the type's fields are resolved.
    const ty = try sema.resolveTypeFields(block, ty_src, unresolved_ty);

    const has_field = hf: {
        if (ty.isSlice()) {
            if (mem.eql(u8, field_name, "ptr")) break :hf true;
            if (mem.eql(u8, field_name, "len")) break :hf true;
            break :hf false;
        }
        break :hf switch (ty.zigTypeTag()) {
            .Struct => ty.structFields().contains(field_name),
            .Union => ty.unionFields().contains(field_name),
            .Enum => ty.enumFields().contains(field_name),
            .Array => mem.eql(u8, field_name,
"len"), else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ ty, }), }; }; if (has_field) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const container_type = try sema.resolveType(block, lhs_src, extra.lhs); const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs); const namespace = container_type.getNamespace() orelse return sema.fail( block, lhs_src, "expected struct, enum, union, or opaque, found '{}'", .{container_type}, ); if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| { if (decl.is_pub or decl.getFileScope() == block.getFileScope()) { return Air.Inst.Ref.bool_true; } } return Air.Inst.Ref.bool_false; } fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const operand_src = inst_data.src(); const operand = inst_data.get(sema.code); const result = mod.importFile(block.getFileScope(), operand) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand}); }, else => { // TODO: these errors are file system errors; make sure an update() will // retry this and not cache the file system error, which may be transient. 
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; try mod.semaFile(result.file); const file_root_decl = result.file.root_decl.?; try mod.declareDeclDependency(sema.owner_decl, file_root_decl); return sema.addConstant(file_root_decl.ty, file_root_decl.val); } fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const name = try sema.resolveConstString(block, operand_src, inst_data.operand); const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); }, else => { // TODO: these errors are file system errors; make sure an update() will // retry this and not cache the file system error, which may be transient. return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(err) }); }, }; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1]; // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at // a `*Module.EmbedFile`. The purpose of this would be: // - If only the length is read and the bytes are not inspected by comptime code, // there can be an optimization where the codegen backend does a copy_file_range // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. 
embed_file.owner_decl = try anon_decl.finish( try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), ); return sema.analyzeDeclRef(embed_file.owner_decl); } fn zirRetErrValueCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; _ = inst; return sema.fail(block, sema.src, "TODO implement zirRetErrValueCode", .{}); } fn zirShl( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); // TODO coerce rhs if air_tag is not shl_sat const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs); const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { const lhs_ty = sema.typeOf(lhs); if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); const rhs_val = maybe_rhs_val orelse break :rs rhs_src; if (rhs_val.isUndef()) return sema.addConstUndef(lhs_ty); // If rhs is 0, return lhs without doing any calculations. 
if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(lhs_ty, lhs_val); } const val = switch (air_tag) { .shl_exact => return sema.fail(block, lhs_src, "TODO implement Sema for comptime shl_exact", .{}), .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod.getTarget()), .shl => try lhs_val.shl(rhs_val, sema.arena), else => unreachable, }; return sema.addConstant(lhs_ty, val); } else rs: { if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(sema.typeOf(lhs)); } break :rs lhs_src; }; // TODO: insert runtime safety check for shl_exact try sema.requireRuntimeBlock(block, runtime_src); return block.addBinOp(air_tag, lhs, rhs); } fn zirShr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { const lhs_ty = sema.typeOf(lhs); if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. 
            if (rhs_val.compareWithZero(.eq)) {
                return sema.addConstant(lhs_ty, lhs_val);
            }
            const val = try lhs_val.shr(rhs_val, sema.arena);
            return sema.addConstant(lhs_ty, val);
        }
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addBinOp(.shr, lhs, rhs);
}

/// Analyzes the binary bitwise ZIR instructions (`bit_and`, `bit_or`, `xor`):
/// peer-resolves the operand types, coerces both sides, folds at comptime when
/// both values are known, otherwise emits the runtime AIR op.
fn zirBitwise(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = sema.resolveInst(extra.lhs);
    const rhs = sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);

    // Find the common type and coerce both operands to it.
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } });
    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const scalar_type = if (resolved_type.zigTypeTag() == .Vector)
        resolved_type.elemType()
    else
        resolved_type;

    const scalar_tag = scalar_type.zigTypeTag();

    if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) {
        if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) {
            return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{
                lhs_ty.arrayLen(),
                rhs_ty.arrayLen(),
            });
        }
        return sema.fail(block, src, "TODO implement support for vectors in zirBitwise", .{});
    } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) {
        return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{
            lhs_ty,
            rhs_ty,
        });
    }

    // Bitwise binary ops are only defined for (comptime) integers.
    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    if (!is_int) {
        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) });
    }

    if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
        if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
            const result_val = switch (air_tag) {
                .bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena),
                .bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena),
                .xor => try lhs_val.bitwiseXor(rhs_val, sema.arena),
                else => unreachable,
            };
            return sema.addConstant(scalar_type, result_val);
        }
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}

/// Analyzes the `bit_not` ZIR instruction (`~x`): comptime-folds for integers
/// and integer vectors (element-wise), otherwise emits the runtime `not` op.
fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src = src; // TODO put this on the operand, not the '~'

    const operand = sema.resolveInst(inst_data.operand);
    const operand_type = sema.typeOf(operand);
    const scalar_type = operand_type.scalarType();

    if (scalar_type.zigTypeTag() != .Int) {
        return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{operand_type});
    }

    if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        const target = sema.mod.getTarget();
        if (val.isUndef()) {
            return sema.addConstUndef(scalar_type);
        } else if (operand_type.zigTypeTag() == .Vector) {
            // Fold element-wise over the vector value.
            const vec_len = try sema.usizeCast(block, operand_src, operand_type.arrayLen());
            var elem_val_buf: Value.ElemValueBuffer = undefined;
            const elems = try sema.arena.alloc(Value, vec_len);
            for (elems) |*elem, i| {
                const elem_val = val.elemValueBuffer(i, &elem_val_buf);
                elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target);
            }
            return sema.addConstant(
                operand_type,
                try Value.Tag.array.create(sema.arena, elems),
            );
        } else {
            const result_val = try
val.bitwiseNot(scalar_type, sema.arena, target); return sema.addConstant(scalar_type, result_val); } } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.not, operand_type, operand); } fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs_info = getArrayCatInfo(lhs_ty) orelse return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty}); const rhs_info = getArrayCatInfo(rhs_ty) orelse return sema.fail(block, rhs_src, "expected array, found '{}'", .{rhs_ty}); if (!lhs_info.elem_type.eql(rhs_info.elem_type)) { return sema.fail(block, rhs_src, "expected array of type '{}', found '{}'", .{ lhs_info.elem_type, rhs_ty }); } // When there is a sentinel mismatch, no sentinel on the result. The type system // will catch this if it is a problem. 
var res_sent: ?Value = null; if (rhs_info.sentinel != null and lhs_info.sentinel != null) { if (rhs_info.sentinel.?.eql(lhs_info.sentinel.?, lhs_info.elem_type)) { res_sent = lhs_info.sentinel.?; } } if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| { const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); const rhs_len = try sema.usizeCast(block, lhs_src, rhs_info.len); const final_len = lhs_len + rhs_len; const final_len_including_sent = final_len + @boolToInt(res_sent != null); const is_pointer = lhs_ty.zigTypeTag() == .Pointer; const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val; const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const buf = try anon_decl.arena().alloc(Value, final_len_including_sent); { var i: usize = 0; while (i < lhs_len) : (i += 1) { const val = try lhs_sub_val.elemValue(sema.arena, i); buf[i] = try val.copy(anon_decl.arena()); } } { var i: usize = 0; while (i < rhs_len) : (i += 1) { const val = try rhs_sub_val.elemValue(sema.arena, i); buf[lhs_len + i] = try val.copy(anon_decl.arena()); } } const ty = if (res_sent) |rs| ty: { buf[final_len] = try rs.copy(anon_decl.arena()); break :ty try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()), .sentinel = try rs.copy(anon_decl.arena()), }); } else try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()), }); const val = try Value.Tag.array.create(anon_decl.arena(), buf); const decl = try anon_decl.finish(ty, val); if (is_pointer) { return sema.analyzeDeclRef(decl); } else { return sema.analyzeDeclVal(block, .unneeded, decl); } } else { return sema.fail(block, lhs_src, "TODO 
runtime array_cat", .{}); } } else { return sema.fail(block, lhs_src, "TODO runtime array_cat", .{}); } } fn getArrayCatInfo(t: Type) ?Type.ArrayInfo { return switch (t.zigTypeTag()) { .Array => t.arrayInfo(), .Pointer => blk: { const ptrinfo = t.ptrInfo().data; if (ptrinfo.pointee_type.zigTypeTag() != .Array) return null; if (ptrinfo.size != .One) return null; break :blk ptrinfo.pointee_type.arrayInfo(); }, else => null, }; } fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const lhs_ty = sema.typeOf(lhs); const src: LazySrcLoc = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; // In `**` rhs has to be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize); const mulinfo = getArrayCatInfo(lhs_ty) orelse return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty}); const final_len_u64 = std.math.mul(u64, mulinfo.len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { const final_len = try sema.usizeCast(block, src, final_len_u64); const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null); const lhs_len = try sema.usizeCast(block, lhs_src, mulinfo.len); const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? 
else lhs_val; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const final_ty = if (mulinfo.sentinel) |sent| try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()), .sentinel = try sent.copy(anon_decl.arena()), }) else try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()), }); const buf = try anon_decl.arena().alloc(Value, final_len_including_sent); // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. const val = if (lhs_len == 1) blk: { const elem_val = try lhs_sub_val.elemValue(sema.arena, 0); const copied_val = try elem_val.copy(anon_decl.arena()); break :blk try Value.Tag.repeated.create(anon_decl.arena(), copied_val); } else blk: { // the actual loop var i: usize = 0; while (i < factor) : (i += 1) { var j: usize = 0; while (j < lhs_len) : (j += 1) { const val = try lhs_sub_val.elemValue(sema.arena, j); buf[lhs_len * i + j] = try val.copy(anon_decl.arena()); } } if (mulinfo.sentinel) |sent| { buf[final_len] = try sent.copy(anon_decl.arena()); } break :blk try Value.Tag.array.create(anon_decl.arena(), buf); }; const decl = try anon_decl.finish(final_ty, val); if (lhs_ty.zigTypeTag() == .Pointer) { return sema.analyzeDeclRef(decl); } else { return sema.analyzeDeclVal(block, .unneeded, decl); } } return sema.fail(block, lhs_src, "TODO runtime array_mul", .{}); } fn zirNegate( sema: *Sema, block: *Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const lhs_src = src; const rhs_src = src; // TODO better source location const lhs = sema.resolveInst(.zero); const rhs = sema.resolveInst(inst_data.operand); return sema.analyzeArithmetic(block, tag_override, lhs, 
rhs, src, lhs_src, rhs_src); } fn zirArithmetic( sema: *Sema, block: *Block, inst: Zir.Inst.Index, zir_tag: Zir.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; sema.src = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src); } fn zirOverflowArithmetic( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, zir_tag: Zir.Inst.Extended, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); const ptr = sema.resolveInst(extra.ptr); const lhs_ty = sema.typeOf(lhs); // Note, the types of lhs/rhs (also for shifting)/ptr are already correct as ensured by astgen. 
        const dest_ty = lhs_ty;
        if (dest_ty.zigTypeTag() != .Int) {
            return sema.fail(block, src, "expected integer type, found '{}'", .{dest_ty});
        }

        const target = sema.mod.getTarget();

        const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs);
        const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs);

        // Comptime-fold where possible; falling out of the switch means the
        // operation must be computed at runtime.
        const result: struct {
            overflowed: enum { yes, no, undef },
            wrapped: Air.Inst.Ref,
        } = result: {
            switch (zir_tag) {
                .add_with_overflow => {
                    // If either of the arguments is zero, `false` is returned and the other is stored
                    // to the result, even if it is undefined.
                    // Otherwise, if either of the arguments is undefined, undefined is returned.
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
                            break :result .{ .overflowed = .no, .wrapped = rhs };
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) {
                            break :result .{ .overflowed = .no, .wrapped = lhs };
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (maybe_rhs_val) |rhs_val| {
                            if (lhs_val.isUndef() or rhs_val.isUndef()) {
                                break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
                            }

                            const result = try lhs_val.intAddWithOverflow(rhs_val, dest_ty, sema.arena, target);
                            const inst = try sema.addConstant(dest_ty, result.wrapped_result);
                            break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
                        }
                    }
                },
                .sub_with_overflow => {
                    // If the rhs is zero, then the result is lhs and no overflow occurred.
                    // Otherwise, if either result is undefined, both results are undefined.
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
                        } else if (rhs_val.compareWithZero(.eq)) {
                            break :result .{ .overflowed = .no, .wrapped = lhs };
                        } else if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef()) {
                                break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
                            }

                            const result = try lhs_val.intSubWithOverflow(rhs_val, dest_ty, sema.arena, target);
                            const inst = try sema.addConstant(dest_ty, result.wrapped_result);
                            break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
                        }
                    }
                },
                .mul_with_overflow => {
                    // If either of the arguments is zero, the result is zero and no overflow occurred.
                    // If either of the arguments is one, the result is the other and no overflow occurred.
                    // Otherwise, if either of the arguments is undefined, both results are undefined.
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef()) {
                            if (lhs_val.compareWithZero(.eq)) {
                                break :result .{ .overflowed = .no, .wrapped = lhs };
                            } else if (lhs_val.compare(.eq, Value.one, dest_ty)) {
                                break :result .{ .overflowed = .no, .wrapped = rhs };
                            }
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (!rhs_val.isUndef()) {
                            if (rhs_val.compareWithZero(.eq)) {
                                break :result .{ .overflowed = .no, .wrapped = rhs };
                            } else if (rhs_val.compare(.eq, Value.one, dest_ty)) {
                                break :result .{ .overflowed = .no, .wrapped = lhs };
                            }
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (maybe_rhs_val) |rhs_val| {
                            if (lhs_val.isUndef() or rhs_val.isUndef()) {
                                break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
                            }

                            const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, target);
                            const inst = try sema.addConstant(dest_ty, result.wrapped_result);
                            break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
                        }
                    }
                },
                .shl_with_overflow => {
                    // If lhs is zero, the result is zero and no overflow occurred.
                    // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                    // Otherwise if either of the arguments is undefined, both results are undefined.
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
                            break :result .{ .overflowed = .no, .wrapped = lhs };
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) {
                            break :result .{ .overflowed = .no, .wrapped = lhs };
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (maybe_rhs_val) |rhs_val| {
                            if (lhs_val.isUndef() or rhs_val.isUndef()) {
                                break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
                            }

                            const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, target);
                            const inst = try sema.addConstant(dest_ty, result.wrapped_result);
                            break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
                        }
                    }
                },
                else => unreachable,
            }

            // Not comptime-foldable: emit a runtime AIR instruction that both
            // stores through `ptr` (via pl_op.operand) and yields the overflow bit.
            const air_tag: Air.Inst.Tag = switch (zir_tag) {
                .add_with_overflow => .add_with_overflow,
                .mul_with_overflow => .mul_with_overflow,
                .sub_with_overflow => .sub_with_overflow,
                .shl_with_overflow => .shl_with_overflow,
                else => unreachable,
            };

            try sema.requireRuntimeBlock(block, src);
            return block.addInst(.{
                .tag = air_tag,
                .data = .{ .pl_op = .{
                    .operand = ptr,
                    .payload = try sema.addExtra(Air.Bin{
                        .lhs = lhs,
                        .rhs = rhs,
                    }),
                } },
            });
        };

        // Comptime path: store the wrapped result and return the overflow flag.
        try sema.storePtr2(block, src, ptr, ptr_src, result.wrapped, src, .store);

        return switch (result.overflowed) {
            .yes => Air.Inst.Ref.bool_true,
            .no => Air.Inst.Ref.bool_false,
            .undef => try sema.addConstUndef(Type.initTag(.bool)),
        };
    }

    fn analyzeArithmetic(
        sema: *Sema,
        block: *Block,
        /// TODO performance investigation: make this comptime?
zir_tag: Zir.Inst.Tag, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ lhs_ty.arrayLen(), rhs_ty.arrayLen(), }); } return sema.fail(block, src, "TODO implement support for vectors in Sema.analyzeArithmetic", .{}); } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ lhs_ty, rhs_ty, }); } if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) { .One, .Slice => {}, .Many, .C => { const op_src = src; // TODO better source location const air_tag: Air.Inst.Tag = switch (zir_tag) { .add => .ptr_add, .sub => .ptr_sub, else => return sema.fail( block, op_src, "invalid pointer arithmetic operand: '{s}''", .{@tagName(zir_tag)}, ), }; return analyzePtrArithmetic(sema, block, op_src, lhs, rhs, air_tag, lhs_src, rhs_src); }, }; const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const scalar_type = if (resolved_type.zigTypeTag() == .Vector) resolved_type.elemType() else resolved_type; const scalar_tag = scalar_type.zigTypeTag(); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; if (!is_int and !(is_float and floatOpAllowed(zir_tag))) { return sema.fail(block, src, "invalid 
operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag), }); } const target = sema.mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs); const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: { switch (zir_tag) { .add => { // For integers: // If either of the operands are zero, then the other operand is // returned, even if it is undefined. // If either of the operands are undefined, it's a compile error // because there is a possible value for which the addition would // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return sema.addConstUndef(scalar_type); } } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { return sema.addConstUndef(scalar_type); } } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intAdd(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .add }; } else break :rs .{ .src = lhs_src, .air_tag = .add }; }, .addwrap => { // Integers only; floats are checked above. // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. 
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
                            return casted_rhs;
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return casted_lhs;
                        }
                        if (maybe_lhs_val) |lhs_val| {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.numberAddWrap(rhs_val, scalar_type, sema.arena, target),
                            );
                        } else break :rs .{ .src = lhs_src, .air_tag = .addwrap };
                    } else break :rs .{ .src = rhs_src, .air_tag = .addwrap };
                },
                .add_sat => {
                    // Integers only; floats are checked above.
                    // If either of the operands are zero, then the other operand is returned.
                    // If either of the operands are undefined, the result is undefined.
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
                            return casted_rhs;
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return casted_lhs;
                        }
                        if (maybe_lhs_val) |lhs_val| {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.intAddSat(rhs_val, scalar_type, sema.arena, target),
                            );
                        } else break :rs .{ .src = lhs_src, .air_tag = .add_sat };
                    } else break :rs .{ .src = rhs_src, .air_tag = .add_sat };
                },
                .sub => {
                    // For integers:
                    // If the rhs is zero, then the other operand is
                    // returned, even if it is undefined.
                    // If either of the operands are undefined, it's a compile error
                    // because there is a possible value for which the subtraction would
                    // overflow, causing illegal behavior.
                    // For floats: either operand being undef makes the result undef.
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            if (is_int) {
                                return sema.failWithUseOfUndef(block, rhs_src);
                            } else {
                                return sema.addConstUndef(scalar_type);
                            }
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return casted_lhs;
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef()) {
                            if (is_int) {
                                return sema.failWithUseOfUndef(block, lhs_src);
                            } else {
                                return sema.addConstUndef(scalar_type);
                            }
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            if (is_int) {
                                return sema.addConstant(
                                    scalar_type,
                                    try lhs_val.intSub(rhs_val, sema.arena),
                                );
                            } else {
                                return sema.addConstant(
                                    scalar_type,
                                    try lhs_val.floatSub(rhs_val, scalar_type, sema.arena),
                                );
                            }
                        } else break :rs .{ .src = rhs_src, .air_tag = .sub };
                    } else break :rs .{ .src = lhs_src, .air_tag = .sub };
                },
                .subwrap => {
                    // Integers only; floats are checked above.
                    // If the RHS is zero, then the other operand is returned, even if it is undefined.
                    // If either of the operands are undefined, the result is undefined.
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return casted_lhs;
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.numberSubWrap(rhs_val, scalar_type, sema.arena, target),
                            );
                        } else break :rs .{ .src = rhs_src, .air_tag = .subwrap };
                    } else break :rs .{ .src = lhs_src, .air_tag = .subwrap };
                },
                .sub_sat => {
                    // Integers only; floats are checked above.
                    // If the RHS is zero, result is LHS.
                    // If either of the operands are undefined, result is undefined.
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return casted_lhs;
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.intSubSat(rhs_val, scalar_type, sema.arena, target),
                            );
                        } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat };
                    } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat };
                },
                .div => {
                    // TODO: emit compile error when .div is used on integers and there would be an
                    // ambiguous result between div_floor and div_trunc.

                    // For integers:
                    // If the lhs is zero, then zero is returned regardless of rhs.
                    // If the rhs is zero, compile error for division by zero.
                    // If the rhs is undefined, compile error because there is a possible
                    // value (zero) for which the division would be illegal behavior.
                    // If the lhs is undefined:
                    //   * if lhs type is signed:
                    //     * if rhs is comptime-known and not -1, result is undefined
                    //     * if rhs is -1 or runtime-known, compile error because there is a
                    //        possible value (-min_int / -1)  for which division would be
                    //        illegal behavior.
                    //   * if lhs type is unsigned, undef is returned regardless of rhs.
                    // TODO: emit runtime safety for division by zero
                    //
                    // For floats:
                    // If the rhs is zero, compile error for division by zero.
                    // If the rhs is undefined, compile error because there is a possible
                    // value (zero) for which the division would be illegal behavior.
                    // If the lhs is undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { return sema.addConstUndef(scalar_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intDiv(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena), ); } } else { if (is_int) { break :rs .{ .src = rhs_src, .air_tag = .div_trunc }; } else { break :rs .{ .src = rhs_src, .air_tag = .div_float }; } } } else { if (is_int) { break :rs .{ .src = lhs_src, .air_tag = .div_trunc }; } else { break :rs .{ .src = lhs_src, .air_tag = .div_float }; } } }, .div_trunc => { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined: // * if lhs type is signed: // * if rhs is comptime-known and not -1, result is undefined // * if rhs is -1 or runtime-known, compile error because there is a // possible value (-min_int / -1) for which division would be // illegal behavior. // * if lhs type is unsigned, undef is returned regardless of rhs. // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. 
// If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { return sema.addConstUndef(scalar_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intDiv(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_trunc }; } else break :rs .{ .src = lhs_src, .air_tag = .div_trunc }; }, .div_floor => { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined: // * if lhs type is signed: // * if rhs is comptime-known and not -1, result is undefined // * if rhs is -1 or runtime-known, compile error because there is a // possible value (-min_int / -1) for which division would be // illegal behavior. // * if lhs type is unsigned, undef is returned regardless of rhs. // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. 
// If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { return sema.addConstUndef(scalar_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intDivFloor(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_floor }; } else break :rs .{ .src = lhs_src, .air_tag = .div_floor }; }, .div_exact => { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, compile error because there is a possible // value for which the division would result in a remainder. // TODO: emit runtime safety for if there is a remainder // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. 
// If the lhs is undefined, compile error because there is a possible // value for which the division would result in a remainder. if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } else { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { if (is_int) { // TODO: emit compile error if there is a remainder return sema.addConstant( scalar_type, try lhs_val.intDiv(rhs_val, sema.arena), ); } else { // TODO: emit compile error if there is a remainder return sema.addConstant( scalar_type, try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_exact }; } else break :rs .{ .src = lhs_src, .air_tag = .div_exact }; }, .mul => { // For integers: // If either of the operands are zero, the result is zero. // If either of the operands are one, the result is the other // operand, even if it is undefined. // If either of the operands are undefined, it's a compile error // because there is a possible value for which the addition would // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. 
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef()) {
                            if (lhs_val.compareWithZero(.eq)) {
                                return sema.addConstant(scalar_type, Value.zero);
                            }
                            if (lhs_val.compare(.eq, Value.one, scalar_type)) {
                                return casted_rhs;
                            }
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            if (is_int) {
                                return sema.failWithUseOfUndef(block, rhs_src);
                            } else {
                                return sema.addConstUndef(scalar_type);
                            }
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return sema.addConstant(scalar_type, Value.zero);
                        }
                        if (rhs_val.compare(.eq, Value.one, scalar_type)) {
                            return casted_lhs;
                        }
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef()) {
                                if (is_int) {
                                    return sema.failWithUseOfUndef(block, lhs_src);
                                } else {
                                    return sema.addConstUndef(scalar_type);
                                }
                            }
                            if (is_int) {
                                return sema.addConstant(
                                    scalar_type,
                                    try lhs_val.intMul(rhs_val, sema.arena),
                                );
                            } else {
                                return sema.addConstant(
                                    scalar_type,
                                    try lhs_val.floatMul(rhs_val, scalar_type, sema.arena),
                                );
                            }
                        } else break :rs .{ .src = lhs_src, .air_tag = .mul };
                    } else break :rs .{ .src = rhs_src, .air_tag = .mul };
                },
                .mulwrap => {
                    // Integers only; floats are handled above.
                    // If either of the operands are zero, result is zero.
                    // If either of the operands are one, result is the other operand.
                    // If either of the operands are undefined, result is undefined.
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef()) {
                            if (lhs_val.compareWithZero(.eq)) {
                                return sema.addConstant(scalar_type, Value.zero);
                            }
                            if (lhs_val.compare(.eq, Value.one, scalar_type)) {
                                return casted_rhs;
                            }
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return sema.addConstant(scalar_type, Value.zero);
                        }
                        if (rhs_val.compare(.eq, Value.one, scalar_type)) {
                            return casted_lhs;
                        }
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef()) {
                                return sema.addConstUndef(scalar_type);
                            }
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.numberMulWrap(rhs_val, scalar_type, sema.arena, target),
                            );
                        } else break :rs .{ .src = lhs_src, .air_tag = .mulwrap };
                    } else break :rs .{ .src = rhs_src, .air_tag = .mulwrap };
                },
                .mul_sat => {
                    // Integers only; floats are checked above.
                    // If either of the operands are zero, result is zero.
                    // If either of the operands are one, result is the other operand.
                    // If either of the operands are undefined, result is undefined.
                    if (maybe_lhs_val) |lhs_val| {
                        if (!lhs_val.isUndef()) {
                            if (lhs_val.compareWithZero(.eq)) {
                                return sema.addConstant(scalar_type, Value.zero);
                            }
                            if (lhs_val.compare(.eq, Value.one, scalar_type)) {
                                return casted_rhs;
                            }
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return sema.addConstant(scalar_type, Value.zero);
                        }
                        if (rhs_val.compare(.eq, Value.one, scalar_type)) {
                            return casted_lhs;
                        }
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef()) {
                                return sema.addConstUndef(scalar_type);
                            }
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.intMulSat(rhs_val, scalar_type, sema.arena, target),
                            );
                        } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
                    } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat };
                },
                .mod_rem => {
                    // For integers:
                    // Either operand being undef is a compile error because there exists
                    // a possible value (TODO what is it?) that would invoke illegal behavior.
                    // TODO: can lhs zero be handled better?
                    // TODO: can lhs undef be handled better?
                    //
                    // For floats:
                    // If the rhs is zero, compile error for division by zero.
                    // If the rhs is undefined, compile error because there is a possible
                    // value (zero) for which the division would be illegal behavior.
                    // If the lhs is undefined, result is undefined.
                    //
                    // For either one: if the result would be different between @mod and @rem,
                    // then emit a compile error saying you have to pick one.
                    // `%` is only unambiguous when both operands are known non-negative
                    // (or comptime-known); otherwise require @mod or @rem explicitly.
                    if (is_int) {
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef()) {
                                return sema.failWithUseOfUndef(block, lhs_src);
                            }
                            if (lhs_val.compareWithZero(.lt)) {
                                return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                            }
                        } else if (lhs_ty.isSignedInt()) {
                            return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            if (rhs_val.isUndef()) {
                                return sema.failWithUseOfUndef(block, rhs_src);
                            }
                            if (rhs_val.compareWithZero(.eq)) {
                                return sema.failWithDivideByZero(block, rhs_src);
                            }
                            if (rhs_val.compareWithZero(.lt)) {
                                return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                            }
                            if (maybe_lhs_val) |lhs_val| {
                                return sema.addConstant(
                                    scalar_type,
                                    try lhs_val.intRem(rhs_val, sema.arena),
                                );
                            }
                            break :rs .{ .src = lhs_src, .air_tag = .rem };
                        } else if (rhs_ty.isSignedInt()) {
                            return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                        } else {
                            break :rs .{ .src = rhs_src, .air_tag = .rem };
                        }
                    }
                    // float operands
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return sema.failWithDivideByZero(block, rhs_src);
                        }
                        if (rhs_val.compareWithZero(.lt)) {
                            return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                        }
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef() or lhs_val.compareWithZero(.lt)) {
                                return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                            }
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.floatRem(rhs_val, sema.arena),
                            );
                        } else {
                            return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                        }
                    } else {
                        return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                    }
                },
                .rem => {
                    // For integers:
                    // Either operand being undef is a compile error because there exists
                    // a possible value (TODO what is it?) that would invoke illegal behavior.
                    // TODO: can lhs zero be handled better?
                    // TODO: can lhs undef be handled better?
                    //
                    // For floats:
                    // If the rhs is zero, compile error for division by zero.
                    // If the rhs is undefined, compile error because there is a possible
                    // value (zero) for which the division would be illegal behavior.
                    // If the lhs is undefined, result is undefined.
                    if (is_int) {
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef()) {
                                return sema.failWithUseOfUndef(block, lhs_src);
                            }
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            if (rhs_val.isUndef()) {
                                return sema.failWithUseOfUndef(block, rhs_src);
                            }
                            if (rhs_val.compareWithZero(.eq)) {
                                return sema.failWithDivideByZero(block, rhs_src);
                            }
                            if (maybe_lhs_val) |lhs_val| {
                                return sema.addConstant(
                                    scalar_type,
                                    try lhs_val.intRem(rhs_val, sema.arena),
                                );
                            }
                            break :rs .{ .src = lhs_src, .air_tag = .rem };
                        } else {
                            break :rs .{ .src = rhs_src, .air_tag = .rem };
                        }
                    }
                    // float operands
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return sema.failWithDivideByZero(block, rhs_src);
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.floatRem(rhs_val, sema.arena),
                            );
                        } else break :rs .{ .src = rhs_src, .air_tag = .rem };
                    } else break :rs .{ .src = lhs_src, .air_tag = .rem };
                },
                .mod => {
                    // For integers:
                    // Either operand being undef is a compile error because there exists
                    // a possible value (TODO what is it?) that would invoke illegal behavior.
                    // TODO: can lhs zero be handled better?
                    // TODO: can lhs undef be handled better?
                    //
                    // For floats:
                    // If the rhs is zero, compile error for division by zero.
                    // If the rhs is undefined, compile error because there is a possible
                    // value (zero) for which the division would be illegal behavior.
                    // If the lhs is undefined, result is undefined.
                    if (is_int) {
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isUndef()) {
                                return sema.failWithUseOfUndef(block, lhs_src);
                            }
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            if (rhs_val.isUndef()) {
                                return sema.failWithUseOfUndef(block, rhs_src);
                            }
                            if (rhs_val.compareWithZero(.eq)) {
                                return sema.failWithDivideByZero(block, rhs_src);
                            }
                            if (maybe_lhs_val) |lhs_val| {
                                return sema.addConstant(
                                    scalar_type,
                                    try lhs_val.intMod(rhs_val, sema.arena),
                                );
                            }
                            break :rs .{ .src = lhs_src, .air_tag = .mod };
                        } else {
                            break :rs .{ .src = rhs_src, .air_tag = .mod };
                        }
                    }
                    // float operands
                    if (maybe_rhs_val) |rhs_val| {
                        if (rhs_val.isUndef()) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        }
                        if (rhs_val.compareWithZero(.eq)) {
                            return sema.failWithDivideByZero(block, rhs_src);
                        }
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        if (maybe_rhs_val) |rhs_val| {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.floatMod(rhs_val, sema.arena),
                            );
                        } else break :rs .{ .src = rhs_src, .air_tag = .mod };
                    } else break :rs .{ .src = lhs_src, .air_tag = .mod };
                },
                else => unreachable,
            }
        };

        // Not comptime-foldable: emit the runtime binary operation.
        try sema.requireRuntimeBlock(block, rs.src);
        return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs);
    }

    fn analyzePtrArithmetic(
        sema: *Sema,
        block: *Block,
        op_src: LazySrcLoc,
        ptr: Air.Inst.Ref,
        uncasted_offset: Air.Inst.Ref,
        air_tag: Air.Inst.Tag,
        ptr_src: LazySrcLoc,
        offset_src: LazySrcLoc,
    ) CompileError!Air.Inst.Ref {
        // TODO if the operand is comptime-known to be negative, or is a negative int,
        // coerce to isize instead of usize.
    const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
    // TODO adjust the return type according to alignment and other factors
    const runtime_src = rs: {
        if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| {
            if (try sema.resolveMaybeUndefVal(block, offset_src, offset)) |offset_val| {
                const ptr_ty = sema.typeOf(ptr);
                const new_ptr_ty = ptr_ty; // TODO modify alignment
                // Undef in either operand propagates undef to the result pointer.
                if (ptr_val.isUndef() or offset_val.isUndef()) {
                    return sema.addConstUndef(new_ptr_ty);
                }
                const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt());
                if (ptr_val.getUnsignedInt()) |addr| {
                    // Pointer with a known integer address: compute the new address
                    // directly from the element ABI size.
                    const target = sema.mod.getTarget();
                    const ptr_child_ty = ptr_ty.childType();
                    // A single-item pointer to an array indexes by the array's
                    // element type, not by the array itself.
                    const elem_ty = if (ptr_ty.isSinglePointer() and ptr_child_ty.zigTypeTag() == .Array)
                        ptr_child_ty.childType()
                    else
                        ptr_child_ty;

                    const elem_size = elem_ty.abiSize(target);
                    const new_addr = switch (air_tag) {
                        .ptr_add => addr + elem_size * offset_int,
                        .ptr_sub => addr - elem_size * offset_int,
                        else => unreachable,
                    };
                    const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr);
                    return sema.addConstant(new_ptr_ty, new_ptr_val);
                }
                if (air_tag == .ptr_sub) {
                    return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
                }
                // Comptime pointer without a known integer address: use elemPtr.
                const new_ptr_val = try ptr_val.elemPtr(sema.arena, offset_int);
                return sema.addConstant(new_ptr_ty, new_ptr_val);
            } else break :rs offset_src;
        } else break :rs ptr_src;
    };

    try sema.requireRuntimeBlock(block, runtime_src);
    return block.addBinOp(air_tag, ptr, offset);
}

/// Lowers a ZIR `load` instruction: dereferences the operand pointer.
fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node };
    const ptr = sema.resolveInst(inst_data.operand);
    return sema.analyzeLoad(block, src, ptr, ptr_src);
}

/// Lowers an inline assembly expression to an AIR `assembly` instruction.
/// Output/input/clobber counts are packed into 5-bit fields of `extended.small`.
fn zirAsm(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
    const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
    const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node };
    const outputs_len = @truncate(u5, extended.small);
    const inputs_len = @truncate(u5, extended.small >> 5);
    const clobbers_len = @truncate(u5, extended.small >> 10);

    if (extra.data.asm_source == 0) {
        // This can move to become an AstGen error after inline assembly improvements land
        // and stage1 code matches stage2 code.
        return sema.fail(block, src, "assembly code must use string literal syntax", .{});
    }

    if (outputs_len > 1) {
        return sema.fail(block, src, "TODO implement Sema for asm with more than 1 output", .{});
    }

    var extra_i = extra.end;
    var output_type_bits = extra.data.output_type_bits;

    const Output = struct { constraint: []const u8, ty: Type };
    const output: ?Output = if (outputs_len == 0) null else blk: {
        const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
        extra_i = output.end;

        // Each bit of output_type_bits records whether the corresponding
        // output uses `->` (type) syntax.
        const is_type = @truncate(u1, output_type_bits) != 0;
        output_type_bits >>= 1;

        if (!is_type) {
            return sema.fail(block, src, "TODO implement Sema for asm with non `->` output", .{});
        }

        const constraint = sema.code.nullTerminatedString(output.data.constraint);
        break :blk Output{
            .constraint = constraint,
            .ty = try sema.resolveType(block, ret_ty_src, output.data.operand),
        };
    };

    const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
    const inputs = try sema.arena.alloc([]const u8, inputs_len);

    for (args) |*arg, arg_i| {
        const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
        extra_i = input.end;

        const name = sema.code.nullTerminatedString(input.data.name);
        _ = name; // TODO: use the name

        arg.* = sema.resolveInst(input.data.operand);
        inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint);
    }

    const clobbers = try sema.arena.alloc([]const u8, clobbers_len);
    for (clobbers) |*name| {
        name.* = sema.code.nullTerminatedString(sema.code.extra[extra_i]);
        extra_i += 1;
    }

    // Inline asm is always a runtime construct.
    try sema.requireRuntimeBlock(block, src);
    const gpa = sema.gpa;
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Asm).Struct.fields.len + args.len);
    const asm_air = try block.addInst(.{
        .tag = .assembly,
        .data = .{ .ty_pl = .{
            .ty = if (output) |o| try sema.addType(o.ty) else Air.Inst.Ref.void_type,
            .payload = sema.addExtraAssumeCapacity(Air.Asm{
                .zir_index = inst,
            }),
        } },
    });
    sema.appendRefsAssumeCapacity(args);
    return asm_air;
}

/// Only called for equality operators. See also `zirCmp`.
fn zirCmpEq(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    op: std.math.CompareOperator,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const lhs = sema.resolveInst(extra.lhs);
    const rhs = sema.resolveInst(extra.rhs);

    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_ty_tag = lhs_ty.zigTypeTag();
    const rhs_ty_tag = rhs_ty.zigTypeTag();
    if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
        // null == null, null != null
        if (op == .eq) {
            return Air.Inst.Ref.bool_true;
        } else {
            return Air.Inst.Ref.bool_false;
        }
    }
    if (((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or
        rhs_ty_tag == .Null and lhs_ty_tag == .Optional))
    {
        // comparing null with optionals
        const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs;
        return sema.analyzeIsNull(block, src, opt_operand, op == .neq);
    }
    if (((lhs_ty_tag == .Null and rhs_ty.isCPtr()) or (rhs_ty_tag == .Null and lhs_ty.isCPtr()))) {
        // comparing null with C pointers
        const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs;
        return sema.analyzeIsNull(block, src, opt_operand, op == .neq);
    }
    if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
        // null compared against a non-nullable type is a compile error.
        const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
        return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type});
    }
    if (lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) {
        return sema.analyzeCmpUnionTag(block, rhs, rhs_src, lhs, lhs_src, op);
    }
    if (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union) {
        return sema.analyzeCmpUnionTag(block, lhs, lhs_src, rhs, rhs_src, op);
    }
    if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
        const runtime_src: LazySrcLoc = src: {
            if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lval| {
                if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rval| {
                    if (lval.isUndef() or rval.isUndef()) {
                        return sema.addConstUndef(Type.initTag(.bool));
                    }
                    // TODO optimisation opportunity: evaluate if mem.eql is faster with the names,
                    // or calling to Module.getErrorValue to get the values and then compare them is
                    // faster.
                    // Error values compare equal iff their names match.
                    const lhs_name = lval.castTag(.@"error").?.data.name;
                    const rhs_name = rval.castTag(.@"error").?.data.name;
                    if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) {
                        return Air.Inst.Ref.bool_true;
                    } else {
                        return Air.Inst.Ref.bool_false;
                    }
                } else {
                    break :src rhs_src;
                }
            } else {
                break :src lhs_src;
            }
        };
        try sema.requireRuntimeBlock(block, runtime_src);
        return block.addBinOp(air_tag, lhs, rhs);
    }
    if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) {
        // Types are always comptime-known; compare them for equality directly.
        const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs);
        const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs);
        if (lhs_as_type.eql(rhs_as_type) == (op == .eq)) {
            return Air.Inst.Ref.bool_true;
        } else {
            return Air.Inst.Ref.bool_false;
        }
    }
    return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true);
}

/// Compares a tagged union value against an enum literal by coercing both
/// sides to the union's tag type and comparing the tags.
fn analyzeCmpUnionTag(
    sema: *Sema,
    block: *Block,
    un: Air.Inst.Ref,
    un_src: LazySrcLoc,
    tag: Air.Inst.Ref,
    tag_src: LazySrcLoc,
    op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
    const union_ty = try sema.resolveTypeFields(block, un_src, sema.typeOf(un));
    const union_tag_ty = union_ty.unionTagType() orelse {
        // TODO note at declaration site that says "union foo is not tagged"
        return sema.fail(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
    };
    // Coerce both the union and the tag to the union's tag type, and then execute the
    // enum comparison codepath.
    const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src);
    const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);

    return sema.cmpSelf(block, coerced_union, coerced_tag, op, un_src, tag_src);
}

/// Only called for non-equality operators. See also `zirCmpEq`.
fn zirCmp(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const lhs = sema.resolveInst(extra.lhs);
    const rhs = sema.resolveInst(extra.rhs);
    return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, false);
}

/// Shared comparison analysis for both equality and relational operators.
/// Numeric operands take a dedicated path; everything else goes through peer
/// type resolution followed by `cmpSelf`.
fn analyzeCmp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
    is_equality_cmp: bool,
) CompileError!Air.Inst.Ref {
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) {
        // This operation allows any combination of integer and float types, regardless of the
        // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
        // numeric types.
        return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
    }
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } });
    if (!resolved_type.isSelfComparable(is_equality_cmp)) {
        return sema.fail(block, src, "{s} operator not allowed for type '{}'", .{
            @tagName(op), resolved_type,
        });
    }
    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
    return sema.cmpSelf(block, casted_lhs, casted_rhs, op, lhs_src, rhs_src);
}

/// Compares two operands that have already been coerced to the same type.
/// Folds when both sides are comptime-known; otherwise emits a runtime
/// comparison instruction (with a special fast path for bools).
fn cmpSelf(
    sema: *Sema,
    block: *Block,
    casted_lhs: Air.Inst.Ref,
    casted_rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const resolved_type = sema.typeOf(casted_lhs);
    const runtime_src: LazySrcLoc = src: {
        if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
            if (lhs_val.isUndef()) return sema.addConstUndef(resolved_type);
            if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
                if (rhs_val.isUndef()) return sema.addConstUndef(resolved_type);

                if (lhs_val.compare(op, rhs_val, resolved_type)) {
                    return Air.Inst.Ref.bool_true;
                } else {
                    return Air.Inst.Ref.bool_false;
                }
            } else {
                if (resolved_type.zigTypeTag() == .Bool) {
                    // We can lower bool eq/neq more efficiently.
                    return sema.runtimeBoolCmp(block, op, casted_rhs, lhs_val.toBool(), rhs_src);
                }
                break :src rhs_src;
            }
        } else {
            // For bools, we still check the other operand, because we can lower
            // bool eq/neq more efficiently.
            if (resolved_type.zigTypeTag() == .Bool) {
                if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
                    if (rhs_val.isUndef()) return sema.addConstUndef(resolved_type);
                    return sema.runtimeBoolCmp(block, op, casted_lhs, rhs_val.toBool(), lhs_src);
                }
            }
            break :src lhs_src;
        }
    };
    try sema.requireRuntimeBlock(block, runtime_src);

    const tag: Air.Inst.Tag = switch (op) {
        .lt => .cmp_lt,
        .lte => .cmp_lte,
        .eq => .cmp_eq,
        .gte => .cmp_gte,
        .gt => .cmp_gt,
        .neq => .cmp_neq,
    };
    // TODO handle vectors
    return block.addBinOp(tag, casted_lhs, casted_rhs);
}

/// cmp_eq (x, false) => not(x)
/// cmp_eq (x, true ) => x
/// cmp_neq(x, false) => x
/// cmp_neq(x, true ) => not(x)
fn runtimeBoolCmp(
    sema: *Sema,
    block: *Block,
    op: std.math.CompareOperator,
    lhs: Air.Inst.Ref,
    rhs: bool,
    runtime_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    if ((op == .neq) == rhs) {
        try sema.requireRuntimeBlock(block, runtime_src);
        return block.addTyOp(.not, Type.initTag(.bool), lhs);
    } else {
        return lhs;
    }
}

/// Implements `@sizeOf`: resolves the operand type's layout and returns its
/// ABI size as a comptime_int. Sizeless "types" are a compile error.
fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
    try sema.resolveTypeLayout(block, src, operand_ty);
    const target = sema.mod.getTarget();
    const abi_size = switch (operand_ty.zigTypeTag()) {
        .Fn => unreachable,
        .NoReturn,
        .Undefined,
        .Null,
        .BoundFn,
        .Opaque,
        => return sema.fail(block, src, "no size available for type '{}'", .{operand_ty}),
        // Comptime-only types occupy zero bytes at runtime.
        .Type,
        .EnumLiteral,
        .ComptimeFloat,
        .ComptimeInt,
        .Void,
        => 0,

        .Bool,
        .Int,
        .Float,
        .Pointer,
        .Array,
        .Struct,
        .Optional,
        .ErrorUnion,
        .ErrorSet,
        .Enum,
        .Union,
        .Vector,
        .Frame,
        .AnyFrame,
        => operand_ty.abiSize(target),
    };
    return sema.addIntUnsigned(Type.comptime_int, abi_size);
}

/// Implements `@bitSizeOf`: returns the operand type's size in bits as a
/// comptime_int.
fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
    const target = sema.mod.getTarget();
    const bit_size = operand_ty.bitSize(target);
    return sema.addIntUnsigned(Type.initTag(.comptime_int), bit_size);
}

/// Implements `@This`: returns the declaration owning the current namespace.
fn zirThis(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const this_decl = block.namespace.getDecl();
    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    return sema.analyzeDeclVal(block, src, this_decl);
}

/// Records a comptime-known value captured by a closure into the current
/// work-in-progress capture scope, keyed by the ZIR instruction index.
fn zirClosureCapture(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!void {
    // TODO: Compile error when closed over values are modified
    const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
    const tv = try sema.resolveInstConst(block, inst_data.src(), inst_data.operand);
    // Copies are made into the permanent arena since the capture outlives
    // this analysis.
    try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{
        .ty = try tv.ty.copy(sema.perm_arena),
        .val = try tv.val.copy(sema.perm_arena),
    });
}

/// Looks up a previously captured value by walking the capture scope chain
/// outward from the current declaration's scope.
fn zirClosureGet(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    // TODO CLOSURE: Test this with inline functions
    const inst_data = sema.code.instructions.items(.data)[inst].inst_node;
    var scope: *CaptureScope = block.src_decl.src_scope.?;
    // Note: The target closure must be in this scope list.
    // If it's not here, the zir is invalid, or the list is broken.
    const tv = while (true) {
        // Note: We don't need to add a dependency here, because
        // decls always depend on their lexical parents.
        if (scope.captures.getPtr(inst_data.inst)) |tv| {
            break tv;
        }
        scope = scope.parent.?;
    } else unreachable;

    return sema.addConstant(tv.ty, tv.val);
}

/// Implements `@returnAddress`: always a runtime instruction.
fn zirRetAddr(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    try sema.requireRuntimeBlock(block, src);
    return try block.addNoOp(.ret_addr);
}

/// Implements `@src`: not yet implemented in this stage.
fn zirBuiltinSrc(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    return sema.fail(block, src, "TODO: implement Sema.zirBuiltinSrc", .{});
}

/// Implements `@typeInfo`: constructs a comptime `std.builtin.TypeInfo`
/// union value describing the operand type. Each case produces a union value
/// tagged with the corresponding `std.builtin.TypeId`; tag-only cases carry
/// `unreachable_value` as a placeholder payload.
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ty = try sema.resolveType(block, src, inst_data.operand);
    const type_info_ty = try sema.getBuiltinType(block, src, "TypeInfo");
    const target = sema.mod.getTarget();

    switch (ty.zigTypeTag()) {
        .Type => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .Void => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .Bool => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .NoReturn => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .ComptimeFloat => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .ComptimeInt => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .Undefined => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .Null => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .EnumLiteral => return sema.addConstant(
            type_info_ty,
            try Value.Tag.@"union".create(sema.arena, .{
                .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)),
                .val = Value.initTag(.unreachable_value),
            }),
        ),
        .Fn => {
            const info = ty.fnInfo();
            // Payload fields mirror std.builtin.TypeInfo.Fn, in declaration order.
            const field_values = try sema.arena.alloc(Value, 6);
            // calling_convention: CallingConvention,
            field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc));
            // alignment: comptime_int,
            field_values[1] = try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target));
            // is_generic: bool,
            field_values[2] = if (info.is_generic) Value.initTag(.bool_true) else Value.initTag(.bool_false);
            // is_var_args: bool,
            field_values[3] = if (info.is_var_args) Value.initTag(.bool_true) else Value.initTag(.bool_false);
            // return_type: ?type,
            field_values[4] = try Value.Tag.ty.create(sema.arena, ty.fnReturnType());
            // args: []const FnArg,
            field_values[5] = Value.@"null"; // TODO

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .Int => {
            const info = ty.intInfo(target);
            const field_values = try sema.arena.alloc(Value, 2);
            // signedness: Signedness,
            field_values[0] = try Value.Tag.enum_field_index.create(
                sema.arena,
                @enumToInt(info.signedness),
            );
            // bits: comptime_int,
            field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits);

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .Float => {
            const field_values = try sema.arena.alloc(Value, 1);
            // bits: comptime_int,
            field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target));

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .Pointer => {
            const info = ty.ptrInfo().data;
            const field_values = try sema.arena.alloc(Value, 8);
            // size: Size,
            field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size));
            // is_const: bool,
            field_values[1] = if (!info.mutable) Value.initTag(.bool_true) else Value.initTag(.bool_false);
            // is_volatile: bool,
            field_values[2] = if (info.@"volatile") Value.initTag(.bool_true) else Value.initTag(.bool_false);
            // alignment: comptime_int,
            field_values[3] = try Value.Tag.int_u64.create(sema.arena, info.@"align");
            // address_space: AddressSpace
            field_values[4] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace"));
            // child: type,
            field_values[5] = try Value.Tag.ty.create(sema.arena, info.pointee_type);
            // is_allowzero: bool,
            field_values[6] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false);
            // sentinel: anytype,
            field_values[7] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Pointer)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .Array => {
            const info = ty.arrayInfo();
            const field_values = try sema.arena.alloc(Value, 3);
            // len: comptime_int,
            field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
            // child: type,
            field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
            // sentinel: anytype,
            field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .Optional => {
            const field_values = try sema.arena.alloc(Value, 1);
            // child: type,
            field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena));

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .ErrorUnion => {
            const field_values = try sema.arena.alloc(Value, 2);
            // error_set: type,
            field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet());
            // payload: type,
            field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload());

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .Enum => {
            // TODO: look into memoizing this result.

            var int_tag_type_buffer: Type.Payload.Bits = undefined;
            const int_tag_ty = try ty.intTagType(&int_tag_type_buffer).copy(sema.arena);

            const is_exhaustive = if (ty.isNonexhaustiveEnum()) Value.@"false" else Value.@"true";

            var fields_anon_decl = try block.startAnonDecl();
            defer fields_anon_decl.deinit();

            // Resolve the std.builtin.TypeInfo.EnumField type from the builtin namespace.
            const enum_field_ty = t: {
                const enum_field_ty_decl = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespace().?,
                    "EnumField",
                )).?;
                try sema.mod.declareDeclDependency(sema.owner_decl, enum_field_ty_decl);
                try sema.ensureDeclAnalyzed(enum_field_ty_decl);
                var buffer: Value.ToTypeBuffer = undefined;
                break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
            };

            const enum_fields = ty.enumFields();
            const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count());

            for (enum_field_vals) |*field_val, i| {
                var tag_val_payload: Value.Payload.U32 = .{
                    .base = .{ .tag = .enum_field_index },
                    .data = @intCast(u32, i),
                };
                const tag_val = Value.initPayload(&tag_val_payload.base);

                var buffer: Value.Payload.U64 = undefined;
                const int_val = try tag_val.enumToInt(ty, &buffer).copy(fields_anon_decl.arena());

                const name = enum_fields.keys()[i];
                // The field name is stored as a separate anonymous decl holding
                // a sentinel-terminated byte array.
                const name_val = v: {
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    const bytes = try anon_decl.arena().dupeZ(u8, name);
                    const new_decl = try anon_decl.finish(
                        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
                        try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
                    );
                    break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl);
                };

                const enum_field_fields = try fields_anon_decl.arena().create([2]Value);
                enum_field_fields.* = .{
                    // name: []const u8,
                    name_val,
                    // value: comptime_int,
                    int_val,
                };
                field_val.* = try Value.Tag.@"struct".create(fields_anon_decl.arena(), enum_field_fields);
            }

            const fields_val = v: {
                const new_decl = try fields_anon_decl.finish(
                    try Type.Tag.array.create(fields_anon_decl.arena(), .{
                        .len = enum_field_vals.len,
                        .elem_type = enum_field_ty,
                    }),
                    try Value.Tag.array.create(
                        fields_anon_decl.arena(),
                        try fields_anon_decl.arena().dupe(Value, enum_field_vals),
                    ),
                );
                break :v try Value.Tag.decl_ref.create(sema.arena, new_decl);
            };

            if (ty.getNamespace()) |namespace| {
                if (namespace.decls.count() != 0) {
                    return sema.fail(block, src, "TODO: implement zirTypeInfo for Enum which has declarations", .{});
                }
            }
            const decls_val = Value.initTag(.empty_array);

            const field_values = try sema.arena.create([5]Value);
            field_values.* = .{
                // layout: ContainerLayout,
                try Value.Tag.enum_field_index.create(
                    sema.arena,
                    @enumToInt(std.builtin.TypeInfo.ContainerLayout.Auto),
                ),

                // tag_type: type,
                try Value.Tag.ty.create(sema.arena, int_tag_ty),
                // fields: []const EnumField,
                fields_val,
                // decls: []const Declaration,
                decls_val,
                // is_exhaustive: bool,
                is_exhaustive,
            };

            return sema.addConstant(
                type_info_ty,
                try Value.Tag.@"union".create(sema.arena, .{
                    .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Enum)),
                    .val = try Value.Tag.@"struct".create(sema.arena, field_values),
                }),
            );
        },
        .Struct => return sema.fail(block, src, "TODO: implement zirTypeInfo for Struct", .{}),
        .ErrorSet => return sema.fail(block, src, "TODO: implement zirTypeInfo for ErrorSet", .{}),
        .Union => return sema.fail(block, src, "TODO: implement zirTypeInfo for Union", .{}),
        .BoundFn => @panic("TODO remove this type from the language and compiler"),
        .Opaque => return sema.fail(block, src, "TODO: implement zirTypeInfo for Opaque", .{}),
        .Frame => return sema.fail(block, src, "TODO: implement zirTypeInfo for Frame", .{}),
        .AnyFrame => return sema.fail(block, src, "TODO: implement zirTypeInfo for AnyFrame", .{}),
        .Vector => return sema.fail(block, src, "TODO: implement zirTypeInfo for Vector", .{}),
    }
}

/// Implements `@TypeOf` for a single operand: returns the operand's type.
fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const zir_datas = sema.code.instructions.items(.data);
    const inst_data = zir_datas[inst].un_node;
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    return sema.addType(operand_ty);
}

/// Computes the log2 integer type for a value operand's type (used for shift
/// amounts). See `log2IntType`.
fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    return sema.log2IntType(block, operand_ty, src);
}

/// Computes the log2 integer type for a type operand. See `log2IntType`.
fn zirLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveType(block, src, inst_data.operand);
    return sema.log2IntType(block, operand, src);
}

/// For an N-bit integer type, returns the unsigned integer type just wide
/// enough to hold a shift amount (u(ceil(log2(N)))). comptime_int maps to
/// comptime_int; non-integer types are a compile error.
fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Air.Inst.Ref {
    switch (operand.zigTypeTag()) {
        .ComptimeInt => return Air.Inst.Ref.comptime_int_type,
        .Int => {
            const bits = operand.bitSize(sema.mod.getTarget());
            // count = number of bits needed to represent (bits - 1).
            const count = if (bits == 0)
                0
            else blk: {
                var count: u16 = 0;
                var s = bits - 1;
                while (s != 0) : (s >>= 1) {
                    count += 1;
                }
                break :blk count;
            };
            const res = try Module.makeIntType(sema.arena, .unsigned, count);
            return sema.addType(res);
        },
        else => return sema.fail(
            block,
            src,
            "bit shifting operation expected integer type, found '{}'",
            .{operand},
        ),
    }
}

/// Implements multi-argument `@TypeOf`: peer-type-resolves all operands.
fn zirTypeofPeer(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
    const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
    const args = sema.code.refSlice(extra.end, extended.small);

    const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len);
    defer sema.gpa.free(inst_list);

    for (args) |arg_ref, i| {
        inst_list[i] = sema.resolveInst(arg_ref);
    }

    const result_type = try sema.resolvePeerTypes(block, src, inst_list, .{ .typeof_builtin_call_node_offset = extra.data.src_node });
    return sema.addType(result_type);
}

/// Lowers boolean `!`: folds when the operand is comptime-known, otherwise
/// emits a runtime `not`.
fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src = src; // TODO put this on the operand, not the `!`
    const uncasted_operand = sema.resolveInst(inst_data.operand);

    const bool_type = Type.initTag(.bool);
    const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src);
    if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        return if (val.isUndef())
            sema.addConstUndef(bool_type)
        else if (val.toBool())
            Air.Inst.Ref.bool_false
        else
            Air.Inst.Ref.bool_true;
    }
    try sema.requireRuntimeBlock(block, src);
    return block.addTyOp(.not, bool_type, operand);
}

/// Lowers short-circuiting `and`/`or`. When the lhs is comptime-known the
/// result is either the short-circuit constant or just the rhs body;
/// otherwise it lowers to a block containing a cond_br.
fn zirBoolBr(
    sema: *Sema,
    parent_block: *Block,
    inst: Zir.Inst.Index,
    is_bool_or: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const datas = sema.code.instructions.items(.data);
    const inst_data = datas[inst].bool_br;
    const lhs = sema.resolveInst(inst_data.lhs);
    const lhs_src = sema.src;
    const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const gpa = sema.gpa;

    if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
        // `true or ...` / `false and ...` short-circuit to a constant.
        if (lhs_val.toBool() == is_bool_or) {
            if (is_bool_or) {
                return Air.Inst.Ref.bool_true;
            } else {
                return Air.Inst.Ref.bool_false;
            }
        }
        // comptime-known left-hand side. No need for a block here; the result
        // is simply the rhs expression. Here we rely on there only being 1
        // break instruction (`break_inline`).
        return sema.resolveBody(parent_block, body);
    }

    // The block instruction's payload is patched in at the end, once the
    // child block's length is known.
    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = .{ .ty_pl = .{
            .ty = .bool_type,
            .payload = undefined,
        } },
    });

    var child_block = parent_block.makeSubBlock();
    child_block.runtime_loop = null;
    child_block.runtime_cond = lhs_src;
    child_block.runtime_index += 1;
    defer child_block.instructions.deinit(gpa);

    var then_block = child_block.makeSubBlock();
    defer then_block.instructions.deinit(gpa);

    var else_block = child_block.makeSubBlock();
    defer else_block.instructions.deinit(gpa);

    // For `or`, a true lhs takes the then-branch with constant true;
    // for `and`, a false lhs takes the else-branch with constant false.
    // The opposite branch evaluates the rhs body.
    const lhs_block = if (is_bool_or) &then_block else &else_block;
    const rhs_block = if (is_bool_or) &else_block else &then_block;

    const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false;
    _ = try lhs_block.addBr(block_inst, lhs_result);

    const rhs_result = try sema.resolveBody(rhs_block, body);
    _ = try rhs_block.addBr(block_inst, rhs_result);

    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        then_block.instructions.items.len + else_block.instructions.items.len +
        @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);

    const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
        .then_body_len = @intCast(u32, then_block.instructions.items.len),
        .else_body_len = @intCast(u32, else_block.instructions.items.len),
    });
    sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
    sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);

    _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
        .operand = lhs,
        .payload = cond_br_payload,
    } } });
sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, ); sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); try parent_block.instructions.append(gpa, block_inst); return Air.indexToRef(block_inst); } fn zirIsNonNull( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNull(block, src, operand, true); } fn zirIsNonNullPtr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = sema.resolveInst(inst_data.operand); if ((try sema.resolveMaybeUndefVal(block, src, ptr)) == null) { return block.addUnOp(.is_non_null_ptr, ptr); } const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNull(block, src, loaded, true); } fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNonErr(block, inst_data.src(), operand); } fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } fn zirCondbr( sema: *Sema, parent_block: *Block, inst: 
    Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);

    // The then body and else body are stored back-to-back in the extra array.
    const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
    const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];

    const uncasted_cond = sema.resolveInst(extra.data.condition);
    const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src);

    if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| {
        // Comptime-known condition: analyze only the taken branch.
        const body = if (cond_val.toBool()) then_body else else_body;
        _ = try sema.analyzeBody(parent_block, body);
        return always_noreturn;
    }

    const gpa = sema.gpa;

    // We'll re-use the sub block to save on memory bandwidth, and yank out the
    // instructions array in between using it for the then block and else block.
    var sub_block = parent_block.makeSubBlock();
    sub_block.runtime_loop = null;
    sub_block.runtime_cond = cond_src;
    sub_block.runtime_index += 1;
    defer sub_block.instructions.deinit(gpa);

    _ = try sema.analyzeBody(&sub_block, then_body);
    const true_instructions = sub_block.instructions.toOwnedSlice(gpa);
    defer gpa.free(true_instructions);

    _ = try sema.analyzeBody(&sub_block, else_body);
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        true_instructions.len + sub_block.instructions.items.len);
    _ = try parent_block.addInst(.{
        .tag = .cond_br,
        .data = .{ .pl_op = .{
            .operand = cond,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = @intCast(u32, true_instructions.len),
                .else_body_len = @intCast(u32, sub_block.instructions.items.len),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(true_instructions);
    sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items);
    return always_noreturn;
}

/// Analyzes ZIR `unreachable`: emits a runtime `unreachable`, with or
/// without a safety check per `inst_data.safety`.
fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable";
    const src = inst_data.src();
    try sema.requireRuntimeBlock(block, src);
    // TODO Add compile error for @optimizeFor occurring too late in a scope.
    try block.addUnreachable(src, inst_data.safety);
    return always_noreturn;
}

/// Analyzes ZIR `ret_err_value`: `return error.Foo` — returns a specific,
/// named error value from the current function.
fn zirRetErrValue(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const err_name = inst_data.get(sema.code);
    const src = inst_data.src();

    // Return the error code from the function.
    const kv = try sema.mod.getErrorValue(err_name);
    const result_inst = try sema.addConstant(
        try Type.Tag.error_set_single.create(sema.arena, kv.key),
        try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
    );
    return sema.analyzeRet(block, result_inst, src);
}

/// Analyzes ZIR `ret_coerce`: `return operand`, where the operand still
/// needs coercion to the function's return type (done in `analyzeRet`).
fn zirRetCoerce(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
    const operand = sema.resolveInst(inst_data.operand);
    const src = inst_data.src();

    return sema.analyzeRet(block, operand, src);
}

/// Analyzes ZIR `ret_node`: `return operand` with an AST-node source location.
fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = sema.resolveInst(inst_data.operand);
    const src = inst_data.src();

    return sema.analyzeRet(block, operand, src);
}

/// Analyzes ZIR `ret_load`: return via the result pointer. At comptime or
/// while inlining, the load is performed in Sema; otherwise a `ret_load`
/// AIR instruction is emitted.
fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ret_ptr = sema.resolveInst(inst_data.operand);

    if (block.is_comptime or block.inlining != null) {
        const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
        return sema.analyzeRet(block, operand, src);
    }
    try sema.requireRuntimeBlock(block, src);
    _ = try block.addUnOp(.ret_load, ret_ptr);
    return always_noreturn;
}

/// Common implementation of `return`: coerces the operand to the function's
/// return type, then either records a comptime result, rewrites the return
/// as a `br` when inlining, or emits a `ret` AIR instruction.
fn analyzeRet(
    sema: *Sema,
    block: *Block,
    uncasted_operand: Air.Inst.Ref,
    src: LazySrcLoc,
) CompileError!Zir.Inst.Index {
    // Special case for returning an error to an inferred error set; we need to
    // add the error tag to the inferred error set of the in-scope function, so
    // that the coercion below works correctly.
    if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
        if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
            const op_ty = sema.typeOf(uncasted_operand);
            switch (op_ty.zigTypeTag()) {
                .ErrorSet => {
                    try payload.data.addErrorSet(sema.gpa, op_ty);
                },
                .ErrorUnion => {
                    try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet());
                },
                else => {},
            }
        }
    }

    const operand = try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src);

    if (block.inlining) |inlining| {
        if (block.is_comptime) {
            inlining.comptime_result = operand;
            return error.ComptimeReturn;
        }
        // We are inlining a function call; rewrite the `ret` as a `break`.
        try inlining.merges.results.append(sema.gpa, operand);
        _ = try block.addBr(inlining.merges.block_inst, operand);
        return always_noreturn;
    }

    try sema.resolveTypeLayout(block, src, sema.fn_ret_ty);
    _ = try block.addUnOp(.ret, operand);
    return always_noreturn;
}

/// Returns whether this arithmetic ZIR tag is also defined for float operands.
fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
    // extend this switch as additional operators are implemented
    return switch (tag) {
        .add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true,
        else => false,
    };
}

/// Analyzes ZIR `ptr_type_simple`: a pointer type using only the flags that
/// fit in the compact encoding (no sentinel/align/addrspace/bit-range).
fn zirPtrTypeSimple(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].ptr_type_simple;
    const elem_type = try sema.resolveType(block, .unneeded, inst_data.elem_type);
    const ty = try Type.ptr(sema.arena, .{
        .pointee_type = elem_type,
        .@"addrspace" = .generic,
        .mutable = inst_data.is_mutable,
        // C pointers are implicitly allowzero.
        .@"allowzero" = inst_data.is_allowzero or inst_data.size == .C,
        .@"volatile" = inst_data.is_volatile,
        .size = inst_data.size,
    });
    return sema.addType(ty);
}

/// Analyzes ZIR `ptr_type`: a pointer type with optional trailing sentinel,
/// alignment, address-space and bit-range operands, selected by flag bits.
fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const src: LazySrcLoc = .unneeded;
    const inst_data = sema.code.instructions.items(.data)[inst].ptr_type;
    const extra =
    sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);

    // Trailing operands are present according to the flag bits, in this
    // fixed order: sentinel, align, addrspace, bit_start, bit_end.
    var extra_i = extra.end;

    const sentinel = if (inst_data.flags.has_sentinel) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk (try sema.resolveInstConst(block, .unneeded, ref)).val;
    } else null;

    const abi_align = if (inst_data.flags.has_align) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u32);
    } else 0;

    const address_space = if (inst_data.flags.has_addrspace) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.analyzeAddrspace(block, .unneeded, ref, .pointer);
    } else .generic;

    const bit_start = if (inst_data.flags.has_bit_range) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
    } else 0;

    const bit_end = if (inst_data.flags.has_bit_range) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
    } else 0;

    if (bit_end != 0 and bit_start >= bit_end * 8)
        return sema.fail(block, src, "bit offset starts after end of host integer", .{});

    const elem_type = try sema.resolveType(block, .unneeded, extra.data.elem_type);

    const ty = try Type.ptr(sema.arena, .{
        .pointee_type = elem_type,
        .sentinel = sentinel,
        .@"align" = abi_align,
        .@"addrspace" = address_space,
        .bit_offset = bit_start,
        .host_size = bit_end,
        .mutable = inst_data.flags.is_mutable,
        // C pointers are implicitly allowzero.
        .@"allowzero" = inst_data.flags.is_allowzero or inst_data.size == .C,
        .@"volatile" = inst_data.flags.is_volatile,
        .size = inst_data.size,
    });
    return sema.addType(ty);
}

/// Analyzes ZIR `struct_init_empty`: `T{}` with no fields given. Structs
/// are filled with default values (erroring on fields without one); arrays
/// and void produce their canonical empty value.
fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const obj_ty = try sema.resolveType(block, src, inst_data.operand);
    const gpa = sema.gpa;

    switch (obj_ty.zigTypeTag()) {
        .Struct => {
            // This logic must be synchronized with that in `zirStructInit`.
            const struct_ty = try sema.resolveTypeFields(block, src, obj_ty);
            const struct_obj = struct_ty.castTag(.@"struct").?.data;

            // The init values to use for the struct instance.
            const field_inits = try gpa.alloc(Air.Inst.Ref, struct_obj.fields.count());
            defer gpa.free(field_inits);

            // Chain all missing-field errors into a single error message.
            var root_msg: ?*Module.ErrorMsg = null;

            for (struct_obj.fields.values()) |field, i| {
                if (field.default_val.tag() == .unreachable_value) {
                    const field_name = struct_obj.fields.keys()[i];
                    const template = "missing struct field: {s}";
                    const args = .{field_name};
                    if (root_msg) |msg| {
                        try sema.errNote(block, src, msg, template, args);
                    } else {
                        root_msg = try sema.errMsg(block, src, template, args);
                    }
                } else {
                    field_inits[i] = try sema.addConstant(field.ty, field.default_val);
                }
            }
            return sema.finishStructInit(block, src, field_inits, root_msg, struct_obj, struct_ty, false);
        },
        .Array => {
            if (obj_ty.sentinel()) |sentinel| {
                const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel);
                return sema.addConstant(obj_ty, val);
            } else {
                return sema.addConstant(obj_ty, Value.initTag(.empty_array));
            }
        },
        .Void => return sema.addConstant(obj_ty, Value.void),
        else => unreachable,
    }
}

/// Analyzes ZIR `union_init_ptr`. Not yet implemented.
fn zirUnionInitPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirUnionInitPtr", .{});
}

/// Analyzes ZIR `struct_init` / `struct_init_ref`: `T{ .a = x, ... }` for
/// struct and union types. `is_ref` selects by-reference initialization.
fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const zir_datas = sema.code.instructions.items(.data);
    const inst_data = zir_datas[inst].pl_node;
    const extra =
    sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
    const src = inst_data.src();

    // The container type is recovered from the first item's `field_type`
    // instruction.
    const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
    const first_field_type_data = zir_datas[first_item.field_type].pl_node;
    const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data;
    const unresolved_struct_type = try sema.resolveType(block, src, first_field_type_extra.container_type);
    const resolved_ty = try sema.resolveTypeFields(block, src, unresolved_struct_type);

    if (resolved_ty.castTag(.@"struct")) |struct_payload| {
        // This logic must be synchronized with that in `zirStructInitEmpty`.
        const struct_obj = struct_payload.data;

        // Maps field index to field_type index of where it was already initialized.
        // For making sure all fields are accounted for and no fields are duplicated.
        const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count());
        defer gpa.free(found_fields);
        mem.set(Zir.Inst.Index, found_fields, 0);

        // The init values to use for the struct instance.
        const field_inits = try gpa.alloc(Air.Inst.Ref, struct_obj.fields.count());
        defer gpa.free(field_inits);

        var field_i: u32 = 0;
        var extra_index = extra.end;

        while (field_i < extra.data.fields_len) : (field_i += 1) {
            const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
            extra_index = item.end;

            const field_type_data = zir_datas[item.data.field_type].pl_node;
            const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node };
            const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
            const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
            const field_index = struct_obj.fields.getIndex(field_name) orelse
                return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
            if (found_fields[field_index] != 0) {
                // Already initialized: report both source locations.
                const other_field_type = found_fields[field_index];
                const other_field_type_data = zir_datas[other_field_type].pl_node;
                const other_field_src: LazySrcLoc = .{ .node_offset_back2tok = other_field_type_data.src_node };
                const msg = msg: {
                    const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(block, other_field_src, msg, "other field here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            found_fields[field_index] = item.data.field_type;
            field_inits[field_index] = sema.resolveInst(item.data.init);
        }

        // Chain all missing-field errors into a single error message.
        var root_msg: ?*Module.ErrorMsg = null;

        // Fill in defaults for fields not mentioned in the literal.
        for (found_fields) |field_type_inst, i| {
            if (field_type_inst != 0) continue;

            // Check if the field has a default init.
            const field = struct_obj.fields.values()[i];
            if (field.default_val.tag() == .unreachable_value) {
                const field_name = struct_obj.fields.keys()[i];
                const template = "missing struct field: {s}";
                const args = .{field_name};
                if (root_msg) |msg| {
                    try sema.errNote(block, src, msg, template, args);
                } else {
                    root_msg = try sema.errMsg(block, src, template, args);
                }
            } else {
                field_inits[i] = try sema.addConstant(field.ty, field.default_val);
            }
        }
        return sema.finishStructInit(block, src, field_inits, root_msg, struct_obj, resolved_ty, is_ref);
    } else if (resolved_ty.cast(Type.Payload.Union)) |union_payload| {
        const union_obj = union_payload.data;

        if (extra.data.fields_len != 1) {
            return sema.fail(block, src, "union initialization expects exactly one field", .{});
        }

        const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end);

        const field_type_data = zir_datas[item.data.field_type].pl_node;
        const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node };
        const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
        const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
        const field_index_usize = union_obj.fields.getIndex(field_name) orelse
            return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
        const field_index = @intCast(u32, field_index_usize);

        if (is_ref) {
            return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true union", .{});
        }

        const init_inst = sema.resolveInst(item.data.init);
        if (try sema.resolveMaybeUndefVal(block, field_src, init_inst)) |val| {
            // Comptime-known payload: build the union value directly.
            const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
            return sema.addConstant(
                resolved_ty,
                try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }),
            );
        }
        return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known union values", .{});
    }
    unreachable;
}

/// Finishes struct initialization once every field's init value is known:
/// reports accumulated missing-field errors (with a note at the struct
/// declaration), then materializes a comptime constant, or fails with a
/// TODO for runtime-known values.
fn finishStructInit(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    field_inits: []const
    Air.Inst.Ref,
    root_msg: ?*Module.ErrorMsg,
    struct_obj: *Module.Struct,
    struct_ty: Type,
    is_ref: bool,
) !Air.Inst.Ref {
    const gpa = sema.gpa;

    if (root_msg) |msg| {
        // Attach a note pointing at the struct declaration, then fail with
        // the accumulated missing-field errors.
        const fqn = try struct_obj.getFullyQualifiedName(gpa);
        defer gpa.free(fqn);
        try sema.mod.errNoteNonLazy(
            struct_obj.srcLoc(),
            msg,
            "struct '{s}' declared here",
            .{fqn},
        );
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (is_ref) {
        return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true", .{});
    }

    const is_comptime = for (field_inits) |field_init| {
        if (!(try sema.isComptimeKnown(block, src, field_init))) {
            break false;
        }
    } else true;

    if (is_comptime) {
        const values = try sema.arena.alloc(Value, field_inits.len);
        for (field_inits) |field_init, i| {
            // All inits were verified comptime-known above.
            values[i] = (sema.resolveMaybeUndefVal(block, src, field_init) catch unreachable).?;
        }
        return sema.addConstant(struct_ty, try Value.Tag.@"struct".create(sema.arena, values));
    }

    return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{});
}

/// Analyzes ZIR `struct_init_anon`. Not yet implemented.
fn zirStructInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    _ = is_ref;
    return sema.fail(block, src, "TODO: Sema.zirStructInitAnon", .{});
}

/// Analyzes ZIR `array_init` / `array_init_ref`: `[_]T{ a, b, ... }`.
/// Fully comptime-known elements are baked into an anonymous decl constant;
/// otherwise an `alloc` is emitted and each element stored.
fn zirArrayInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
    const args = sema.code.refSlice(extra.end, extra.data.operands_len);
    assert(args.len != 0);

    const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len);
    defer gpa.free(resolved_args);

    for (args) |arg, i| resolved_args[i] = sema.resolveInst(arg);

    // The element type is inferred from the first element.
    const elem_ty = sema.typeOf(resolved_args[0]);
    const array_ty = try Type.Tag.array.create(sema.arena, .{
        .len = resolved_args.len,
        .elem_type = elem_ty,
    });

    // Find the first element that is not comptime-known, if any.
    const opt_runtime_src: ?LazySrcLoc = for (resolved_args) |arg| {
        const arg_src = src; // TODO better source location
        const comptime_known = try sema.isComptimeKnown(block, arg_src, arg);
        if (!comptime_known) break arg_src;
    } else null;

    const runtime_src = opt_runtime_src orelse {
        // Fully comptime-known: bake the array into an anonymous decl.
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();

        const elem_vals = try anon_decl.arena().alloc(Value, resolved_args.len);
        for (resolved_args) |arg, i| {
            // We checked that all args are comptime above.
            const arg_val = (sema.resolveMaybeUndefVal(block, src, arg) catch unreachable).?;
            elem_vals[i] = try arg_val.copy(anon_decl.arena());
        }

        const val = try Value.Tag.array.create(anon_decl.arena(), elem_vals);
        const decl = try anon_decl.finish(try array_ty.copy(anon_decl.arena()), val);
        if (is_ref) {
            return sema.analyzeDeclRef(decl);
        } else {
            return sema.analyzeDeclVal(block, .unneeded, decl);
        }
    };

    try sema.requireRuntimeBlock(block, runtime_src);
    try sema.resolveTypeLayout(block, src, elem_ty);

    const alloc_ty = try Type.ptr(sema.arena, .{
        .pointee_type = array_ty,
        .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
    });
    const alloc = try block.addTy(.alloc, alloc_ty);

    // Store each element through an element pointer.
    for (resolved_args) |arg, i| {
        const index = try sema.addIntUnsigned(Type.initTag(.u64), i);
        const elem_ptr = try block.addBinOp(.ptr_elem_ptr, alloc, index);
        _ = try block.addBinOp(.store, elem_ptr, arg);
    }

    if (is_ref) {
        return alloc;
    } else {
        return sema.analyzeLoad(block, .unneeded, alloc, .unneeded);
    }
}

/// Analyzes ZIR `array_init_anon`. Not yet implemented.
fn zirArrayInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    _ = is_ref;
    return sema.fail(block, src, "TODO: Sema.zirArrayInitAnon", .{});
}

/// Analyzes ZIR `field_type_ref`. Not yet implemented.
fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src =
    inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirFieldTypeRef", .{});
}

/// Analyzes ZIR `field_type`: resolves the type of the named field of a
/// struct or union.
fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
    const src = inst_data.src();
    const field_name = sema.code.nullTerminatedString(extra.name_start);
    const unresolved_ty = try sema.resolveType(block, src, extra.container_type);
    const resolved_ty = try sema.resolveTypeFields(block, src, unresolved_ty);
    switch (resolved_ty.zigTypeTag()) {
        .Struct => {
            const struct_obj = resolved_ty.castTag(.@"struct").?.data;
            const field = struct_obj.fields.get(field_name) orelse
                return sema.failWithBadStructFieldAccess(block, struct_obj, src, field_name);
            return sema.addType(field.ty);
        },
        .Union => {
            const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
            const field = union_obj.fields.get(field_name) orelse
                return sema.failWithBadUnionFieldAccess(block, union_obj, src, field_name);
            return sema.addType(field.ty);
        },
        else => return sema.fail(block, src, "expected struct or union; found '{}'", .{
            resolved_ty,
        }),
    }
}

/// Analyzes ZIR extended `error_return_trace`. Not yet implemented.
fn zirErrorReturnTrace(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    return sema.fail(block, src, "TODO: Sema.zirErrorReturnTrace", .{});
}

/// Analyzes ZIR extended `frame`. Not yet implemented.
fn zirFrame(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    return sema.fail(block, src, "TODO: Sema.zirFrame", .{});
}

/// Analyzes ZIR extended `frame_address`. Not yet implemented.
fn zirFrameAddress(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
    return sema.fail(block, src, "TODO: Sema.zirFrameAddress", .{});
}

/// Analyzes ZIR `align_of`: `@alignOf(T)`. Resolves the type's layout and
/// returns its ABI alignment as a comptime_int.
fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ty = try sema.resolveType(block, operand_src, inst_data.operand);
    const resolved_ty = try sema.resolveTypeFields(block, operand_src, ty);
    try sema.resolveTypeLayout(block, operand_src, resolved_ty);
    const target = sema.mod.getTarget();
    const abi_align = resolved_ty.abiAlignment(target);
    return sema.addIntUnsigned(Type.comptime_int, abi_align);
}

/// Analyzes ZIR `bool_to_int`: `@boolToInt`, producing `0` or `1` and
/// folding comptime-known operands.
fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = sema.resolveInst(inst_data.operand);
    if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        if (val.isUndef()) return sema.addConstUndef(Type.initTag(.u1));
        const bool_ints = [2]Air.Inst.Ref{ .zero, .one };
        return bool_ints[@boolToInt(val.toBool())];
    }
    return block.addUnOp(.bool_to_int, operand);
}

/// Analyzes ZIR `error_name`. Not yet implemented.
fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirErrorName", .{});
}

/// Analyzes unary float math builtins. Not yet implemented.
fn zirUnaryMath(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirUnaryMath", .{});
}

/// Analyzes ZIR `tag_name`: `@tagName` of an enum value or tagged union.
fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const src = inst_data.src();
    const operand =
    sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const enum_ty = switch (operand_ty.zigTypeTag()) {
        .Enum => operand_ty,
        // For unions, operate on the tag type, which must exist.
        .Union => operand_ty.unionTagType() orelse {
            const decl = operand_ty.getOwnerDecl();
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "union '{s}' is untagged", .{
                    decl.name,
                });
                errdefer msg.destroy(sema.gpa);
                try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        else => return sema.fail(block, operand_src, "expected enum or union; found {}", .{
            operand_ty,
        }),
    };
    const enum_decl = enum_ty.getOwnerDecl();
    const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src);
    if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| {
        // Comptime-known tag: produce the field name as a string literal.
        const field_index = enum_ty.enumTagFieldIndex(val) orelse {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "no field with value {} in enum '{s}'", .{
                    casted_operand, enum_decl.name,
                });
                errdefer msg.destroy(sema.gpa);
                try sema.mod.errNoteNonLazy(enum_decl.srcLoc(), msg, "declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        };
        const field_name = enum_ty.enumFieldName(field_index);
        return sema.addStrLit(block, field_name);
    }
    // In case the value is runtime-known, we have an AIR instruction for this instead
    // of trying to lower it in Sema because an optimization pass may result in the operand
    // being comptime-known, which would let us elide the `tag_name` AIR instruction.
    return block.addUnOp(.tag_name, casted_operand);
}

/// Analyzes ZIR `reify`: `@Type(info)`. Converts a comptime-known
/// `std.builtin.TypeInfo` value back into a type. Many cases are still TODO.
fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const type_info_ty = try sema.resolveBuiltinTypeFields(block, src, "TypeInfo");
    const uncasted_operand = sema.resolveInst(inst_data.operand);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
    const val = try sema.resolveConstValue(block, operand_src, type_info);
    const union_val = val.cast(Value.Payload.Union).?.data;
    const tag_ty = type_info_ty.unionTagType().?;
    // Dispatch on which TypeInfo union field is active.
    const tag_index = tag_ty.enumTagFieldIndex(union_val.tag).?;
    switch (@intToEnum(std.builtin.TypeId, tag_index)) {
        .Type => return Air.Inst.Ref.type_type,
        .Void => return Air.Inst.Ref.void_type,
        .Bool => return Air.Inst.Ref.bool_type,
        .NoReturn => return Air.Inst.Ref.noreturn_type,
        .ComptimeFloat => return Air.Inst.Ref.comptime_float_type,
        .ComptimeInt => return Air.Inst.Ref.comptime_int_type,
        .Undefined => return Air.Inst.Ref.undefined_type,
        .Null => return Air.Inst.Ref.null_type,
        .AnyFrame => return Air.Inst.Ref.anyframe_type,
        .EnumLiteral => return Air.Inst.Ref.enum_literal_type,
        .Int => {
            const struct_val = union_val.val.castTag(.@"struct").?.data;
            // TODO use reflection instead of magic numbers here
            const signedness_val = struct_val[0];
            const bits_val = struct_val[1];

            const signedness = signedness_val.toEnum(std.builtin.Signedness);
            const bits = @intCast(u16, bits_val.toUnsignedInt());
            const ty = switch (signedness) {
                .signed => try Type.Tag.int_signed.create(sema.arena, bits),
                .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits),
            };
            return sema.addType(ty);
        },
        .Vector => {
            const struct_val = union_val.val.castTag(.@"struct").?.data;
            // TODO use reflection instead of magic numbers here
            const len_val = struct_val[0];
            const child_val =
                struct_val[1];

            const len = len_val.toUnsignedInt();
            var buffer: Value.ToTypeBuffer = undefined;
            const child_ty = child_val.toType(&buffer);

            const ty = try Type.vector(sema.arena, len, child_ty);
            return sema.addType(ty);
        },
        .Float => return sema.fail(block, src, "TODO: Sema.zirReify for Float", .{}),
        .Pointer => return sema.fail(block, src, "TODO: Sema.zirReify for Pointer", .{}),
        .Array => return sema.fail(block, src, "TODO: Sema.zirReify for Array", .{}),
        .Struct => return sema.fail(block, src, "TODO: Sema.zirReify for Struct", .{}),
        .Optional => return sema.fail(block, src, "TODO: Sema.zirReify for Optional", .{}),
        .ErrorUnion => return sema.fail(block, src, "TODO: Sema.zirReify for ErrorUnion", .{}),
        .ErrorSet => return sema.fail(block, src, "TODO: Sema.zirReify for ErrorSet", .{}),
        .Enum => return sema.fail(block, src, "TODO: Sema.zirReify for Enum", .{}),
        .Union => return sema.fail(block, src, "TODO: Sema.zirReify for Union", .{}),
        .Fn => return sema.fail(block, src, "TODO: Sema.zirReify for Fn", .{}),
        .BoundFn => @panic("TODO delete BoundFn from the language"),
        .Opaque => return sema.fail(block, src, "TODO: Sema.zirReify for Opaque", .{}),
        .Frame => return sema.fail(block, src, "TODO: Sema.zirReify for Frame", .{}),
    }
}

/// Analyzes ZIR `type_name`: `@typeName(T)`, producing a reference to a
/// null-terminated string baked into an anonymous decl.
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ty = try sema.resolveType(block, ty_src, inst_data.operand);

    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();

    const bytes = try ty.nameAlloc(anon_decl.arena());

    const new_decl = try anon_decl.finish(
        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
        // Include the null terminator in the stored bytes.
        try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
    );
    return sema.analyzeDeclRef(new_decl);
}

/// Analyzes ZIR `frame_type`. Not yet implemented.
fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirFrameType", .{});
}

/// Analyzes ZIR `frame_size`. Not yet implemented.
fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirFrameSize", .{});
}

/// Analyzes ZIR `float_to_int`: `@floatToInt(T, x)`. Folds comptime-known
/// operands, failing when the value cannot be stored in the target type.
fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const dest_ty = try sema.resolveType(block, ty_src, extra.lhs);
    const operand = sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);

    // Destination must be an integer type; operand must be a float type.
    _ = try sema.checkIntType(block, ty_src, dest_ty);
    try sema.checkFloatType(block, operand_src, operand_ty);

    if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        const target = sema.mod.getTarget();
        const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) {
            error.FloatCannotFit => {
                return sema.fail(block, operand_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty });
            },
            else => |e| return e,
        };
        return sema.addConstant(dest_ty, result_val);
    }

    try sema.requireRuntimeBlock(block, operand_src);
    return block.addTyOp(.float_to_int, dest_ty, operand);
}

/// Analyzes ZIR `int_to_float`: `@intToFloat(T, x)`, folding comptime-known
/// operands.
fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra =
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const dest_ty = try sema.resolveType(block, ty_src, extra.lhs);
    const operand = sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);

    try sema.checkFloatType(block, ty_src, dest_ty);
    _ = try sema.checkIntType(block, operand_src, operand_ty);

    // A comptime-known integer becomes a float constant directly.
    if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        const target = sema.mod.getTarget();
        const result_val = try val.intToFloat(sema.arena, dest_ty, target);
        return sema.addConstant(dest_ty, result_val);
    }

    try sema.requireRuntimeBlock(block, operand_src);
    return block.addTyOp(.int_to_float, dest_ty, operand);
}

/// Implements `@intToPtr`. Coerces the operand to `usize`, validates the
/// destination pointer type, and enforces address-zero and alignment
/// constraints — at comptime when the address is known, otherwise via
/// emitted runtime safety checks when safety is enabled.
fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand_res = sema.resolveInst(extra.rhs);
    const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src);

    const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const type_res = try sema.resolveType(block, src, extra.lhs);
    if (type_res.zigTypeTag() != .Pointer)
        return sema.fail(block, type_src, "expected pointer, found '{}'", .{type_res});
    const ptr_align = type_res.ptrAlignment(sema.mod.getTarget());

    if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
        // Comptime-known address: validate now and produce a constant.
        const addr = val.toUnsignedInt();
        if (!type_res.isAllowzeroPtr() and addr == 0)
            return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{type_res});
        if (addr != 0 and addr % ptr_align != 0)
            return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{type_res});

        const val_payload = try sema.arena.create(Value.Payload.U64);
        val_payload.* = .{
            .base = .{ .tag = .int_u64 },
            .data = addr,
        };
        return sema.addConstant(type_res, Value.initPayload(&val_payload.base));
    }

    try sema.requireRuntimeBlock(block, src);
    if (block.wantSafety()) {
        // Non-allowzero pointers must not hold address zero at runtime.
        if (!type_res.isAllowzeroPtr()) {
            const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
            try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
        }
        // Alignment check: (addr & (align - 1)) must be zero.
        if (ptr_align > 1) {
            const val_payload = try sema.arena.create(Value.Payload.U64);
            val_payload.* = .{
                .base = .{ .tag = .int_u64 },
                .data = ptr_align - 1,
            };
            const align_minus_1 = try sema.addConstant(
                Type.usize,
                Value.initPayload(&val_payload.base),
            );
            const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
            const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
            try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment);
        }
    }
    return block.addBitCast(type_res, operand_coerced);
}

/// Implements `@errSetCast`; not implemented yet.
fn zirErrSetCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirErrSetCast", .{});
}

/// Implements `@ptrCast`. Both operand and destination must be pointer
/// types; the actual conversion is delegated to `coerceCompatiblePtrs`.
fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
    const operand = sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    if (operand_ty.zigTypeTag() != .Pointer) {
        return sema.fail(block, operand_src, "expected pointer, found {s} type '{}'", .{
@tagName(operand_ty.zigTypeTag()), operand_ty,
        });
    }
    if (dest_ty.zigTypeTag() != .Pointer) {
        return sema.fail(block, dest_ty_src, "expected pointer, found {s} type '{}'", .{
            @tagName(dest_ty.zigTypeTag()), dest_ty,
        });
    }
    return sema.coerceCompatiblePtrs(block, dest_ty, operand, operand_src);
}

/// Implements `@truncate`. Both types must be integers with matching
/// signedness and the destination must not be wider than the source;
/// comptime-known operands are truncated here, otherwise a `trunc` AIR
/// instruction is emitted.
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
    const operand = sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty);
    const src_is_comptime_int = try sema.checkIntType(block, operand_src, operand_ty);

    // Truncating to comptime_int is just a coercion (no bits to drop).
    if (dest_is_comptime_int) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    }
    const target = sema.mod.getTarget();
    const dest_info = dest_ty.intInfo(target);

    // A zero-bit destination has only one possible value.
    if (dest_info.bits == 0) {
        return sema.addConstant(dest_ty, Value.zero);
    }

    if (!src_is_comptime_int) {
        const src_info = operand_ty.intInfo(target);
        if (src_info.bits == 0) {
            return sema.addConstant(dest_ty, Value.zero);
        }

        if (src_info.signedness != dest_info.signedness) {
            return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
                @tagName(dest_info.signedness), operand_ty,
            });
        }
        // Widening via @truncate is a compile error.
        if (src_info.bits > 0 and src_info.bits < dest_info.bits) {
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    src,
                    "destination type '{}' has more bits than source type '{}'",
                    .{ dest_ty, operand_ty },
                );
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{
                    dest_info.bits,
                });
                try sema.errNote(block, operand_src, msg, "source type has {d} bits", .{
                    src_info.bits,
                });
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
    }

    if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        if (val.isUndef()) return sema.addConstUndef(dest_ty);
        return sema.addConstant(dest_ty, try val.intTrunc(sema.arena, dest_info.signedness, dest_info.bits));
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addTyOp(.trunc, dest_ty, operand);
}

/// Implements `@alignCast`: rebuilds the operand's pointer type with the
/// requested alignment and coerces the operand to it.
fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const dest_align = try sema.resolveAlign(block, align_src, extra.lhs);
    const ptr = sema.resolveInst(extra.rhs);
    const ptr_ty = sema.typeOf(ptr);

    // TODO in addition to pointers, this instruction is supposed to work for
    // pointer-like optionals and slices.
    try sema.checkPtrType(block, ptr_src, ptr_ty);

    // TODO compile error if the result pointer is comptime known and would have an
    // alignment that disagrees with the Decl's alignment.
// TODO insert safety check that the alignment is correct
    const ptr_info = ptr_ty.ptrInfo().data;
    // Same pointer type as the operand's, but with the requested alignment.
    const dest_ty = try Type.ptr(sema.arena, .{
        .pointee_type = ptr_info.pointee_type,
        .@"align" = dest_align,
        .@"addrspace" = ptr_info.@"addrspace",
        .mutable = ptr_info.mutable,
        .@"allowzero" = ptr_info.@"allowzero",
        .@"volatile" = ptr_info.@"volatile",
        .size = ptr_info.size,
    });
    return sema.coerceCompatiblePtrs(block, dest_ty, ptr, ptr_src);
}

/// Implements `@clz`. The result type is the smallest unsigned integer that
/// can hold the operand's bit count; comptime-known operands are counted
/// here, otherwise a `clz` AIR instruction is emitted.
fn zirClz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    // TODO implement support for vectors
    if (operand_ty.zigTypeTag() != .Int) {
        return sema.fail(block, ty_src, "expected integer type, found '{}'", .{
            operand_ty,
        });
    }
    const target = sema.mod.getTarget();
    const bits = operand_ty.intInfo(target).bits;
    if (bits == 0) return Air.Inst.Ref.zero;

    const result_ty = try Type.smallestUnsignedInt(sema.arena, bits);

    const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        if (val.isUndef()) return sema.addConstUndef(result_ty);
        return sema.addIntUnsigned(result_ty, val.clz(operand_ty, target));
    } else operand_src;

    try sema.requireRuntimeBlock(block, runtime_src);
    return block.addTyOp(.clz, result_ty, operand);
}

/// Implements `@ctz`. Same result-type rule as `zirClz`; the comptime path
/// is not implemented yet.
fn zirCtz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    // TODO implement support for vectors
    if (operand_ty.zigTypeTag() != .Int) {
        return sema.fail(block, ty_src, "expected integer type, found '{}'", .{
            operand_ty,
        });
    }
    const target = sema.mod.getTarget();
    const bits = operand_ty.intInfo(target).bits;
    if (bits == 0) return Air.Inst.Ref.zero;

    const result_ty = try Type.smallestUnsignedInt(sema.arena, bits);

    const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        if (val.isUndef()) return sema.addConstUndef(result_ty);
        return sema.fail(block, operand_src, "TODO: implement comptime @ctz", .{});
    } else operand_src;

    try sema.requireRuntimeBlock(block, runtime_src);
    return block.addTyOp(.ctz, result_ty, operand);
}

/// Implements `@popCount`. Same result-type rule as `zirClz`;
/// comptime-known operands are counted via `Value.popCount`.
fn zirPopCount(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    // TODO implement support for vectors
    if (operand_ty.zigTypeTag() != .Int) {
        return sema.fail(block, ty_src, "expected integer type, found '{}'", .{
            operand_ty,
        });
    }
    const target = sema.mod.getTarget();
    const bits = operand_ty.intInfo(target).bits;
    if (bits == 0) return Air.Inst.Ref.zero;

    const result_ty = try Type.smallestUnsignedInt(sema.arena, bits);

    const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
        if (val.isUndef()) return sema.addConstUndef(result_ty);
        const result_val = try val.popCount(operand_ty, target, sema.arena);
        return sema.addConstant(result_ty, result_val);
    } else operand_src;

    try sema.requireRuntimeBlock(block, runtime_src);
    return block.addTyOp(.popcount, result_ty, operand);
}

/// Implements `@byteSwap`; not implemented yet.
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data =
sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirByteSwap", .{});
}

/// Implements `@bitReverse`; not implemented yet.
fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirBitReverse", .{});
}

/// Implements `@shrExact`; not implemented yet.
fn zirShrExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirShrExact", .{});
}

/// Implements `@bitOffsetOf`; not implemented yet.
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirBitOffsetOf", .{});
}

/// Implements `@offsetOf`; not implemented yet.
fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirOffsetOf", .{});
}

/// Returns `true` if the type was a comptime_int.
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
    switch (ty.zigTypeTag()) {
        .ComptimeInt => return true,
        .Int => return false,
        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty}),
    }
}

/// Compile error unless `ty` is a pointer type.
fn checkPtrType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    switch (ty.zigTypeTag()) {
        .Pointer => {},
        else => return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty}),
    }
}

/// Compile error unless `ty` is a (comptime) float type.
fn checkFloatType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    switch (ty.zigTypeTag()) {
        .ComptimeFloat, .Float => {},
        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty}),
    }
}

/// Compile error unless `ty` is a numeric type or a vector of numeric
/// elements.
fn checkNumericType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    switch (ty.zigTypeTag()) {
        .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
        .Vector => switch (ty.childType().zigTypeTag()) {
            .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
            else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
        },
        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty}),
    }
}

/// Validates that `ty` is usable as the operand of an atomic builtin:
/// bool, integer, float, enum, or runtime pointer, no wider than the
/// target's largest atomic operand size.
fn checkAtomicOperandType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    var buffer: Type.Payload.Bits = undefined;
    const target = sema.mod.getTarget();
    const max_atomic_bits = target_util.largestAtomicBits(target);
    const int_ty = switch (ty.zigTypeTag()) {
        .Int => ty,
        .Enum => ty.intTagType(&buffer),
        .Float => {
            const bit_count = ty.floatBits(target);
            if (bit_count > max_atomic_bits) {
                return sema.fail(
                    block,
                    ty_src,
                    "expected {d}-bit float type or smaller; found {d}-bit float type",
                    .{ max_atomic_bits, bit_count },
                );
            }
            return;
        },
        .Bool => return, // Will be treated as `u8`.
        else => {
            if (ty.isPtrAtRuntime()) return;

            return sema.fail(
                block,
                ty_src,
                "expected bool, integer, float, enum, or pointer type; found {}",
                .{ty},
            );
        },
    };
    const bit_count = int_ty.intInfo(target).bits;
    if (bit_count > max_atomic_bits) {
        return sema.fail(
            block,
            ty_src,
            "expected {d}-bit integer type or smaller; found {d}-bit integer type",
            .{ max_atomic_bits, bit_count },
        );
    }
}

/// Compile error when attempting to store a runtime value through a
/// pointer to a comptime-mutable variable.
fn checkPtrIsNotComptimeMutable(
    sema: *Sema,
    block: *Block,
    ptr_val: Value,
    ptr_src: LazySrcLoc,
    operand_src: LazySrcLoc,
) CompileError!void {
    _ = operand_src;
    if (ptr_val.isComptimeMutablePtr()) {
        return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{});
    }
}

/// Rejects stores to a comptime var that happen after runtime control flow
/// (a runtime condition or non-inline loop) began, pointing at the
/// offending construct.
fn checkComptimeVarStore(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_ref_mut: Value.Payload.DeclRefMut.Data,
) CompileError!void {
    if (decl_ref_mut.runtime_index < block.runtime_index) {
        if (block.runtime_cond) |cond_src| {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, cond_src, msg, "runtime condition here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (block.runtime_loop) |loop_src| {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, loop_src, msg, "non-inline loop here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        unreachable;
    }
}

/// Result of `checkSimdBinOp`: the coerced operands, their comptime values
/// when known, and the common result/scalar types.
const SimdBinOp = struct {
    len: ?usize,
    /// Coerced to `result_ty`.
    lhs: Air.Inst.Ref,
    /// Coerced to `result_ty`.
    rhs: Air.Inst.Ref,
    lhs_val: ?Value,
    rhs_val: ?Value,
    /// Only different than `scalar_ty` when it is a vector operation.
result_ty: Type,
    scalar_ty: Type,
};

/// Validates and prepares the operands of a (possibly vector) binary
/// operation: vector lengths must match, scalar/vector mixing is rejected,
/// and both operands are coerced to their peer-resolved type.
fn checkSimdBinOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    uncasted_lhs: Air.Inst.Ref,
    uncasted_rhs: Air.Inst.Ref,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!SimdBinOp {
    const lhs_ty = sema.typeOf(uncasted_lhs);
    const rhs_ty = sema.typeOf(uncasted_rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    var vec_len: ?usize = null;
    if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) {
        const lhs_len = lhs_ty.arrayLen();
        const rhs_len = rhs_ty.arrayLen();
        if (lhs_len != rhs_len) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, lhs_src, msg, "length {d} here", .{lhs_len});
                try sema.errNote(block, rhs_src, msg, "length {d} here", .{rhs_len});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        vec_len = try sema.usizeCast(block, lhs_src, lhs_len);
    } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) {
        // Exactly one side is a vector: report which is which.
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: {} and {}", .{
                lhs_ty, rhs_ty,
            });
            errdefer msg.destroy(sema.gpa);
            if (lhs_zig_ty_tag == .Vector) {
                try sema.errNote(block, lhs_src, msg, "vector here", .{});
                try sema.errNote(block, rhs_src, msg, "scalar here", .{});
            } else {
                try sema.errNote(block, lhs_src, msg, "scalar here", .{});
                try sema.errNote(block, rhs_src, msg, "vector here", .{});
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
        .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
    });
    const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src);
    const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src);
    return SimdBinOp{
        .len = vec_len,
        .lhs = lhs,
        .rhs = rhs,
        .lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs),
        .rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs),
        .result_ty = result_ty,
        .scalar_ty = result_ty.scalarType(),
    };
}

/// Resolves a comptime-known `std.builtin.ExportOptions` value from a ZIR
/// reference. Exporting with a `section` is not implemented yet.
fn resolveExportOptions(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.ExportOptions {
    const export_options_ty = try sema.getBuiltinType(block, src, "ExportOptions");
    const air_ref = sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, export_options_ty, air_ref, src);
    const val = try sema.resolveConstValue(block, src, coerced);
    const fields = val.castTag(.@"struct").?.data;
    const struct_obj = export_options_ty.castTag(.@"struct").?.data;
    const name_index = struct_obj.fields.getIndex("name").?;
    const linkage_index = struct_obj.fields.getIndex("linkage").?;
    const section_index = struct_obj.fields.getIndex("section").?;
    if (!fields[section_index].isNull()) {
        return sema.fail(block, src, "TODO: implement exporting with linksection", .{});
    }
    const name_ty = Type.initTag(.const_slice_u8);
    return std.builtin.ExportOptions{
        .name = try fields[name_index].toAllocatedBytes(name_ty, sema.arena),
        .linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage),
        .section = null, // TODO
    };
}

/// Resolves a comptime-known `std.builtin.AtomicOrder` from a ZIR
/// reference.
fn resolveAtomicOrder(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.AtomicOrder {
    const atomic_order_ty = try sema.getBuiltinType(block, src, "AtomicOrder");
    const air_ref = sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, atomic_order_ty, air_ref, src);
    const val = try sema.resolveConstValue(block, src, coerced);
    return val.toEnum(std.builtin.AtomicOrder);
}

/// Resolves a comptime-known `std.builtin.AtomicRmwOp` from a ZIR
/// reference.
fn resolveAtomicRmwOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.AtomicRmwOp {
    const atomic_rmw_op_ty = try sema.getBuiltinType(block, src, "AtomicRmwOp");
    const air_ref = sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, atomic_rmw_op_ty, air_ref, src);
    const val = try sema.resolveConstValue(block, src, coerced);
    return
val.toEnum(std.builtin.AtomicRmwOp);
}

/// Implements `@cmpxchgWeak` / `@cmpxchgStrong` (which variant is selected
/// by `air_tag`). Validates operand type and orderings, evaluates at
/// comptime when all inputs are known, and otherwise emits `air_tag`.
fn zirCmpxchg(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Cmpxchg, inst_data.payload_index).data;
    const src = inst_data.src();
    // zig fmt: off
    const elem_ty_src      : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src          : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const expected_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const new_value_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
    const success_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
    const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = inst_data.src_node };
    // zig fmt: on
    const ptr = sema.resolveInst(extra.ptr);
    const ptr_ty = sema.typeOf(ptr);
    const elem_ty = ptr_ty.elemType();
    try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
    // Floats are atomic-RMW-capable but not cmpxchg-capable.
    if (elem_ty.zigTypeTag() == .Float) {
        return sema.fail(
            block,
            elem_ty_src,
            "expected bool, integer, enum, or pointer type; found '{}'",
            .{elem_ty},
        );
    }
    const expected_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.expected_value), expected_src);
    const new_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.new_value), new_value_src);
    const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order);
    const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order);

    if (@enumToInt(success_order) < @enumToInt(std.builtin.AtomicOrder.Monotonic)) {
        return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{});
    }
    if (@enumToInt(failure_order) < @enumToInt(std.builtin.AtomicOrder.Monotonic)) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{});
    }
    if (@enumToInt(failure_order) > @enumToInt(success_order)) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{});
    }
    if (failure_order == .Release or failure_order == .AcqRel) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
    }

    // Result is `null` on success, otherwise the previously stored value.
    const result_ty = try Type.optional(sema.arena, elem_ty);

    // special case zero bit types
    if ((try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) != null) {
        return sema.addConstant(result_ty, Value.@"null");
    }

    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        if (try sema.resolveMaybeUndefVal(block, expected_src, expected_value)) |expected_val| {
            if (try sema.resolveMaybeUndefVal(block, new_value_src, new_value)) |new_val| {
                if (expected_val.isUndef() or new_val.isUndef()) {
                    // TODO: this should probably cause the memory stored at the pointer
                    // to become undef as well
                    return sema.addConstUndef(result_ty);
                }
                const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
                const result_val = if (stored_val.eql(expected_val, elem_ty)) blk: {
                    try sema.storePtr(block, src, ptr, new_value);
                    break :blk Value.@"null";
                } else try Value.Tag.opt_payload.create(sema.arena, stored_val);
                return sema.addConstant(result_ty, result_val);
            } else break :rs new_value_src;
        } else break :rs expected_src;
    } else ptr_src;

    // Pack both orderings into the AIR flags word (failure in bits 3+).
    const flags: u32 = @as(u32, @enumToInt(success_order)) |
        (@as(u32, @enumToInt(failure_order)) << 3);

    try sema.requireRuntimeBlock(block, runtime_src);
    return block.addInst(.{
        .tag = air_tag,
        .data = .{ .ty_pl = .{
            .ty = try sema.addType(result_ty),
            .payload = try sema.addExtra(Air.Cmpxchg{
                .ptr = ptr,
                .expected_value = expected_value,
                .new_value = new_value,
                .flags = flags,
            }),
        } },
    });
}

/// Implements `@splat`; not implemented yet.
fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data =
sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirSplat", .{});
}

/// Implements `@reduce`; not implemented yet.
fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirReduce", .{});
}

/// Implements `@shuffle`; not implemented yet.
fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirShuffle", .{});
}

/// Implements `@select`; not implemented yet.
fn zirSelect(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirSelect", .{});
}

/// Implements `@atomicLoad`. Rejects Release/AcqRel orderings, resolves at
/// comptime when the pointee is known, otherwise emits `atomic_load`.
fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    // zig fmt: off
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const order_src  : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    // zig fmt: on
    const ptr = sema.resolveInst(extra.lhs);
    const ptr_ty = sema.typeOf(ptr);
    const elem_ty = ptr_ty.elemType();
    try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
    const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs);

    switch (order) {
        .Release, .AcqRel => {
            return sema.fail(
                block,
                order_src,
                "@atomicLoad atomic ordering must not be Release or AcqRel",
                .{},
            );
        },
        else => {},
    }

    if (try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) |val| {
        return sema.addConstant(elem_ty, val);
    }

    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
        if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
            return sema.addConstant(elem_ty, elem_val);
        }
    }

    try sema.requireRuntimeBlock(block, ptr_src);
    return block.addInst(.{
        .tag = .atomic_load,
        .data = .{ .atomic_load = .{
            .ptr = ptr,
            .order = order,
        } },
    });
}

/// Implements `@atomicRmw`. Restricts which operations are allowed per
/// operand type, evaluates against comptime-mutable memory when possible,
/// otherwise emits an `atomic_rmw` AIR instruction.
fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
    const src = inst_data.src();
    // zig fmt: off
    const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src       : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const op_src        : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const operand_src   : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
    const order_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
    // zig fmt: on
    const ptr = sema.resolveInst(extra.ptr);
    const ptr_ty = sema.typeOf(ptr);
    const operand_ty = ptr_ty.elemType();
    try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
    const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);

    switch (operand_ty.zigTypeTag()) {
        .Enum => if (op != .Xchg) {
            return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
        },
        .Bool => if (op != .Xchg) {
            return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{});
        },
        .Float => switch (op) {
            .Xchg, .Add, .Sub => {},
            else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, and .Sub", .{}),
        },
        else => {},
    }
    const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src);
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);

    if (order == .Unordered) {
        return sema.fail(block, order_src,
"@atomicRmw atomic ordering must not be Unordered", .{});
    }

    // special case zero bit types
    if (try sema.typeHasOnePossibleValue(block, operand_ty_src, operand_ty)) |val| {
        return sema.addConstant(operand_ty, val);
    }

    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand);
        const operand_val = maybe_operand_val orelse {
            try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
            break :rs operand_src;
        };
        if (ptr_val.isComptimeMutablePtr()) {
            const target = sema.mod.getTarget();
            const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
            const new_val = switch (op) {
                // zig fmt: off
                .Xchg => operand_val,
                .Add  => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target),
                .Sub  => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target),
                .And  => try stored_val.bitwiseAnd   (operand_val,             sema.arena),
                .Nand => try stored_val.bitwiseNand  (operand_val, operand_ty, sema.arena, target),
                .Or   => try stored_val.bitwiseOr    (operand_val,             sema.arena),
                .Xor  => try stored_val.bitwiseXor   (operand_val,             sema.arena),
                .Max  => try stored_val.numberMax    (operand_val),
                .Min  => try stored_val.numberMin    (operand_val),
                // zig fmt: on
            };
            try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty);
            // RMW returns the previously stored value.
            return sema.addConstant(operand_ty, stored_val);
        } else break :rs ptr_src;
    } else ptr_src;

    // Pack ordering and operation into the AIR flags word (op in bits 3+).
    const flags: u32 = @as(u32, @enumToInt(order)) | (@as(u32, @enumToInt(op)) << 3);

    try sema.requireRuntimeBlock(block, runtime_src);
    return block.addInst(.{
        .tag = .atomic_rmw,
        .data = .{ .pl_op = .{
            .operand = ptr,
            .payload = try sema.addExtra(Air.AtomicRmw{
                .operand = operand,
                .flags = flags,
            }),
        } },
    });
}

/// Implements `@atomicStore`. Rejects Acquire/AcqRel orderings and lowers
/// to the ordering-specific atomic store via `storePtr2`.
fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
    const src = inst_data.src();
    // zig fmt: off
    const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src       : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand_src   : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const order_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
    // zig fmt: on
    const ptr = sema.resolveInst(extra.ptr);
    const operand_ty = sema.typeOf(ptr).elemType();
    try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
    const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src);
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);

    const air_tag: Air.Inst.Tag = switch (order) {
        .Acquire, .AcqRel => {
            return sema.fail(
                block,
                order_src,
                "@atomicStore atomic ordering must not be Acquire or AcqRel",
                .{},
            );
        },
        .Unordered => .atomic_store_unordered,
        .Monotonic => .atomic_store_monotonic,
        .Release => .atomic_store_release,
        .SeqCst => .atomic_store_seq_cst,
    };

    return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}

/// Implements `@mulAdd`; not implemented yet.
fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirMulAdd", .{});
}

/// Implements `@call`; not implemented yet.
fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirBuiltinCall", .{});
}

/// Not implemented yet.
fn zirFieldPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirFieldPtrType",
.{}); } fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirFieldParentPtr", .{}); } fn zirMinMax( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs)); try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs)); const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src); // TODO @maximum(max_int, undefined) should return max_int const runtime_src = if (simd_op.lhs_val) |lhs_val| rs: { if (lhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); const rhs_val = simd_op.rhs_val orelse break :rs rhs_src; if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); const opFunc = switch (air_tag) { .min => Value.numberMin, .max => Value.numberMax, else => unreachable, }; const vec_len = simd_op.len orelse { const result_val = try opFunc(lhs_val, rhs_val); return sema.addConstant(simd_op.result_ty, result_val); }; var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems) |*elem, i| { const lhs_elem_val = lhs_val.elemValueBuffer(i, &lhs_buf); const rhs_elem_val = rhs_val.elemValueBuffer(i, &rhs_buf); elem.* = try opFunc(lhs_elem_val, rhs_elem_val); } return sema.addConstant( simd_op.result_ty, try 
Value.Tag.array.create(sema.arena, elems),
        );
    } else rs: {
        if (simd_op.rhs_val) |rhs_val| {
            if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty);
        }
        break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, runtime_src);
    return block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
}

/// `@memcpy` builtin: copy `byte_count` bytes from `source` to `dest`.
/// Validates that `dest` is a mutable pointer and `source` is a pointer, then
/// emits a `.memcpy` AIR instruction (comptime evaluation is still TODO).
fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
    const src = inst_data.src();
    const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const dest_ptr = sema.resolveInst(extra.dest);
    const dest_ptr_ty = sema.typeOf(dest_ptr);

    if (dest_ptr_ty.zigTypeTag() != .Pointer) {
        return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
    }
    if (dest_ptr_ty.isConstPtr()) {
        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
    }

    const uncasted_src_ptr = sema.resolveInst(extra.source);
    const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr);
    if (uncasted_src_ptr_ty.zigTypeTag() != .Pointer) {
        return sema.fail(block, src_src, "expected pointer, found '{}'", .{
            uncasted_src_ptr_ty,
        });
    }
    // Coerce the source to a const many-pointer with the destination's element
    // type, preserving the source pointer's align/addrspace/allowzero/volatile
    // attributes.
    const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data;
    const wanted_src_ptr_ty = try Type.ptr(sema.arena, .{
        .pointee_type = dest_ptr_ty.elemType2(),
        .@"align" = src_ptr_info.@"align",
        .@"addrspace" = src_ptr_info.@"addrspace",
        .mutable = false,
        .@"allowzero" = src_ptr_info.@"allowzero",
        .@"volatile" = src_ptr_info.@"volatile",
        .size = .Many,
    });
    const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src);
    const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src);

    const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
    const maybe_src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr);
    const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len);

    // If all three operands are comptime-known this should be evaluated at
    // comptime (still TODO); otherwise point the runtime requirement at the
    // first operand that is not comptime-known.
    const runtime_src = if (maybe_dest_ptr_val) |dest_ptr_val| rs: {
        if (maybe_src_ptr_val) |src_ptr_val| {
            if (maybe_len_val) |len_val| {
                _ = dest_ptr_val;
                _ = src_ptr_val;
                _ = len_val;
                return sema.fail(block, src, "TODO: Sema.zirMemcpy at comptime", .{});
            } else break :rs len_src;
        } else break :rs src_src;
    } else dest_src;

    try sema.requireRuntimeBlock(block, runtime_src);
    _ = try block.addInst(.{
        .tag = .memcpy,
        .data = .{ .pl_op = .{
            .operand = dest_ptr,
            .payload = try sema.addExtra(Air.Bin{
                .lhs = src_ptr,
                .rhs = len,
            }),
        } },
    });
}

/// `@memset` builtin: set `byte_count` elements at `dest` to `byte`.
/// Validates that `dest` is a mutable pointer, coerces the fill value to the
/// destination element type, then emits a `.memset` AIR instruction.
fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
    const src = inst_data.src();
    const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const dest_ptr = sema.resolveInst(extra.dest);
    const dest_ptr_ty = sema.typeOf(dest_ptr);
    if (dest_ptr_ty.zigTypeTag() != .Pointer) {
        return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
    }
    if (dest_ptr_ty.isConstPtr()) {
        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
    }
    const elem_ty = dest_ptr_ty.elemType2();
    const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src);
    const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src);

    const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
    const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len);
const runtime_src = if (maybe_dest_ptr_val) |ptr_val| rs: {
        if (maybe_len_val) |len_val| {
            if (try sema.resolveMaybeUndefVal(block, value_src, value)) |val| {
                _ = ptr_val;
                _ = len_val;
                _ = val;
                return sema.fail(block, src, "TODO: Sema.zirMemset at comptime", .{});
            } else break :rs value_src;
        } else break :rs len_src;
    } else dest_src;

    try sema.requireRuntimeBlock(block, runtime_src);
    _ = try block.addInst(.{
        .tag = .memset,
        .data = .{ .pl_op = .{
            .operand = dest_ptr,
            .payload = try sema.addExtra(Air.Bin{
                .lhs = value,
                .rhs = len,
            }),
        } },
    });
}

/// `@asyncCall` builtin; semantic analysis not yet implemented.
fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirBuiltinAsyncCall", .{});
}

/// `resume` expression; semantic analysis not yet implemented.
fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.fail(block, src, "TODO: Sema.zirResume", .{});
}

/// `await` / `nosuspend await` expression; semantic analysis not yet implemented.
fn zirAwait(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_nosuspend: bool,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();

    _ = is_nosuspend;
    return sema.fail(block, src, "TODO: Sema.zirAwait", .{});
}

/// Analyze a ZIR `ExtendedVar`: a `var`/`extern var` declaration. Resolves the
/// declared type (or infers it from the init expression), requires the init to
/// be comptime-known, validates the type is legal for a runtime variable, and
/// returns a constant holding the new `Module.Var`.
fn zirVarExtended(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
    const src = sema.src;
    const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type
    const mut_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at mut token
    const init_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at init expr
    const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);

    // Trailing data is decoded in declaration order; each optional piece bumps
    // `extra_index` past what it consumed.
    var extra_index: usize = extra.end;

    const lib_name: ?[]const u8 = if (small.has_lib_name) blk: {
        const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk lib_name;
    } else null;

    // ZIR supports encoding this information but it is not used; the information
    // is encoded via the Decl entry.
    assert(!small.has_align);
    //const align_val: Value = if (small.has_align) blk: {
    //    const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
    //    extra_index += 1;
    //    const align_tv = try sema.resolveInstConst(block, align_src, align_ref);
    //    break :blk align_tv.val;
    //} else Value.@"null";

    const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
        const init_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk sema.resolveInst(init_ref);
    } else .none;

    // Declared type wins; otherwise infer the variable's type from the init.
    const have_ty = extra.data.var_type != .none;
    const var_ty = if (have_ty)
        try sema.resolveType(block, ty_src, extra.data.var_type)
    else
        sema.typeOf(uncasted_init);

    // The init must be comptime-known; a missing init is represented by the
    // `unreachable_value` sentinel.
    const init_val = if (uncasted_init != .none) blk: {
        const init = if (have_ty)
            try sema.coerce(block, var_ty, uncasted_init, init_src)
        else
            uncasted_init;
        break :blk (try sema.resolveMaybeUndefVal(block, init_src, init)) orelse
            return sema.failWithNeededComptime(block, init_src);
    } else Value.initTag(.unreachable_value);

    try sema.validateVarType(block, mut_src, var_ty, small.is_extern);

    if (lib_name != null) {
        // Look at the sema code for functions which has this logic, it just needs to
        // be extracted and shared by both var and func
        return sema.fail(block, src, "TODO: handle var with lib_name in Sema", .{});
    }

    const new_var = try sema.gpa.create(Module.Var);
    log.debug("created variable {*} owner_decl: {*} ({s})", .{
        new_var, sema.owner_decl, sema.owner_decl.name,
    });
    new_var.* = .{
        .owner_decl = sema.owner_decl,
        .init = init_val,
        .is_extern = small.is_extern,
        .is_mutable = true, // TODO get rid of this unused field
        .is_threadlocal = small.is_threadlocal,
    };
    const result = try sema.addConstant(
        var_ty,
        try Value.Tag.variable.create(sema.arena, new_var),
    );
    return result;
}

fn
zirFuncExtended( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.ExtendedFunc, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = extra.data.src_node }; const align_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at align const small = @bitCast(Zir.Inst.ExtendedFunc.Small, extended.small); var extra_index: usize = extra.end; const lib_name: ?[]const u8 = if (small.has_lib_name) blk: { const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]); extra_index += 1; break :blk lib_name; } else null; const cc: std.builtin.CallingConvention = if (small.has_cc) blk: { const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const cc_tv = try sema.resolveInstConst(block, cc_src, cc_ref); break :blk cc_tv.val.toEnum(std.builtin.CallingConvention); } else .Unspecified; const align_val: Value = if (small.has_align) blk: { const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const align_tv = try sema.resolveInstConst(block, align_src, align_ref); break :blk align_tv.val; } else Value.@"null"; const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len]; extra_index += ret_ty_body.len; var body_inst: Zir.Inst.Index = 0; var src_locs: Zir.Inst.Func.SrcLocs = undefined; if (extra.data.body_len != 0) { body_inst = inst; extra_index += extra.data.body_len; src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; } const is_var_args = small.is_var_args; const is_inferred_error = small.is_inferred_error; const is_extern = small.is_extern; return sema.funcCommon( block, extra.data.src_node, body_inst, ret_ty_body, cc, align_val, is_var_args, is_inferred_error, is_extern, src_locs, lib_name, ); } fn 
zirCUndef( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; const name = try sema.resolveConstString(block, src, extra.operand); try block.c_import_buf.?.writer().print("#undefine {s}\n", .{name}); return Air.Inst.Ref.void_value; } fn zirCInclude( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; const name = try sema.resolveConstString(block, src, extra.operand); try block.c_import_buf.?.writer().print("#include <{s}>\n", .{name}); return Air.Inst.Ref.void_value; } fn zirCDefine( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; const name = try sema.resolveConstString(block, src, extra.lhs); const rhs = sema.resolveInst(extra.rhs); if (sema.typeOf(rhs).zigTypeTag() != .Void) { const value = try sema.resolveConstString(block, src, extra.rhs); try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value }); } else { try block.c_import_buf.?.writer().print("#define {s}\n", .{name}); } return Air.Inst.Ref.void_value; } fn zirWasmMemorySize( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.fail(block, src, "TODO: implement Sema.zirWasmMemorySize", .{}); } fn zirWasmMemoryGrow( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, 
extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.fail(block, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); } fn zirPrefetch( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.fail(block, src, "TODO: implement Sema.zirPrefetch", .{}); } fn zirBuiltinExtern( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.fail(block, src, "TODO: implement Sema.zirBuiltinExtern", .{}); } fn requireFunctionBlock(sema: *Sema, block: *Block, src: LazySrcLoc) !void { if (sema.func == null) { return sema.fail(block, src, "instruction illegal outside function body", .{}); } } fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc) !void { if (block.is_comptime) { return sema.failWithNeededComptime(block, src); } try sema.requireFunctionBlock(block, src); } /// Emit a compile error if type cannot be used for a runtime variable. 
fn validateVarType( sema: *Sema, block: *Block, src: LazySrcLoc, var_ty: Type, is_extern: bool, ) CompileError!void { var ty = var_ty; while (true) switch (ty.zigTypeTag()) { .Bool, .Int, .Float, .ErrorSet, .Enum, .Frame, .AnyFrame, .Void, => return, .BoundFn, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .NoReturn, .Type, .Undefined, .Null, => break, .Pointer => { const elem_ty = ty.childType(); if (elem_ty.zigTypeTag() == .Opaque) return; ty = elem_ty; }, .Opaque => if (is_extern) return else break, .Optional => { var buf: Type.Payload.ElemType = undefined; const child_ty = ty.optionalChild(&buf); return validateVarType(sema, block, src, child_ty, is_extern); }, .Array, .Vector => ty = ty.elemType(), .ErrorUnion => ty = ty.errorUnionPayload(), .Fn, .Struct, .Union => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); if (resolved_ty.requiresComptime()) { break; } else { return; } }, } else unreachable; // TODO should not need else unreachable return sema.fail(block, src, "variable of type '{}' must be const or comptime", .{var_ty}); } pub const PanicId = enum { unreach, unwrap_null, unwrap_errunion, cast_to_null, incorrect_alignment, invalid_error_code, }; fn addSafetyCheck( sema: *Sema, parent_block: *Block, ok: Air.Inst.Ref, panic_id: PanicId, ) !void { const gpa = sema.gpa; var fail_block: Block = .{ .parent = parent_block, .sema = sema, .src_decl = parent_block.src_decl, .namespace = parent_block.namespace, .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime, }; defer fail_block.instructions.deinit(gpa); _ = try sema.safetyPanic(&fail_block, .unneeded, panic_id); try parent_block.instructions.ensureUnusedCapacity(gpa, 1); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + 1 + // The main block only needs space for the cond_br. 
@typeInfo(Air.CondBr).Struct.fields.len +
        1 + // The ok branch of the cond_br only needs space for the br.
        fail_block.instructions.items.len);

    try sema.air_instructions.ensureUnusedCapacity(gpa, 3);
    // Exactly three instructions are appended below, at consecutive indices.
    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
    const cond_br_inst = block_inst + 1;
    const br_inst = cond_br_inst + 1;

    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .block,
        .data = .{ .ty_pl = .{
            .ty = .void_type,
            .payload = sema.addExtraAssumeCapacity(Air.Block{
                .body_len = 1,
            }),
        } },
    });
    sema.air_extra.appendAssumeCapacity(cond_br_inst);

    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .cond_br,
        .data = .{ .pl_op = .{
            .operand = ok,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = 1,
                .else_body_len = @intCast(u32, fail_block.instructions.items.len),
            }),
        } },
    });
    sema.air_extra.appendAssumeCapacity(br_inst);
    sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items);

    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .br,
        .data = .{ .br = .{
            .block_inst = block_inst,
            .operand = .void_value,
        } },
    });
    parent_block.instructions.appendAssumeCapacity(block_inst);
}

/// Lower a call to the user-overridable `panic` handler with `msg_inst` as the
/// message and a null stack trace. Backends without support get a
/// breakpoint+unreach fallback instead.
fn panicWithMsg(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    msg_inst: Air.Inst.Ref,
) !Zir.Inst.Index {
    const mod = sema.mod;
    const arena = sema.arena;

    const this_feature_is_implemented_in_the_backend =
        mod.comp.bin_file.options.object_format == .c or
        mod.comp.bin_file.options.use_llvm;
    if (!this_feature_is_implemented_in_the_backend) {
        // TODO implement this feature in all the backends and then delete this branch
        _ = try block.addNoOp(.breakpoint);
        _ = try block.addNoOp(.unreach);
        return always_noreturn;
    }
    const panic_fn = try sema.getBuiltin(block, src, "panic");
    const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
    const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
    const ptr_stack_trace_ty = try Type.ptr(arena, .{
        .pointee_type = stack_trace_ty,
        .@"addrspace" = target_util.defaultAddressSpace(mod.getTarget(), .global_constant), // TODO might need a place that is more dynamic
    });
    const null_stack_trace = try sema.addConstant(
        try Type.optional(arena, ptr_stack_trace_ty),
        Value.@"null",
    );
    const args = try arena.create([2]Air.Inst.Ref);
    args.* = .{ msg_inst, null_stack_trace };
    _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args);
    return always_noreturn;
}

/// Emit the panic call for a failed safety check: picks the message for
/// `panic_id`, materializes it as an anonymous string decl, and delegates to
/// `panicWithMsg`.
fn safetyPanic(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    panic_id: PanicId,
) CompileError!Zir.Inst.Index {
    const msg = switch (panic_id) {
        .unreach => "reached unreachable code",
        .unwrap_null => "attempt to use null value",
        .unwrap_errunion => "unreachable error occurred",
        .cast_to_null => "cast causes pointer to be null",
        .incorrect_alignment => "incorrect alignment",
        .invalid_error_code => "invalid error code",
    };

    const msg_inst = msg_inst: {
        // TODO instead of making a new decl for every panic in the entire compilation,
        // introduce the concept of a reference-counted decl for these
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        break :msg_inst try sema.analyzeDeclRef(try anon_decl.finish(
            try Type.Tag.array_u8.create(anon_decl.arena(), msg.len),
            try Value.Tag.bytes.create(anon_decl.arena(), msg),
        ));
    };

    const casted_msg_inst = try sema.coerce(block, Type.initTag(.const_slice_u8), msg_inst, src);
    return sema.panicWithMsg(block, src, casted_msg_inst);
}

/// Count one backwards branch during comptime evaluation; errors once the
/// branch quota is exceeded.
fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
    sema.branch_count += 1;
    if (sema.branch_count > sema.branch_quota) {
        // TODO show the "called from here" stack
        return sema.fail(block, src, "evaluation exceeded {d} backwards branches", .{sema.branch_quota});
    }
}

fn fieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object: Air.Inst.Ref,
    field_name: []const u8,
    field_name_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // When editing this function, note that there is corresponding logic to be edited
    // in `fieldPtr`.
// This function takes a value and returns a value.
    const arena = sema.arena;
    const object_src = src; // TODO better source location
    const object_ty = sema.typeOf(object);

    // Zig allows dereferencing a single pointer during field lookup. Note that
    // we don't actually need to generate the dereference some field lookups, like the
    // length of arrays and other comptime operations.
    const is_pointer_to = object_ty.isSinglePointer();

    const inner_ty = if (is_pointer_to)
        object_ty.childType()
    else
        object_ty;

    switch (inner_ty.zigTypeTag()) {
        .Array => {
            // Arrays only have a `len` field, and it is comptime-known.
            if (mem.eql(u8, field_name, "len")) {
                return sema.addConstant(
                    Type.initTag(.comptime_int),
                    try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()),
                );
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{s}' in '{}'",
                    .{ field_name, object_ty },
                );
            }
        },
        .Pointer => if (inner_ty.isSlice()) {
            // Slices expose `ptr` and `len` fields.
            if (mem.eql(u8, field_name, "ptr")) {
                const slice = if (is_pointer_to)
                    try sema.analyzeLoad(block, src, object, object_src)
                else
                    object;
                return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src);
            } else if (mem.eql(u8, field_name, "len")) {
                const slice = if (is_pointer_to)
                    try sema.analyzeLoad(block, src, object, object_src)
                else
                    object;
                return sema.analyzeSliceLen(block, src, slice);
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{s}' in '{}'",
                    .{ field_name, object_ty },
                );
            }
        },
        .Type => {
            // Field access on a type value: error values, enum fields, and
            // namespace declarations of the child type.
            const dereffed_type = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object, object_src)
            else
                object;

            const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
            var to_type_buffer: Value.ToTypeBuffer = undefined;
            const child_type = val.toType(&to_type_buffer);

            switch (child_type.zigTypeTag()) {
                .ErrorSet => {
                    // Explicit error sets must contain the name; the global
                    // (anyerror) case registers the name in the module table.
                    const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
                        if (payload.data.names.getEntry(field_name)) |entry| {
                            break :blk entry.key_ptr.*;
                        }
                        return sema.fail(block, src, "no error named '{s}' in '{}'", .{
                            field_name, child_type,
                        });
                    } else (try sema.mod.getErrorValue(field_name)).key;

                    return sema.addConstant(
                        try child_type.copy(arena),
                        try Value.Tag.@"error".create(arena, .{ .name = name }),
                    );
                },
                .Union => {
                    // Namespace decls take precedence over tag names.
                    if (child_type.getNamespace()) |namespace| {
                        if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    if (child_type.unionTagType()) |enum_ty| {
                        if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| {
                            const field_index = @intCast(u32, field_index_usize);
                            return sema.addConstant(
                                enum_ty,
                                try Value.Tag.enum_field_index.create(sema.arena, field_index),
                            );
                        }
                    }
                    return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                },
                .Enum => {
                    // Namespace decls take precedence over enum field names.
                    if (child_type.getNamespace()) |namespace| {
                        if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    const field_index_usize = child_type.enumFieldIndex(field_name) orelse
                        return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                    const field_index = @intCast(u32, field_index_usize);
                    const enum_val = try Value.Tag.enum_field_index.create(arena, field_index);
                    return sema.addConstant(try child_type.copy(arena), enum_val);
                },
                .Struct, .Opaque => {
                    if (child_type.getNamespace()) |namespace| {
                        if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    // TODO add note: declared here
                    const kw_name = switch (child_type.zigTypeTag()) {
                        .Struct => "struct",
                        .Opaque => "opaque",
                        .Union => "union",
                        else => unreachable,
                    };
                    return sema.fail(block, src, "{s} '{}' has no member named '{s}'", .{
                        kw_name, child_type, field_name,
                    });
                },
                else => return sema.fail(block, src, "type '{}' has no members", .{child_type}),
            }
        },
        .Struct => if (is_pointer_to) {
            // Avoid loading the entire struct by fetching a pointer and loading that
            const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty);
            return sema.analyzeLoad(block, src, field_ptr, object_src);
        } else {
            return sema.structFieldVal(block, src,
object, field_name, field_name_src, inner_ty);
        },
        .Union => if (is_pointer_to) {
            // Avoid loading the entire union by fetching a pointer and loading that
            const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty);
            return sema.analyzeLoad(block, src, field_ptr, object_src);
        } else {
            return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty);
        },
        else => {},
    }
    return sema.fail(block, src, "type '{}' does not support field access", .{object_ty});
}

fn fieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object_ptr: Air.Inst.Ref,
    field_name: []const u8,
    field_name_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // When editing this function, note that there is corresponding logic to be edited
    // in `fieldVal`. This function takes a pointer and returns a pointer.
    const object_ptr_src = src; // TODO better source location
    const object_ptr_ty = sema.typeOf(object_ptr);
    const object_ty = switch (object_ptr_ty.zigTypeTag()) {
        .Pointer => object_ptr_ty.elemType(),
        else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty}),
    };

    // Zig allows dereferencing a single pointer during field lookup. Note that
    // we don't actually need to generate the dereference some field lookups, like the
    // length of arrays and other comptime operations.
    const is_pointer_to = object_ty.isSinglePointer();

    const inner_ty = if (is_pointer_to)
        object_ty.childType()
    else
        object_ty;

    switch (inner_ty.zigTypeTag()) {
        .Array => {
            // The comptime-known `len` is materialized as an anonymous decl so
            // a pointer to it can be returned.
            if (mem.eql(u8, field_name, "len")) {
                var anon_decl = try block.startAnonDecl();
                defer anon_decl.deinit();
                return sema.analyzeDeclRef(try anon_decl.finish(
                    Type.initTag(.comptime_int),
                    try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()),
                ));
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{s}' in '{}'",
                    .{ field_name, object_ty },
                );
            }
        },
        .Pointer => if (inner_ty.isSlice()) {
            const inner_ptr = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
            else
                object_ptr;

            if (mem.eql(u8, field_name, "ptr")) {
                const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
                const slice_ptr_ty = inner_ty.slicePtrFieldType(buf);

                // Comptime-known slice: return a ref to an anon decl holding ptr.
                if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    return sema.analyzeDeclRef(try anon_decl.finish(
                        try slice_ptr_ty.copy(anon_decl.arena()),
                        try val.slicePtr().copy(anon_decl.arena()),
                    ));
                }
                try sema.requireRuntimeBlock(block, src);

                // Result pointer inherits mutability and addrspace of the operand.
                const result_ty = try Type.ptr(sema.arena, .{
                    .pointee_type = slice_ptr_ty,
                    .mutable = object_ptr_ty.ptrIsMutable(),
                    .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
                });
                return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
            } else if (mem.eql(u8, field_name, "len")) {
                // Comptime-known slice: return a ref to an anon decl holding len.
                if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    return sema.analyzeDeclRef(try anon_decl.finish(
                        Type.usize,
                        try Value.Tag.int_u64.create(anon_decl.arena(), val.sliceLen()),
                    ));
                }
                try sema.requireRuntimeBlock(block, src);

                const result_ty = try Type.ptr(sema.arena, .{
                    .pointee_type = Type.usize,
                    .mutable = object_ptr_ty.ptrIsMutable(),
                    .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
                });
                return block.addTyOp(.ptr_slice_len_ptr,
result_ty, inner_ptr); } else { return sema.fail( block, field_name_src, "no member named '{s}' in '{}'", .{ field_name, object_ty }, ); } }, .Type => { _ = try sema.resolveConstValue(block, object_ptr_src, object_ptr); const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src); const inner = if (is_pointer_to) try sema.analyzeLoad(block, src, result, object_ptr_src) else result; const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; var to_type_buffer: Value.ToTypeBuffer = undefined; const child_type = val.toType(&to_type_buffer); switch (child_type.zigTypeTag()) { .ErrorSet => { // TODO resolve inferred error sets const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { if (payload.data.names.getEntry(field_name)) |entry| { break :blk entry.key_ptr.*; } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ field_name, child_type, }); } else (try sema.mod.getErrorValue(field_name)).key; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try child_type.copy(anon_decl.arena()), try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }), )); }, .Union => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } if (child_type.unionTagType()) |enum_ty| { if (enum_ty.enumFieldIndex(field_name)) |field_index| { const field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try enum_ty.copy(anon_decl.arena()), try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), )); } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } const 
field_index = child_type.enumFieldIndex(field_name) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try child_type.copy(anon_decl.arena()), try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), )); }, .Struct, .Opaque => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, else => return sema.fail(block, src, "type '{}' has no members", .{child_type}), } }, .Struct => { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else object_ptr; return sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty); }, .Union => { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else object_ptr; return sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty); }, else => {}, } return sema.fail(block, src, "type '{}' does not support field access (fieldPtr, {}.{s})", .{ object_ty, object_ptr_ty, field_name }); } fn fieldCallBind( sema: *Sema, block: *Block, src: LazySrcLoc, raw_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. 
const raw_ptr_src = src; // TODO better source location
    const raw_ptr_ty = sema.typeOf(raw_ptr);
    const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and raw_ptr_ty.ptrSize() == .One)
        raw_ptr_ty.childType()
    else
        return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty});

    // Optionally dereference a second pointer to get the concrete type.
    const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One;
    const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
    const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
    const object_ptr = if (is_double_ptr)
        try sema.analyzeLoad(block, src, raw_ptr, src)
    else
        raw_ptr;
    const arena = sema.arena;

    // First, look for an actual field with this name.
    find_field: {
        switch (concrete_ty.zigTypeTag()) {
            .Struct => {
                const struct_ty = try sema.resolveTypeFields(block, src, concrete_ty);
                const struct_obj = struct_ty.castTag(.@"struct").?.data;

                const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
                    break :find_field;
                const field_index = @intCast(u32, field_index_usize);
                const field = struct_obj.fields.values()[field_index];
                const ptr_field_ty = try Type.ptr(arena, .{
                    .pointee_type = field.ty,
                    .mutable = ptr_ty.ptrIsMutable(),
                    .@"addrspace" = ptr_ty.ptrAddressSpace(),
                });

                if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
                    const pointer = try sema.addConstant(
                        ptr_field_ty,
                        try Value.Tag.field_ptr.create(arena, .{
                            .container_ptr = struct_ptr_val,
                            .field_index = field_index,
                        }),
                    );
                    return sema.analyzeLoad(block, src, pointer, src);
                }
                try sema.requireRuntimeBlock(block, src);
                const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty);
                return sema.analyzeLoad(block, src, ptr_inst, src);
            },
            .Union => return sema.fail(block, src, "TODO implement field calls on unions", .{}),
            .Type => {
                const namespace = try sema.analyzeLoad(block, src, object_ptr, src);
                return sema.fieldVal(block, src, namespace, field_name, field_name_src);
            },
            else => {},
        }
    }

    // If we get here, we need to look for a decl in the struct type instead.
    switch (concrete_ty.zigTypeTag()) {
        .Struct, .Opaque, .Union, .Enum => {
            if (concrete_ty.getNamespace()) |namespace| {
                if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
                    const decl_val = try sema.analyzeLoad(block, src, inst, src);
                    const decl_type = sema.typeOf(decl_val);
                    if (decl_type.zigTypeTag() == .Fn and
                        decl_type.fnParamLen() >= 1)
                    {
                        const first_param_type = decl_type.fnParamType(0);
                        const first_param_tag = first_param_type.tag();
                        // A method whose first parameter is `*Self` (or var-args /
                        // generic) is bound to the object pointer directly.
                        // zig fmt: off
                        if (first_param_tag == .var_args_param or
                            first_param_tag == .generic_poison or (
                                first_param_type.zigTypeTag() == .Pointer and
                                first_param_type.ptrSize() == .One and
                                first_param_type.childType().eql(concrete_ty)))
                        {
                        // zig fmt: on
                            // TODO: bound fn calls on rvalues should probably
                            // generate a by-value argument somehow.
                            const ty = Type.Tag.bound_fn.init();
                            const value = try Value.Tag.bound_fn.create(arena, .{
                                .func_inst = decl_val,
                                .arg0_inst = object_ptr,
                            });
                            return sema.addConstant(ty, value);
                        } else if (first_param_type.eql(concrete_ty)) {
                            // First parameter is `Self` by value: bind a deref.
                            var deref = try sema.analyzeLoad(block, src, object_ptr, src);
                            const ty = Type.Tag.bound_fn.init();
                            const value = try Value.Tag.bound_fn.create(arena, .{
                                .func_inst = decl_val,
                                .arg0_inst = deref,
                            });
                            return sema.addConstant(ty, value);
                        }
                    }
                }
            }
        },
        else => {},
    }

    return sema.fail(block, src, "type '{}' has no field or member function named '{s}'", .{ concrete_ty, field_name });
}

/// Look up `decl_name` in `namespace`; returns null when not found.
/// Emits a "not marked 'pub'" compile error when the decl is found but is
/// private to another file.
fn namespaceLookup(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: *Namespace,
    decl_name: []const u8,
) CompileError!?*Decl {
    const gpa = sema.gpa;
    if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| {
        if (!decl.is_pub and decl.getFileScope() != block.getFileScope()) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{
                    decl_name,
                });
                errdefer msg.destroy(gpa);
                try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{});
break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        return decl;
    }
    return null;
}

/// Like `namespaceLookup`, but returns a reference to the decl.
fn namespaceLookupRef(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: *Namespace,
    decl_name: []const u8,
) CompileError!?Air.Inst.Ref {
    const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
    return try sema.analyzeDeclRef(decl);
}

/// Like `namespaceLookup`, but returns the decl's value.
fn namespaceLookupVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: *Namespace,
    decl_name: []const u8,
) CompileError!?Air.Inst.Ref {
    const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
    return try sema.analyzeDeclVal(block, src, decl);
}

/// Produce a pointer to field `field_name` of the struct pointed to by
/// `struct_ptr`. Emits a compile error when no such field exists.
fn structFieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_ptr: Air.Inst.Ref,
    field_name: []const u8,
    field_name_src: LazySrcLoc,
    unresolved_struct_ty: Type,
) CompileError!Air.Inst.Ref {
    const arena = sema.arena;
    assert(unresolved_struct_ty.zigTypeTag() == .Struct);

    const struct_ptr_ty = sema.typeOf(struct_ptr);
    const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty);
    const struct_obj = struct_ty.castTag(.@"struct").?.data;

    const field_index_big = struct_obj.fields.getIndex(field_name) orelse
        return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
    const field_index = @intCast(u32, field_index_big);
    const field = struct_obj.fields.values()[field_index];

    // The result pointer inherits mutability and address space from the
    // struct pointer.
    var ptr_ty_data: Type.Payload.Pointer.Data = .{
        .pointee_type = field.ty,
        .mutable = struct_ptr_ty.ptrIsMutable(),
        .@"addrspace" = struct_ptr_ty.ptrAddressSpace(),
    };
    // TODO handle when the struct pointer is overaligned, we should return a potentially
    // over-aligned field pointer too.
if (struct_obj.layout == .Packed) p: {
        // Packed layout: walk the fields to compute the bit offset and the
        // host integer size for the field pointer.
        const target = sema.mod.getTarget();
        comptime assert(Type.packed_struct_layout_version == 1);

        var offset: u64 = 0;
        var running_bits: u16 = 0;
        for (struct_obj.fields.values()) |f, i| {
            if (!f.ty.hasCodeGenBits()) continue;

            const field_align = f.packedAlignment();
            if (field_align == 0) {
                // Field participates in the current host integer.
                if (i == field_index) {
                    ptr_ty_data.bit_offset = running_bits;
                }
                running_bits += @intCast(u16, f.ty.bitSize(target));
            } else {
                if (running_bits != 0) {
                    var int_payload: Type.Payload.Bits = .{
                        .base = .{ .tag = .int_unsigned },
                        .data = running_bits,
                    };
                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
                    if (i > field_index) {
                        ptr_ty_data.host_size = @intCast(u16, int_ty.abiSize(target));
                        break :p;
                    }
                    const int_align = int_ty.abiAlignment(target);
                    offset = std.mem.alignForwardGeneric(u64, offset, int_align);
                    offset += int_ty.abiSize(target);
                    running_bits = 0;
                }
                offset = std.mem.alignForwardGeneric(u64, offset, field_align);
                if (i == field_index) {
                    break :p;
                }
                offset += f.ty.abiSize(target);
            }
        }
        assert(running_bits != 0);
        var int_payload: Type.Payload.Bits = .{
            .base = .{ .tag = .int_unsigned },
            .data = running_bits,
        };
        const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
        ptr_ty_data.host_size = @intCast(u16, int_ty.abiSize(target));
    }
    const ptr_field_ty = try Type.ptr(arena, ptr_ty_data);

    if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
        return sema.addConstant(
            ptr_field_ty,
            try Value.Tag.field_ptr.create(arena, .{
                .container_ptr = struct_ptr_val,
                .field_index = field_index,
            }),
        );
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty);
}

/// Extract field `field_name` from a by-value struct.
/// Emits a compile error when no such field exists.
fn structFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_byval: Air.Inst.Ref,
    field_name: []const u8,
    field_name_src: LazySrcLoc,
    unresolved_struct_ty: Type,
) CompileError!Air.Inst.Ref {
    assert(unresolved_struct_ty.zigTypeTag() == .Struct);

    const struct_ty = try sema.resolveTypeFields(block,
src, unresolved_struct_ty);
    const struct_obj = struct_ty.castTag(.@"struct").?.data;

    const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
        return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
    const field_index = @intCast(u32, field_index_usize);
    const field = struct_obj.fields.values()[field_index];

    if (try sema.resolveMaybeUndefVal(block, src, struct_byval)) |struct_val| {
        if (struct_val.isUndef()) return sema.addConstUndef(field.ty);

        const field_values = struct_val.castTag(.@"struct").?.data;
        return sema.addConstant(field.ty, field_values[field_index]);
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addStructFieldVal(struct_byval, field_index, field.ty);
}

/// Produce a pointer to field `field_name` of the union pointed to by
/// `union_ptr`. Emits a compile error when no such field exists.
fn unionFieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    union_ptr: Air.Inst.Ref,
    field_name: []const u8,
    field_name_src: LazySrcLoc,
    unresolved_union_ty: Type,
) CompileError!Air.Inst.Ref {
    const arena = sema.arena;
    assert(unresolved_union_ty.zigTypeTag() == .Union);

    const union_ptr_ty = sema.typeOf(union_ptr);
    const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty);
    const union_obj = union_ty.cast(Type.Payload.Union).?.data;

    const field_index_big = union_obj.fields.getIndex(field_name) orelse
        return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name);
    const field_index = @intCast(u32, field_index_big);
    const field = union_obj.fields.values()[field_index];

    // The result pointer inherits mutability and address space from the
    // union pointer.
    const ptr_field_ty = try Type.ptr(arena, .{
        .pointee_type = field.ty,
        .mutable = union_ptr_ty.ptrIsMutable(),
        .@"addrspace" = union_ptr_ty.ptrAddressSpace(),
    });

    if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| {
        // TODO detect inactive union field and emit compile error
        return sema.addConstant(
            ptr_field_ty,
            try Value.Tag.field_ptr.create(arena, .{
                .container_ptr = union_ptr_val,
                .field_index = field_index,
            }),
        );
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addStructFieldPtr(union_ptr, field_index,
ptr_field_ty);
}

/// Extract field `field_name` from a by-value union.
/// Emits a compile error when no such field exists.
fn unionFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    union_byval: Air.Inst.Ref,
    field_name: []const u8,
    field_name_src: LazySrcLoc,
    unresolved_union_ty: Type,
) CompileError!Air.Inst.Ref {
    assert(unresolved_union_ty.zigTypeTag() == .Union);

    const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty);
    const union_obj = union_ty.cast(Type.Payload.Union).?.data;

    const field_index_usize = union_obj.fields.getIndex(field_name) orelse
        return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name);
    const field_index = @intCast(u32, field_index_usize);
    const field = union_obj.fields.values()[field_index];

    if (try sema.resolveMaybeUndefVal(block, src, union_byval)) |union_val| {
        if (union_val.isUndef()) return sema.addConstUndef(field.ty);

        // TODO detect inactive union field and emit compile error
        const active_val = union_val.castTag(.@"union").?.data.val;
        return sema.addConstant(field.ty, active_val);
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addStructFieldVal(union_byval, field_index, field.ty);
}

/// Analyze an element-pointer expression: `&array_ptr[elem_index]`.
/// `array_ptr` must be a pointer to an indexable type.
fn elemPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array_ptr: Air.Inst.Ref,
    elem_index: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const array_ptr_src = src; // TODO better source location
    const array_ptr_ty = sema.typeOf(array_ptr);
    const array_ty = switch (array_ptr_ty.zigTypeTag()) {
        .Pointer => array_ptr_ty.elemType(),
        else => return sema.fail(block, array_ptr_src, "expected pointer, found '{}'", .{array_ptr_ty}),
    };
    if (!array_ty.isIndexable()) {
        return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty});
    }

    switch (array_ty.zigTypeTag()) {
        .Pointer => {
            // In all below cases, we have to deref the ptr operand to get the actual array pointer.
const array = try sema.analyzeLoad(block, array_ptr_src, array_ptr, array_ptr_src);
            const result_ty = try array_ty.elemPtrType(sema.arena);
            switch (array_ty.ptrSize()) {
                .Slice => {
                    const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
                    const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
                    // Comptime-known slice and index fold to a constant pointer.
                    const runtime_src = if (maybe_slice_val) |slice_val| rs: {
                        const index_val = maybe_index_val orelse break :rs elem_index_src;
                        const index = @intCast(usize, index_val.toUnsignedInt());
                        const elem_ptr = try slice_val.elemPtr(sema.arena, index);
                        return sema.addConstant(result_ty, elem_ptr);
                    } else array_ptr_src;
                    try sema.requireRuntimeBlock(block, runtime_src);
                    return block.addSliceElemPtr(array, elem_index, result_ty);
                },
                .Many, .C => {
                    const maybe_ptr_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
                    const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
                    const runtime_src = rs: {
                        const ptr_val = maybe_ptr_val orelse break :rs array_ptr_src;
                        const index_val = maybe_index_val orelse break :rs elem_index_src;
                        const index = @intCast(usize, index_val.toUnsignedInt());
                        const elem_ptr = try ptr_val.elemPtr(sema.arena, index);
                        return sema.addConstant(result_ty, elem_ptr);
                    };
                    try sema.requireRuntimeBlock(block, runtime_src);
                    return block.addPtrElemPtr(array, elem_index, result_ty);
                },
                .One => {
                    assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
                    return sema.elemPtrArray(block, array_ptr_src, array, elem_index, elem_index_src);
                },
            }
        },
        .Array => return sema.elemPtrArray(block, array_ptr_src, array_ptr, elem_index, elem_index_src),
        .Vector => return sema.fail(block, src, "TODO implement Sema for elemPtr for vector", .{}),
        else => unreachable,
    }
}

/// Analyze an element-value expression: `array[elem_index]`.
/// The index is coerced to `usize` before use.
fn elemVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array: Air.Inst.Ref,
    elem_index_uncasted: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const array_src = src; // TODO better source location
    const array_ty = sema.typeOf(array);
    if (!array_ty.isIndexable()) {
        return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty});
    }
    const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
    switch (array_ty.zigTypeTag()) {
        .Pointer => switch (array_ty.ptrSize()) {
            .Slice => {
                const maybe_slice_val = try sema.resolveDefinedValue(block, array_src, array);
                const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
                const runtime_src = if (maybe_slice_val) |slice_val| rs: {
                    const index_val = maybe_index_val orelse break :rs elem_index_src;
                    const index = @intCast(usize, index_val.toUnsignedInt());
                    const elem_val = try slice_val.elemValue(sema.arena, index);
                    return sema.addConstant(array_ty.elemType2(), elem_val);
                } else array_src;
                try sema.requireRuntimeBlock(block, runtime_src);
                return block.addBinOp(.slice_elem_val, array, elem_index);
            },
            .Many, .C => {
                const maybe_ptr_val = try sema.resolveDefinedValue(block, array_src, array);
                const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
                const runtime_src = rs: {
                    const ptr_val = maybe_ptr_val orelse break :rs array_src;
                    const index_val = maybe_index_val orelse break :rs elem_index_src;
                    const index = @intCast(usize, index_val.toUnsignedInt());
                    const maybe_array_val = try sema.pointerDeref(block, array_src, ptr_val, array_ty);
                    const array_val = maybe_array_val orelse break :rs array_src;
                    const elem_val = try array_val.elemValue(sema.arena, index);
                    return sema.addConstant(array_ty.elemType2(), elem_val);
                };
                try sema.requireRuntimeBlock(block, runtime_src);
                return block.addBinOp(.ptr_elem_val, array, elem_index);
            },
            .One => {
                assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
                const elem_ptr = try sema.elemPtr(block, array_src, array, elem_index, elem_index_src);
                return sema.analyzeLoad(block, array_src, elem_ptr, elem_index_src);
            },
        },
        .Array => {
            if (try
sema.resolveMaybeUndefVal(block, array_src, array)) |array_val| {
                const elem_ty = array_ty.childType();
                if (array_val.isUndef()) return sema.addConstUndef(elem_ty);
                const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
                if (maybe_index_val) |index_val| {
                    const index = @intCast(usize, index_val.toUnsignedInt());
                    const elem_val = try array_val.elemValue(sema.arena, index);
                    return sema.addConstant(elem_ty, elem_val);
                }
            }
            try sema.requireRuntimeBlock(block, array_src);
            return block.addBinOp(.array_elem_val, array, elem_index);
        },
        .Vector => return sema.fail(block, array_src, "TODO implement Sema for elemVal for vector", .{}),
        else => unreachable,
    }
}

/// Produce a pointer to an element of the array pointed to by `array_ptr`.
fn elemPtrArray(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array_ptr: Air.Inst.Ref,
    elem_index: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const array_ptr_ty = sema.typeOf(array_ptr);
    const result_ty = try array_ptr_ty.elemPtrType(sema.arena);

    if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| {
        if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| {
            // Both array pointer and index are compile-time known.
            const index_u64 = index_val.toUnsignedInt();
            // @intCast here because it would have been impossible to construct a value that
            // required a larger index.
const elem_ptr = try array_ptr_val.elemPtr(sema.arena, @intCast(usize, index_u64));
            return sema.addConstant(result_ty, elem_ptr);
        }
    }
    // TODO safety check for array bounds
    try sema.requireRuntimeBlock(block, src);
    return block.addPtrElemPtr(array_ptr, elem_index, result_ty);
}

/// Coerce `inst` to `dest_ty_unresolved`, emitting a compile error when no
/// coercion applies. Handles in-memory bitcasts, optionals, pointers,
/// numeric widening, enums, unions, error unions, and array/vector casts.
fn coerce(
    sema: *Sema,
    block: *Block,
    dest_ty_unresolved: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    switch (dest_ty_unresolved.tag()) {
        .var_args_param => return sema.coerceVarArgParam(block, inst, inst_src),
        .generic_poison => return inst,
        else => {},
    }
    const dest_ty_src = inst_src; // TODO better source location
    const dest_ty = try sema.resolveTypeFields(block, dest_ty_src, dest_ty_unresolved);
    const inst_ty = try sema.resolveTypeFields(block, inst_src, sema.typeOf(inst));
    // If the types are the same, we can return the operand.
    if (dest_ty.eql(inst_ty))
        return inst;

    const arena = sema.arena;
    const target = sema.mod.getTarget();

    const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
    if (in_memory_result == .ok) {
        if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
            // Keep the comptime Value representation; take the new type.
return sema.addConstant(dest_ty, val);
        }
        try sema.requireRuntimeBlock(block, inst_src);
        return block.addBitCast(dest_ty, inst);
    }

    // undefined to anything
    if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
        if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) {
            return sema.addConstant(dest_ty, val);
        }
    }
    assert(inst_ty.zigTypeTag() != .Undefined);

    switch (dest_ty.zigTypeTag()) {
        .Optional => {
            // null to ?T
            if (inst_ty.zigTypeTag() == .Null) {
                return sema.addConstant(dest_ty, Value.@"null");
            }

            // T to ?T
            const child_type = try dest_ty.optionalChildAlloc(sema.arena);
            const intermediate = try sema.coerce(block, child_type, inst, inst_src);
            return sema.wrapOptional(block, dest_ty, intermediate, inst_src);
        },
        .Pointer => {
            const dest_info = dest_ty.ptrInfo().data;

            // Function body to function pointer.
            if (inst_ty.zigTypeTag() == .Fn) {
                const fn_val = try sema.resolveConstValue(block, inst_src, inst);
                const fn_decl = fn_val.castTag(.function).?.data.owner_decl;
                const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
                return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
            }

            // *T to *[1]T
            single_item: {
                if (dest_info.size != .One) break :single_item;
                if (!inst_ty.isSinglePointer()) break :single_item;
                const ptr_elem_ty = inst_ty.childType();
                const array_ty = dest_info.pointee_type;
                if (array_ty.zigTypeTag() != .Array) break :single_item;
                const array_elem_ty = array_ty.childType();
                const dest_is_mut = dest_info.mutable;
                if (inst_ty.isConstPtr() and dest_is_mut) break :single_item;
                if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :single_item;
                if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :single_item;
                switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                    .ok => {},
                    .no_match => break :single_item,
                }
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // Coercions where the source is a single pointer to an array.
src_array_ptr: {
                if (!inst_ty.isSinglePointer()) break :src_array_ptr;
                const array_ty = inst_ty.childType();
                if (array_ty.zigTypeTag() != .Array) break :src_array_ptr;
                const len0 = array_ty.arrayLen() == 0;
                // We resolve here so that the backend has the layout of the elem type.
                const array_elem_type = try sema.resolveTypeFields(block, inst_src, array_ty.childType());
                const dest_is_mut = dest_info.mutable;
                if (inst_ty.isConstPtr() and dest_is_mut and !len0) break :src_array_ptr;
                if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :src_array_ptr;
                if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :src_array_ptr;

                const dst_elem_type = dest_info.pointee_type;
                switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src)) {
                    .ok => {},
                    .no_match => break :src_array_ptr,
                }

                switch (dest_info.size) {
                    .Slice => {
                        // *[N]T to []T
                        return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src);
                    },
                    .C => {
                        // *[N]T to [*c]T
                        return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                    },
                    .Many => {
                        // *[N]T to [*]T
                        // *[N:s]T to [*:s]T
                        // *[N:s]T to [*]T
                        if (dest_info.sentinel) |dst_sentinel| {
                            if (array_ty.sentinel()) |src_sentinel| {
                                if (src_sentinel.eql(dst_sentinel, dst_elem_type)) {
                                    return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                                }
                            }
                        } else {
                            return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                        }
                    },
                    .One => {},
                }
            }

            // coercion from C pointer
            if (inst_ty.isCPtr()) src_c_ptr: {
                // In this case we must add a safety check because the C pointer
                // could be null.
const src_elem_ty = inst_ty.childType();
                const dest_is_mut = dest_info.mutable;
                const dst_elem_type = dest_info.pointee_type;
                switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                    .ok => {},
                    .no_match => break :src_c_ptr,
                }
                // TODO add safety check for null pointer
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // coercion to C pointer
            if (dest_info.size == .C) {
                switch (inst_ty.zigTypeTag()) {
                    .Null => {
                        return sema.addConstant(dest_ty, Value.@"null");
                    },
                    .ComptimeInt => {
                        const addr = try sema.coerce(block, Type.usize, inst, inst_src);
                        return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
                    },
                    .Int => {
                        const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) {
                            .signed => Type.isize,
                            .unsigned => Type.usize,
                        };
                        const addr = try sema.coerce(block, ptr_size_ty, inst, inst_src);
                        return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
                    },
                    else => {},
                }
            }

            // cast from *T and [*]T to *anyopaque
            // but don't do it if the source type is a double pointer
            if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer and
                inst_ty.childType().zigTypeTag() != .Pointer)
            {
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }
        },
        .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) {
            .Float, .ComptimeFloat => float: {
                // Comptime-known float to integer, when there is no fraction.
                const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse break :float;

                if (val.floatHasFraction()) {
                    return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val, dest_ty });
                }
                const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) {
                    error.FloatCannotFit => {
                        return sema.fail(block, inst_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty });
                    },
                    else => |e| return e,
                };
                return try sema.addConstant(dest_ty, result_val);
            },
            .Int, .ComptimeInt => {
                if (try
sema.resolveDefinedValue(block, inst_src, inst)) |val| {
                    // comptime known integer to other number
                    if (!val.intFitsInType(dest_ty, target)) {
                        return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_ty, val });
                    }
                    return try sema.addConstant(dest_ty, val);
                }

                // integer widening
                const dst_info = dest_ty.intInfo(target);
                const src_info = inst_ty.intInfo(target);
                if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
                    // small enough unsigned ints can get casted to large enough signed ints
                    (dst_info.signedness == .signed and dst_info.bits > src_info.bits))
                {
                    try sema.requireRuntimeBlock(block, inst_src);
                    return block.addTyOp(.intcast, dest_ty, inst);
                }
            },
            else => {},
        },
        .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) {
            .ComptimeFloat => {
                const val = try sema.resolveConstValue(block, inst_src, inst);
                const result_val = try val.floatCast(sema.arena, dest_ty);
                return try sema.addConstant(dest_ty, result_val);
            },
            .Float => {
                if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
                    // Comptime float cast must round-trip without loss.
                    const result_val = try val.floatCast(sema.arena, dest_ty);
                    if (!val.eql(result_val, dest_ty)) {
                        return sema.fail(
                            block,
                            inst_src,
                            "type {} cannot represent float value {}",
                            .{ dest_ty, val },
                        );
                    }
                    return try sema.addConstant(dest_ty, result_val);
                }

                // float widening
                const src_bits = inst_ty.floatBits(target);
                const dst_bits = dest_ty.floatBits(target);
                if (dst_bits >= src_bits) {
                    try sema.requireRuntimeBlock(block, inst_src);
                    return block.addTyOp(.fpext, dest_ty, inst);
                }
            },
            .Int, .ComptimeInt => int: {
                const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse break :int;
                const result_val = try val.intToFloat(sema.arena, dest_ty, target);
                // TODO implement this compile error
                //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
                //if (!int_again_val.eql(val, inst_ty)) {
                //    return sema.fail(
                //        block,
                //        inst_src,
                //        "type {} cannot represent integer value {}",
                //        .{ dest_ty, val },
                //    );
//}
                return try sema.addConstant(dest_ty, result_val);
            },
            else => {},
        },
        .Enum => switch (inst_ty.zigTypeTag()) {
            .EnumLiteral => {
                // enum literal to enum
                const val = try sema.resolveConstValue(block, inst_src, inst);
                const bytes = val.castTag(.enum_literal).?.data;
                const field_index = dest_ty.enumFieldIndex(bytes) orelse {
                    const msg = msg: {
                        const msg = try sema.errMsg(
                            block,
                            inst_src,
                            "enum '{}' has no field named '{s}'",
                            .{ dest_ty, bytes },
                        );
                        errdefer msg.destroy(sema.gpa);
                        try sema.mod.errNoteNonLazy(
                            dest_ty.declSrcLoc(),
                            msg,
                            "enum declared here",
                            .{},
                        );
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                };
                return sema.addConstant(
                    dest_ty,
                    try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)),
                );
            },
            .Union => blk: {
                // union to its own tag type
                const union_tag_ty = inst_ty.unionTagType() orelse break :blk;
                if (union_tag_ty.eql(dest_ty)) {
                    return sema.unionToTag(block, inst_ty, inst, inst_src);
                }
            },
            else => {},
        },
        .ErrorUnion => {
            // T to E!T or E to E!T
            return sema.wrapErrorUnion(block, dest_ty, inst, inst_src);
        },
        .Union => switch (inst_ty.zigTypeTag()) {
            .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
            else => {},
        },
        .Array => switch (inst_ty.zigTypeTag()) {
            .Vector => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
            else => {},
        },
        .Vector => switch (inst_ty.zigTypeTag()) {
            .Array => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
            else => {},
        },
        else => {},
    }

    return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
}

/// Result of an in-memory coercion legality check.
const InMemoryCoercionResult = enum {
    ok,
    no_match,
};

/// If pointers have the same representation in runtime memory, a bitcast AIR instruction
/// may be used for the coercion.
/// * `const` attribute can be gained
/// * `volatile` attribute can be gained
/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut
/// * alignment can be decreased
/// * bit offset attributes must match exactly
/// * `*`/`[*]` must match exactly, but `[*c]` matches either one
/// * sentinel-terminated pointers can coerce into `[*]`
/// TODO improve this function to report recursive compile errors like it does in stage1.
/// look at the function types_match_const_cast_only
fn coerceInMemoryAllowed(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_is_mut: bool,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) CompileError!InMemoryCoercionResult {
    if (dest_ty.eql(src_ty))
        return .ok;

    // Pointers / Pointer-like Optionals
    var dest_buf: Type.Payload.ElemType = undefined;
    var src_buf: Type.Payload.ElemType = undefined;
    if (try sema.typePtrOrOptionalPtrTy(block, dest_ty, &dest_buf, dest_src)) |dest_ptr_ty| {
        if (try sema.typePtrOrOptionalPtrTy(block, src_ty, &src_buf, src_src)) |src_ptr_ty| {
            return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src);
        }
    }

    // Slices
    if (dest_ty.isSlice() and src_ty.isSlice()) {
        return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src);
    }

    const dest_tag = dest_ty.zigTypeTag();
    const src_tag = src_ty.zigTypeTag();

    // Functions
    if (dest_tag == .Fn and src_tag == .Fn) {
        return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src);
    }

    // Error Unions
    if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
        const child = try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionPayload(), src_ty.errorUnionPayload(), dest_is_mut, target, dest_src, src_src);
        if (child == .no_match) {
            return child;
        }
        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(),
src_ty.errorUnionSet(), dest_is_mut, target, dest_src, src_src);
    }

    // Error Sets
    if (dest_tag == .ErrorSet and src_tag == .ErrorSet) {
        return try sema.coerceInMemoryAllowedErrorSets(dest_ty, src_ty);
    }

    // TODO: arrays
    // TODO: non-pointer-like optionals
    // TODO: vectors

    return .no_match;
}

/// Whether every error in `src_ty` is also present in `dest_ty`, so the two
/// error sets share an in-memory representation. May force resolution of
/// inferred error sets.
fn coerceInMemoryAllowedErrorSets(
    sema: *Sema,
    dest_ty: Type,
    src_ty: Type,
) !InMemoryCoercionResult {
    // Coercion to `anyerror`. Note that this check can return false negatives
    // in case the error sets did not get resolved.
    if (dest_ty.isAnyError()) {
        return .ok;
    }

    // If both are inferred error sets of functions, and
    // the dest includes the source function, the coercion is OK.
    // This check is important because it works without forcing a full resolution
    // of inferred error sets.
    if (src_ty.castTag(.error_set_inferred)) |src_payload| {
        if (dest_ty.castTag(.error_set_inferred)) |dst_payload| {
            const src_func = src_payload.data.func;
            const dst_func = dst_payload.data.func;

            if (src_func == dst_func or dst_payload.data.inferred_error_sets.contains(src_payload.data)) {
                return .ok;
            }
            return .no_match;
        }
    }

    if (dest_ty.castTag(.error_set_inferred)) |payload| {
        try sema.resolveInferredErrorSet(payload.data);
        // isAnyError might have changed from a false negative to a true positive after resolution.
        if (dest_ty.isAnyError()) {
            return .ok;
        }
    }

    switch (src_ty.tag()) {
        .error_set_inferred => {
            const src_data = src_ty.castTag(.error_set_inferred).?.data;

            try sema.resolveInferredErrorSet(src_data);
            // src anyerror status might have changed after the resolution.
            if (src_ty.isAnyError()) {
                // dest_ty.isAnyError() == true is already checked for at this point.
return .no_match;
            }

            var it = src_data.errors.keyIterator();
            while (it.next()) |name_ptr| {
                if (!dest_ty.errorSetHasField(name_ptr.*)) {
                    return .no_match;
                }
            }

            return .ok;
        },
        .error_set_single => {
            const name = src_ty.castTag(.error_set_single).?.data;
            if (dest_ty.errorSetHasField(name)) {
                return .ok;
            }
        },
        .error_set_merged => {
            const names = src_ty.castTag(.error_set_merged).?.data.keys();
            for (names) |name| {
                if (!dest_ty.errorSetHasField(name)) {
                    return .no_match;
                }
            }

            return .ok;
        },
        .error_set => {
            const names = src_ty.castTag(.error_set).?.data.names.keys();
            for (names) |name| {
                if (!dest_ty.errorSetHasField(name)) {
                    return .no_match;
                }
            }

            return .ok;
        },
        .anyerror => switch (dest_ty.tag()) {
            .error_set_inferred => return .no_match, // Caught by dest.isAnyError() above.
            .error_set_single, .error_set_merged, .error_set => {},
            .anyerror => unreachable, // Filtered out above.
            else => unreachable,
        },
        else => unreachable,
    }

    return .no_match;
}

/// Whether the function type `src_ty` has the same in-memory representation
/// as `dest_ty`: same var-args/generic-ness, compatible return type and
/// parameters, and identical calling convention.
fn coerceInMemoryAllowedFns(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const dest_info = dest_ty.fnInfo();
    const src_info = src_ty.fnInfo();

    if (dest_info.is_var_args != src_info.is_var_args) {
        return .no_match;
    }

    if (dest_info.is_generic != src_info.is_generic) {
        return .no_match;
    }

    if (!src_info.return_type.isNoReturn()) {
        const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type, src_info.return_type, false, target, dest_src, src_src);
        if (rt == .no_match) {
            return rt;
        }
    }

    if (dest_info.param_types.len != src_info.param_types.len) {
        return .no_match;
    }

    for (dest_info.param_types) |dest_param_ty, i| {
        const src_param_ty = src_info.param_types[i];

        if (dest_info.comptime_params[i] != src_info.comptime_params[i]) {
            return .no_match;
        }

        // TODO: noalias

        // Note: Cast direction is reversed here.
// Parameters are contravariant: the source parameter type must be able to
        // accept what the destination parameter type accepts.
        const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src);
        if (param == .no_match) {
            return param;
        }
    }

    if (dest_info.cc != src_info.cc) {
        return .no_match;
    }

    return .ok;
}

/// Reports whether pointer type `src_ptr_ty` is in-memory coercible to pointer
/// type `dest_ptr_ty`. `dest_ty`/`src_ty` are the outer (possibly
/// optional-wrapped) types, consulted only for the allowzero check. Verifies
/// pointee type, address space, sentinel, pointer size, const/volatile
/// qualifiers, allowzero, bit-pointer layout, and alignment.
fn coerceInMemoryAllowedPtrs(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_ptr_ty: Type,
    src_ptr_ty: Type,
    dest_is_mut: bool,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const dest_info = dest_ptr_ty.ptrInfo().data;
    const src_info = src_ptr_ty.ptrInfo().data;

    const child = try sema.coerceInMemoryAllowed(block, dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target, dest_src, src_src);
    if (child == .no_match) {
        return child;
    }

    if (dest_info.@"addrspace" != src_info.@"addrspace") {
        return .no_match;
    }

    // The destination may drop the sentinel; C pointers are compatible either way.
    const ok_sent = dest_info.sentinel == null or src_info.size == .C or
        (src_info.sentinel != null and
        dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type));
    if (!ok_sent) {
        return .no_match;
    }

    const ok_ptr_size = src_info.size == dest_info.size or
        src_info.size == .C or dest_info.size == .C;
    if (!ok_ptr_size) {
        return .no_match;
    }

    // It is OK to drop mutability or add volatile, not the reverse.
    const ok_cv_qualifiers =
        (src_info.mutable or !dest_info.mutable) and
        (!src_info.@"volatile" or dest_info.@"volatile");
    if (!ok_cv_qualifiers) {
        return .no_match;
    }

    const dest_allow_zero = dest_ty.ptrAllowsZero();
    const src_allow_zero = src_ty.ptrAllowsZero();

    // A mutable allowzero destination must not gain a zero value it could
    // write back through; const destinations may add allowzero.
    const ok_allows_zero = (dest_allow_zero and
        (src_allow_zero or !dest_is_mut)) or
        (!dest_allow_zero and !src_allow_zero);
    if (!ok_allows_zero) {
        return .no_match;
    }

    // Bit-pointer layout must match exactly.
    if (src_info.host_size != dest_info.host_size or
        src_info.bit_offset != dest_info.bit_offset)
    {
        return .no_match;
    }

    // If both pointers have alignment 0, it means they both want ABI alignment.
    // In this case, if they share the same child type, no need to resolve
    // pointee type alignment. Otherwise both pointee types must have their alignment
    // resolved and we compare the alignment numerically.
    if (src_info.@"align" != 0 or dest_info.@"align" != 0 or
        !dest_info.pointee_type.eql(src_info.pointee_type))
    {
        const src_align = src_info.@"align";
        const dest_align = dest_info.@"align";

        // Destination may not require stricter alignment than the source provides.
        if (dest_align > src_align) {
            return .no_match;
        }
    }

    return .ok;
}

/// Coerces an argument passed to a var-args parameter. Comptime-only literal
/// types are rejected because they have no runtime representation.
fn coerceVarArgParam(
    sema: *Sema,
    block: *Block,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const inst_ty = sema.typeOf(inst);
    switch (inst_ty.zigTypeTag()) {
        .ComptimeInt, .ComptimeFloat => return sema.fail(block, inst_src, "integer and float literals in var args function must be casted", .{}),
        else => {},
    }
    // TODO implement more of this function.
    return inst;
}

// TODO migrate callsites to use storePtr2 instead.
fn storePtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    uncasted_operand: Air.Inst.Ref,
) CompileError!void {
    return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, .store);
}

/// Stores `uncasted_operand` through `ptr`, first coercing it to the pointer's
/// element type. Performs the store at comptime when the pointer is
/// comptime-mutable and the operand value is known; otherwise emits `air_tag`
/// as a runtime store instruction.
fn storePtr2(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
    uncasted_operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    air_tag: Air.Inst.Tag,
) !void {
    const ptr_ty = sema.typeOf(ptr);
    if (ptr_ty.isConstPtr())
        return sema.fail(block, src, "cannot assign to constant", .{});

    const elem_ty = ptr_ty.childType();
    const operand = try sema.coerce(block, elem_ty, uncasted_operand, operand_src);
    // Storing to a type with only one possible value is a no-op.
    if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null)
        return;

    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand);
        const operand_val = maybe_operand_val orelse {
            // Operand is runtime-known; a comptime-mutable pointer target
            // would then be an error.
            try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
            break :rs operand_src;
        };
        if (ptr_val.isComptimeMutablePtr()) {
            // Both pointer and operand are comptime-known: store at comptime.
            try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
            return;
        } else break :rs ptr_src;
    } else ptr_src;

    // TODO handle if the element type requires comptime

    try sema.requireRuntimeBlock(block, runtime_src);
    try
sema.resolveTypeLayout(block, src, elem_ty); _ = try block.addBinOp(air_tag, ptr, operand); } /// Call when you have Value objects rather than Air instructions, and you want to /// assert the store must be done at comptime. fn storePtrVal( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, operand_val: Value, operand_ty: Type, ) !void { var kit = try beginComptimePtrMutation(sema, block, src, ptr_val); try sema.checkComptimeVarStore(block, src, kit.decl_ref_mut); const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, kit.ty); const arena = kit.beginArena(sema.gpa); defer kit.finishArena(); kit.val.* = try bitcasted_val.copy(arena); } const ComptimePtrMutationKit = struct { decl_ref_mut: Value.Payload.DeclRefMut.Data, val: *Value, ty: Type, decl_arena: std.heap.ArenaAllocator = undefined, fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator { self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa); return self.decl_arena.allocator(); } fn finishArena(self: *ComptimePtrMutationKit) void { self.decl_ref_mut.decl.value_arena.?.* = self.decl_arena.state; self.decl_arena = undefined; } }; fn beginComptimePtrMutation( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ) CompileError!ComptimePtrMutationKit { switch (ptr_val.tag()) { .decl_ref_mut => { const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .val = &decl_ref_mut.decl.val, .ty = decl_ref_mut.decl.ty, }; }, .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; var parent = try beginComptimePtrMutation(sema, block, src, elem_ptr.array_ptr); const elem_ty = parent.ty.childType(); switch (parent.val.tag()) { .undef => { // An array has been initialized to undefined at comptime and now we // are for the first time setting an element. We must change the representation // of the array from `undef` to `array`. 
const arena = parent.beginArena(sema.gpa);
                    defer parent.finishArena();

                    const array_len_including_sentinel =
                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                    mem.set(Value, elems, Value.undef);

                    parent.val.* = try Value.Tag.array.create(arena, elems);

                    return ComptimePtrMutationKit{
                        .decl_ref_mut = parent.decl_ref_mut,
                        .val = &elems[elem_ptr.index],
                        .ty = elem_ty,
                    };
                },
                .bytes => {
                    // An array is memory-optimized to store a slice of bytes, but we are about
                    // to modify an individual field and the representation has to change.
                    // If we wanted to avoid this, there would need to be special detection
                    // elsewhere to identify when writing a value to an array element that is stored
                    // using the `bytes` tag, and handle it without making a call to this function.
                    const arena = parent.beginArena(sema.gpa);
                    defer parent.finishArena();

                    const bytes = parent.val.castTag(.bytes).?.data;
                    assert(bytes.len == parent.ty.arrayLenIncludingSentinel());
                    const elems = try arena.alloc(Value, bytes.len);
                    // Expand each byte into a standalone integer Value.
                    for (elems) |*elem, i| {
                        elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
                    }

                    parent.val.* = try Value.Tag.array.create(arena, elems);

                    return ComptimePtrMutationKit{
                        .decl_ref_mut = parent.decl_ref_mut,
                        .val = &elems[elem_ptr.index],
                        .ty = elem_ty,
                    };
                },
                .repeated => {
                    // An array is memory-optimized to store only a single element value, and
                    // that value is understood to be the same for the entire length of the array.
                    // However, now we want to modify an individual field and so the
                    // representation has to change. If we wanted to avoid this, there would
                    // need to be special detection elsewhere to identify when writing a value to an
                    // array element that is stored using the `repeated` tag, and handle it
                    // without making a call to this function.
                    const arena = parent.beginArena(sema.gpa);
                    defer parent.finishArena();

                    const repeated_val = try parent.val.castTag(.repeated).?.data.copy(arena);
                    const array_len_including_sentinel =
                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                    mem.set(Value, elems, repeated_val);

                    parent.val.* = try Value.Tag.array.create(arena, elems);

                    return ComptimePtrMutationKit{
                        .decl_ref_mut = parent.decl_ref_mut,
                        .val = &elems[elem_ptr.index],
                        .ty = elem_ty,
                    };
                },
                // Already in expanded form; point directly at the element.
                .array => return ComptimePtrMutationKit{
                    .decl_ref_mut = parent.decl_ref_mut,
                    .val = &parent.val.castTag(.array).?.data[elem_ptr.index],
                    .ty = elem_ty,
                },

                else => unreachable,
            }
        },
        .field_ptr => {
            const field_ptr = ptr_val.castTag(.field_ptr).?.data;
            var parent = try beginComptimePtrMutation(sema, block, src, field_ptr.container_ptr);
            const field_index = @intCast(u32, field_ptr.field_index);
            const field_ty = parent.ty.structFieldType(field_index);
            switch (parent.val.tag()) {
                .undef => {
                    // A struct or union has been initialized to undefined at comptime and now we
                    // are for the first time setting a field. We must change the representation
                    // of the struct/union from `undef` to `struct`/`union`.
                    const arena = parent.beginArena(sema.gpa);
                    defer parent.finishArena();

                    switch (parent.ty.zigTypeTag()) {
                        .Struct => {
                            const fields = try arena.alloc(Value, parent.ty.structFieldCount());
                            mem.set(Value, fields, Value.undef);

                            parent.val.* = try Value.Tag.@"struct".create(arena, fields);

                            return ComptimePtrMutationKit{
                                .decl_ref_mut = parent.decl_ref_mut,
                                .val = &fields[field_index],
                                .ty = field_ty,
                            };
                        },
                        .Union => {
                            // The union's active tag becomes the field being set;
                            // its payload starts as undef.
                            const payload = try arena.create(Value.Payload.Union);
                            payload.* = .{ .data = .{
                                .tag = try Value.Tag.enum_field_index.create(arena, field_index),
                                .val = Value.undef,
                            } };

                            parent.val.* = Value.initPayload(&payload.base);

                            return ComptimePtrMutationKit{
                                .decl_ref_mut = parent.decl_ref_mut,
                                .val = &payload.data.val,
                                .ty = field_ty,
                            };
                        },
                        else => unreachable,
                    }
                },
                // Already in expanded form; point directly at the field.
                .@"struct" => return ComptimePtrMutationKit{
                    .decl_ref_mut = parent.decl_ref_mut,
                    .val = &parent.val.castTag(.@"struct").?.data[field_index],
                    .ty = field_ty,
                },
                .@"union" => {
                    // We need to set the active field of the union.
                    const arena = parent.beginArena(sema.gpa);
                    defer parent.finishArena();

                    const payload = &parent.val.castTag(.@"union").?.data;
                    payload.tag = try Value.Tag.enum_field_index.create(arena, field_index);

                    return ComptimePtrMutationKit{
                        .decl_ref_mut = parent.decl_ref_mut,
                        .val = &payload.val,
                        .ty = field_ty,
                    };
                },

                else => unreachable,
            }
        },
        .eu_payload_ptr => return sema.fail(block, src, "TODO comptime store to eu_payload_ptr", .{}),
        .opt_payload_ptr => return sema.fail(block, src, "TODO comptime store opt_payload_ptr", .{}),
        .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already
        else => unreachable,
    }
}

/// Handle returned by `beginComptimePtrLoad`: a read-only view of the value a
/// comptime pointer addresses.
const ComptimePtrLoadKit = struct {
    /// The Value of the Decl that owns this memory.
    root_val: Value,
    /// Parent Value.
    val: Value,
    /// The Type of the parent Value.
    ty: Type,
    /// The starting byte offset of `val` from `root_val`.
    byte_offset: usize,
    /// Whether the `root_val` could be mutated by further
    /// semantic analysis and a copy must be performed.
is_mutable: bool,
};

const ComptimePtrLoadError = CompileError || error{
    RuntimeLoad,
};

/// Recursively walks a comptime pointer value down to the value it addresses.
/// Returns `error.RuntimeLoad` when the pointer refers to memory that is only
/// available at runtime (runtime variables, functions, raw integer addresses).
fn beginComptimePtrLoad(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
) ComptimePtrLoadError!ComptimePtrLoadKit {
    const target = sema.mod.getTarget();
    switch (ptr_val.tag()) {
        .decl_ref => {
            const decl = ptr_val.castTag(.decl_ref).?.data;
            const decl_val = try decl.value();
            // Runtime variables cannot be loaded at comptime.
            if (decl_val.tag() == .variable) return error.RuntimeLoad;
            return ComptimePtrLoadKit{
                .root_val = decl_val,
                .val = decl_val,
                .ty = decl.ty,
                .byte_offset = 0,
                .is_mutable = false,
            };
        },
        .decl_ref_mut => {
            const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
            const decl_val = try decl.value();
            if (decl_val.tag() == .variable) return error.RuntimeLoad;
            return ComptimePtrLoadKit{
                .root_val = decl_val,
                .val = decl_val,
                .ty = decl.ty,
                .byte_offset = 0,
                .is_mutable = true,
            };
        },
        .elem_ptr => {
            const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
            const parent = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr);
            const elem_ty = parent.ty.childType();
            const elem_size = elem_ty.abiSize(target);
            return ComptimePtrLoadKit{
                .root_val = parent.root_val,
                .val = try parent.val.elemValue(sema.arena, elem_ptr.index),
                .ty = elem_ty,
                .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + elem_size * elem_ptr.index),
                .is_mutable = parent.is_mutable,
            };
        },
        .field_ptr => {
            const field_ptr = ptr_val.castTag(.field_ptr).?.data;
            const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr);
            const field_index = @intCast(u32, field_ptr.field_index);
            // Layout must be resolved before field offsets can be queried.
            try sema.resolveTypeLayout(block, src, parent.ty);
            const field_offset = parent.ty.structFieldOffset(field_index, target);
            return ComptimePtrLoadKit{
                .root_val = parent.root_val,
                .val = try parent.val.fieldValue(sema.arena, field_index),
                .ty = parent.ty.structFieldType(field_index),
                .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset),
                .is_mutable = parent.is_mutable,
            };
        },
        .eu_payload_ptr => {
            const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
            const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr);
            return ComptimePtrLoadKit{
                .root_val = parent.root_val,
                .val = parent.val.castTag(.eu_payload).?.data,
                .ty = parent.ty.errorUnionPayload(),
                // NOTE(review): byte_offset is left undefined for payload
                // pointers here — presumably callers do not use it in this
                // case; confirm before relying on it.
                .byte_offset = undefined,
                .is_mutable = parent.is_mutable,
            };
        },
        .opt_payload_ptr => {
            const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
            const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr);
            return ComptimePtrLoadKit{
                .root_val = parent.root_val,
                .val = parent.val.castTag(.opt_payload).?.data,
                .ty = try parent.ty.optionalChildAlloc(sema.arena),
                .byte_offset = undefined,
                .is_mutable = parent.is_mutable,
            };
        },

        // Raw addresses and runtime-only values cannot be dereferenced at comptime.
        .zero,
        .one,
        .int_u64,
        .int_i64,
        .int_big_positive,
        .int_big_negative,
        .variable,
        .extern_fn,
        .function,
        => return error.RuntimeLoad,

        else => unreachable,
    }
}

/// Semantic analysis of a bitcast. Folds to a constant when the operand value
/// is comptime-known; otherwise emits a runtime bitcast instruction.
fn bitCast(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // TODO validate the type size and other compile errors
    if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
        const old_ty = sema.typeOf(inst);
        const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty);
        return sema.addConstant(dest_ty, result_val);
    }
    try sema.requireRuntimeBlock(block, inst_src);
    return block.addBitCast(dest_ty, inst);
}

/// Reinterprets comptime value `val` of type `old_ty` as a value of type
/// `new_ty` by serializing it to a byte buffer and deserializing.
pub fn bitCastVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    old_ty: Type,
    new_ty: Type,
) !Value {
    if (old_ty.eql(new_ty)) return val;

    // For types with well-defined memory layouts, we serialize them a byte buffer,
    // then deserialize to the new type.
const target = sema.mod.getTarget();
    const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
    const buffer = try sema.gpa.alloc(u8, abi_size);
    defer sema.gpa.free(buffer);
    val.writeToMemory(old_ty, target, buffer);
    return Value.readFromMemory(new_ty, target, buffer, sema.arena);
}

/// Coerces a pointer-to-array into a slice type. Comptime-known pointers fold
/// to a comptime slice value; otherwise an `array_to_slice` instruction is
/// emitted.
fn coerceArrayPtrToSlice(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
        const ptr_array_ty = sema.typeOf(inst);
        const array_ty = ptr_array_ty.childType();
        // Build a comptime slice value: the array pointer plus its length.
        const slice_val = try Value.Tag.slice.create(sema.arena, .{
            .ptr = val,
            .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
        });
        return sema.addConstant(dest_ty, slice_val);
    }
    try sema.requireRuntimeBlock(block, inst_src);
    return block.addTyOp(.array_to_slice, dest_ty, inst);
}

/// Coerces between pointer types already determined to be in-memory
/// compatible; at runtime this amounts to a bitcast.
fn coerceCompatiblePtrs(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
        // The comptime Value representation is compatible with both types.
        return sema.addConstant(dest_ty, val);
    }
    try sema.requireRuntimeBlock(block, inst_src);
    return sema.bitCast(block, dest_ty, inst, inst_src);
}

/// Coerces an enum value to a tagged union whose tag type accepts it. At
/// comptime this succeeds only when the selected union field has exactly one
/// possible value; at runtime only when all union fields are zero-bit.
/// Emits compile errors otherwise.
fn coerceEnumToUnion(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    union_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const inst_ty = sema.typeOf(inst);

    const tag_ty = union_ty.unionTagType() orelse {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
                union_ty, inst_ty,
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{});
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    };

    const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
    if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
        // Comptime-known tag: locate the corresponding union field.
        const union_obj = union_ty.cast(Type.Payload.Union).?.data;
        const field_index = union_obj.tag_ty.enumTagFieldIndex(val) orelse {
            const msg = msg: {
                const msg = try sema.errMsg(block, inst_src, "union {} has no tag with value {}", .{
                    union_ty, val,
                });
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        };
        const field = union_obj.fields.values()[field_index];
        const field_ty = try sema.resolveTypeFields(block, inst_src, field.ty);
        // The coercion only supplies a tag, so the payload must be implied:
        // the field type must have exactly one possible value.
        const opv = (try sema.typeHasOnePossibleValue(block, inst_src, field_ty)) orelse {
            // TODO resolve the field names and include in the error message,
            // also instead of 'union declared here' make it 'field "foo" declared here'.
            const msg = msg: {
                const msg = try sema.errMsg(block, inst_src, "coercion to union {} must initialize {} field", .{
                    union_ty, field_ty,
                });
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        };

        return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
            .tag = val,
            .val = opv,
        }));
    }

    try sema.requireRuntimeBlock(block, inst_src);

    if (tag_ty.isNonexhaustiveEnum()) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} from non-exhaustive enum", .{
                union_ty,
            });
            errdefer msg.destroy(sema.gpa);
            try sema.addDeclaredHereNote(msg, tag_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    // If the union has all fields 0 bits, the union value is just the enum value.
    if (union_ty.unionHasAllZeroBitFieldTypes()) {
        return block.addBitCast(union_ty, enum_tag);
    }

    // TODO resolve the field names and add a hint that says "field 'foo' has type 'bar'"
    // instead of the "union declared here" hint
    const msg = msg: {
        const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} which has non-void fields", .{
            union_ty,
        });
        errdefer msg.destroy(sema.gpa);
        try sema.addDeclaredHereNote(msg, union_ty);
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

// Coerces vectors/arrays which have the same in-memory layout. This can be used for
// both coercing from and to vectors.
fn coerceVectorInMemory(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    dest_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const inst_ty = sema.typeOf(inst);
    const inst_len = inst_ty.arrayLen();
    const dest_len = dest_ty.arrayLen();

    if (dest_len != inst_len) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
                dest_ty, inst_ty,
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
            try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    // Element types must be in-memory coercible for the cast to be a pure
    // reinterpretation.
    const target = sema.mod.getTarget();
    const dest_elem_ty = dest_ty.childType();
    const inst_elem_ty = inst_ty.childType();
    const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src);
    if (in_memory_result != .ok) {
        // TODO recursive error notes for coerceInMemoryAllowed failure
        return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
    }

    if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| {
        // These types share the same comptime value representation.
        return sema.addConstant(dest_ty, inst_val);
    }

    try sema.requireRuntimeBlock(block, inst_src);
    return block.addBitCast(dest_ty, inst);
}

/// Analyzes use of a Decl as an r-value: takes a reference to it and loads
/// through it. Constant results are memoized in `decl_val_table`.
fn analyzeDeclVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl: *Decl,
) CompileError!Air.Inst.Ref {
    if (sema.decl_val_table.get(decl)) |result| {
        return result;
    }
    const decl_ref = try sema.analyzeDeclRef(decl);
    const result = try sema.analyzeLoad(block, src, decl_ref, src);
    if (Air.refToIndex(result)) |index| {
        // Only cache when the load folded to a constant.
        if (sema.air_instructions.items(.tag)[index] == .constant) {
            try sema.decl_val_table.put(sema.gpa, decl, result);
        }
    }
    return result;
}

/// Ensures `decl` has been semantically analyzed; on failure, marks the
/// current owner (function or Decl) as a dependency failure and propagates
/// the error.
fn ensureDeclAnalyzed(sema: *Sema, decl: *Decl) CompileError!void {
    sema.mod.ensureDeclAnalyzed(decl) catch |err| {
        if (sema.owner_func) |owner_func| {
            owner_func.state = .dependency_failure;
        } else {
            sema.owner_decl.analysis = .dependency_failure;
        }
        return err;
    };
}

/// Produces a constant pointer to `decl`, registering a dependency from the
/// current owner Decl and ensuring `decl` is analyzed first. Runtime
/// variables yield a pointer reflecting the variable's mutability and
/// alignment; all other Decls yield a const pointer.
fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
    try sema.mod.declareDeclDependency(sema.owner_decl, decl);
    try sema.ensureDeclAnalyzed(decl);

    const decl_tv = try decl.typedValue();
    if (decl_tv.val.castTag(.variable)) |payload| {
        const variable = payload.data;
        // An align value of .null_value means "no explicit alignment" (0 = ABI).
        const alignment: u32 = if (decl.align_val.tag() == .null_value)
            0
        else
            @intCast(u32, decl.align_val.toUnsignedInt());
        const ty = try Type.ptr(sema.arena, .{
            .pointee_type = decl_tv.ty,
            .mutable = variable.is_mutable,
            .@"addrspace" = decl.@"addrspace",
            .@"align" = alignment,
        });
        return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl));
    }
    return sema.addConstant(
        try Type.ptr(sema.arena, .{
            .pointee_type = decl_tv.ty,
            .mutable = false,
            .@"addrspace" = decl.@"addrspace",
        }),
        try Value.Tag.decl_ref.create(sema.arena, decl),
    );
}

/// Takes the address of an operand. Comptime-known operands become references
/// to anonymous Decls; runtime operands are spilled to a stack allocation and
/// the pointer is reinterpreted as const.
fn analyzeRef(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const operand_ty = sema.typeOf(operand);

    if (try sema.resolveMaybeUndefVal(block, src, operand)) |val| {
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        return
sema.analyzeDeclRef(try anon_decl.finish(
            try operand_ty.copy(anon_decl.arena()),
            try val.copy(anon_decl.arena()),
        ));
    }

    try sema.requireRuntimeBlock(block, src);
    const address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local);
    // The result type is a const pointer, but the stack slot itself is mutable
    // so the operand can be stored into it.
    const ptr_type = try Type.ptr(sema.arena, .{
        .pointee_type = operand_ty,
        .mutable = false,
        .@"addrspace" = address_space,
    });
    const mut_ptr_type = try Type.ptr(sema.arena, .{
        .pointee_type = operand_ty,
        .@"addrspace" = address_space,
    });
    const alloc = try block.addTy(.alloc, mut_ptr_type);
    try sema.storePtr(block, src, alloc, operand);

    // TODO: Replace with sema.coerce when that supports adding pointer constness.
    return sema.bitCast(block, ptr_type, alloc, src);
}

/// Loads the element a pointer points at. Folds to a constant when the pointer
/// can be dereferenced at comptime; otherwise emits a `load` instruction.
fn analyzeLoad(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const ptr_ty = sema.typeOf(ptr);
    const elem_ty = switch (ptr_ty.zigTypeTag()) {
        .Pointer => ptr_ty.childType(),
        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}),
    };
    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
        if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
            return sema.addConstant(elem_ty, elem_val);
        }
    }

    try sema.requireRuntimeBlock(block, src);
    return block.addTyOp(.load, elem_ty, ptr);
}

/// Extracts the pointer part of a slice.
fn analyzeSlicePtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice: Air.Inst.Ref,
    slice_ty: Type,
    slice_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
    const result_ty = slice_ty.slicePtrFieldType(buf);

    if (try sema.resolveMaybeUndefVal(block, slice_src, slice)) |val| {
        if (val.isUndef()) return sema.addConstUndef(result_ty);
        return sema.addConstant(result_ty, val.slicePtr());
    }
    try sema.requireRuntimeBlock(block, src);
    return block.addTyOp(.slice_ptr, result_ty, slice);
}

/// Extracts the length part of a slice.
fn analyzeSliceLen(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    if (try sema.resolveMaybeUndefVal(block, src, slice_inst)) |slice_val| {
        if (slice_val.isUndef()) {
            return sema.addConstUndef(Type.usize);
        }
        return sema.addIntUnsigned(Type.usize, slice_val.sliceLen());
    }
    try sema.requireRuntimeBlock(block, src);
    return block.addTyOp(.slice_len, Type.usize, slice_inst);
}

/// Analyzes an is-null test on an optional. `invert_logic` selects the
/// `!= null` form. Undefined operands fold to an undefined bool.
fn analyzeIsNull(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    invert_logic: bool,
) CompileError!Air.Inst.Ref {
    const result_ty = Type.initTag(.bool);
    if (try sema.resolveMaybeUndefVal(block, src, operand)) |opt_val| {
        if (opt_val.isUndef()) {
            return sema.addConstUndef(result_ty);
        }
        const is_null = opt_val.isNull();
        const bool_value = if (invert_logic) !is_null else is_null;
        if (bool_value) {
            return Air.Inst.Ref.bool_true;
        } else {
            return Air.Inst.Ref.bool_false;
        }
    }
    try sema.requireRuntimeBlock(block, src);
    const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null;
    return block.addUnOp(air_tag, operand);
}

/// Analyzes whether an operand holds a non-error value. Non-error-union,
/// non-error-set operands fold to `true`; bare error sets fold to `false`.
fn analyzeIsNonErr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const operand_ty = sema.typeOf(operand);
    const ot = operand_ty.zigTypeTag();
    if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true;
    if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
    assert(ot == .ErrorUnion);
    const result_ty = Type.initTag(.bool);
    if (try sema.resolveMaybeUndefVal(block, src, operand)) |err_union| {
        if (err_union.isUndef()) {
            return sema.addConstUndef(result_ty);
        }
        if (err_union.getError() == null) {
            return Air.Inst.Ref.bool_true;
        } else {
            return Air.Inst.Ref.bool_false;
        }
    }
    try sema.requireRuntimeBlock(block, src);
    return block.addUnOp(.is_non_err, operand);
}

/// Analyzes a slice expression. `ptr_ptr` is a pointer to the operand being
/// sliced (for arrays, a pointer to the array; otherwise a double pointer).
/// `uncasted_end_opt` and `sentinel_opt` may be `.none` when omitted in source.
fn analyzeSlice(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_ptr: Air.Inst.Ref,
    uncasted_start: Air.Inst.Ref,
    uncasted_end_opt: Air.Inst.Ref,
    sentinel_opt: Air.Inst.Ref,
    sentinel_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const ptr_src = src; // TODO better source location
    const start_src = src; // TODO better source location
const end_src = src; // TODO better source location
    // Slice expressions can operate on a variable whose type is an array. This requires
    // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
    const ptr_ptr_ty = sema.typeOf(ptr_ptr);
    const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) {
        .Pointer => ptr_ptr_ty.elemType(),
        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty}),
    };

    var array_ty = ptr_ptr_child_ty;
    var slice_ty = ptr_ptr_ty;
    var ptr_or_slice = ptr_ptr;
    var elem_ty = ptr_ptr_child_ty.childType();
    switch (ptr_ptr_child_ty.zigTypeTag()) {
        .Array => {},
        .Pointer => switch (ptr_ptr_child_ty.ptrSize()) {
            .One => {
                const double_child_ty = ptr_ptr_child_ty.childType();
                if (double_child_ty.zigTypeTag() == .Array) {
                    // Pointer-to-pointer-to-array: load the inner pointer and
                    // slice through it.
                    ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                    slice_ty = ptr_ptr_child_ty;
                    array_ty = double_child_ty;
                    elem_ty = double_child_ty.childType();
                } else {
                    return sema.fail(block, ptr_src, "slice of single-item pointer", .{});
                }
            },
            .Many, .C => {
                ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                slice_ty = ptr_ptr_child_ty;
                array_ty = ptr_ptr_child_ty;
                elem_ty = ptr_ptr_child_ty.childType();
            },
            .Slice => {
                ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                slice_ty = ptr_ptr_child_ty;
                array_ty = ptr_ptr_child_ty;
                elem_ty = ptr_ptr_child_ty.childType();
            },
        },
        else => return sema.fail(block, ptr_src, "slice of non-array type '{}'", .{ptr_ptr_child_ty}),
    }

    const ptr = if (slice_ty.isSlice())
        try sema.analyzeSlicePtr(block, src, ptr_or_slice, slice_ty, ptr_src)
    else
        ptr_or_slice;

    const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
    const new_ptr = try analyzePtrArithmetic(sema, block, src, ptr, start, .ptr_add, ptr_src, start_src);

    // When the end index is omitted, it defaults to the array length (or the
    // slice length); bare pointers require an explicit end.
    const end = e: {
        if (uncasted_end_opt != .none) {
            break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
        }

        if (array_ty.zigTypeTag() == .Array) {
            break :e try sema.addConstant(
                Type.usize,
                try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
            );
        } else if (slice_ty.isSlice()) {
            break :e try sema.analyzeSliceLen(block, src, ptr_or_slice);
        }
        return sema.fail(block, end_src, "slice of pointer must include end value", .{});
    };

    // An explicit sentinel must be a comptime-known value of the element type.
    const slice_sentinel = if (sentinel_opt != .none) blk: {
        const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
        break :blk try sema.resolveConstValue(block, sentinel_src, casted);
    } else null;

    const new_len = try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src);
    const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);

    const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data;
    const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C;

    if (opt_new_len_val) |new_len_val| {
        // Comptime-known length: the result is a pointer to an array.
        const new_len_int = new_len_val.toUnsignedInt();

        // A full-length slice of an array keeps the array's own sentinel.
        const sentinel = if (array_ty.zigTypeTag() == .Array and new_len_int == array_ty.arrayLen())
            array_ty.sentinel()
        else
            slice_sentinel;

        const return_ty = try Type.ptr(sema.arena, .{
            .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty),
            .sentinel = null,
            .@"align" = new_ptr_ty_info.@"align",
            .@"addrspace" = new_ptr_ty_info.@"addrspace",
            .mutable = new_ptr_ty_info.mutable,
            .@"allowzero" = new_allowzero,
            .@"volatile" = new_ptr_ty_info.@"volatile",
            .size = .One,
        });

        const opt_new_ptr_val = try sema.resolveMaybeUndefVal(block, ptr_src, new_ptr);
        const new_ptr_val = opt_new_ptr_val orelse {
            return block.addBitCast(return_ty, new_ptr);
        };

        if (!new_ptr_val.isUndef()) {
            return sema.addConstant(return_ty, new_ptr_val);
        }

        // Special case: @as([]i32, undefined)[x..x]
        if (new_len_int == 0) {
            return sema.addConstUndef(return_ty);
        }

        return sema.fail(block, ptr_src, "non-zero length slice of undefined pointer", .{});
    }

    // Runtime-known length: the result is a slice.
    const return_ty = try Type.ptr(sema.arena, .{
        .pointee_type = elem_ty,
        .sentinel = slice_sentinel,
        .@"align" = new_ptr_ty_info.@"align",
        .@"addrspace" = new_ptr_ty_info.@"addrspace",
        .mutable = new_ptr_ty_info.mutable,
        .@"allowzero" = new_allowzero,
        .@"volatile" = new_ptr_ty_info.@"volatile",
        .size = .Slice,
    });
    try sema.requireRuntimeBlock(block, src);
    return block.addInst(.{
        .tag = .slice,
        .data = .{ .ty_pl = .{
            .ty = try sema.addType(return_ty),
            .payload = try sema.addExtra(Air.Bin{
                .lhs = new_ptr,
                .rhs = new_len,
            }),
        } },
    });
}

/// Asserts that lhs and rhs types are both numeric.
fn cmpNumeric(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);

    assert(lhs_ty.isNumeric());
    assert(rhs_ty.isNumeric());

    const lhs_ty_tag = lhs_ty.zigTypeTag();
    const rhs_ty_tag = rhs_ty.zigTypeTag();

    if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) {
        if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) {
            return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{
                lhs_ty.arrayLen(), rhs_ty.arrayLen(),
            });
        }
        return sema.fail(block, src, "TODO implement support for vectors in cmpNumeric", .{});
    } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) {
        return sema.fail(block, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{
            lhs_ty, rhs_ty,
        });
    }

    // Fold entirely at comptime when both operands are known; otherwise
    // remember which operand forced the comparison to runtime.
    const runtime_src: LazySrcLoc = src: {
        if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
            if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
                if (lhs_val.isUndef() or rhs_val.isUndef()) {
                    return sema.addConstUndef(Type.initTag(.bool));
                }
                if (Value.compareHetero(lhs_val, op, rhs_val)) {
                    return Air.Inst.Ref.bool_true;
                } else {
                    return Air.Inst.Ref.bool_false;
                }
            } else {
                break :src rhs_src;
            }
        } else {
            break :src lhs_src;
        }
    };

    // TODO handle comparisons against lazy zero values
    // Some values can be compared against zero without being runtime known or without forcing
    // a full resolution of their value, for example
// `@sizeOf(@Frame(function))` is known to
    // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
    // of this function if we don't need to.
    try sema.requireRuntimeBlock(block, runtime_src);

    // For floats, emit a float comparison instruction.
    const lhs_is_float = switch (lhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    const rhs_is_float = switch (rhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    const target = sema.mod.getTarget();
    if (lhs_is_float and rhs_is_float) {
        // Implicit cast the smaller one to the larger one.
        const dest_ty = x: {
            if (lhs_ty_tag == .ComptimeFloat) {
                break :x rhs_ty;
            } else if (rhs_ty_tag == .ComptimeFloat) {
                break :x lhs_ty;
            }
            if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) {
                break :x lhs_ty;
            } else {
                break :x rhs_ty;
            }
        };
        const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
        const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
        return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
    }

    // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
    // For mixed signed and unsigned integers, implicit cast both operands to a signed
    // integer with + 1 bit.
    // For mixed floats and integers, extract the integer part from the float, cast that to
    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
    // add/subtract 1.
    const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
        lhs_val.compareWithZero(.lt)
    else
        (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt());
    const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
        rhs_val.compareWithZero(.lt)
    else
        (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt());
    const dest_int_is_signed = lhs_is_signed or rhs_is_signed;

    var dest_float_type: ?Type = null;

    // Compute the number of two's-complement bits needed to represent the lhs.
    var lhs_bits: usize = undefined;
    if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
        if (lhs_val.isUndef())
            return sema.addConstUndef(Type.initTag(.bool));
        const is_unsigned = if (lhs_is_float) x: {
            var bigint_space: Value.BigIntSpace = undefined;
            var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(sema.gpa);
            defer bigint.deinit();
            const zcmp = lhs_val.orderAgainstZero();
            if (lhs_val.floatHasFraction()) {
                // A fractional float can never equal an integer.
                switch (op) {
                    .eq => return Air.Inst.Ref.bool_false,
                    .neq => return Air.Inst.Ref.bool_true,
                    else => {},
                }
                // Round away from zero so ordering comparisons stay correct.
                if (zcmp == .lt) {
                    try bigint.addScalar(bigint.toConst(), -1);
                } else {
                    try bigint.addScalar(bigint.toConst(), 1);
                }
            }
            lhs_bits = bigint.toConst().bitCountTwosComp();
            break :x (zcmp != .lt);
        } else x: {
            lhs_bits = lhs_val.intBitCountTwosComp();
            break :x (lhs_val.orderAgainstZero() != .lt);
        };
        lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
    } else if (lhs_is_float) {
        dest_float_type = lhs_ty;
    } else {
        const int_info = lhs_ty.intInfo(target);
        lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed);
    }

    // Same computation for the rhs.
    var rhs_bits: usize = undefined;
    if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
        if (rhs_val.isUndef())
            return sema.addConstUndef(Type.initTag(.bool));
        const is_unsigned = if (rhs_is_float) x: {
            var bigint_space: Value.BigIntSpace = undefined;
            var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(sema.gpa);
            defer bigint.deinit();
            const zcmp = rhs_val.orderAgainstZero();
            if (rhs_val.floatHasFraction()) {
                switch (op) {
                    .eq => return Air.Inst.Ref.bool_false,
                    .neq => return Air.Inst.Ref.bool_true,
                    else => {},
                }
                if (zcmp == .lt) {
                    try bigint.addScalar(bigint.toConst(), -1);
                } else {
                    try bigint.addScalar(bigint.toConst(), 1);
                }
            }
            rhs_bits = bigint.toConst().bitCountTwosComp();
            break :x (zcmp != .lt);
        } else x: {
            rhs_bits = rhs_val.intBitCountTwosComp();
            break :x (rhs_val.orderAgainstZero() != .lt);
        };
        rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
    } else if (rhs_is_float) {
        dest_float_type = rhs_ty;
    } else {
        const int_info = rhs_ty.intInfo(target);
        rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed);
    }

    // Pick the common comparison type: a float if either side stays a float,
    // otherwise an integer wide enough for both operands.
    const dest_ty = if (dest_float_type) |ft| ft else blk: {
        const max_bits = std.math.max(lhs_bits, rhs_bits);
        const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) {
            error.Overflow => return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}),
        };
        const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
        break :blk try Module.makeIntType(sema.arena, signedness, casted_bits);
    };
    const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);

    return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}

/// Wraps a payload value into an optional type.
fn wrapOptional(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
        return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val));
    }

    try sema.requireRuntimeBlock(block, inst_src);
    return block.addTyOp(.wrap_optional, dest_ty, inst);
}

/// Wraps a value into an error union type.
fn wrapErrorUnion(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const inst_ty = sema.typeOf(inst);
    const dest_err_set_ty = dest_ty.errorUnionSet();
    const dest_payload_ty = dest_ty.errorUnionPayload();
    if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
        if
(inst_ty.zigTypeTag() != .ErrorSet) { _ = try sema.coerce(block, dest_payload_ty, inst, inst_src); return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); } switch (dest_err_set_ty.tag()) { .anyerror => {}, .error_set_single => ok: { const expected_name = val.castTag(.@"error").?.data.name; const n = dest_err_set_ty.castTag(.error_set_single).?.data; if (mem.eql(u8, expected_name, n)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .error_set => { const expected_name = val.castTag(.@"error").?.data.name; const error_set = dest_err_set_ty.castTag(.error_set).?.data; if (!error_set.names.contains(expected_name)) { return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); } }, .error_set_inferred => ok: { const expected_name = val.castTag(.@"error").?.data.name; const data = dest_err_set_ty.castTag(.error_set_inferred).?.data; try sema.resolveInferredErrorSet(data); if (data.is_anyerror) break :ok; if (data.errors.contains(expected_name)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .error_set_merged => { const expected_name = val.castTag(.@"error").?.data.name; const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; if (!error_set.contains(expected_name)) { return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); } }, else => unreachable, } return sema.addConstant(dest_ty, val); } try sema.requireRuntimeBlock(block, inst_src); // we are coercing from E to E!T if (inst_ty.zigTypeTag() == .ErrorSet) { var coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src); return block.addTyOp(.wrap_errunion_err, dest_ty, coerced); } else { var coerced = try sema.coerce(block, dest_payload_ty, inst, inst_src); return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); } } fn unionToTag( sema: *Sema, block: *Block, enum_ty: Type, un: Air.Inst.Ref, un_src: LazySrcLoc, ) 
!Air.Inst.Ref {
    // If the enum has only one possible value, the tag is known without looking at the union.
    if ((try sema.typeHasOnePossibleValue(block, un_src, enum_ty))) |opv| {
        return sema.addConstant(enum_ty, opv);
    }
    if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| {
        return sema.addConstant(enum_ty, un_val.unionTag());
    }
    try sema.requireRuntimeBlock(block, un_src);
    return block.addTyOp(.get_union_tag, enum_ty, un);
}

/// Computes the common ("peer") result type of a set of instructions, e.g. the
/// branches of an `if`/`switch`. Walks the candidates keeping a running `chosen`
/// type, widening ints/floats, absorbing comptime types into runtime ones, and
/// tracking whether any operand was `null` (which makes the result optional).
/// Emits an "incompatible types" error with per-operand notes when no peer exists.
fn resolvePeerTypes(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    instructions: []Air.Inst.Ref,
    candidate_srcs: Module.PeerTypeCandidateSrc,
) !Type {
    if (instructions.len == 0)
        return Type.initTag(.noreturn);

    if (instructions.len == 1)
        return sema.typeOf(instructions[0]);

    const target = sema.mod.getTarget();

    var chosen = instructions[0];
    var any_are_null = false;
    var chosen_i: usize = 0;
    for (instructions[1..]) |candidate, candidate_i| {
        const candidate_ty = sema.typeOf(candidate);
        const chosen_ty = sema.typeOf(chosen);
        if (candidate_ty.eql(chosen_ty)) continue;
        const candidate_ty_tag = candidate_ty.zigTypeTag();
        const chosen_ty_tag = chosen_ty.zigTypeTag();

        // First, decide based on the candidate's type category whether the
        // candidate replaces `chosen`, is absorbed by it, or neither.
        switch (candidate_ty_tag) {
            .NoReturn, .Undefined => continue,
            .Null => {
                any_are_null = true;
                continue;
            },
            .Int => switch (chosen_ty_tag) {
                .ComptimeInt => {
                    chosen = candidate;
                    chosen_i = candidate_i + 1;
                    continue;
                },
                .Int => {
                    // Same signedness: keep the wider of the two.
                    if (chosen_ty.isSignedInt() == candidate_ty.isSignedInt()) {
                        if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) {
                            chosen = candidate;
                            chosen_i = candidate_i + 1;
                        }
                        continue;
                    }
                },
                .Pointer => if (chosen_ty.ptrSize() == .C) continue,
                else => {},
            },
            .ComptimeInt => switch (chosen_ty_tag) {
                .Int, .Float, .ComptimeFloat => continue,
                .Pointer => if (chosen_ty.ptrSize() == .C) continue,
                else => {},
            },
            .Float => switch (chosen_ty_tag) {
                .Float => {
                    if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
                        chosen = candidate;
                        chosen_i = candidate_i + 1;
                    }
                    continue;
                },
                .ComptimeFloat, .ComptimeInt => {
                    chosen = candidate;
                    chosen_i = candidate_i + 1;
                    continue;
                },
                else => {},
            },
            .ComptimeFloat => switch (chosen_ty_tag) {
                .Float => continue,
                .ComptimeInt => {
                    chosen = candidate;
                    chosen_i = candidate_i + 1;
                    continue;
                },
                else => {},
            },
            .Enum => switch (chosen_ty_tag) {
                .EnumLiteral => {
                    chosen = candidate;
                    chosen_i = candidate_i + 1;
                    continue;
                },
                else => {},
            },
            .EnumLiteral => switch (chosen_ty_tag) {
                .Enum => continue,
                else => {},
            },
            .Pointer => {
                // C pointers are compatible with integers and non-slice pointers.
                if (candidate_ty.ptrSize() == .C) {
                    if (chosen_ty_tag == .Int or chosen_ty_tag == .ComptimeInt) {
                        chosen = candidate;
                        chosen_i = candidate_i + 1;
                        continue;
                    }
                    if (chosen_ty_tag == .Pointer and chosen_ty.ptrSize() != .Slice) {
                        continue;
                    }
                }
            },
            .Optional => {
                var opt_child_buf: Type.Payload.ElemType = undefined;
                const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf);
                if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, chosen_ty, false, target, src, src)) == .ok) {
                    chosen = candidate;
                    chosen_i = candidate_i + 1;
                    continue;
                }
                if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) {
                    any_are_null = true;
                    continue;
                }
            },
            else => {},
        }

        // Second chance: decide based on the currently-chosen type's category.
        switch (chosen_ty_tag) {
            .NoReturn, .Undefined => {
                chosen = candidate;
                chosen_i = candidate_i + 1;
                continue;
            },
            .Null => {
                any_are_null = true;
                chosen = candidate;
                chosen_i = candidate_i + 1;
                continue;
            },
            .Optional => {
                var opt_child_buf: Type.Payload.ElemType = undefined;
                const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf);
                if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) {
                    continue;
                }
                if ((try sema.coerceInMemoryAllowed(block, candidate_ty, opt_child_ty, false, target, src, src)) == .ok) {
                    any_are_null = true;
                    chosen = candidate;
                    chosen_i = candidate_i + 1;
                    continue;
                }
            },
            else => {},
        }

        // At this point, we hit a compile error. We need to recover
        // the source locations.
        const chosen_src = candidate_srcs.resolve(
            sema.gpa,
            block.src_decl,
            chosen_i,
        );
        const candidate_src = candidate_srcs.resolve(
            sema.gpa,
            block.src_decl,
            candidate_i + 1,
        );

        const msg = msg: {
            const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ chosen_ty, candidate_ty });
            errdefer msg.destroy(sema.gpa);

            if (chosen_src) |src_loc|
                try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty});

            if (candidate_src) |src_loc|
                try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty});

            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    const chosen_ty = sema.typeOf(chosen);

    // A `null` operand anywhere makes the peer type optional.
    if (any_are_null) {
        switch (chosen_ty.zigTypeTag()) {
            .Null, .Optional => return chosen_ty,
            else => return Type.optional(sema.arena, chosen_ty),
        }
    }

    return chosen_ty;
}

/// Recursively resolves the memory layout of `ty`: structs and unions get their
/// field layouts computed; arrays, optionals and error unions recurse into their
/// child/payload types. Other type categories need no layout resolution.
pub fn resolveTypeLayout(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    switch (ty.zigTypeTag()) {
        .Struct => return sema.resolveStructLayout(block, src, ty),
        .Union => return sema.resolveUnionLayout(block, src, ty),
        .Array => {
            const elem_ty = ty.childType();
            return sema.resolveTypeLayout(block, src, elem_ty);
        },
        .Optional => {
            var buf: Type.Payload.ElemType = undefined;
            const payload_ty = ty.optionalChild(&buf);
            return sema.resolveTypeLayout(block, src, payload_ty);
        },
        .ErrorUnion => {
            const payload_ty = ty.errorUnionPayload();
            return sema.resolveTypeLayout(block, src, payload_ty);
        },
        else => {},
    }
}

/// Resolves the layout of a struct type, using the `status` field as a
/// wip-marker to detect dependency cycles ("struct depends on itself").
fn resolveStructLayout(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const resolved_ty = try sema.resolveTypeFields(block, src, ty);
    const struct_obj = resolved_ty.castTag(.@"struct").?.data;
    switch (struct_obj.status) {
        .none, .have_field_types => {},
        .field_types_wip, .layout_wip => {
            return sema.fail(block, src, "struct {} depends on itself", .{ty});
        },
        .have_layout => return,
    }
    struct_obj.status = .layout_wip;
    for (struct_obj.fields.values()) |field| {
        try sema.resolveTypeLayout(block, src, field.ty);
    }
    struct_obj.status = .have_layout;
}

/// Resolves the layout of a union type, mirroring `resolveStructLayout`:
/// `status` acts as a cycle detector while field types are resolved.
fn resolveUnionLayout(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const resolved_ty = try sema.resolveTypeFields(block, src, ty);
    const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
    switch (union_obj.status) {
        .none, .have_field_types => {},
        .field_types_wip, .layout_wip => {
            return sema.fail(block, src, "union {} depends on itself", .{ty});
        },
        .have_layout => return,
    }
    union_obj.status = .layout_wip;
    for (union_obj.fields.values()) |field| {
        try sema.resolveTypeLayout(block, src, field.ty);
    }
    union_obj.status = .have_layout;
}

/// Recursively resolves everything codegen needs for `ty`: pointers, arrays,
/// optionals and error unions recurse into their child types; structs and
/// unions get full layout resolution.
fn resolveTypeForCodegen(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    switch (ty.zigTypeTag()) {
        .Pointer => {
            const child_ty = try sema.resolveTypeFields(block, src, ty.childType());
            return resolveTypeForCodegen(sema, block, src, child_ty);
        },
        .Struct => return resolveStructLayout(sema, block, src, ty),
        .Union => return resolveUnionLayout(sema, block, src, ty),
        .Array => return resolveTypeForCodegen(sema, block, src, ty.childType()),
        .Optional => {
            var buf: Type.Payload.ElemType = undefined;
            return resolveTypeForCodegen(sema, block, src, ty.optionalChild(&buf));
        },
        .ErrorUnion => return resolveTypeForCodegen(sema, block, src, ty.errorUnionPayload()),
        else => {},
    }
}

/// Ensures the field types of a struct or union are resolved, running
/// `semaStructFields` / `semaUnionFields` on first demand. `status` guards
/// against self-dependency cycles. The `std.builtin` convenience type tags
/// forward to `resolveBuiltinTypeFields`. All other types are returned as-is.
fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type {
    switch (ty.tag()) {
        .@"struct" => {
            const struct_obj = ty.castTag(.@"struct").?.data;
            switch (struct_obj.status) {
                .none => {},
                .field_types_wip => {
                    return sema.fail(block, src, "struct {} depends on itself", .{ty});
                },
                .have_field_types, .have_layout, .layout_wip => return ty,
            }
            struct_obj.status = .field_types_wip;
            try semaStructFields(sema.mod, struct_obj);

            // A fieldless struct trivially has its layout already.
            if (struct_obj.fields.count() == 0) {
                struct_obj.status = .have_layout;
            } else {
                struct_obj.status = .have_field_types;
            }
            return ty;
        },
        .type_info => return sema.resolveBuiltinTypeFields(block, src, "TypeInfo"),
        .extern_options => return sema.resolveBuiltinTypeFields(block, src, "ExternOptions"),
        .export_options => return sema.resolveBuiltinTypeFields(block, src, "ExportOptions"),
        .atomic_order => return sema.resolveBuiltinTypeFields(block, src, "AtomicOrder"),
        .atomic_rmw_op => return sema.resolveBuiltinTypeFields(block, src, "AtomicRmwOp"),
        .calling_convention => return sema.resolveBuiltinTypeFields(block, src, "CallingConvention"),
        .address_space => return sema.resolveBuiltinTypeFields(block, src, "AddressSpace"),
        .float_mode => return sema.resolveBuiltinTypeFields(block, src, "FloatMode"),
        .reduce_op => return sema.resolveBuiltinTypeFields(block, src, "ReduceOp"),
        .call_options => return sema.resolveBuiltinTypeFields(block, src, "CallOptions"),
        .prefetch_options => return sema.resolveBuiltinTypeFields(block, src, "PrefetchOptions"),
        .@"union", .union_tagged => {
            const union_obj = ty.cast(Type.Payload.Union).?.data;
            switch (union_obj.status) {
                .none => {},
                .field_types_wip => {
                    return sema.fail(block, src, "union {} depends on itself", .{ty});
                },
                .have_field_types, .have_layout, .layout_wip => return ty,
            }
            union_obj.status = .field_types_wip;
            try semaUnionFields(sema.mod, union_obj);
            union_obj.status = .have_field_types;
            return ty;
        },
        else => return ty,
    }
}

/// Looks up the named type in `std.builtin` and resolves its fields.
fn resolveBuiltinTypeFields(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    name: []const u8,
) CompileError!Type {
    const resolved_ty = try sema.getBuiltinType(block, src, name);
    return sema.resolveTypeFields(block, src, resolved_ty);
}

fn resolveInferredErrorSet(sema: *Sema, inferred_error_set: *Module.Fn.InferredErrorSet) CompileError!void {
    // Ensuring that a particular decl is analyzed does not necessarily mean that
    // its error set is inferred, so traverse all of them to get the complete
    // picture.
    // Note: We want to skip re-resolving the current function, as recursion
    // doesn't change the error set. We can just check for state == .in_progress for this.
    // TODO: Is that correct?
    if (inferred_error_set.is_resolved) {
        return;
    }

    // Merge every dependency's inferred error set into this one.
    var it = inferred_error_set.inferred_error_sets.keyIterator();
    while (it.next()) |other_error_set_ptr| {
        const func = other_error_set_ptr.*.func;
        const decl = func.*.owner_decl;

        if (func.*.state == .in_progress) {
            // Recursion, doesn't alter current error set, keep going.
            continue;
        }

        // Analyze the dependency first so its error set is populated.
        try sema.ensureDeclAnalyzed(decl); // To ensure that all dependencies are properly added to the set.
        try sema.resolveInferredErrorSet(other_error_set_ptr.*);

        var error_it = other_error_set_ptr.*.errors.keyIterator();
        while (error_it.next()) |entry| {
            try inferred_error_set.errors.put(sema.gpa, entry.*, {});
        }
        if (other_error_set_ptr.*.is_anyerror)
            inferred_error_set.is_anyerror = true;
    }

    inferred_error_set.is_resolved = true;
}

/// Decodes the `struct_decl` ZIR instruction for `struct_obj` and populates its
/// field map: field names (duplicated into the decl arena so they outlive the
/// ZIR), field types, and optional alignment / default-value expressions.
/// Runs the struct body at comptime in a temporary `Sema` + `Block`.
fn semaStructFields(
    mod: *Module,
    struct_obj: *Module.Struct,
) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = mod.gpa;
    const decl = struct_obj.owner_decl;
    const zir = struct_obj.namespace.file_scope.zir;
    const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
    assert(extended.opcode == .struct_decl);
    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = .{ .node_offset = struct_obj.node_offset };
    extra_index += @boolToInt(small.has_src_node);

    // Optional trailing lengths in the ZIR extra array, in declaration order.
    const body_len = if (small.has_body_len) blk: {
        const body_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) decls_len: {
        const decls_len = zir.extra[extra_index];
        extra_index += 1;
        break :decls_len decls_len;
    } else 0;

    // Skip over decls.
    var decls_it = zir.declIteratorInner(extra_index, decls_len);
    while (decls_it.next()) |_| {}
    extra_index = decls_it.extra_index;

    const body = zir.extra[extra_index..][0..body_len];
    if (fields_len == 0) {
        assert(body.len == 0);
        return;
    }
    extra_index += body.len;

    // Field names/types/values must live in the decl's arena; scratch analysis
    // state lives in a temporary arena torn down at the end of this call.
    var decl_arena = decl.value_arena.?.promote(gpa);
    defer decl.value_arena.?.* = decl_arena.state;
    const decl_arena_allocator = decl_arena.allocator();

    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .perm_arena = decl_arena_allocator,
        .code = zir,
        .owner_decl = decl,
        .func = null,
        .fn_ret_ty = Type.void,
        .owner_func = null,
    };
    defer sema.deinit();

    var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
    defer wip_captures.deinit();

    var block_scope: Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl,
        .namespace = &struct_obj.namespace,
        .wip_capture_scope = wip_captures.scope,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer {
        assert(block_scope.instructions.items.len == 0);
        block_scope.params.deinit(gpa);
    }

    if (body.len != 0) {
        _ = try sema.analyzeBody(&block_scope, body);
    }

    try wip_captures.finalize();

    try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);

    // Per-field flags are packed 4 bits per field into u32 "bit bags" that
    // precede the per-field data in the extra array.
    const bits_per_field = 4;
    const fields_per_u32 = 32 / bits_per_field;
    const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
    var bit_bag_index: usize = extra_index;
    extra_index += bit_bags_count;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    while (field_i < fields_len) : (field_i += 1) {
        if (field_i % fields_per_u32 == 0) {
            cur_bit_bag = zir.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_align = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const has_default = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const is_comptime = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const unused = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;

        _ = unused;

        const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
        extra_index += 1;
        const field_type_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
        extra_index += 1;

        // This string needs to outlive the ZIR code.
        const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
        const field_ty: Type = if (field_type_ref == .none)
            Type.initTag(.noreturn)
        else
            // TODO: if we need to report an error here, use a source location
            // that points to this type expression rather than the struct.
            // But only resolve the source location if we need to emit a compile error.
            try sema.resolveType(&block_scope, src, field_type_ref);

        const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
        assert(!gop.found_existing);
        gop.value_ptr.* = .{
            .ty = try field_ty.copy(decl_arena_allocator),
            .abi_align = Value.initTag(.abi_align_default),
            .default_val = Value.initTag(.unreachable_value),
            .is_comptime = is_comptime,
            .offset = undefined,
        };

        if (has_align) {
            const align_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
            extra_index += 1;
            // TODO: if we need to report an error here, use a source location
            // that points to this alignment expression rather than the struct.
            // But only resolve the source location if we need to emit a compile error.
            const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val;
            gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator);
        }
        if (has_default) {
            const default_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
            extra_index += 1;
            const default_inst = sema.resolveInst(default_ref);
            // TODO: if we need to report an error here, use a source location
            // that points to this default value expression rather than the struct.
            // But only resolve the source location if we need to emit a compile error.
            const default_val = (try sema.resolveMaybeUndefVal(&block_scope, src, default_inst)) orelse
                return sema.failWithNeededComptime(&block_scope, src);
            gop.value_ptr.default_val = try default_val.copy(decl_arena_allocator);
        }
    }
}

/// Decodes the `union_decl` ZIR instruction for `union_obj`: resolves or
/// auto-generates the union's enum tag type, then populates the field map with
/// names, types, alignments and (for numbered enums) tag values.
fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = mod.gpa;
    const decl = union_obj.owner_decl;
    const zir = union_obj.namespace.file_scope.zir;
    const extended = zir.instructions.items(.data)[union_obj.zir_index].extended;
    assert(extended.opcode == .union_decl);
    const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = .{ .node_offset = union_obj.node_offset };
    extra_index += @boolToInt(small.has_src_node);

    const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
        const ty_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
        extra_index += 1;
        break :blk ty_ref;
    } else .none;

    const body_len = if (small.has_body_len) blk: {
        const body_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) decls_len: {
        const decls_len = zir.extra[extra_index];
        extra_index += 1;
        break :decls_len decls_len;
    } else 0;

    // Skip over decls.
    var decls_it = zir.declIteratorInner(extra_index, decls_len);
    while (decls_it.next()) |_| {}
    extra_index = decls_it.extra_index;

    const body = zir.extra[extra_index..][0..body_len];
    if (fields_len == 0) {
        assert(body.len == 0);
        return;
    }
    extra_index += body.len;

    // Permanent field data lives in the owning decl's arena; scratch analysis
    // state lives in a temporary arena destroyed at the end of this call.
    var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
    defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;
    const decl_arena_allocator = decl_arena.allocator();

    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .perm_arena = decl_arena_allocator,
        .code = zir,
        .owner_decl = decl,
        .func = null,
        .fn_ret_ty = Type.void,
        .owner_func = null,
    };
    defer sema.deinit();

    var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
    defer wip_captures.deinit();

    var block_scope: Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl,
        .namespace = &union_obj.namespace,
        .wip_capture_scope = wip_captures.scope,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer {
        assert(block_scope.instructions.items.len == 0);
        block_scope.params.deinit(gpa);
    }

    if (body.len != 0) {
        _ = try sema.analyzeBody(&block_scope, body);
    }

    try wip_captures.finalize();

    try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);

    // Determine the union's tag type: explicit enum, auto-generated numbered
    // enum (when an integer tag type was provided), or auto-generated simple enum.
    var int_tag_ty: Type = undefined;
    var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
    var enum_value_map: ?*Module.EnumNumbered.ValueMap = null;
    if (tag_type_ref != .none) {
        const provided_ty = try sema.resolveType(&block_scope, src, tag_type_ref);
        if (small.auto_enum_tag) {
            // The provided type is an integer type and we must construct the enum tag type here.
            int_tag_ty = provided_ty;
            union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, fields_len, provided_ty);
            enum_field_names = &union_obj.tag_ty.castTag(.enum_numbered).?.data.fields;
            enum_value_map = &union_obj.tag_ty.castTag(.enum_numbered).?.data.values;
        } else {
            // The provided type is the enum tag type.
            union_obj.tag_ty = provided_ty;
        }
    } else {
        // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis
        // purposes, we still auto-generate an enum tag type the same way. That the union is
        // untagged is represented by the Type tag (union vs union_tagged).
        union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, fields_len);
        enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields;
    }

    // Per-field flags are packed 4 bits per field into u32 "bit bags" that
    // precede the per-field data in the extra array.
    const bits_per_field = 4;
    const fields_per_u32 = 32 / bits_per_field;
    const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
    var bit_bag_index: usize = extra_index;
    extra_index += bit_bags_count;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    while (field_i < fields_len) : (field_i += 1) {
        if (field_i % fields_per_u32 == 0) {
            cur_bit_bag = zir.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_type = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const has_align = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const has_tag = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;
        const unused = @truncate(u1, cur_bit_bag) != 0;
        cur_bit_bag >>= 1;

        _ = unused;

        const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
        extra_index += 1;

        const field_type_ref: Zir.Inst.Ref = if (has_type) blk: {
            const field_type_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
            extra_index += 1;
            break :blk field_type_ref;
        } else .none;

        const align_ref: Zir.Inst.Ref = if (has_align) blk: {
            const align_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
            extra_index += 1;
            break :blk align_ref;
        } else .none;

        const tag_ref: Zir.Inst.Ref = if (has_tag) blk: {
            const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
            extra_index += 1;
            break :blk tag_ref;
        } else .none;

        // For auto-numbered enum tags, record this field's explicit tag value.
        if (enum_value_map) |map| {
            const tag_src = src; // TODO better source location
            const coerced = try sema.coerce(&block_scope, int_tag_ty, tag_ref, tag_src);
            const val = try sema.resolveConstValue(&block_scope, tag_src, coerced);
            map.putAssumeCapacityContext(val, {}, .{ .ty = int_tag_ty });
        }

        // This string needs to outlive the ZIR code.
        const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
        if (enum_field_names) |set| {
            set.putAssumeCapacity(field_name, {});
        }

        const field_ty: Type = if (!has_type)
            Type.void
        else if (field_type_ref == .none)
            Type.initTag(.noreturn)
        else
            // TODO: if we need to report an error here, use a source location
            // that points to this type expression rather than the union.
            // But only resolve the source location if we need to emit a compile error.
            try sema.resolveType(&block_scope, src, field_type_ref);

        const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
        assert(!gop.found_existing);
        gop.value_ptr.* = .{
            .ty = try field_ty.copy(decl_arena_allocator),
            .abi_align = Value.initTag(.abi_align_default),
        };

        if (align_ref != .none) {
            // TODO: if we need to report an error here, use a source location
            // that points to this alignment expression rather than the struct.
            // But only resolve the source location if we need to emit a compile error.
            const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val;
            gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator);
        } else {
            gop.value_ptr.abi_align = Value.initTag(.abi_align_default);
        }
    }
}

/// Creates an anonymous `enum_numbered` decl to serve as the tag type of a
/// union declared with an explicit integer tag type (`union(enum(T))`).
/// The caller fills in `fields` and `values`, which are pre-allocated in the
/// new decl's arena.
fn generateUnionTagTypeNumbered(
    sema: *Sema,
    block: *Block,
    fields_len: u32,
    int_ty: Type,
) !Type {
    const mod = sema.mod;

    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
    errdefer new_decl_arena.deinit();
    const new_decl_arena_allocator = new_decl_arena.allocator();

    const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered);
    const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered);
    enum_ty_payload.* = .{
        .base = .{ .tag = .enum_numbered },
        .data = enum_obj,
    };
    const enum_ty = Type.initPayload(&enum_ty_payload.base);
    const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
    // TODO better type name
    const new_decl = try mod.createAnonymousDecl(block, .{
        .ty = Type.type,
        .val = enum_val,
    });
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl);

    enum_obj.* = .{
        .owner_decl = new_decl,
        .tag_ty = int_ty,
        .fields = .{},
        .values = .{},
        .node_offset = 0,
    };
    // Here we pre-allocate the maps using the decl arena.
    try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
    try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ .ty = int_ty });

    try new_decl.finalizeNewArena(&new_decl_arena);
    return enum_ty;
}

/// Creates an anonymous `enum_simple` decl to serve as the auto-generated tag
/// type of a union with no explicit tag type. The caller fills in `fields`,
/// which is pre-allocated in the new decl's arena.
fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type {
    const mod = sema.mod;

    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
    errdefer new_decl_arena.deinit();
    const new_decl_arena_allocator = new_decl_arena.allocator();

    const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple);
    const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple);
    enum_ty_payload.* = .{
        .base = .{ .tag = .enum_simple },
        .data = enum_obj,
    };
    const enum_ty = Type.initPayload(&enum_ty_payload.base);
    const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
    // TODO better type name
    const new_decl = try mod.createAnonymousDecl(block, .{
        .ty = Type.type,
        .val = enum_val,
    });
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl);

    enum_obj.* = .{
        .owner_decl = new_decl,
        .fields = .{},
        .node_offset = 0,
    };
    // Here we pre-allocate the maps using the decl arena.
    try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);

    try new_decl.finalizeNewArena(&new_decl_arena);
    return enum_ty;
}

/// Looks up declaration `name` inside the `std.builtin` namespace and returns
/// a reference to its value. Asserts that the "std" package and the `builtin`
/// decl exist (`.?` / `catch unreachable`).
fn getBuiltin(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    name: []const u8,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const std_pkg = mod.main_pkg.table.get("std").?;
    const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
    const opt_builtin_inst = try sema.namespaceLookupRef(
        block,
        src,
        std_file.root_decl.?.src_namespace,
        "builtin",
    );
    const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst.?, src);
    const builtin_ty = try sema.analyzeAsType(block, src, builtin_inst);
    const opt_ty_decl = try sema.namespaceLookup(
        block,
        src,
        builtin_ty.getNamespace().?,
        name,
    );
    return sema.analyzeDeclVal(block, src, opt_ty_decl.?);
}

/// Like `getBuiltin`, but additionally interprets the looked-up value as a type.
fn getBuiltinType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    name: []const u8,
) CompileError!Type {
    const ty_inst = try sema.getBuiltin(block, src, name);
    return sema.analyzeAsType(block, src, ty_inst);
}

/// There is another implementation of this in `Type.onePossibleValue`. This one
/// in `Sema` is for calling during semantic analysis, and performs field resolution
/// to get the answer. The one in `Type` is for calling during codegen and asserts
/// that the types are already resolved.
fn typeHasOnePossibleValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
) CompileError!?Value {
    switch (ty.tag()) {
        // All of these types have more than one possible value (or are
        // runtime-only concepts), so there is no single comptime value.
        .f16,
        .f32,
        .f64,
        .f128,
        .c_longdouble,
        .comptime_int,
        .comptime_float,
        .u1,
        .u8,
        .i8,
        .u16,
        .i16,
        .u32,
        .i32,
        .u64,
        .i64,
        .u128,
        .i128,
        .usize,
        .isize,
        .c_short,
        .c_ushort,
        .c_int,
        .c_uint,
        .c_long,
        .c_ulong,
        .c_longlong,
        .c_ulonglong,
        .bool,
        .type,
        .anyerror,
        .fn_noreturn_no_args,
        .fn_void_no_args,
        .fn_naked_noreturn_no_args,
        .fn_ccc_void_no_args,
        .function,
        .single_const_pointer_to_comptime_int,
        .array_sentinel,
        .array_u8_sentinel_0,
        .const_slice_u8,
        .const_slice_u8_sentinel_0,
        .const_slice,
        .mut_slice,
        .anyopaque,
        .optional,
        .optional_single_mut_pointer,
        .optional_single_const_pointer,
        .enum_literal,
        .anyerror_void_error_union,
        .error_union,
        .error_set,
        .error_set_single,
        .error_set_inferred,
        .error_set_merged,
        .@"opaque",
        .var_args_param,
        .manyptr_u8,
        .manyptr_const_u8,
        .manyptr_const_u8_sentinel_0,
        .atomic_order,
        .atomic_rmw_op,
        .calling_convention,
        .address_space,
        .float_mode,
        .reduce_op,
        .call_options,
        .prefetch_options,
        .export_options,
        .extern_options,
        .type_info,
        .@"anyframe",
        .anyframe_T,
        .many_const_pointer,
        .many_mut_pointer,
        .c_const_pointer,
        .c_mut_pointer,
        .single_const_pointer,
        .single_mut_pointer,
        .pointer,
        .bound_fn,
        => return null,

        .@"struct" => {
            // A struct has exactly one value iff every field's type does.
            const resolved_ty = try sema.resolveTypeFields(block, src, ty);
            const s = resolved_ty.castTag(.@"struct").?.data;
            for (s.fields.values()) |value| {
                if ((try sema.typeHasOnePossibleValue(block, src, value.ty)) == null) {
                    return null;
                }
            }
            return Value.initTag(.empty_struct_value);
        },
        .enum_numbered => {
            // A single-field enum's only value is that field's tag value.
            const resolved_ty = try sema.resolveTypeFields(block, src, ty);
            const enum_obj = resolved_ty.castTag(.enum_numbered).?.data;
            if (enum_obj.fields.count() == 1) {
                if (enum_obj.values.count() == 0) {
                    return Value.zero; // auto-numbered
                } else {
                    return enum_obj.values.keys()[0];
                }
            } else {
                return null;
            }
        },
        .enum_full => {
            const resolved_ty = try sema.resolveTypeFields(block, src, ty);
            const enum_obj = resolved_ty.castTag(.enum_full).?.data;
            if (enum_obj.fields.count() == 1) {
                if (enum_obj.values.count() == 0) {
                    return Value.zero; // auto-numbered
                } else {
                    return enum_obj.values.keys()[0];
                }
            } else {
                return null;
            }
        },
        .enum_simple => {
            // Simple enums are always auto-numbered starting at zero.
            const resolved_ty = try sema.resolveTypeFields(block, src, ty);
            const enum_simple = resolved_ty.castTag(.enum_simple).?.data;
            if (enum_simple.fields.count() == 1) {
                return Value.zero;
            } else {
                return null;
            }
        },
        .enum_nonexhaustive => {
            // A non-exhaustive enum collapses to one value only when its tag
            // type has no runtime bits.
            const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
            if (!tag_ty.hasCodeGenBits()) {
                return Value.zero;
            } else {
                return null;
            }
        },
        .@"union", .union_tagged => {
            // A union has one value iff both the tag type and the single
            // field's type each have exactly one possible value.
            const resolved_ty = try sema.resolveTypeFields(block, src, ty);
            const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
            const tag_val = (try sema.typeHasOnePossibleValue(block, src, union_obj.tag_ty)) orelse
                return null;
            const only_field = union_obj.fields.values()[0];
            const val_val = (try sema.typeHasOnePossibleValue(block, src, only_field.ty)) orelse
                return null;
            // TODO make this not allocate. The function in `Type.onePossibleValue`
            // currently returns `empty_struct_value` and we should do that here too.
            return try Value.Tag.@"union".create(sema.arena, .{
                .tag = tag_val,
                .val = val_val,
            });
        },

        .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),
        .void => return Value.void,
        .noreturn => return Value.initTag(.unreachable_value),
        .@"null" => return Value.@"null",
        .@"undefined" => return Value.initTag(.undef),

        .int_unsigned, .int_signed => {
            // Only zero-bit integers (u0/i0) have exactly one value.
            if (ty.cast(Type.Payload.Bits).?.data == 0) {
                return Value.zero;
            } else {
                return null;
            }
        },
        .vector, .array, .array_u8 => {
            // Empty arrays, and arrays whose element type has one value,
            // themselves have exactly one value.
            if (ty.arrayLen() == 0)
                return Value.initTag(.empty_array);
            if ((try sema.typeHasOnePossibleValue(block, src, ty.elemType())) != null) {
                return Value.initTag(.the_only_possible_value);
            }
            return null;
        },

        .inferred_alloc_const => unreachable,
        .inferred_alloc_mut => unreachable,
        .generic_poison => return error.GenericPoison,
    }
}

/// Loads the AST of `block`'s file so that a compile error can be reported,
/// failing analysis if the tree cannot be loaded.
fn getAstTree(sema: *Sema, block: *Block) CompileError!*const std.zig.Ast {
    return block.namespace.file_scope.getTree(sema.gpa) catch |err| {
        log.err("unable to load AST to report compile error: {s}", .{@errorName(err)});
        return error.AnalysisFail;
    };
}

/// Computes the source location of field number `field_index` of the enum
/// declared at `node_offset` relative to `decl`. Cold path: only used when
/// emitting compile errors.
fn enumFieldSrcLoc(
    decl: *Decl,
    tree: std.zig.Ast,
    node_offset: i32,
    field_index: usize,
) LazySrcLoc {
    @setCold(true);
    const enum_node = decl.relativeToNodeIndex(node_offset);
    const node_tags = tree.nodes.items(.tag);
    var buffer: [2]std.zig.Ast.Node.Index = undefined;
    const container_decl = switch (node_tags[enum_node]) {
        .container_decl,
        .container_decl_trailing,
        => tree.containerDecl(enum_node),

        .container_decl_two,
        .container_decl_two_trailing,
        => tree.containerDeclTwo(&buffer, enum_node),

        .container_decl_arg,
        .container_decl_arg_trailing,
        => tree.containerDeclArg(enum_node),

        else => unreachable,
    };
    // Walk the container members, counting only fields (not decls) until the
    // requested field index is reached.
    var it_index: usize = 0;
    for (container_decl.ast.members) |member_node| {
        switch (node_tags[member_node]) {
            .container_field_init,
            .container_field_align,
            .container_field,
            => {
                if (it_index == field_index) {
                    return .{ .node_offset = decl.nodeIndexToRelative(member_node) };
                }
                it_index += 1;
            },

            else => continue,
        }
    } else unreachable;
}

/// Returns the type of the AIR instruction.
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
    return sema.getTmpAir().typeOf(inst);
}

/// Builds a temporary `Air` view over the instructions generated so far.
fn getTmpAir(sema: Sema) Air {
    return .{
        .instructions = sema.air_instructions.slice(),
        .extra = sema.air_extra.items,
        .values = sema.air_values.items,
    };
}

/// Returns an AIR reference for `ty`, using one of the statically-known type
/// refs when possible and otherwise appending a `const_ty` instruction.
pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
    switch (ty.tag()) {
        .u1 => return .u1_type,
        .u8 => return .u8_type,
        .i8 => return .i8_type,
        .u16 => return .u16_type,
        .i16 => return .i16_type,
        .u32 => return .u32_type,
        .i32 => return .i32_type,
        .u64 => return .u64_type,
        .i64 => return .i64_type,
        .u128 => return .u128_type,
        .i128 => return .i128_type,
        .usize => return .usize_type,
        .isize => return .isize_type,
        .c_short => return .c_short_type,
        .c_ushort => return .c_ushort_type,
        .c_int => return .c_int_type,
        .c_uint => return .c_uint_type,
        .c_long => return .c_long_type,
        .c_ulong => return .c_ulong_type,
        .c_longlong => return .c_longlong_type,
        .c_ulonglong => return .c_ulonglong_type,
        .c_longdouble => return .c_longdouble_type,
        .f16 => return .f16_type,
        .f32 => return .f32_type,
        .f64 => return .f64_type,
        .f128 => return .f128_type,
        .anyopaque => return .anyopaque_type,
        .bool => return .bool_type,
        .void => return .void_type,
        .type => return .type_type,
        .anyerror => return .anyerror_type,
        .comptime_int => return .comptime_int_type,
        .comptime_float => return .comptime_float_type,
        .noreturn => return .noreturn_type,
        .@"anyframe" => return .anyframe_type,
        .@"null" => return .null_type,
        .@"undefined" => return .undefined_type,
        .enum_literal => return .enum_literal_type,
        .atomic_order => return .atomic_order_type,
        .atomic_rmw_op => return .atomic_rmw_op_type,
        .calling_convention => return .calling_convention_type,
        .address_space => return .address_space_type,
        .float_mode => return .float_mode_type,
        .reduce_op => return .reduce_op_type,
        .call_options => return .call_options_type,
        .prefetch_options => return .prefetch_options_type,
        .export_options => return .export_options_type,
        .extern_options => return .extern_options_type,
        .type_info => return .type_info_type,
        .manyptr_u8 => return .manyptr_u8_type,
        .manyptr_const_u8 => return .manyptr_const_u8_type,
        .fn_noreturn_no_args => return .fn_noreturn_no_args_type,
        .fn_void_no_args => return .fn_void_no_args_type,
        .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type,
        .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type,
        .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type,
        .const_slice_u8 => return .const_slice_u8_type,
        .anyerror_void_error_union => return .anyerror_void_error_union_type,
        .generic_poison => return .generic_poison_type,
        else => {},
    }
    // Not a statically-known type: emit a const_ty instruction for it.
    try sema.air_instructions.append(sema.gpa, .{
        .tag = .const_ty,
        .data = .{ .ty = ty },
    });
    return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
}

/// Shorthand for adding a constant of integer type `ty` with value `int`.
fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref {
    return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int));
}

/// Shorthand for adding an undefined constant of type `ty`.
fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref {
    return sema.addConstant(ty, Value.initTag(.undef));
}

/// Appends a `constant` AIR instruction carrying `val` of type `ty` and
/// returns a reference to it.
pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const ty_inst = try sema.addType(ty);
    try sema.air_values.append(gpa, val);
    try sema.air_instructions.append(gpa, .{
        .tag = .constant,
        .data = .{ .ty_pl = .{
            .ty = ty_inst,
            .payload = @intCast(u32, sema.air_values.items.len - 1),
        } },
    });
    return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
}

/// Serializes `extra` field-by-field into the AIR extra array, growing it as
/// needed. Returns the index of the first appended element.
pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len);
    return addExtraAssumeCapacity(sema, extra);
}

/// Like `addExtra`, but asserts capacity was already reserved.
pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    const result = @intCast(u32, sema.air_extra.items.len);
    inline for (fields) |field| {
        // Each supported field type is stored as a raw u32 in the extra array.
        sema.air_extra.appendAssumeCapacity(switch (field.field_type) {
            u32 => @field(extra, field.name),
            Air.Inst.Ref => @enumToInt(@field(extra, field.name)),
            i32 => @bitCast(u32, @field(extra, field.name)),
            else => @compileError("bad field type"),
        });
    }
    return result;
}

/// Appends a slice of instruction refs to the extra array as raw u32s.
fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void {
    const coerced = @bitCast([]const u32, refs);
    sema.air_extra.appendSliceAssumeCapacity(coerced);
}

/// If `inst_index` is a `br` instruction, returns the block it breaks to;
/// otherwise returns null.
fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index {
    const air_datas = sema.air_instructions.items(.data);
    const air_tags = sema.air_instructions.items(.tag);
    switch (air_tags[inst_index]) {
        .br => return air_datas[inst_index].br.block_inst,
        else => return null,
    }
}

/// Returns whether `inst` resolves to a comptime-known value (including undef).
fn isComptimeKnown(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    inst: Air.Inst.Ref,
) !bool {
    return (try sema.resolveMaybeUndefVal(block, src, inst)) != null;
}

/// Creates an anonymous decl backing a comptime allocation of `var_type` and
/// returns a mutable decl-ref pointer constant to it.
fn analyzeComptimeAlloc(
    sema: *Sema,
    block: *Block,
    var_type: Type,
    alignment: u32,
    src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // Needed to make an anon decl with type `var_type` (the `finish()` call below).
    _ = try sema.typeHasOnePossibleValue(block, src, var_type);

    const ptr_type = try Type.ptr(sema.arena, .{
        .pointee_type = var_type,
        .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
        .@"align" = alignment,
    });

    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();

    // alignment == 0 means "natural alignment"; encoded as a null value.
    const align_val = if (alignment == 0)
        Value.@"null"
    else
        try Value.Tag.int_u64.create(anon_decl.arena(), alignment);

    const decl = try anon_decl.finish(
        try var_type.copy(anon_decl.arena()),
        // There will be stores before the first load, but they may be to sub-elements or
        // sub-fields. So we need to initialize with undef to allow the mechanism to expand
        // into fields/elements and have those overridden with stored values.
        Value.undef,
    );
    decl.align_val = align_val;

    try sema.mod.declareDeclDependency(sema.owner_decl, decl);
    return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{
        .runtime_index = block.runtime_index,
        .decl = decl,
    }));
}

/// The places where a user can specify an address space attribute
pub const AddressSpaceContext = enum {
    /// A function is specified to be placed in a certain address space.
    function,

    /// A (global) variable is specified to be placed in a certain address space.
    /// In contrast to .constant, these values (and thus the address space they will be
    /// placed in) are required to be mutable.
    variable,

    /// A (global) constant value is specified to be placed in a certain address space.
    /// In contrast to .variable, values placed in this address space are not required to be mutable.
    constant,

    /// A pointer is ascripted to point into a certain address space.
    pointer,
};

/// Resolves a ZIR reference to an `std.builtin.AddressSpace`, reporting a
/// compile error when the address space is not supported for `ctx` on the
/// current target.
pub fn analyzeAddrspace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    ctx: AddressSpaceContext,
) !std.builtin.AddressSpace {
    const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref);
    const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace);
    const target = sema.mod.getTarget();
    const arch = target.cpu.arch;

    const supported = switch (address_space) {
        .generic => true,
        // Segment-register address spaces are x86-only and pointer-only.
        .gs, .fs, .ss => (arch == .i386 or arch == .x86_64) and ctx == .pointer,
    };

    if (!supported) {
        // TODO error messages could be made more elaborate here
        const entity = switch (ctx) {
            .function => "functions",
            .variable => "mutable values",
            .constant => "constant values",
            .pointer => "pointers",
        };
        return sema.fail(
            block,
            src,
            "{s} with address space '{s}' are not supported on {s}",
            .{ entity, @tagName(address_space), arch.genericName() },
        );
    }

    return address_space;
}

/// Asserts the value is a pointer and dereferences it.
/// Returns `null` if the pointer contents cannot be loaded at comptime.
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
    const target = sema.mod.getTarget();
    const load_ty = ptr_ty.childType();
    // `RuntimeLoad` means the pointee is not comptime-known; signal with null.
    const parent = sema.beginComptimePtrLoad(block, src, ptr_val) catch |err| switch (err) {
        error.RuntimeLoad => return null,
        else => |e| return e,
    };
    // We have a Value that lines up in virtual memory exactly with what we want to load.
    // If the Type is in-memory coercable to `load_ty`, it may be returned without modifications.
    const coerce_in_mem_ok =
        (try sema.coerceInMemoryAllowed(block, load_ty, parent.ty, false, target, src, src)) == .ok or
        (try sema.coerceInMemoryAllowed(block, parent.ty, load_ty, false, target, src, src)) == .ok;
    if (coerce_in_mem_ok) {
        if (parent.is_mutable) {
            // The decl whose value we are obtaining here may be overwritten with
            // a different value upon further semantic analysis, which would
            // invalidate this memory. So we must copy here.
            return try parent.val.copy(sema.arena);
        }
        return parent.val;
    }

    // The type is not in-memory coercable, so it must be bitcasted according
    // to the pointer type we are performing the load through.
    // TODO emit a compile error if the types are not allowed to be bitcasted
    if (parent.ty.abiSize(target) >= load_ty.abiSize(target)) {
        // The Type it is stored as in the compiler has an ABI size greater or equal to
        // the ABI size of `load_ty`. We may perform the bitcast based on
        // `parent.val` alone (more efficient).
        return try sema.bitCastVal(block, src, parent.val, parent.ty, load_ty);
    }

    // The Type it is stored as in the compiler has an ABI size less than the ABI size
    // of `load_ty`. The bitcast must be performed based on the `parent.root_val`
    // and reinterpreted starting at `parent.byte_offset`.
    return sema.fail(block, src, "TODO: implement bitcast with index offset", .{});
}

/// Used to convert a u64 value to a usize value, emitting a compile error if the number
/// is too big to fit.
fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize {
    return std.math.cast(usize, int) catch |err| switch (err) {
        error.Overflow => return sema.fail(block, src, "expression produces integer value {d} which is too big for this compiler implementation to handle", .{int}),
    };
}

/// For pointer-like optionals, it returns the pointer type. For pointers,
/// the type is returned unmodified.
/// This can return `error.AnalysisFail` because it sometimes requires resolving whether
/// a type has zero bits, which can cause a "foo depends on itself" compile error.
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
fn typePtrOrOptionalPtrTy(
    sema: *Sema,
    block: *Block,
    ty: Type,
    buf: *Type.Payload.ElemType,
    src: LazySrcLoc,
) !?Type {
    switch (ty.tag()) {
        // Pointer-like optionals: unwrap to the child pointer type.
        .optional_single_const_pointer,
        .optional_single_mut_pointer,
        .c_const_pointer,
        .c_mut_pointer,
        => return ty.optionalChild(buf),

        // Plain non-slice pointers are returned unmodified.
        .single_const_pointer_to_comptime_int,
        .single_const_pointer,
        .single_mut_pointer,
        .many_const_pointer,
        .many_mut_pointer,
        .manyptr_u8,
        .manyptr_const_u8,
        => return ty,

        .pointer => switch (ty.ptrSize()) {
            .Slice => return null,
            .C => return ty.optionalChild(buf),
            else => return ty,
        },

        .inferred_alloc_const => unreachable,
        .inferred_alloc_mut => unreachable,

        .optional => {
            const child_type = ty.optionalChild(buf);
            if (child_type.zigTypeTag() != .Pointer) return null;
            const info = child_type.ptrInfo().data;
            switch (info.size) {
                .Slice, .C => return null,
                .Many, .One => {
                    if (info.@"allowzero") return null;

                    // optionals of zero sized types behave like bools, not pointers
                    if ((try sema.typeHasOnePossibleValue(block, src, child_type)) != null) {
                        return null;
                    }

                    return child_type;
                },
            }
        },

        else => return null,
    }
}
src/Sema.zig
const std = @import("../std.zig");
const builtin = @import("builtin");
const unicode = std.unicode;
const mem = std.mem;
const fs = std.fs;
const os = std.os;

pub const GetAppDataDirError = error{
    OutOfMemory,
    AppDataDirUnavailable,
};

/// Returns the per-user application data directory joined with `appname`.
/// Caller owns returned memory.
/// TODO determine if we can remove the allocator requirement
pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
    switch (builtin.os.tag) {
        .windows => {
            // Ask the shell for FOLDERID_LocalAppData, creating it if absent.
            var known_folder_path: [*:0]u16 = undefined;
            const hr = os.windows.shell32.SHGetKnownFolderPath(
                &os.windows.FOLDERID_LocalAppData,
                os.windows.KF_FLAG_CREATE,
                null,
                &known_folder_path,
            );
            switch (hr) {
                os.windows.S_OK => {
                    // The returned buffer is COM-allocated; release it on exit.
                    defer os.windows.ole32.CoTaskMemFree(@ptrCast(*c_void, known_folder_path));
                    const utf8_path = unicode.utf16leToUtf8Alloc(allocator, mem.spanZ(known_folder_path)) catch |err| switch (err) {
                        error.OutOfMemory => return error.OutOfMemory,
                        // Any malformed UTF-16 means we cannot produce a usable path.
                        error.UnexpectedSecondSurrogateHalf,
                        error.ExpectedSecondSurrogateHalf,
                        error.DanglingSurrogateHalf,
                        => return error.AppDataDirUnavailable,
                    };
                    defer allocator.free(utf8_path);
                    return fs.path.join(allocator, &[_][]const u8{ utf8_path, appname });
                },
                os.windows.E_OUTOFMEMORY => return error.OutOfMemory,
                else => return error.AppDataDirUnavailable,
            }
        },
        .macosx => {
            const home = os.getenv("HOME") orelse {
                // TODO look in /etc/passwd
                return error.AppDataDirUnavailable;
            };
            return fs.path.join(allocator, &[_][]const u8{ home, "Library", "Application Support", appname });
        },
        .linux, .freebsd, .netbsd, .dragonfly => {
            const home = os.getenv("HOME") orelse {
                // TODO look in /etc/passwd
                return error.AppDataDirUnavailable;
            };
            return fs.path.join(allocator, &[_][]const u8{ home, ".local", "share", appname });
        },
        else => @compileError("Unsupported OS"),
    }
}

test "getAppDataDir" {
    // We can't actually validate the result
    const dir = getAppDataDir(std.testing.allocator, "zig") catch return;
    defer std.testing.allocator.free(dir);
}
lib/std/fs/get_app_data_dir.zig
usingnamespace @import("bits.zig"); pub const SOCKET = *opaque {}; pub const INVALID_SOCKET = @intToPtr(SOCKET, ~@as(usize, 0)); pub const GROUP = u32; pub const ADDRESS_FAMILY = u16; pub const WSAEVENT = HANDLE; // Microsoft use the signed c_int for this, but it should never be negative pub const socklen_t = u32; pub const LM_HB_Extension = 128; pub const LM_HB1_PnP = 1; pub const LM_HB1_PDA_Palmtop = 2; pub const LM_HB1_Computer = 4; pub const LM_HB1_Printer = 8; pub const LM_HB1_Modem = 16; pub const LM_HB1_Fax = 32; pub const LM_HB1_LANAccess = 64; pub const LM_HB2_Telephony = 1; pub const LM_HB2_FileServer = 2; pub const ATMPROTO_AALUSER = 0; pub const ATMPROTO_AAL1 = 1; pub const ATMPROTO_AAL2 = 2; pub const ATMPROTO_AAL34 = 3; pub const ATMPROTO_AAL5 = 5; pub const SAP_FIELD_ABSENT = 4294967294; pub const SAP_FIELD_ANY = 4294967295; pub const SAP_FIELD_ANY_AESA_SEL = 4294967290; pub const SAP_FIELD_ANY_AESA_REST = 4294967291; pub const ATM_E164 = 1; pub const ATM_NSAP = 2; pub const ATM_AESA = 2; pub const ATM_ADDR_SIZE = 20; pub const BLLI_L2_ISO_1745 = 1; pub const BLLI_L2_Q921 = 2; pub const BLLI_L2_X25L = 6; pub const BLLI_L2_X25M = 7; pub const BLLI_L2_ELAPB = 8; pub const BLLI_L2_HDLC_ARM = 9; pub const BLLI_L2_HDLC_NRM = 10; pub const BLLI_L2_HDLC_ABM = 11; pub const BLLI_L2_LLC = 12; pub const BLLI_L2_X75 = 13; pub const BLLI_L2_Q922 = 14; pub const BLLI_L2_USER_SPECIFIED = 16; pub const BLLI_L2_ISO_7776 = 17; pub const BLLI_L3_X25 = 6; pub const BLLI_L3_ISO_8208 = 7; pub const BLLI_L3_X223 = 8; pub const BLLI_L3_SIO_8473 = 9; pub const BLLI_L3_T70 = 10; pub const BLLI_L3_ISO_TR9577 = 11; pub const BLLI_L3_USER_SPECIFIED = 16; pub const BLLI_L3_IPI_SNAP = 128; pub const BLLI_L3_IPI_IP = 204; pub const BHLI_ISO = 0; pub const BHLI_UserSpecific = 1; pub const BHLI_HighLayerProfile = 2; pub const BHLI_VendorSpecificAppId = 3; pub const AAL5_MODE_MESSAGE = 1; pub const AAL5_MODE_STREAMING = 2; pub const AAL5_SSCS_NULL = 0; pub const 
AAL5_SSCS_SSCOP_ASSURED = 1; pub const AAL5_SSCS_SSCOP_NON_ASSURED = 2; pub const AAL5_SSCS_FRAME_RELAY = 4; pub const BCOB_A = 1; pub const BCOB_C = 3; pub const BCOB_X = 16; pub const TT_NOIND = 0; pub const TT_CBR = 4; pub const TT_VBR = 8; pub const TR_NOIND = 0; pub const TR_END_TO_END = 1; pub const TR_NO_END_TO_END = 2; pub const CLIP_NOT = 0; pub const CLIP_SUS = 32; pub const UP_P2P = 0; pub const UP_P2MP = 1; pub const BLLI_L2_MODE_NORMAL = 64; pub const BLLI_L2_MODE_EXT = 128; pub const BLLI_L3_MODE_NORMAL = 64; pub const BLLI_L3_MODE_EXT = 128; pub const BLLI_L3_PACKET_16 = 4; pub const BLLI_L3_PACKET_32 = 5; pub const BLLI_L3_PACKET_64 = 6; pub const BLLI_L3_PACKET_128 = 7; pub const BLLI_L3_PACKET_256 = 8; pub const BLLI_L3_PACKET_512 = 9; pub const BLLI_L3_PACKET_1024 = 10; pub const BLLI_L3_PACKET_2048 = 11; pub const BLLI_L3_PACKET_4096 = 12; pub const PI_ALLOWED = 0; pub const PI_RESTRICTED = 64; pub const PI_NUMBER_NOT_AVAILABLE = 128; pub const SI_USER_NOT_SCREENED = 0; pub const SI_USER_PASSED = 1; pub const SI_USER_FAILED = 2; pub const SI_NETWORK = 3; pub const CAUSE_LOC_USER = 0; pub const CAUSE_LOC_PRIVATE_LOCAL = 1; pub const CAUSE_LOC_PUBLIC_LOCAL = 2; pub const CAUSE_LOC_TRANSIT_NETWORK = 3; pub const CAUSE_LOC_PUBLIC_REMOTE = 4; pub const CAUSE_LOC_PRIVATE_REMOTE = 5; pub const CAUSE_LOC_INTERNATIONAL_NETWORK = 7; pub const CAUSE_LOC_BEYOND_INTERWORKING = 10; pub const CAUSE_UNALLOCATED_NUMBER = 1; pub const CAUSE_NO_ROUTE_TO_TRANSIT_NETWORK = 2; pub const CAUSE_NO_ROUTE_TO_DESTINATION = 3; pub const CAUSE_VPI_VCI_UNACCEPTABLE = 10; pub const CAUSE_NORMAL_CALL_CLEARING = 16; pub const CAUSE_USER_BUSY = 17; pub const CAUSE_NO_USER_RESPONDING = 18; pub const CAUSE_CALL_REJECTED = 21; pub const CAUSE_NUMBER_CHANGED = 22; pub const CAUSE_USER_REJECTS_CLIR = 23; pub const CAUSE_DESTINATION_OUT_OF_ORDER = 27; pub const CAUSE_INVALID_NUMBER_FORMAT = 28; pub const CAUSE_STATUS_ENQUIRY_RESPONSE = 30; pub const CAUSE_NORMAL_UNSPECIFIED = 31; pub 
const CAUSE_VPI_VCI_UNAVAILABLE = 35; pub const CAUSE_NETWORK_OUT_OF_ORDER = 38; pub const CAUSE_TEMPORARY_FAILURE = 41; pub const CAUSE_ACCESS_INFORMAION_DISCARDED = 43; pub const CAUSE_NO_VPI_VCI_AVAILABLE = 45; pub const CAUSE_RESOURCE_UNAVAILABLE = 47; pub const CAUSE_QOS_UNAVAILABLE = 49; pub const CAUSE_USER_CELL_RATE_UNAVAILABLE = 51; pub const CAUSE_BEARER_CAPABILITY_UNAUTHORIZED = 57; pub const CAUSE_BEARER_CAPABILITY_UNAVAILABLE = 58; pub const CAUSE_OPTION_UNAVAILABLE = 63; pub const CAUSE_BEARER_CAPABILITY_UNIMPLEMENTED = 65; pub const CAUSE_UNSUPPORTED_TRAFFIC_PARAMETERS = 73; pub const CAUSE_INVALID_CALL_REFERENCE = 81; pub const CAUSE_CHANNEL_NONEXISTENT = 82; pub const CAUSE_INCOMPATIBLE_DESTINATION = 88; pub const CAUSE_INVALID_ENDPOINT_REFERENCE = 89; pub const CAUSE_INVALID_TRANSIT_NETWORK_SELECTION = 91; pub const CAUSE_TOO_MANY_PENDING_ADD_PARTY = 92; pub const CAUSE_AAL_PARAMETERS_UNSUPPORTED = 93; pub const CAUSE_MANDATORY_IE_MISSING = 96; pub const CAUSE_UNIMPLEMENTED_MESSAGE_TYPE = 97; pub const CAUSE_UNIMPLEMENTED_IE = 99; pub const CAUSE_INVALID_IE_CONTENTS = 100; pub const CAUSE_INVALID_STATE_FOR_MESSAGE = 101; pub const CAUSE_RECOVERY_ON_TIMEOUT = 102; pub const CAUSE_INCORRECT_MESSAGE_LENGTH = 104; pub const CAUSE_PROTOCOL_ERROR = 111; pub const CAUSE_COND_UNKNOWN = 0; pub const CAUSE_COND_PERMANENT = 1; pub const CAUSE_COND_TRANSIENT = 2; pub const CAUSE_REASON_USER = 0; pub const CAUSE_REASON_IE_MISSING = 4; pub const CAUSE_REASON_IE_INSUFFICIENT = 8; pub const CAUSE_PU_PROVIDER = 0; pub const CAUSE_PU_USER = 8; pub const CAUSE_NA_NORMAL = 0; pub const CAUSE_NA_ABNORMAL = 4; pub const QOS_CLASS0 = 0; pub const QOS_CLASS1 = 1; pub const QOS_CLASS2 = 2; pub const QOS_CLASS3 = 3; pub const QOS_CLASS4 = 4; pub const TNS_TYPE_NATIONAL = 64; pub const TNS_PLAN_CARRIER_ID_CODE = 1; pub const SIO_GET_NUMBER_OF_ATM_DEVICES = 1343619073; pub const SIO_GET_ATM_ADDRESS = 3491102722; pub const SIO_ASSOCIATE_PVC = 2417360899; pub const 
SIO_GET_ATM_CONNECTION_ID = 1343619076; pub const RIO_MSG_DONT_NOTIFY = 1; pub const RIO_MSG_DEFER = 2; pub const RIO_MSG_WAITALL = 4; pub const RIO_MSG_COMMIT_ONLY = 8; pub const RIO_MAX_CQ_SIZE = 134217728; pub const RIO_CORRUPT_CQ = 4294967295; pub const WINDOWS_AF_IRDA = 26; pub const WCE_AF_IRDA = 22; pub const IRDA_PROTO_SOCK_STREAM = 1; pub const SOL_IRLMP = 255; pub const IRLMP_ENUMDEVICES = 16; pub const IRLMP_IAS_SET = 17; pub const IRLMP_IAS_QUERY = 18; pub const IRLMP_SEND_PDU_LEN = 19; pub const IRLMP_EXCLUSIVE_MODE = 20; pub const IRLMP_IRLPT_MODE = 21; pub const IRLMP_9WIRE_MODE = 22; pub const IRLMP_TINYTP_MODE = 23; pub const IRLMP_PARAMETERS = 24; pub const IRLMP_DISCOVERY_MODE = 25; pub const IRLMP_SHARP_MODE = 32; pub const IAS_ATTRIB_NO_CLASS = 16; pub const IAS_ATTRIB_NO_ATTRIB = 0; pub const IAS_ATTRIB_INT = 1; pub const IAS_ATTRIB_OCTETSEQ = 2; pub const IAS_ATTRIB_STR = 3; pub const IAS_MAX_USER_STRING = 256; pub const IAS_MAX_OCTET_STRING = 1024; pub const IAS_MAX_CLASSNAME = 64; pub const IAS_MAX_ATTRIBNAME = 256; pub const LmCharSetASCII = 0; pub const LmCharSetISO_8859_1 = 1; pub const LmCharSetISO_8859_2 = 2; pub const LmCharSetISO_8859_3 = 3; pub const LmCharSetISO_8859_4 = 4; pub const LmCharSetISO_8859_5 = 5; pub const LmCharSetISO_8859_6 = 6; pub const LmCharSetISO_8859_7 = 7; pub const LmCharSetISO_8859_8 = 8; pub const LmCharSetISO_8859_9 = 9; pub const LmCharSetUNICODE = 255; pub const LM_BAUD_1200 = 1200; pub const LM_BAUD_2400 = 2400; pub const LM_BAUD_9600 = 9600; pub const LM_BAUD_19200 = 19200; pub const LM_BAUD_38400 = 38400; pub const LM_BAUD_57600 = 57600; pub const LM_BAUD_115200 = 115200; pub const LM_BAUD_576K = 576000; pub const LM_BAUD_1152K = 1152000; pub const LM_BAUD_4M = 4000000; pub const LM_BAUD_16M = 16000000; pub const IPX_PTYPE = 16384; pub const IPX_FILTERPTYPE = 16385; pub const IPX_STOPFILTERPTYPE = 16387; pub const IPX_DSTYPE = 16386; pub const IPX_EXTENDED_ADDRESS = 16388; pub const IPX_RECVHDR = 
16389; pub const IPX_MAXSIZE = 16390; pub const IPX_ADDRESS = 16391; pub const IPX_GETNETINFO = 16392; pub const IPX_GETNETINFO_NORIP = 16393; pub const IPX_SPXGETCONNECTIONSTATUS = 16395; pub const IPX_ADDRESS_NOTIFY = 16396; pub const IPX_MAX_ADAPTER_NUM = 16397; pub const IPX_RERIPNETNUMBER = 16398; pub const IPX_RECEIVE_BROADCAST = 16399; pub const IPX_IMMEDIATESPXACK = 16400; pub const IPPROTO_RM = 113; pub const MAX_MCAST_TTL = 255; pub const RM_OPTIONSBASE = 1000; pub const RM_RATE_WINDOW_SIZE = 1001; pub const RM_SET_MESSAGE_BOUNDARY = 1002; pub const RM_FLUSHCACHE = 1003; pub const RM_SENDER_WINDOW_ADVANCE_METHOD = 1004; pub const RM_SENDER_STATISTICS = 1005; pub const RM_LATEJOIN = 1006; pub const RM_SET_SEND_IF = 1007; pub const RM_ADD_RECEIVE_IF = 1008; pub const RM_DEL_RECEIVE_IF = 1009; pub const RM_SEND_WINDOW_ADV_RATE = 1010; pub const RM_USE_FEC = 1011; pub const RM_SET_MCAST_TTL = 1012; pub const RM_RECEIVER_STATISTICS = 1013; pub const RM_HIGH_SPEED_INTRANET_OPT = 1014; pub const SENDER_DEFAULT_RATE_KBITS_PER_SEC = 56; pub const SENDER_DEFAULT_WINDOW_ADV_PERCENTAGE = 15; pub const MAX_WINDOW_INCREMENT_PERCENTAGE = 25; pub const SENDER_DEFAULT_LATE_JOINER_PERCENTAGE = 0; pub const SENDER_MAX_LATE_JOINER_PERCENTAGE = 75; pub const BITS_PER_BYTE = 8; pub const LOG2_BITS_PER_BYTE = 3; pub const SOCKET_DEFAULT2_QM_POLICY = GUID.parse("{aec2ef9c-3a4d-4d3e-8842-239942e39a47}"); pub const REAL_TIME_NOTIFICATION_CAPABILITY = GUID.parse("{6b59819a-5cae-492d-a901-2a3c2c50164f}"); pub const REAL_TIME_NOTIFICATION_CAPABILITY_EX = GUID.parse("{6843da03-154a-4616-a508-44371295f96b}"); pub const ASSOCIATE_NAMERES_CONTEXT = GUID.parse("{59a38b67-d4fe-46e1-ba3c-87ea74ca3049}"); pub const WSAID_CONNECTEX = GUID{ .Data1 = 0x25a207b9, .Data2 = 0xddf3, .Data3 = 0x4660, .Data4 = [8]u8{ 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e }, }; pub const WSAID_ACCEPTEX = GUID{ .Data1 = 0xb5367df1, .Data2 = 0xcbac, .Data3 = 0x11cf, .Data4 = [8]u8{ 0x95, 0xca, 0x00, 0x80, 0x5f, 
0x48, 0xa1, 0x92 }, }; pub const WSAID_GETACCEPTEXSOCKADDRS = GUID{ .Data1 = 0xb5367df2, .Data2 = 0xcbac, .Data3 = 0x11cf, .Data4 = [8]u8{ 0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92 }, }; pub const WSAID_WSARECVMSG = GUID{ .Data1 = 0xf689d7c8, .Data2 = 0x6f1f, .Data3 = 0x436b, .Data4 = [8]u8{ 0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22 }, }; pub const WSAID_WSAPOLL = GUID{ .Data1 = 0x18C76F85, .Data2 = 0xDC66, .Data3 = 0x4964, .Data4 = [8]u8{ 0x97, 0x2E, 0x23, 0xC2, 0x72, 0x38, 0x31, 0x2B }, }; pub const WSAID_WSASENDMSG = GUID{ .Data1 = 0xa441e712, .Data2 = 0x754f, .Data3 = 0x43ca, .Data4 = [8]u8{ 0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d }, }; pub const TCP_INITIAL_RTO_DEFAULT_RTT = 0; pub const TCP_INITIAL_RTO_DEFAULT_MAX_SYN_RETRANSMISSIONS = 0; pub const SOCKET_SETTINGS_GUARANTEE_ENCRYPTION = 1; pub const SOCKET_SETTINGS_ALLOW_INSECURE = 2; pub const SOCKET_SETTINGS_IPSEC_SKIP_FILTER_INSTANTIATION = 1; pub const SOCKET_SETTINGS_IPSEC_OPTIONAL_PEER_NAME_VERIFICATION = 2; pub const SOCKET_SETTINGS_IPSEC_ALLOW_FIRST_INBOUND_PKT_UNENCRYPTED = 4; pub const SOCKET_SETTINGS_IPSEC_PEER_NAME_IS_RAW_FORMAT = 8; pub const SOCKET_QUERY_IPSEC2_ABORT_CONNECTION_ON_FIELD_CHANGE = 1; pub const SOCKET_QUERY_IPSEC2_FIELD_MASK_MM_SA_ID = 1; pub const SOCKET_QUERY_IPSEC2_FIELD_MASK_QM_SA_ID = 2; pub const SOCKET_INFO_CONNECTION_SECURED = 1; pub const SOCKET_INFO_CONNECTION_ENCRYPTED = 2; pub const SOCKET_INFO_CONNECTION_IMPERSONATED = 4; pub const IN4ADDR_LOOPBACK = 16777343; pub const IN4ADDR_LOOPBACKPREFIX_LENGTH = 8; pub const IN4ADDR_LINKLOCALPREFIX_LENGTH = 16; pub const IN4ADDR_MULTICASTPREFIX_LENGTH = 4; pub const IFF_UP = 1; pub const IFF_BROADCAST = 2; pub const IFF_LOOPBACK = 4; pub const IFF_POINTTOPOINT = 8; pub const IFF_MULTICAST = 16; pub const IP_OPTIONS = 1; pub const IP_HDRINCL = 2; pub const IP_TOS = 3; pub const IP_TTL = 4; pub const IP_MULTICAST_IF = 9; pub const IP_MULTICAST_TTL = 10; pub const IP_MULTICAST_LOOP = 11; pub const IP_ADD_MEMBERSHIP 
// NOTE(review): whitespace-mangled declaration stream — statements wrap across
// physical lines, so the leading `= 12;` completes a `pub const` opened on the
// previous line (IP_ADD_MEMBERSHIP per Windows SDK ordering — confirm upstream).
// IPPROTO_IP-level socket options, IN6ADDR_* prefix lengths, and the
// protocol-independent multicast MCAST_* options. Values appear to mirror the
// Windows SDK headers (ws2ipdef.h); duplicate numeric values as written here
// (IP_HOPLIMIT/IP_RECVTTL both 21, IP_RECVTCLASS/IP_RECVTOS both 40) are
// faithful to the SDK, not typos — do not "fix" them.
= 12; pub const IP_DROP_MEMBERSHIP = 13; pub const IP_DONTFRAGMENT = 14; pub const IP_ADD_SOURCE_MEMBERSHIP = 15; pub const IP_DROP_SOURCE_MEMBERSHIP = 16; pub const IP_BLOCK_SOURCE = 17; pub const IP_UNBLOCK_SOURCE = 18; pub const IP_PKTINFO = 19; pub const IP_HOPLIMIT = 21; pub const IP_RECVTTL = 21; pub const IP_RECEIVE_BROADCAST = 22; pub const IP_RECVIF = 24; pub const IP_RECVDSTADDR = 25; pub const IP_IFLIST = 28; pub const IP_ADD_IFLIST = 29; pub const IP_DEL_IFLIST = 30; pub const IP_UNICAST_IF = 31; pub const IP_RTHDR = 32; pub const IP_GET_IFLIST = 33; pub const IP_RECVRTHDR = 38; pub const IP_TCLASS = 39; pub const IP_RECVTCLASS = 40; pub const IP_RECVTOS = 40; pub const IP_ORIGINAL_ARRIVAL_IF = 47; pub const IP_ECN = 50; pub const IP_PKTINFO_EX = 51; pub const IP_WFP_REDIRECT_RECORDS = 60; pub const IP_WFP_REDIRECT_CONTEXT = 70; pub const IP_MTU_DISCOVER = 71; pub const IP_MTU = 73; pub const IP_NRT_INTERFACE = 74; pub const IP_RECVERR = 75; pub const IP_USER_MTU = 76; pub const IP_UNSPECIFIED_TYPE_OF_SERVICE = -1; pub const IN6ADDR_LINKLOCALPREFIX_LENGTH = 64; pub const IN6ADDR_MULTICASTPREFIX_LENGTH = 8; pub const IN6ADDR_SOLICITEDNODEMULTICASTPREFIX_LENGTH = 104; pub const IN6ADDR_V4MAPPEDPREFIX_LENGTH = 96; pub const IN6ADDR_6TO4PREFIX_LENGTH = 16; pub const IN6ADDR_TEREDOPREFIX_LENGTH = 32; pub const MCAST_JOIN_GROUP = 41; pub const MCAST_LEAVE_GROUP = 42; pub const MCAST_BLOCK_SOURCE = 43; pub const MCAST_UNBLOCK_SOURCE = 44; pub const MCAST_JOIN_SOURCE_GROUP = 45; pub const MCAST_LEAVE_SOURCE_GROUP = 46; pub const IPV6_HOPOPTS = 1; pub const IPV6_HDRINCL = 2; pub const IPV6_UNICAST_HOPS = 4; pub const IPV6_MULTICAST_IF = 9; pub const IPV6_MULTICAST_HOPS = 10; pub const IPV6_MULTICAST_LOOP = 11; pub const IPV6_ADD_MEMBERSHIP = 12; pub const IPV6_DROP_MEMBERSHIP = 13; pub const IPV6_DONTFRAG = 14; pub const IPV6_PKTINFO = 19; pub const IPV6_HOPLIMIT = 21; pub const IPV6_PROTECTION_LEVEL = 23; pub const IPV6_RECVIF = 24; pub const IPV6_RECVDSTADDR =
// IPPROTO_IPV6-level options (continued), PROTECTION_LEVEL_* (IPv6 firewall
// levels), address-string buffer lengths (note: Windows uses 22/65, not the
// POSIX 16/46), TCP_* and UDP_* socket options, and the start of the AF_*
// address-family table.
25; pub const IPV6_CHECKSUM = 26; pub const IPV6_V6ONLY = 27; pub const IPV6_IFLIST = 28; pub const IPV6_ADD_IFLIST = 29; pub const IPV6_DEL_IFLIST = 30; pub const IPV6_UNICAST_IF = 31; pub const IPV6_RTHDR = 32; pub const IPV6_GET_IFLIST = 33; pub const IPV6_RECVRTHDR = 38; pub const IPV6_TCLASS = 39; pub const IPV6_RECVTCLASS = 40; pub const IPV6_ECN = 50; pub const IPV6_PKTINFO_EX = 51; pub const IPV6_WFP_REDIRECT_RECORDS = 60; pub const IPV6_WFP_REDIRECT_CONTEXT = 70; pub const IPV6_MTU_DISCOVER = 71; pub const IPV6_MTU = 72; pub const IPV6_NRT_INTERFACE = 74; pub const IPV6_RECVERR = 75; pub const IPV6_USER_MTU = 76; pub const IP_UNSPECIFIED_HOP_LIMIT = -1; pub const PROTECTION_LEVEL_UNRESTRICTED = 10; pub const PROTECTION_LEVEL_EDGERESTRICTED = 20; pub const PROTECTION_LEVEL_RESTRICTED = 30; pub const INET_ADDRSTRLEN = 22; pub const INET6_ADDRSTRLEN = 65; pub const TCP_OFFLOAD_NO_PREFERENCE = 0; pub const TCP_OFFLOAD_NOT_PREFERRED = 1; pub const TCP_OFFLOAD_PREFERRED = 2; pub const TCP_EXPEDITED_1122 = 2; pub const TCP_KEEPALIVE = 3; pub const TCP_MAXSEG = 4; pub const TCP_MAXRT = 5; pub const TCP_STDURG = 6; pub const TCP_NOURG = 7; pub const TCP_ATMARK = 8; pub const TCP_NOSYNRETRIES = 9; pub const TCP_TIMESTAMPS = 10; pub const TCP_OFFLOAD_PREFERENCE = 11; pub const TCP_CONGESTION_ALGORITHM = 12; pub const TCP_DELAY_FIN_ACK = 13; pub const TCP_MAXRTMS = 14; pub const TCP_FASTOPEN = 15; pub const TCP_KEEPCNT = 16; pub const TCP_KEEPINTVL = 17; pub const TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18; pub const TCP_ICMP_ERROR_INFO = 19; pub const UDP_SEND_MSG_SIZE = 2; pub const UDP_RECV_MAX_COALESCED_SIZE = 3; pub const UDP_COALESCED_INFO = 3; pub const AF_UNSPEC = 0; pub const AF_UNIX = 1; pub const AF_INET = 2; pub const AF_IMPLINK = 3; pub const AF_PUP = 4; pub const AF_CHAOS = 5; pub const AF_NS = 6; pub const AF_ISO = 7; pub const AF_ECMA = 8; pub const AF_DATAKIT = 9; pub const AF_CCITT = 10; pub const AF_SNA = 11; pub const AF_DECnet = 12; pub const AF_DLI =
// NOTE(review): leading `13;` completes `AF_DLI`, opened on the previous line
// of this wrapped declaration stream.
// AF_* address families (continued), SOCK_* socket types, the SOL_SOCKET level
// and its SO_* options, and the Winsock ioctl code space (IOC_*/SIO_*).
// The SIO_* values reference IOC_OUT / IOC_IN, which are declared further down
// this block — legal in Zig because container-level declarations are
// order-independent.
13; pub const AF_LAT = 14; pub const AF_HYLINK = 15; pub const AF_APPLETALK = 16; pub const AF_NETBIOS = 17; pub const AF_VOICEVIEW = 18; pub const AF_FIREFOX = 19; pub const AF_UNKNOWN1 = 20; pub const AF_BAN = 21; pub const AF_ATM = 22; pub const AF_INET6 = 23; pub const AF_CLUSTER = 24; pub const AF_12844 = 25; pub const AF_IRDA = 26; pub const AF_NETDES = 28; pub const AF_MAX = 29; pub const AF_TCNPROCESS = 29; pub const AF_TCNMESSAGE = 30; pub const AF_ICLFXBM = 31; pub const AF_LINK = 33; pub const AF_HYPERV = 34; pub const SOCK_STREAM = 1; pub const SOCK_DGRAM = 2; pub const SOCK_RAW = 3; pub const SOCK_RDM = 4; pub const SOCK_SEQPACKET = 5; pub const SOL_SOCKET = 65535; pub const SO_DEBUG = 1; pub const SO_ACCEPTCONN = 2; pub const SO_REUSEADDR = 4; pub const SO_KEEPALIVE = 8; pub const SO_DONTROUTE = 16; pub const SO_BROADCAST = 32; pub const SO_USELOOPBACK = 64; pub const SO_LINGER = 128; pub const SO_OOBINLINE = 256; pub const SO_SNDBUF = 4097; pub const SO_RCVBUF = 4098; pub const SO_SNDLOWAT = 4099; pub const SO_RCVLOWAT = 4100; pub const SO_SNDTIMEO = 4101; pub const SO_RCVTIMEO = 4102; pub const SO_ERROR = 4103; pub const SO_TYPE = 4104; pub const SO_BSP_STATE = 4105; pub const SO_GROUP_ID = 8193; pub const SO_GROUP_PRIORITY = 8194; pub const SO_MAX_MSG_SIZE = 8195; pub const SO_CONDITIONAL_ACCEPT = 12290; pub const SO_PAUSE_ACCEPT = 12291; pub const SO_COMPARTMENT_ID = 12292; pub const SO_RANDOMIZE_PORT = 12293; pub const SO_PORT_SCALABILITY = 12294; pub const SO_REUSE_UNICASTPORT = 12295; pub const SO_REUSE_MULTICASTPORT = 12296; pub const SO_ORIGINAL_DST = 12303; pub const WSK_SO_BASE = 16384; pub const TCP_NODELAY = 1; pub const IOC_UNIX = 0; pub const IOC_WS2 = 134217728; pub const IOC_PROTOCOL = 268435456; pub const IOC_VENDOR = 402653184; pub const SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_OUT | IOC_IN | IOC_WS2 | 6; pub const SIO_BSP_HANDLE = IOC_OUT | IOC_WS2 | 27; pub const SIO_BSP_HANDLE_SELECT = IOC_OUT | IOC_WS2 | 28; pub const
// Remaining SIO_* codes, well-known IPPORT_* service port numbers, and the
// classful-addressing IN_CLASS* masks/shifts.
SIO_BSP_HANDLE_POLL = IOC_OUT | IOC_WS2 | 29; pub const SIO_BASE_HANDLE = IOC_OUT | IOC_WS2 | 34; pub const IPPROTO_IP = 0; pub const IPPORT_TCPMUX = 1; pub const IPPORT_ECHO = 7; pub const IPPORT_DISCARD = 9; pub const IPPORT_SYSTAT = 11; pub const IPPORT_DAYTIME = 13; pub const IPPORT_NETSTAT = 15; pub const IPPORT_QOTD = 17; pub const IPPORT_MSP = 18; pub const IPPORT_CHARGEN = 19; pub const IPPORT_FTP_DATA = 20; pub const IPPORT_FTP = 21; pub const IPPORT_TELNET = 23; pub const IPPORT_SMTP = 25; pub const IPPORT_TIMESERVER = 37; pub const IPPORT_NAMESERVER = 42; pub const IPPORT_WHOIS = 43; pub const IPPORT_MTP = 57; pub const IPPORT_TFTP = 69; pub const IPPORT_RJE = 77; pub const IPPORT_FINGER = 79; pub const IPPORT_TTYLINK = 87; pub const IPPORT_SUPDUP = 95; pub const IPPORT_POP3 = 110; pub const IPPORT_NTP = 123; pub const IPPORT_EPMAP = 135; pub const IPPORT_NETBIOS_NS = 137; pub const IPPORT_NETBIOS_DGM = 138; pub const IPPORT_NETBIOS_SSN = 139; pub const IPPORT_IMAP = 143; pub const IPPORT_SNMP = 161; pub const IPPORT_SNMP_TRAP = 162; pub const IPPORT_IMAP3 = 220; pub const IPPORT_LDAP = 389; pub const IPPORT_HTTPS = 443; pub const IPPORT_MICROSOFT_DS = 445; pub const IPPORT_EXECSERVER = 512; pub const IPPORT_LOGINSERVER = 513; pub const IPPORT_CMDSERVER = 514; pub const IPPORT_EFSSERVER = 520; pub const IPPORT_BIFFUDP = 512; pub const IPPORT_WHOSERVER = 513; pub const IPPORT_ROUTESERVER = 520; pub const IPPORT_RESERVED = 1024; pub const IPPORT_REGISTERED_MAX = 49151; pub const IPPORT_DYNAMIC_MIN = 49152; pub const IPPORT_DYNAMIC_MAX = 65535; pub const IN_CLASSA_NET = 4278190080; pub const IN_CLASSA_NSHIFT = 24; pub const IN_CLASSA_HOST = 16777215; pub const IN_CLASSA_MAX = 128; pub const IN_CLASSB_NET = 4294901760; pub const IN_CLASSB_NSHIFT = 16; pub const IN_CLASSB_HOST = 65535; pub const IN_CLASSB_MAX = 65536; pub const IN_CLASSC_NET = 4294967040; pub const IN_CLASSC_NSHIFT = 8; pub const IN_CLASSC_HOST = 255; pub const IN_CLASSD_NET = 4026531840; pub
const IN_CLASSD_NSHIFT = 28; pub const IN_CLASSD_HOST = 268435455; pub const INADDR_LOOPBACK = 2130706433; pub const INADDR_NONE = 4294967295; pub const IOCPARM_MASK = 127; pub const IOC_VOID = 536870912; pub const IOC_OUT = 1073741824; pub const IOC_IN = 2147483648; pub const MSG_TRUNC = 256; pub const MSG_CTRUNC = 512; pub const MSG_BCAST = 1024; pub const MSG_MCAST = 2048; pub const MSG_ERRQUEUE = 4096; pub const AI_PASSIVE = 1; pub const AI_CANONNAME = 2; pub const AI_NUMERICHOST = 4; pub const AI_NUMERICSERV = 8; pub const AI_DNS_ONLY = 16; pub const AI_ALL = 256; pub const AI_ADDRCONFIG = 1024; pub const AI_V4MAPPED = 2048; pub const AI_NON_AUTHORITATIVE = 16384; pub const AI_SECURE = 32768; pub const AI_RETURN_PREFERRED_NAMES = 65536; pub const AI_FQDN = 131072; pub const AI_FILESERVER = 262144; pub const AI_DISABLE_IDN_ENCODING = 524288; pub const AI_EXTENDED = 2147483648; pub const AI_RESOLUTION_HANDLE = 1073741824; pub const FIONBIO = -2147195266; pub const ADDRINFOEX_VERSION_2 = 2; pub const ADDRINFOEX_VERSION_3 = 3; pub const ADDRINFOEX_VERSION_4 = 4; pub const NS_ALL = 0; pub const NS_SAP = 1; pub const NS_NDS = 2; pub const NS_PEER_BROWSE = 3; pub const NS_SLP = 5; pub const NS_DHCP = 6; pub const NS_TCPIP_LOCAL = 10; pub const NS_TCPIP_HOSTS = 11; pub const NS_DNS = 12; pub const NS_NETBT = 13; pub const NS_WINS = 14; pub const NS_NLA = 15; pub const NS_NBP = 20; pub const NS_MS = 30; pub const NS_STDA = 31; pub const NS_NTDS = 32; pub const NS_EMAIL = 37; pub const NS_X500 = 40; pub const NS_NIS = 41; pub const NS_NISPLUS = 42; pub const NS_WRQ = 50; pub const NS_NETDES = 60; pub const NI_NOFQDN = 1; pub const NI_NUMERICHOST = 2; pub const NI_NAMEREQD = 4; pub const NI_NUMERICSERV = 8; pub const NI_DGRAM = 16; pub const NI_MAXHOST = 1025; pub const NI_MAXSERV = 32; pub const INCL_WINSOCK_API_PROTOTYPES = 1; pub const INCL_WINSOCK_API_TYPEDEFS = 0; pub const FD_SETSIZE = 64; pub const IMPLINK_IP = 155; pub const IMPLINK_LOWEXPER = 156; pub const
// NOTE(review): `IMPLINK_HIGHEXPER` below completes the `pub const` opened on
// the previous line of this wrapped declaration stream.
// WSAData string lengths, generic SOCKET_ERROR sentinel, MSG_* recv/send
// flags, WSAEventSelect FD_*_BIT event indices, WSAAccept CF_* verdicts,
// shutdown() SD_* modes, protocol-chain limits, and WSAPROTOCOL_INFO
// capability flags (PFL_*, XP1_*).
IMPLINK_HIGHEXPER = 158; pub const WSADESCRIPTION_LEN = 256; pub const WSASYS_STATUS_LEN = 128; pub const SOCKET_ERROR = -1; pub const FROM_PROTOCOL_INFO = -1; pub const SO_PROTOCOL_INFOA = 8196; pub const SO_PROTOCOL_INFOW = 8197; pub const PVD_CONFIG = 12289; pub const SOMAXCONN = 2147483647; pub const MSG_PEEK = 2; pub const MSG_WAITALL = 8; pub const MSG_PUSH_IMMEDIATE = 32; pub const MSG_PARTIAL = 32768; pub const MSG_INTERRUPT = 16; pub const MSG_MAXIOVLEN = 16; pub const MAXGETHOSTSTRUCT = 1024; pub const FD_READ_BIT = 0; pub const FD_WRITE_BIT = 1; pub const FD_OOB_BIT = 2; pub const FD_ACCEPT_BIT = 3; pub const FD_CONNECT_BIT = 4; pub const FD_CLOSE_BIT = 5; pub const FD_QOS_BIT = 6; pub const FD_GROUP_QOS_BIT = 7; pub const FD_ROUTING_INTERFACE_CHANGE_BIT = 8; pub const FD_ADDRESS_LIST_CHANGE_BIT = 9; pub const FD_MAX_EVENTS = 10; pub const CF_ACCEPT = 0; pub const CF_REJECT = 1; pub const CF_DEFER = 2; pub const SD_RECEIVE = 0; pub const SD_SEND = 1; pub const SD_BOTH = 2; pub const SG_UNCONSTRAINED_GROUP = 1; pub const SG_CONSTRAINED_GROUP = 2; pub const MAX_PROTOCOL_CHAIN = 7; pub const BASE_PROTOCOL = 1; pub const LAYERED_PROTOCOL = 0; pub const WSAPROTOCOL_LEN = 255; pub const PFL_MULTIPLE_PROTO_ENTRIES = 1; pub const PFL_RECOMMENDED_PROTO_ENTRY = 2; pub const PFL_HIDDEN = 4; pub const PFL_MATCHES_PROTOCOL_ZERO = 8; pub const PFL_NETWORKDIRECT_PROVIDER = 16; pub const XP1_CONNECTIONLESS = 1; pub const XP1_GUARANTEED_DELIVERY = 2; pub const XP1_GUARANTEED_ORDER = 4; pub const XP1_MESSAGE_ORIENTED = 8; pub const XP1_PSEUDO_STREAM = 16; pub const XP1_GRACEFUL_CLOSE = 32; pub const XP1_EXPEDITED_DATA = 64; pub const XP1_CONNECT_DATA = 128; pub const XP1_DISCONNECT_DATA = 256; pub const XP1_SUPPORT_BROADCAST = 512; pub const XP1_SUPPORT_MULTIPOINT = 1024; pub const XP1_MULTIPOINT_CONTROL_PLANE = 2048; pub const XP1_MULTIPOINT_DATA_PLANE = 4096; pub const XP1_QOS_SUPPORTED = 8192; pub const XP1_INTERRUPT = 16384; pub const XP1_UNI_SEND = 32768; pub const
// XP1_* flags (continued), WSASocket WSA_FLAG_* creation flags, WSAJoinLeaf
// JL_* roles, and the WSALookupService LUP_* query flags (note LUP_RES_SERVICE
// and LUP_SECURE share value 32768 as written — verify against the SDK before
// changing either).
XP1_UNI_RECV = 65536; pub const XP1_IFS_HANDLES = 131072; pub const XP1_PARTIAL_MESSAGE = 262144; pub const XP1_SAN_SUPPORT_SDP = 524288; pub const BIGENDIAN = 0; pub const LITTLEENDIAN = 1; pub const SECURITY_PROTOCOL_NONE = 0; pub const JL_SENDER_ONLY = 1; pub const JL_RECEIVER_ONLY = 2; pub const JL_BOTH = 4; pub const WSA_FLAG_OVERLAPPED = 1; pub const WSA_FLAG_MULTIPOINT_C_ROOT = 2; pub const WSA_FLAG_MULTIPOINT_C_LEAF = 4; pub const WSA_FLAG_MULTIPOINT_D_ROOT = 8; pub const WSA_FLAG_MULTIPOINT_D_LEAF = 16; pub const WSA_FLAG_ACCESS_SYSTEM_SECURITY = 64; pub const WSA_FLAG_NO_HANDLE_INHERIT = 128; pub const WSA_FLAG_REGISTERED_IO = 256; pub const TH_NETDEV = 1; pub const TH_TAPI = 2; pub const SERVICE_MULTIPLE = 1; pub const NS_LOCALNAME = 19; pub const RES_UNUSED_1 = 1; pub const RES_FLUSH_CACHE = 2; pub const RES_SERVICE = 4; pub const LUP_DEEP = 1; pub const LUP_CONTAINERS = 2; pub const LUP_NOCONTAINERS = 4; pub const LUP_NEAREST = 8; pub const LUP_RETURN_NAME = 16; pub const LUP_RETURN_TYPE = 32; pub const LUP_RETURN_VERSION = 64; pub const LUP_RETURN_COMMENT = 128; pub const LUP_RETURN_ADDR = 256; pub const LUP_RETURN_BLOB = 512; pub const LUP_RETURN_ALIASES = 1024; pub const LUP_RETURN_QUERY_STRING = 2048; pub const LUP_RETURN_ALL = 4080; pub const LUP_RES_SERVICE = 32768; pub const LUP_FLUSHCACHE = 4096; pub const LUP_FLUSHPREVIOUS = 8192; pub const LUP_NON_AUTHORITATIVE = 16384; pub const LUP_SECURE = 32768; pub const LUP_RETURN_PREFERRED_NAMES = 65536; pub const LUP_DNS_ONLY = 131072; pub const LUP_ADDRCONFIG = 1048576; pub const LUP_DUAL_ADDR = 2097152; pub const LUP_FILESERVER = 4194304; pub const LUP_DISABLE_IDN_ENCODING = 8388608; pub const LUP_API_ANSI = 16777216; pub const LUP_RESOLUTION_HANDLE = 2147483648; pub const RESULT_IS_ALIAS = 1; pub const RESULT_IS_ADDED = 16; pub const RESULT_IS_CHANGED = 32; pub const RESULT_IS_DELETED = 64; pub const POLLRDNORM = 256; pub const POLLRDBAND = 512; pub const POLLPRI = 1024; pub const POLLWRNORM = 16;
// WSAPoll POLL* flags (continued), legacy Microsoft SO_CONN*/SO_DISC*
// extension options, TransmitFile TF_*/TP_* flags, layered-service-provider
// LSP_* categories, IPPROTO_* protocol numbers, and WSAEventSelect FD_* event
// masks. AF_IPX = 6 duplicates AF_NS = 6 as written — presumably intentional
// SDK aliasing; confirm before altering.
pub const POLLWRBAND = 32; pub const POLLERR = 1; pub const POLLHUP = 2; pub const POLLNVAL = 4; pub const SO_CONNDATA = 28672; pub const SO_CONNOPT = 28673; pub const SO_DISCDATA = 28674; pub const SO_DISCOPT = 28675; pub const SO_CONNDATALEN = 28676; pub const SO_CONNOPTLEN = 28677; pub const SO_DISCDATALEN = 28678; pub const SO_DISCOPTLEN = 28679; pub const SO_OPENTYPE = 28680; pub const SO_SYNCHRONOUS_ALERT = 16; pub const SO_SYNCHRONOUS_NONALERT = 32; pub const SO_MAXDG = 28681; pub const SO_MAXPATHDG = 28682; pub const SO_UPDATE_ACCEPT_CONTEXT = 28683; pub const SO_CONNECT_TIME = 28684; pub const SO_UPDATE_CONNECT_CONTEXT = 28688; pub const TCP_BSDURGENT = 28672; pub const TF_DISCONNECT = 1; pub const TF_REUSE_SOCKET = 2; pub const TF_WRITE_BEHIND = 4; pub const TF_USE_DEFAULT_WORKER = 0; pub const TF_USE_SYSTEM_THREAD = 16; pub const TF_USE_KERNEL_APC = 32; pub const TP_ELEMENT_MEMORY = 1; pub const TP_ELEMENT_FILE = 2; pub const TP_ELEMENT_EOP = 4; pub const NLA_ALLUSERS_NETWORK = 1; pub const NLA_FRIENDLY_NAME = 2; pub const WSPDESCRIPTION_LEN = 255; pub const WSS_OPERATION_IN_PROGRESS = 259; pub const LSP_SYSTEM = 2147483648; pub const LSP_INSPECTOR = 1; pub const LSP_REDIRECTOR = 2; pub const LSP_PROXY = 4; pub const LSP_FIREWALL = 8; pub const LSP_INBOUND_MODIFY = 16; pub const LSP_OUTBOUND_MODIFY = 32; pub const LSP_CRYPTO_COMPRESS = 64; pub const LSP_LOCAL_CACHE = 128; pub const IPPROTO_ICMP = 1; pub const IPPROTO_IGMP = 2; pub const IPPROTO_GGP = 3; pub const IPPROTO_TCP = 6; pub const IPPROTO_PUP = 12; pub const IPPROTO_UDP = 17; pub const IPPROTO_IDP = 22; pub const IPPROTO_ND = 77; pub const IPPROTO_RAW = 255; pub const IPPROTO_MAX = 256; pub const IP_DEFAULT_MULTICAST_TTL = 1; pub const IP_DEFAULT_MULTICAST_LOOP = 1; pub const IP_MAX_MEMBERSHIPS = 20; pub const AF_IPX = 6; pub const FD_READ = 1; pub const FD_WRITE = 2; pub const FD_OOB = 4; pub const FD_ACCEPT = 8; pub const FD_CONNECT = 16; pub const FD_CLOSE = 32; pub const SERVICE_RESOURCE =
// NOTE(review): the collapsed source places code after `//`/`///` comments on
// the same physical line, which a line comment would swallow; this span is
// re-lined (tokens unchanged) to restore the evidently intended layout.
// Leading `1;` completes `SERVICE_RESOURCE`, opened on the previous line.
// Registration-and-name-resolution SERVICE_*/PROP_*/NS_*/NSTYPE_* constants
// and the legacy XP_* provider flags.
1;
pub const SERVICE_SERVICE = 2;
pub const SERVICE_LOCAL = 4;
pub const SERVICE_FLAG_DEFER = 1;
pub const SERVICE_FLAG_HARD = 2;
pub const PROP_COMMENT = 1;
pub const PROP_LOCALE = 2;
pub const PROP_DISPLAY_HINT = 4;
pub const PROP_VERSION = 8;
pub const PROP_START_TIME = 16;
pub const PROP_MACHINE = 32;
pub const PROP_ADDRESSES = 256;
pub const PROP_SD = 512;
pub const PROP_ALL = 2147483648;
pub const SERVICE_ADDRESS_FLAG_RPC_CN = 1;
pub const SERVICE_ADDRESS_FLAG_RPC_DG = 2;
pub const SERVICE_ADDRESS_FLAG_RPC_NB = 4;
pub const NS_DEFAULT = 0;
pub const NS_VNS = 50;
pub const NSTYPE_HIERARCHICAL = 1;
pub const NSTYPE_DYNAMIC = 2;
pub const NSTYPE_ENUMERABLE = 4;
pub const NSTYPE_WORKGROUP = 8;
pub const XP_CONNECTIONLESS = 1;
pub const XP_GUARANTEED_DELIVERY = 2;
pub const XP_GUARANTEED_ORDER = 4;
pub const XP_MESSAGE_ORIENTED = 8;
pub const XP_PSEUDO_STREAM = 16;
pub const XP_GRACEFUL_CLOSE = 32;
pub const XP_EXPEDITED_DATA = 64;
pub const XP_CONNECT_DATA = 128;
pub const XP_DISCONNECT_DATA = 256;
pub const XP_SUPPORTS_BROADCAST = 512;
pub const XP_SUPPORTS_MULTICAST = 1024;
pub const XP_BANDWIDTH_ALLOCATION = 2048;
pub const XP_FRAGMENTATION = 4096;
pub const XP_ENCRYPTS = 8192;
pub const RES_SOFT_SEARCH = 1;
pub const RES_FIND_MULTIPLE = 2;
pub const SET_SERVICE_PARTIAL_SUCCESS = 1;
pub const UDP_NOCHECKSUM = 1;
pub const UDP_CHECKSUM_COVERAGE = 20;
pub const GAI_STRERROR_BUFFER_SIZE = 1024;
// Condition callback passed to WSAAccept; WINAPI calling convention.
pub const LPCONDITIONPROC = fn (
    lpCallerId: *WSABUF,
    lpCallerData: *WSABUF,
    lpSQOS: *QOS,
    lpGQOS: *QOS,
    lpCalleeId: *WSABUF,
    lpCalleeData: *WSABUF,
    g: *u32,
    dwCallbackData: usize,
) callconv(WINAPI) i32;
// Completion routine type used by overlapped WSASend/WSARecv-style calls.
pub const LPWSAOVERLAPPED_COMPLETION_ROUTINE = fn (
    dwError: u32,
    cbTransferred: u32,
    lpOverlapped: *OVERLAPPED,
    dwFlags: u32,
) callconv(WINAPI) void;
// C-ABI mirrors of the qos.h / winsock2.h structures; `extern struct` keeps
// field order and padding compatible with the Windows headers.
pub const FLOWSPEC = extern struct {
    TokenRate: u32,
    TokenBucketSize: u32,
    PeakBandwidth: u32,
    Latency: u32,
    DelayVariation: u32,
    ServiceType: u32,
    MaxSduSize: u32,
    MinimumPolicedSize: u32,
};
pub const QOS = extern struct {
    SendingFlowspec: FLOWSPEC,
    ReceivingFlowspec: FLOWSPEC,
    ProviderSpecific: WSABUF,
};
pub const SOCKET_ADDRESS = extern struct {
    lpSockaddr: *sockaddr,
    iSockaddrLength: i32,
};
pub const SOCKET_ADDRESS_LIST = extern struct {
    iAddressCount: i32,
    Address: [1]SOCKET_ADDRESS,
};
// WSADATA's field order differs between 64-bit and 32-bit targets, matching
// the two layouts the Windows SDK defines for each ABI.
pub const WSADATA = if (@sizeOf(usize) == @sizeOf(u64)) extern struct {
    wVersion: WORD,
    wHighVersion: WORD,
    iMaxSockets: u16,
    iMaxUdpDg: u16,
    lpVendorInfo: *u8,
    szDescription: [WSADESCRIPTION_LEN + 1]u8,
    szSystemStatus: [WSASYS_STATUS_LEN + 1]u8,
} else extern struct {
    wVersion: WORD,
    wHighVersion: WORD,
    szDescription: [WSADESCRIPTION_LEN + 1]u8,
    szSystemStatus: [WSASYS_STATUS_LEN + 1]u8,
    iMaxSockets: u16,
    iMaxUdpDg: u16,
    lpVendorInfo: *u8,
};
pub const WSAPROTOCOLCHAIN = extern struct {
    ChainLen: c_int,
    ChainEntries: [MAX_PROTOCOL_CHAIN]DWORD,
};
// ANSI and wide variants differ only in the szProtocol character type.
pub const WSAPROTOCOL_INFOA = extern struct {
    dwServiceFlags1: DWORD,
    dwServiceFlags2: DWORD,
    dwServiceFlags3: DWORD,
    dwServiceFlags4: DWORD,
    dwProviderFlags: DWORD,
    ProviderId: GUID,
    dwCatalogEntryId: DWORD,
    ProtocolChain: WSAPROTOCOLCHAIN,
    iVersion: c_int,
    iAddressFamily: c_int,
    iMaxSockAddr: c_int,
    iMinSockAddr: c_int,
    iSocketType: c_int,
    iProtocol: c_int,
    iProtocolMaxOffset: c_int,
    iNetworkByteOrder: c_int,
    iSecurityScheme: c_int,
    dwMessageSize: DWORD,
    dwProviderReserved: DWORD,
    szProtocol: [WSAPROTOCOL_LEN + 1]CHAR,
};
pub const WSAPROTOCOL_INFOW = extern struct {
    dwServiceFlags1: DWORD,
    dwServiceFlags2: DWORD,
    dwServiceFlags3: DWORD,
    dwServiceFlags4: DWORD,
    dwProviderFlags: DWORD,
    ProviderId: GUID,
    dwCatalogEntryId: DWORD,
    ProtocolChain: WSAPROTOCOLCHAIN,
    iVersion: c_int,
    iAddressFamily: c_int,
    iMaxSockAddr: c_int,
    iMinSockAddr: c_int,
    iSocketType: c_int,
    iProtocol: c_int,
    iProtocolMaxOffset: c_int,
    iNetworkByteOrder: c_int,
    iSecurityScheme: c_int,
    dwMessageSize: DWORD,
    dwProviderReserved: DWORD,
    szProtocol: [WSAPROTOCOL_LEN + 1]WCHAR,
};
pub const sockproto = extern struct {
    sp_family: u16,
    sp_protocol: u16,
};
pub const linger = extern struct {
    l_onoff: u16,
    l_linger: u16,
};
pub const WSANETWORKEVENTS = extern struct {
    lNetworkEvents: i32,
    iErrorCode: [10]i32,
};
pub const WSAOVERLAPPED = extern struct {
    Internal: DWORD,
    InternalHigh: DWORD,
    Offset: DWORD,
    OffsetHigh: DWORD,
    hEvent: ?WSAEVENT,
};
pub const addrinfo = addrinfoa;
pub const addrinfoa = extern struct {
    flags: i32,
    family: i32,
    socktype: i32,
    protocol: i32,
    addrlen: usize,
    canonname: ?[*:0]u8,
    addr: ?*sockaddr,
    next: ?*addrinfo,
};
pub const addrinfoexA = extern struct {
    ai_flags: i32,
    ai_family: i32,
    ai_socktype: i32,
    ai_protocol: i32,
    ai_addrlen: usize,
    ai_canonname: [*:0]u8,
    ai_addr: *sockaddr,
    ai_blob: *c_void,
    ai_bloblen: usize,
    ai_provider: *GUID,
    ai_next: *addrinfoexA,
};
pub const sockaddr = extern struct {
    family: ADDRESS_FAMILY,
    data: [14]u8,
};
pub const sockaddr_storage = extern struct {
    family: ADDRESS_FAMILY,
    __pad1: [6]u8,
    __align: i64,
    __pad2: [112]u8,
};
/// IPv4 socket address
pub const sockaddr_in = extern struct {
    family: ADDRESS_FAMILY = AF_INET,
    port: USHORT,
    addr: u32,
    zero: [8]u8 = [8]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
};
/// IPv6 socket address
pub const sockaddr_in6 = extern struct {
    family: ADDRESS_FAMILY = AF_INET6,
    port: USHORT,
    flowinfo: u32,
    addr: [16]u8,
    scope_id: u32,
};
/// UNIX domain socket address
pub const sockaddr_un = extern struct {
    family: ADDRESS_FAMILY = AF_UNIX,
    path: [108]u8,
};
pub const WSABUF = extern struct {
    len: ULONG,
    buf: [*]u8,
};
pub const msghdr = WSAMSG;
pub const msghdr_const = WSAMSG_const;
pub const WSAMSG_const = extern struct {
    name: *const sockaddr,
    namelen: INT,
    lpBuffers: [*]WSABUF,
    dwBufferCount: DWORD,
    Control: WSABUF,
    dwFlags: DWORD,
};
pub const WSAMSG = extern struct {
    name: *sockaddr,
    namelen: INT,
    lpBuffers: [*]WSABUF,
    dwBufferCount: DWORD,
    Control: WSABUF,
    dwFlags: DWORD,
};
pub const WSAPOLLFD = pollfd;
pub const pollfd = extern struct {
    fd: SOCKET,
    events: SHORT,
    revents: SHORT,
};
pub const TRANSMIT_FILE_BUFFERS = extern struct {
    Head: *c_void,
    HeadLength: u32,
    Tail: *c_void,
    TailLength: u32,
};
// Extension-function pointer types fetched at runtime via
// SIO_GET_EXTENSION_FUNCTION_POINTER.
pub const LPFN_TRANSMITFILE = fn (
    hSocket: SOCKET,
    hFile: HANDLE,
    nNumberOfBytesToWrite: u32,
    nNumberOfBytesPerSend: u32,
    lpOverlapped: ?*OVERLAPPED,
    lpTransmitBuffers: ?*TRANSMIT_FILE_BUFFERS,
    dwReserved: u32,
) callconv(WINAPI) BOOL;
pub const LPFN_ACCEPTEX = fn (
    sListenSocket: SOCKET,
    sAcceptSocket: SOCKET,
    lpOutputBuffer: *c_void,
    dwReceiveDataLength: u32,
    dwLocalAddressLength: u32,
    dwRemoteAddressLength: u32,
    lpdwBytesReceived: *u32,
    lpOverlapped: *OVERLAPPED,
) callconv(WINAPI) BOOL;
pub const LPFN_GETACCEPTEXSOCKADDRS = fn (
    lpOutputBuffer: *c_void,
    dwReceiveDataLength: u32,
    dwLocalAddressLength: u32,
    dwRemoteAddressLength: u32,
    LocalSockaddr: **sockaddr,
    LocalSockaddrLength: *i32,
    RemoteSockaddr: **sockaddr,
    RemoteSockaddrLength: *i32,
) callconv(WINAPI) void;
pub const LPFN_WSASENDMSG = fn (
    s: SOCKET,
    lpMsg: *const WSAMSG_const,
    dwFlags: u32,
    lpNumberOfBytesSent: ?*u32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;
pub const LPFN_WSARECVMSG = fn (
    s: SOCKET,
    lpMsg: *WSAMSG,
    lpdwNumberOfBytesRecv: ?*u32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;
pub const LPSERVICE_CALLBACK_PROC = fn (
    lParam: LPARAM,
    hAsyncTaskHandle: HANDLE,
) callconv(WINAPI) void;
pub const SERVICE_ASYNC_INFO = extern struct {
    lpServiceCallbackProc: LPSERVICE_CALLBACK_PROC,
    lParam: LPARAM,
    hAsyncTaskHandle: HANDLE,
};
pub const LPLOOKUPSERVICE_COMPLETION_ROUTINE = fn (
    dwError: u32,
    dwBytes: u32,
    lpOverlapped: *OVERLAPPED,
) callconv(WINAPI) void;
pub const fd_set = extern struct {
    fd_count: u32,
    fd_array: [64]SOCKET,
};
pub const hostent = extern struct {
    h_name: [*]u8,
    h_aliases: **i8,
    h_addrtype: i16,
    h_length: i16,
    h_addr_list: **i8,
};
// https://docs.microsoft.com/en-au/windows/win32/winsock/windows-sockets-error-codes-2
pub const WinsockError = extern enum(u16) {
    /// Specified event object handle is invalid.
/// An application attempts to use an event object, but the specified handle is not valid. WSA_INVALID_HANDLE = 6, /// Insufficient memory available. /// An application used a Windows Sockets function that directly maps to a Windows function. /// The Windows function is indicating a lack of required memory resources. WSA_NOT_ENOUGH_MEMORY = 8, /// One or more parameters are invalid. /// An application used a Windows Sockets function which directly maps to a Windows function. /// The Windows function is indicating a problem with one or more parameters. WSA_INVALID_PARAMETER = 87, /// Overlapped operation aborted. /// An overlapped operation was canceled due to the closure of the socket, or the execution of the SIO_FLUSH command in WSAIoctl. WSA_OPERATION_ABORTED = 995, /// Overlapped I/O event object not in signaled state. /// The application has tried to determine the status of an overlapped operation which is not yet completed. /// Applications that use WSAGetOverlappedResult (with the fWait flag set to FALSE) in a polling mode to determine when an overlapped operation has completed, get this error code until the operation is complete. WSA_IO_INCOMPLETE = 996, /// The application has initiated an overlapped operation that cannot be completed immediately. /// A completion indication will be given later when the operation has been completed. WSA_IO_PENDING = 997, /// Interrupted function call. /// A blocking operation was interrupted by a call to WSACancelBlockingCall. WSAEINTR = 10004, /// File handle is not valid. /// The file handle supplied is not valid. WSAEBADF = 10009, /// Permission denied. /// An attempt was made to access a socket in a way forbidden by its access permissions. /// An example is using a broadcast address for sendto without broadcast permission being set using setsockopt(SO_BROADCAST). 
/// Another possible reason for the WSAEACCES error is that when the bind function is called (on Windows NT 4.0 with SP4 and later), another application, service, or kernel mode driver is bound to the same address with exclusive access. /// Such exclusive access is a new feature of Windows NT 4.0 with SP4 and later, and is implemented by using the SO_EXCLUSIVEADDRUSE option. WSAEACCES = 10013, /// Bad address. /// The system detected an invalid pointer address in attempting to use a pointer argument of a call. /// This error occurs if an application passes an invalid pointer value, or if the length of the buffer is too small. /// For instance, if the length of an argument, which is a sockaddr structure, is smaller than the sizeof(sockaddr). WSAEFAULT = 10014, /// Invalid argument. /// Some invalid argument was supplied (for example, specifying an invalid level to the setsockopt function). /// In some instances, it also refers to the current state of the socket—for instance, calling accept on a socket that is not listening. WSAEINVAL = 10022, /// Too many open files. /// Too many open sockets. Each implementation may have a maximum number of socket handles available, either globally, per process, or per thread. WSAEMFILE = 10024, /// Resource temporarily unavailable. /// This error is returned from operations on nonblocking sockets that cannot be completed immediately, for example recv when no data is queued to be read from the socket. /// It is a nonfatal error, and the operation should be retried later. /// It is normal for WSAEWOULDBLOCK to be reported as the result from calling connect on a nonblocking SOCK_STREAM socket, since some time must elapse for the connection to be established. WSAEWOULDBLOCK = 10035, /// Operation now in progress. /// A blocking operation is currently executing. 
/// Windows Sockets only allows a single blocking operation—per- task or thread—to be outstanding, and if any other function call is made (whether or not it references that or any other socket) the function fails with the WSAEINPROGRESS error. WSAEINPROGRESS = 10036, /// Operation already in progress. /// An operation was attempted on a nonblocking socket with an operation already in progress—that is, calling connect a second time on a nonblocking socket that is already connecting, or canceling an asynchronous request (WSAAsyncGetXbyY) that has already been canceled or completed. WSAEALREADY = 10037, /// Socket operation on nonsocket. /// An operation was attempted on something that is not a socket. /// Either the socket handle parameter did not reference a valid socket, or for select, a member of an fd_set was not valid. WSAENOTSOCK = 10038, /// Destination address required. /// A required address was omitted from an operation on a socket. /// For example, this error is returned if sendto is called with the remote address of ADDR_ANY. WSAEDESTADDRREQ = 10039, /// Message too long. /// A message sent on a datagram socket was larger than the internal message buffer or some other network limit, or the buffer used to receive a datagram was smaller than the datagram itself. WSAEMSGSIZE = 10040, /// Protocol wrong type for socket. /// A protocol was specified in the socket function call that does not support the semantics of the socket type requested. /// For example, the ARPA Internet UDP protocol cannot be specified with a socket type of SOCK_STREAM. WSAEPROTOTYPE = 10041, /// Bad protocol option. /// An unknown, invalid or unsupported option or level was specified in a getsockopt or setsockopt call. WSAENOPROTOOPT = 10042, /// Protocol not supported. /// The requested protocol has not been configured into the system, or no implementation for it exists. /// For example, a socket call requests a SOCK_DGRAM socket, but specifies a stream protocol. 
WSAEPROTONOSUPPORT = 10043, /// Socket type not supported. /// The support for the specified socket type does not exist in this address family. /// For example, the optional type SOCK_RAW might be selected in a socket call, and the implementation does not support SOCK_RAW sockets at all. WSAESOCKTNOSUPPORT = 10044, /// Operation not supported. /// The attempted operation is not supported for the type of object referenced. /// Usually this occurs when a socket descriptor to a socket that cannot support this operation is trying to accept a connection on a datagram socket. WSAEOPNOTSUPP = 10045, /// Protocol family not supported. /// The protocol family has not been configured into the system or no implementation for it exists. /// This message has a slightly different meaning from WSAEAFNOSUPPORT. /// However, it is interchangeable in most cases, and all Windows Sockets functions that return one of these messages also specify WSAEAFNOSUPPORT. WSAEPFNOSUPPORT = 10046, /// Address family not supported by protocol family. /// An address incompatible with the requested protocol was used. /// All sockets are created with an associated address family (that is, AF_INET for Internet Protocols) and a generic protocol type (that is, SOCK_STREAM). /// This error is returned if an incorrect protocol is explicitly requested in the socket call, or if an address of the wrong family is used for a socket, for example, in sendto. WSAEAFNOSUPPORT = 10047, /// Address already in use. /// Typically, only one usage of each socket address (protocol/IP address/port) is permitted. /// This error occurs if an application attempts to bind a socket to an IP address/port that has already been used for an existing socket, or a socket that was not closed properly, or one that is still in the process of closing. /// For server applications that need to bind multiple sockets to the same port number, consider using setsockopt (SO_REUSEADDR). 
/// Client applications usually need not call bind at all—connect chooses an unused port automatically. /// When bind is called with a wildcard address (involving ADDR_ANY), a WSAEADDRINUSE error could be delayed until the specific address is committed. /// This could happen with a call to another function later, including connect, listen, WSAConnect, or WSAJoinLeaf. WSAEADDRINUSE = 10048, /// Cannot assign requested address. /// The requested address is not valid in its context. /// This normally results from an attempt to bind to an address that is not valid for the local computer. /// This can also result from connect, sendto, WSAConnect, WSAJoinLeaf, or WSASendTo when the remote address or port is not valid for a remote computer (for example, address or port 0). WSAEADDRNOTAVAIL = 10049, /// Network is down. /// A socket operation encountered a dead network. /// This could indicate a serious failure of the network system (that is, the protocol stack that the Windows Sockets DLL runs over), the network interface, or the local network itself. WSAENETDOWN = 10050, /// Network is unreachable. /// A socket operation was attempted to an unreachable network. /// This usually means the local software knows no route to reach the remote host. WSAENETUNREACH = 10051, /// Network dropped connection on reset. /// The connection has been broken due to keep-alive activity detecting a failure while the operation was in progress. /// It can also be returned by setsockopt if an attempt is made to set SO_KEEPALIVE on a connection that has already failed. WSAENETRESET = 10052, /// Software caused connection abort. /// An established connection was aborted by the software in your host computer, possibly due to a data transmission time-out or protocol error. WSAECONNABORTED = 10053, /// Connection reset by peer. /// An existing connection was forcibly closed by the remote host. 
/// This normally results if the peer application on the remote host is suddenly stopped, the host is rebooted, the host or remote network interface is disabled, or the remote host uses a hard close (see setsockopt for more information on the SO_LINGER option on the remote socket). /// This error may also result if a connection was broken due to keep-alive activity detecting a failure while one or more operations are in progress. /// Operations that were in progress fail with WSAENETRESET. Subsequent operations fail with WSAECONNRESET. WSAECONNRESET = 10054, /// No buffer space available. /// An operation on a socket could not be performed because the system lacked sufficient buffer space or because a queue was full. WSAENOBUFS = 10055, /// Socket is already connected. /// A connect request was made on an already-connected socket. /// Some implementations also return this error if sendto is called on a connected SOCK_DGRAM socket (for SOCK_STREAM sockets, the to parameter in sendto is ignored) although other implementations treat this as a legal occurrence. WSAEISCONN = 10056, /// Socket is not connected. /// A request to send or receive data was disallowed because the socket is not connected and (when sending on a datagram socket using sendto) no address was supplied. /// Any other type of operation might also return this error—for example, setsockopt setting SO_KEEPALIVE if the connection has been reset. WSAENOTCONN = 10057, /// Cannot send after socket shutdown. /// A request to send or receive data was disallowed because the socket had already been shut down in that direction with a previous shutdown call. /// By calling shutdown a partial close of a socket is requested, which is a signal that sending or receiving, or both have been discontinued. WSAESHUTDOWN = 10058, /// Too many references. /// Too many references to some kernel object. WSAETOOMANYREFS = 10059, /// Connection timed out. 
/// A connection attempt failed because the connected party did not properly respond after a period of time, or the established connection failed because the connected host has failed to respond. WSAETIMEDOUT = 10060, /// Connection refused. /// No connection could be made because the target computer actively refused it. /// This usually results from trying to connect to a service that is inactive on the foreign host—that is, one with no server application running. WSAECONNREFUSED = 10061, /// Cannot translate name. /// Cannot translate a name. WSAELOOP = 10062, /// Name too long. /// A name component or a name was too long. WSAENAMETOOLONG = 10063, /// Host is down. /// A socket operation failed because the destination host is down. A socket operation encountered a dead host. /// Networking activity on the local host has not been initiated. /// These conditions are more likely to be indicated by the error WSAETIMEDOUT. WSAEHOSTDOWN = 10064, /// No route to host. /// A socket operation was attempted to an unreachable host. See WSAENETUNREACH. WSAEHOSTUNREACH = 10065, /// Directory not empty. /// Cannot remove a directory that is not empty. WSAENOTEMPTY = 10066, /// Too many processes. /// A Windows Sockets implementation may have a limit on the number of applications that can use it simultaneously. /// WSAStartup may fail with this error if the limit has been reached. WSAEPROCLIM = 10067, /// User quota exceeded. /// Ran out of user quota. WSAEUSERS = 10068, /// Disk quota exceeded. /// Ran out of disk quota. WSAEDQUOT = 10069, /// Stale file handle reference. /// The file handle reference is no longer available. WSAESTALE = 10070, /// Item is remote. /// The item is not available locally. WSAEREMOTE = 10071, /// Network subsystem is unavailable. /// This error is returned by WSAStartup if the Windows Sockets implementation cannot function at this time because the underlying system it uses to provide network services is currently unavailable. 
/// Users should check: /// - That the appropriate Windows Sockets DLL file is in the current path. /// - That they are not trying to use more than one Windows Sockets implementation simultaneously. /// - If there is more than one Winsock DLL on your system, be sure the first one in the path is appropriate for the network subsystem currently loaded. /// - The Windows Sockets implementation documentation to be sure all necessary components are currently installed and configured correctly. WSASYSNOTREADY = 10091, /// Winsock.dll version out of range. /// The current Windows Sockets implementation does not support the Windows Sockets specification version requested by the application. /// Check that no old Windows Sockets DLL files are being accessed. WSAVERNOTSUPPORTED = 10092, /// Successful WSAStartup not yet performed. /// Either the application has not called WSAStartup or WSAStartup failed. /// The application may be accessing a socket that the current active task does not own (that is, trying to share a socket between tasks), or WSACleanup has been called too many times. WSANOTINITIALISED = 10093, /// Graceful shutdown in progress. /// Returned by WSARecv and WSARecvFrom to indicate that the remote party has initiated a graceful shutdown sequence. WSAEDISCON = 10101, /// No more results. /// No more results can be returned by the WSALookupServiceNext function. WSAENOMORE = 10102, /// Call has been canceled. /// A call to the WSALookupServiceEnd function was made while this call was still processing. The call has been canceled. WSAECANCELLED = 10103, /// Procedure call table is invalid. /// The service provider procedure call table is invalid. /// A service provider returned a bogus procedure table to Ws2_32.dll. /// This is usually caused by one or more of the function pointers being NULL. WSAEINVALIDPROCTABLE = 10104, /// Service provider is invalid. /// The requested service provider is invalid. 
/// This error is returned by the WSCGetProviderInfo and WSCGetProviderInfo32 functions if the protocol entry specified could not be found. /// This error is also returned if the service provider returned a version number other than 2.0. WSAEINVALIDPROVIDER = 10105, /// Service provider failed to initialize. /// The requested service provider could not be loaded or initialized. /// This error is returned if either a service provider's DLL could not be loaded (LoadLibrary failed) or the provider's WSPStartup or NSPStartup function failed. WSAEPROVIDERFAILEDINIT = 10106, /// System call failure. /// A system call that should never fail has failed. /// This is a generic error code, returned under various conditions. /// Returned when a system call that should never fail does fail. /// For example, if a call to WaitForMultipleEvents fails or one of the registry functions fails trying to manipulate the protocol/namespace catalogs. /// Returned when a provider does not return SUCCESS and does not provide an extended error code. /// Can indicate a service provider implementation error. WSASYSCALLFAILURE = 10107, /// Service not found. /// No such service is known. The service cannot be found in the specified name space. WSASERVICE_NOT_FOUND = 10108, /// Class type not found. /// The specified class was not found. WSATYPE_NOT_FOUND = 10109, /// No more results. /// No more results can be returned by the WSALookupServiceNext function. WSA_E_NO_MORE = 10110, /// Call was canceled. /// A call to the WSALookupServiceEnd function was made while this call was still processing. The call has been canceled. WSA_E_CANCELLED = 10111, /// Database query was refused. /// A database query failed because it was actively refused. WSAEREFUSED = 10112, /// Host not found. /// No such host is known. The name is not an official host name or alias, or it cannot be found in the database(s) being queried. 
/// This error may also be returned for protocol and service queries, and means that the specified name could not be found in the relevant database. WSAHOST_NOT_FOUND = 11001, /// Nonauthoritative host not found. /// This is usually a temporary error during host name resolution and means that the local server did not receive a response from an authoritative server. A retry at some time later may be successful. WSATRY_AGAIN = 11002, /// This is a nonrecoverable error. /// This indicates that some sort of nonrecoverable error occurred during a database lookup. /// This may be because the database files (for example, BSD-compatible HOSTS, SERVICES, or PROTOCOLS files) could not be found, or a DNS request was returned by the server with a severe error. WSANO_RECOVERY = 11003, /// Valid name, no data record of requested type. /// The requested name is valid and was found in the database, but it does not have the correct associated data being resolved for. /// The usual example for this is a host name-to-address translation attempt (using gethostbyname or WSAAsyncGetHostByName) which uses the DNS (Domain Name Server). /// An MX record is returned but no A record—indicating the host itself exists, but is not directly reachable. WSANO_DATA = 11004, /// QoS receivers. /// At least one QoS reserve has arrived. WSA_QOS_RECEIVERS = 11005, /// QoS senders. /// At least one QoS send path has arrived. WSA_QOS_SENDERS = 11006, /// No QoS senders. /// There are no QoS senders. WSA_QOS_NO_SENDERS = 11007, /// QoS no receivers. /// There are no QoS receivers. WSA_QOS_NO_RECEIVERS = 11008, /// QoS request confirmed. /// The QoS reserve request has been confirmed. WSA_QOS_REQUEST_CONFIRMED = 11009, /// QoS admission error. /// A QoS error occurred due to lack of resources. WSA_QOS_ADMISSION_FAILURE = 11010, /// QoS policy failure. /// The QoS request was rejected because the policy system couldn't allocate the requested resource within the existing policy. 
WSA_QOS_POLICY_FAILURE = 11011, /// QoS bad style. /// An unknown or conflicting QoS style was encountered. WSA_QOS_BAD_STYLE = 11012, /// QoS bad object. /// A problem was encountered with some part of the filterspec or the provider-specific buffer in general. WSA_QOS_BAD_OBJECT = 11013, /// QoS traffic control error. /// An error with the underlying traffic control (TC) API as the generic QoS request was converted for local enforcement by the TC API. /// This could be due to an out of memory error or to an internal QoS provider error. WSA_QOS_TRAFFIC_CTRL_ERROR = 11014, /// QoS generic error. /// A general QoS error. WSA_QOS_GENERIC_ERROR = 11015, /// QoS service type error. /// An invalid or unrecognized service type was found in the QoS flowspec. WSA_QOS_ESERVICETYPE = 11016, /// QoS flowspec error. /// An invalid or inconsistent flowspec was found in the QOS structure. WSA_QOS_EFLOWSPEC = 11017, /// Invalid QoS provider buffer. /// An invalid QoS provider-specific buffer. WSA_QOS_EPROVSPECBUF = 11018, /// Invalid QoS filter style. /// An invalid QoS filter style was used. WSA_QOS_EFILTERSTYLE = 11019, /// Invalid QoS filter type. /// An invalid QoS filter type was used. WSA_QOS_EFILTERTYPE = 11020, /// Incorrect QoS filter count. /// An incorrect number of QoS FILTERSPECs were specified in the FLOWDESCRIPTOR. WSA_QOS_EFILTERCOUNT = 11021, /// Invalid QoS object length. /// An object with an invalid ObjectLength field was specified in the QoS provider-specific buffer. WSA_QOS_EOBJLENGTH = 11022, /// Incorrect QoS flow count. /// An incorrect number of flow descriptors was specified in the QoS structure. WSA_QOS_EFLOWCOUNT = 11023, /// Unrecognized QoS object. /// An unrecognized object was found in the QoS provider-specific buffer. WSA_QOS_EUNKOWNPSOBJ = 11024, /// Invalid QoS policy object. /// An invalid policy object was found in the QoS provider-specific buffer. WSA_QOS_EPOLICYOBJ = 11025, /// Invalid QoS flow descriptor. 
/// An invalid QoS flow descriptor was found in the flow descriptor list. WSA_QOS_EFLOWDESC = 11026, /// Invalid QoS provider-specific flowspec. /// An invalid or inconsistent flowspec was found in the QoS provider-specific buffer. WSA_QOS_EPSFLOWSPEC = 11027, /// Invalid QoS provider-specific filterspec. /// An invalid FILTERSPEC was found in the QoS provider-specific buffer. WSA_QOS_EPSFILTERSPEC = 11028, /// Invalid QoS shape discard mode object. /// An invalid shape discard mode object was found in the QoS provider-specific buffer. WSA_QOS_ESDMODEOBJ = 11029, /// Invalid QoS shaping rate object. /// An invalid shaping rate object was found in the QoS provider-specific buffer. WSA_QOS_ESHAPERATEOBJ = 11030, /// Reserved policy QoS element type. /// A reserved policy element was found in the QoS provider-specific buffer. WSA_QOS_RESERVED_PETYPE = 11031, _, }; pub extern "ws2_32" fn accept( s: SOCKET, addr: ?*sockaddr, addrlen: ?*i32, ) callconv(WINAPI) SOCKET; pub extern "ws2_32" fn bind( s: SOCKET, name: *const sockaddr, namelen: i32, ) callconv(WINAPI) i32; pub extern "ws2_32" fn closesocket( s: SOCKET, ) callconv(WINAPI) i32; pub extern "ws2_32" fn connect( s: SOCKET, name: *const sockaddr, namelen: i32, ) callconv(WINAPI) i32; pub extern "ws2_32" fn ioctlsocket( s: SOCKET, cmd: i32, argp: *u32, ) callconv(WINAPI) i32; pub extern "ws2_32" fn getpeername( s: SOCKET, name: *sockaddr, namelen: *i32, ) callconv(WINAPI) i32; pub extern "ws2_32" fn getsockname( s: SOCKET, name: *sockaddr, namelen: *i32, ) callconv(WINAPI) i32; pub extern "ws2_32" fn getsockopt( s: SOCKET, level: i32, optname: i32, optval: [*]u8, optlen: *i32, ) callconv(WINAPI) i32; pub extern "ws2_32" fn htonl( hostlong: u32, ) callconv(WINAPI) u32; pub extern "ws2_32" fn htons( hostshort: u16, ) callconv(WINAPI) u16; pub extern "ws2_32" fn inet_addr( cp: ?[*]const u8, ) callconv(WINAPI) u32; pub extern "ws2_32" fn listen( s: SOCKET, backlog: i32, ) callconv(WINAPI) i32; pub extern "ws2_32" fn 
ntohl(
    netlong: u32,
) callconv(WINAPI) u32;

pub extern "ws2_32" fn ntohs(
    netshort: u16,
) callconv(WINAPI) u16;

pub extern "ws2_32" fn recv(
    s: SOCKET,
    buf: [*]u8,
    len: i32,
    flags: i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn recvfrom(
    s: SOCKET,
    buf: [*]u8,
    len: i32,
    flags: i32,
    from: ?*sockaddr,
    fromlen: ?*i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn select(
    nfds: i32,
    readfds: ?*fd_set,
    writefds: ?*fd_set,
    exceptfds: ?*fd_set,
    timeout: ?*const timeval,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn send(
    s: SOCKET,
    buf: [*]const u8,
    len: i32,
    flags: u32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn sendto(
    s: SOCKET,
    buf: [*]const u8,
    len: i32,
    flags: i32,
    to: *const sockaddr,
    tolen: i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn setsockopt(
    s: SOCKET,
    level: i32,
    optname: i32,
    optval: ?[*]const u8,
    optlen: i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn shutdown(
    s: SOCKET,
    how: i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn socket(
    af: i32,
    @"type": i32,
    protocol: i32,
) callconv(WINAPI) SOCKET;

pub extern "ws2_32" fn WSAStartup(
    wVersionRequired: WORD,
    lpWSAData: *WSADATA,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSACleanup() callconv(WINAPI) i32;

pub extern "ws2_32" fn WSASetLastError(iError: i32) callconv(WINAPI) void;

pub extern "ws2_32" fn WSAGetLastError() callconv(WINAPI) WinsockError;

pub extern "ws2_32" fn WSAIsBlocking() callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSAUnhookBlockingHook() callconv(WINAPI) i32;

pub extern "ws2_32" fn WSASetBlockingHook(lpBlockFunc: FARPROC) callconv(WINAPI) FARPROC;

pub extern "ws2_32" fn WSACancelBlockingCall() callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAAsyncGetServByName(
    hWnd: HWND,
    wMsg: u32,
    name: [*:0]const u8,
    proto: ?[*:0]const u8,
    buf: [*]u8,
    buflen: i32,
) callconv(WINAPI) HANDLE;

pub extern "ws2_32" fn WSAAsyncGetServByPort(
    hWnd: HWND,
    wMsg: u32,
    port: i32,
    proto: ?[*:0]const u8,
    buf: [*]u8,
    buflen: i32,
) callconv(WINAPI) HANDLE;

pub extern "ws2_32" fn WSAAsyncGetProtoByName(
    hWnd: HWND,
    wMsg: u32,
    name: [*:0]const u8,
    buf: [*]u8,
    buflen: i32,
) callconv(WINAPI) HANDLE;

pub extern "ws2_32" fn WSAAsyncGetProtoByNumber(
    hWnd: HWND,
    wMsg: u32,
    number: i32,
    buf: [*]u8,
    buflen: i32,
) callconv(WINAPI) HANDLE;

pub extern "ws2_32" fn WSACancelAsyncRequest(hAsyncTaskHandle: HANDLE) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAAsyncSelect(
    s: SOCKET,
    hWnd: HWND,
    wMsg: u32,
    lEvent: i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAAccept(
    s: SOCKET,
    addr: ?*sockaddr,
    addrlen: ?*i32,
    lpfnCondition: ?LPCONDITIONPROC,
    dwCallbackData: usize,
) callconv(WINAPI) SOCKET;

pub extern "ws2_32" fn WSACloseEvent(hEvent: HANDLE) callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSAConnect(
    s: SOCKET,
    name: *const sockaddr,
    namelen: i32,
    lpCallerData: ?*WSABUF,
    lpCalleeData: ?*WSABUF,
    lpSQOS: ?*QOS,
    lpGQOS: ?*QOS,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAConnectByNameW(
    s: SOCKET,
    nodename: [*:0]const u16,
    servicename: [*:0]const u16,
    LocalAddressLength: ?*u32,
    LocalAddress: ?*sockaddr,
    RemoteAddressLength: ?*u32,
    RemoteAddress: ?*sockaddr,
    timeout: ?*const timeval,
    Reserved: *OVERLAPPED,
) callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSAConnectByNameA(
    s: SOCKET,
    nodename: [*:0]const u8,
    servicename: [*:0]const u8,
    LocalAddressLength: ?*u32,
    LocalAddress: ?*sockaddr,
    RemoteAddressLength: ?*u32,
    RemoteAddress: ?*sockaddr,
    timeout: ?*const timeval,
    Reserved: *OVERLAPPED,
) callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSAConnectByList(
    s: SOCKET,
    SocketAddress: *SOCKET_ADDRESS_LIST,
    LocalAddressLength: ?*u32,
    LocalAddress: ?*sockaddr,
    RemoteAddressLength: ?*u32,
    RemoteAddress: ?*sockaddr,
    timeout: ?*const timeval,
    Reserved: *OVERLAPPED,
) callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSACreateEvent() callconv(WINAPI) HANDLE;

pub extern "ws2_32" fn WSADuplicateSocketA(
    s: SOCKET,
    dwProcessId: u32,
    lpProtocolInfo: *WSAPROTOCOL_INFOA,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSADuplicateSocketW(
    s: SOCKET,
    dwProcessId: u32,
    lpProtocolInfo: *WSAPROTOCOL_INFOW,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAEnumNetworkEvents(
    s: SOCKET,
    hEventObject: HANDLE,
    lpNetworkEvents: *WSANETWORKEVENTS,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAEnumProtocolsA(
    lpiProtocols: ?*i32,
    lpProtocolBuffer: ?*WSAPROTOCOL_INFOA,
    lpdwBufferLength: *u32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAEnumProtocolsW(
    lpiProtocols: ?*i32,
    lpProtocolBuffer: ?*WSAPROTOCOL_INFOW,
    lpdwBufferLength: *u32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAEventSelect(
    s: SOCKET,
    hEventObject: HANDLE,
    lNetworkEvents: i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAGetOverlappedResult(
    s: SOCKET,
    lpOverlapped: *OVERLAPPED,
    lpcbTransfer: *u32,
    fWait: BOOL,
    lpdwFlags: *u32,
) callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSAGetQOSByName(
    s: SOCKET,
    lpQOSName: *WSABUF,
    lpQOS: *QOS,
) callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSAHtonl(
    s: SOCKET,
    hostlong: u32,
    lpnetlong: *u32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAHtons(
    s: SOCKET,
    hostshort: u16,
    lpnetshort: *u16,
) callconv(WINAPI) i32;

// Renamed `lpvOutbuffer`/`cbOutbuffer` to the MSDN casing (`lpvOutBuffer`/`cbOutBuffer`).
// Extern parameter names are documentation only; no ABI or caller impact.
pub extern "ws2_32" fn WSAIoctl(
    s: SOCKET,
    dwIoControlCode: u32,
    lpvInBuffer: ?*const c_void,
    cbInBuffer: u32,
    lpvOutBuffer: ?*c_void,
    cbOutBuffer: u32,
    lpcbBytesReturned: *u32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) SOCKET;

pub extern "ws2_32" fn WSAJoinLeaf(
    s: SOCKET,
    name: *const sockaddr,
    namelen: i32,
    lpCallerData: ?*WSABUF,
    lpCalleeData: ?*WSABUF,
    lpSQOS: ?*QOS,
    lpGQOS: ?*QOS,
    dwFlags: u32,
) callconv(WINAPI) SOCKET;

pub extern "ws2_32" fn WSANtohl(
    s: SOCKET,
    netlong: u32,
    lphostlong: *u32,
) callconv(WINAPI) u32;

pub extern "ws2_32" fn WSANtohs(
    s: SOCKET,
    netshort: u16,
    lphostshort: *u16,
) callconv(WINAPI) i32;

// Fixed typos `dwBufferCouynt` -> `dwBufferCount` and
// `lpNumberOfBytesRecv` -> `lpNumberOfBytesRecvd` (per MSDN).
pub extern "ws2_32" fn WSARecv(
    s: SOCKET,
    lpBuffers: [*]WSABUF,
    dwBufferCount: u32,
    lpNumberOfBytesRecvd: ?*u32,
    lpFlags: *u32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSARecvDisconnect(
    s: SOCKET,
    lpInboundDisconnectData: ?*WSABUF,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSARecvFrom(
    s: SOCKET,
    lpBuffers: [*]WSABUF,
    dwBufferCount: u32,
    lpNumberOfBytesRecvd: ?*u32,
    lpFlags: *u32,
    lpFrom: ?*sockaddr,
    lpFromlen: ?*i32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAResetEvent(hEvent: HANDLE) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSASend(
    s: SOCKET,
    lpBuffers: [*]WSABUF,
    dwBufferCount: u32,
    lpNumberOfBytesSent: ?*u32,
    dwFlags: u32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSASendMsg(
    s: SOCKET,
    lpMsg: *const WSAMSG_const,
    dwFlags: u32,
    lpNumberOfBytesSent: ?*u32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSARecvMsg(
    s: SOCKET,
    lpMsg: *WSAMSG,
    lpdwNumberOfBytesRecvd: ?*u32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSASendDisconnect(
    s: SOCKET,
    lpOutboundDisconnectData: ?*WSABUF,
) callconv(WINAPI) i32;

// Fixed typo `lpCompletionRounte` -> `lpCompletionRoutine`.
pub extern "ws2_32" fn WSASendTo(
    s: SOCKET,
    lpBuffers: [*]WSABUF,
    dwBufferCount: u32,
    lpNumberOfBytesSent: ?*u32,
    dwFlags: u32,
    lpTo: ?*const sockaddr,
    iToLen: i32,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSASetEvent(
    hEvent: HANDLE,
) callconv(WINAPI) BOOL;

pub extern "ws2_32" fn WSASocketA(
    af: i32,
    @"type": i32,
    protocol: i32,
    lpProtocolInfo: ?*WSAPROTOCOL_INFOA,
    g: u32,
    dwFlags: u32,
) callconv(WINAPI) SOCKET;

pub extern "ws2_32" fn WSASocketW(
    af: i32,
    @"type": i32,
    protocol: i32,
    lpProtocolInfo: ?*WSAPROTOCOL_INFOW,
    g: u32,
    dwFlags: u32,
) callconv(WINAPI) SOCKET;

pub extern "ws2_32" fn WSAWaitForMultipleEvents(
    cEvents: u32,
    lphEvents: [*]const HANDLE,
    fWaitAll: BOOL,
    dwTimeout: u32,
    fAlertable: BOOL,
) callconv(WINAPI) u32;

pub extern "ws2_32" fn WSAAddressToStringA(
    lpsaAddress: *sockaddr,
    dwAddressLength: u32,
    lpProtocolInfo: ?*WSAPROTOCOL_INFOA,
    lpszAddressString: [*]u8,
    lpdwAddressStringLength: *u32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAAddressToStringW(
    lpsaAddress: *sockaddr,
    dwAddressLength: u32,
    lpProtocolInfo: ?*WSAPROTOCOL_INFOW,
    lpszAddressString: [*]u16,
    lpdwAddressStringLength: *u32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAStringToAddressA(
    AddressString: [*:0]const u8,
    AddressFamily: i32,
    lpProtocolInfo: ?*WSAPROTOCOL_INFOA,
    lpAddress: *sockaddr,
    lpAddressLength: *i32,
) callconv(WINAPI) i32;

// Fixed typo `lpAddrses` -> `lpAddress` (matches the A variant and MSDN).
pub extern "ws2_32" fn WSAStringToAddressW(
    AddressString: [*:0]const u16,
    AddressFamily: i32,
    lpProtocolInfo: ?*WSAPROTOCOL_INFOW,
    lpAddress: *sockaddr,
    lpAddressLength: *i32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAProviderConfigChange(
    lpNotificationHandle: *HANDLE,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn WSAPoll(
    fdArray: [*]WSAPOLLFD,
    fds: u32,
    timeout: i32,
) callconv(WINAPI) i32;

pub extern "mswsock" fn WSARecvEx(
    s: SOCKET,
    buf: [*]u8,
    len: i32,
    flags: *i32,
) callconv(WINAPI) i32;

pub extern "mswsock" fn TransmitFile(
    hSocket: SOCKET,
    hFile: HANDLE,
    nNumberOfBytesToWrite: u32,
    nNumberOfBytesPerSend: u32,
    lpOverlapped: ?*OVERLAPPED,
    lpTransmitBuffers: ?*TRANSMIT_FILE_BUFFERS,
    dwReserved: u32,
) callconv(WINAPI) BOOL;

pub extern "mswsock" fn AcceptEx(
    sListenSocket: SOCKET,
    sAcceptSocket: SOCKET,
    lpOutputBuffer: *c_void,
    dwReceiveDataLength: u32,
    dwLocalAddressLength: u32,
    dwRemoteAddressLength: u32,
    lpdwBytesReceived: *u32,
    lpOverlapped: *OVERLAPPED,
) callconv(WINAPI) BOOL;

pub extern "mswsock" fn GetAcceptExSockaddrs(
    lpOutputBuffer: *c_void,
    dwReceiveDataLength: u32,
    dwLocalAddressLength: u32,
    dwRemoteAddressLength: u32,
    LocalSockaddr: **sockaddr,
    LocalSockaddrLength: *i32,
    RemoteSockaddr: **sockaddr,
    RemoteSockaddrLength: *i32,
) callconv(WINAPI) void;

pub extern "ws2_32" fn WSAProviderCompleteAsyncCall(
    hAsyncCall: HANDLE,
    iRetCode: i32,
) callconv(WINAPI) i32;

pub extern "mswsock" fn EnumProtocolsA(
    lpiProtocols: ?*i32,
    lpProtocolBuffer: *c_void,
    lpdwBufferLength: *u32,
) callconv(WINAPI) i32;

pub extern "mswsock" fn EnumProtocolsW(
    lpiProtocols: ?*i32,
    lpProtocolBuffer: *c_void,
    lpdwBufferLength: *u32,
) callconv(WINAPI) i32;

// NOTE(review): compared to GetAddressByNameW below (and MSDN), this A variant
// appears to be missing an `lpdwBufferLength: *u32` parameter between
// `lpCsaddrBuffer` and `lpAliasBuffer` — verify before use. Not changed here
// because adding a parameter would alter the call ABI for existing callers.
pub extern "mswsock" fn GetAddressByNameA(
    dwNameSpace: u32,
    lpServiceType: *GUID,
    lpServiceName: ?[*:0]u8,
    lpiProtocols: ?*i32,
    dwResolution: u32,
    lpServiceAsyncInfo: ?*SERVICE_ASYNC_INFO,
    lpCsaddrBuffer: *c_void,
    lpAliasBuffer: ?[*:0]const u8,
    lpdwAliasBufferLength: *u32,
) callconv(WINAPI) i32;

// Fixed typo `ldwBufferLEngth` -> `lpdwBufferLength`.
pub extern "mswsock" fn GetAddressByNameW(
    dwNameSpace: u32,
    lpServiceType: *GUID,
    lpServiceName: ?[*:0]u16,
    lpiProtocols: ?*i32,
    dwResolution: u32,
    lpServiceAsyncInfo: ?*SERVICE_ASYNC_INFO,
    lpCsaddrBuffer: *c_void,
    lpdwBufferLength: *u32,
    lpAliasBuffer: ?[*:0]u16,
    lpdwAliasBufferLength: *u32,
) callconv(WINAPI) i32;

pub extern "mswsock" fn GetTypeByNameA(
    lpServiceName: [*:0]u8,
    lpServiceType: *GUID,
) callconv(WINAPI) i32;

pub extern "mswsock" fn GetTypeByNameW(
    lpServiceName: [*:0]u16,
    lpServiceType: *GUID,
) callconv(WINAPI) i32;

pub extern "mswsock" fn GetNameByTypeA(
    lpServiceType: *GUID,
    lpServiceName: [*:0]u8,
    dwNameLength: u32,
) callconv(WINAPI) i32;

pub extern "mswsock" fn GetNameByTypeW(
    lpServiceType: *GUID,
    lpServiceName: [*:0]u16,
    dwNameLength: u32,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn getaddrinfo(
    pNodeName: ?[*:0]const u8,
    pServiceName: ?[*:0]const u8,
    pHints: ?*const addrinfoa,
    ppResult: **addrinfoa,
) callconv(WINAPI) i32;

// Fixed typo `dwNameSapce` -> `dwNameSpace`.
pub extern "ws2_32" fn GetAddrInfoExA(
    pName: ?[*:0]const u8,
    pServiceName: ?[*:0]const u8,
    dwNameSpace: u32,
    lpNspId: ?*GUID,
    hints: ?*const addrinfoexA,
    ppResult: **addrinfoexA,
    timeout: ?*timeval,
    lpOverlapped: ?*OVERLAPPED,
    lpCompletionRoutine: ?LPLOOKUPSERVICE_COMPLETION_ROUTINE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn GetAddrInfoExCancel(
    lpHandle: *HANDLE,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn GetAddrInfoExOverlappedResult(
    lpOverlapped: *OVERLAPPED,
) callconv(WINAPI) i32;

pub extern "ws2_32" fn freeaddrinfo(
    pAddrInfo: ?*addrinfoa,
) callconv(WINAPI) void;

pub extern "ws2_32" fn FreeAddrInfoEx(
    pAddrInfoEx: ?*addrinfoexA,
) callconv(WINAPI) void;

// Fixed typo `ServiceBufferName` -> `ServiceBufferSize` (per MSDN).
pub extern "ws2_32" fn getnameinfo(
    pSockaddr: *const sockaddr,
    SockaddrLength: i32,
    pNodeBuffer: ?[*]u8,
    NodeBufferSize: u32,
    pServiceBuffer: ?[*]u8,
    ServiceBufferSize: u32,
    Flags: i32,
) callconv(WINAPI) i32;

pub extern "IPHLPAPI" fn if_nametoindex(
    InterfaceName: [*:0]const u8,
) callconv(WINAPI) u32;
lib/std/os/windows/ws2_32.zig
const std = @import("std");
const c = @cImport({
    @cInclude("epoxy/gl.h");
});

// Reference every declaration so `zig test` semantically checks the whole binding.
comptime {
    std.testing.refAllDecls(@This());
}

pub usingnamespace @import("types.zig");

/// Controls how OpenGL errors reported by `glGetError` are treated.
pub const ErrorHandling = enum {
    /// OpenGL functions will log the error, but will not assert that no error happened
    log,
    /// Asserts that no errors will happen.
    assert,
    /// No error checking will be executed. Gotta go fast!
    none,
};

/// The error-handling strategy in effect. Overridable by declaring
/// `opengl_error_handling` in the root source file; otherwise defaults to
/// `.none` in ReleaseFast and `.assert` in every other build mode.
/// Fix: `@hasDecl` was previously queried with an empty decl name (""), which
/// is never present, so the documented root override could never take effect.
const error_handling: ErrorHandling = if (@hasDecl(@import("root"), "opengl_error_handling"))
    @import("root").opengl_error_handling
else if (std.builtin.mode == .ReleaseFast)
    .none
else
    .assert;

/// Checks if a OpenGL error happened and may yield it.
/// This function is configurable via `opengl_error_handling` in the root file.
/// Drains the whole GL error queue (`glGetError` returns one error flag per
/// call), logging each error; depending on `error_handling` it then either
/// continues (`.log`) or panics (`.assert`). Unknown codes are reported as
/// "unknown error".
fn checkError() void {
    // Comptime-known: in `.none` mode this whole function compiles to a no-op.
    if (error_handling == .none) return;

    var error_code = c.glGetError();
    if (error_code == c.GL_NO_ERROR) return;

    while (error_code != c.GL_NO_ERROR) : (error_code = c.glGetError()) {
        const name = switch (error_code) {
            c.GL_INVALID_ENUM => "invalid enum",
            c.GL_INVALID_VALUE => "invalid value",
            c.GL_INVALID_OPERATION => "invalid operation",
            c.GL_STACK_OVERFLOW => "stack overflow",
            c.GL_STACK_UNDERFLOW => "stack underflow",
            c.GL_OUT_OF_MEMORY => "out of memory",
            c.GL_INVALID_FRAMEBUFFER_OPERATION => "invalid framebuffer operation",
            // c.GL_INVALID_FRAMEBUFFER_OPERATION_EXT => Error.InvalidFramebufferOperation,
            // c.GL_INVALID_FRAMEBUFFER_OPERATION_OES => Error.InvalidFramebufferOperation,
            c.GL_TABLE_TOO_LARGE => "Table too large",
            // c.GL_TABLE_TOO_LARGE_EXT => Error.TableTooLarge,
            c.GL_TEXTURE_TOO_LARGE_EXT => "Texture too large",
            else => "unknown error",
        };

        std.log.scoped(.OpenGL).err("OpenGL failure: {s}\n", .{name});
        switch (error_handling) {
            .log => {},
            .assert => @panic("OpenGL error"),
            .none => unreachable, // excluded by the early return above
        }
    }
}

/// Integer conversion helper.
fn cs2gl(size: usize) c.GLsizei { return @intCast(c.GLsizei, size); } fn ui2gl(val: usize) c.GLuint { return @intCast(c.GLuint, val); } fn b2gl(b: bool) c.GLboolean { return if (b) c.GL_TRUE else c.GL_FALSE; } pub const DebugSource = enum { api, window_system, shader_compiler, third_party, application, other, }; pub const DebugMessageType = enum { @"error", deprecated_behavior, undefined_behavior, portability, performance, other, }; pub const DebugSeverity = enum { high, medium, low, notification, }; fn DebugMessageCallbackHandler(comptime Context: type) type { return if (Context == void) fn (source: DebugSource, msg_type: DebugMessageType, id: usize, severity: DebugSeverity, message: []const u8) void else fn (context: Context, source: DebugSource, msg_type: DebugMessageType, id: usize, severity: DebugSeverity, message: []const u8) void; } /// Sets the OpenGL debug callback handler in zig style. /// `context` may be a pointer or `{}`. pub fn debugMessageCallback(context: anytype, comptime handler: DebugMessageCallbackHandler(@TypeOf(context))) void { const is_void = (@TypeOf(context) == void); const Context = @TypeOf(context); const H = struct { fn translateSource(source: c.GLuint) DebugSource { return switch (source) { c.GL_DEBUG_SOURCE_API => DebugSource.api, // c.GL_DEBUG_SOURCE_API_ARB => DebugSource.api, // c.GL_DEBUG_SOURCE_API_KHR => DebugSource.api, c.GL_DEBUG_SOURCE_WINDOW_SYSTEM => DebugSource.window_system, // c.GL_DEBUG_SOURCE_WINDOW_SYSTEM_ARB => DebugSource.window_system, // c.GL_DEBUG_SOURCE_WINDOW_SYSTEM_KHR => DebugSource.window_system, c.GL_DEBUG_SOURCE_SHADER_COMPILER => DebugSource.shader_compiler, // c.GL_DEBUG_SOURCE_SHADER_COMPILER_ARB => DebugSource.shader_compiler, // c.GL_DEBUG_SOURCE_SHADER_COMPILER_KHR => DebugSource.shader_compiler, c.GL_DEBUG_SOURCE_THIRD_PARTY => DebugSource.third_party, // c.GL_DEBUG_SOURCE_THIRD_PARTY_ARB => DebugSource.third_party, // c.GL_DEBUG_SOURCE_THIRD_PARTY_KHR => DebugSource.third_party, 
c.GL_DEBUG_SOURCE_APPLICATION => DebugSource.application, // c.GL_DEBUG_SOURCE_APPLICATION_ARB => DebugSource.application, // c.GL_DEBUG_SOURCE_APPLICATION_KHR => DebugSource.application, c.GL_DEBUG_SOURCE_OTHER => DebugSource.other, // c.GL_DEBUG_SOURCE_OTHER_ARB => DebugSource.other, // c.GL_DEBUG_SOURCE_OTHER_KHR => DebugSource.other, else => DebugSource.other, }; } fn translateMessageType(msg_type: c.GLuint) DebugMessageType { return switch (msg_type) { c.GL_DEBUG_TYPE_ERROR => DebugMessageType.@"error", // c.GL_DEBUG_TYPE_ERROR_ARB => DebugMessageType.@"error", // c.GL_DEBUG_TYPE_ERROR_KHR => DebugMessageType.@"error", c.GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR => DebugMessageType.deprecated_behavior, // c.GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_ARB => DebugMessageType.deprecated_behavior, // c.GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR => DebugMessageType.deprecated_behavior, c.GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR => DebugMessageType.undefined_behavior, // c.GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_ARB => DebugMessageType.undefined_behavior, // c.GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR => DebugMessageType.undefined_behavior, c.GL_DEBUG_TYPE_PORTABILITY => DebugMessageType.portability, // c.GL_DEBUG_TYPE_PORTABILITY_ARB => DebugMessageType.portability, // c.GL_DEBUG_TYPE_PORTABILITY_KHR => DebugMessageType.portability, c.GL_DEBUG_TYPE_PERFORMANCE => DebugMessageType.performance, // c.GL_DEBUG_TYPE_PERFORMANCE_ARB => DebugMessageType.performance, // c.GL_DEBUG_TYPE_PERFORMANCE_KHR => DebugMessageType.performance, c.GL_DEBUG_TYPE_OTHER => DebugMessageType.other, // c.GL_DEBUG_TYPE_OTHER_ARB => DebugMessageType.other, // c.GL_DEBUG_TYPE_OTHER_KHR => DebugMessageType.other, else => DebugMessageType.other, }; } fn translateSeverity(sev: c.GLuint) DebugSeverity { return switch (sev) { c.GL_DEBUG_SEVERITY_HIGH => DebugSeverity.high, // c.GL_DEBUG_SEVERITY_HIGH_AMD => DebugSeverity.high, // c.GL_DEBUG_SEVERITY_HIGH_ARB => DebugSeverity.high, // c.GL_DEBUG_SEVERITY_HIGH_KHR => DebugSeverity.high, 
c.GL_DEBUG_SEVERITY_MEDIUM => DebugSeverity.medium, // c.GL_DEBUG_SEVERITY_MEDIUM_AMD => DebugSeverity.medium, // c.GL_DEBUG_SEVERITY_MEDIUM_ARB => DebugSeverity.medium, // c.GL_DEBUG_SEVERITY_MEDIUM_KHR => DebugSeverity.medium, c.GL_DEBUG_SEVERITY_LOW => DebugSeverity.low, // c.GL_DEBUG_SEVERITY_LOW_AMD => DebugSeverity.low, // c.GL_DEBUG_SEVERITY_LOW_ARB => DebugSeverity.low, // c.GL_DEBUG_SEVERITY_LOW_KHR => DebugSeverity.low, c.GL_DEBUG_SEVERITY_NOTIFICATION => DebugSeverity.notification, // c.GL_DEBUG_SEVERITY_NOTIFICATION_KHR => DebugSeverity.notification, else => DebugSeverity.high, }; } fn callback( c_source: c.GLenum, c_msg_type: c.GLenum, id: c.GLuint, c_severity: c.GLenum, length: c.GLsizei, c_message: [*c]const c.GLchar, userParam: ?*const c_void, ) callconv(.C) void { const debug_source = translateSource(c_source); const msg_type = translateMessageType(c_msg_type); const severity = translateSeverity(c_severity); const message = c_message[0..@intCast(usize, length)]; if (is_void) { handler(debug_source, msg_type, id, severity, message); } else { handler(@intToPtr(Context, @ptrToInt(userParam)), debug_source, msg_type, id, severity, message); } } }; if (is_void) c.glDebugMessageCallback(H.callback, null) else c.glDebugMessageCallback(H.callback, @ptrCast(?*const c_void, context)); checkError(); } pub fn clearColor(r: f32, g: f32, b: f32, a: f32) void { c.glClearColor(r, g, b, a); checkError(); } pub fn clearDepth(depth: f32) void { c.glClearDepth(depth); checkError(); } pub fn clear(mask: struct { color: bool = false, depth: bool = false, stencil: bool = false }) void { c.glClear(@as(c.GLbitfield, if (mask.color) c.GL_COLOR_BUFFER_BIT else 0) | @as(c.GLbitfield, if (mask.depth) c.GL_DEPTH_BUFFER_BIT else 0) | @as(c.GLbitfield, if (mask.stencil) c.GL_STENCIL_BUFFER_BIT else 0)); checkError(); } /////////////////////////////////////////////////////////////////////////////// // Vertex Arrays pub fn createVertexArrays(items: []VertexArray) void { 
c.glCreateVertexArrays(cs2gl(items.len), @ptrCast([*]c.GLuint, items.ptr)); checkError(); } pub fn createVertexArray() VertexArray { var vao: VertexArray = undefined; createVertexArrays(@ptrCast([*]VertexArray, &vao)[0..1]); return vao; } pub fn genVertexArrays(items: []VertexArray) void { c.glGenVertexArrays(cs2gl(items.len), @ptrCast([*]c.GLuint, items.ptr)); checkError(); } pub fn genVertexArray() VertexArray { var vao: VertexArray = undefined; genVertexArrays(@ptrCast([*]VertexArray, &vao)[0..1]); return vao; } pub fn bindVertexArray(vao: VertexArray) void { c.glBindVertexArray(@enumToInt(vao)); checkError(); } pub fn deleteVertexArrays(items: []const VertexArray) void { c.glDeleteVertexArrays(cs2gl(items.len), @ptrCast([*]const c.GLuint, items.ptr)); } pub fn deleteVertexArray(vao: VertexArray) void { deleteVertexArrays(@ptrCast([*]const VertexArray, &vao)[0..1]); } pub fn enableVertexAttribArray(index: u32) void { c.glEnableVertexAttribArray(index); checkError(); } pub fn disableVertexAttribArray(index: u32) void { c.glDisableVertexAttribArray(index); checkError(); } pub fn enableVertexArrayAttrib(vertexArray: VertexArray, index: u32) void { c.glEnableVertexArrayAttrib(@enumToInt(vertexArray), index); checkError(); } pub fn disableVertexArrayAttrib(vertexArray: VertexArray, index: u32) void { c.glDisableVertexArrayAttrib(@enumToInt(vertexArray), index); checkError(); } pub const Type = enum(c.GLenum) { byte = c.GL_BYTE, short = c.GL_SHORT, int = c.GL_INT, fixed = c.GL_FIXED, float = c.GL_FLOAT, half_float = c.GL_HALF_FLOAT, double = c.GL_DOUBLE, unsigned_byte = c.GL_UNSIGNED_BYTE, unsigned_short = c.GL_UNSIGNED_SHORT, unsigned_int = c.GL_UNSIGNED_INT, int_2_10_10_10_rev = c.GL_INT_2_10_10_10_REV, unsigned_int_2_10_10_10_rev = c.GL_UNSIGNED_INT_2_10_10_10_REV, unsigned_int_10_f_11_f_11_f_rev = c.GL_UNSIGNED_INT_10F_11F_11F_REV, }; pub fn vertexAttribFormat(attribindex: u32, size: u32, attribute_type: Type, normalized: bool, relativeoffset: usize) void { 
c.glVertexAttribFormat( attribindex, @intCast(c.GLint, size), @enumToInt(attribute_type), b2gl(normalized), ui2gl(relativeoffset), ); checkError(); } pub fn vertexAttribIFormat(attribindex: u32, size: u32, attribute_type: Type, relativeoffset: usize) void { c.glVertexAttribIFormat( attribindex, @intCast(c.GLint, size), @enumToInt(attribute_type), ui2gl(relativeoffset), ); checkError(); } pub fn vertexAttribLFormat(attribindex: u32, size: u32, attribute_type: Type, relativeoffset: usize) void { c.glVertexAttribLFormat( attribindex, @intCast(c.GLint, size), @enumToInt(attribute_type), ui2gl(relativeoffset), ); checkError(); } pub fn vertexAttribPointer(attribindex: u32, size: u32, attribute_type: Type, normalized: bool, stride: usize, relativeoffset: usize) void { c.glVertexAttribPointer( attribindex, @intCast(c.GLint, size), @enumToInt(attribute_type), b2gl(normalized), cs2gl(stride), @intToPtr(*allowzero const c_void, relativeoffset), ); checkError(); } pub fn vertexArrayAttribFormat( vertexArray: VertexArray, attribindex: u32, size: u32, attribute_type: Type, normalized: bool, relativeoffset: usize, ) void { c.glVertexArrayAttribFormat( @enumToInt(vertexArray), attribindex, @intCast(c.GLint, size), @enumToInt(attribute_type), b2gl(normalized), ui2gl(relativeoffset), ); checkError(); } pub fn vertexArrayAttribIFormat(vertexArray: VertexArray, attribindex: u32, size: u32, attribute_type: Type, relativeoffset: usize) void { c.glVertexArrayAttribIFormat( @enumToInt(vertexArray), attribindex, @intCast( c.GLint, size, ), @enumToInt(attribute_type), ui2gl(relativeoffset), ); checkError(); } pub fn vertexArrayAttribLFormat(vertexArray: VertexArray, attribindex: u32, size: u32, attribute_type: Type, relativeoffset: usize) void { c.glVertexArrayAttribLFormat( @enumToInt(vertexArray), attribindex, @intCast( c.GLint, size, ), @enumToInt(attribute_type), @intCast(c.GLuint, relativeoffset), ); checkError(); } pub fn vertexAttribBinding(attribindex: u32, bindingindex: u32) void 
{ c.glVertexAttribBinding( attribindex, bindingindex, ); checkError(); } pub fn vertexArrayAttribBinding(vertexArray: VertexArray, attribindex: u32, bindingindex: u32) void { c.glVertexArrayAttribBinding( @enumToInt(vertexArray), attribindex, bindingindex, ); checkError(); } pub fn bindVertexBuffer(bindingindex: u32, buffer: Buffer, offset: usize, stride: usize) void { c.glBindVertexBuffer(bindingindex, @enumToInt(buffer), cs2gl(offset), cs2gl(stride)); checkError(); } pub fn vertexArrayVertexBuffer(vertexArray: VertexArray, bindingindex: u32, buffer: Buffer, offset: usize, stride: usize) void { c.glVertexArrayVertexBuffer(@enumToInt(vertexArray), bindingindex, @enumToInt(buffer), cs2gl(offset), cs2gl(stride)); checkError(); } pub fn vertexArrayElementBuffer(vertexArray: VertexArray, buffer: Buffer) void { c.glVertexArrayElementBuffer(@enumToInt(vertexArray), @enumToInt(buffer)); checkError(); } /////////////////////////////////////////////////////////////////////////////// // Buffer pub const BufferTarget = enum(c.GLenum) { /// Vertex attributes array_buffer = c.GL_ARRAY_BUFFER, /// Atomic counter storage atomic_counter_buffer = c.GL_ATOMIC_COUNTER_BUFFER, /// Buffer copy source copy_read_buffer = c.GL_COPY_READ_BUFFER, /// Buffer copy destination copy_write_buffer = c.GL_COPY_WRITE_BUFFER, /// Indirect compute dispatch commands dispatch_indirect_buffer = c.GL_DISPATCH_INDIRECT_BUFFER, /// Indirect command arguments draw_indirect_buffer = c.GL_DRAW_INDIRECT_BUFFER, /// Vertex array indices element_array_buffer = c.GL_ELEMENT_ARRAY_BUFFER, /// Pixel read target pixel_pack_buffer = c.GL_PIXEL_PACK_BUFFER, /// Texture data source pixel_unpack_buffer = c.GL_PIXEL_UNPACK_BUFFER, /// Query result buffer query_buffer = c.GL_QUERY_BUFFER, /// Read-write storage for shaders shader_storage_buffer = c.GL_SHADER_STORAGE_BUFFER, /// Texture data buffer texture_buffer = c.GL_TEXTURE_BUFFER, /// Transform feedback buffer transform_feedback_buffer = 
c.GL_TRANSFORM_FEEDBACK_BUFFER, /// Uniform block storage uniform_buffer = c.GL_UNIFORM_BUFFER, }; pub fn createBuffers(items: []Buffer) void { c.glCreateBuffers(cs2gl(items.len), @ptrCast([*]c.GLuint, items.ptr)); checkError(); } pub fn createBuffer() Buffer { var buf: Buffer = undefined; createBuffers(@ptrCast([*]Buffer, &buf)[0..1]); return buf; } pub fn genBuffers(items: []Buffer) void { c.glGenBuffers(cs2gl(items.len), @ptrCast([*]c.GLuint, items.ptr)); checkError(); } pub fn genBuffer() Buffer { var buf: Buffer = undefined; genBuffers(@ptrCast([*]Buffer, &buf)[0..1]); return buf; } pub fn bindBuffer(buf: Buffer, target: BufferTarget) void { c.glBindBuffer(@enumToInt(target), @enumToInt(buf)); checkError(); } pub fn deleteBuffers(items: []const Buffer) void { c.glDeleteBuffers(cs2gl(items.len), @ptrCast([*]const c.GLuint, items.ptr)); } pub fn deleteBuffer(buf: Buffer) void { deleteBuffers(@ptrCast([*]const Buffer, &buf)[0..1]); } pub const BufferUsage = enum(c.GLenum) { stream_draw = c.GL_STREAM_DRAW, stream_read = c.GL_STREAM_READ, stream_copy = c.GL_STREAM_COPY, static_draw = c.GL_STATIC_DRAW, static_read = c.GL_STATIC_READ, static_copy = c.GL_STATIC_COPY, dynamic_draw = c.GL_DYNAMIC_DRAW, dynamic_read = c.GL_DYNAMIC_READ, dynamic_copy = c.GL_DYNAMIC_COPY, }; // using align(1) as we are not required to have aligned data here pub fn namedBufferData(buf: Buffer, comptime T: type, items: []align(1) const T, usage: BufferUsage) void { c.glNamedBufferData( @enumToInt(buf), cs2gl(@sizeOf(T) * items.len), items.ptr, @enumToInt(usage), ); checkError(); } pub fn bufferData(target: BufferTarget, comptime T: type, items: []align(1) const T, usage: BufferUsage) void { c.glBufferData( @enumToInt(target), cs2gl(@sizeOf(T) * items.len), items.ptr, @enumToInt(usage), ); checkError(); } /////////////////////////////////////////////////////////////////////////////// // Shaders pub const ShaderType = enum(c.GLenum) { compute = c.GL_COMPUTE_SHADER, vertex = c.GL_VERTEX_SHADER, 
tess_control = c.GL_TESS_CONTROL_SHADER, tess_evaluation = c.GL_TESS_EVALUATION_SHADER, geometry = c.GL_GEOMETRY_SHADER, fragment = c.GL_FRAGMENT_SHADER, }; pub fn createShader(shaderType: ShaderType) Shader { const shader = @intToEnum(Shader, c.glCreateShader(@enumToInt(shaderType))); if (shader == .invalid) { checkError(); unreachable; } return shader; } pub fn deleteShader(shader: Shader) void { c.glDeleteShader(@enumToInt(shader)); checkError(); } pub fn compileShader(shader: Shader) void { c.glCompileShader(@enumToInt(shader)); checkError(); } pub fn shaderSource(shader: Shader, comptime N: comptime_int, sources: *const [N][]const u8) void { var lengths: [N]c.GLint = undefined; for (lengths) |*len, i| { len.* = @intCast(c.GLint, sources[i].len); } var ptrs: [N]*const c.GLchar = undefined; for (ptrs) |*ptr, i| { ptr.* = @ptrCast(*const c.GLchar, sources[i].ptr); } c.glShaderSource(@enumToInt(shader), N, &ptrs, &lengths); checkError(); } pub const ShaderParameter = enum(c.GLenum) { shader_type = c.GL_SHADER_TYPE, delete_status = c.GL_DELETE_STATUS, compile_status = c.GL_COMPILE_STATUS, info_log_length = c.GL_INFO_LOG_LENGTH, shader_source_length = c.GL_SHADER_SOURCE_LENGTH, }; pub fn getShader(shader: Shader, parameter: ShaderParameter) c.GLint { var value: c.GLint = undefined; c.glGetShaderiv(@enumToInt(shader), @enumToInt(parameter), &value); checkError(); return value; } pub fn getShaderInfoLog(shader: Shader, allocator: *std.mem.Allocator) ![:0]const u8 { const length = getShader(shader, .info_log_length); const log = try allocator.allocWithOptions(u8, @intCast(usize, length) + 1, null, 0); errdefer allocator.free(log); var actual_length: c.GLsizei = undefined; c.glGetShaderInfoLog(@enumToInt(shader), cs2gl(log.len), &actual_length, log.ptr); checkError(); log[@intCast(usize, actual_length)] = 0; return log[0..@intCast(usize, actual_length) :0]; } /////////////////////////////////////////////////////////////////////////////// // Program pub fn 
createProgram() Program { const program = @intToEnum(Program, c.glCreateProgram()); if (program == .invalid) { checkError(); unreachable; } return program; } pub fn deleteProgram(program: Program) void { c.glDeleteProgram(@enumToInt(program)); checkError(); } pub fn linkProgram(program: Program) void { c.glLinkProgram(@enumToInt(program)); checkError(); } pub fn attachShader(program: Program, shader: Shader) void { c.glAttachShader(@enumToInt(program), @enumToInt(shader)); checkError(); } pub fn detachShader(program: Program, shader: Shader) void { c.glDetachShader(@enumToInt(program), @enumToInt(shader)); checkError(); } pub fn useProgram(program: Program) void { c.glUseProgram(@enumToInt(program)); checkError(); } pub const ProgramParameter = enum(c.GLenum) { delete_status = c.GL_DELETE_STATUS, link_status = c.GL_LINK_STATUS, validate_status = c.GL_VALIDATE_STATUS, info_log_length = c.GL_INFO_LOG_LENGTH, attached_shaders = c.GL_ATTACHED_SHADERS, active_atomic_counter_buffers = c.GL_ACTIVE_ATOMIC_COUNTER_BUFFERS, active_attributes = c.GL_ACTIVE_ATTRIBUTES, active_attribute_max_length = c.GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, active_uniforms = c.GL_ACTIVE_UNIFORMS, active_uniform_blocks = c.GL_ACTIVE_UNIFORM_BLOCKS, active_uniform_block_max_name_length = c.GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH, active_uniform_max_length = c.GL_ACTIVE_UNIFORM_MAX_LENGTH, compute_work_group_size = c.GL_COMPUTE_WORK_GROUP_SIZE, program_binary_length = c.GL_PROGRAM_BINARY_LENGTH, transform_feedback_buffer_mode = c.GL_TRANSFORM_FEEDBACK_BUFFER_MODE, transform_feedback_varyings = c.GL_TRANSFORM_FEEDBACK_VARYINGS, transform_feedback_varying_max_length = c.GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH, geometry_vertices_out = c.GL_GEOMETRY_VERTICES_OUT, geometry_input_type = c.GL_GEOMETRY_INPUT_TYPE, geometry_output_type = c.GL_GEOMETRY_OUTPUT_TYPE, }; pub fn getProgram(program: Program, parameter: ProgramParameter) c.GLint { var value: c.GLint = undefined; c.glGetProgramiv(@enumToInt(program), 
@enumToInt(parameter), &value); checkError(); return value; } pub fn getProgramInfoLog(program: Program, allocator: *std.mem.Allocator) ![:0]const u8 { const length = getProgram(program, .info_log_length); const log = try allocator.allocWithOptions(u8, @intCast(usize, length) + 1, null, 0); errdefer allocator.free(log); var actual_length: c.GLsizei = undefined; c.glGetProgramInfoLog(@enumToInt(program), cs2gl(log.len), &actual_length, log.ptr); checkError(); log[@intCast(usize, actual_length)] = 0; return log[0..@intCast(usize, actual_length) :0]; } pub fn getUniformLocation(program: Program, name: [:0]const u8) ?u32 { const loc = c.glGetUniformLocation(@enumToInt(program), name.ptr); checkError(); if (loc < 0) return null; return @intCast(u32, loc); } pub fn getAttribLocation(program: Program, name: [:0]const u8) ?u32 { const loc = c.glGetAttribLocation(@enumToInt(program), name.ptr); checkError(); if (loc < 0) return null; return @intCast(u32, loc); } /////////////////////////////////////////////////////////////////////////////// // Uniforms pub fn programUniform1ui(program: Program, location: ?u32, value: u32) void { if (location) |loc| { c.glProgramUniform1ui(@enumToInt(program), @intCast(c.GLint, loc), value); checkError(); } } pub fn programUniform1i(program: Program, location: ?u32, value: i32) void { if (location) |loc| { c.glProgramUniform1i(@enumToInt(program), @intCast(c.GLint, loc), value); checkError(); } } pub fn programUniform1f(program: Program, location: ?u32, value: f32) void { if (location) |loc| { c.glProgramUniform1f(@enumToInt(program), @intCast(c.GLint, loc), value); checkError(); } } pub fn programUniform3f(program: Program, location: ?u32, x: f32, y: f32, z: f32) void { if (location) |loc| { c.glProgramUniform3f(@enumToInt(program), @intCast(c.GLint, loc), x, y, z); checkError(); } } pub fn programUniform4f(program: Program, location: ?u32, x: f32, y: f32, z: f32, w: f32) void { if (location) |loc| { 
c.glProgramUniform4f(@enumToInt(program), @intCast(c.GLint, loc), x, y, z, w); checkError(); } } pub fn programUniformMatrix4(program: Program, location: ?u32, transpose: bool, items: []const [4][4]f32) void { if (location) |loc| { c.glProgramUniformMatrix4fv( @enumToInt(program), @intCast(c.GLint, loc), cs2gl(items.len), b2gl(transpose), @ptrCast(*const f32, items.ptr), ); checkError(); } } pub fn uniform1f(location: ?u32, v0: f32) void { if (location) |loc| { c.glUniform1f(@intCast(c.GLint, loc), v0); checkError(); } } pub fn uniform2f(location: ?u32, v0: f32, v1: f32) void { if (location) |loc| { c.glUniform2f(@intCast(c.GLint, loc), v0, v1); checkError(); } } pub fn uniform3f(location: ?u32, v0: f32, v1: f32, v2: f32) void { if (location) |loc| { c.glUniform3f(@intCast(c.GLint, loc), v0, v1, v2); checkError(); } } pub fn uniform4f(location: ?u32, v0: f32, v1: f32, v2: f32, v3: f32) void { if (location) |loc| { c.glUniform4f(@intCast(c.GLint, loc), v0, v1, v2, v3); checkError(); } } pub fn uniform1i(location: ?u32, v0: i32) void { if (location) |loc| { c.glUniform1i(@intCast(c.GLint, loc), v0); checkError(); } } pub fn uniform2i(location: ?u32, v0: i32, v1: i32) void { if (location) |loc| { c.glUniform2i(@intCast(c.GLint, loc), v0, v1); checkError(); } } pub fn uniform3i(location: ?u32, v0: i32, v1: i32, v2: i32) void { if (location) |loc| { c.glUniform3i(@intCast(c.GLint, loc), v0, v1, v2); checkError(); } } pub fn uniform4i(location: ?u32, v0: i32, v1: i32, v2: i32, v3: i32) void { if (location) |loc| { c.glUniform4i(@intCast(c.GLint, loc), v0, v1, v2, v3); checkError(); } } pub fn uniform1ui(location: ?u32, v0: u32) void { if (location) |loc| { c.glUniform1ui(@intCast(c.GLint, loc), v0); checkError(); } } pub fn uniform2ui(location: ?u32, v0: u32, v1: u32) void { if (location) |loc| { c.glUniform2ui(@intCast(c.GLint, loc), v0, v1); checkError(); } } pub fn uniform3ui(location: ?u32, v0: u32, v1: u32, v2: u32) void { if (location) |loc| { 
c.glUniform3ui(@intCast(c.GLint, loc), v0, v1, v2); checkError(); } } pub fn uniform4ui(location: ?u32, v0: u32, v1: u32, v2: u32, v3: u32) void { if (location) |loc| { c.glUniform4ui(@intCast(c.GLint, loc), v0, v1, v2, v3); checkError(); } } pub fn uniform1fv(location: ?u32, items: []const f32) void { if (location) |loc| { c.glUniform1fv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const f32, items.ptr)); checkError(); } } pub fn uniform2fv(location: ?u32, items: []const [2]f32) void { if (location) |loc| { c.glUniform2fv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const f32, items.ptr)); checkError(); } } pub fn uniform3fv(location: ?u32, items: []const [3]f32) void { if (location) |loc| { c.glUniform3fv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const f32, items.ptr)); checkError(); } } pub fn uniform4fv(location: ?u32, items: []const [4]f32) void { if (location) |loc| { c.glUniform4fv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const f32, items.ptr)); checkError(); } } pub fn uniform1iv(location: ?u32, items: []const i32) void { if (location) |loc| { c.glUniform1iv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const i32, items.ptr)); checkError(); } } pub fn uniform2iv(location: ?u32, items: []const [2]i32) void { if (location) |loc| { c.glUniform2iv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const i32, items.ptr)); checkError(); } } pub fn uniform3iv(location: ?u32, items: []const [3]i32) void { if (location) |loc| { c.glUniform3iv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const i32, items.ptr)); checkError(); } } pub fn uniform4iv(location: ?u32, items: []const [4]i32) void { if (location) |loc| { c.glUniform4iv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const i32, items.ptr)); checkError(); } } pub fn uniform1uiv(location: ?u32, items: []const u32) void { if (location) |loc| { c.glUniform1uiv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const u32, items.ptr)); checkError(); } } pub fn 
uniform2uiv(location: ?u32, items: []const [2]u32) void { if (location) |loc| { c.glUniform2uiv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const u32, items.ptr)); checkError(); } } pub fn uniform3uiv(location: ?u32, items: []const [3]u32) void { if (location) |loc| { c.glUniform3uiv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const u32, items.ptr)); checkError(); } } pub fn uniform4uiv(location: ?u32, items: []const [4]u32) void { if (location) |loc| { c.glUniform4uiv(@intCast(c.GLint, loc), cs2gl(items.len), @ptrCast(*const u32, items.ptr)); checkError(); } } pub fn uniform1i64(location: ?u32, v0: i64) void { if (location) |loc| { c.glUniform1i64ARB(@intCast(c.GLint, loc), v0); checkError(); } } pub fn uniform2i64(location: ?u32, v0: i64, v1: i64) void { if (location) |loc| { c.glUniform2i64ARB(@intCast(c.GLint, loc), v0, v1); checkError(); } } pub fn uniform3i64(location: ?u32, v0: i64, v1: i64, v2: i64) void { if (location) |loc| { c.glUniform3i64ARB(@intCast(c.GLint, loc), v0, v1, v2); checkError(); } } pub fn uniform4i64(location: ?u32, v0: i64, v1: i64, v2: i64, v3: i64) void { if (location) |loc| { c.glUniform4i64ARB(@intCast(c.GLint, loc), v0, v1, v2, v3); checkError(); } } /////////////////////////////////////////////////////////////////////////////// // Draw Calls pub const PrimitiveType = enum(c.GLenum) { points = c.GL_POINTS, line_strip = c.GL_LINE_STRIP, line_loop = c.GL_LINE_LOOP, lines = c.GL_LINES, line_strip_adjacency = c.GL_LINE_STRIP_ADJACENCY, lines_adjacency = c.GL_LINES_ADJACENCY, triangle_strip = c.GL_TRIANGLE_STRIP, triangle_fan = c.GL_TRIANGLE_FAN, triangles = c.GL_TRIANGLES, triangle_strip_adjacency = c.GL_TRIANGLE_STRIP_ADJACENCY, triangles_adjacency = c.GL_TRIANGLES_ADJACENCY, patches = c.GL_PATCHES, }; pub fn drawArrays(primitiveType: PrimitiveType, first: usize, count: usize) void { c.glDrawArrays(@enumToInt(primitiveType), cs2gl(first), cs2gl(count)); checkError(); } pub const ElementType = enum(c.GLenum) { u8 = 
c.GL_UNSIGNED_BYTE, u16 = c.GL_UNSIGNED_SHORT, u32 = c.GL_UNSIGNED_INT, }; pub fn drawElements(primitiveType: PrimitiveType, count: usize, element_type: ElementType, indices: usize) void { c.glDrawElements( @enumToInt(primitiveType), cs2gl(count), @enumToInt(element_type), @intToPtr(*allowzero const c_void, indices), ); checkError(); } pub fn drawElementsInstanced(primitiveType: PrimitiveType, count: usize, element_type: ElementType, indices: usize, instance_count: usize) void { c.glDrawElementsInstanced( @enumToInt(primitiveType), cs2gl(count), @enumToInt(element_type), @intToPtr(*allowzero const c_void, indices), cs2gl(instance_count), ); checkError(); } /////////////////////////////////////////////////////////////////////////////// // Status Control pub const Capabilities = enum(c.GLenum) { blend = c.GL_BLEND, // clip_distance = c.GL_CLIP_DISTANCE, color_logic_op = c.GL_COLOR_LOGIC_OP, cull_face = c.GL_CULL_FACE, debug_output = c.GL_DEBUG_OUTPUT, debug_output_synchronous = c.GL_DEBUG_OUTPUT_SYNCHRONOUS, depth_clamp = c.GL_DEPTH_CLAMP, depth_test = c.GL_DEPTH_TEST, dither = c.GL_DITHER, framebuffer_srgb = c.GL_FRAMEBUFFER_SRGB, line_smooth = c.GL_LINE_SMOOTH, multisample = c.GL_MULTISAMPLE, polygon_offset_fill = c.GL_POLYGON_OFFSET_FILL, polygon_offset_line = c.GL_POLYGON_OFFSET_LINE, polygon_offset_point = c.GL_POLYGON_OFFSET_POINT, polygon_smooth = c.GL_POLYGON_SMOOTH, primitive_restart = c.GL_PRIMITIVE_RESTART, primitive_restart_fixed_index = c.GL_PRIMITIVE_RESTART_FIXED_INDEX, rasterizer_discard = c.GL_RASTERIZER_DISCARD, sample_alpha_to_coverage = c.GL_SAMPLE_ALPHA_TO_COVERAGE, sample_alpha_to_one = c.GL_SAMPLE_ALPHA_TO_ONE, sample_coverage = c.GL_SAMPLE_COVERAGE, sample_shading = c.GL_SAMPLE_SHADING, sample_mask = c.GL_SAMPLE_MASK, scissor_test = c.GL_SCISSOR_TEST, stencil_test = c.GL_STENCIL_TEST, texture_cube_map_seamless = c.GL_TEXTURE_CUBE_MAP_SEAMLESS, program_point_size = c.GL_PROGRAM_POINT_SIZE, }; pub fn enable(cap: Capabilities) void { 
c.glEnable(@enumToInt(cap)); checkError(); } pub fn disable(cap: Capabilities) void { c.glDisable(@enumToInt(cap)); checkError(); } pub fn enableI(cap: Capabilities, index: u32) void { c.glEnablei(@enumToInt(cap), index); checkError(); } pub fn disableI(cap: Capabilities, index: u32) void { c.glDisablei(@enumToInt(cap), index); checkError(); } pub fn depthMask(enabled: bool) void { c.glDepthMask(if (enabled) c.GL_TRUE else c.GL_FALSE); checkError(); } pub const DepthFunc = enum(c.GLenum) { never = c.GL_NEVER, less = c.GL_LESS, equal = c.GL_EQUAL, less_or_equal = c.GL_LEQUAL, greater = c.GL_GREATER, not_equal = c.GL_NOTEQUAL, greator_or_equal = c.GL_GEQUAL, always = c.GL_ALWAYS, }; pub fn depthFunc(func: DepthFunc) void { c.glDepthFunc(@enumToInt(func)); checkError(); } pub const BlendFactor = enum(c.GLenum) { zero = c.GL_ZERO, one = c.GL_ONE, src_color = c.GL_SRC_COLOR, one_minus_src_color = c.GL_ONE_MINUS_SRC_COLOR, dst_color = c.GL_DST_COLOR, one_minus_dst_color = c.GL_ONE_MINUS_DST_COLOR, src_alpha = c.GL_SRC_ALPHA, one_minus_src_alpha = c.GL_ONE_MINUS_SRC_ALPHA, dst_alpha = c.GL_DST_ALPHA, one_minus_dst_alpha = c.GL_ONE_MINUS_DST_ALPHA, constant_color = c.GL_CONSTANT_COLOR, one_minus_constant_color = c.GL_ONE_MINUS_CONSTANT_COLOR, constant_alpha = c.GL_CONSTANT_ALPHA, one_minus_constant_alpha = c.GL_ONE_MINUS_CONSTANT_ALPHA, }; pub fn blendFunc(sfactor: BlendFactor, dfactor: BlendFactor) void { c.glBlendFunc(@enumToInt(sfactor), @enumToInt(dfactor)); checkError(); } pub fn blendFuncSeparate(srcRGB: BlendFactor, dstRGB: BlendFactor, srcAlpha: BlendFactor, dstAlpha: BlendFactor) void { c.glBlendFuncSeparate(@enumToInt(srcRGB), @enumToInt(dstRGB), @enumToInt(srcAlpha), @enumToInt(dstAlpha)); checkError(); } pub fn polygonOffset(factor: f32, units: f32) void { c.glPolygonOffset(factor, units); checkError(); } pub fn pointSize(size: f32) void { c.glPointSize(size); checkError(); } pub fn lineWidth(size: f32) void { c.glLineWidth(size); checkError(); } pub const 
TextureTarget = enum(c.GLenum) { @"1d" = c.GL_TEXTURE_1D, @"2d" = c.GL_TEXTURE_2D, @"3d" = c.GL_TEXTURE_3D, @"1d_array" = c.GL_TEXTURE_1D_ARRAY, @"2d_array" = c.GL_TEXTURE_2D_ARRAY, rectangle = c.GL_TEXTURE_RECTANGLE, cube_map = c.GL_TEXTURE_CUBE_MAP, cube_map_array = c.GL_TEXTURE_CUBE_MAP_ARRAY, buffer = c.GL_TEXTURE_BUFFER, @"2d_multisample" = c.GL_TEXTURE_2D_MULTISAMPLE, @"2d_multisample_array" = c.GL_TEXTURE_2D_MULTISAMPLE_ARRAY, }; pub fn genTexture() Texture { var tex_name: c.GLuint = undefined; c.glGenTextures(1, &tex_name); checkError(); return @intToEnum(Texture, tex_name); } pub fn createTexture(texture_target: TextureTarget) Texture { var tex_name: c.GLuint = undefined; c.glCreateTextures(@enumToInt(texture_target), 1, &tex_name); checkError(); const texture = @intToEnum(Texture, tex_name); if (texture == .invalid) { checkError(); unreachable; } return texture; } pub fn deleteTexture(texture: Texture) void { var id = @enumToInt(texture); c.glDeleteTextures(1, &id); } pub fn bindTextureUnit(texture: Texture, unit: u32) void { c.glBindTextureUnit(unit, @enumToInt(texture)); checkError(); } pub fn bindTexture(texture: Texture, target: TextureTarget) void { c.glBindTexture(@enumToInt(target), @enumToInt(texture)); checkError(); } pub fn activeTexture(texture_unit: TextureUnit) void { c.glActiveTexture(@enumToInt(texture_unit)); checkError(); } pub const TextureUnit = enum(c.GLenum) { texture_0 = c.GL_TEXTURE0, texture_1 = c.GL_TEXTURE1, texture_2 = c.GL_TEXTURE2, }; pub const TextureParameter = enum(c.GLenum) { depth_stencil_texture_mode = c.GL_DEPTH_STENCIL_TEXTURE_MODE, base_level = c.GL_TEXTURE_BASE_LEVEL, compare_func = c.GL_TEXTURE_COMPARE_FUNC, compare_mode = c.GL_TEXTURE_COMPARE_MODE, lod_bias = c.GL_TEXTURE_LOD_BIAS, min_filter = c.GL_TEXTURE_MIN_FILTER, mag_filter = c.GL_TEXTURE_MAG_FILTER, min_lod = c.GL_TEXTURE_MIN_LOD, max_lod = c.GL_TEXTURE_MAX_LOD, max_level = c.GL_TEXTURE_MAX_LEVEL, swizzle_r = c.GL_TEXTURE_SWIZZLE_R, swizzle_g = 
c.GL_TEXTURE_SWIZZLE_G, swizzle_b = c.GL_TEXTURE_SWIZZLE_B, swizzle_a = c.GL_TEXTURE_SWIZZLE_A, wrap_s = c.GL_TEXTURE_WRAP_S, wrap_t = c.GL_TEXTURE_WRAP_T, wrap_r = c.GL_TEXTURE_WRAP_R, }; pub fn TextureParameterType(comptime param: TextureParameter) type { // see https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTexParameter.xhtml return switch (param) { .wrap_s, .wrap_t, .wrap_r => enum(c.GLint) { clamp_to_edge = c.GL_CLAMP_TO_EDGE, clamp_to_border = c.GL_CLAMP_TO_BORDER, mirrored_repeat = c.GL_MIRRORED_REPEAT, repeat = c.GL_REPEAT, mirror_clamp_to_edge = c.GL_MIRROR_CLAMP_TO_EDGE, }, .mag_filter => enum(c.GLint) { nearest = c.GL_NEAREST, linear = c.GL_LINEAR, }, .min_filter => enum(c.GLint) { nearest = c.GL_NEAREST, linear = c.GL_LINEAR, nearest_mipmap_nearest = c.GL_NEAREST_MIPMAP_NEAREST, linear_mipmap_nearest = c.GL_LINEAR_MIPMAP_NEAREST, nearest_mipmap_linear = c.GL_NEAREST_MIPMAP_LINEAR, linear_mipmap_linear = c.GL_LINEAR_MIPMAP_LINEAR, }, .compare_mode => enum(c.GLint) { none = c.GL_NONE, }, else => @compileError("textureParameter not implemented yet for " ++ @tagName(param)), }; } pub fn texParameter(target: TextureTarget, comptime parameter: TextureParameter, value: TextureParameterType(parameter)) void { const T = TextureParameterType(parameter); const info = @typeInfo(T); if (info == .Enum) { c.glTexParameteri(@enumToInt(target), @enumToInt(parameter), @enumToInt(value)); } else { @compileError(@tagName(info) ++ " is not supported yet by texParameter"); } checkError(); } pub fn textureParameter(texture: Texture, comptime parameter: TextureParameter, value: TextureParameterType(parameter)) void { const T = TextureParameterType(parameter); const info = @typeInfo(T); if (info == .Enum) { c.glTextureParameteri(@enumToInt(texture), @enumToInt(parameter), @enumToInt(value)); } else { @compileError(@tagName(info) ++ " is not supported yet by textureParameter"); } checkError(); } pub const TextureInternalFormat = enum(c.GLenum) { r8 = c.GL_R8, 
r8_snorm = c.GL_R8_SNORM, r16 = c.GL_R16, r16_snorm = c.GL_R16_SNORM, rg8 = c.GL_RG8, rg8_snorm = c.GL_RG8_SNORM, rg16 = c.GL_RG16, rg16_snorm = c.GL_RG16_SNORM, r3_g3_b2 = c.GL_R3_G3_B2, rgb4 = c.GL_RGB4, rgb5 = c.GL_RGB5, rgb8 = c.GL_RGB8, rgb8_snorm = c.GL_RGB8_SNORM, rgb10 = c.GL_RGB10, rgb12 = c.GL_RGB12, rgb16_snorm = c.GL_RGB16_SNORM, rgba2 = c.GL_RGBA2, rgba4 = c.GL_RGBA4, rgb5_a1 = c.GL_RGB5_A1, rgba8 = c.GL_RGBA8, rgba8_snorm = c.GL_RGBA8_SNORM, rgb10_a2 = c.GL_RGB10_A2, rgb10_a2ui = c.GL_RGB10_A2UI, rgba12 = c.GL_RGBA12, rgba16 = c.GL_RGBA16, srgb8 = c.GL_SRGB8, srgb8_alpha8 = c.GL_SRGB8_ALPHA8, r16f = c.GL_R16F, rg16f = c.GL_RG16F, rgb16f = c.GL_RGB16F, rgba16f = c.GL_RGBA16F, r32f = c.GL_R32F, rg32f = c.GL_RG32F, rgb32f = c.GL_RGB32F, rgba32f = c.GL_RGBA32F, r11f_g11f_b10f = c.GL_R11F_G11F_B10F, rgb9_e5 = c.GL_RGB9_E5, r8i = c.GL_R8I, r8ui = c.GL_R8UI, r16i = c.GL_R16I, r16ui = c.GL_R16UI, r32i = c.GL_R32I, r32ui = c.GL_R32UI, rg8i = c.GL_RG8I, rg8ui = c.GL_RG8UI, rg16i = c.GL_RG16I, rg16ui = c.GL_RG16UI, rg32i = c.GL_RG32I, rg32ui = c.GL_RG32UI, rgb8i = c.GL_RGB8I, rgb8ui = c.GL_RGB8UI, rgb16i = c.GL_RGB16I, rgb16ui = c.GL_RGB16UI, rgb32i = c.GL_RGB32I, rgb32ui = c.GL_RGB32UI, rgba8i = c.GL_RGBA8I, rgba8ui = c.GL_RGBA8UI, rgba16i = c.GL_RGBA16I, rgba16ui = c.GL_RGBA16UI, rgba32i = c.GL_RGBA32I, rgba32ui = c.GL_RGBA32UI, depth_component16 = c.GL_DEPTH_COMPONENT16, }; pub fn textureStorage2D( texture: Texture, levels: usize, internalformat: TextureInternalFormat, width: usize, height: usize, ) void { c.glTextureStorage2D( @enumToInt(texture), @intCast(c.GLsizei, levels), @enumToInt(internalformat), @intCast(c.GLsizei, width), @intCast(c.GLsizei, height), ); checkError(); } pub fn textureStorage3D( texture: Texture, levels: usize, internalformat: TextureInternalFormat, width: usize, height: usize, depth: usize, ) void { c.glTextureStorage3D( @enumToInt(texture), @intCast(c.GLsizei, levels), @enumToInt(internalformat), @intCast(c.GLsizei, width), 
@intCast(c.GLsizei, height), @intCast(c.GLsizei, depth), ); checkError(); } pub const PixelFormat = enum(c.GLenum) { red = c.GL_RED, rg = c.GL_RG, rgb = c.GL_RGB, bgr = c.GL_BGR, rgba = c.GL_RGBA, bgra = c.GL_BGRA, depth_component = c.GL_DEPTH_COMPONENT, stencil_index = c.GL_STENCIL_INDEX, luminance = c.GL_LUMINANCE, }; pub const PixelType = enum(c.GLenum) { unsigned_byte = c.GL_UNSIGNED_BYTE, byte = c.GL_BYTE, unsigned_short = c.GL_UNSIGNED_SHORT, short = c.GL_SHORT, unsigned_int = c.GL_UNSIGNED_INT, int = c.GL_INT, float = c.GL_FLOAT, unsigned_byte_3_3_2 = c.GL_UNSIGNED_BYTE_3_3_2, unsigned_byte_2_3_3_rev = c.GL_UNSIGNED_BYTE_2_3_3_REV, unsigned_short_5_6_5 = c.GL_UNSIGNED_SHORT_5_6_5, unsigned_short_5_6_5_rev = c.GL_UNSIGNED_SHORT_5_6_5_REV, unsigned_short_4_4_4_4 = c.GL_UNSIGNED_SHORT_4_4_4_4, unsigned_short_4_4_4_4_rev = c.GL_UNSIGNED_SHORT_4_4_4_4_REV, unsigned_short_5_5_5_1 = c.GL_UNSIGNED_SHORT_5_5_5_1, unsigned_short_1_5_5_5_rev = c.GL_UNSIGNED_SHORT_1_5_5_5_REV, unsigned_int_8_8_8_8 = c.GL_UNSIGNED_INT_8_8_8_8, unsigned_int_8_8_8_8_rev = c.GL_UNSIGNED_INT_8_8_8_8_REV, unsigned_int_10_10_10_2 = c.GL_UNSIGNED_INT_10_10_10_2, unsigned_int_2_10_10_10_rev = c.GL_UNSIGNED_INT_2_10_10_10_REV, }; pub fn textureImage2D( texture: TextureTarget, level: usize, pixel_internal_format: PixelFormat, width: usize, height: usize, pixel_format: PixelFormat, pixel_type: PixelType, data: ?[*]const u8, ) void { c.glTexImage2D( @enumToInt(texture), @intCast(c.GLint, level), @intCast(c.GLint, @enumToInt(pixel_internal_format)), @intCast(c.GLsizei, width), @intCast(c.GLsizei, height), 0, @enumToInt(pixel_format), @enumToInt(pixel_type), data, ); checkError(); } pub fn textureSubImage2D( texture: Texture, level: usize, xoffset: usize, yoffset: usize, width: usize, height: usize, pixel_format: PixelFormat, pixel_type: PixelType, data: ?[*]const u8, ) void { c.glTextureSubImage2D( @enumToInt(texture), @intCast(c.GLint, level), @intCast(c.GLint, xoffset), @intCast(c.GLint, yoffset), 
// Tail of textureSubImage2D (opened on the previous line): the remaining
// glTextureSubImage2D arguments, then the file's shared GL error check.
@intCast(c.GLsizei, width),
@intCast(c.GLsizei, height),
@enumToInt(pixel_format),
@enumToInt(pixel_type),
data,
);
checkError();
}

/// Uploads a sub-volume of pixel data into `texture` via the direct-state-access
/// entry point glTextureSubImage3D (no bind needed).
/// `pixels` may be null; with a pixel-unpack buffer bound, a null/offset pointer
/// is interpreted as a buffer offset by GL — TODO confirm intended usage here.
/// Offsets/sizes are plain usize at the API surface and are narrowed with
/// @intCast (safety-checked in debug builds).
pub fn textureSubImage3D(
    texture: Texture,
    level: usize,
    xoffset: usize,
    yoffset: usize,
    zoffset: usize,
    width: usize,
    height: usize,
    depth: usize,
    pixel_format: PixelFormat,
    pixel_type: PixelType,
    pixels: ?[*]const u8,
) void {
    c.glTextureSubImage3D(
        @enumToInt(texture),
        @intCast(c.GLint, level),
        @intCast(c.GLint, xoffset),
        @intCast(c.GLint, yoffset),
        @intCast(c.GLint, zoffset),
        @intCast(c.GLsizei, width),
        @intCast(c.GLsizei, height),
        @intCast(c.GLsizei, depth),
        @enumToInt(pixel_format),
        @enumToInt(pixel_type),
        pixels,
    );
    checkError();
}

/// Pixel pack (GPU -> client) and unpack (client -> GPU) transfer-state names
/// accepted by glPixelStorei.
pub const PixelStoreParameter = enum(c.GLenum) {
    pack_swap_bytes = c.GL_PACK_SWAP_BYTES,
    pack_lsb_first = c.GL_PACK_LSB_FIRST,
    pack_row_length = c.GL_PACK_ROW_LENGTH,
    pack_image_height = c.GL_PACK_IMAGE_HEIGHT,
    pack_skip_pixels = c.GL_PACK_SKIP_PIXELS,
    pack_skip_rows = c.GL_PACK_SKIP_ROWS,
    pack_skip_images = c.GL_PACK_SKIP_IMAGES,
    pack_alignment = c.GL_PACK_ALIGNMENT,
    unpack_swap_bytes = c.GL_UNPACK_SWAP_BYTES,
    unpack_lsb_first = c.GL_UNPACK_LSB_FIRST,
    unpack_row_length = c.GL_UNPACK_ROW_LENGTH,
    unpack_image_height = c.GL_UNPACK_IMAGE_HEIGHT,
    unpack_skip_pixels = c.GL_UNPACK_SKIP_PIXELS,
    unpack_skip_rows = c.GL_UNPACK_SKIP_ROWS,
    unpack_skip_images = c.GL_UNPACK_SKIP_IMAGES,
    unpack_alignment = c.GL_UNPACK_ALIGNMENT,
};

/// Sets one pixel pack/unpack parameter (glPixelStorei).
pub fn pixelStore(param: PixelStoreParameter, value: usize) void {
    c.glPixelStorei(@enumToInt(param), @intCast(c.GLint, value));
    checkError();
}

/// Sets the viewport rectangle; (x, y) may be negative, sizes are narrowed
/// with checked casts.
pub fn viewport(x: i32, y: i32, width: usize, height: usize) void {
    c.glViewport(@intCast(c.GLint, x), @intCast(c.GLint, y), @intCast(c.GLsizei, width), @intCast(c.GLsizei, height));
    checkError();
}

/// Sets the scissor rectangle (same parameter convention as viewport).
pub fn scissor(x: i32, y: i32, width: usize, height: usize) void {
    c.glScissor(@intCast(c.GLint, x), @intCast(c.GLint, y), @intCast(c.GLsizei, width), @intCast(c.GLsizei, height));
    checkError();
}

/// Framebuffer binding points; members continue on the next line.
pub const FramebufferTarget = enum(c.GLenum) {
// Sole member and closing brace of FramebufferTarget (opened on the previous line).
buffer = c.GL_FRAMEBUFFER, };

/// Name handle for an OpenGL framebuffer object. GL never hands out name 0,
/// so it is reserved as the `invalid` sentinel; every other value is a
/// GL-assigned object name (non-exhaustive enum).
pub const Framebuffer = enum(c.GLuint) {
    invalid = 0,
    _,

    // Method-style aliases so call sites can write `fb.bind(...)`, `fb.texture2D(...)`, etc.
    pub const gen = genFramebuffer;
    pub const create = createFramebuffer;
    pub const delete = deleteFramebuffer;
    pub const bind = bindFrameBuffer;
    pub const texture = framebufferTexture;
    pub const texture2D = framebufferTexture2D;
    pub const checkStatus = checkFramebufferStatus;
};

/// Creates a framebuffer object via glCreateFramebuffers (the direct-state-access
/// path; the object is fully initialized without binding).
/// A returned name of 0 should be impossible on success, so the code surfaces
/// any pending GL error and then traps via `unreachable`.
pub fn createFramebuffer() Framebuffer {
    var fb_name: c.GLuint = undefined;
    c.glCreateFramebuffers(1, &fb_name);
    checkError();
    const framebuffer = @intToEnum(Framebuffer, fb_name);
    if (framebuffer == .invalid) {
        // Re-check so the error reporter fires before the trap below.
        checkError();
        unreachable;
    }
    return framebuffer;
}

/// Reserves a framebuffer name via glGenFramebuffers (the classic path;
/// the object is not created until first bound).
pub fn genFramebuffer() Framebuffer {
    var fb_name: c.GLuint = undefined;
    c.glGenFramebuffers(1, &fb_name);
    checkError();
    const framebuffer = @intToEnum(Framebuffer, fb_name);
    if (framebuffer == .invalid) unreachable;
    return framebuffer;
}

/// Deletes the framebuffer object.
/// NOTE(review): unlike the other wrappers in this file this one skips
/// checkError() — possibly intentional for teardown paths, but worth confirming.
pub fn deleteFramebuffer(buf: Framebuffer) void {
    var fb_name = @enumToInt(buf);
    c.glDeleteFramebuffers(1, &fb_name);
}

/// Binds `buf` to the given framebuffer target.
pub fn bindFrameBuffer(buf: Framebuffer, target: FramebufferTarget) void {
    c.glBindFramebuffer(@enumToInt(target), @enumToInt(buf));
    checkError();
}

/// Framebuffer attachment points.
/// NOTE(review): `max_color` maps GL_MAX_COLOR_ATTACHMENTS, which is a
/// glGet query constant rather than an attachment point — confirm it is
/// never passed to the attach calls below.
pub const FramebufferAttachment = enum(c.GLenum) {
    color0 = c.GL_COLOR_ATTACHMENT0,
    color1 = c.GL_COLOR_ATTACHMENT1,
    color2 = c.GL_COLOR_ATTACHMENT2,
    color3 = c.GL_COLOR_ATTACHMENT3,
    color4 = c.GL_COLOR_ATTACHMENT4,
    color5 = c.GL_COLOR_ATTACHMENT5,
    color6 = c.GL_COLOR_ATTACHMENT6,
    color7 = c.GL_COLOR_ATTACHMENT7,
    depth = c.GL_DEPTH_ATTACHMENT,
    stencil = c.GL_STENCIL_ATTACHMENT,
    depth_stencil = c.GL_DEPTH_STENCIL_ATTACHMENT,
    max_color = c.GL_MAX_COLOR_ATTACHMENTS,
};

/// Attaches `level` of `texture` to `attachment` of `buffer`.
/// Binds the framebuffer first because glFramebufferTexture (non-DSA)
/// operates on the currently bound framebuffer.
pub fn framebufferTexture(buffer: Framebuffer, target: FramebufferTarget, attachment: FramebufferAttachment, texture: Texture, level: i32) void {
    buffer.bind(.buffer);
    c.glFramebufferTexture(@enumToInt(target), @enumToInt(attachment), @intCast(c.GLuint, @enumToInt(texture)), @intCast(c.GLint, level));
    checkError();
}

/// Texture targets accepted by glFramebufferTexture2D; members continue on
/// the next line.
pub const FramebufferTextureTarget = enum(c.GLenum) { @"1d" =
// Members and closing brace of FramebufferTextureTarget (opened on the
// previous line).
c.GL_TEXTURE_1D,
@"2d" = c.GL_TEXTURE_2D,
@"3d" = c.GL_TEXTURE_3D,
@"1d_array" = c.GL_TEXTURE_1D_ARRAY,
@"2d_array" = c.GL_TEXTURE_2D_ARRAY,
rectangle = c.GL_TEXTURE_RECTANGLE,
cube_map_positive_x = c.GL_TEXTURE_CUBE_MAP_POSITIVE_X,
cube_map_negative_x = c.GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
cube_map_positive_y = c.GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
cube_map_negative_y = c.GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
cube_map_positive_z = c.GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
cube_map_negative_z = c.GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
buffer = c.GL_TEXTURE_BUFFER,
@"2d_multisample" = c.GL_TEXTURE_2D_MULTISAMPLE,
@"2d_multisample_array" = c.GL_TEXTURE_2D_MULTISAMPLE_ARRAY,
};

/// Attaches one level of `texture` (interpreted as `textarget`) to
/// `attachment` of `buffer`. Binds the framebuffer first because
/// glFramebufferTexture2D (non-DSA) acts on the currently bound framebuffer.
pub fn framebufferTexture2D(buffer: Framebuffer, target: FramebufferTarget, attachment: FramebufferAttachment, textarget: FramebufferTextureTarget, texture: Texture, level: i32) void {
    buffer.bind(.buffer);
    c.glFramebufferTexture2D(@enumToInt(target), @enumToInt(attachment), @enumToInt(textarget), @intCast(c.GLuint, @enumToInt(texture)), @intCast(c.GLint, level));
    checkError();
}

/// Result of glCheckFramebufferStatus. Only the success value is named.
/// FIX: the enum is now non-exhaustive (`_`) — glCheckFramebufferStatus can
/// also return the GL_FRAMEBUFFER_INCOMPLETE_* / GL_FRAMEBUFFER_UNSUPPORTED
/// codes (and 0 on error), and @intToEnum into an exhaustive enum that lacks
/// those values is illegal behavior (safety panic in debug, UB in release).
/// Callers comparing `== .complete` are unaffected.
const FramebufferStatus = enum(c.GLuint) {
    complete = c.GL_FRAMEBUFFER_COMPLETE,
    _,
};

/// Returns the completeness status of the framebuffer bound to `target`.
pub fn checkFramebufferStatus(target: FramebufferTarget) FramebufferStatus {
    const status = @intToEnum(FramebufferStatus, c.glCheckFramebufferStatus(@enumToInt(target)));
    return status;
}

/// Selects the color buffers to be drawn into (glDrawBuffers).
/// FIX: cast the slice to [*]const c.GLenum — glDrawBuffers takes
/// `const GLenum *` and FramebufferAttachment is declared enum(c.GLenum) —
/// and call checkError() for consistency with every other wrapper here.
pub fn drawBuffers(bufs: []const FramebufferAttachment) void {
    c.glDrawBuffers(cs2gl(bufs.len), @ptrCast([*]const c.GLenum, bufs.ptr));
    checkError();
}

///////////////////////////////////////////////////////////////////////////////
// Parameters

/// State names queryable through glGetIntegerv and friends; members continue
/// on the following lines.
pub const Parameter = enum(c.GLenum) {
    active_texture = c.GL_ACTIVE_TEXTURE,
    aliased_line_width_range = c.GL_ALIASED_LINE_WIDTH_RANGE,
    array_buffer_binding = c.GL_ARRAY_BUFFER_BINDING,
    blend = c.GL_BLEND,
    blend_color = c.GL_BLEND_COLOR,
    blend_dst_alpha = c.GL_BLEND_DST_ALPHA,
    blend_dst_rgb = c.GL_BLEND_DST_RGB,
    blend_equation_alpha = c.GL_BLEND_EQUATION_ALPHA,
    blend_equation_rgb = c.GL_BLEND_EQUATION_RGB,
    blend_src_alpha = c.GL_BLEND_SRC_ALPHA,
blend_src_rgb = c.GL_BLEND_SRC_RGB, color_clear_value = c.GL_COLOR_CLEAR_VALUE, color_logic_op = c.GL_COLOR_LOGIC_OP, color_writemask = c.GL_COLOR_WRITEMASK, compressed_texture_formats = c.GL_COMPRESSED_TEXTURE_FORMATS, context_flags = c.GL_CONTEXT_FLAGS, cull_face = c.GL_CULL_FACE, current_program = c.GL_CURRENT_PROGRAM, depth_clear_value = c.GL_DEPTH_CLEAR_VALUE, depth_func = c.GL_DEPTH_FUNC, depth_range = c.GL_DEPTH_RANGE, depth_test = c.GL_DEPTH_TEST, depth_writemask = c.GL_DEPTH_WRITEMASK, dither = c.GL_DITHER, doublebuffer = c.GL_DOUBLEBUFFER, draw_buffer = c.GL_DRAW_BUFFER, draw_buffer0 = c.GL_DRAW_BUFFER0, draw_buffer1 = c.GL_DRAW_BUFFER1, draw_buffer2 = c.GL_DRAW_BUFFER2, draw_buffer3 = c.GL_DRAW_BUFFER3, draw_buffer4 = c.GL_DRAW_BUFFER4, draw_buffer5 = c.GL_DRAW_BUFFER5, draw_buffer6 = c.GL_DRAW_BUFFER6, draw_buffer7 = c.GL_DRAW_BUFFER7, draw_buffer8 = c.GL_DRAW_BUFFER8, draw_buffer9 = c.GL_DRAW_BUFFER9, draw_buffer10 = c.GL_DRAW_BUFFER10, draw_buffer11 = c.GL_DRAW_BUFFER11, draw_buffer12 = c.GL_DRAW_BUFFER12, draw_buffer13 = c.GL_DRAW_BUFFER13, draw_buffer14 = c.GL_DRAW_BUFFER14, draw_buffer15 = c.GL_DRAW_BUFFER15, draw_framebuffer_binding = c.GL_DRAW_FRAMEBUFFER_BINDING, element_array_buffer_binding = c.GL_ELEMENT_ARRAY_BUFFER_BINDING, fragment_shader_derivative_hint = c.GL_FRAGMENT_SHADER_DERIVATIVE_HINT, line_smooth = c.GL_LINE_SMOOTH, line_smooth_hint = c.GL_LINE_SMOOTH_HINT, line_width = c.GL_LINE_WIDTH, logic_op_mode = c.GL_LOGIC_OP_MODE, major_version = c.GL_MAJOR_VERSION, max_3d_texture_size = c.GL_MAX_3D_TEXTURE_SIZE, max_array_texture_layers = c.GL_MAX_ARRAY_TEXTURE_LAYERS, max_clip_distances = c.GL_MAX_CLIP_DISTANCES, max_color_texture_samples = c.GL_MAX_COLOR_TEXTURE_SAMPLES, max_combined_fragment_uniform_components = c.GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS, max_combined_geometry_uniform_components = c.GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS, max_combined_texture_image_units = c.GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, 
max_combined_uniform_blocks = c.GL_MAX_COMBINED_UNIFORM_BLOCKS, max_combined_vertex_uniform_components = c.GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS, max_cube_map_texture_size = c.GL_MAX_CUBE_MAP_TEXTURE_SIZE, max_depth_texture_samples = c.GL_MAX_DEPTH_TEXTURE_SAMPLES, max_draw_buffers = c.GL_MAX_DRAW_BUFFERS, max_dual_source_draw_buffers = c.GL_MAX_DUAL_SOURCE_DRAW_BUFFERS, max_elements_indices = c.GL_MAX_ELEMENTS_INDICES, max_elements_vertices = c.GL_MAX_ELEMENTS_VERTICES, max_fragment_input_components = c.GL_MAX_FRAGMENT_INPUT_COMPONENTS, max_fragment_uniform_blocks = c.GL_MAX_FRAGMENT_UNIFORM_BLOCKS, max_fragment_uniform_components = c.GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, max_geometry_input_components = c.GL_MAX_GEOMETRY_INPUT_COMPONENTS, max_geometry_output_components = c.GL_MAX_GEOMETRY_OUTPUT_COMPONENTS, max_geometry_texture_image_units = c.GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS, max_geometry_uniform_blocks = c.GL_MAX_GEOMETRY_UNIFORM_BLOCKS, max_geometry_uniform_components = c.GL_MAX_GEOMETRY_UNIFORM_COMPONENTS, max_integer_samples = c.GL_MAX_INTEGER_SAMPLES, max_program_texel_offset = c.GL_MAX_PROGRAM_TEXEL_OFFSET, max_rectangle_texture_size = c.GL_MAX_RECTANGLE_TEXTURE_SIZE, max_renderbuffer_size = c.GL_MAX_RENDERBUFFER_SIZE, max_sample_mask_words = c.GL_MAX_SAMPLE_MASK_WORDS, max_server_wait_timeout = c.GL_MAX_SERVER_WAIT_TIMEOUT, max_texture_buffer_size = c.GL_MAX_TEXTURE_BUFFER_SIZE, max_texture_image_units = c.GL_MAX_TEXTURE_IMAGE_UNITS, max_texture_lod_bias = c.GL_MAX_TEXTURE_LOD_BIAS, max_texture_size = c.GL_MAX_TEXTURE_SIZE, max_uniform_block_size = c.GL_MAX_UNIFORM_BLOCK_SIZE, max_uniform_buffer_bindings = c.GL_MAX_UNIFORM_BUFFER_BINDINGS, max_varying_components = c.GL_MAX_VARYING_COMPONENTS, // max_varying_floats = c.GL_MAX_VARYING_FLOATS, max_vertex_attribs = c.GL_MAX_VERTEX_ATTRIBS, max_vertex_output_components = c.GL_MAX_VERTEX_OUTPUT_COMPONENTS, max_vertex_texture_image_units = c.GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, max_vertex_uniform_blocks = 
c.GL_MAX_VERTEX_UNIFORM_BLOCKS, max_vertex_uniform_components = c.GL_MAX_VERTEX_UNIFORM_COMPONENTS, max_viewport_dims = c.GL_MAX_VIEWPORT_DIMS, min_program_texel_offset = c.GL_MIN_PROGRAM_TEXEL_OFFSET, minor_version = c.GL_MINOR_VERSION, num_compressed_texture_formats = c.GL_NUM_COMPRESSED_TEXTURE_FORMATS, num_extensions = c.GL_NUM_EXTENSIONS, pack_alignment = c.GL_PACK_ALIGNMENT, pack_image_height = c.GL_PACK_IMAGE_HEIGHT, pack_lsb_first = c.GL_PACK_LSB_FIRST, pack_row_length = c.GL_PACK_ROW_LENGTH, pack_skip_images = c.GL_PACK_SKIP_IMAGES, pack_skip_pixels = c.GL_PACK_SKIP_PIXELS, pack_skip_rows = c.GL_PACK_SKIP_ROWS, pack_swap_bytes = c.GL_PACK_SWAP_BYTES, pixel_pack_buffer_binding = c.GL_PIXEL_PACK_BUFFER_BINDING, pixel_unpack_buffer_binding = c.GL_PIXEL_UNPACK_BUFFER_BINDING, point_fade_threshold_size = c.GL_POINT_FADE_THRESHOLD_SIZE, point_size = c.GL_POINT_SIZE, point_size_granularity = c.GL_POINT_SIZE_GRANULARITY, point_size_range = c.GL_POINT_SIZE_RANGE, polygon_offset_factor = c.GL_POLYGON_OFFSET_FACTOR, polygon_offset_fill = c.GL_POLYGON_OFFSET_FILL, polygon_offset_line = c.GL_POLYGON_OFFSET_LINE, polygon_offset_point = c.GL_POLYGON_OFFSET_POINT, polygon_offset_units = c.GL_POLYGON_OFFSET_UNITS, polygon_smooth = c.GL_POLYGON_SMOOTH, polygon_smooth_hint = c.GL_POLYGON_SMOOTH_HINT, primitive_restart_index = c.GL_PRIMITIVE_RESTART_INDEX, program_point_size = c.GL_PROGRAM_POINT_SIZE, provoking_vertex = c.GL_PROVOKING_VERTEX, read_buffer = c.GL_READ_BUFFER, read_framebuffer_binding = c.GL_READ_FRAMEBUFFER_BINDING, renderbuffer_binding = c.GL_RENDERBUFFER_BINDING, sample_buffers = c.GL_SAMPLE_BUFFERS, sample_coverage_invert = c.GL_SAMPLE_COVERAGE_INVERT, sample_coverage_value = c.GL_SAMPLE_COVERAGE_VALUE, sampler_binding = c.GL_SAMPLER_BINDING, samples = c.GL_SAMPLES, scissor_box = c.GL_SCISSOR_BOX, scissor_test = c.GL_SCISSOR_TEST, smooth_line_width_granularity = c.GL_SMOOTH_LINE_WIDTH_GRANULARITY, smooth_line_width_range = c.GL_SMOOTH_LINE_WIDTH_RANGE, 
stencil_back_fail = c.GL_STENCIL_BACK_FAIL, stencil_back_func = c.GL_STENCIL_BACK_FUNC, stencil_back_pass_depth_fail = c.GL_STENCIL_BACK_PASS_DEPTH_FAIL, stencil_back_pass_depth_pass = c.GL_STENCIL_BACK_PASS_DEPTH_PASS, stencil_back_ref = c.GL_STENCIL_BACK_REF, stencil_back_value_mask = c.GL_STENCIL_BACK_VALUE_MASK, stencil_back_writemask = c.GL_STENCIL_BACK_WRITEMASK, stencil_clear_value = c.GL_STENCIL_CLEAR_VALUE, stencil_fail = c.GL_STENCIL_FAIL, stencil_func = c.GL_STENCIL_FUNC, stencil_pass_depth_fail = c.GL_STENCIL_PASS_DEPTH_FAIL, stencil_pass_depth_pass = c.GL_STENCIL_PASS_DEPTH_PASS, stencil_ref = c.GL_STENCIL_REF, stencil_test = c.GL_STENCIL_TEST, stencil_value_mask = c.GL_STENCIL_VALUE_MASK, stencil_writemask = c.GL_STENCIL_WRITEMASK, stereo = c.GL_STEREO, subpixel_bits = c.GL_SUBPIXEL_BITS, texture_binding_1d = c.GL_TEXTURE_BINDING_1D, texture_binding_1d_array = c.GL_TEXTURE_BINDING_1D_ARRAY, texture_binding_2d = c.GL_TEXTURE_BINDING_2D, texture_binding_2d_array = c.GL_TEXTURE_BINDING_2D_ARRAY, texture_binding_2d_multisample = c.GL_TEXTURE_BINDING_2D_MULTISAMPLE, texture_binding_2d_multisample_array = c.GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY, texture_binding_3d = c.GL_TEXTURE_BINDING_3D, texture_binding_buffer = c.GL_TEXTURE_BINDING_BUFFER, texture_binding_cube_map = c.GL_TEXTURE_BINDING_CUBE_MAP, texture_binding_rectangle = c.GL_TEXTURE_BINDING_RECTANGLE, texture_compression_hint = c.GL_TEXTURE_COMPRESSION_HINT, timestamp = c.GL_TIMESTAMP, transform_feedback_buffer_binding = c.GL_TRANSFORM_FEEDBACK_BUFFER_BINDING, transform_feedback_buffer_size = c.GL_TRANSFORM_FEEDBACK_BUFFER_SIZE, transform_feedback_buffer_start = c.GL_TRANSFORM_FEEDBACK_BUFFER_START, uniform_buffer_binding = c.GL_UNIFORM_BUFFER_BINDING, uniform_buffer_offset_alignment = c.GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, uniform_buffer_size = c.GL_UNIFORM_BUFFER_SIZE, uniform_buffer_start = c.GL_UNIFORM_BUFFER_START, unpack_alignment = c.GL_UNPACK_ALIGNMENT, unpack_image_height = 
// Final members and closing brace of the Parameter enum (opened several
// lines above).
c.GL_UNPACK_IMAGE_HEIGHT,
unpack_lsb_first = c.GL_UNPACK_LSB_FIRST,
unpack_row_length = c.GL_UNPACK_ROW_LENGTH,
unpack_skip_images = c.GL_UNPACK_SKIP_IMAGES,
unpack_skip_pixels = c.GL_UNPACK_SKIP_PIXELS,
unpack_skip_rows = c.GL_UNPACK_SKIP_ROWS,
unpack_swap_bytes = c.GL_UNPACK_SWAP_BYTES,
viewport = c.GL_VIEWPORT,
};

/// Queries a single integer-valued GL state (glGetIntegerv).
/// NOTE(review): for vector-valued parameters (e.g. .viewport) glGetIntegerv
/// writes several ints; this wrapper only provides storage for one — confirm
/// callers restrict it to scalar parameters.
pub fn getInteger(parameter: Parameter) i32 {
    var value: c.GLint = undefined;
    c.glGetIntegerv(@enumToInt(parameter), &value);
    checkError();
    return value;
}

/// String-valued state names for glGetString / glGetStringi.
pub const StringParameter = enum(c.GLenum) {
    vendor = c.GL_VENDOR,
    renderer = c.GL_RENDERER,
    version = c.GL_VERSION,
    shading_language_version = c.GL_SHADING_LANGUAGE_VERSION,
    extensions = c.GL_EXTENSIONS,
};

/// Returns the `index`-th string for an indexed string parameter
/// (glGetStringi), or null — the optional in the return type relies on
/// std.mem.span mapping a null C pointer to null.
pub fn getStringi(parameter: StringParameter, index: u32) ?[:0]const u8 {
    return std.mem.span(c.glGetStringi(@enumToInt(parameter), index));
}

/// Returns true if the driver reports `extension` in its indexed extension
/// list. Linear scan over GL_NUM_EXTENSIONS entries; a null entry aborts the
/// scan early with `false`.
pub fn hasExtension(extension: [:0]const u8) bool {
    // getInteger returns i32; a negative count (never expected from GL)
    // would make the loop body unreachable.
    const count = getInteger(.num_extensions);
    var i: u32 = 0;
    while (i < count) : (i += 1) {
        const ext = getStringi(.extensions, i) orelse return false;
        if (std.mem.eql(u8, ext, extension)) {
            return true;
        }
    }
    return false;
}
// zgl.zig ends above; the SPARCv9 CodeGen module begins below.
const std = @import("std"); const assert = std.debug.assert; const log = std.log.scoped(.codegen); const math = std.math; const mem = std.mem; const Allocator = mem.Allocator; const builtin = @import("builtin"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); const TypedValue = @import("../../TypedValue.zig"); const ErrorMsg = Module.ErrorMsg; const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Type = @import("../../type.zig").Type; const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; const FnResult = @import("../../codegen.zig").FnResult; const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const RegisterManager = RegisterManagerFn(Self, Register, &abi.allocatable_regs); const build_options = @import("build_options"); const bits = @import("bits.zig"); const abi = @import("abi.zig"); const Register = bits.Register; const Self = @This(); const InnerError = error{ OutOfMemory, CodegenFail, OutOfRegisters, }; const RegisterView = enum(u1) { caller, callee, }; gpa: Allocator, air: Air, liveness: Liveness, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: usize, src_loc: Module.SrcLoc, stack_align: u32, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// MIR extra data mir_extra: std.ArrayListUnmanaged(u32) = .{}, /// Byte offset within the source file of the ending curly. end_di_line: u32, end_di_column: u32, /// The value is an offset into the `Function` `code` from the beginning. /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. 
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{}, /// Whenever there is a runtime branch, we push a Branch onto this stack, /// and pop it off when the runtime branch joins. This provides an "overlay" /// of the table of mappings from instructions to `MCValue` from within the branch. /// This way we can modify the `MCValue` for an instruction in different ways /// within different branches. Special consideration is needed when a branch /// joins with its parent, to make sure all instructions have the same MCValue /// across each runtime branch upon joining. branch_stack: *std.ArrayList(Branch), // Key is the block instruction blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, register_manager: RegisterManager = .{}, /// Maps offset to what is stored there. stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, /// Offset from the stack base, representing the end of the stack frame. max_end_stack: u32 = 0, /// Represents the current end stack offset. If there is no existing slot /// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. next_stack_offset: u32 = 0, /// Debug field, used to find bugs in the compiler. air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {}; const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. /// TODO Look into deleting this tag and using `dead` instead, since every use /// of MCValue.none should be instead looking at the type and noticing it is 0 bits. none, /// Control flow will not allow this value to be observed. unreach, /// No more references to this value remain. dead, /// The value is undefined. undef, /// A pointer-sized integer that fits in a register. /// If the type is a pointer, this is the pointer address in virtual address space. immediate: u64, /// The value is in a target-specific register. 
register: Register, /// The value is in memory at a hard-coded address. /// If the type is a pointer, it means the pointer address is at this memory location. memory: u64, /// The value is one of the stack variables. /// If the type is a pointer, it means the pointer address is in the stack at this offset. stack_offset: u32, /// The value is a pointer to one of the stack variables (payload is stack offset). ptr_stack_offset: u32, fn isMemory(mcv: MCValue) bool { return switch (mcv) { .memory, .stack_offset => true, else => false, }; } fn isImmediate(mcv: MCValue) bool { return switch (mcv) { .immediate => true, else => false, }; } fn isMutable(mcv: MCValue) bool { return switch (mcv) { .none => unreachable, .unreach => unreachable, .dead => unreachable, .immediate, .memory, .ptr_stack_offset, .undef, => false, .register, .stack_offset, => true, }; } }; const Branch = struct { inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, fn deinit(self: *Branch, gpa: Allocator) void { self.inst_table.deinit(gpa); self.* = undefined; } }; const StackAllocation = struct { inst: Air.Inst.Index, /// TODO do we need size? should be determined by inst.ty.abiSize() size: u32, }; const BlockData = struct { relocs: std.ArrayListUnmanaged(Mir.Inst.Index), /// The first break instruction encounters `null` here and chooses a /// machine code value for the block result, populating this field. /// Following break instructions encounter that value and use it for /// the location to store their block results. 
// Final field and closing brace of BlockData (opened on the previous line).
mcv: MCValue, };

/// Calling-convention resolution result for one function: where each argument
/// and the return value live, plus the stack space the call frame needs.
const CallMCValues = struct {
    args: []MCValue,
    return_value: MCValue,
    stack_byte_count: u32,
    stack_align: u32,

    /// Frees the args slice (allocated with the function's gpa) and poisons
    /// the struct.
    fn deinit(self: *CallMCValues, func: *Self) void {
        func.gpa.free(self.args);
        self.* = undefined;
    }
};

/// Operand-death bookkeeping for AIR instructions with more operands than fit
/// in the per-instruction tomb bits: the first `Liveness.bpi - 1` operands use
/// `tomb_bits`, the rest spill into `big_tomb_bits`.
const BigTomb = struct {
    function: *Self,
    inst: Air.Inst.Index,
    tomb_bits: Liveness.Bpi,
    big_tomb_bits: u32,
    // Index of the next operand to be fed.
    bit_index: usize,

    /// Consumes the next operand ref and, if its death bit is set, records the
    /// operand's death in the function state.
    fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
        const this_bit_index = bt.bit_index;
        bt.bit_index += 1;

        const op_int = @enumToInt(op_ref);
        // Refs below the typed_value_map length denote interned constants,
        // not instructions; they carry no liveness to track.
        if (op_int < Air.Inst.Ref.typed_value_map.len) return;
        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);

        if (this_bit_index < Liveness.bpi - 1) {
            // Death bit lives in the small per-instruction tomb.
            const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
            if (!dies) return;
        } else {
            // Overflow operands: death bit lives in the extra 32-bit word.
            const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1));
            const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0;
            if (!dies) return;
        }
        bt.function.processDeath(op_index);
    }

    /// Records the instruction's result MCValue in the innermost branch (if
    /// the result is used) and closes out the per-instruction bookkeeping.
    fn finishAir(bt: *BigTomb, result: MCValue) void {
        const is_used = !bt.function.liveness.isUnused(bt.inst);
        if (is_used) {
            log.debug("%{d} => {}", .{ bt.inst, result });
            const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1];
            branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result);
        }
        bt.function.finishAirBookkeeping();
    }
};

/// Codegen entry point: lowers the analyzed function body (`air` + `liveness`)
/// to machine code appended to `code`, emitting debug info to `debug_output`.
/// Returns a failure ErrorMsg inside FnResult rather than an error for
/// compile-level (as opposed to system-level) failures.
pub fn generate(
    bin_file: *link.File,
    src_loc: Module.SrcLoc,
    module_fn: *Module.Fn,
    air: Air,
    liveness: Liveness,
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult {
    if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
        @panic("Attempted to compile for architecture that was disabled by build configuration");
    }

    const mod = bin_file.options.module.?;
    const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
    assert(fn_owner_decl.has_tv);
    const fn_type = fn_owner_decl.ty;

    var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
    // Continues on the next line: tears down the single remaining branch on exit.
    defer {
assert(branch_stack.items.len == 1); branch_stack.items[0].deinit(bin_file.allocator); branch_stack.deinit(); } try branch_stack.append(.{}); var function = Self{ .gpa = bin_file.allocator, .air = air, .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, .code = code, .debug_output = debug_output, .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` .fn_type = fn_type, .arg_index = 0, .branch_stack = &branch_stack, .src_loc = src_loc, .stack_align = undefined, .end_di_line = module_fn.rbrace_line, .end_di_column = module_fn.rbrace_column, }; defer function.stack.deinit(bin_file.allocator); defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) { error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.OutOfRegisters => return FnResult{ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), }, else => |e| return e, }; defer call_info.deinit(&function); function.args = call_info.args; function.ret_mcv = call_info.return_value; function.stack_align = call_info.stack_align; function.max_end_stack = call_info.stack_byte_count; function.gen() catch |err| switch (err) { error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.OutOfRegisters => return FnResult{ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. 
This is a bug in the Zig compiler.", .{}), }, else => |e| return e, }; var mir = Mir{ .instructions = function.mir_instructions.toOwnedSlice(), .extra = function.mir_extra.toOwnedSlice(bin_file.allocator), }; defer mir.deinit(bin_file.allocator); var emit = Emit{ .mir = mir, .bin_file = bin_file, .debug_output = debug_output, .target = &bin_file.options.target, .src_loc = src_loc, .code = code, .prev_di_pc = 0, .prev_di_line = module_fn.lbrace_line, .prev_di_column = module_fn.lbrace_column, }; defer emit.deinit(); emit.emitMir() catch |err| switch (err) { error.EmitFail => return FnResult{ .fail = emit.err_msg.? }, else => |e| return e, }; if (function.err_msg) |em| { return FnResult{ .fail = em }; } else { return FnResult{ .appended = {} }; } } fn gen(self: *Self) !void { const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { // TODO Finish function prologue and epilogue for sparcv9. // TODO Backpatch stack offset // save %sp, -176, %sp _ = try self.addInst(.{ .tag = .save, .data = .{ .arithmetic_3op = .{ .is_imm = true, .rd = .sp, .rs1 = .sp, .rs2_or_imm = .{ .imm = -176 }, }, }, }); _ = try self.addInst(.{ .tag = .dbg_prologue_end, .data = .{ .nop = {} }, }); try self.genBody(self.air.getMainBody()); _ = try self.addInst(.{ .tag = .dbg_epilogue_begin, .data = .{ .nop = {} }, }); // exitlude jumps if (self.exitlude_jump_relocs.items.len > 0 and self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2) { // If the last Mir instruction (apart from the // dbg_epilogue_begin) is the last exitlude jump // relocation (which would just jump one instruction // further), it can be safely removed self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop()); } for (self.exitlude_jump_relocs.items) |jmp_reloc| { _ = jmp_reloc; return self.fail("TODO add branches in sparcv9", .{}); } // return %i7 + 8 _ = try self.addInst(.{ .tag = .@"return", .data = .{ .arithmetic_2op = .{ .is_imm = true, .rs1 = .@"i7", 
.rs2_or_imm = .{ .imm = 8 }, }, }, }); // TODO Find a way to fill this slot // nop _ = try self.addInst(.{ .tag = .nop, .data = .{ .nop = {} }, }); } else { _ = try self.addInst(.{ .tag = .dbg_prologue_end, .data = .{ .nop = {} }, }); try self.genBody(self.air.getMainBody()); _ = try self.addInst(.{ .tag = .dbg_epilogue_begin, .data = .{ .nop = {} }, }); } // Drop them off at the rbrace. _ = try self.addInst(.{ .tag = .dbg_line, .data = .{ .dbg_line_column = .{ .line = self.end_di_line, .column = self.end_di_column, } }, }); } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { const air_tags = self.air.instructions.items(.tag); for (body) |inst| { const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); switch (air_tags[inst]) { // zig fmt: off .add, .ptr_add => @panic("TODO try self.airBinOp(inst)"), .addwrap => @panic("TODO try self.airAddWrap(inst)"), .add_sat => @panic("TODO try self.airAddSat(inst)"), .sub, .ptr_sub => @panic("TODO try self.airBinOp(inst)"), .subwrap => @panic("TODO try self.airSubWrap(inst)"), .sub_sat => @panic("TODO try self.airSubSat(inst)"), .mul => @panic("TODO try self.airMul(inst)"), .mulwrap => @panic("TODO try self.airMulWrap(inst)"), .mul_sat => @panic("TODO try self.airMulSat(inst)"), .rem => @panic("TODO try self.airRem(inst)"), .mod => @panic("TODO try self.airMod(inst)"), .shl, .shl_exact => @panic("TODO try self.airShl(inst)"), .shl_sat => @panic("TODO try self.airShlSat(inst)"), .min => @panic("TODO try self.airMin(inst)"), .max => @panic("TODO try self.airMax(inst)"), .slice => @panic("TODO try self.airSlice(inst)"), .sqrt, .sin, .cos, .tan, .exp, .exp2, .log, .log2, .log10, .fabs, .floor, .ceil, .round, .trunc_float, => @panic("TODO try self.airUnaryMath(inst)"), .add_with_overflow => @panic("TODO try self.airAddWithOverflow(inst)"), .sub_with_overflow => @panic("TODO try self.airSubWithOverflow(inst)"), .mul_with_overflow => @panic("TODO try 
self.airMulWithOverflow(inst)"),
            .shl_with_overflow => @panic("TODO try self.airShlWithOverflow(inst)"),

            .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),

            .cmp_lt => @panic("TODO try self.airCmp(inst, .lt)"),
            .cmp_lte => @panic("TODO try self.airCmp(inst, .lte)"),
            .cmp_eq => @panic("TODO try self.airCmp(inst, .eq)"),
            .cmp_gte => @panic("TODO try self.airCmp(inst, .gte)"),
            .cmp_gt => @panic("TODO try self.airCmp(inst, .gt)"),
            .cmp_neq => @panic("TODO try self.airCmp(inst, .neq)"),
            .cmp_vector => @panic("TODO try self.airCmpVector(inst)"),
            .cmp_lt_errors_len => @panic("TODO try self.airCmpLtErrorsLen(inst)"),

            .bool_and => @panic("TODO try self.airBoolOp(inst)"),
            .bool_or => @panic("TODO try self.airBoolOp(inst)"),
            .bit_and => @panic("TODO try self.airBitAnd(inst)"),
            .bit_or => @panic("TODO try self.airBitOr(inst)"),
            .xor => @panic("TODO try self.airXor(inst)"),
            .shr, .shr_exact => @panic("TODO try self.airShr(inst)"),

            .alloc => @panic("TODO try self.airAlloc(inst)"),
            .ret_ptr => try self.airRetPtr(inst),
            .arg => try self.airArg(inst),
            .assembly => try self.airAsm(inst),
            .bitcast => @panic("TODO try self.airBitCast(inst)"),
            .block => try self.airBlock(inst),
            .br => @panic("TODO try self.airBr(inst)"),
            .breakpoint => try self.airBreakpoint(),
            .ret_addr => @panic("TODO try self.airRetAddr(inst)"),
            .frame_addr => @panic("TODO try self.airFrameAddress(inst)"),
            .fence => @panic("TODO try self.airFence()"),
            .cond_br => @panic("TODO try self.airCondBr(inst)"),
            .dbg_stmt => try self.airDbgStmt(inst),
            .fptrunc => @panic("TODO try self.airFptrunc(inst)"),
            .fpext => @panic("TODO try self.airFpext(inst)"),
            .intcast => @panic("TODO try self.airIntCast(inst)"),
            .trunc => @panic("TODO try self.airTrunc(inst)"),
            .bool_to_int => @panic("TODO try self.airBoolToInt(inst)"),
            .is_non_null => @panic("TODO try self.airIsNonNull(inst)"),
            .is_non_null_ptr => @panic("TODO try self.airIsNonNullPtr(inst)"),
            .is_null => @panic("TODO try self.airIsNull(inst)"),
            .is_null_ptr => @panic("TODO try self.airIsNullPtr(inst)"),
            .is_non_err => @panic("TODO try self.airIsNonErr(inst)"),
            .is_non_err_ptr => @panic("TODO try self.airIsNonErrPtr(inst)"),
            .is_err => @panic("TODO try self.airIsErr(inst)"),
            .is_err_ptr => @panic("TODO try self.airIsErrPtr(inst)"),
            .load => @panic("TODO try self.airLoad(inst)"),
            .loop => @panic("TODO try self.airLoop(inst)"),
            .not => @panic("TODO try self.airNot(inst)"),
            .ptrtoint => @panic("TODO try self.airPtrToInt(inst)"),
            .ret => try self.airRet(inst),
            .ret_load => try self.airRetLoad(inst),
            .store => try self.airStore(inst),
            .struct_field_ptr => @panic("TODO try self.airStructFieldPtr(inst)"),
            .struct_field_val => @panic("TODO try self.airStructFieldVal(inst)"),
            .array_to_slice => @panic("TODO try self.airArrayToSlice(inst)"),
            .int_to_float => @panic("TODO try self.airIntToFloat(inst)"),
            .float_to_int => @panic("TODO try self.airFloatToInt(inst)"),
            .cmpxchg_strong => @panic("TODO try self.airCmpxchg(inst)"),
            .cmpxchg_weak => @panic("TODO try self.airCmpxchg(inst)"),
            .atomic_rmw => @panic("TODO try self.airAtomicRmw(inst)"),
            .atomic_load => @panic("TODO try self.airAtomicLoad(inst)"),
            .memcpy => @panic("TODO try self.airMemcpy(inst)"),
            .memset => @panic("TODO try self.airMemset(inst)"),
            .set_union_tag => @panic("TODO try self.airSetUnionTag(inst)"),
            .get_union_tag => @panic("TODO try self.airGetUnionTag(inst)"),
            .clz => @panic("TODO try self.airClz(inst)"),
            .ctz => @panic("TODO try self.airCtz(inst)"),
            .popcount => @panic("TODO try self.airPopcount(inst)"),
            .byte_swap => @panic("TODO try self.airByteSwap(inst)"),
            .bit_reverse => @panic("TODO try self.airBitReverse(inst)"),
            .tag_name => @panic("TODO try self.airTagName(inst)"),
            .error_name => @panic("TODO try self.airErrorName(inst)"),
            .splat => @panic("TODO try self.airSplat(inst)"),
            .select => @panic("TODO try self.airSelect(inst)"),
            .shuffle => @panic("TODO try self.airShuffle(inst)"),
            .reduce => @panic("TODO try self.airReduce(inst)"),
            .aggregate_init => @panic("TODO try self.airAggregateInit(inst)"),
            .union_init => @panic("TODO try self.airUnionInit(inst)"),
            .prefetch => @panic("TODO try self.airPrefetch(inst)"),
            .mul_add => @panic("TODO try self.airMulAdd(inst)"),

            .dbg_var_ptr,
            .dbg_var_val,
            => try self.airDbgVar(inst),

            .dbg_inline_begin,
            .dbg_inline_end,
            => try self.airDbgInline(inst),

            .dbg_block_begin,
            .dbg_block_end,
            => try self.airDbgBlock(inst),

            .call => try self.airCall(inst, .auto),
            .call_always_tail => try self.airCall(inst, .always_tail),
            .call_never_tail => try self.airCall(inst, .never_tail),
            .call_never_inline => try self.airCall(inst, .never_inline),

            .atomic_store_unordered => @panic("TODO try self.airAtomicStore(inst, .Unordered)"),
            .atomic_store_monotonic => @panic("TODO try self.airAtomicStore(inst, .Monotonic)"),
            .atomic_store_release => @panic("TODO try self.airAtomicStore(inst, .Release)"),
            .atomic_store_seq_cst => @panic("TODO try self.airAtomicStore(inst, .SeqCst)"),

            .struct_field_ptr_index_0 => @panic("TODO try self.airStructFieldPtrIndex(inst, 0)"),
            .struct_field_ptr_index_1 => @panic("TODO try self.airStructFieldPtrIndex(inst, 1)"),
            .struct_field_ptr_index_2 => @panic("TODO try self.airStructFieldPtrIndex(inst, 2)"),
            .struct_field_ptr_index_3 => @panic("TODO try self.airStructFieldPtrIndex(inst, 3)"),

            .field_parent_ptr => @panic("TODO try self.airFieldParentPtr(inst)"),

            .switch_br => try self.airSwitch(inst),
            .slice_ptr => @panic("TODO try self.airSlicePtr(inst)"),
            .slice_len => @panic("TODO try self.airSliceLen(inst)"),

            .ptr_slice_len_ptr => @panic("TODO try self.airPtrSliceLenPtr(inst)"),
            .ptr_slice_ptr_ptr => @panic("TODO try self.airPtrSlicePtrPtr(inst)"),

            .array_elem_val => @panic("TODO try self.airArrayElemVal(inst)"),
            .slice_elem_val => @panic("TODO try self.airSliceElemVal(inst)"),
            .slice_elem_ptr => @panic("TODO try self.airSliceElemPtr(inst)"),
            .ptr_elem_val => @panic("TODO try self.airPtrElemVal(inst)"),
            .ptr_elem_ptr => @panic("TODO try self.airPtrElemPtr(inst)"),

            .constant => unreachable, // excluded from function bodies
            .const_ty => unreachable, // excluded from function bodies
            .unreach => self.finishAirBookkeeping(),

            .optional_payload => @panic("TODO try self.airOptionalPayload(inst)"),
            .optional_payload_ptr => @panic("TODO try self.airOptionalPayloadPtr(inst)"),
            .optional_payload_ptr_set => @panic("TODO try self.airOptionalPayloadPtrSet(inst)"),
            .unwrap_errunion_err => @panic("TODO try self.airUnwrapErrErr(inst)"),
            .unwrap_errunion_payload => @panic("TODO try self.airUnwrapErrPayload(inst)"),
            .unwrap_errunion_err_ptr => @panic("TODO try self.airUnwrapErrErrPtr(inst)"),
            .unwrap_errunion_payload_ptr => @panic("TODO try self.airUnwrapErrPayloadPtr(inst)"),
            .errunion_payload_ptr_set => @panic("TODO try self.airErrUnionPayloadPtrSet(inst)"),

            .wrap_optional => @panic("TODO try self.airWrapOptional(inst)"),
            .wrap_errunion_payload => @panic("TODO try self.airWrapErrUnionPayload(inst)"),
            .wrap_errunion_err => @panic("TODO try self.airWrapErrUnionErr(inst)"),

            .wasm_memory_size => unreachable,
            .wasm_memory_grow => unreachable,
            // zig fmt: on
        }
        // Safety net: every handler above must call finishAir/finishAirBookkeeping
        // exactly once so the liveness bookkeeping counter advances per instruction.
        if (std.debug.runtime_safety) {
            if (self.air_bookkeeping < old_air_bookkeeping + 1) {
                std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[inst] });
            }
        }
    }
}

/// Lowers an AIR `assembly` instruction. Inputs are materialized into the
/// registers named by their `{reg}` constraints; only the literal assembly
/// body "ta 0x6d" is recognized so far (emitted as a `tcc` trap), everything
/// else fails with a TODO. The single optional `={reg}` output constraint
/// selects the result register.
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Asm, ty_pl.payload);
    // Top bit of `flags` marks volatile asm; the low 31 bits hold the clobber count.
    const is_volatile = (extra.data.flags & 0x80000000) != 0;
    const clobbers_len = @truncate(u31, extra.data.flags);
    var extra_i: usize = extra.end;
    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.outputs_len]);
    extra_i += outputs.len;
    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.inputs_len]);
    extra_i += inputs.len;

    const dead = !is_volatile and self.liveness.isUnused(inst);
    const result: MCValue = if (dead) .dead else result: {
        if (outputs.len > 1) {
            return self.fail("TODO implement codegen for asm with more than 1 output", .{});
        }

        const output_constraint: ?[]const u8 = for (outputs) |output| {
            if (output != .none) {
                return self.fail("TODO implement codegen for non-expr asm", .{});
            }
            // NOTE(review): `constraint` re-slices the same bytes as `extra_bytes`;
            // it could simply be `std.mem.sliceTo(extra_bytes, 0)`.
            const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
            const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
            const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
            // This equation accounts for the fact that even if we have exactly 4 bytes
            // for the string, we still use the next u32 for the null terminator.
            extra_i += (constraint.len + name.len + (2 + 3)) / 4;

            break constraint;
        } else null;

        for (inputs) |input| {
            const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
            const constraint = std.mem.sliceTo(input_bytes, 0);
            const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
            // This equation accounts for the fact that even if we have exactly 4 bytes
            // for the string, we still use the next u32 for the null terminator.
            extra_i += (constraint.len + name.len + (2 + 3)) / 4;

            // Only direct register constraints of the form `{name}` are supported.
            if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
                return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
            }
            const reg_name = constraint[1 .. constraint.len - 1];
            const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name});

            const arg_mcv = try self.resolveInst(input);
            try self.register_manager.getReg(reg, null);
            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
        }

        {
            // Skip over the clobber strings; they are currently ignored.
            var clobber_i: u32 = 0;
            while (clobber_i < clobbers_len) : (clobber_i += 1) {
                const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
                // This equation accounts for the fact that even if we have exactly 4 bytes
                // for the string, we still use the next u32 for the null terminator.
                extra_i += clobber.len / 4 + 1;

                // TODO honor these
            }
        }

        const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];

        if (mem.eql(u8, asm_source, "ta 0x6d")) {
            _ = try self.addInst(.{
                .tag = .tcc,
                .data = .{
                    .trap = .{
                        .is_imm = true,
                        .cond = 0b1000, // TODO need to look into changing this into an enum
                        .rs2_or_imm = .{ .imm = 0x6d },
                    },
                },
            });
        } else {
            return self.fail("TODO implement a full SPARCv9 assembly parsing", .{});
        }

        if (output_constraint) |output| {
            // Outputs must look like `={reg}`.
            if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                return self.fail("unrecognized asm output constraint: '{s}'", .{output});
            }
            const reg_name = output[2 .. output.len - 1];
            const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name});
            break :result MCValue{ .register = reg };
        } else {
            break :result MCValue{ .none = {} };
        }
    };

    // Fast path: if all operands fit into the fixed-size Liveness buffer,
    // finish with the simple bookkeeping; otherwise fall back to BigTomb.
    simple: {
        var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
        var buf_index: usize = 0;
        for (outputs) |output| {
            if (output == .none) continue;
            if (buf_index >= buf.len) break :simple;
            buf[buf_index] = output;
            buf_index += 1;
        }
        if (buf_index + inputs.len > buf.len) break :simple;
        std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
        return self.finishAir(inst, result, buf);
    }
    var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
    for (outputs) |output| {
        if (output == .none) continue;
        bt.feed(output);
    }
    for (inputs) |input| {
        bt.feed(input);
    }
    return bt.finishAir(result);
}

/// Lowers an AIR `arg` instruction: hands out the next value from the
/// precomputed `self.args` list and emits a `dbg_arg` marker for debug info.
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
    const arg_index = self.arg_index;
    self.arg_index += 1;

    const ty = self.air.typeOfIndex(inst);
    _ = ty;

    const result = self.args[arg_index];
    // TODO support stack-only arguments
    // TODO Copy registers to the stack
    const mcv = result;
    _ = try self.addInst(.{
        .tag = .dbg_arg,
        .data = .{
            .dbg_arg_info = .{
                .air_inst = inst,
                .arg_index = arg_index,
            },
        },
    });

    if (self.liveness.isUnused(inst))
        return self.finishAirBookkeeping();

    switch (mcv) {
        .register => |reg| {
            // Claim the ABI-assigned register so the allocator tracks it.
            self.register_manager.getRegAssumeFree(reg, inst);
        },
        else => {},
    }

    return self.finishAir(inst, mcv, .{ .none, .none, .none });
}

/// Lowers an AIR `block`: registers a reloc/result receptacle, generates the
/// body, then patches all `bpcc` branches that targeted the block's end.
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
    try self.blocks.putNoClobber(self.gpa, inst, .{
        // A block is a setup to be able to jump to the end.
        .relocs = .{},
        // It also acts as a receptacle for break operands.
        // Here we use `MCValue.none` to represent a null value so that the first
        // break instruction will choose a MCValue for the block result and overwrite
        // this field. Following break instructions will use that MCValue to put their
        // block results.
        .mcv = MCValue{ .none = {} },
    });
    defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Block, ty_pl.payload);
    const body = self.air.extra[extra.end..][0..extra.data.body_len];
    try self.genBody(body);

    // relocations for `bpcc` instructions
    const relocs = &self.blocks.getPtr(inst).?.relocs;
    if (relocs.items.len > 0 and relocs.items[relocs.items.len - 1] == self.mir_instructions.len - 1) {
        // If the last Mir instruction is the last relocation (which
        // would just jump one instruction further), it can be safely
        // removed
        self.mir_instructions.orderedRemove(relocs.pop());
    }
    for (relocs.items) |reloc| {
        try self.performReloc(reloc);
    }

    const result = self.blocks.getPtr(inst).?.mcv;
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

/// Lowers an AIR `breakpoint` as a software trap (`ta 0x01`).
fn airBreakpoint(self: *Self) !void {
    // ta 0x01
    _ = try self.addInst(.{
        .tag = .tcc,
        .data = .{
            .trap = .{
                .is_imm = true,
                .cond = 0b1000, // TODO need to look into changing this into an enum
                .rs2_or_imm = .{ .imm = 0x01 },
            },
        },
    });
    return self.finishAirBookkeeping();
}

/// Lowers an AIR `call`: moves arguments into their ABI locations, loads the
/// callee address (via the GOT for known ELF functions, or from the resolved
/// function pointer otherwise) and emits an indirect `jmpl` through %o7.
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.Modifier) !void {
    if (modifier == .always_tail) return self.fail("TODO implement tail calls for {}", .{self.target.cpu.arch});
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const callee = pl_op.operand;
    const extra = self.air.extraData(Air.Call, pl_op.payload);
    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]);
    const ty = self.air.typeOf(callee);

    const fn_ty = switch (ty.zigTypeTag()) {
        .Fn => ty,
        .Pointer => ty.childType(),
        else => unreachable,
    };

    var info = try self.resolveCallingConventionValues(fn_ty, .caller);
    defer info.deinit(self);

    for (info.args) |mc_arg, arg_i| {
        const arg = args[arg_i];
        const arg_ty = self.air.typeOf(arg);
        const arg_mcv = try self.resolveInst(arg);

        switch (mc_arg) {
            .none => continue,
            .undef => unreachable,
            .immediate => unreachable,
            .unreach => unreachable,
            .dead => unreachable,
            .memory => unreachable,
            .register => |reg| {
                try self.register_manager.getReg(reg, null);
                try self.genSetReg(arg_ty, reg, arg_mcv);
            },
            .stack_offset => {
                return self.fail("TODO implement calling with parameters in memory", .{});
            },
            .ptr_stack_offset => {
                return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
            },
        }
    }

    // Due to incremental compilation, how function calls are generated depends
    // on linking.
    if (self.air.value(callee)) |func_value| {
        if (self.bin_file.tag == link.File.Elf.base_tag) {
            if (func_value.castTag(.function)) |func_payload| {
                const func = func_payload.data;
                const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
                    const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
                    const mod = self.bin_file.options.module.?;
                    break :blk @intCast(u32, got.p_vaddr + mod.declPtr(func.owner_decl).link.elf.offset_table_index * ptr_bytes);
                } else unreachable;

                try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });

                _ = try self.addInst(.{
                    .tag = .jmpl,
                    .data = .{ .branch_link_indirect = .{ .reg = .o7 } },
                });
            } else if (func_value.castTag(.extern_fn)) |_| {
                return self.fail("TODO implement calling extern functions", .{});
            } else {
                return self.fail("TODO implement calling bitcasted functions", .{});
            }
        } else @panic("TODO SPARCv9 currently does not support non-ELF binaries");
    } else {
        assert(ty.zigTypeTag() == .Pointer);
        const mcv = try self.resolveInst(callee);

        try self.genSetReg(ty, .o7, mcv);

        _ = try self.addInst(.{
            .tag = .jmpl,
            .data = .{ .branch_link_indirect = .{ .reg = .o7 } },
        });
    }

    const result = info.return_value;

    if (args.len + 1 <= Liveness.bpi - 1) {
        var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
        buf[0] = callee;
        std.mem.copy(Air.Inst.Ref, buf[1..], args);
        return self.finishAir(inst, result, buf);
    }

    @panic("TODO handle return value with BigTomb");
}

/// Debug lexical-block marker; currently a no-op besides liveness bookkeeping.
fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
    // TODO emit debug info lexical block
    return self.finishAir(inst, .dead, .{ .none, .none, .none });
}

/// Debug inline-function marker; currently a no-op besides liveness bookkeeping.
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
    // TODO emit debug info for function change
    _ = function;
    return self.finishAir(inst, .dead, .{ .none, .none, .none });
}

/// Records a source line/column position as a `dbg_line` MIR instruction.
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
    const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;

    _ = try self.addInst(.{
        .tag = .dbg_line,
        .data = .{
            .dbg_line_column = .{
                .line = dbg_stmt.line,
                .column = dbg_stmt.column,
            },
        },
    });

    return self.finishAirBookkeeping();
}

/// Debug variable marker; currently only keeps the operand alive for liveness.
fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const name = self.air.nullTerminatedString(pl_op.payload);
    const operand = pl_op.operand;
    // TODO emit debug info for this variable
    _ = name;
    return self.finishAir(inst, .dead, .{ operand, .none, .none });
}

/// Division lowering is not implemented yet; only the dead-result case passes.
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Lowers an AIR `ret`: moves the operand into the return location and
/// reserves a slot for the exitlude jump (patched later).
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    try self.ret(operand);
    return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}

/// Return-by-load is not implemented yet.
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const ptr = try self.resolveInst(un_op);
    _ = ptr;
    return self.fail("TODO implement airRetLoad for {}", .{self.target.cpu.arch});
    //return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}

/// Allocates a stack slot for the return value and yields a pointer to it.
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
    const stack_offset = try self.allocMemPtr(inst);
    return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
}

/// Store lowering is not implemented yet.
fn airStore(self: *Self, inst: Air.Inst.Index) !void {
    _ = self;
    _ = inst;
    return self.fail("TODO implement store for {}", .{self.target.cpu.arch});
}

/// Switch lowering is not implemented yet.
fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
    _ = self;
    _ = inst;
    return self.fail("TODO implement switch for {}", .{self.target.cpu.arch});
}

// Common helper functions

/// Appends one MIR instruction and returns its index.
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
    const gpa = self.gpa;

    try self.mir_instructions.ensureUnusedCapacity(gpa, 1);

    const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
    self.mir_instructions.appendAssumeCapacity(inst);
    return result_index;
}

/// Reserves `abi_size` bytes of stack (aligned to `abi_align`) for `inst`
/// and returns the frame offset; also grows the tracked frame high-water mark.
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
    if (abi_align > self.stack_align)
        self.stack_align = abi_align;
    // TODO find a free slot instead of always appending
    const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align);
    self.next_stack_offset = offset + abi_size;
    if (self.next_stack_offset > self.max_end_stack)
        self.max_end_stack = self.next_stack_offset;
    try self.stack.putNoClobber(self.gpa, offset, .{
        .inst = inst,
        .size = abi_size,
    });
    return offset;
}

/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
    const pointee_ty = self.air.typeOfIndex(inst).elemType();

    if (!pointee_ty.hasRuntimeBits()) {
        // As this stack item will never be dereferenced at runtime,
        // return the stack offset 0. Stack offset 0 will be where all
        // zero-sized stack allocations live as non-zero-sized
        // allocations will always have an offset > 0.
        return @as(u32, 0);
    }

    const byte_size = math.cast(u32, pointee_ty.abiSize(self.target.*)) catch {
        const mod = self.bin_file.options.module.?;
        return self.fail("type '{}' too big to fit into stack frame", .{pointee_ty.fmt(mod)});
    };
    // TODO swap this for inst.ty.ptrAlign
    const byte_align = pointee_ty.abiAlignment(self.target.*);

    return self.allocMem(inst, byte_size, byte_align);
}

/// Allocates a home for the value produced by `inst`: a register when
/// `reg_ok` is set, the value fits in 8 bytes and a register is free,
/// otherwise a slot in the stack frame.
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
    const value_ty = self.air.typeOfIndex(inst);

    const byte_size = math.cast(u32, value_ty.abiSize(self.target.*)) catch {
        const mod = self.bin_file.options.module.?;
        return self.fail("type '{}' too big to fit into stack frame", .{value_ty.fmt(mod)});
    };
    const byte_align = value_ty.abiAlignment(self.target.*);
    if (byte_align > self.stack_align) self.stack_align = byte_align;

    // Make sure the type can fit in a register before we try to allocate one.
    if (reg_ok and byte_size <= 8) {
        if (self.register_manager.tryAllocReg(inst)) |reg| {
            return MCValue{ .register = reg };
        }
    }

    const frame_offset = try self.allocMem(inst, byte_size, byte_align);
    return MCValue{ .stack_offset = frame_offset };
}

/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
    const reg = try self.register_manager.allocReg(null);
    try self.genSetReg(ty, reg, mcv);
    return reg;
}

/// Reserves room in the current branch's inst_table for `additional_count`
/// upcoming deaths, so later putAssumeCapacity calls cannot fail.
fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
    const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table;
    try table.ensureUnusedCapacity(self.gpa, additional_count);
}

/// Records a codegen error message and returns `error.CodegenFail`.
/// Must only be called once per codegen run (asserts err_msg was unset).
fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
    @setCold(true);
    assert(self.err_msg == null);
    self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args);
    return error.CodegenFail;
}

/// Called when there are no operands, and the instruction is always unreferenced.
fn finishAirBookkeeping(self: *Self) void {
    if (std.debug.runtime_safety) {
        self.air_bookkeeping += 1;
    }
}

/// Standard epilogue for an instruction handler: processes operand deaths
/// from the liveness tomb bits, records `result` for `inst` if it is used,
/// and advances the bookkeeping counter.
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
    var tomb_bits = self.liveness.getTombBits(inst);
    for (operands) |op| {
        // One tomb bit per operand, consumed LSB-first; a set bit means the
        // operand dies at this instruction.
        const dies = @truncate(u1, tomb_bits) != 0;
        tomb_bits >>= 1;
        if (!dies) continue;
        const op_int = @enumToInt(op);
        // Refs below typed_value_map.len are interned constants, not instructions.
        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
        self.processDeath(op_index);
    }
    // The remaining bit says whether `inst` itself is unreferenced.
    const is_used = @truncate(u1, tomb_bits) == 0;
    if (is_used) {
        log.debug("%{d} => {}", .{ inst, result });
        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
        branch.inst_table.putAssumeCapacityNoClobber(inst, result);

        switch (result) {
            .register => |reg| {
                // In some cases (such as bitcast), an operand
                // may be the same MCValue as the result. If
                // that operand died and was a register, it
                // was freed by processDeath. We have to
                // "re-allocate" the register.
                if (self.register_manager.isRegFree(reg)) {
                    self.register_manager.getRegAssumeFree(reg, inst);
                }
            },
            else => {},
        }
    }
    self.finishAirBookkeeping();
}

/// Emits a zero-extending load of `abi_size` bytes from `addr_reg + off`
/// into `value_reg`. `off` is either a register or a 13-bit signed immediate.
fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_type: type, off: off_type, abi_size: u64) !void {
    assert(off_type == Register or off_type == i13);

    const is_imm = (off_type == i13);
    const rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off };

    switch (abi_size) {
        1 => {
            _ = try self.addInst(.{
                .tag = .ldub,
                .data = .{
                    .arithmetic_3op = .{
                        .is_imm = is_imm,
                        .rd = value_reg,
                        .rs1 = addr_reg,
                        .rs2_or_imm = rs2_or_imm,
                    },
                },
            });
        },
        2 => {
            _ = try self.addInst(.{
                .tag = .lduh,
                .data = .{
                    .arithmetic_3op = .{
                        .is_imm = is_imm,
                        .rd = value_reg,
                        .rs1 = addr_reg,
                        .rs2_or_imm = rs2_or_imm,
                    },
                },
            });
        },
        4 => {
            _ = try self.addInst(.{
                .tag = .lduw,
                .data = .{
                    .arithmetic_3op = .{
                        .is_imm = is_imm,
                        .rd = value_reg,
                        .rs1 = addr_reg,
                        .rs2_or_imm = rs2_or_imm,
                    },
                },
            });
        },
        8 => {
            _ = try self.addInst(.{
                .tag = .ldx,
                .data = .{
                    .arithmetic_3op = .{
                        .is_imm = is_imm,
                        .rd = value_reg,
                        .rs1 = addr_reg,
                        .rs2_or_imm = rs2_or_imm,
                    },
                },
            });
        },
        3, 5, 6, 7 => return self.fail("TODO: genLoad for more abi_sizes", .{}),
        else => unreachable,
    }
}

/// Materializes `mcv` into `reg`. Immediates are synthesized with the
/// canonical SPARCv9 sequences (or / sethi+or / sethi+or+sllx+or, and a
/// two-register sequence for full 64-bit values).
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
    switch (mcv) {
        .dead => unreachable,
        .unreach, .none => return, // Nothing to do.
        .undef => {
            if (!self.wantSafety())
                return; // The already existing value will do just fine.
            // Write the debug undefined value.
            return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
        },
        .ptr_stack_offset => |off| {
            // NOTE(review): the u12 cast limits this to non-negative offsets below
            // 4096 even though the add immediate field is a signed 13-bit value.
            const simm13 = math.cast(u12, off) catch
                return self.fail("TODO larger stack offsets", .{});

            _ = try self.addInst(.{
                .tag = .add,
                .data = .{
                    .arithmetic_3op = .{
                        .is_imm = true,
                        .rd = reg,
                        .rs1 = .sp,
                        .rs2_or_imm = .{ .imm = simm13 },
                    },
                },
            });
        },
        .immediate => |x| {
            if (x <= math.maxInt(u12)) {
                // or %g0, imm, reg
                _ = try self.addInst(.{
                    .tag = .@"or",
                    .data = .{
                        .arithmetic_3op = .{
                            .is_imm = true,
                            .rd = reg,
                            .rs1 = .g0,
                            .rs2_or_imm = .{ .imm = @truncate(u12, x) },
                        },
                    },
                });
            } else if (x <= math.maxInt(u32)) {
                // sethi %hi(x), reg; or reg, %lo(x), reg
                _ = try self.addInst(.{
                    .tag = .sethi,
                    .data = .{
                        .sethi = .{
                            .rd = reg,
                            .imm = @truncate(u22, x >> 10),
                        },
                    },
                });

                _ = try self.addInst(.{
                    .tag = .@"or",
                    .data = .{
                        .arithmetic_3op = .{
                            .is_imm = true,
                            .rd = reg,
                            .rs1 = reg,
                            .rs2_or_imm = .{ .imm = @truncate(u10, x) },
                        },
                    },
                });
            } else if (x <= math.maxInt(u44)) {
                // Load the upper 32 bits, shift left by 12, then or in the low 12.
                try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 12) });

                _ = try self.addInst(.{
                    .tag = .sllx,
                    .data = .{
                        .shift = .{
                            .is_imm = true,
                            .width = .shift64,
                            .rd = reg,
                            .rs1 = reg,
                            .rs2_or_imm = .{ .imm = 12 },
                        },
                    },
                });

                _ = try self.addInst(.{
                    .tag = .@"or",
                    .data = .{
                        .arithmetic_3op = .{
                            .is_imm = true,
                            .rd = reg,
                            .rs1 = reg,
                            .rs2_or_imm = .{ .imm = @truncate(u12, x) },
                        },
                    },
                });
            } else {
                // Need to allocate a temporary register to load 64-bit immediates.
                const tmp_reg = try self.register_manager.allocReg(null);

                try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) });
                try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) });

                _ = try self.addInst(.{
                    .tag = .sllx,
                    .data = .{
                        .shift = .{
                            .is_imm = true,
                            .width = .shift64,
                            .rd = reg,
                            .rs1 = reg,
                            .rs2_or_imm = .{ .imm = 32 },
                        },
                    },
                });

                _ = try self.addInst(.{
                    .tag = .@"or",
                    .data = .{
                        .arithmetic_3op = .{
                            .is_imm = false,
                            .rd = reg,
                            .rs1 = reg,
                            .rs2_or_imm = .{ .rs2 = tmp_reg },
                        },
                    },
                });
            }
        },
        .register => |src_reg| {
            // If the registers are the same, nothing to do.
            if (src_reg.id() == reg.id())
                return;

            // or %g0, src, dst (aka mov src, dst)
            _ = try self.addInst(.{
                .tag = .@"or",
                .data = .{
                    .arithmetic_3op = .{
                        .is_imm = false,
                        .rd = reg,
                        .rs1 = .g0,
                        .rs2_or_imm = .{ .rs2 = src_reg },
                    },
                },
            });
        },
        .memory => |addr| {
            // The value is in memory at a hard-coded address.
            // If the type is a pointer, it means the pointer address is at this memory location.
            try self.genSetReg(ty, reg, .{ .immediate = addr });
            try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*));
        },
        .stack_offset => |off| {
            const simm13 = math.cast(u12, off) catch
                return self.fail("TODO larger stack offsets", .{});
            try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*));
        },
    }
}

/// Stores `mcv` into the stack slot at `stack_offset`. Most cases still
/// bounce through a temporary register or are unimplemented.
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
    const abi_size = ty.abiSize(self.target.*);
    switch (mcv) {
        .dead => unreachable,
        .unreach, .none => return, // Nothing to do.
        .undef => {
            if (!self.wantSafety())
                return; // The already existing value will do just fine.
            // TODO Upgrade this to a memset call when we have that available.
            switch (ty.abiSize(self.target.*)) {
                1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
                8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
                else => return self.fail("TODO implement memset", .{}),
            }
        },
        .immediate,
        .ptr_stack_offset,
        => {
            const reg = try self.copyToTmpRegister(ty, mcv);
            return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
        },
        .register => return self.fail("TODO implement storing types abi_size={}", .{abi_size}),
        .memory, .stack_offset => return self.fail("TODO implement memcpy", .{}),
    }
}

/// Lowers a comptime-known value to an MCValue. Currently handles undef,
/// decl references (through the GOT) and integers up to 64 bits.
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
    if (typed_value.val.isUndef())
        return MCValue{ .undef = {} };

    if (typed_value.val.castTag(.decl_ref)) |payload| {
        return self.lowerDeclRef(typed_value, payload.data);
    }
    if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
        return self.lowerDeclRef(typed_value, payload.data.decl_index);
    }
    const target = self.target.*;

    switch (typed_value.ty.zigTypeTag()) {
        .Int => {
            const info = typed_value.ty.intInfo(self.target.*);
            if (info.bits <= 64) {
                const unsigned = switch (info.signedness) {
                    .signed => blk: {
                        const signed = typed_value.val.toSignedInt();
                        break :blk @bitCast(u64, signed);
                    },
                    .unsigned => typed_value.val.toUnsignedInt(target),
                };
                return MCValue{ .immediate = unsigned };
            } else {
                return self.fail("TODO implement int genTypedValue of > 64 bits", .{});
            }
        },
        .ComptimeInt => unreachable, // semantic analysis prevents this
        .ComptimeFloat => unreachable, // semantic analysis prevents this
        else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}),
    }
}

/// Looks up the current MCValue for `inst`, searching the branch stack from
/// innermost to outermost. Asserts the instruction is found alive somewhere.
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
    // Treat each stack item as a "layer" on top of the previous one.
    var i: usize = self.branch_stack.items.len;
    while (true) {
        i -= 1;
        if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
            assert(mcv != .dead);
            return mcv;
        }
    }
}

/// Builds a BigTomb iterator for instructions with more operands than the
/// fixed Liveness buffer can carry, pre-reserving death-table capacity.
fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
    try self.ensureProcessDeathCapacity(operand_count + 1);
    return BigTomb{
        .function = self,
        .inst = inst,
        .tomb_bits = self.liveness.getTombBits(inst),
        .big_tomb_bits = self.liveness.special.get(inst) orelse 0,
        .bit_index = 0,
    };
}

/// Lowers a reference to a declaration: zero-bit pointees become `.none`,
/// otherwise the decl's GOT entry address is returned as `.memory` (ELF only).
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
    const ptr_bytes: u64 = @divExact(ptr_bits, 8);

    // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
    if (tv.ty.zigTypeTag() == .Pointer) blk: {
        if (tv.ty.castPtrToFn()) |_| break :blk;
        if (!tv.ty.elemType2().hasRuntimeBits()) {
            return MCValue.none;
        }
    }

    const mod = self.bin_file.options.module.?;
    const decl = mod.declPtr(decl_index);
    mod.markDeclAlive(decl);
    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
        return MCValue{ .memory = got_addr };
    } else {
        return self.fail("TODO codegen non-ELF const Decl pointer", .{});
    }
}

/// Maps a textual register name (e.g. "o7") to a `Register`, preferring the
/// register type's own parser when it provides one.
fn parseRegName(name: []const u8) ?Register {
    if (@hasDecl(Register, "parseRegName")) {
        return Register.parseRegName(name);
    }
    return std.meta.stringToEnum(Register, name);
}

/// Patches a pending branch so it targets the current end of the MIR stream.
/// Only `bpcc` branches are relocatable so far.
fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
    const tag = self.mir_instructions.items(.tag)[inst];

    switch (tag) {
        .bpcc => self.mir_instructions.items(.data)[inst].branch_predict.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
        else => unreachable,
    }
}

/// Asserts there is already capacity to insert into top branch inst_table.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
    const air_tags = self.air.instructions.items(.tag);
    if (air_tags[inst] == .constant) return; // Constants are immortal.
    // When editing this function, note that the logic must synchronize with `reuseOperand`.
    const prev_value = self.getResolvedInstValue(inst);
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    branch.inst_table.putAssumeCapacity(inst, .dead);
    switch (prev_value) {
        .register => |reg| {
            self.register_manager.freeReg(reg);
        },
        else => {}, // TODO process stack allocation death
    }
}

/// Computes where each parameter and the return value of a function with type
/// `fn_ty` live, either from the caller's (%o0-%o5) or the callee's (%i0-%i5)
/// point of view, per the SPARC 64-bit psABI calling sequence.
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
    const cc = fn_ty.fnCallingConvention();
    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
    defer self.gpa.free(param_types);
    fn_ty.fnParamTypes(param_types);
    var result: CallMCValues = .{
        .args = try self.gpa.alloc(MCValue, param_types.len),
        // These undefined values must be populated before returning from this function.
        .return_value = undefined,
        .stack_byte_count = undefined,
        .stack_align = undefined,
    };
    errdefer self.gpa.free(result.args);

    const ret_ty = fn_ty.fnReturnType();

    switch (cc) {
        .Naked => {
            assert(result.args.len == 0);
            result.return_value = .{ .unreach = {} };
            result.stack_byte_count = 0;
            result.stack_align = 1;
            return result;
        },
        .Unspecified, .C => {
            // SPARC Compliance Definition 2.4.1, Chapter 3
            // Low-Level System Information (64-bit psABI) - Function Calling Sequence
            var next_register: usize = 0;
            var next_stack_offset: u32 = 0;

            // The caller puts the argument in %o0-%o5, which becomes %i0-%i5 inside the callee.
            const argument_registers = switch (role) {
                .caller => abi.c_abi_int_param_regs_caller_view,
                .callee => abi.c_abi_int_param_regs_callee_view,
            };

            for (param_types) |ty, i| {
                const param_size = @intCast(u32, ty.abiSize(self.target.*));
                if (param_size <= 8) {
                    if (next_register < argument_registers.len) {
                        result.args[i] = .{ .register = argument_registers[next_register] };
                        next_register += 1;
                    } else {
                        result.args[i] = .{ .stack_offset = next_stack_offset };
                        // BUGFIX: was `next_register += next_stack_offset;`, which
                        // never advanced the stack cursor (every stack argument got
                        // offset 0) and corrupted the register index with a byte
                        // offset. Advance the stack cursor past this argument instead.
                        next_stack_offset += param_size;
                    }
                } else if (param_size <= 16) {
                    if (next_register < argument_registers.len - 1) {
                        // Two registers are still available for the pair.
                        return self.fail("TODO MCValues with 2 registers", .{});
                    } else if (next_register < argument_registers.len) {
                        // Exactly one register left: half register, half stack.
                        return self.fail("TODO MCValues split register + stack", .{});
                    } else {
                        result.args[i] = .{ .stack_offset = next_stack_offset };
                        // BUGFIX: see above — advance the stack cursor, not next_register.
                        next_stack_offset += param_size;
                    }
                } else {
                    result.args[i] = .{ .stack_offset = next_stack_offset };
                    // BUGFIX: see above — advance the stack cursor, not next_register.
                    next_stack_offset += param_size;
                }
            }

            result.stack_byte_count = next_stack_offset;
            result.stack_align = 16;

            if (ret_ty.zigTypeTag() == .NoReturn) {
                result.return_value = .{ .unreach = {} };
            } else if (!ret_ty.hasRuntimeBits()) {
                result.return_value = .{ .none = {} };
            } else {
                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
                // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
                if (ret_ty_size <= 8) {
                    result.return_value = switch (role) {
                        .caller => .{ .register = abi.c_abi_int_return_regs_caller_view[0] },
                        .callee => .{ .register = abi.c_abi_int_return_regs_callee_view[0] },
                    };
                } else {
                    return self.fail("TODO support more return values for sparcv9", .{});
                }
            }
        },
        else => return self.fail("TODO implement function parameters for {} on sparcv9", .{cc}),
    }

    return result;
}

/// Resolves an AIR ref to its MCValue: interned constants are lowered (and
/// memoized in the outermost branch table); zero-bit values become `.none`;
/// everything else is looked up in the branch stack.
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
    // First section of indexes correspond to a set number of constant values.
    const ref_int = @enumToInt(inst);
    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
        const tv = Air.Inst.Ref.typed_value_map[ref_int];
        if (!tv.ty.hasRuntimeBits()) {
            return MCValue{ .none = {} };
        }
        return self.genTypedValue(tv);
    }

    // If the type has no codegen bits, no need to store it.
    const inst_ty = self.air.typeOf(inst);
    if (!inst_ty.hasRuntimeBits())
        return MCValue{ .none = {} };

    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
    switch (self.air.instructions.items(.tag)[inst_index]) {
        .constant => {
            // Constants have static lifetimes, so they are always memoized in the outer most table.
            const branch = &self.branch_stack.items[0];
            const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
            if (!gop.found_existing) {
                const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
                gop.value_ptr.* = try self.genTypedValue(.{
                    .ty = inst_ty,
                    .val = self.air.values[ty_pl.payload],
                });
            }
            return gop.value_ptr.*;
        },
        .const_ty => unreachable,
        else => return self.getResolvedInstValue(inst_index),
    }
}

/// Moves `mcv` into the function's return location and reserves a `nop`
/// placeholder that the epilogue pass will patch into the exit jump.
fn ret(self: *Self, mcv: MCValue) !void {
    const ret_ty = self.fn_type.fnReturnType();
    try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
    // Just add space for an instruction, patch this later
    const index = try self.addInst(.{
        .tag = .nop,
        .data = .{ .nop = {} },
    });
    try self.exitlude_jump_relocs.append(self.gpa, index);
}

/// Tries to reuse a dying operand's location (register or stack slot) as the
/// result location for `inst`. Returns true on success; in that case the
/// caller must NOT also process the operand's death.
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
    if (!self.liveness.operandDies(inst, op_index))
        return false;

    switch (mcv) {
        .register => |reg| {
            // If it's in the registers table, need to associate the register with the
            // new instruction.
            if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
                if (!self.register_manager.isRegFree(reg)) {
                    self.register_manager.registers[index] = inst;
                }
            }
            log.debug("%{d} => {} (reused)", .{ inst, reg });
        },
        .stack_offset => |off| {
            log.debug("%{d} => stack offset {d} (reused)", .{ inst, off });
        },
        else => return false,
    }

    // Prevent the operand deaths processing code from deallocating it.
    self.liveness.clearOperandDeath(inst, op_index);

    // That makes us responsible for doing the rest of the stuff that processDeath would have done.
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead);

    return true;
}

/// Sets the value without any modifications to register allocation metadata or stack allocation metadata.
fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
    switch (loc) {
        .none => return,
        .register => |reg| return self.genSetReg(ty, reg, val),
        .stack_offset => |off| return self.genSetStack(ty, off, val),
        .memory => {
            return self.fail("TODO implement setRegOrMem for memory", .{});
        },
        else => unreachable,
    }
}

/// Spills the register currently holding `inst` to a fresh stack slot and
/// repoints the instruction's tracked MCValue at that slot.
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
    const stack_mcv = try self.allocRegOrMem(inst, false);
    log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
    const reg_mcv = self.getResolvedInstValue(inst);
    assert(reg == reg_mcv.register);
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    try branch.inst_table.put(self.gpa, inst, stack_mcv);
    try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
}

/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`.
fn wantSafety(self: *Self) bool {
    return switch (self.bin_file.options.optimize_mode) {
        .Debug => true,
        .ReleaseSafe => true,
        .ReleaseFast => false,
        .ReleaseSmall => false,
    };
}
src/arch/sparcv9/CodeGen.zig
//! Flattened Device Tree (FDT/DTB) parser.
//! This file is itself the `DeviceTree` struct: an instance stores the decoded
//! FDT header plus the physical base address where the blob was loaded, and
//! exposes node/property lookup on top of a streaming token parser.
const std = @import("std");
const kernel = @import("../../kernel.zig");
const assert = kernel.assert;
const TODO = kernel.TODO;
const align_forward = kernel.align_forward;
const string_eq = kernel.string_eq;
const starts_with = kernel.string_starts_with;
const ends_with = kernel.string_ends_with;
// All multi-byte FDT fields are big-endian per the devicetree spec.
const read_big_endian = std.mem.readIntSliceBig;
const page_size = kernel.arch.page_size;
const Memory = kernel.Memory;
const print = kernel.arch.print;
const write = kernel.arch.write;

const DeviceTree = @This();

// Global scratch result; only referenced by the commented-out region code below.
var result: DeviceTree.Result = undefined;

const soft_separator = "----------------------------------------------------------------\n";
const hard_separator = "================================================================\n";

// Decoded FDT header (native-endian after `Header.read`).
header: Header,
// Physical address where the DTB blob lives.
base_address: u64,
// Byte offset (within the structure block) of the first top-level child node;
// filled in lazily during `parse` and used by `get_node_finding_parser`.
main_nodes_start: u64,

/// Reads the header at `base_address`, dumps the memory-reservation block and
/// walks the whole structure block, printing every node/property it visits.
pub fn parse(self: *@This()) void {
    write(hard_separator);
    defer write(hard_separator);
    print("Starting parsing the Flattened Device Tree...\n", .{});
    self.header = DeviceTree.Header.read(@intToPtr([*]const u8, self.base_address)[0..@sizeOf(DeviceTree.Header)]) catch unreachable;
    DeviceTree.MemoryReservationBlock.parse(self.header, self.base_address);
    var dt_structure_block_parser = DeviceTree.StructureBlock.Parser{ .slice = undefined, .i = 0, .device_tree = self };
    dt_structure_block_parser.parse();
    print("Done parsing the FDT\n", .{});
}

/// How a wanted node name is matched against the names in the tree.
pub const SearchType = enum {
    exact,
    start,
    end,
};

/// Returns a fresh parser positioned at the first top-level child node
/// (skipping the root-node preamble recorded in `main_nodes_start`).
pub fn get_node_finding_parser(self: *@This()) StructureBlock.Parser {
    const slice_size = self.header.device_tree_struct_size - self.main_nodes_start;
    return StructureBlock.Parser{
        .slice = @intToPtr([*]u8, self.base_address + self.header.device_tree_struct_offset + self.main_nodes_start)[0..slice_size],
        .i = 0,
        .device_tree = self,
    };
}

/// Looks up `property_name` under `main_node`, optionally descending through
/// `maybe_intermediate_nodes` (matched with the parallel search types).
/// Returns null when the main node is not found.
/// NOTE(review): only the LAST intermediate node's lookup result is asserted;
/// earlier misses are silently overwritten by the loop — confirm intended.
/// NOTE(review): intermediate nodes with a null search-type list fall through
/// and return null rather than searching — confirm intended.
pub fn find_property(self: *@This(), main_node: []const u8, property_name: []const u8, comptime search_type: SearchType, maybe_intermediate_nodes: ?[]const []const u8, comptime maybe_intermediate_search_types: ?[]const SearchType) ?StructureBlock.Parser.Property {
    var parser = self.get_node_finding_parser();
    if (parser.find_node_from_current_offset(main_node, search_type)) |_| {
        if (maybe_intermediate_nodes) |intermediate_nodes| {
            if (maybe_intermediate_search_types) |intermediate_search_types| {
                var last_node = false;
                for (intermediate_nodes) |node, i| {
                    const intermediate_search_type = intermediate_search_types[i];
                    last_node = parser.find_node_from_current_offset(node, intermediate_search_type) != null;
                }
                kernel.assert(@src(), last_node);
                return parser.find_property_in_current_node(property_name);
            }
        } else {
            return parser.find_property_in_current_node(property_name);
        }
    }

    return null;
}

/// A located node: the parser positioned just inside it plus the node's name.
const FindNodeResult = struct {
    parser: StructureBlock.Parser,
    name: []const u8,
};

/// Finds a top-level node whose name matches `node` per `search_type`.
pub fn find_node(self: *@This(), node: []const u8, comptime search_type: SearchType) ?FindNodeResult {
    var parser = self.get_node_finding_parser();
    if (parser.find_node_from_current_offset(node, search_type)) |node_name| {
        return FindNodeResult{
            .parser = parser,
            .name = node_name,
        };
    }

    return null;
}

/// FDT header, field order matching the on-disk layout (stored big-endian).
const Header = struct {
    magic: u32,
    size: u32,
    device_tree_struct_offset: u32,
    device_tree_strings_offset: u32,
    rsvmap_memory_offset: u32,
    version: u32,
    last_comp_version: u32,
    boot_cpuid_physical_address: u32,
    device_tree_strings_size: u32,
    device_tree_struct_size: u32,

    const expected_magic = 0xd00dfeed;

    // NOTE(review): `incorrect_magic` is declared but `read` asserts instead
    // of ever returning it — confirm which behavior is wanted.
    const ReadError = error{
        incorrect_magic,
    };

    /// Byte-swaps the header word-by-word from big-endian into a native struct.
    fn read(bytes: []const u8) ReadError!DeviceTree.Header {
        var bytes_it = bytes;
        var device_tree_header: DeviceTree.Header = undefined;
        // Reinterpret the header as a u32 array and fill each word.
        var device_tree_it_bytes = @ptrCast([*]u32, &device_tree_header);
        for (device_tree_it_bytes[0 .. @sizeOf(DeviceTree.Header) / @sizeOf(u32)]) |*device_tree_n| {
            device_tree_n.* = read_big_endian(u32, bytes_it);
            bytes_it.ptr += @sizeOf(u32);
            bytes_it.len -= @sizeOf(u32);
        }
        assert(@src(), device_tree_header.magic == expected_magic);
        return device_tree_header;
    }
};

/// Memory-reservation block: a zero-terminated list of (address, size) pairs.
const MemoryReservationBlock = struct {
    const Entry = struct {
        address: u64,
        size: u64,
    };

    /// Walks the reservation list at `header_offset + rsvmap_memory_offset`,
    /// printing each entry; stops at the (0, 0) terminator.
    fn parse(header: Header, header_offset: u64) void {
        const memory_reservation_block_offset = header.rsvmap_memory_offset;
        const block_address = header_offset + memory_reservation_block_offset;
        var block_it = @intToPtr([*]u8, block_address);

        var entry_count: u64 = 0;

        while (true) {
            var entry: Entry = undefined;
            entry.address = read_big_endian(u64, block_it[0..@sizeOf(u64)]);
            block_it += @sizeOf(u64);
            entry.size = read_big_endian(u64, block_it[0..@sizeOf(u64)]);
            block_it += @sizeOf(u64);

            if (entry.address == 0 and entry.size == 0) break;
            print("Entry (0x{x}, 0x{x})\n", .{ entry.address, entry.size });
            entry_count += 1;
        }
    }
};

const StructureBlock = struct {
    /// Streaming token parser over the FDT structure block. `i` is the byte
    /// cursor into `slice`; every parse_* helper advances it, so statement
    /// order below is load-bearing.
    const Parser = struct {
        slice: []const u8,
        i: u64,
        device_tree: *DeviceTree,

        /// Parses the root node: consumes its well-known properties, records
        /// `main_nodes_start` at the first child node and recurses into children.
        fn parse(self: *@This()) void {
            const offset = self.device_tree.header.device_tree_struct_offset;
            const size = self.device_tree.header.device_tree_struct_size;
            const address = self.device_tree.base_address + offset;
            self.slice = @intToPtr([*]u8, address)[0..size];

            // Cell counts inherited by child nodes; updated by root properties.
            var address_cells: u32 = 0;
            var size_cells: u32 = 0;

            while (true) {
                const main_token: Token = self.parse_token();
                if (main_token == .end) break;
                assert(@src(), self.i < self.slice.len);

                assert(@src(), main_token == .begin_node);
                const node_name = self.parse_begin_node();
                // The root node's name is the empty string.
                assert(@src(), string_eq(node_name, ""));

                while (true) {
                    const token = self.parse_token();
                    switch (token) {
                        .property => {
                            const descriptor = self.parse_property_value_descriptor();
                            const key = self.parse_string_in_string_table(descriptor);
                            print("{s}: ", .{key});
                            if (string_eq(key, "#address-cells")) {
                                assert(@src(), descriptor.len == @sizeOf(u32));
                                address_cells = self.parse_int(u32);
                                print("{}\n", .{address_cells});
                            } else if (string_eq(key, "#size-cells")) {
                                assert(@src(), descriptor.len == @sizeOf(u32));
                                size_cells = self.parse_int(u32);
                                print("{}\n", .{size_cells});
                            } else if (string_eq(key, "compatible")) {
                                const value = self.parse_property_name(descriptor);
                                print("{s}\n", .{value});
                            } else if (string_eq(key, "model")) {
                                const value = self.parse_property_name(descriptor);
                                print("{s}\n", .{value});
                            } else {
                                TODO(@src());
                            }
                        },
                        .begin_node => {
                            // Remember where the first top-level child starts,
                            // rewound past the already-consumed token word.
                            if (self.device_tree.main_nodes_start == 0) {
                                self.device_tree.main_nodes_start = self.i - @sizeOf(Token);
                            }
                            self.parse_node(1, address_cells, size_cells);
                        },
                        .end_node => break,
                        else => kernel.panic("Unexpected token: {}\n", .{token}),
                    }
                }
            }

            // Add the kernel memory region
            //const kernel_address = kernel.bounds.get_start();
            //const kernel_end = kernel.bounds.get_end();
            //const kernel_size = kernel_end - kernel_address;
            //assert(@src(), kernel_address & (page_size - 1) == 0);
            //assert(@src(), kernel_end & (page_size - 1) == 0);
            //result.reserved_memory_regions[result.reserved_memory_region_count].address = kernel_address;
            //result.reserved_memory_regions[result.reserved_memory_region_count].size = kernel_size;
            //result.reserved_memory_region_count += 1;

            //// Add the FDT memory region
            //result.reserved_memory_regions[result.reserved_memory_region_count].address = header_address;
            //result.reserved_memory_regions[result.reserved_memory_region_count].size = align_forward(header.size, page_size);
            //result.reserved_memory_region_count += 1;

            //result.address = header_address;
            //return &result;
        }

        /// Recursively prints one node and its subtree. `identation` (sic) is
        /// the print depth; the parent's #address-cells/#size-cells are
        /// inherited and may be overridden by this node's own properties.
        /// Every branch must consume the property value exactly (the cursor
        /// advances via parse_* calls), otherwise the token stream desyncs.
        fn parse_node(self: *@This(), identation: u32, parent_address_cells: u32, parent_size_cells: u32) void {
            const node_name = self.parse_begin_node();
            print_ident(identation, "* {s}:\n", .{node_name});
            const attribute_identation = identation + 1;
            _ = attribute_identation;

            var address_cells: u32 = parent_address_cells;
            var size_cells: u32 = parent_size_cells;

            while (true) {
                const token = self.parse_token();
                switch (token) {
                    .property => {
                        const property_value_descriptor = self.parse_property_value_descriptor();
                        const property_name = self.parse_string_in_string_table(property_value_descriptor);
                        print_ident(attribute_identation, "{s}: ", .{property_name});

                        // First check the standard ones
                        if (string_eq(property_name, "compatible")) {
                            const value = self.parse_property_name(property_value_descriptor);
                            print("{s}", .{value});
                        } else if (string_eq(property_name, "model")) {
                            TODO(@src());
                        } else if (string_eq(property_name, "phandle")) {
                            assert(@src(), property_value_descriptor.len == @sizeOf(u32));
                            const value = self.parse_int(u32);
                            print("{}", .{value});
                        } else if (string_eq(property_name, "status")) {
                            const value = self.parse_property_name(property_value_descriptor);
                            print("{s}", .{value});
                        } else if (string_eq(property_name, "#address-cells")) {
                            assert(@src(), property_value_descriptor.len == @sizeOf(u32));
                            address_cells = self.parse_int(u32);
                            print("{}", .{address_cells});
                        } else if (string_eq(property_name, "#size-cells")) {
                            assert(@src(), property_value_descriptor.len == @sizeOf(u32));
                            size_cells = self.parse_int(u32);
                            print_ident(attribute_identation, "{}", .{size_cells});
                        } else if (string_eq(property_name, "reg")) {
                            // "reg" is a list of (address, size) pairs whose
                            // widths come from the inherited cell counts.
                            const address_byte_count = address_cells * @sizeOf(u32);
                            const size_byte_count = size_cells * @sizeOf(u32);
                            const pair_byte_count = address_byte_count + size_byte_count;
                            const pair_count = property_value_descriptor.len / pair_byte_count;
                            var pair_i: u64 = 0;
                            while (pair_i < pair_count) : (pair_i += 1) {
                                write("(");
                                switch (address_byte_count) {
                                    @sizeOf(u32) => {
                                        const value = self.parse_int(u32);
                                        print("0x{x}", .{value});
                                    },
                                    @sizeOf(u64) => {
                                        const value = self.parse_int(u64);
                                        print("0x{x}", .{value});
                                    },
                                    0 => {},
                                    else => unreachable,
                                }
                                write(" , ");
                                // NOTE(review): a u32-wide size is not handled
                                // here (only u64 or 0) — confirm all inputs
                                // use 2 size cells.
                                switch (size_byte_count) {
                                    @sizeOf(u64) => {
                                        const value = self.parse_int(u64);
                                        print("0x{x}", .{value});
                                    },
                                    0 => {},
                                    else => unreachable,
                                }
                                write("), ");
                            }
                        } else if (string_eq(property_name, "virtual-reg")) {
                            TODO(@src());
                        } else if (string_eq(property_name, "ranges")) {
                            if (property_value_descriptor.len != 0) {
                                // Value is skipped, not decoded yet.
                                self.i += property_value_descriptor.len;
                                write("TODO");
                                //write("\n");
                                //for (self.slice[i .. i + 100]) |b, index| {
                                //print("[{}] {c}\n", .{ index, b });
                                //}
                                //TODO(@src());
                            } else {
                                write("empty");
                                //self.i += 1;
                            }
                        } else if (string_eq(property_name, "dma-ranges")) {
                            TODO(@src());
                        } else if (string_eq(property_name, "dma-coherent")) {
                            // Boolean property: present with empty value.
                            assert(@src(), property_value_descriptor.len == 0);
                        } else {
                            //Non-standard ones
                            if (starts_with(node_name, "flash")) {
                                if (string_eq(property_name, "bank-width")) {
                                    assert(@src(), property_value_descriptor.len == @sizeOf(u32));
                                    const bank_width = self.parse_int(u32);
                                    print("{}", .{bank_width});
                                } else {
                                    TODO(@src());
                                }
                            } else if (string_eq(node_name, "chosen")) {
                                // Chosen is a standard node
                                if (string_eq(property_name, "bootargs")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else if (string_eq(property_name, "stdout-path")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else {
                                    print("Property unknown: {s}", .{property_name});
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "memory")) {
                                if (string_eq(property_name, "device_type")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else {
                                    TODO(@src());
                                }
                            } else if (string_eq(node_name, "cpus")) {
                                if (string_eq(property_name, "timebase-frequency")) {
                                    self.parse_and_print_freq(property_value_descriptor);
                                } else {
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "cpu@")) {
                                if (string_eq(property_name, "device_type")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else if (string_eq(property_name, "riscv,isa")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else if (string_eq(property_name, "mmu-type")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else {
                                    TODO(@src());
                                }
                            } else if (string_eq(node_name, "interrupt-controller")) {
                                if (string_eq(property_name, "#interrupt-cells")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "interrupt-controller")) {
                                    assert(@src(), property_value_descriptor.len == 0);
                                } else {
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "core")) {
                                if (string_eq(property_name, "cpu")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else {
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "rtc@") or starts_with(node_name, "uart@")) {
                                if (string_eq(property_name, "interrupts")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "interrupt-parent")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                    //const value = property_value_descriptor.len;
                                    //print("{}", .{value});
                                    //TODO(@src());
                                } else if (string_eq(property_name, "clock-frequency")) {
                                    self.parse_and_print_freq(property_value_descriptor);
                                } else {
                                    TODO(@src());
                                }
                            } else if (string_eq(node_name, "poweroff") or string_eq(node_name, "reboot")) {
                                if (string_eq(property_name, "value")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "offset")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "regmap")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else {
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "pci@")) {
                                if (string_eq(property_name, "interrupt-map-mask")) {
                                    const value = self.parse_int(u64);
                                    const value2 = self.parse_int(u64);
                                    print("0x{x}, 0x{x}", .{ value, value2 });
                                } else if (string_eq(property_name, "interrupt-map")) {
                                    // TODO
                                    self.i += property_value_descriptor.len;
                                    //var i: u32 = 0;
                                    //while (i < byte_count) : (i += @sizeOf(u64)) {
                                    //_ = self.parse_int(u64);
                                    //}
                                    write("TODO");
                                } else if (string_eq(property_name, "bus-range")) {
                                    const value = self.parse_int(u64);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "linux,pci-domain")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "device_type")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else if (string_eq(property_name, "#interrupt-cells")) {
                                    // NOTE(review): parsed as a string here but
                                    // as a u32 in other nodes — confirm intended.
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else {
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "virtio_mmio@")) {
                                if (string_eq(property_name, "interrupts")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "interrupt-parent")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else {
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "plic@")) {
                                if (string_eq(property_name, "riscv,ndev")) {
                                    const value = self.parse_int(u32);
                                    print("{}", .{value});
                                } else if (string_eq(property_name, "interrupts-extended")) {
                                    //const value = property_value_descriptor.len;
                                    const value1 = self.parse_int(u32);
                                    const value2 = self.parse_int(u32);
                                    const value3 = self.parse_int(u32);
                                    const value4 = self.parse_int(u32);
                                    print("{}, {}, {}, {} ", .{ value1, value2, value3, value4 });
                                    write("TODO");
                                } else if (string_eq(property_name, "interrupt-controller")) {
                                    assert(@src(), property_value_descriptor.len == 0);
                                } else if (string_eq(property_name, "#interrupt-cells")) {
                                    const value = self.parse_property_name(property_value_descriptor);
                                    print("{s}", .{value});
                                } else {
                                    TODO(@src());
                                }
                            } else if (starts_with(node_name, "clint@")) {
                                if (string_eq(property_name, "interrupts-extended")) {
                                    //const value = property_value_descriptor.len;
                                    const value1 = self.parse_int(u32);
                                    const value2 = self.parse_int(u32);
                                    const value3 = self.parse_int(u32);
                                    const value4 = self.parse_int(u32);
                                    print("{}, {}, {}, {} ", .{ value1, value2, value3, value4 });
                                    write("TODO");
                                } else {
                                    TODO(@src());
                                }
                            } else {
                                TODO(@src());
                            }
                        }
                        write("\n");
                    },
                    .begin_node => self.parse_node(attribute_identation + 1, address_cells, size_cells),
                    .end_node => break,
                    else => kernel.panic("NI: {s}\n", .{@tagName(token)}),
                }
            }
        }

        // This assumes the begin_node token has already been parsed
        // NOTE(review): any token other than .property (including the node's
        // own .end_node) panics here, so the trailing `return null` is
        // unreachable — confirm whether end_node should return null instead.
        pub fn find_property_in_current_node(self: *@This(), wanted_property_name: []const u8) ?Property {
            while (true) {
                const token = self.parse_token();
                switch (token) {
                    .property => {
                        const property_value_descriptor = self.parse_property_value_descriptor();
                        const property_name = self.parse_string_in_string_table(property_value_descriptor);
                        const property_value = self.slice.ptr[self.i .. self.i + property_value_descriptor.len];
                        self.i = align_to_u32(self.i + property_value_descriptor.len);
                        if (string_eq(property_name, wanted_property_name)) {
                            return Property{
                                .name = property_name,
                                .value = property_value,
                            };
                        }
                    },
                    else => kernel.panic("NI find: {}\n", .{token}),
                }
            }

            return null;
        }

        /// Advances the cursor past a property value (u32-aligned).
        fn skip_property_value(self: *@This(), property_value_descriptor: Property.ValueDescriptor) void {
            self.i = align_to_u32(self.i + property_value_descriptor.len);
        }

        /// Prints a frequency property; only 32-bit values are handled so far.
        fn parse_and_print_freq(self: *@This(), property_value_descriptor: Property.ValueDescriptor) void {
            switch (property_value_descriptor.len) {
                @sizeOf(u32) => {
                    const value = self.parse_int(u32);
                    print("{} Hz", .{value});
                },
                @sizeOf(u64) => {
                    TODO(@src());
                },
                else => unreachable,
            }
        }

        /// `print` with `identation` levels of 4-space indent.
        fn print_ident(identation: u32, comptime format: []const u8, args: anytype) void {
            var ident_it: u32 = 0;
            while (ident_it < identation) : (ident_it += 1) {
                write("    ");
            }
            print(format, args);
        }

        /// `write` with `identation` levels of 4-space indent.
        fn write_ident(identation: u32, bytes: []const u8) void {
            var ident_it: u32 = 0;
            while (ident_it < identation) : (ident_it += 1) {
                write("    ");
            }
            write(bytes);
        }

        /// Returns the raw property value bytes and advances the (aligned) cursor.
        fn parse_property_name(self: *@This(), descriptor: Property.ValueDescriptor) []const u8 {
            const property_value = self.slice[self.i .. self.i + descriptor.len];
            self.i = align_to_u32(self.i + descriptor.len);
            return property_value;
        }

        //fn parse_properly_encoded_array(self: *@This(), descriptor: Property.ValueDescriptor) []const u8 {
        //}

        /// Skips the remainder of the current node, recursing into children.
        /// Assumes the node's begin_node token and name were already consumed.
        pub fn skip_node(self: *@This()) void {
            while (true) {
                const skip_token = self.parse_token();
                switch (skip_token) {
                    .begin_node => {
                        self.skip_cstr();
                        self.skip_node();
                    },
                    .property => {
                        self.skip_property();
                    },
                    .end_node => {
                        break;
                    },
                    else => kernel.panic("token unimplemented: {}\n", .{skip_token}),
                }
            }
        }

        /// Skips one property (descriptor + value).
        fn skip_property(self: *@This()) void {
            const descriptor = self.parse_property_value_descriptor();
            self.skip_property_value(descriptor);
        }

        /// Skips a NUL-terminated string, leaving the cursor u32-aligned.
        fn skip_cstr(self: *@This()) void {
            const len = std.mem.len(@ptrCast([*:0]const u8, self.slice[self.i..].ptr));
            self.i = align_to_u32(self.i + len + 1);
        }

        /// Reads the node name following a begin_node token; cursor ends u32-aligned.
        fn parse_begin_node(self: *@This()) []const u8 {
            const node_name = self.read_cstr_advancing_it();
            self.i = align_to_u32(self.i);
            return node_name;
        }

        /// Reads a property's (len, name_offset) descriptor.
        fn parse_property_value_descriptor(self: *@This()) Property.ValueDescriptor {
            return Property.ValueDescriptor{
                .len = self.parse_int(u32),
                .name_offset = self.parse_int(u32),
            };
        }

        /// Resolves a property name from the FDT strings block (does not move the cursor).
        fn parse_string_in_string_table(self: *@This(), descriptor: Property.ValueDescriptor) []const u8 {
            const strings_offset = self.device_tree.header.device_tree_strings_offset;
            const string_offset = self.device_tree.base_address + strings_offset + descriptor.name_offset;
            const property_key_cstr = @intToPtr([*:0]u8, string_offset);
            const value = property_key_cstr[0..std.mem.len(property_key_cstr)];
            return value;
        }

        /// Reads a NUL-terminated string at the cursor and advances past the NUL
        /// (without u32 alignment — callers align afterwards if needed).
        fn read_cstr_advancing_it(self: *@This()) []const u8 {
            const cstr_len = std.mem.len(@ptrCast([*:0]const u8, self.slice[self.i..].ptr));
            const cstr = self.slice[self.i .. self.i + cstr_len];
            self.i += cstr_len + 1;
            return cstr;
        }

        /// Reads the next structure-block token; the cursor must be u32-aligned.
        /// NOTE(review): @intToEnum on an unknown token value is illegal
        /// behavior for this exhaustive enum — confirm inputs are trusted.
        fn parse_token(self: *@This()) Token {
            assert(@src(), self.i & 0b11 == 0);
            const token_int = self.parse_int(u32);
            //logger.debug("Trying to cast possible valid token {} into an enum\n", .{token_int});
            const token = @intToEnum(Token, token_int);
            return token;
        }

        /// Reads a big-endian integer at the cursor and advances by its size.
        fn parse_int(self: *@This(), comptime Int: type) Int {
            const int = read_big_endian(Int, self.slice[self.i..]);
            self.i += @sizeOf(Int);
            return int;
        }

        // FDT structure-block token values per the devicetree spec.
        const Token = enum(u32) {
            begin_node = 1,
            end_node = 2,
            property = 3,
            nop = 4,
            end = 9,
        };

        const Property = struct {
            name: []const u8,
            value: []const u8,

            // On-disk property header: value length + offset into the strings block.
            const ValueDescriptor = struct {
                len: u32,
                name_offset: u32,
            };
        };

        // Property value type tags (currently unused by the parser).
        const Types = enum(u32) {
            empty = 0,
            int32 = 1,
            int64 = 2,
            string = 3,
            phandle = 4,
            string_list = 5,
        };

        /// Rounds a cursor up to the next u32 boundary.
        inline fn align_to_u32(i: u64) u64 {
            return align_forward(i, @sizeOf(u32));
        }

        /// Scans forward at the current nesting level for a node matching
        /// `wanted_node_name` per `search_type`; leaves the cursor just inside
        /// the matched node. Non-matching nodes are skipped whole.
        fn find_node_from_current_offset(self: *@This(), wanted_node_name: []const u8, search_type: SearchType) ?[]const u8 {
            while (true) {
                const token = self.parse_token();
                switch (token) {
                    .begin_node => {
                        const node_name = self.parse_begin_node();
                        const found = switch (search_type) {
                            .exact => string_eq(node_name, wanted_node_name),
                            .start => starts_with(node_name, wanted_node_name),
                            .end => ends_with(node_name, wanted_node_name),
                        };
                        if (found) {
                            return node_name;
                        }
                        self.skip_node();
                    },
                    .property => self.skip_property(),
                    .end_node => break,
                    else => kernel.panic("NI: {}\n", .{token}),
                }
            }

            return null;
        }

        /// Returns the next child node's name (skipping properties), or null
        /// when the current node ends.
        pub fn get_subnode(self: *@This()) ?[]const u8 {
            while (true) {
                const token = self.parse_token();
                switch (token) {
                    .begin_node => {
                        const node_name = self.parse_begin_node();
                        return node_name;
                    },
                    .property => self.skip_property(),
                    .end_node => break,
                    else => unreachable,
                }
            }

            return null;
        }
    };
};

/// Aggregated parse output (memory map); currently only written by the
/// commented-out code in `StructureBlock.Parser.parse`.
pub const Result = struct {
    memory_regions: [1024]Memory.Region.Descriptor,
    memory_region_count: u64,
    reserved_memory_regions: [64]Memory.Region.Descriptor,
    reserved_memory_region_count: u64,
    address: u64,
};
src/kernel/arch/riscv64/device_tree.zig
const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; const fifo = std.fifo; const io = std.io; const math = std.math; const mem = std.mem; const testing = std.testing; const ArrayList = std.ArrayList; const deflate = @import("compressor.zig"); const inflate = @import("decompressor.zig"); const compressor = deflate.compressor; const decompressor = inflate.decompressor; const huffman_only = deflate.huffman_only; fn testSync(level: deflate.Compression, input: []const u8) !void { if (input.len == 0) { return; } var divided_buf = fifo .LinearFifo(u8, fifo.LinearFifoBufferType.Dynamic) .init(testing.allocator); defer divided_buf.deinit(); var whole_buf = std.ArrayList(u8).init(testing.allocator); defer whole_buf.deinit(); var multi_writer = io.multiWriter(.{ divided_buf.writer(), whole_buf.writer(), }).writer(); var comp = try compressor( testing.allocator, multi_writer, .{ .level = level }, ); defer comp.deinit(); { var decomp = try decompressor( testing.allocator, divided_buf.reader(), null, ); defer decomp.deinit(); // Write first half of the input and flush() var half: usize = (input.len + 1) / 2; var half_len: usize = half - 0; { _ = try comp.writer().writeAll(input[0..half]); // Flush try comp.flush(); // Read back var decompressed = try testing.allocator.alloc(u8, half_len); defer testing.allocator.free(decompressed); var read = try decomp.reader().readAll(decompressed); // read at least half try expect(read == half_len); try expect(mem.eql(u8, input[0..half], decompressed)); } // Write last half of the input and close() half_len = input.len - half; { _ = try comp.writer().writeAll(input[half..]); // Close try comp.close(); // Read back var decompressed = try testing.allocator.alloc(u8, half_len); defer testing.allocator.free(decompressed); var read = try decomp.reader().readAll(decompressed); try expect(read == half_len); try expect(mem.eql(u8, input[half..], decompressed)); // Extra read var final: [10]u8 = 
undefined; read = try decomp.reader().readAll(&final); try expect(read == 0); // expect ended stream to return 0 bytes _ = decomp.close(); } } _ = try comp.writer().writeAll(input); try comp.close(); // stream should work for ordinary reader too (reading whole_buf in one go) var whole_buf_reader = io.fixedBufferStream(whole_buf.items).reader(); var decomp = try decompressor(testing.allocator, whole_buf_reader, null); defer decomp.deinit(); var decompressed = try testing.allocator.alloc(u8, input.len); defer testing.allocator.free(decompressed); _ = try decomp.reader().readAll(decompressed); _ = decomp.close(); try expect(mem.eql(u8, input, decompressed)); } fn testToFromWithLevelAndLimit(level: deflate.Compression, input: []const u8, limit: u32) !void { var compressed = std.ArrayList(u8).init(testing.allocator); defer compressed.deinit(); var comp = try compressor(testing.allocator, compressed.writer(), .{ .level = level }); defer comp.deinit(); try comp.writer().writeAll(input); try comp.close(); if (limit > 0) { try expect(compressed.items.len <= limit); } var fib = io.fixedBufferStream(compressed.items); var decomp = try decompressor(testing.allocator, fib.reader(), null); defer decomp.deinit(); var decompressed = try testing.allocator.alloc(u8, input.len); defer testing.allocator.free(decompressed); var read: usize = try decomp.reader().readAll(decompressed); try expect(read == input.len); try expect(mem.eql(u8, input, decompressed)); if (builtin.zig_backend == .stage1) { try testSync(level, input); } } fn testToFromWithLimit(input: []const u8, limit: [11]u32) !void { try testToFromWithLevelAndLimit(.no_compression, input, limit[0]); try testToFromWithLevelAndLimit(.best_speed, input, limit[1]); try testToFromWithLevelAndLimit(.level_2, input, limit[2]); try testToFromWithLevelAndLimit(.level_3, input, limit[3]); try testToFromWithLevelAndLimit(.level_4, input, limit[4]); try testToFromWithLevelAndLimit(.level_5, input, limit[5]); try 
testToFromWithLevelAndLimit(.level_6, input, limit[6]); try testToFromWithLevelAndLimit(.level_7, input, limit[7]); try testToFromWithLevelAndLimit(.level_8, input, limit[8]); try testToFromWithLevelAndLimit(.best_compression, input, limit[9]); try testToFromWithLevelAndLimit(.huffman_only, input, limit[10]); } test "deflate/inflate" { var limits = [_]u32{0} ** 11; var test0 = [_]u8{}; var test1 = [_]u8{0x11}; var test2 = [_]u8{ 0x11, 0x12 }; var test3 = [_]u8{ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 }; var test4 = [_]u8{ 0x11, 0x10, 0x13, 0x41, 0x21, 0x21, 0x41, 0x13, 0x87, 0x78, 0x13 }; try testToFromWithLimit(&test0, limits); try testToFromWithLimit(&test1, limits); try testToFromWithLimit(&test2, limits); try testToFromWithLimit(&test3, limits); try testToFromWithLimit(&test4, limits); var large_data_chunk = try testing.allocator.alloc(u8, 100_000); defer testing.allocator.free(large_data_chunk); // fill with random data for (large_data_chunk) |_, i| { var mul: u8 = @truncate(u8, i); _ = @mulWithOverflow(u8, mul, mul, &mul); large_data_chunk[i] = mul; } try testToFromWithLimit(large_data_chunk, limits); } test "very long sparse chunk" { // A SparseReader returns a stream consisting of 0s ending with 65,536 (1<<16) 1s. // This tests missing hash references in a very large input. 
const SparseReader = struct { l: usize, // length cur: usize, // current position const Self = @This(); const Error = error{}; pub const Reader = io.Reader(*Self, Error, read); pub fn reader(self: *Self) Reader { return .{ .context = self }; } fn read(s: *Self, b: []u8) Error!usize { var n: usize = 0; // amount read if (s.cur >= s.l) { return 0; } n = b.len; var cur = s.cur + n; if (cur > s.l) { n -= cur - s.l; cur = s.l; } for (b[0..n]) |_, i| { if (s.cur + i >= s.l -| (1 << 16)) { b[i] = 1; } else { b[i] = 0; } } s.cur = cur; return n; } }; var comp = try compressor( testing.allocator, io.null_writer, .{ .level = .best_speed }, ); defer comp.deinit(); var writer = comp.writer(); var sparse = SparseReader{ .l = 0x23e8, .cur = 0 }; var reader = sparse.reader(); var read: usize = 1; var written: usize = 0; while (read > 0) { var buf: [1 << 15]u8 = undefined; // 32,768 bytes buffer read = try reader.read(&buf); written += try writer.write(buf[0..read]); } try expect(written == 0x23e8); } test "compressor reset" { for (std.enums.values(deflate.Compression)) |c| { try testWriterReset(c, null); try testWriterReset(c, "dict"); try testWriterReset(c, "hello"); } } fn testWriterReset(level: deflate.Compression, dict: ?[]const u8) !void { const filler = struct { fn writeData(c: anytype) !void { const msg = "all your base are belong to us"; try c.writer().writeAll(msg); try c.flush(); const hello = "hello world"; var i: usize = 0; while (i < 1024) : (i += 1) { try c.writer().writeAll(hello); } i = 0; while (i < 65000) : (i += 1) { try c.writer().writeAll("x"); } } }; var buf1 = ArrayList(u8).init(testing.allocator); defer buf1.deinit(); var buf2 = ArrayList(u8).init(testing.allocator); defer buf2.deinit(); var comp = try compressor( testing.allocator, buf1.writer(), .{ .level = level, .dictionary = dict }, ); defer comp.deinit(); try filler.writeData(&comp); try comp.close(); comp.reset(buf2.writer()); try filler.writeData(&comp); try comp.close(); try expect(mem.eql(u8, 
buf1.items, buf2.items)); } test "decompressor dictionary" { const dict = "hello world"; // dictionary const text = "hello again world"; var compressed = fifo .LinearFifo(u8, fifo.LinearFifoBufferType.Dynamic) .init(testing.allocator); defer compressed.deinit(); var comp = try compressor( testing.allocator, compressed.writer(), .{ .level = .level_5, .dictionary = null, // no dictionary }, ); defer comp.deinit(); // imitate a compressor with a dictionary try comp.writer().writeAll(dict); try comp.flush(); compressed.discard(compressed.readableLength()); // empty the output try comp.writer().writeAll(text); try comp.close(); var decompressed = try testing.allocator.alloc(u8, text.len); defer testing.allocator.free(decompressed); var decomp = try decompressor( testing.allocator, compressed.reader(), dict, ); defer decomp.deinit(); _ = try decomp.reader().readAll(decompressed); try expect(mem.eql(u8, decompressed, "hello again world")); } test "compressor dictionary" { const dict = "hello world"; const text = "hello again world"; var compressed_nd = fifo .LinearFifo(u8, fifo.LinearFifoBufferType.Dynamic) .init(testing.allocator); // compressed with no dictionary defer compressed_nd.deinit(); var compressed_d = ArrayList(u8).init(testing.allocator); // compressed with a dictionary defer compressed_d.deinit(); // imitate a compressor with a dictionary var comp_nd = try compressor( testing.allocator, compressed_nd.writer(), .{ .level = .level_5, .dictionary = null, // no dictionary }, ); defer comp_nd.deinit(); try comp_nd.writer().writeAll(dict); try comp_nd.flush(); compressed_nd.discard(compressed_nd.readableLength()); // empty the output try comp_nd.writer().writeAll(text); try comp_nd.close(); // use a compressor with a dictionary var comp_d = try compressor( testing.allocator, compressed_d.writer(), .{ .level = .level_5, .dictionary = dict, // with a dictionary }, ); defer comp_d.deinit(); try comp_d.writer().writeAll(text); try comp_d.close(); try 
expect(mem.eql(u8, compressed_nd.readableSlice(0), compressed_d.items)); } // Update the hash for best_speed only if d.index < d.maxInsertIndex // See https://golang.org/issue/2508 test "Go non-regression test for 2508" { var comp = try compressor( testing.allocator, io.null_writer, .{ .level = .best_speed }, ); defer comp.deinit(); var buf = [_]u8{0} ** 1024; var i: usize = 0; while (i < 131_072) : (i += 1) { try comp.writer().writeAll(&buf); try comp.close(); } } test "deflate/inflate string" { // Skip wasi because it does not support std.fs.openDirAbsolute() if (builtin.os.tag == .wasi) return error.SkipZigTest; const current_dir = try std.fs.openDirAbsolute(std.fs.path.dirname(@src().file).?, .{}); const testdata_dir = try current_dir.openDir("testdata", .{}); const StringTest = struct { filename: []const u8, limit: [11]u32, }; var deflate_inflate_string_tests = [_]StringTest{ .{ .filename = "compress-e.txt", .limit = [11]u32{ 100_018, // no_compression 50_650, // best_speed 50_960, // 2 51_150, // 3 50_930, // 4 50_790, // 5 50_790, // 6 50_790, // 7 50_790, // 8 50_790, // best_compression 43_683, // huffman_only }, }, .{ .filename = "rfc1951.txt", .limit = [11]u32{ 36_954, // no_compression 12_952, // best_speed 12_228, // 2 12_016, // 3 11_466, // 4 11_191, // 5 11_129, // 6 11_120, // 7 11_112, // 8 11_109, // best_compression 20_273, // huffman_only }, }, }; for (deflate_inflate_string_tests) |t| { const golden_file = try testdata_dir.openFile(t.filename, .{}); defer golden_file.close(); var golden = try golden_file.reader().readAllAlloc(testing.allocator, math.maxInt(usize)); defer testing.allocator.free(golden); try testToFromWithLimit(golden, t.limit); } } test "inflate reset" { const strings = [_][]const u8{ "lorem ipsum izzle fo rizzle", "the quick brown fox jumped over", }; var compressed_strings = [_]ArrayList(u8){ ArrayList(u8).init(testing.allocator), ArrayList(u8).init(testing.allocator), }; defer compressed_strings[0].deinit(); defer 
compressed_strings[1].deinit(); for (strings) |s, i| { var comp = try compressor( testing.allocator, compressed_strings[i].writer(), .{ .level = .level_6 }, ); defer comp.deinit(); try comp.writer().writeAll(s); try comp.close(); } var fib = io.fixedBufferStream(compressed_strings[0].items); var decomp = try decompressor(testing.allocator, fib.reader(), null); defer decomp.deinit(); var decompressed_0: []u8 = try decomp.reader() .readAllAlloc(testing.allocator, math.maxInt(usize)); defer testing.allocator.free(decompressed_0); fib = io.fixedBufferStream(compressed_strings[1].items); try decomp.reset(fib.reader(), null); var decompressed_1: []u8 = try decomp.reader() .readAllAlloc(testing.allocator, math.maxInt(usize)); defer testing.allocator.free(decompressed_1); _ = decomp.close(); try expect(strings[0].len == decompressed_0.len); try expect(strings[1].len == decompressed_1.len); try expect(mem.eql(u8, strings[0], decompressed_0)); try expect(mem.eql(u8, strings[1], decompressed_1)); } test "inflate reset dictionary" { const dict = "the lorem fox"; const strings = [_][]const u8{ "lorem ipsum izzle fo rizzle", "the quick brown fox jumped over", }; var compressed_strings = [_]ArrayList(u8){ ArrayList(u8).init(testing.allocator), ArrayList(u8).init(testing.allocator), }; defer compressed_strings[0].deinit(); defer compressed_strings[1].deinit(); for (strings) |s, i| { var comp = try compressor( testing.allocator, compressed_strings[i].writer(), .{ .level = .level_6 }, ); defer comp.deinit(); try comp.writer().writeAll(s); try comp.close(); } var fib = io.fixedBufferStream(compressed_strings[0].items); var decomp = try decompressor(testing.allocator, fib.reader(), dict); defer decomp.deinit(); var decompressed_0: []u8 = try decomp.reader() .readAllAlloc(testing.allocator, math.maxInt(usize)); defer testing.allocator.free(decompressed_0); fib = io.fixedBufferStream(compressed_strings[1].items); try decomp.reset(fib.reader(), dict); var decompressed_1: []u8 = try 
decomp.reader() .readAllAlloc(testing.allocator, math.maxInt(usize)); defer testing.allocator.free(decompressed_1); _ = decomp.close(); try expect(strings[0].len == decompressed_0.len); try expect(strings[1].len == decompressed_1.len); try expect(mem.eql(u8, strings[0], decompressed_0)); try expect(mem.eql(u8, strings[1], decompressed_1)); }
lib/std/compress/deflate/compressor_test.zig
const print = @import("std").debug.print;

// Demonstrates comptime numeric literals versus runtime-sized variables.
pub fn main() void {
    // Every numeric literal in Zig begins life as a comptime_int or
    // comptime_float: arbitrary-precision values that exist only at
    // compile time.
    //
    // Binding them immutably with "const" requires no size annotation
    // such as "u8", "i32", or "f64". The VALUES are baked straight into
    // the compiled executable; the IDENTIFIERS below never exist at
    // runtime.
    const immutable_int = 12345;
    const immutable_float = 987.654;

    print("Immutable: {}, {d:.3}; ", .{ immutable_int, immutable_float });

    // Mutability changes the picture. The literals are still
    // comptime_int / comptime_float, but an identifier that can be
    // reassigned must name a region of memory, and memory needs a
    // known size. So we give these bindings concrete runtime types,
    // and the comptime literals are coerced into them (provided they
    // fit!).
    comptime var mutable_int: u32 = 12345;
    comptime var mutable_float: f32 = 987.654;

    // The storage set aside for these identifiers can now be
    // overwritten with new (still comptime-known) values.
    mutable_int = 54321;
    mutable_float = 456.789;

    print("Mutable: {}, {d:.3}; ", .{ mutable_int, mutable_float });

    // Bonus: no guessing needed — @TypeOf reports exactly which type
    // each identifier ended up with.
    print("Types: {}, {}, {}, {}\n", .{
        @TypeOf(immutable_int),
        @TypeOf(immutable_float),
        @TypeOf(mutable_int),
        @TypeOf(mutable_float),
    });
}
exercises/066_comptime.zig
const std = @import("std"); const PrintHelper = @import("print_helper.zig").PrintHelper; const BuiltinPackage = @import("builtins.zig").BuiltinPackage; const ParseResult = @import("parse.zig").ParseResult; const Module = @import("parse.zig").Module; const ModuleParam = @import("parse.zig").ModuleParam; const ModuleCodeGen = @import("codegen.zig").ModuleCodeGen; const CodeGenResult = @import("codegen.zig").CodeGenResult; const ExpressionResult = @import("codegen.zig").ExpressionResult; const BufferValue = @import("codegen.zig").BufferValue; const FloatValue = @import("codegen.zig").FloatValue; const BufferDest = @import("codegen.zig").BufferDest; const Instruction = @import("codegen.zig").Instruction; const CodeGenCustomModuleInner = @import("codegen.zig").CodeGenCustomModuleInner; const CompiledScript = @import("compile.zig").CompiledScript; const State = struct { script: CompiledScript, module: ?Module, helper: PrintHelper, pub fn print(self: *State, comptime fmt: []const u8, args: var) !void { try self.helper.print(self, fmt, args); } pub fn printArgValue(self: *State, comptime arg_format: []const u8, arg: var) !void { if (comptime std.mem.eql(u8, arg_format, "identifier")) { try self.printIdentifier(arg); } else if (comptime std.mem.eql(u8, arg_format, "module_name")) { try self.printModuleName(arg); } else if (comptime std.mem.eql(u8, arg_format, "buffer_dest")) { try self.printBufferDest(arg); } else if (comptime std.mem.eql(u8, arg_format, "expression_result")) { try self.printExpressionResult(arg); } else { @compileError("unknown arg_format: \"" ++ arg_format ++ "\""); } } fn printIdentifier(self: *State, string: []const u8) !void { if (std.zig.Token.getKeyword(string) != null) { try self.print("@\"{str}\"", .{string}); } else { try self.print("{str}", .{string}); } } fn printModuleName(self: *State, module_index: usize) !void { const module = self.script.modules[module_index]; if (module.zig_package_name) |pkg_name| { try 
self.print("{identifier}.{identifier}", .{ pkg_name, module.builtin_name.? }); } else { try self.print("_module{usize}", .{module_index}); } } fn printExpressionResult(self: *State, result: ExpressionResult) (error{NoModule} || std.os.WriteError)!void { switch (result) { .nothing => unreachable, .temp_buffer => |temp_ref| try self.print("temps[{usize}]", .{temp_ref.index}), .temp_float => |temp_ref| try self.print("temp_float{usize}", .{temp_ref.index}), .literal_boolean => |value| try self.print("{bool}", .{value}), .literal_number => |value| try self.print("{number_literal}", .{value}), .literal_enum_value => |v| { if (v.payload) |payload| { try self.print(".{{ .{identifier} = {expression_result} }}", .{ v.label, payload.* }); } else { try self.print(".{identifier}", .{v.label}); } }, .literal_curve => |curve_index| try self.print("&_curve{usize}", .{curve_index}), .literal_track => |track_index| try self.print("_track{usize}", .{track_index}), .literal_module => |module_index| try self.print("{module_name}", .{module_index}), .self_param => |i| { const module = self.module orelse return error.NoModule; try self.print("params.{identifier}", .{module.params[i].name}); }, .track_param => |x| { try self.print("_result.params.{identifier}", .{self.script.tracks[x.track_index].params[x.param_index].name}); }, } } fn printBufferDest(self: *State, value: BufferDest) !void { switch (value) { .temp_buffer_index => |i| try self.print("temps[{usize}]", .{i}), .output_index => |i| try self.print("outputs[{usize}]", .{i}), } } }; pub fn generateZig(out: std.io.StreamSource.OutStream, builtin_packages: []const BuiltinPackage, script: CompiledScript) !void { var self: State = .{ .script = script, .module = null, .helper = PrintHelper.init(out), }; try self.print("// THIS FILE WAS GENERATED BY THE ZANGC COMPILER\n\n", .{}); try self.print("const std = @import(\"std\");\n", .{}); try self.print("const zang = @import(\"zang\");\n", .{}); for (builtin_packages) |pkg| { if 
(!std.mem.eql(u8, pkg.zig_package_name, "zang")) { try self.print("const {str} = @import(\"{str}\");\n", .{ pkg.zig_package_name, pkg.zig_import_path }); } } if (script.exported_modules.len > 0) try self.print("\n", .{}); for (script.exported_modules) |em| { try self.print("pub const {identifier} = {module_name};\n", .{ em.name, em.module_index }); } const num_builtins = blk: { var n: usize = 0; for (builtin_packages) |pkg| { n += pkg.builtins.len; } break :blk n; }; for (script.curves) |curve, curve_index| { try self.print("\n", .{}); try self.print("const _curve{usize} = [_]zang.CurveNode{{\n", .{curve_index}); for (curve.points) |point| { try self.print(".{{ .t = {number_literal}, .value = {number_literal} }},\n", .{ point.t, point.value }); } try self.print("}};\n", .{}); } for (script.tracks) |track, track_index| { try self.print("\n", .{}); try self.print("const _track{usize} = struct {{\n", .{track_index}); try self.print("const Params = struct {{\n", .{}); try printParamDecls(&self, track.params, false); try self.print("}};\n", .{}); try self.print("const notes = [_]zang.Notes(Params).SongEvent{{\n", .{}); for (track.notes) |note, note_index| { try self.print(".{{ .t = {number_literal}, .note_id = {usize}, .params = .{{", .{ note.t, note_index + 1 }); for (track.params) |param, param_index| { if (param_index > 0) { try self.print(",", .{}); } try self.print(" .{str} = {expression_result}", .{ param.name, script.track_results[track_index].note_values[note_index][param_index] }); } try self.print(" }} }},\n", .{}); } try self.print("}};\n", .{}); try self.print("}};\n", .{}); } for (script.modules) |module, i| { const module_result = script.module_results[i]; const inner = switch (module_result.inner) { .builtin => continue, .custom => |x| x, }; self.module = module; try self.print("\n", .{}); try self.print("const _module{usize} = struct {{\n", .{i}); try self.print("pub const num_outputs = {usize};\n", .{module_result.num_outputs}); try self.print("pub 
const num_temps = {usize};\n", .{module_result.num_temps}); try self.print("pub const Params = struct {{\n", .{}); try printParamDecls(&self, module.params, false); try self.print("}};\n", .{}); // this is for oxid. it wants a version of the params without sample_rate, which can be used with impulse queues. try self.print("pub const NoteParams = struct {{\n", .{}); try printParamDecls(&self, module.params, true); try self.print("}};\n", .{}); try self.print("\n", .{}); for (inner.fields) |field, j| { const field_module = script.modules[field.module_index]; try self.print("field{usize}: {module_name},\n", .{ j, field.module_index }); } for (inner.delays) |delay_decl, j| { try self.print("delay{usize}: zang.Delay({usize}),\n", .{ j, delay_decl.num_samples }); } for (inner.note_trackers) |note_tracker_decl, j| { try self.print("tracker{usize}: zang.Notes(_track{usize}.Params).NoteTracker,\n", .{ j, note_tracker_decl.track_index }); } for (inner.triggers) |trigger_decl, j| { try self.print("trigger{usize}: zang.Trigger(_track{usize}.Params),\n", .{ j, trigger_decl.track_index }); } try self.print("\n", .{}); try self.print("pub fn init() _module{usize} {{\n", .{i}); try self.print("return .{{\n", .{}); for (inner.fields) |field, j| { const field_module = script.modules[field.module_index]; try self.print(".field{usize} = {module_name}.init(),\n", .{ j, field.module_index }); } for (inner.delays) |delay_decl, j| { try self.print(".delay{usize} = zang.Delay({usize}).init(),\n", .{ j, delay_decl.num_samples }); } for (inner.note_trackers) |note_tracker_decl, j| { try self.print(".tracker{usize} = zang.Notes(_track{usize}.Params).NoteTracker.init(&_track{usize}.notes),\n", .{ j, note_tracker_decl.track_index, note_tracker_decl.track_index }); } for (inner.triggers) |trigger_decl, j| { try self.print(".trigger{usize} = zang.Trigger(_track{usize}.Params).init(),\n", .{ j, trigger_decl.track_index }); } try self.print("}};\n", .{}); try self.print("}}\n", .{}); try 
self.print("\n", .{}); try self.print("pub fn paint(self: *_module{usize}, span: zang.Span, outputs: [num_outputs][]f32, temps: [num_temps][]f32, note_id_changed: bool, params: Params) void {{\n", .{i}); for (inner.instructions) |instr| { try genInstruction(&self, module, inner, instr, "span", "note_id_changed"); } try self.print("}}\n", .{}); try self.print("}};\n", .{}); } self.helper.finish(); } fn printParamDecls(self: *State, params: []const ModuleParam, skip_sample_rate: bool) !void { for (params) |param| { if (skip_sample_rate and std.mem.eql(u8, param.name, "sample_rate")) { continue; } const type_name = switch (param.param_type) { .boolean => "bool", .buffer => "[]const f32", .constant => "f32", .constant_or_buffer => "zang.ConstantOrBuffer", .curve => "[]const zang.CurveNode", .one_of => |e| e.zig_name, }; try self.print("{identifier}: {str},\n", .{ param.name, type_name }); } } fn genInstruction( self: *State, module: Module, inner: CodeGenCustomModuleInner, instr: Instruction, span: []const u8, note_id_changed: []const u8, ) (error{NoModule} || std.os.WriteError)!void { switch (instr) { .copy_buffer => |x| { const func: []const u8 = switch (x.out) { .output_index => "addInto", else => "copy", }; try self.print("zang.{str}({str}, {buffer_dest}, {expression_result});\n", .{ func, span, x.out, x.in }); }, .float_to_buffer => |x| { const func: []const u8 = switch (x.out) { .output_index => "addScalarInto", else => "set", }; try self.print("zang.{str}({str}, {buffer_dest}, {expression_result});\n", .{ func, span, x.out, x.in }); }, .cob_to_buffer => |x| { try self.print("switch (params.{identifier}) {{\n", .{module.params[x.in_self_param].name}); switch (x.out) { .output_index => { try self.print(".constant => |v| zang.addScalarInto({str}, {buffer_dest}, v),\n", .{ span, x.out }); try self.print(".buffer => |v| zang.addInto({str}, {buffer_dest}, v),\n", .{ span, x.out }); }, else => { try self.print(".constant => |v| zang.set({str}, {buffer_dest}, v),\n", .{ 
span, x.out }); try self.print(".buffer => |v| zang.copy({str}, {buffer_dest}, v),\n", .{ span, x.out }); }, } try self.print("}}\n", {}); }, .arith_float => |x| { try self.print("const temp_float{usize} = ", .{x.out.temp_float_index}); switch (x.op) { .abs => try self.print("std.math.fabs({expression_result});\n", .{x.a}), .cos => try self.print("std.math.cos({expression_result});\n", .{x.a}), .neg => try self.print("-{expression_result};\n", .{x.a}), .sin => try self.print("std.math.sin({expression_result});\n", .{x.a}), .sqrt => try self.print("std.math.sqrt({expression_result});\n", .{x.a}), } }, .arith_buffer => |x| { try self.print("{{\n", .{}); try self.print("var i = {str}.start;\n", .{span}); try self.print("while (i < {str}.end) : (i += 1) {{\n", .{span}); try self.print("{buffer_dest}[i] ", .{x.out}); switch (x.out) { .output_index => try self.print("+= ", .{}), else => try self.print("= ", .{}), } switch (x.op) { .abs => try self.print("std.math.fabs({expression_result}[i]);\n", .{x.a}), .cos => try self.print("std.math.cos({expression_result}[i]);\n", .{x.a}), .neg => try self.print("-{expression_result}[i];\n", .{x.a}), .sin => try self.print("std.math.sin({expression_result}[i]);\n", .{x.a}), .sqrt => try self.print("std.math.sqrt({expression_result}[i]);\n", .{x.a}), } try self.print("}}\n", .{}); try self.print("}}\n", .{}); }, .arith_float_float => |x| { try self.print("const temp_float{usize} = ", .{x.out.temp_float_index}); switch (x.op) { .add => try self.print("{expression_result} + {expression_result};\n", .{ x.a, x.b }), .sub => try self.print("{expression_result} - {expression_result};\n", .{ x.a, x.b }), .mul => try self.print("{expression_result} * {expression_result};\n", .{ x.a, x.b }), .div => try self.print("{expression_result} / {expression_result};\n", .{ x.a, x.b }), .pow => try self.print("std.math.pow(f32, {expression_result}, {expression_result});\n", .{ x.a, x.b }), .max => try self.print("std.math.max({expression_result}, 
{expression_result});\n", .{ x.a, x.b }), .min => try self.print("std.math.min({expression_result}, {expression_result});\n", .{ x.a, x.b }), } }, .arith_float_buffer => |x| { switch (x.op) { .sub, .div, .pow, .max, .min => { try self.print("{{\n", .{}); try self.print("var i = {str}.start;\n", .{span}); try self.print("while (i < {str}.end) : (i += 1) {{\n", .{span}); try self.print("{buffer_dest}[i] ", .{x.out}); switch (x.out) { .output_index => try self.print("+= ", .{}), else => try self.print("= ", .{}), } switch (x.op) { .sub => try self.print("{expression_result} - {expression_result}[i];\n", .{ x.a, x.b }), .div => try self.print("{expression_result} / {expression_result}[i];\n", .{ x.a, x.b }), .pow => try self.print("std.math.pow(f32, {expression_result}, {expression_result}[i]);\n", .{ x.a, x.b }), .max => try self.print("std.math.max({expression_result}, {expression_result}[i]);\n", .{ x.a, x.b }), .min => try self.print("std.math.min({expression_result}, {expression_result}[i]);\n", .{ x.a, x.b }), else => unreachable, } try self.print("}}\n", .{}); try self.print("}}\n", .{}); }, .add, .mul => { switch (x.out) { .output_index => {}, else => try self.print("zang.zero({str}, {buffer_dest});\n", .{ span, x.out }), } switch (x.op) { .add => try self.print("zang.addScalar", .{}), .mul => try self.print("zang.multiplyScalar", .{}), else => unreachable, } // swap order, since the supported operators are commutative try self.print("({str}, {buffer_dest}, {expression_result}, {expression_result});\n", .{ span, x.out, x.b, x.a }); }, } }, .arith_buffer_float => |x| { switch (x.op) { .sub, .div, .pow, .max, .min => { try self.print("{{\n", .{}); try self.print("var i = {str}.start;\n", .{span}); try self.print("while (i < {str}.end) : (i += 1) {{\n", .{span}); try self.print("{buffer_dest}[i] ", .{x.out}); switch (x.out) { .output_index => try self.print("+= ", .{}), else => try self.print("= ", .{}), } switch (x.op) { .sub => try 
self.print("{expression_result}[i] - {expression_result};\n", .{ x.a, x.b }), .div => try self.print("{expression_result}[i] / {expression_result};\n", .{ x.a, x.b }), .pow => try self.print("std.math.pow(f32, {expression_result}[i], {expression_result});\n", .{ x.a, x.b }), .max => try self.print("std.math.max({expression_result}[i], {expression_result});\n", .{ x.a, x.b }), .min => try self.print("std.math.min({expression_result}[i], {expression_result});\n", .{ x.a, x.b }), else => unreachable, } try self.print("}}\n", .{}); try self.print("}}\n", .{}); }, else => { switch (x.out) { .output_index => {}, else => try self.print("zang.zero({str}, {buffer_dest});\n", .{ span, x.out }), } switch (x.op) { .add => try self.print("zang.addScalar", .{}), .mul => try self.print("zang.multiplyScalar", .{}), else => unreachable, } try self.print("({str}, {buffer_dest}, {expression_result}, {expression_result});\n", .{ span, x.out, x.a, x.b }); }, } }, .arith_buffer_buffer => |x| { switch (x.op) { .sub, .div, .pow, .max, .min => { try self.print("{{\n", .{}); try self.print("var i = {str}.start;\n", .{span}); try self.print("while (i < {str}.end) : (i += 1) {{\n", .{span}); try self.print("{buffer_dest}[i] ", .{x.out}); switch (x.out) { .output_index => try self.print("+= ", .{}), else => try self.print("= ", .{}), } switch (x.op) { .sub => try self.print("{expression_result}[i] - {expression_result}[i];\n", .{ x.a, x.b }), .div => try self.print("{expression_result}[i] / {expression_result}[i];\n", .{ x.a, x.b }), .pow => try self.print("std.math.pow(f32, {expression_result}[i], {expression_result}[i]);\n", .{ x.a, x.b }), .max => try self.print("std.math.max({expression_result}[i], {expression_result}[i]);\n", .{ x.a, x.b }), .min => try self.print("std.math.min({expression_result}[i], {expression_result}[i]);\n", .{ x.a, x.b }), else => unreachable, } try self.print("}}\n", .{}); try self.print("}}\n", .{}); }, else => { switch (x.out) { .output_index => {}, else => try 
self.print("zang.zero({str}, {buffer_dest});\n", .{ span, x.out }), } switch (x.op) { .add => try self.print("zang.add", .{}), .mul => try self.print("zang.multiply", .{}), else => unreachable, } try self.print("({str}, {buffer_dest}, {expression_result}, {expression_result});\n", .{ span, x.out, x.a, x.b }); }, } }, .call => |call| { const field_module_index = inner.fields[call.field_index].module_index; const callee_module = self.script.modules[field_module_index]; switch (call.out) { .output_index => {}, else => try self.print("zang.zero({str}, {buffer_dest});\n", .{ span, call.out }), } try self.print("self.field{usize}.paint({str}, .{{", .{ call.field_index, span }); try self.print("{buffer_dest}}}, .{{", .{call.out}); // callee temps for (call.temps) |n, j| { if (j > 0) { try self.print(", ", .{}); } try self.print("temps[{usize}]", .{n}); } // callee params try self.print("}}, {identifier}, .{{\n", .{note_id_changed}); for (call.args) |arg, j| { const callee_param = callee_module.params[j]; try self.print(".{identifier} = ", .{callee_param.name}); if (callee_param.param_type == .constant_or_buffer) { // coerce to ConstantOrBuffer? 
switch (arg) { .nothing => {}, .temp_buffer => |temp_ref| try self.print("zang.buffer(temps[{usize}])", .{temp_ref.index}), .temp_float => |temp_ref| try self.print("zang.constant(temp_float{usize})", .{temp_ref.index}), .literal_boolean => unreachable, .literal_number => |value| try self.print("zang.constant({number_literal})", .{value}), .literal_enum_value => unreachable, .literal_curve => unreachable, .literal_track => unreachable, .literal_module => unreachable, .self_param => |index| { const param = module.params[index]; switch (param.param_type) { .boolean => unreachable, .buffer => try self.print("zang.buffer(params.{identifier})", .{param.name}), .constant => try self.print("zang.constant(params.{identifier})", .{param.name}), .constant_or_buffer => try self.print("params.{identifier}", .{param.name}), .curve => unreachable, .one_of => unreachable, } }, .track_param => |x| { const param = self.script.tracks[x.track_index].params[x.param_index]; switch (param.param_type) { .boolean => unreachable, .buffer => try self.print("zang.buffer(_result.params.{identifier})", .{param.name}), .constant => try self.print("zang.constant(_result.params.{identifier})", .{param.name}), .constant_or_buffer => try self.print("_result.params.{identifier}", .{param.name}), .curve => unreachable, .one_of => unreachable, } }, } } else { try self.print("{expression_result}", .{arg}); } try self.print(",\n", .{}); } try self.print("}});\n", .{}); }, .track_call => |track_call| { // FIXME hacked in support for params.note_on. // i really need to rethink how note_on works and whether it belongs in "user land" (params) or not. 
const has_note_on = for (module.params) |param| { if (std.mem.eql(u8, param.name, "note_on")) break true; } else false; if (has_note_on) { try self.print("if (params.note_on and {identifier}) {{\n", .{note_id_changed}); } else { try self.print("if ({identifier}) {{\n", .{note_id_changed}); } try self.print("self.tracker{usize}.reset();\n", .{track_call.note_tracker_index}); try self.print("self.trigger{usize}.reset();\n", .{track_call.trigger_index}); try self.print("}}\n", .{}); // FIXME protect against division by zero? try self.print("const _iap{usize} = self.tracker{usize}.consume(params.sample_rate / {expression_result}, {str}.end - {str}.start);\n", .{ track_call.note_tracker_index, track_call.note_tracker_index, track_call.speed, span, span }); try self.print("var _ctr{usize} = self.trigger{usize}.counter({str}, _iap{usize});\n", .{ track_call.trigger_index, track_call.trigger_index, span, track_call.note_tracker_index }); try self.print("while (self.trigger{usize}.next(&_ctr{usize})) |_result| {{\n", .{ track_call.trigger_index, track_call.trigger_index }); if (has_note_on) { try self.print("const _new_note = (params.note_on and {identifier}) or _result.note_id_changed;\n", .{note_id_changed}); } else { try self.print("const _new_note = {identifier} or _result.note_id_changed;\n", .{note_id_changed}); } for (track_call.instructions) |sub_instr| { try genInstruction(self, module, inner, sub_instr, "_result.span", "_new_note"); } try self.print("}}\n", .{}); }, .delay => |delay| { // this next line kind of sucks, if the delay loop iterates more than once, // we'll have done some overlapping zeroing. // maybe readDelayBuffer should do the zeroing internally. 
switch (delay.out) { .output_index => {}, else => try self.print("zang.zero({str}, {buffer_dest});\n", .{ span, delay.out }), } try self.print("{{\n", .{}); try self.print("var start = span.start;\n", .{}); try self.print("const end = span.end;\n", .{}); try self.print("while (start < end) {{\n", .{}); try self.print("// temps[{usize}] will be the destination for writing into the feedback buffer\n", .{ delay.feedback_out_temp_buffer_index, }); try self.print("zang.zero(zang.Span.init(start, end), temps[{usize}]);\n", .{ delay.feedback_out_temp_buffer_index, }); try self.print("// temps[{usize}] will contain the delay buffer's previous contents\n", .{ delay.feedback_temp_buffer_index, }); try self.print("zang.zero(zang.Span.init(start, end), temps[{usize}]);\n", .{ delay.feedback_temp_buffer_index, }); try self.print("const samples_read = self.delay{usize}.readDelayBuffer(temps[{usize}][start..end]);\n", .{ delay.delay_index, delay.feedback_temp_buffer_index, }); try self.print("const inner_span = zang.Span.init(start, start + samples_read);\n", .{}); // FIXME script should be able to output separately into the delay buffer, and the final result. // for now, i'm hardcoding it so that delay buffer is copied to final result, and the delay expression // is sent to the delay buffer. 
i need some new syntax in the language before i can implement // this properly try self.print("\n", .{}); //try indent(out, indentation); //try out.print("// copy the old delay buffer contents into the result (hardcoded for now)\n", .{}); //try indent(out, indentation); //try out.print("zang.addInto({str}, ", .{span}); //try printBufferDest(out, delay_begin.out); //try out.print(", temps[{usize}]);\n", .{delay_begin.feedback_temp_buffer_index}); //try out.print("\n", .{}); try self.print("// inner expression\n", .{}); for (delay.instructions) |sub_instr| { try genInstruction(self, module, inner, sub_instr, "inner_span", note_id_changed); } // end try self.print("\n", .{}); try self.print("// write expression result into the delay buffer\n", .{}); try self.print("self.delay{usize}.writeDelayBuffer(temps[{usize}][start..start + samples_read]);\n", .{ delay.delay_index, delay.feedback_out_temp_buffer_index, }); try self.print("start += samples_read;\n", .{}); try self.print("}}\n", .{}); try self.print("}}\n", .{}); }, } }
src/zangscript/codegen_zig.zig
const std = @import("std");

/// Names of all SDL-based example programs; each one gets its own run step.
const examples = [_][]const u8{
    "play",
    "song",
    "subsong",
    "envelope",
    "stereo",
    "curve",
    "detuned",
    "laser",
    "portamento",
    "arpeggiator",
    "sampler",
    "polyphony",
    "polyphony2",
    "delay",
    "mouse",
    "two",
    "script",
    "script_runtime_mono",
    "script_runtime_poly",
    "vibrato",
};

/// Build entry point: registers the "test" step, one run step per example,
/// the "write_wav" example, and the "zangc" compiler build step.
pub fn build(b: *std.build.Builder) void {
    b.step("test", "Run all tests").dependOn(&b.addTest("test.zig").step);
    inline for (examples) |name| {
        const run_cmd = example(b, name).run();
        b.step(name, "Run example '" ++ name ++ "'").dependOn(&run_cmd.step);
    }
    b.step("write_wav", "Run example 'write_wav'").dependOn(&writeWav(b).run().step);
    b.step("zangc", "Build zangscript compiler").dependOn(&zangc(b).step);
}

/// Configures one SDL example executable. The concrete example source file is
/// selected at comptime and handed to examples/example.zig via the "example"
/// build option.
fn example(
    b: *std.build.Builder,
    comptime name: []const u8,
) *std.build.LibExeObjStep {
    const exe = b.addExecutable(name, "examples/example.zig");
    exe.setBuildMode(b.standardReleaseOptions());
    exe.setOutputDir("zig-cache");
    exe.addPackagePath("wav", "examples/zig-wav/wav.zig");
    exe.addPackagePath("zang", "src/zang.zig");
    exe.addPackagePath("zang-12tet", "src/zang-12tet.zig");
    exe.addPackagePath("zangscript", "src/zangscript.zig");
    exe.addBuildOption([]const u8, "example", "\"example_" ++ name ++ ".zig\"");
    exe.linkSystemLibrary("SDL2");
    exe.linkSystemLibrary("c");
    return exe;
}

/// Configures the "write_wav" example executable (no SDL dependency).
fn writeWav(b: *std.build.Builder) *std.build.LibExeObjStep {
    const exe = b.addExecutable("write_wav", "examples/write_wav.zig");
    exe.setBuildMode(b.standardReleaseOptions());
    exe.setOutputDir("zig-cache");
    exe.addPackagePath("wav", "examples/zig-wav/wav.zig");
    exe.addPackagePath("zang", "src/zang.zig");
    exe.addPackagePath("zang-12tet", "src/zang-12tet.zig");
    return exe;
}

/// Configures the standalone zangscript compiler executable.
fn zangc(b: *std.build.Builder) *std.build.LibExeObjStep {
    const exe = b.addExecutable("zangc", "tools/zangc.zig");
    exe.setBuildMode(b.standardReleaseOptions());
    exe.setOutputDir("zig-cache");
    exe.addPackagePath("zangscript", "src/zangscript.zig");
    return exe;
}
build.zig
const sabaton = @import("root").sabaton;
const std = @import("std");

// Physical base/size of the PCIe ECAM (config space) region, parsed from the DTB "reg".
var bus0base: usize = undefined;
var bus0size: usize = undefined;
// Bump-allocation pointers for assigning 32-bit and 64-bit memory BARs,
// parsed from the DTB "ranges".
var bar32base: usize = undefined;
var bar64base: usize = undefined;

/// Sizes and assigns every memory BAR of `dev`, allocating address space from
/// the `bar32base`/`bar64base` bump pointers, then enables memory decoding.
/// Always returns false so `scan` visits every device.
fn pci_bars_callback(dev: Addr) bool {
    const header_type = dev.get_header_type();
    // Header type 0 (endpoint) has 6 BARs, type 1 (PCI-PCI bridge) has 2.
    const num_bars: u8 = switch (header_type & 0x7F) {
        0x00 => 6,
        0x01 => 2,
        else => 0,
    };

    var bar_idx: u8 = 0;
    while (bar_idx < num_bars) : (bar_idx += 1) {
        const bar_bits = dev.read(u32, 0x10 + bar_idx * 4);
        // Standard BAR sizing: write all-ones, read back the size mask.
        dev.write(u32, 0x10 + bar_idx * 4, 0xFFFFFFFF);
        const bar_value = dev.read(u32, 0x10 + bar_idx * 4);

        if (bar_bits & 1 != 0)
            continue; // Not a memory BAR

        // Memory BAR type field 0b10 means 64-bit; it consumes the next BAR
        // slot as the high half of the address.
        const is64 = ((bar_value & 0b110) >> 1) == 2;

        var bar_size = @as(u64, bar_value & 0xFFFFFFF0);
        if (is64) {
            dev.write(u32, 0x10 + (bar_idx + 1) * 4, 0xFFFFFFFF);
            bar_size |= @as(u64, dev.read(u32, 0x10 + (bar_idx + 1) * 4)) << 32;
        }

        // Negate the size mask to obtain the BAR size.
        bar_size = ~bar_size +% 1;
        if (!is64) {
            bar_size &= (1 << 32) - 1;
        }

        if (bar_size == 0)
            continue;

        var base = if (is64) &bar64base else &bar32base;

        // Align the allocation pointer up to the BAR size (sizes are powers of two).
        base.* += bar_size - 1;
        base.* &= ~(bar_size - 1);

        if (sabaton.debug) {
            if (is64) {
                sabaton.puts("64 bit BAR: \n");
            } else {
                sabaton.puts("32 bit BAR: \n");
            }
            sabaton.log_hex("  BAR index: ", bar_idx);
            sabaton.log_hex("  BAR bits: ", bar_bits);
            sabaton.log_hex("  BAR size: ", bar_size);
            sabaton.log_hex("  BAR addr: ", base.*);
        }

        // Write the assigned address back, preserving the low type bits.
        dev.write(u32, 0x10 + bar_idx * 4, @truncate(u32, base.*) | bar_bits);
        if (is64) {
            dev.write(u32, 0x10 + (bar_idx + 1) * 4, @truncate(u32, base.* >> 32));
        }

        // Enable memory space decoding in the command register.
        dev.write(u16, 4, 1 << 1);

        // Advance the bump pointer past this BAR.
        base.* += bar_size;
        // A 64-bit BAR occupies two BAR slots.
        bar_idx += @boolToInt(is64);
    }

    // We never want to stop iterating
    return false;
}

/// Locates the PCIe controller in the DTB, maps its config space and 64-bit
/// BAR window, then scans bus 0 and assigns BARs to every device found.
pub fn init_from_dtb(root: *sabaton.paging.Root) void {
    const pci_blob = sabaton.vital(sabaton.dtb.find("pcie@", "reg"), "Cannot find pci base dtb", true);
    bus0base = std.mem.readIntBig(u64, pci_blob[0..][0..8]);
    bus0size = std.mem.readIntBig(u64, pci_blob[8..][0..8]);

    if (sabaton.debug) {
        sabaton.log_hex("PCI config space base: ", bus0base);
        sabaton.log_hex("PCI config space size: ", bus0size);
    }

    // Identity-map the config space in both the lower and upper half.
    sabaton.paging.map(bus0base, bus0base, bus0size, .rw, .mmio, root);
    sabaton.paging.map(bus0base + sabaton.upper_half_phys_base, bus0base, bus0size, .rw, .mmio, root);

    const bar_blob = sabaton.vital(sabaton.dtb.find("pcie@", "ranges"), "Cannot find pci ranges dtb", true);

    // NOTE(review): fixed offsets into "ranges" assume the QEMU virt layout —
    // verify against the platform's DTB if this is reused elsewhere.
    bar32base = std.mem.readIntBig(u64, bar_blob[0x28..][0..8]);
    bar64base = std.mem.readIntBig(u64, bar_blob[0x3C..][0..8]);
    const bar32size = std.mem.readIntBig(u64, bar_blob[0x30..][0..8]);
    const bar64size = std.mem.readIntBig(u64, bar_blob[0x44..][0..8]);

    if (sabaton.debug) {
        sabaton.log_hex("PCI BAR32 base: ", bar32base);
        sabaton.log_hex("PCI BAR32 size: ", bar32size);
        sabaton.log_hex("PCI BAR64 base: ", bar64base);
        sabaton.log_hex("PCI BAR64 size: ", bar64size);
    }

    // This should already be present in mmio region, if it's not, open an issue.
    // sabaton.paging.map(bar32base, bar32base, bar32size, .rw, .mmio, root);
    // sabaton.paging.map(bar32base + sabaton.upper_half_phys_base, bar32base, bar32size, .rw, .mmio, root);

    // Bug fix: identity-map the 64-bit BAR window of size `bar64size`. The
    // previous calls passed (virt=bar64base, phys=bar64size, size=bus0size)
    // and (virt=bar64base+offset, phys=bar64size, size=bar64size), i.e. the
    // window *size* in place of the physical address — mapping the wrong
    // physical range. Compare with the bus0base mappings above.
    sabaton.paging.map(bar64base, bar64base, bar64size, .rw, .mmio, root);
    sabaton.paging.map(bar64base + sabaton.upper_half_phys_base, bar64base, bar64size, .rw, .mmio, root);

    _ = scan(pci_bars_callback);
}

/// Address of a single PCI function on bus 0, plus typed ECAM config accessors.
pub const Addr = struct {
    bus: u8,
    device: u5,
    function: u3,

    // ECAM offset for this function. NOTE(review): `bus` is not folded into
    // the address (it would be `<< 20`); correct only for bus 0, which is the
    // only bus `scan` visits.
    fn mmio(self: @This(), offset: u8) u64 {
        return bus0base + (@as(u64, self.device) << 15 | @as(u64, self.function) << 12 | @as(u64, offset));
    }

    pub fn read(self: @This(), comptime T: type, offset: u8) T {
        return @intToPtr(*volatile T, self.mmio(offset)).*;
    }

    pub fn write(self: @This(), comptime T: type, offset: u8, value: T) void {
        @intToPtr(*volatile T, self.mmio(offset)).* = value;
    }

    pub fn get_vendor_id(self: @This()) u16 {
        return self.read(u16, 0x00);
    }

    pub fn get_product_id(self: @This()) u16 {
        return self.read(u16, 0x02);
    }

    pub fn get_class(self: @This()) u8 {
        return self.read(u8, 0x0B);
    }

    pub fn get_subclass(self: @This()) u8 {
        return self.read(u8, 0x0A);
    }

    pub fn get_progif(self: @This()) u8 {
        return self.read(u8, 0x09);
    }

    pub fn get_header_type(self: @This()) u8 {
        return self.read(u8, 0x0E);
    }
};

/// Visits every present function of one device slot with `callback`.
/// Returns true as soon as the callback asks to stop the scan.
fn device_scan(callback: fn (Addr) bool, bus: u8, device: u5) bool {
    var addr: Addr = .{
        .bus = bus,
        .device = device,
        .function = 0,
    };

    if (addr.get_vendor_id() == 0xFFFF)
        return false; // Device not present, ignore

    if (callback(addr))
        return true;

    // Bit 7 of the header type marks a multifunction device.
    if (addr.get_header_type() & 0x80 == 0)
        return false; // Not multifunction device, ignore

    addr.function += 1;
    while (addr.function < (1 << 3)) : (addr.function += 1) {
        // Robustness fix: skip absent functions (vendor ID reads back as
        // all-ones) instead of handing them to the callback.
        if (addr.get_vendor_id() == 0xFFFF)
            continue;
        if (callback(addr))
            return true;
    }
    return false;
}

/// Visits every device slot on one bus. Returns true if the callback stopped the scan.
fn bus_scan(callback: fn (Addr) bool, bus: u8) bool {
    var device: usize = 0;
    while (device < (1 << 5)) : (device += 1) {
        if (device_scan(callback, bus, @truncate(u5, device)))
            return true;
    }
    return false;
}

/// Scans bus 0 only (see `Addr.mmio`). Returns true if the callback stopped the scan.
pub fn scan(callback: fn (Addr) bool) bool {
    return bus_scan(callback, 0);
}
src/platform/pci.zig
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const L = std.unicode.utf8ToUtf16LeStringLiteral;
const zwin32 = @import("zwin32");
const w = zwin32.base;
const d3d12 = zwin32.d3d12;
const hrPanic = zwin32.hrPanic;
const hrPanicOnFail = zwin32.hrPanicOnFail;
const zd3d12 = @import("zd3d12");
const common = @import("common");
const GuiRenderer = common.GuiRenderer;
const c = common.c;
const zm = @import("zmath");

// We need to export below symbols for DirectX 12 Agility SDK.
pub export const D3D12SDKVersion: u32 = 4;
pub export const D3D12SDKPath: [*:0]const u8 = ".\\d3d12\\";

const content_dir = @import("build_options").content_dir;

const window_name = "zig-gamedev: intro 4";
const window_width = 1920;
const window_height = 1080;

// By convention, we use 'Pso_' prefix for structures that are also defined in HLSL code
// (see 'DrawConst' and 'FrameConst' in intro4.hlsl).
const Pso_DrawConst = struct {
    object_to_world: [16]f32,
};
const Pso_FrameConst = struct {
    world_to_clip: [16]f32,
};

// CPU-side vertex layout; must agree with the input layout declared in init()
// (position at byte offset 0, normal at 12, texcoord at 24; stride 32).
const Vertex = struct {
    position: [3]f32,
    normal: [3]f32,
    texcoord: [2]f32,
};

const DemoState = struct {
    gctx: zd3d12.GraphicsContext,
    guictx: GuiRenderer,
    frame_stats: common.FrameStats,

    non_bindless_pso: zd3d12.PipelineHandle,
    bindless_pso: zd3d12.PipelineHandle,
    is_bindless_mode_active: bool,

    vertex_buffer: zd3d12.ResourceHandle,
    index_buffer: zd3d12.ResourceHandle,

    depth_texture: zd3d12.ResourceHandle,
    depth_texture_dsv: d3d12.CPU_DESCRIPTOR_HANDLE,

    mesh_num_vertices: u32,
    mesh_num_indices: u32,
    mesh_texture: zd3d12.ResourceHandle,
    mesh_texture_srv: d3d12.CPU_DESCRIPTOR_HANDLE,

    camera: struct {
        position: [3]f32,
        forward: [3]f32,
        pitch: f32,
        yaw: f32,
    },
    mouse: struct {
        cursor_prev_x: i32,
        cursor_prev_y: i32,
    },
};

/// Creates the window, D3D12 context, pipelines, mesh/texture resources and
/// uploads all GPU data. Caller must pair with deinit().
fn init(gpa_allocator: std.mem.Allocator) DemoState {
    // Create application window and initialize dear imgui library.
    const window = common.initWindow(gpa_allocator, window_name, window_width, window_height) catch unreachable;

    // Create temporary memory allocator for use during initialization. We pass this allocator to all
    // subsystems that need memory and then free everyting with a single deallocation.
    var arena_allocator_state = std.heap.ArenaAllocator.init(gpa_allocator);
    defer arena_allocator_state.deinit();
    const arena_allocator = arena_allocator_state.allocator();

    // Create DirectX 12 context.
    var gctx = zd3d12.GraphicsContext.init(gpa_allocator, window);

    var non_bindless_pso: zd3d12.PipelineHandle = undefined;
    var bindless_pso: zd3d12.PipelineHandle = undefined;
    {
        const input_layout_desc = [_]d3d12.INPUT_ELEMENT_DESC{
            d3d12.INPUT_ELEMENT_DESC.init("POSITION", 0, .R32G32B32_FLOAT, 0, 0, .PER_VERTEX_DATA, 0),
            d3d12.INPUT_ELEMENT_DESC.init("_Normal", 0, .R32G32B32_FLOAT, 0, 12, .PER_VERTEX_DATA, 0),
            // Bug fix: texcoord is [2]f32 (8 bytes at offset 24 of a 32-byte
            // vertex), so its format must be R32G32_FLOAT. The previous
            // R32G32B32_FLOAT read 12 bytes, spilling into the next vertex.
            d3d12.INPUT_ELEMENT_DESC.init("_Texcoord", 0, .R32G32_FLOAT, 0, 24, .PER_VERTEX_DATA, 0),
        };
        var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
        pso_desc.InputLayout = .{
            .pInputElementDescs = &input_layout_desc,
            .NumElements = input_layout_desc.len,
        };
        pso_desc.RTVFormats[0] = .R8G8B8A8_UNORM;
        pso_desc.NumRenderTargets = 1;
        pso_desc.DSVFormat = .D32_FLOAT;
        pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf;
        pso_desc.PrimitiveTopologyType = .TRIANGLE;

        non_bindless_pso = gctx.createGraphicsShaderPipeline(
            arena_allocator,
            &pso_desc,
            content_dir ++ "shaders/intro4.vs.cso",
            content_dir ++ "shaders/intro4.ps.cso",
        );
        bindless_pso = gctx.createGraphicsShaderPipeline(
            arena_allocator,
            &pso_desc,
            content_dir ++ "shaders/intro4_bindless.vs.cso",
            content_dir ++ "shaders/intro4_bindless.ps.cso",
        );
    }

    // Load a mesh from file and store the data in temporary arrays.
    var mesh_indices = std.ArrayList(u32).init(arena_allocator);
    var mesh_positions = std.ArrayList([3]f32).init(arena_allocator);
    var mesh_normals = std.ArrayList([3]f32).init(arena_allocator);
    var mesh_texcoords = std.ArrayList([2]f32).init(arena_allocator);
    {
        const data = common.parseAndLoadGltfFile(content_dir ++ "SciFiHelmet/SciFiHelmet.gltf");
        defer c.cgltf_free(data);
        common.appendMeshPrimitive(data, 0, 0, &mesh_indices, &mesh_positions, &mesh_normals, &mesh_texcoords, null);
    }
    const mesh_num_indices = @intCast(u32, mesh_indices.items.len);
    const mesh_num_vertices = @intCast(u32, mesh_positions.items.len);

    // Create vertex buffer and return a *handle* to the underlying Direct3D12 resource.
    const vertex_buffer = gctx.createCommittedResource(
        .DEFAULT,
        d3d12.HEAP_FLAG_NONE,
        &d3d12.RESOURCE_DESC.initBuffer(mesh_num_vertices * @sizeOf(Vertex)),
        d3d12.RESOURCE_STATE_COPY_DEST,
        null,
    ) catch |err| hrPanic(err);

    // Create index buffer and return a *handle* to the underlying Direct3D12 resource.
    const index_buffer = gctx.createCommittedResource(
        .DEFAULT,
        d3d12.HEAP_FLAG_NONE,
        &d3d12.RESOURCE_DESC.initBuffer(mesh_num_indices * @sizeOf(u32)),
        d3d12.RESOURCE_STATE_COPY_DEST,
        null,
    ) catch |err| hrPanic(err);

    // Create depth texture resource.
    const depth_texture = gctx.createCommittedResource(
        .DEFAULT,
        d3d12.HEAP_FLAG_NONE,
        &blk: {
            var desc = d3d12.RESOURCE_DESC.initTex2d(.D32_FLOAT, gctx.viewport_width, gctx.viewport_height, 1);
            desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_DEPTH_STENCIL | d3d12.RESOURCE_FLAG_DENY_SHADER_RESOURCE;
            break :blk desc;
        },
        d3d12.RESOURCE_STATE_DEPTH_WRITE,
        &d3d12.CLEAR_VALUE.initDepthStencil(.D32_FLOAT, 1.0, 0),
    ) catch |err| hrPanic(err);

    // Create depth texture 'view' - a descriptor which can be send to Direct3D 12 API.
    const depth_texture_dsv = gctx.allocateCpuDescriptors(.DSV, 1);
    gctx.device.CreateDepthStencilView(
        gctx.lookupResource(depth_texture).?, // Get the D3D12 resource from a handle.
        null,
        depth_texture_dsv,
    );

    // Open D3D12 command list, setup descriptor heap, etc. After this call we can upload resources to the GPU,
    // draw 3D graphics etc.
    gctx.beginFrame();

    // Create and upload graphics resources for dear imgui renderer.
    var guictx = GuiRenderer.init(arena_allocator, &gctx, 1, content_dir);

    // Create texture resource and submit GPU commands which copies texture data from CPU
    // to high-performance GPU memory where resource resides.
    const mesh_texture = gctx.createAndUploadTex2dFromFile(
        content_dir ++ "SciFiHelmet/SciFiHelmet_AmbientOcclusion.png",
        .{}, // Default parameters mean that we want whole mipmap chain.
    ) catch |err| hrPanic(err);

    // Generate mipmaps for the mesh texture.
    {
        // Our generator uses fast compute shader to generate all texture levels.
        var mipgen = zd3d12.MipmapGenerator.init(arena_allocator, &gctx, .R8G8B8A8_UNORM, content_dir);
        defer mipgen.deinit(&gctx);
        mipgen.generateMipmaps(&gctx, mesh_texture);
        gctx.finishGpuCommands(); // Wait for the GPU so that we can release the generator.
    }

    gctx.addTransitionBarrier(mesh_texture, d3d12.RESOURCE_STATE_PIXEL_SHADER_RESOURCE);

    // Non-bindless path init.
    // Allocate one (uninitialized) CPU descriptor handle that will be copied to the GPU descriptor heap
    // just before a drawcall.
    const mesh_texture_srv = gctx.allocateCpuDescriptors(.CBV_SRV_UAV, 1);

    // Initialize descriptor handle - after below call `mesh_texture_srv' will be a valid descriptor handle
    // which will be used to identify and interpret data stored in texture resource.
    gctx.device.CreateShaderResourceView(gctx.lookupResource(mesh_texture).?, null, mesh_texture_srv);

    // Bindless path init.
    {
        // pub const PersistentDescriptor = struct {
        //     cpu_handle: d3d12.CPU_DESCRIPTOR_HANDLE,
        //     gpu_handle: d3d12.GPU_DESCRIPTOR_HANDLE,
        //     index: u32, // index in the 'ResourceDescriptorHeap' array
        // };

        // Allocate one persistent GPU descriptor handle. It will be automatically
        // available in the shader via 'ResourceDescriptorHeap' array ('ResourceDescriptorHeap[0]' in this case).
        const bindless_descriptor = gctx.allocatePersistentGpuDescriptors(1);
        gctx.device.CreateShaderResourceView(
            gctx.lookupResource(mesh_texture).?,
            null,
            bindless_descriptor.cpu_handle,
        );
    }

    // Fill vertex buffer with vertex data.
    {
        // Allocate memory from upload heap and fill it with vertex data.
        const verts = gctx.allocateUploadBufferRegion(Vertex, mesh_num_vertices);
        for (mesh_positions.items) |_, i| {
            verts.cpu_slice[i].position = mesh_positions.items[i];
            verts.cpu_slice[i].normal = mesh_normals.items[i];
            verts.cpu_slice[i].texcoord = mesh_texcoords.items[i];
        }

        // Copy vertex data from upload heap to vertex buffer resource that resides in high-performance memory
        // on the GPU.
        gctx.cmdlist.CopyBufferRegion(
            gctx.lookupResource(vertex_buffer).?,
            0,
            verts.buffer,
            verts.buffer_offset,
            verts.cpu_slice.len * @sizeOf(@TypeOf(verts.cpu_slice[0])),
        );
    }

    // Fill index buffer with index data.
    {
        // Allocate memory from upload heap and fill it with index data.
        const indices = gctx.allocateUploadBufferRegion(u32, mesh_num_indices);
        for (mesh_indices.items) |_, i| {
            indices.cpu_slice[i] = mesh_indices.items[i];
        }

        // Copy index data from upload heap to index buffer resource that resides in high-performance memory
        // on the GPU.
        gctx.cmdlist.CopyBufferRegion(
            gctx.lookupResource(index_buffer).?,
            0,
            indices.buffer,
            indices.buffer_offset,
            indices.cpu_slice.len * @sizeOf(@TypeOf(indices.cpu_slice[0])),
        );
    }

    // Transition vertex and index buffers from 'copy dest' state to the state appropriate for rendering.
    gctx.addTransitionBarrier(vertex_buffer, d3d12.RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
    gctx.addTransitionBarrier(index_buffer, d3d12.RESOURCE_STATE_INDEX_BUFFER);
    gctx.flushResourceBarriers();

    // This will send command list to the GPU, call 'Present' and do some other bookkeeping.
    gctx.endFrame();

    // Wait for the GPU to finish all commands.
    gctx.finishGpuCommands();

    return .{
        .gctx = gctx,
        .guictx = guictx,
        .frame_stats = common.FrameStats.init(),
        .bindless_pso = bindless_pso,
        .non_bindless_pso = non_bindless_pso,
        .is_bindless_mode_active = false,
        .vertex_buffer = vertex_buffer,
        .index_buffer = index_buffer,
        .depth_texture = depth_texture,
        .depth_texture_dsv = depth_texture_dsv,
        .mesh_num_vertices = mesh_num_vertices,
        .mesh_num_indices = mesh_num_indices,
        .mesh_texture = mesh_texture,
        .mesh_texture_srv = mesh_texture_srv,
        .camera = .{
            .position = [3]f32{ 0.0, 5.0, -5.0 },
            .forward = [3]f32{ 0.0, 0.0, 1.0 },
            .pitch = 0.175 * math.pi,
            .yaw = 0.0,
        },
        .mouse = .{
            .cursor_prev_x = 0,
            .cursor_prev_y = 0,
        },
    };
}

/// Waits for the GPU, then releases all resources created by init().
fn deinit(demo: *DemoState, gpa_allocator: std.mem.Allocator) void {
    demo.gctx.finishGpuCommands();
    demo.guictx.deinit(&demo.gctx);
    demo.gctx.deinit(gpa_allocator);
    common.deinitWindow(gpa_allocator);
    demo.* = undefined;
}

/// Per-frame CPU update: imgui widgets, mouse-look and WASD camera movement.
fn update(demo: *DemoState) void {
    // Update frame counter and fps stats.
    demo.frame_stats.update(demo.gctx.window, window_name);
    const dt = demo.frame_stats.delta_time;

    // Update dear imgui common. After this call we can define our widgets.
    common.newImGuiFrame(dt);

    c.igSetNextWindowPos(
        c.ImVec2{ .x = @intToFloat(f32, demo.gctx.viewport_width) - 600.0 - 20, .y = 20.0 },
        c.ImGuiCond_FirstUseEver,
        c.ImVec2{ .x = 0.0, .y = 0.0 },
    );
    c.igSetNextWindowSize(.{ .x = 600.0, .y = -1 }, c.ImGuiCond_Always);

    _ = c.igBegin(
        "Demo Settings",
        null,
        c.ImGuiWindowFlags_NoMove | c.ImGuiWindowFlags_NoResize | c.ImGuiWindowFlags_NoSavedSettings,
    );
    c.igBulletText("", "");
    c.igSameLine(0, -1);
    c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "Right Mouse Button + Drag", "");
    c.igSameLine(0, -1);
    c.igText(" :  rotate camera", "");

    c.igBulletText("", "");
    c.igSameLine(0, -1);
    c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "W, A, S, D", "");
    c.igSameLine(0, -1);
    c.igText(" :  move camera", "");

    _ = c.igCheckbox("Use bindless texture", &demo.is_bindless_mode_active);

    c.igEnd();

    // Handle camera rotation with mouse.
    {
        var pos: w.POINT = undefined;
        _ = w.GetCursorPos(&pos);
        const delta_x = @intToFloat(f32, pos.x) - @intToFloat(f32, demo.mouse.cursor_prev_x);
        const delta_y = @intToFloat(f32, pos.y) - @intToFloat(f32, demo.mouse.cursor_prev_y);
        demo.mouse.cursor_prev_x = pos.x;
        demo.mouse.cursor_prev_y = pos.y;

        if (w.GetAsyncKeyState(w.VK_RBUTTON) < 0) {
            demo.camera.pitch += 0.0025 * delta_y;
            demo.camera.yaw += 0.0025 * delta_x;
            // Clamp pitch just short of +/- 90 degrees to avoid gimbal flip.
            demo.camera.pitch = math.min(demo.camera.pitch, 0.48 * math.pi);
            demo.camera.pitch = math.max(demo.camera.pitch, -0.48 * math.pi);
            demo.camera.yaw = zm.modAngle(demo.camera.yaw);
        }
    }

    // Handle camera movement with 'WASD' keys.
    {
        const speed = zm.f32x4s(10.0);
        const delta_time = zm.f32x4s(demo.frame_stats.delta_time);
        const transform = zm.mul(zm.rotationX(demo.camera.pitch), zm.rotationY(demo.camera.yaw));
        var forward = zm.normalize3(zm.mul(zm.f32x4(0.0, 0.0, 1.0, 0.0), transform));

        zm.store(demo.camera.forward[0..], forward, 3);

        const right = speed * delta_time * zm.normalize3(zm.cross3(zm.f32x4(0.0, 1.0, 0.0, 0.0), forward));
        forward = speed * delta_time * forward;

        // Load camera position from memory to SIMD register ('3' means that we want to load three components).
        var cpos = zm.load(demo.camera.position[0..], zm.Vec, 3);

        if (w.GetAsyncKeyState('W') < 0) {
            cpos += forward;
        } else if (w.GetAsyncKeyState('S') < 0) {
            cpos -= forward;
        }
        if (w.GetAsyncKeyState('D') < 0) {
            cpos += right;
        } else if (w.GetAsyncKeyState('A') < 0) {
            cpos -= right;
        }

        // Copy updated position from SIMD register to memory.
        zm.store(demo.camera.position[0..], cpos, 3);
    }
}

/// Records and submits the frame's GPU work: clears, draws the 7x7 helmet
/// grid with the selected (bindless or classic) pipeline, then imgui.
fn draw(demo: *DemoState) void {
    var gctx = &demo.gctx;

    const cam_world_to_view = zm.lookToLh(
        zm.load(demo.camera.position[0..], zm.Vec, 3),
        zm.load(demo.camera.forward[0..], zm.Vec, 3),
        zm.f32x4(0.0, 1.0, 0.0, 0.0),
    );
    const cam_view_to_clip = zm.perspectiveFovLh(
        0.25 * math.pi,
        @intToFloat(f32, gctx.viewport_width) / @intToFloat(f32, gctx.viewport_height),
        0.01,
        200.0,
    );
    const cam_world_to_clip = zm.mul(cam_world_to_view, cam_view_to_clip);

    // Begin DirectX 12 rendering.
    gctx.beginFrame();

    // Get current back buffer resource and transition it to 'render target' state.
    const back_buffer = gctx.getBackBuffer();
    gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_RENDER_TARGET);
    gctx.flushResourceBarriers();

    gctx.cmdlist.OMSetRenderTargets(
        1,
        &[_]d3d12.CPU_DESCRIPTOR_HANDLE{back_buffer.descriptor_handle},
        w.TRUE,
        &demo.depth_texture_dsv,
    );
    gctx.cmdlist.ClearRenderTargetView(
        back_buffer.descriptor_handle,
        &[4]f32{ 0.2, 0.2, 0.2, 1.0 },
        0,
        null,
    );
    gctx.cmdlist.ClearDepthStencilView(demo.depth_texture_dsv, d3d12.CLEAR_FLAG_DEPTH, 1.0, 0, 0, null);

    if (demo.is_bindless_mode_active)
        gctx.setCurrentPipeline(demo.bindless_pso)
    else
        gctx.setCurrentPipeline(demo.non_bindless_pso);

    // Set input assembler (IA) state.
    gctx.cmdlist.IASetPrimitiveTopology(.TRIANGLELIST);
    gctx.cmdlist.IASetVertexBuffers(0, 1, &[_]d3d12.VERTEX_BUFFER_VIEW{.{
        .BufferLocation = gctx.lookupResource(demo.vertex_buffer).?.GetGPUVirtualAddress(),
        .SizeInBytes = demo.mesh_num_vertices * @sizeOf(Vertex),
        .StrideInBytes = @sizeOf(Vertex),
    }});
    gctx.cmdlist.IASetIndexBuffer(&.{
        .BufferLocation = gctx.lookupResource(demo.index_buffer).?.GetGPUVirtualAddress(),
        .SizeInBytes = demo.mesh_num_indices * @sizeOf(u32),
        .Format = .R32_UINT,
    });

    if (!demo.is_bindless_mode_active) {
        // Bind mesh texture (copy CPU descriptor handle to GPU descriptor heap).
        gctx.cmdlist.SetGraphicsRootDescriptorTable(
            2, // Slot index 2 in Root Signature (SRV(t0), see intro4.hlsl).
            gctx.copyDescriptorsToGpuHeap(1, demo.mesh_texture_srv),
        );
    }

    // Upload per-frame constant data (camera xform).
    {
        // Allocate memory for one instance of Pso_FrameConst structure.
        const mem = gctx.allocateUploadMemory(Pso_FrameConst, 1);

        // Copy 'cam_world_to_clip' matrix to upload memory. We need to transpose it because
        // HLSL uses column-major matrices by default (zmath uses row-major matrices).
        zm.storeMat(mem.cpu_slice[0].world_to_clip[0..], zm.transpose(cam_world_to_clip));

        // Set GPU handle of our allocated memory region so that it is visible to the shader.
        gctx.cmdlist.SetGraphicsRootConstantBufferView(
            1, // Slot index 1 in Root Signature (CBV(b1), see intro4.hlsl).
            mem.gpu_base,
        );
    }

    // For each object, upload per-draw constant data (object to world xform) and draw.
    {
        var z: f32 = -9.0;
        while (z <= 9.0) : (z += 3.0) {
            var x: f32 = -9.0;
            while (x <= 9.0) : (x += 3.0) {
                // Compute translation matrix.
                const object_to_world = zm.translation(x, 0.0, z);

                // Allocate memory for one instance of Pso_DrawConst structure.
                const mem = gctx.allocateUploadMemory(Pso_DrawConst, 1);

                // Copy 'object_to_world' matrix to upload memory. We need to transpose it because
                // HLSL uses column-major matrices by default (zmath uses row-major matrices).
                zm.storeMat(mem.cpu_slice[0].object_to_world[0..], zm.transpose(object_to_world));

                // Set GPU handle of our allocated memory region so that it is visible to the shader.
                gctx.cmdlist.SetGraphicsRootConstantBufferView(
                    0, // Slot index 0 in Root Signature (CBV(b0), see intro4.hlsl).
                    mem.gpu_base,
                );

                gctx.cmdlist.DrawIndexedInstanced(demo.mesh_num_indices, 1, 0, 0, 0);
            }
        }
    }

    // Draw dear imgui widgets.
    demo.guictx.draw(gctx);

    gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_PRESENT);
    gctx.flushResourceBarriers();

    // Call 'Present' and prepare for the next frame.
    gctx.endFrame();
}

pub fn main() !void {
    // Initialize some low-level Windows stuff (DPI awarness, COM), check Windows version and also check
    // if DirectX 12 Agility SDK is supported.
    common.init();
    defer common.deinit();

    // Create main memory allocator for our application.
    var gpa_allocator_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer {
        const leaked = gpa_allocator_state.deinit();
        std.debug.assert(leaked == false);
    }
    const gpa_allocator = gpa_allocator_state.allocator();

    var demo = init(gpa_allocator);
    defer deinit(&demo, gpa_allocator);

    // Standard Win32 message pump; render whenever the queue is empty.
    while (true) {
        var message = std.mem.zeroes(w.user32.MSG);
        const has_message = w.user32.peekMessageA(&message, null, 0, 0, w.user32.PM_REMOVE) catch false;
        if (has_message) {
            _ = w.user32.translateMessage(&message);
            _ = w.user32.dispatchMessageA(&message);
            if (message.message == w.user32.WM_QUIT) {
                break;
            }
        } else {
            update(&demo);
            draw(&demo);
        }
    }
}
samples/intro/src/intro4.zig
//! A clickable push button widget with optional icon, toolbar style,
//! checked state and auto-repeat support.

const std = @import("std");
const Allocator = std.mem.Allocator;

const nvg = @import("nanovg");
const gui = @import("../gui.zig");
const event = @import("../event.zig");
const Point = @import("../geometry.zig").Point;
const Rect = @import("../geometry.zig").Rect;

const Button = @This();

pub const ButtonStyle = enum(u8) {
    default,
    toolbar,
};

widget: gui.Widget,
allocator: *Allocator,

// Label drawn centered in the button. Not owned; caller keeps it alive.
text: [:0]const u8,
font_size: f32 = 9,
// Optional icon painter, invoked with the canvas translated to (icon_x, icon_y).
iconFn: ?fn () void = null,
icon_x: f32 = 2,
icon_y: f32 = 2,
style: ButtonStyle = .default,

enabled: bool = true,
hovered: bool = false,
pressed: bool = false,
checked: bool = false,

// Fires repeatedly while the button is held, if auto_repeat_interval > 0.
auto_repeat_timer: gui.Timer,
auto_repeat_interval: u32 = 0, // in milliseconds

onClickFn: ?fn (*Self) void = null,
onEnterFn: ?fn (*Self) void = null,
onLeaveFn: ?fn (*Self) void = null,

const Self = @This();

/// Allocates and wires up a button. Caller owns the result and must call deinit().
pub fn init(allocator: *Allocator, rect: Rect(f32), text: [:0]const u8) !*Self {
    var self = try allocator.create(Self);
    self.* = Self{
        .widget = gui.Widget.init(allocator, rect),
        .allocator = allocator,
        .text = text,
        .auto_repeat_timer = gui.Timer{
            .on_elapsed_fn = onAutoRepeatTimerElapsed,
            .ctx = @ptrToInt(self),
        },
    };
    self.widget.focus_policy.mouse = true;
    self.widget.focus_policy.keyboard = true;

    self.widget.drawFn = draw;
    self.widget.onMouseDownFn = onMouseDown;
    self.widget.onMouseUpFn = onMouseUp;
    self.widget.onEnterFn = onEnter;
    self.widget.onLeaveFn = onLeave;

    return self;
}

pub fn deinit(self: *Self) void {
    self.auto_repeat_timer.stop();
    self.widget.deinit();
    self.allocator.destroy(self);
}

/// Invokes the click callback; no-op while the button is disabled.
pub fn click(self: *Self) void {
    if (!self.enabled) return;
    if (self.onClickFn) |clickFn| {
        clickFn(self);
    }
}

pub fn onMouseDown(widget: *gui.Widget, mouse_event: *const event.MouseEvent) void {
    const self = @fieldParentPtr(Self, "widget", widget);
    if (!self.enabled) return;
    const mouse_position = Point(f32).make(mouse_event.x, mouse_event.y);
    self.hovered = widget.getRect().contains(mouse_position);
    if (mouse_event.button == .left) {
        if (self.hovered) {
            self.pressed = true;
            // Auto-repeat buttons fire immediately on press and then keep
            // firing on a timer; regular buttons fire on release instead.
            if (self.auto_repeat_interval > 0) {
                self.click();
                self.auto_repeat_timer.start(self.auto_repeat_interval);
            }
        }
    }
}

fn onMouseUp(widget: *gui.Widget, mouse_event: *const event.MouseEvent) void {
    const self = @fieldParentPtr(Self, "widget", widget);
    if (!self.enabled) return;
    const mouse_position = Point(f32).make(mouse_event.x, mouse_event.y);
    self.hovered = widget.getRect().contains(mouse_position);
    if (mouse_event.button == .left) {
        self.pressed = false;
        self.auto_repeat_timer.stop();
        // Only non-repeating buttons click on release (repeat buttons already
        // clicked on press/timer).
        if (self.hovered and self.auto_repeat_interval == 0) {
            self.click();
        }
    }
}

fn onEnter(widget: *gui.Widget) void {
    const self = @fieldParentPtr(Self, "widget", widget);
    self.hovered = true;
    if (self.onEnterFn) |enterFn| enterFn(self);
}

fn onLeave(widget: *gui.Widget) void {
    const self = @fieldParentPtr(Self, "widget", widget);
    self.hovered = false;
    if (self.onLeaveFn) |leaveFn| leaveFn(self);
}

fn onAutoRepeatTimerElapsed(context: usize) void {
    var button = @intToPtr(*Button, context);
    // Bug fix: route through click() so the `enabled` guard applies.
    // Previously onClickFn was invoked directly, so a button disabled while
    // its auto-repeat timer was running kept firing click callbacks.
    button.click();
}

pub fn draw(widget: *gui.Widget) void {
    const self = @fieldParentPtr(Self, "widget", widget);

    const rect = widget.relative_rect;
    switch (self.style) {
        .default => {
            gui.drawPanel(rect.x + 1, rect.y + 1, rect.w - 2, rect.h - 2, 1, self.enabled and self.hovered, (self.enabled and self.pressed) or self.checked);

            const is_focused = widget.isFocused();
            // border
            nvg.beginPath();
            // if (is_focused) {
            //     nvg.rect(rect.x + 1, rect.y + 1, rect.w - 2, rect.h - 2);
            //     nvg.strokeWidth(2);
            // } else {
            nvg.rect(rect.x + 0.5, rect.y + 0.5, rect.w - 1, rect.h - 1);
            // }
            nvg.strokeColor(if (is_focused) nvg.rgb(0, 0, 0) else gui.theme_colors.border);
            nvg.stroke();
            nvg.strokeWidth(1);
        },
        .toolbar => {
            // Toolbar buttons are flat; only draw a panel when highlighted.
            if ((self.enabled and self.hovered) or self.checked) {
                const depth: f32 = 1;
                gui.drawPanel(rect.x, rect.y, rect.w, rect.h, depth, false, (self.enabled and self.pressed) or self.checked);
            }
        },
    }

    // nvg.beginPath();
    // nvg.roundedRect(rect.x + 1.5, rect.y + 1.5, rect.w - 3, rect.h - 3, 1);
    // nvg.fillColor(gui.theme_colors.background);
    // nvg.fill();
    // nvg.strokeColor(gui.theme_colors.light);
    // nvg.stroke();

    // nvg.beginPath();
    // nvg.roundedRect(rect.x + 0.5, rect.y + 0.5, rect.w - 1, rect.h - 1, 2);
    // nvg.strokeColor(gui.theme_colors.border);
    // nvg.stroke();

    nvg.fontFace("guifont");
    nvg.fontSize(gui.pixelsToPoints(self.font_size));
    nvg.textAlign(.{ .horizontal = .center, .vertical = .middle });
    nvg.fillColor(nvg.rgb(0, 0, 0));
    _ = nvg.text(rect.x + 0.5 * rect.w, rect.y + 0.5 * rect.h, self.text);

    if (self.iconFn) |iconFn| {
        nvg.save();
        nvg.translate(rect.x + self.icon_x, rect.y + self.icon_y);
        iconFn();
        nvg.restore();
    }
}
src/gui/widgets/Button.zig
const int = @import("int.zig");
const std = @import("std");

const builtin = std.builtin;

pub const Error = error{InvalidPointer};

/// A data structure representing a relative pointer in binary data.
/// Useful for decoding binary data using only `packed` structs.
pub fn RelativePointer(
    /// The pointer type that this relative pointer represents
    comptime Ptr: type,
    /// The integer used as the storage for the pointer
    comptime Int: type,
    /// The endian of the backing integer
    comptime endian: builtin.Endian,
    /// An external offset that is subtracted from the inner
    /// integer before it is converted to a real pointer.
    comptime offset: comptime_int,
    /// The value of the null pointer. Only used if `Ptr` is
    /// an optional pointer.
    comptime null_ptr: comptime_int,
) type {
    return packed struct {
        inner: Inner,

        const Inner = int.Int(Int, endian);
        const Slice = @Type(blk: {
            var info = ptr_info;
            info.size = .Slice;
            break :blk builtin.TypeInfo{ .Pointer = info };
        });
        const SliceNoSentinel = @Type(blk: {
            var info = ptr_info;
            info.size = .Slice;
            info.sentinel = null;
            break :blk builtin.TypeInfo{ .Pointer = info };
        });
        const NonOptionalPtr = switch (@typeInfo(Ptr)) {
            .Optional => |opt| opt.child,
            else => Ptr,
        };
        const ptr_info = @typeInfo(NonOptionalPtr).Pointer;
        const Data = if (ptr_info.is_const) []const u8 else []u8;
        const is_optional = @typeInfo(Ptr) == .Optional;
        const ptr_sentinel = @ptrCast(*const ptr_info.child, ptr_info.sentinel.?).*;

        /// Given a slice of data, and a pointer that points into this
        /// data, construct a `RelativePointer`.
        /// Returns `error.InvalidPointer` when `ptr` does not fit inside
        /// `data` or when the stored integer would overflow.
        pub fn init(ptr: anytype, data: []const u8) Error!@This() {
            const ptr_is_optional = @typeInfo(@TypeOf(ptr)) == .Optional or
                @typeInfo(@TypeOf(ptr)) == .Null;
            if (is_optional and ptr_is_optional and ptr == null)
                return @This(){ .inner = Inner.init(null_ptr) };

            // Offset of `ptr` relative to the start of `data`.
            const i = @intCast(Int, @ptrToInt(ptr) - @ptrToInt(data.ptr));
            // At least one child element must fit inside `data`.
            if (data.len < i + @sizeOf(ptr_info.child))
                return error.InvalidPointer;

            const res = std.math.add(Int, i, offset) //
                catch return error.InvalidPointer;
            return @This(){ .inner = Inner.init(res) };
        }

        /// Convert a `RelativePointer` to a pointer to within `data`.
        /// Returns `error.InvalidPointer` when the decoded offset lands
        /// outside of `data`.
        pub fn toPtr(ptr: @This(), data: Data) Error!Ptr {
            if (is_optional and ptr.inner.value() == null_ptr)
                return null;

            const i = try ptr.toInt();
            // For a single-item pointer one whole child must fit; for a
            // many-item pointer only the offset itself is validated here.
            if (data.len < i + @sizeOf(ptr_info.child) * @boolToInt(ptr_info.size == .One))
                return error.InvalidPointer;

            return @ptrCast(Ptr, @alignCast(ptr_info.alignment, &data[i]));
        }

        /// Converts a `RelativePointer` to an unknown number of
        /// elements to a slice of `len` elements.
        pub fn toSlice(ptr: @This(), data: Data, len: usize) Error!SliceNoSentinel {
            // Null and zero-length cases both produce an empty slice; its
            // pointer is never dereferenced.
            if (is_optional and ptr.inner.value() == null_ptr)
                return @as(NonOptionalPtr, undefined)[0..0];
            if (len == 0)
                return @as(NonOptionalPtr, undefined)[0..0];

            const p = try ptr.toPtr(data);
            const start = @ptrToInt(p) - @ptrToInt(data.ptr);
            const end = start + len * @sizeOf(ptr_info.child);
            if (data.len < end)
                return error.InvalidPointer;

            return if (is_optional) p.?[0..len] else p[0..len];
        }

        /// Converts a `RelativePointer` to an unknown number of
        /// elements to a slice that contains as many elements as
        /// possible from the pointer to the end of `data`.
        pub fn toSliceEnd(ptr: @This(), data: Data) Error!SliceNoSentinel {
            const rest = std.math.sub(usize, data.len, try ptr.toInt()) catch return error.InvalidPointer;
            return ptr.toSlice(data, rest / @sizeOf(ptr_info.child));
        }

        /// Converts a `RelativePointer` to an unknown number of
        /// elements to a slice that contains all the elements until
        /// the sentinel. Errors when no sentinel is found before the
        /// end of `data`.
        pub fn toSliceZ(ptr: @This(), data: Data) Error!Slice {
            const res = try ptr.toSliceEnd(data);
            for (res) |item, len| {
                if (std.meta.eql(item, ptr_sentinel))
                    return res[0..len :ptr_sentinel];
            }
            return error.InvalidPointer;
        }

        /// Converts a `RelativePointer` to an unknown number of
        /// elements to a slice that contains all the elements until
        /// the runtime-provided `sentinel` (which, unlike `toSliceZ`,
        /// is not encoded in the result type).
        pub fn toSliceZ2(ptr: @This(), data: Data, sentinel: ptr_info.child) Error!SliceNoSentinel {
            const res = try ptr.toSliceEnd(data);
            for (res) |item, len| {
                if (std.meta.eql(item, sentinel))
                    return res[0..len];
            }
            return error.InvalidPointer;
        }

        /// Decode the stored integer back to a `data`-relative offset.
        fn toInt(ptr: @This()) Error!Int {
            return std.math.sub(Int, ptr.inner.value(), offset) //
                catch return error.InvalidPointer;
        }
    };
}

// zig fmt: off
test "RelativePointer" {
    @setEvalBranchQuota(100000000);
    inline for ([_]bool{true, false}) |is_optional|
    inline for ([_]builtin.Endian{.Little,.Big}) |endian|
    inline for ([_]usize{25,50,75}) |offset|
    inline for ([_]usize{0,4,100}) |null_ptr|
    inline for ([_]type{u8, u16, u32}) |Child|
    inline for ([_]type{u8, u16, u32}) |Int| {
        const Ptr = if (is_optional) ?*Child else *Child;
        const RPtr = RelativePointer(
            Ptr,
            Int,
            endian,
            offset,
            null_ptr,
        );
        const RPtr2 = RelativePointer(
            [*]Child,
            Int,
            endian,
            offset,
            null_ptr,
        );

        var data = [_]Child{2, 4, 6, 8, 10};
        const bytes = if (Child != u8) std.mem.sliceAsBytes(&data) else &data;
        for (data) |*expect| {
            const p = try RPtr.init(expect, bytes);
            const actual = if (is_optional)
                (try p.toPtr(bytes)).?
            else
                try p.toPtr(bytes);
            try std.testing.expectEqual(expect, actual);
        }
        if (is_optional) {
            const p = try RPtr.init(null, bytes);
            try std.testing.expectEqual(@as(Ptr, null), try p.toPtr(bytes));
        }
        for (data[0..4]) |_, i| {
            const expect = data[i..i+1];
            const p = try RPtr2.init(expect.ptr, bytes);
            const actual = try p.toSlice(bytes, 1);
            try std.testing.expectEqualSlices(Child, expect, actual);
        }
    };
}
// zig fmt: on

pub const Layout = enum {
    pointer_first,
    len_first,
};

/// A data structure representing a relative slice (pointer + length)
/// stored in binary data. See `RelativePointer`.
pub fn RelativeSlice(
    /// The slice type that this relative slice represents
    comptime Slice: type,
    /// The integer used as the storage for both pointer and length
    comptime Int: type,
    /// The endian of the backing integer
    comptime endian: builtin.Endian,
    /// The layout of the slice
    comptime layout: Layout,
    /// An external offset that is subtracted from the inner
    /// integer before it is converted to a real slice.
    comptime offset: comptime_int,
) type {
    return packed struct {
        inner: Inner,

        const Ptr = @Type(builtin.TypeInfo{
            .Pointer = .{
                .size = .Many,
                .is_const = @typeInfo(Slice).Pointer.is_const,
                .is_volatile = @typeInfo(Slice).Pointer.is_volatile,
                .alignment = @typeInfo(Slice).Pointer.alignment,
                .child = @typeInfo(Slice).Pointer.child,
                .is_allowzero = @typeInfo(Slice).Pointer.is_allowzero,
                .address_space = @typeInfo(Slice).Pointer.address_space,
                .sentinel = @typeInfo(Slice).Pointer.sentinel,
            },
        });
        const RPtr = RelativePointer(Ptr, Int, endian, offset, 0);
        const Inner = switch (layout) {
            .pointer_first => packed struct {
                ptr: RPtr,
                len: RPtr.Inner,
            },
            .len_first => packed struct {
                len: RPtr.Inner,
                ptr: RPtr,
            },
        };

        /// Given a slice of data, and a slice that points into this
        /// data, construct a `RelativeSlice`.
        pub fn init(slice: Slice, data: []const u8) Error!@This() {
            // Bounds check: the slice must end within `data`. Both sides of
            // the comparison are absolute addresses; a previous revision
            // compared the absolute end of `data` against a data-RELATIVE
            // offset, which made the check pass for almost any input.
            const data_end = @ptrToInt(data.ptr) + data.len;
            const end = @ptrToInt(slice.ptr) + (slice.len * @sizeOf(RPtr.ptr_info.child));
            if (data_end < end)
                return error.InvalidPointer;
            return @This(){ .inner = .{
                .ptr = try RPtr.init(slice.ptr, data),
                .len = RPtr.Inner.init(@intCast(Int, slice.len)),
            } };
        }

        /// Convert a `RelativeSlice` back to a slice into `data`.
        pub fn toSlice(slice: @This(), data: RPtr.Data) Error!Slice {
            return slice.inner.ptr.toSlice(data, slice.len());
        }

        /// The stored element count.
        pub fn len(slice: @This()) Int {
            return slice.inner.len.value();
        }
    };
}

// zig fmt: off
test "RelativeSlice" {
    @setEvalBranchQuota(100000000);
    inline for ([_]builtin.Endian{.Little,.Big}) |endian|
    inline for ([_]usize{25,50,75}) |offset|
    inline for ([_]type{u8, u16, u32}) |Child|
    inline for ([_]type{u8, u16, u32}) |Int| {
        const Slice1 = RelativeSlice(
            []Child,
            Int,
            endian,
            .pointer_first,
            offset,
        );
        const Slice2 = RelativeSlice(
            []Child,
            Int,
            endian,
            .len_first,
            offset,
        );
        var data = [_]Child{2, 4, 6, 8, 10};
        const bytes = if (Child != u8) std.mem.sliceAsBytes(&data) else &data;
        for (data[0..4]) |_, i| {
            const expect = data[i..i+1];
            const slice1 = try (try Slice1.init(expect, bytes)).toSlice(bytes);
            const slice2 = try (try Slice2.init(expect, bytes)).toSlice(bytes);
            try std.testing.expectEqualSlices(Child, expect, slice1);
            try std.testing.expectEqualSlices(Child, expect, slice2);
        }
    };
}
// zig fmt: on
src/core/rom/ptr.zig
const std = @import("std");
const os = @import("root").os;

const paging = os.memory.paging;
const RangeAlloc = os.lib.range_alloc.RangeAlloc;
const Mutex = os.thread.Mutex;

// Next unused virtual address handed out by the sbrk functions.
var sbrk_head: usize = undefined;

/// Initialize the virtual memory manager; `phys_high` is the first
/// virtual address available for sbrk allocations.
pub fn init(phys_high: usize) !void {
    os.log("Initializing vmm with base 0x{X}\n", .{phys_high});
    sbrk_mutex.init();
    sbrk_head = phys_high;
}

var sbrk_mutex = Mutex{};

/// Reserve `num_bytes` of virtual address space WITHOUT backing it with
/// physical memory. Dereferencing the result before mapping it will fault.
pub fn nonbacked_sbrk(num_bytes: usize) ![]u8 {
    sbrk_mutex.lock();
    defer sbrk_mutex.unlock();
    const ret = sbrk_head;
    sbrk_head += num_bytes;
    return @intToPtr([*]u8, ret)[0..num_bytes];
}

/// Reserve `num_bytes` of virtual address space and map it to physical
/// memory (read/write, write-back cached).
pub fn sbrk(num_bytes: usize) ![]u8 {
    const ret = try nonbacked_sbrk(num_bytes);
    try paging.map(.{
        .virt = @ptrToInt(ret.ptr),
        .size = num_bytes,
        .perm = paging.rw(),
        .memtype = .MemoryWriteBack,
    });
    return ret;
}

/// Describes the lifetime of the memory aquired from an allocator
const Lifetime = enum {
    /// Ephemeral memory won't last for the entire uptime of the kernel,
    /// it can be freed to make it available to the rest of the system.
    Ephemeral,

    /// Eternal memory will remain allocated until system shutdown. It
    /// cannot be freed. Ever.
    Eternal,
};

/// Range allocator for backed memory
var range = RangeAlloc{ .materialize_bytes = sbrk };

/// Range allocator for nonbacked memory
pub var nonbacked_range = RangeAlloc{ .materialize_bytes = nonbacked_sbrk };

// GPA layered on top of the backed range allocator; used for Ephemeral
// allocations so they can actually be returned to the system.
var ephemeral_alloc = std.heap.GeneralPurposeAllocator(.{
    .thread_safe = true,
    .MutexType = os.thread.Mutex,
}){
    .backing_allocator = &range.allocator,
};

/// The virtual memory is backed by physical pages.
/// You can dereference these pointers just like in your
/// normal programs
pub fn backed(
    lifetime: Lifetime,
) *std.mem.Allocator {
    switch (lifetime) {
        .Ephemeral => return &ephemeral_alloc.allocator,
        .Eternal => return &range.allocator,
    }
}

/// The virtual memory is _NOT_ backed by physical pages.
/// If you dereference this memory, you _WILL_ get a page
/// fault. The pointers into this memory cannot be dereferenced
/// before mapping the memory to some physical memory.
pub fn nonbacked() *std.mem.Allocator {
    return &nonbacked_range.allocator;
}

/// LAI host hook: allocate `sz` bytes, NULL on failure or zero size.
export fn laihost_malloc(sz: usize) ?*c_void {
    if (sz == 0)
        return os.lib.lai.NULL;
    const mem = os.memory.vmm.backed(.Ephemeral).alloc(u8, sz) catch return os.lib.lai.NULL;
    return @ptrCast(*c_void, mem.ptr);
}

/// LAI host hook: resize an allocation, following the usual realloc
/// contract: on failure the old block stays valid and NULL is returned;
/// on success the live prefix (min of old and new size) is preserved.
export fn laihost_realloc(ptr: ?*c_void, newsize: usize, oldsize: usize) ?*c_void {
    std.debug.assert((ptr == null) == (oldsize == 0));
    if (oldsize == 0) {
        return laihost_malloc(newsize);
    }
    if (newsize == 0) {
        laihost_free(ptr, oldsize);
        return os.lib.lai.NULL;
    }
    const ret = laihost_malloc(newsize);
    // Allocation failure: keep the old block intact and report NULL, per
    // the realloc contract. The previous code copied into (and freed
    // around) a null pointer here.
    if (ret == null)
        return os.lib.lai.NULL;
    // Copy only what fits: copying `oldsize` bytes unconditionally
    // overflowed the new buffer whenever the allocation shrank.
    @memcpy(@ptrCast([*]u8, ret), @ptrCast([*]const u8, ptr), std.math.min(oldsize, newsize));
    laihost_free(ptr, oldsize);
    return ret;
}

/// LAI host hook: free a block previously returned by laihost_malloc.
/// `oldsize` must match the original request; freeing NULL is a no-op.
export fn laihost_free(ptr: ?*c_void, oldsize: usize) void {
    if (ptr == null)
        return;
    os.memory.vmm.backed(.Ephemeral).free(@ptrCast([*]u8, ptr)[0..oldsize]);
}
src/memory/vmm.zig
const atan2 = @import("std").math.atan2;
const sqrt = @import("std").math.sqrt;
const cos = @import("std").math.cos;
const sin = @import("std").math.sin;

usingnamespace @import("common.zig");

/// Generic 2D vector over a numeric component type.
/// NOTE(review): only the listed types instantiate; the trigonometric
/// helpers (`angle`, `moveTowards`) and `div` presumably only make sense
/// for the float instantiations — confirm integer vectors never call them.
pub fn Generic(comptime T: type) type {
    switch (T) {
        i16, i32, i64, i128, f16, f32, f64, f128 => {
            return struct {
                const Self = @This();

                /// X value
                x: T = 0,
                /// Y value
                y: T = 0,

                /// Component-wise sum of `self` and `other`.
                pub fn add(self: Self, other: Self) Self {
                    return Self{ .x = self.x + other.x, .y = self.y + other.y };
                }

                /// Component-wise sum of `self` and the scalars (x, y).
                pub fn addValues(self: Self, x: T, y: T) Self {
                    return Self{ .x = self.x + x, .y = self.y + y };
                }

                /// Component-wise difference of `self` and `other`.
                pub fn sub(self: Self, other: Self) Self {
                    return Self{ .x = self.x - other.x, .y = self.y - other.y };
                }

                /// Component-wise difference of `self` and the scalars (x, y).
                pub fn subValues(self: Self, x: T, y: T) Self {
                    return Self{ .x = self.x - x, .y = self.y - y };
                }

                /// Component-wise quotient of `self` and `other`.
                pub fn div(self: Self, other: Self) Self {
                    return Self{ .x = self.x / other.x, .y = self.y / other.y };
                }

                /// Component-wise quotient of `self` and the scalars (x, y).
                pub fn divValues(self: Self, x: T, y: T) Self {
                    return Self{ .x = self.x / x, .y = self.y / y };
                }

                /// Component-wise product of `self` and `other`.
                pub fn mul(self: Self, other: Self) Self {
                    return Self{ .x = self.x * other.x, .y = self.y * other.y };
                }

                /// Component-wise product of `self` and the scalars (x, y).
                pub fn mulValues(self: Self, x: T, y: T) Self {
                    return Self{ .x = self.x * x, .y = self.y * y };
                }

                /// Angle of the direction from `v1` to `v2` against the
                /// X-axis, normalized to [0, 360) degrees.
                pub fn angle(v1: Self, v2: Self) T {
                    const rad_to_deg = @as(T, 180.0 / PI);
                    const result: T = atan2(T, v2.y - v1.y, v2.x - v1.x) * rad_to_deg;
                    return if (result < 0) result + 360 else result;
                }

                /// Step `v1` a distance of `speed` along the direction
                /// towards `v2` (may overshoot for large `speed`).
                pub fn moveTowards(v1: Self, v2: Self, speed: T) Self {
                    const ang: T = atan2(T, v2.y - v1.y, v2.x - v1.x);
                    const step_x = cos(ang) * speed;
                    const step_y = sin(ang) * speed;
                    return Self{ .x = v1.x + step_x, .y = v1.y + step_y };
                }

                /// Euclidean distance between `v1` and `v2`.
                pub fn distance(v1: Self, v2: Self) T {
                    const delta_x = v1.x - v2.x;
                    const delta_y = v1.y - v2.y;
                    return sqrt(delta_x * delta_x + delta_y * delta_y);
                }
            };
        },
        else => @compileError("Vector2 not implemented for " ++ @typeName(T)),
    }
}
src/kiragine/kira/math/vec2.zig
pub const FS_INITIALIZING = @as(u32, 536870912); pub const FS_DIALING = @as(u32, 536870913); pub const FS_TRANSMITTING = @as(u32, 536870914); pub const FS_RECEIVING = @as(u32, 536870916); pub const FS_COMPLETED = @as(u32, 536870920); pub const FS_HANDLED = @as(u32, 536870928); pub const FS_LINE_UNAVAILABLE = @as(u32, 536870944); pub const FS_BUSY = @as(u32, 536870976); pub const FS_NO_ANSWER = @as(u32, 536871040); pub const FS_BAD_ADDRESS = @as(u32, 536871168); pub const FS_NO_DIAL_TONE = @as(u32, 536871424); pub const FS_DISCONNECTED = @as(u32, 536871936); pub const FS_FATAL_ERROR = @as(u32, 536872960); pub const FS_NOT_FAX_CALL = @as(u32, 536875008); pub const FS_CALL_DELAYED = @as(u32, 536879104); pub const FS_CALL_BLACKLISTED = @as(u32, 536887296); pub const FS_USER_ABORT = @as(u32, 538968064); pub const FS_ANSWERED = @as(u32, 545259520); pub const FAXDEVRECEIVE_SIZE = @as(u32, 4096); pub const FAXDEVREPORTSTATUS_SIZE = @as(u32, 4096); pub const FAX_ERR_START = @as(i32, 7001); pub const FAX_ERR_SRV_OUTOFMEMORY = @as(i32, 7001); pub const FAX_ERR_GROUP_NOT_FOUND = @as(i32, 7002); pub const FAX_ERR_BAD_GROUP_CONFIGURATION = @as(i32, 7003); pub const FAX_ERR_GROUP_IN_USE = @as(i32, 7004); pub const FAX_ERR_RULE_NOT_FOUND = @as(i32, 7005); pub const FAX_ERR_NOT_NTFS = @as(i32, 7006); pub const FAX_ERR_DIRECTORY_IN_USE = @as(i32, 7007); pub const FAX_ERR_FILE_ACCESS_DENIED = @as(i32, 7008); pub const FAX_ERR_MESSAGE_NOT_FOUND = @as(i32, 7009); pub const FAX_ERR_DEVICE_NUM_LIMIT_EXCEEDED = @as(i32, 7010); pub const FAX_ERR_NOT_SUPPORTED_ON_THIS_SKU = @as(i32, 7011); pub const FAX_ERR_VERSION_MISMATCH = @as(i32, 7012); pub const FAX_ERR_RECIPIENTS_LIMIT = @as(i32, 7013); pub const FAX_ERR_END = @as(i32, 7013); pub const FAX_E_SRV_OUTOFMEMORY = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214503)); pub const FAX_E_GROUP_NOT_FOUND = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214502)); pub const FAX_E_BAD_GROUP_CONFIGURATION = 
@import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214501)); pub const FAX_E_GROUP_IN_USE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214500)); pub const FAX_E_RULE_NOT_FOUND = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214499)); pub const FAX_E_NOT_NTFS = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214498)); pub const FAX_E_DIRECTORY_IN_USE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214497)); pub const FAX_E_FILE_ACCESS_DENIED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214496)); pub const FAX_E_MESSAGE_NOT_FOUND = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214495)); pub const FAX_E_DEVICE_NUM_LIMIT_EXCEEDED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214494)); pub const FAX_E_NOT_SUPPORTED_ON_THIS_SKU = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214493)); pub const FAX_E_VERSION_MISMATCH = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214492)); pub const FAX_E_RECIPIENTS_LIMIT = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147214491)); pub const JT_UNKNOWN = @as(u32, 0); pub const JT_SEND = @as(u32, 1); pub const JT_RECEIVE = @as(u32, 2); pub const JT_ROUTING = @as(u32, 3); pub const JT_FAIL_RECEIVE = @as(u32, 4); pub const JS_PENDING = @as(u32, 0); pub const JS_INPROGRESS = @as(u32, 1); pub const JS_DELETING = @as(u32, 2); pub const JS_FAILED = @as(u32, 4); pub const JS_PAUSED = @as(u32, 8); pub const JS_NOLINE = @as(u32, 16); pub const JS_RETRYING = @as(u32, 32); pub const JS_RETRIES_EXCEEDED = @as(u32, 64); pub const FPS_DIALING = @as(u32, 536870913); pub const FPS_SENDING = @as(u32, 536870914); pub const FPS_RECEIVING = @as(u32, 536870916); pub const FPS_COMPLETED = @as(u32, 536870920); pub const FPS_HANDLED = @as(u32, 536870928); pub const FPS_UNAVAILABLE = @as(u32, 536870944); pub const FPS_BUSY = @as(u32, 536870976); pub const FPS_NO_ANSWER = @as(u32, 536871040); pub const FPS_BAD_ADDRESS = @as(u32, 536871168); pub const 
FPS_NO_DIAL_TONE = @as(u32, 536871424); pub const FPS_DISCONNECTED = @as(u32, 536871936); pub const FPS_FATAL_ERROR = @as(u32, 536872960); pub const FPS_NOT_FAX_CALL = @as(u32, 536875008); pub const FPS_CALL_DELAYED = @as(u32, 536879104); pub const FPS_CALL_BLACKLISTED = @as(u32, 536887296); pub const FPS_INITIALIZING = @as(u32, 536903680); pub const FPS_OFFLINE = @as(u32, 536936448); pub const FPS_RINGING = @as(u32, 537001984); pub const FPS_AVAILABLE = @as(u32, 537919488); pub const FPS_ABORTING = @as(u32, 538968064); pub const FPS_ROUTING = @as(u32, 541065216); pub const FPS_ANSWERED = @as(u32, 545259520); pub const FPF_RECEIVE = @as(u32, 1); pub const FPF_SEND = @as(u32, 2); pub const FPF_VIRTUAL = @as(u32, 4); pub const FEI_DIALING = @as(u32, 1); pub const FEI_SENDING = @as(u32, 2); pub const FEI_RECEIVING = @as(u32, 3); pub const FEI_COMPLETED = @as(u32, 4); pub const FEI_BUSY = @as(u32, 5); pub const FEI_NO_ANSWER = @as(u32, 6); pub const FEI_BAD_ADDRESS = @as(u32, 7); pub const FEI_NO_DIAL_TONE = @as(u32, 8); pub const FEI_DISCONNECTED = @as(u32, 9); pub const FEI_FATAL_ERROR = @as(u32, 10); pub const FEI_NOT_FAX_CALL = @as(u32, 11); pub const FEI_CALL_DELAYED = @as(u32, 12); pub const FEI_CALL_BLACKLISTED = @as(u32, 13); pub const FEI_RINGING = @as(u32, 14); pub const FEI_ABORTING = @as(u32, 15); pub const FEI_ROUTING = @as(u32, 16); pub const FEI_MODEM_POWERED_ON = @as(u32, 17); pub const FEI_MODEM_POWERED_OFF = @as(u32, 18); pub const FEI_IDLE = @as(u32, 19); pub const FEI_FAXSVC_ENDED = @as(u32, 20); pub const FEI_ANSWERED = @as(u32, 21); pub const FEI_JOB_QUEUED = @as(u32, 22); pub const FEI_DELETED = @as(u32, 23); pub const FEI_INITIALIZING = @as(u32, 24); pub const FEI_LINE_UNAVAILABLE = @as(u32, 25); pub const FEI_HANDLED = @as(u32, 26); pub const FEI_FAXSVC_STARTED = @as(u32, 27); pub const FAX_JOB_SUBMIT = @as(u32, 1); pub const FAX_JOB_QUERY = @as(u32, 2); pub const FAX_CONFIG_QUERY = @as(u32, 4); pub const FAX_CONFIG_SET = @as(u32, 8); pub const 
FAX_PORT_QUERY = @as(u32, 16); pub const FAX_PORT_SET = @as(u32, 32); pub const FAX_JOB_MANAGE = @as(u32, 64); pub const lDEFAULT_PREFETCH_SIZE = @as(i32, 100); pub const wcharREASSIGN_RECIPIENTS_DELIMITER = @as(u16, 59); //-------------------------------------------------------------------------------- // Section: Types (260) //-------------------------------------------------------------------------------- pub const FAX_ENUM_LOG_LEVELS = enum(i32) { NONE = 0, MIN = 1, MED = 2, MAX = 3, }; pub const FAXLOG_LEVEL_NONE = FAX_ENUM_LOG_LEVELS.NONE; pub const FAXLOG_LEVEL_MIN = FAX_ENUM_LOG_LEVELS.MIN; pub const FAXLOG_LEVEL_MED = FAX_ENUM_LOG_LEVELS.MED; pub const FAXLOG_LEVEL_MAX = FAX_ENUM_LOG_LEVELS.MAX; pub const FAX_ENUM_LOG_CATEGORIES = enum(i32) { INIT = 1, OUTBOUND = 2, INBOUND = 3, UNKNOWN = 4, }; pub const FAXLOG_CATEGORY_INIT = FAX_ENUM_LOG_CATEGORIES.INIT; pub const FAXLOG_CATEGORY_OUTBOUND = FAX_ENUM_LOG_CATEGORIES.OUTBOUND; pub const FAXLOG_CATEGORY_INBOUND = FAX_ENUM_LOG_CATEGORIES.INBOUND; pub const FAXLOG_CATEGORY_UNKNOWN = FAX_ENUM_LOG_CATEGORIES.UNKNOWN; pub const FAX_LOG_CATEGORYA = extern struct { Name: ?[*:0]const u8, Category: u32, Level: u32, }; pub const FAX_LOG_CATEGORYW = extern struct { Name: ?[*:0]const u16, Category: u32, Level: u32, }; pub const FAX_TIME = extern struct { Hour: u16, Minute: u16, }; pub const FAX_CONFIGURATIONA = extern struct { SizeOfStruct: u32, Retries: u32, RetryDelay: u32, DirtyDays: u32, Branding: BOOL, UseDeviceTsid: BOOL, ServerCp: BOOL, PauseServerQueue: BOOL, StartCheapTime: FAX_TIME, StopCheapTime: FAX_TIME, ArchiveOutgoingFaxes: BOOL, ArchiveDirectory: ?[*:0]const u8, Reserved: ?[*:0]const u8, }; pub const FAX_CONFIGURATIONW = extern struct { SizeOfStruct: u32, Retries: u32, RetryDelay: u32, DirtyDays: u32, Branding: BOOL, UseDeviceTsid: BOOL, ServerCp: BOOL, PauseServerQueue: BOOL, StartCheapTime: FAX_TIME, StopCheapTime: FAX_TIME, ArchiveOutgoingFaxes: BOOL, ArchiveDirectory: ?[*:0]const u16, Reserved: 
?[*:0]const u16, }; pub const FAX_ENUM_JOB_COMMANDS = enum(i32) { UNKNOWN = 0, DELETE = 1, PAUSE = 2, RESUME = 3, }; pub const JC_UNKNOWN = FAX_ENUM_JOB_COMMANDS.UNKNOWN; pub const JC_DELETE = FAX_ENUM_JOB_COMMANDS.DELETE; pub const JC_PAUSE = FAX_ENUM_JOB_COMMANDS.PAUSE; pub const JC_RESUME = FAX_ENUM_JOB_COMMANDS.RESUME; pub const FAX_DEVICE_STATUSA = extern struct { SizeOfStruct: u32, CallerId: ?[*:0]const u8, Csid: ?[*:0]const u8, CurrentPage: u32, DeviceId: u32, DeviceName: ?[*:0]const u8, DocumentName: ?[*:0]const u8, JobType: u32, PhoneNumber: ?[*:0]const u8, RoutingString: ?[*:0]const u8, SenderName: ?[*:0]const u8, RecipientName: ?[*:0]const u8, Size: u32, StartTime: FILETIME, Status: u32, StatusString: ?[*:0]const u8, SubmittedTime: FILETIME, TotalPages: u32, Tsid: ?[*:0]const u8, UserName: ?[*:0]const u8, }; pub const FAX_DEVICE_STATUSW = extern struct { SizeOfStruct: u32, CallerId: ?[*:0]const u16, Csid: ?[*:0]const u16, CurrentPage: u32, DeviceId: u32, DeviceName: ?[*:0]const u16, DocumentName: ?[*:0]const u16, JobType: u32, PhoneNumber: ?[*:0]const u16, RoutingString: ?[*:0]const u16, SenderName: ?[*:0]const u16, RecipientName: ?[*:0]const u16, Size: u32, StartTime: FILETIME, Status: u32, StatusString: ?[*:0]const u16, SubmittedTime: FILETIME, TotalPages: u32, Tsid: ?[*:0]const u16, UserName: ?[*:0]const u16, }; pub const FAX_JOB_ENTRYA = extern struct { SizeOfStruct: u32, JobId: u32, UserName: ?[*:0]const u8, JobType: u32, QueueStatus: u32, Status: u32, Size: u32, PageCount: u32, RecipientNumber: ?[*:0]const u8, RecipientName: ?[*:0]const u8, Tsid: ?[*:0]const u8, SenderName: ?[*:0]const u8, SenderCompany: ?[*:0]const u8, SenderDept: ?[*:0]const u8, BillingCode: ?[*:0]const u8, ScheduleAction: u32, ScheduleTime: SYSTEMTIME, DeliveryReportType: u32, DeliveryReportAddress: ?[*:0]const u8, DocumentName: ?[*:0]const u8, }; pub const FAX_JOB_ENTRYW = extern struct { SizeOfStruct: u32, JobId: u32, UserName: ?[*:0]const u16, JobType: u32, QueueStatus: u32, 
Status: u32, Size: u32, PageCount: u32, RecipientNumber: ?[*:0]const u16, RecipientName: ?[*:0]const u16, Tsid: ?[*:0]const u16, SenderName: ?[*:0]const u16, SenderCompany: ?[*:0]const u16, SenderDept: ?[*:0]const u16, BillingCode: ?[*:0]const u16, ScheduleAction: u32, ScheduleTime: SYSTEMTIME, DeliveryReportType: u32, DeliveryReportAddress: ?[*:0]const u16, DocumentName: ?[*:0]const u16, }; pub const FAX_PORT_INFOA = extern struct { SizeOfStruct: u32, DeviceId: u32, State: u32, Flags: u32, Rings: u32, Priority: u32, DeviceName: ?[*:0]const u8, Tsid: ?[*:0]const u8, Csid: ?[*:0]const u8, }; pub const FAX_PORT_INFOW = extern struct { SizeOfStruct: u32, DeviceId: u32, State: u32, Flags: u32, Rings: u32, Priority: u32, DeviceName: ?[*:0]const u16, Tsid: ?[*:0]const u16, Csid: ?[*:0]const u16, }; pub const FAX_ROUTING_METHODA = extern struct { SizeOfStruct: u32, DeviceId: u32, Enabled: BOOL, DeviceName: ?[*:0]const u8, Guid: ?[*:0]const u8, FriendlyName: ?[*:0]const u8, FunctionName: ?[*:0]const u8, ExtensionImageName: ?[*:0]const u8, ExtensionFriendlyName: ?[*:0]const u8, }; pub const FAX_ROUTING_METHODW = extern struct { SizeOfStruct: u32, DeviceId: u32, Enabled: BOOL, DeviceName: ?[*:0]const u16, Guid: ?[*:0]const u16, FriendlyName: ?[*:0]const u16, FunctionName: ?[*:0]const u16, ExtensionImageName: ?[*:0]const u16, ExtensionFriendlyName: ?[*:0]const u16, }; pub const FAX_GLOBAL_ROUTING_INFOA = extern struct { SizeOfStruct: u32, Priority: u32, Guid: ?[*:0]const u8, FriendlyName: ?[*:0]const u8, FunctionName: ?[*:0]const u8, ExtensionImageName: ?[*:0]const u8, ExtensionFriendlyName: ?[*:0]const u8, }; pub const FAX_GLOBAL_ROUTING_INFOW = extern struct { SizeOfStruct: u32, Priority: u32, Guid: ?[*:0]const u16, FriendlyName: ?[*:0]const u16, FunctionName: ?[*:0]const u16, ExtensionImageName: ?[*:0]const u16, ExtensionFriendlyName: ?[*:0]const u16, }; pub const FAX_COVERPAGE_INFOA = extern struct { SizeOfStruct: u32, CoverPageName: ?[*:0]const u8, UseServerCoverPage: 
BOOL, RecName: ?[*:0]const u8, RecFaxNumber: ?[*:0]const u8, RecCompany: ?[*:0]const u8, RecStreetAddress: ?[*:0]const u8, RecCity: ?[*:0]const u8, RecState: ?[*:0]const u8, RecZip: ?[*:0]const u8, RecCountry: ?[*:0]const u8, RecTitle: ?[*:0]const u8, RecDepartment: ?[*:0]const u8, RecOfficeLocation: ?[*:0]const u8, RecHomePhone: ?[*:0]const u8, RecOfficePhone: ?[*:0]const u8, SdrName: ?[*:0]const u8, SdrFaxNumber: ?[*:0]const u8, SdrCompany: ?[*:0]const u8, SdrAddress: ?[*:0]const u8, SdrTitle: ?[*:0]const u8, SdrDepartment: ?[*:0]const u8, SdrOfficeLocation: ?[*:0]const u8, SdrHomePhone: ?[*:0]const u8, SdrOfficePhone: ?[*:0]const u8, Note: ?[*:0]const u8, Subject: ?[*:0]const u8, TimeSent: SYSTEMTIME, PageCount: u32, }; pub const FAX_COVERPAGE_INFOW = extern struct { SizeOfStruct: u32, CoverPageName: ?[*:0]const u16, UseServerCoverPage: BOOL, RecName: ?[*:0]const u16, RecFaxNumber: ?[*:0]const u16, RecCompany: ?[*:0]const u16, RecStreetAddress: ?[*:0]const u16, RecCity: ?[*:0]const u16, RecState: ?[*:0]const u16, RecZip: ?[*:0]const u16, RecCountry: ?[*:0]const u16, RecTitle: ?[*:0]const u16, RecDepartment: ?[*:0]const u16, RecOfficeLocation: ?[*:0]const u16, RecHomePhone: ?[*:0]const u16, RecOfficePhone: ?[*:0]const u16, SdrName: ?[*:0]const u16, SdrFaxNumber: ?[*:0]const u16, SdrCompany: ?[*:0]const u16, SdrAddress: ?[*:0]const u16, SdrTitle: ?[*:0]const u16, SdrDepartment: ?[*:0]const u16, SdrOfficeLocation: ?[*:0]const u16, SdrHomePhone: ?[*:0]const u16, SdrOfficePhone: ?[*:0]const u16, Note: ?[*:0]const u16, Subject: ?[*:0]const u16, TimeSent: SYSTEMTIME, PageCount: u32, }; pub const FAX_ENUM_JOB_SEND_ATTRIBUTES = enum(i32) { NOW = 0, SPECIFIC_TIME = 1, DISCOUNT_PERIOD = 2, }; pub const JSA_NOW = FAX_ENUM_JOB_SEND_ATTRIBUTES.NOW; pub const JSA_SPECIFIC_TIME = FAX_ENUM_JOB_SEND_ATTRIBUTES.SPECIFIC_TIME; pub const JSA_DISCOUNT_PERIOD = FAX_ENUM_JOB_SEND_ATTRIBUTES.DISCOUNT_PERIOD; pub const FAX_ENUM_DELIVERY_REPORT_TYPES = enum(i32) { NONE = 0, EMAIL = 1, 
INBOX = 2, }; pub const DRT_NONE = FAX_ENUM_DELIVERY_REPORT_TYPES.NONE; pub const DRT_EMAIL = FAX_ENUM_DELIVERY_REPORT_TYPES.EMAIL; pub const DRT_INBOX = FAX_ENUM_DELIVERY_REPORT_TYPES.INBOX; pub const FAX_JOB_PARAMA = extern struct { SizeOfStruct: u32, RecipientNumber: ?[*:0]const u8, RecipientName: ?[*:0]const u8, Tsid: ?[*:0]const u8, SenderName: ?[*:0]const u8, SenderCompany: ?[*:0]const u8, SenderDept: ?[*:0]const u8, BillingCode: ?[*:0]const u8, ScheduleAction: u32, ScheduleTime: SYSTEMTIME, DeliveryReportType: u32, DeliveryReportAddress: ?[*:0]const u8, DocumentName: ?[*:0]const u8, CallHandle: u32, Reserved: [3]usize, }; pub const FAX_JOB_PARAMW = extern struct { SizeOfStruct: u32, RecipientNumber: ?[*:0]const u16, RecipientName: ?[*:0]const u16, Tsid: ?[*:0]const u16, SenderName: ?[*:0]const u16, SenderCompany: ?[*:0]const u16, SenderDept: ?[*:0]const u16, BillingCode: ?[*:0]const u16, ScheduleAction: u32, ScheduleTime: SYSTEMTIME, DeliveryReportType: u32, DeliveryReportAddress: ?[*:0]const u16, DocumentName: ?[*:0]const u16, CallHandle: u32, Reserved: [3]usize, }; pub const FAX_EVENTA = extern struct { SizeOfStruct: u32, TimeStamp: FILETIME, DeviceId: u32, EventId: u32, JobId: u32, }; pub const FAX_EVENTW = extern struct { SizeOfStruct: u32, TimeStamp: FILETIME, DeviceId: u32, EventId: u32, JobId: u32, }; pub const FAX_PRINT_INFOA = extern struct { SizeOfStruct: u32, DocName: ?[*:0]const u8, RecipientName: ?[*:0]const u8, RecipientNumber: ?[*:0]const u8, SenderName: ?[*:0]const u8, SenderCompany: ?[*:0]const u8, SenderDept: ?[*:0]const u8, SenderBillingCode: ?[*:0]const u8, Reserved: ?[*:0]const u8, DrEmailAddress: ?[*:0]const u8, OutputFileName: ?[*:0]const u8, }; pub const FAX_PRINT_INFOW = extern struct { SizeOfStruct: u32, DocName: ?[*:0]const u16, RecipientName: ?[*:0]const u16, RecipientNumber: ?[*:0]const u16, SenderName: ?[*:0]const u16, SenderCompany: ?[*:0]const u16, SenderDept: ?[*:0]const u16, SenderBillingCode: ?[*:0]const u16, Reserved: 
?[*:0]const u16, DrEmailAddress: ?[*:0]const u16, OutputFileName: ?[*:0]const u16, }; pub const FAX_CONTEXT_INFOA = extern struct { SizeOfStruct: u32, hDC: ?HDC, ServerName: [16]CHAR, }; pub const FAX_CONTEXT_INFOW = extern struct { SizeOfStruct: u32, hDC: ?HDC, ServerName: [16]u16, }; pub const PFAXCONNECTFAXSERVERA = fn( MachineName: ?[*:0]const u8, FaxHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXCONNECTFAXSERVERW = fn( MachineName: ?[*:0]const u16, FaxHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXCLOSE = fn( FaxHandle: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const FAX_ENUM_PORT_OPEN_TYPE = enum(i32) { QUERY = 1, MODIFY = 2, }; pub const PORT_OPEN_QUERY = FAX_ENUM_PORT_OPEN_TYPE.QUERY; pub const PORT_OPEN_MODIFY = FAX_ENUM_PORT_OPEN_TYPE.MODIFY; pub const PFAXOPENPORT = fn( FaxHandle: ?HANDLE, DeviceId: u32, Flags: u32, FaxPortHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXCOMPLETEJOBPARAMSA = fn( JobParams: ?*?*FAX_JOB_PARAMA, CoverpageInfo: ?*?*FAX_COVERPAGE_INFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXCOMPLETEJOBPARAMSW = fn( JobParams: ?*?*FAX_JOB_PARAMW, CoverpageInfo: ?*?*FAX_COVERPAGE_INFOW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSENDDOCUMENTA = fn( FaxHandle: ?HANDLE, FileName: ?[*:0]const u8, JobParams: ?*FAX_JOB_PARAMA, CoverpageInfo: ?*const FAX_COVERPAGE_INFOA, FaxJobId: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSENDDOCUMENTW = fn( FaxHandle: ?HANDLE, FileName: ?[*:0]const u16, JobParams: ?*FAX_JOB_PARAMW, CoverpageInfo: ?*const FAX_COVERPAGE_INFOW, FaxJobId: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAX_RECIPIENT_CALLBACKA = fn( FaxHandle: ?HANDLE, RecipientNumber: u32, Context: ?*c_void, JobParams: ?*FAX_JOB_PARAMA, CoverpageInfo: ?*FAX_COVERPAGE_INFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; 
// Generated flat-API function-pointer types (continued): W recipient callback, broadcast send,
// job enumeration/get/set, page-data retrieval, device status, abort, and server configuration
// get/set in both ANSI (A) and wide (W) flavors. `?*?*T` parameters are out-pointers that the
// fax service fills with service-allocated buffers (freed via PFAXFREEBUFFER later in this file).
pub const PFAX_RECIPIENT_CALLBACKW = fn( FaxHandle: ?HANDLE, RecipientNumber: u32, Context: ?*c_void, JobParams: ?*FAX_JOB_PARAMW, CoverpageInfo: ?*FAX_COVERPAGE_INFOW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSENDDOCUMENTFORBROADCASTA = fn( FaxHandle: ?HANDLE, FileName: ?[*:0]const u8, FaxJobId: ?*u32, FaxRecipientCallback: ?PFAX_RECIPIENT_CALLBACKA, Context: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSENDDOCUMENTFORBROADCASTW = fn( FaxHandle: ?HANDLE, FileName: ?[*:0]const u16, FaxJobId: ?*u32, FaxRecipientCallback: ?PFAX_RECIPIENT_CALLBACKW, Context: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMJOBSA = fn( FaxHandle: ?HANDLE, JobEntry: ?*?*FAX_JOB_ENTRYA, JobsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMJOBSW = fn( FaxHandle: ?HANDLE, JobEntry: ?*?*FAX_JOB_ENTRYW, JobsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETJOBA = fn( FaxHandle: ?HANDLE, JobId: u32, JobEntry: ?*?*FAX_JOB_ENTRYA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETJOBW = fn( FaxHandle: ?HANDLE, JobId: u32, JobEntry: ?*?*FAX_JOB_ENTRYW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETJOBA = fn( FaxHandle: ?HANDLE, JobId: u32, Command: u32, JobEntry: ?*const FAX_JOB_ENTRYA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETJOBW = fn( FaxHandle: ?HANDLE, JobId: u32, Command: u32, JobEntry: ?*const FAX_JOB_ENTRYW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETPAGEDATA = fn( FaxHandle: ?HANDLE, JobId: u32, Buffer: ?*?*u8, BufferSize: ?*u32, ImageWidth: ?*u32, ImageHeight: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETDEVICESTATUSA = fn( FaxPortHandle: ?HANDLE, DeviceStatus: ?*?*FAX_DEVICE_STATUSA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETDEVICESTATUSW = fn( FaxPortHandle: ?HANDLE, DeviceStatus:
// (declaration continues across the generated line break)
?*?*FAX_DEVICE_STATUSW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXABORT = fn( FaxHandle: ?HANDLE, JobId: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETCONFIGURATIONA = fn( FaxHandle: ?HANDLE, FaxConfig: ?*?*FAX_CONFIGURATIONA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETCONFIGURATIONW = fn( FaxHandle: ?HANDLE, FaxConfig: ?*?*FAX_CONFIGURATIONW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETCONFIGURATIONA = fn( FaxHandle: ?HANDLE, FaxConfig: ?*const FAX_CONFIGURATIONA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETCONFIGURATIONW = fn( FaxHandle: ?HANDLE, FaxConfig: ?*const FAX_CONFIGURATIONW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETLOGGINGCATEGORIESA = fn( FaxHandle: ?HANDLE, Categories: ?*?*FAX_LOG_CATEGORYA, NumberCategories: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETLOGGINGCATEGORIESW = fn( FaxHandle: ?HANDLE, Categories: ?*?*FAX_LOG_CATEGORYW, NumberCategories: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETLOGGINGCATEGORIESA = fn( FaxHandle: ?HANDLE, Categories: ?*const FAX_LOG_CATEGORYA, NumberCategories: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETLOGGINGCATEGORIESW = fn( FaxHandle: ?HANDLE, Categories: ?*const FAX_LOG_CATEGORYW, NumberCategories: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMPORTSA = fn( FaxHandle: ?HANDLE, PortInfo: ?*?*FAX_PORT_INFOA, PortsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMPORTSW = fn( FaxHandle: ?HANDLE, PortInfo: ?*?*FAX_PORT_INFOW, PortsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETPORTA = fn( FaxPortHandle: ?HANDLE, PortInfo: ?*?*FAX_PORT_INFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETPORTW = fn( FaxPortHandle: ?HANDLE, PortInfo: ?*?*FAX_PORT_INFOW, )
// Generated flat-API function-pointer types (continued): set-port, routing-method
// enumeration/enable, global routing info get/enum/set, per-method routing info get/set,
// event-queue initialization, buffer free, print-job start, cover-page printing, and
// service-provider (un)registration; ends with the routing-extension installation callback.
callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETPORTA = fn( FaxPortHandle: ?HANDLE, PortInfo: ?*const FAX_PORT_INFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETPORTW = fn( FaxPortHandle: ?HANDLE, PortInfo: ?*const FAX_PORT_INFOW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMROUTINGMETHODSA = fn( FaxPortHandle: ?HANDLE, RoutingMethod: ?*?*FAX_ROUTING_METHODA, MethodsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMROUTINGMETHODSW = fn( FaxPortHandle: ?HANDLE, RoutingMethod: ?*?*FAX_ROUTING_METHODW, MethodsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENABLEROUTINGMETHODA = fn( FaxPortHandle: ?HANDLE, RoutingGuid: ?[*:0]const u8, Enabled: BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENABLEROUTINGMETHODW = fn( FaxPortHandle: ?HANDLE, RoutingGuid: ?[*:0]const u16, Enabled: BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMGLOBALROUTINGINFOA = fn( FaxHandle: ?HANDLE, RoutingInfo: ?*?*FAX_GLOBAL_ROUTING_INFOA, MethodsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXENUMGLOBALROUTINGINFOW = fn( FaxHandle: ?HANDLE, RoutingInfo: ?*?*FAX_GLOBAL_ROUTING_INFOW, MethodsReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETGLOBALROUTINGINFOA = fn( FaxPortHandle: ?HANDLE, RoutingInfo: ?*const FAX_GLOBAL_ROUTING_INFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETGLOBALROUTINGINFOW = fn( FaxPortHandle: ?HANDLE, RoutingInfo: ?*const FAX_GLOBAL_ROUTING_INFOW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETROUTINGINFOA = fn( FaxPortHandle: ?HANDLE, RoutingGuid: ?[*:0]const u8, RoutingInfoBuffer: ?*?*u8, RoutingInfoBufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXGETROUTINGINFOW = fn( FaxPortHandle: ?HANDLE, RoutingGuid: ?[*:0]const u16, RoutingInfoBuffer:
// (declaration continues across the generated line break)
?*?*u8, RoutingInfoBufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETROUTINGINFOA = fn( FaxPortHandle: ?HANDLE, RoutingGuid: ?[*:0]const u8, RoutingInfoBuffer: ?*const u8, RoutingInfoBufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSETROUTINGINFOW = fn( FaxPortHandle: ?HANDLE, RoutingGuid: ?[*:0]const u16, RoutingInfoBuffer: ?*const u8, RoutingInfoBufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXINITIALIZEEVENTQUEUE = fn( FaxHandle: ?HANDLE, CompletionPort: ?HANDLE, CompletionKey: usize, hWnd: ?HWND, MessageStart: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXFREEBUFFER = fn( Buffer: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) void; pub const PFAXSTARTPRINTJOBA = fn( PrinterName: ?[*:0]const u8, PrintInfo: ?*const FAX_PRINT_INFOA, FaxJobId: ?*u32, FaxContextInfo: ?*FAX_CONTEXT_INFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXSTARTPRINTJOBW = fn( PrinterName: ?[*:0]const u16, PrintInfo: ?*const FAX_PRINT_INFOW, FaxJobId: ?*u32, FaxContextInfo: ?*FAX_CONTEXT_INFOW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXPRINTCOVERPAGEA = fn( FaxContextInfo: ?*const FAX_CONTEXT_INFOA, CoverPageInfo: ?*const FAX_COVERPAGE_INFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXPRINTCOVERPAGEW = fn( FaxContextInfo: ?*const FAX_CONTEXT_INFOW, CoverPageInfo: ?*const FAX_COVERPAGE_INFOW, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXREGISTERSERVICEPROVIDERW = fn( DeviceProvider: ?[*:0]const u16, FriendlyName: ?[*:0]const u16, ImageName: ?[*:0]const u16, TspName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXUNREGISTERSERVICEPROVIDERW = fn( DeviceProvider: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAX_ROUTING_INSTALLATION_CALLBACKW = fn( FaxHandle: ?HANDLE, Context: ?*c_void, MethodName: ?PWSTR,
// Generated fax-service-provider (FSP) surface: routing-extension registration, access check,
// the FAX_SEND / FAX_RECEIVE / FAX_DEV_STATUS extern structs exchanged with a provider, the
// provider callback types (service/line/send), and the PFAXDEV* entry points an FSP DLL exports
// (initialize, virtual-device creation, start/end job, send, receive, report status, abort,
// configure, shutdown). Ends with the first CLSID GUID constants of the FAXCOMEX COM classes.
// NOTE(review): GUID strings are ABI identifiers — byte-for-byte, never edit by hand.
FriendlyName: ?PWSTR, FunctionName: ?PWSTR, Guid: ?PWSTR, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXREGISTERROUTINGEXTENSIONW = fn( FaxHandle: ?HANDLE, ExtensionName: ?[*:0]const u16, FriendlyName: ?[*:0]const u16, ImageName: ?[*:0]const u16, CallBack: ?PFAX_ROUTING_INSTALLATION_CALLBACKW, Context: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXACCESSCHECK = fn( FaxHandle: ?HANDLE, AccessMask: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const FAX_SEND = extern struct { SizeOfStruct: u32, FileName: ?PWSTR, CallerName: ?PWSTR, CallerNumber: ?PWSTR, ReceiverName: ?PWSTR, ReceiverNumber: ?PWSTR, Branding: BOOL, CallHandle: u32, Reserved: [3]u32, }; pub const FAX_RECEIVE = extern struct { SizeOfStruct: u32, FileName: ?PWSTR, ReceiverName: ?PWSTR, ReceiverNumber: ?PWSTR, Reserved: [4]u32, }; pub const FAX_DEV_STATUS = extern struct { SizeOfStruct: u32, StatusId: u32, StringId: u32, PageCount: u32, CSI: ?PWSTR, CallerId: ?PWSTR, RoutingInfo: ?PWSTR, ErrorCode: u32, Reserved: [3]u32, }; pub const PFAX_SERVICE_CALLBACK = fn( FaxHandle: ?HANDLE, DeviceId: u32, Param1: usize, Param2: usize, Param3: usize, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAX_LINECALLBACK = fn( FaxHandle: ?HANDLE, hDevice: u32, dwMessage: u32, dwInstance: usize, dwParam1: usize, dwParam2: usize, dwParam3: usize, ) callconv(@import("std").os.windows.WINAPI) void; pub const PFAX_SEND_CALLBACK = fn( FaxHandle: ?HANDLE, CallHandle: u32, Reserved1: u32, Reserved2: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVINITIALIZE = fn( param0: u32, param1: ?HANDLE, param2: ?*?PFAX_LINECALLBACK, param3: ?PFAX_SERVICE_CALLBACK, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVVIRTUALDEVICECREATION = fn( DeviceCount: ?*u32, DeviceNamePrefix: *[128]u16, DeviceIdPrefix: ?*u32, CompletionPort: ?HANDLE, CompletionKey: usize, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const
// (declaration continues across the generated line break)
PFAXDEVSTARTJOB = fn( param0: u32, param1: u32, param2: ?*?HANDLE, param3: ?HANDLE, param4: usize, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVENDJOB = fn( param0: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVSEND = fn( param0: ?HANDLE, param1: ?*FAX_SEND, param2: ?PFAX_SEND_CALLBACK, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVRECEIVE = fn( param0: ?HANDLE, param1: u32, param2: ?*FAX_RECEIVE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVREPORTSTATUS = fn( param0: ?HANDLE, param1: ?*FAX_DEV_STATUS, param2: u32, param3: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVABORTOPERATION = fn( param0: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVCONFIGURE = fn( param0: ?*?HPROPSHEETPAGE, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFAXDEVSHUTDOWN = fn( ) callconv(@import("std").os.windows.WINAPI) HRESULT; const CLSID_FaxServer_Value = @import("../zig.zig").Guid.initString("cda8acb0-8cf5-4f6c-9ba2-5931d40c8cae"); pub const CLSID_FaxServer = &CLSID_FaxServer_Value; const CLSID_FaxDeviceProviders_Value = @import("../zig.zig").Guid.initString("eb8fe768-875a-4f5f-82c5-03f23aac1bd7"); pub const CLSID_FaxDeviceProviders = &CLSID_FaxDeviceProviders_Value; const CLSID_FaxDevices_Value = @import("../zig.zig").Guid.initString("5589e28e-23cb-4919-8808-e6101846e80d"); pub const CLSID_FaxDevices = &CLSID_FaxDevices_Value; const CLSID_FaxInboundRouting_Value = @import("../zig.zig").Guid.initString("e80248ed-ad65-4218-8108-991924d4e7ed"); pub const CLSID_FaxInboundRouting = &CLSID_FaxInboundRouting_Value; const CLSID_FaxFolders_Value = @import("../zig.zig").Guid.initString("c35211d7-5776-48cb-af44-c31be3b2cfe5"); pub const CLSID_FaxFolders = &CLSID_FaxFolders_Value; const CLSID_FaxLoggingOptions_Value = @import("../zig.zig").Guid.initString("1bf9eea6-ece0-4785-a18b-de56e9eef96a"); pub const
// Generated FAXCOMEX CLSID constants (continued). Pattern throughout: a private
// `*_Value` Guid built at comptime from its registry string, plus a public pointer
// alias `CLSID_*` exposed to callers (COM APIs take REFCLSID, i.e. a Guid pointer).
CLSID_FaxLoggingOptions = &CLSID_FaxLoggingOptions_Value; const CLSID_FaxActivity_Value = @import("../zig.zig").Guid.initString("cfef5d0e-e84d-462e-aabb-87d31eb04fef"); pub const CLSID_FaxActivity = &CLSID_FaxActivity_Value; const CLSID_FaxOutboundRouting_Value = @import("../zig.zig").Guid.initString("c81b385e-b869-4afd-86c0-616498ed9be2"); pub const CLSID_FaxOutboundRouting = &CLSID_FaxOutboundRouting_Value; const CLSID_FaxReceiptOptions_Value = @import("../zig.zig").Guid.initString("6982487b-227b-4c96-a61c-248348b05ab6"); pub const CLSID_FaxReceiptOptions = &CLSID_FaxReceiptOptions_Value; const CLSID_FaxSecurity_Value = @import("../zig.zig").Guid.initString("10c4ddde-abf0-43df-964f-7f3ac21a4c7b"); pub const CLSID_FaxSecurity = &CLSID_FaxSecurity_Value; const CLSID_FaxDocument_Value = @import("../zig.zig").Guid.initString("0f3f9f91-c838-415e-a4f3-3e828ca445e0"); pub const CLSID_FaxDocument = &CLSID_FaxDocument_Value; const CLSID_FaxSender_Value = @import("../zig.zig").Guid.initString("265d84d0-1850-4360-b7c8-758bbb5f0b96"); pub const CLSID_FaxSender = &CLSID_FaxSender_Value; const CLSID_FaxRecipients_Value = @import("../zig.zig").Guid.initString("ea9bdf53-10a9-4d4f-a067-63c8f84f01b0"); pub const CLSID_FaxRecipients = &CLSID_FaxRecipients_Value; const CLSID_FaxIncomingArchive_Value = @import("../zig.zig").Guid.initString("8426c56a-35a1-4c6f-af93-fc952422e2c2"); pub const CLSID_FaxIncomingArchive = &CLSID_FaxIncomingArchive_Value; const CLSID_FaxIncomingQueue_Value = @import("../zig.zig").Guid.initString("69131717-f3f1-40e3-809d-a6cbf7bd85e5"); pub const CLSID_FaxIncomingQueue = &CLSID_FaxIncomingQueue_Value; const CLSID_FaxOutgoingArchive_Value = @import("../zig.zig").Guid.initString("43c28403-e04f-474d-990c-b94669148f59"); pub const CLSID_FaxOutgoingArchive = &CLSID_FaxOutgoingArchive_Value; const CLSID_FaxOutgoingQueue_Value = @import("../zig.zig").Guid.initString("7421169e-8c43-4b0d-bb16-645c8fa40357"); pub const CLSID_FaxOutgoingQueue =
// (declaration continues across the generated line break)
&CLSID_FaxOutgoingQueue_Value; const CLSID_FaxIncomingMessageIterator_Value = @import("../zig.zig").Guid.initString("6088e1d8-3fc8-45c2-87b1-909a29607ea9"); pub const CLSID_FaxIncomingMessageIterator = &CLSID_FaxIncomingMessageIterator_Value; const CLSID_FaxIncomingMessage_Value = @import("../zig.zig").Guid.initString("1932fcf7-9d43-4d5a-89ff-03861b321736"); pub const CLSID_FaxIncomingMessage = &CLSID_FaxIncomingMessage_Value; const CLSID_FaxOutgoingJobs_Value = @import("../zig.zig").Guid.initString("92bf2a6c-37be-43fa-a37d-cb0e5f753b35"); pub const CLSID_FaxOutgoingJobs = &CLSID_FaxOutgoingJobs_Value; const CLSID_FaxOutgoingJob_Value = @import("../zig.zig").Guid.initString("71bb429c-0ef9-4915-bec5-a5d897a3e924"); pub const CLSID_FaxOutgoingJob = &CLSID_FaxOutgoingJob_Value; const CLSID_FaxOutgoingMessageIterator_Value = @import("../zig.zig").Guid.initString("8a3224d0-d30b-49de-9813-cb385790fbbb"); pub const CLSID_FaxOutgoingMessageIterator = &CLSID_FaxOutgoingMessageIterator_Value; const CLSID_FaxOutgoingMessage_Value = @import("../zig.zig").Guid.initString("91b4a378-4ad8-4aef-a4dc-97d96e939a3a"); pub const CLSID_FaxOutgoingMessage = &CLSID_FaxOutgoingMessage_Value; const CLSID_FaxIncomingJobs_Value = @import("../zig.zig").Guid.initString("a1bb8a43-8866-4fb7-a15d-6266c875a5cc"); pub const CLSID_FaxIncomingJobs = &CLSID_FaxIncomingJobs_Value; const CLSID_FaxIncomingJob_Value = @import("../zig.zig").Guid.initString("c47311ec-ae32-41b8-ae4b-3eae0629d0c9"); pub const CLSID_FaxIncomingJob = &CLSID_FaxIncomingJob_Value; const CLSID_FaxDeviceProvider_Value = @import("../zig.zig").Guid.initString("17cf1aa3-f5eb-484a-9c9a-4440a5baabfc"); pub const CLSID_FaxDeviceProvider = &CLSID_FaxDeviceProvider_Value; const CLSID_FaxDevice_Value = @import("../zig.zig").Guid.initString("59e3a5b2-d676-484b-a6de-720bfa89b5af"); pub const CLSID_FaxDevice = &CLSID_FaxDevice_Value; const CLSID_FaxActivityLogging_Value =
// Generated FAXCOMEX CLSID constants (continued): logging, outbound-routing groups/rules,
// inbound-routing extensions/methods, job status, recipient, configuration, and the
// per-account class objects (same private-Value / public-pointer pattern as above).
@import("../zig.zig").Guid.initString("f0a0294e-3bbd-48b8-8f13-8c591a55bdbc"); pub const CLSID_FaxActivityLogging = &CLSID_FaxActivityLogging_Value; const CLSID_FaxEventLogging_Value = @import("../zig.zig").Guid.initString("a6850930-a0f6-4a6f-95b7-db2ebf3d02e3"); pub const CLSID_FaxEventLogging = &CLSID_FaxEventLogging_Value; const CLSID_FaxOutboundRoutingGroups_Value = @import("../zig.zig").Guid.initString("ccbea1a5-e2b4-4b57-9421-b04b6289464b"); pub const CLSID_FaxOutboundRoutingGroups = &CLSID_FaxOutboundRoutingGroups_Value; const CLSID_FaxOutboundRoutingGroup_Value = @import("../zig.zig").Guid.initString("0213f3e0-6791-4d77-a271-04d2357c50d6"); pub const CLSID_FaxOutboundRoutingGroup = &CLSID_FaxOutboundRoutingGroup_Value; const CLSID_FaxDeviceIds_Value = @import("../zig.zig").Guid.initString("cdc539ea-7277-460e-8de0-48a0a5760d1f"); pub const CLSID_FaxDeviceIds = &CLSID_FaxDeviceIds_Value; const CLSID_FaxOutboundRoutingRules_Value = @import("../zig.zig").Guid.initString("d385beca-e624-4473-bfaa-9f4000831f54"); pub const CLSID_FaxOutboundRoutingRules = &CLSID_FaxOutboundRoutingRules_Value; const CLSID_FaxOutboundRoutingRule_Value = @import("../zig.zig").Guid.initString("6549eebf-08d1-475a-828b-3bf105952fa0"); pub const CLSID_FaxOutboundRoutingRule = &CLSID_FaxOutboundRoutingRule_Value; const CLSID_FaxInboundRoutingExtensions_Value = @import("../zig.zig").Guid.initString("189a48ed-623c-4c0d-80f2-d66c7b9efec2"); pub const CLSID_FaxInboundRoutingExtensions = &CLSID_FaxInboundRoutingExtensions_Value; const CLSID_FaxInboundRoutingExtension_Value = @import("../zig.zig").Guid.initString("1d7dfb51-7207-4436-a0d9-24e32ee56988"); pub const CLSID_FaxInboundRoutingExtension = &CLSID_FaxInboundRoutingExtension_Value; const CLSID_FaxInboundRoutingMethods_Value = @import("../zig.zig").Guid.initString("25fcb76a-b750-4b82-9266-fbbbae8922ba"); pub const CLSID_FaxInboundRoutingMethods = &CLSID_FaxInboundRoutingMethods_Value; const CLSID_FaxInboundRoutingMethod_Value =
// (declaration continues across the generated line break)
@import("../zig.zig").Guid.initString("4b9fd75c-0194-4b72-9ce5-02a8205ac7d4"); pub const CLSID_FaxInboundRoutingMethod = &CLSID_FaxInboundRoutingMethod_Value; const CLSID_FaxJobStatus_Value = @import("../zig.zig").Guid.initString("7bf222f4-be8d-442f-841d-6132742423bb"); pub const CLSID_FaxJobStatus = &CLSID_FaxJobStatus_Value; const CLSID_FaxRecipient_Value = @import("../zig.zig").Guid.initString("60bf3301-7df8-4bd8-9148-7b5801f9efdf"); pub const CLSID_FaxRecipient = &CLSID_FaxRecipient_Value; const CLSID_FaxConfiguration_Value = @import("../zig.zig").Guid.initString("5857326f-e7b3-41a7-9c19-a91b463e2d56"); pub const CLSID_FaxConfiguration = &CLSID_FaxConfiguration_Value; const CLSID_FaxAccountSet_Value = @import("../zig.zig").Guid.initString("fbc23c4b-79e0-4291-bc56-c12e253bbf3a"); pub const CLSID_FaxAccountSet = &CLSID_FaxAccountSet_Value; const CLSID_FaxAccounts_Value = @import("../zig.zig").Guid.initString("da1f94aa-ee2c-47c0-8f4f-2a217075b76e"); pub const CLSID_FaxAccounts = &CLSID_FaxAccounts_Value; const CLSID_FaxAccount_Value = @import("../zig.zig").Guid.initString("a7e0647f-4524-4464-a56d-b9fe666f715e"); pub const CLSID_FaxAccount = &CLSID_FaxAccount_Value; const CLSID_FaxAccountFolders_Value = @import("../zig.zig").Guid.initString("85398f49-c034-4a3f-821c-db7d685e8129"); pub const CLSID_FaxAccountFolders = &CLSID_FaxAccountFolders_Value; const CLSID_FaxAccountIncomingQueue_Value = @import("../zig.zig").Guid.initString("9bcf6094-b4da-45f4-b8d6-ddeb2186652c"); pub const CLSID_FaxAccountIncomingQueue = &CLSID_FaxAccountIncomingQueue_Value; const CLSID_FaxAccountOutgoingQueue_Value = @import("../zig.zig").Guid.initString("feeceefb-c149-48ba-bab8-b791e101f62f"); pub const CLSID_FaxAccountOutgoingQueue = &CLSID_FaxAccountOutgoingQueue_Value; const CLSID_FaxAccountIncomingArchive_Value = @import("../zig.zig").Guid.initString("14b33db5-4c40-4ecf-9ef8-a360cbe809ed"); pub const CLSID_FaxAccountIncomingArchive = &CLSID_FaxAccountIncomingArchive_Value; const
// Last generated CLSIDs, then the job-state enums: FAX_JOB_STATUS_ENUM and
// FAX_JOB_EXTENDED_STATUS_ENUM (power-of-two values — combinable as bit flags in the
// underlying API even though declared as enum(i32)), FAX_JOB_OPERATIONS_ENUM, and
// FAX_JOB_TYPE_ENUM, each followed by the generator's flat fjs*/fjes*/fjo*/fjt* aliases.
CLSID_FaxAccountOutgoingArchive_Value = @import("../zig.zig").Guid.initString("851e7af5-433a-4739-a2df-ad245c2cb98e"); pub const CLSID_FaxAccountOutgoingArchive = &CLSID_FaxAccountOutgoingArchive_Value; const CLSID_FaxSecurity2_Value = @import("../zig.zig").Guid.initString("735c1248-ec89-4c30-a127-656e92e3c4ea"); pub const CLSID_FaxSecurity2 = &CLSID_FaxSecurity2_Value; pub const FAX_JOB_STATUS_ENUM = enum(i32) { PENDING = 1, INPROGRESS = 2, FAILED = 8, PAUSED = 16, NOLINE = 32, RETRYING = 64, RETRIES_EXCEEDED = 128, COMPLETED = 256, CANCELED = 512, CANCELING = 1024, ROUTING = 2048, }; pub const fjsPENDING = FAX_JOB_STATUS_ENUM.PENDING; pub const fjsINPROGRESS = FAX_JOB_STATUS_ENUM.INPROGRESS; pub const fjsFAILED = FAX_JOB_STATUS_ENUM.FAILED; pub const fjsPAUSED = FAX_JOB_STATUS_ENUM.PAUSED; pub const fjsNOLINE = FAX_JOB_STATUS_ENUM.NOLINE; pub const fjsRETRYING = FAX_JOB_STATUS_ENUM.RETRYING; pub const fjsRETRIES_EXCEEDED = FAX_JOB_STATUS_ENUM.RETRIES_EXCEEDED; pub const fjsCOMPLETED = FAX_JOB_STATUS_ENUM.COMPLETED; pub const fjsCANCELED = FAX_JOB_STATUS_ENUM.CANCELED; pub const fjsCANCELING = FAX_JOB_STATUS_ENUM.CANCELING; pub const fjsROUTING = FAX_JOB_STATUS_ENUM.ROUTING; pub const FAX_JOB_EXTENDED_STATUS_ENUM = enum(i32) { NONE = 0, DISCONNECTED = 1, INITIALIZING = 2, DIALING = 3, TRANSMITTING = 4, ANSWERED = 5, RECEIVING = 6, LINE_UNAVAILABLE = 7, BUSY = 8, NO_ANSWER = 9, BAD_ADDRESS = 10, NO_DIAL_TONE = 11, FATAL_ERROR = 12, CALL_DELAYED = 13, CALL_BLACKLISTED = 14, NOT_FAX_CALL = 15, PARTIALLY_RECEIVED = 16, HANDLED = 17, CALL_COMPLETED = 18, CALL_ABORTED = 19, PROPRIETARY = 16777216, }; pub const fjesNONE = FAX_JOB_EXTENDED_STATUS_ENUM.NONE; pub const fjesDISCONNECTED = FAX_JOB_EXTENDED_STATUS_ENUM.DISCONNECTED; pub const fjesINITIALIZING = FAX_JOB_EXTENDED_STATUS_ENUM.INITIALIZING; pub const fjesDIALING = FAX_JOB_EXTENDED_STATUS_ENUM.DIALING; pub const fjesTRANSMITTING = FAX_JOB_EXTENDED_STATUS_ENUM.TRANSMITTING; pub const fjesANSWERED =
// (declaration continues across the generated line break)
FAX_JOB_EXTENDED_STATUS_ENUM.ANSWERED; pub const fjesRECEIVING = FAX_JOB_EXTENDED_STATUS_ENUM.RECEIVING; pub const fjesLINE_UNAVAILABLE = FAX_JOB_EXTENDED_STATUS_ENUM.LINE_UNAVAILABLE; pub const fjesBUSY = FAX_JOB_EXTENDED_STATUS_ENUM.BUSY; pub const fjesNO_ANSWER = FAX_JOB_EXTENDED_STATUS_ENUM.NO_ANSWER; pub const fjesBAD_ADDRESS = FAX_JOB_EXTENDED_STATUS_ENUM.BAD_ADDRESS; pub const fjesNO_DIAL_TONE = FAX_JOB_EXTENDED_STATUS_ENUM.NO_DIAL_TONE; pub const fjesFATAL_ERROR = FAX_JOB_EXTENDED_STATUS_ENUM.FATAL_ERROR; pub const fjesCALL_DELAYED = FAX_JOB_EXTENDED_STATUS_ENUM.CALL_DELAYED; pub const fjesCALL_BLACKLISTED = FAX_JOB_EXTENDED_STATUS_ENUM.CALL_BLACKLISTED; pub const fjesNOT_FAX_CALL = FAX_JOB_EXTENDED_STATUS_ENUM.NOT_FAX_CALL; pub const fjesPARTIALLY_RECEIVED = FAX_JOB_EXTENDED_STATUS_ENUM.PARTIALLY_RECEIVED; pub const fjesHANDLED = FAX_JOB_EXTENDED_STATUS_ENUM.HANDLED; pub const fjesCALL_COMPLETED = FAX_JOB_EXTENDED_STATUS_ENUM.CALL_COMPLETED; pub const fjesCALL_ABORTED = FAX_JOB_EXTENDED_STATUS_ENUM.CALL_ABORTED; pub const fjesPROPRIETARY = FAX_JOB_EXTENDED_STATUS_ENUM.PROPRIETARY; pub const FAX_JOB_OPERATIONS_ENUM = enum(i32) { VIEW = 1, PAUSE = 2, RESUME = 4, RESTART = 8, DELETE = 16, RECIPIENT_INFO = 32, SENDER_INFO = 64, }; pub const fjoVIEW = FAX_JOB_OPERATIONS_ENUM.VIEW; pub const fjoPAUSE = FAX_JOB_OPERATIONS_ENUM.PAUSE; pub const fjoRESUME = FAX_JOB_OPERATIONS_ENUM.RESUME; pub const fjoRESTART = FAX_JOB_OPERATIONS_ENUM.RESTART; pub const fjoDELETE = FAX_JOB_OPERATIONS_ENUM.DELETE; pub const fjoRECIPIENT_INFO = FAX_JOB_OPERATIONS_ENUM.RECIPIENT_INFO; pub const fjoSENDER_INFO = FAX_JOB_OPERATIONS_ENUM.SENDER_INFO; pub const FAX_JOB_TYPE_ENUM = enum(i32) { SEND = 0, RECEIVE = 1, ROUTING = 2, }; pub const fjtSEND = FAX_JOB_TYPE_ENUM.SEND; pub const fjtRECEIVE = FAX_JOB_TYPE_ENUM.RECEIVE; pub const fjtROUTING = FAX_JOB_TYPE_ENUM.ROUTING; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxJobStatus_Value =
// IFaxJobStatus COM interface (generated): IID constant, the vtable layout (extends
// IDispatch.VTable; all entries are read-only `get_*` property accessors returning HRESULT
// through out-pointers), and a MethodMixin that wraps each vtable slot in an inline
// interface-name-prefixed helper. `pbstrXxx: ?*?BSTR` out-params receive server-allocated
// BSTRs; `pdateXxx: ?*f64` receive OLE DATE values. Do not hand-edit: vtable slot order is ABI.
@import("../zig.zig").Guid.initString("8b86f485-fd7f-4824-886b-40c5caa617cc"); pub const IID_IFaxJobStatus = &IID_IFaxJobStatus_Value; pub const IFaxJobStatus = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Status: fn( self: *const IFaxJobStatus, pStatus: ?*FAX_JOB_STATUS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Pages: fn( self: *const IFaxJobStatus, plPages: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Size: fn( self: *const IFaxJobStatus, plSize: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_CurrentPage: fn( self: *const IFaxJobStatus, plCurrentPage: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DeviceId: fn( self: *const IFaxJobStatus, plDeviceId: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_CSID: fn( self: *const IFaxJobStatus, pbstrCSID: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TSID: fn( self: *const IFaxJobStatus, pbstrTSID: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ExtendedStatusCode: fn( self: *const IFaxJobStatus, pExtendedStatusCode: ?*FAX_JOB_EXTENDED_STATUS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this?
get_ExtendedStatus: fn( self: *const IFaxJobStatus, pbstrExtendedStatus: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_AvailableOperations: fn( self: *const IFaxJobStatus, pAvailableOperations: ?*FAX_JOB_OPERATIONS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Retries: fn( self: *const IFaxJobStatus, plRetries: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_JobType: fn( self: *const IFaxJobStatus, pJobType: ?*FAX_JOB_TYPE_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ScheduledTime: fn( self: *const IFaxJobStatus, pdateScheduledTime: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TransmissionStart: fn( self: *const IFaxJobStatus, pdateTransmissionStart: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TransmissionEnd: fn( self: *const IFaxJobStatus, pdateTransmissionEnd: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_CallerId: fn( self: *const IFaxJobStatus, pbstrCallerId: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this?
get_RoutingInformation: fn( self: *const IFaxJobStatus, pbstrRoutingInformation: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_Status(self: *const T, pStatus: ?*FAX_JOB_STATUS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_Status(@ptrCast(*const IFaxJobStatus, self), pStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_Pages(self: *const T, plPages: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_Pages(@ptrCast(*const IFaxJobStatus, self), plPages); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_Size(self: *const T, plSize: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_Size(@ptrCast(*const IFaxJobStatus, self), plSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_CurrentPage(self: *const T, plCurrentPage: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_CurrentPage(@ptrCast(*const IFaxJobStatus, self), plCurrentPage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_DeviceId(self: *const T, plDeviceId: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_DeviceId(@ptrCast(*const IFaxJobStatus, self), plDeviceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_CSID(self: *const T, pbstrCSID: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable,
self.vtable).get_CSID(@ptrCast(*const IFaxJobStatus, self), pbstrCSID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_TSID(self: *const T, pbstrTSID: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_TSID(@ptrCast(*const IFaxJobStatus, self), pbstrTSID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_ExtendedStatusCode(self: *const T, pExtendedStatusCode: ?*FAX_JOB_EXTENDED_STATUS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_ExtendedStatusCode(@ptrCast(*const IFaxJobStatus, self), pExtendedStatusCode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_ExtendedStatus(self: *const T, pbstrExtendedStatus: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_ExtendedStatus(@ptrCast(*const IFaxJobStatus, self), pbstrExtendedStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_AvailableOperations(self: *const T, pAvailableOperations: ?*FAX_JOB_OPERATIONS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_AvailableOperations(@ptrCast(*const IFaxJobStatus, self), pAvailableOperations); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_Retries(self: *const T, plRetries: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_Retries(@ptrCast(*const IFaxJobStatus, self), plRetries); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_JobType(self: *const T, pJobType: ?*FAX_JOB_TYPE_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_JobType(@ptrCast(*const IFaxJobStatus,
self), pJobType); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_ScheduledTime(self: *const T, pdateScheduledTime: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_ScheduledTime(@ptrCast(*const IFaxJobStatus, self), pdateScheduledTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_TransmissionStart(self: *const T, pdateTransmissionStart: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_TransmissionStart(@ptrCast(*const IFaxJobStatus, self), pdateTransmissionStart); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_TransmissionEnd(self: *const T, pdateTransmissionEnd: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_TransmissionEnd(@ptrCast(*const IFaxJobStatus, self), pdateTransmissionEnd); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_CallerId(self: *const T, pbstrCallerId: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_CallerId(@ptrCast(*const IFaxJobStatus, self), pbstrCallerId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxJobStatus_get_RoutingInformation(self: *const T, pbstrRoutingInformation: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxJobStatus.VTable, self.vtable).get_RoutingInformation(@ptrCast(*const IFaxJobStatus, self), pbstrRoutingInformation); } };} pub usingnamespace MethodMixin(@This()); }; pub const FAX_SERVER_EVENTS_TYPE_ENUM = enum(i32) { NONE = 0, IN_QUEUE = 1, OUT_QUEUE = 2, CONFIG = 4, ACTIVITY = 8, QUEUE_STATE = 16, IN_ARCHIVE = 32, OUT_ARCHIVE = 64, FXSSVC_ENDED = 128, DEVICE_STATUS = 256, INCOMING_CALL = 512, }; pub const fsetNONE =
FAX_SERVER_EVENTS_TYPE_ENUM.NONE;
pub const fsetIN_QUEUE = FAX_SERVER_EVENTS_TYPE_ENUM.IN_QUEUE;
pub const fsetOUT_QUEUE = FAX_SERVER_EVENTS_TYPE_ENUM.OUT_QUEUE;
pub const fsetCONFIG = FAX_SERVER_EVENTS_TYPE_ENUM.CONFIG;
pub const fsetACTIVITY = FAX_SERVER_EVENTS_TYPE_ENUM.ACTIVITY;
pub const fsetQUEUE_STATE = FAX_SERVER_EVENTS_TYPE_ENUM.QUEUE_STATE;
pub const fsetIN_ARCHIVE = FAX_SERVER_EVENTS_TYPE_ENUM.IN_ARCHIVE;
pub const fsetOUT_ARCHIVE = FAX_SERVER_EVENTS_TYPE_ENUM.OUT_ARCHIVE;
pub const fsetFXSSVC_ENDED = FAX_SERVER_EVENTS_TYPE_ENUM.FXSSVC_ENDED;
pub const fsetDEVICE_STATUS = FAX_SERVER_EVENTS_TYPE_ENUM.DEVICE_STATUS;
pub const fsetINCOMING_CALL = FAX_SERVER_EVENTS_TYPE_ENUM.INCOMING_CALL;

// Fax server API version selector. The enum values are spaced 0x10000
// apart (65536 = 1 << 16), i.e. presumably (major_version << 16) — TODO confirm
// against the faxcomex.h FAX_SERVER_APIVERSION_ENUM definition.
pub const FAX_SERVER_APIVERSION_ENUM = enum(i32) {
    @"0" = 0,
    @"1" = 65536,
    @"2" = 131072,
    @"3" = 196608,
};
pub const fsAPI_VERSION_0 = FAX_SERVER_APIVERSION_ENUM.@"0";
pub const fsAPI_VERSION_1 = FAX_SERVER_APIVERSION_ENUM.@"1";
pub const fsAPI_VERSION_2 = FAX_SERVER_APIVERSION_ENUM.@"2";
pub const fsAPI_VERSION_3 = FAX_SERVER_APIVERSION_ENUM.@"3";

// TODO: this type is limited to platform 'windows5.0'
const IID_IFaxServer_Value = @import("../zig.zig").Guid.initString("475b6469-90a5-4878-a577-17a86e8e3462");
pub const IID_IFaxServer = &IID_IFaxServer_Value;
// COM binding for the IFaxServer dispatch interface.
// NOTE(review): VTable is an `extern struct`, so its field order IS the COM
// binary vtable layout — never reorder, insert, or remove fields here.
pub const IFaxServer = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        Connect: fn(
            self: *const IFaxServer,
            bstrServerName: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ServerName: fn(
            self: *const IFaxServer,
            pbstrServerName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDeviceProviders: fn(
            self: *const IFaxServer,
            ppFaxDeviceProviders: ?*?*IFaxDeviceProviders,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDevices: fn(
            self: *const IFaxServer,
            ppFaxDevices: ?*?*IFaxDevices,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_InboundRouting: fn(
            self: *const IFaxServer,
            ppFaxInboundRouting: ?*?*IFaxInboundRouting,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Folders: fn(
            self: *const IFaxServer,
            pFaxFolders: ?*?*IFaxFolders,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_LoggingOptions: fn(
            self: *const IFaxServer,
            ppFaxLoggingOptions: ?*?*IFaxLoggingOptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MajorVersion: fn(
            self: *const IFaxServer,
            plMajorVersion: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MinorVersion: fn(
            self: *const IFaxServer,
            plMinorVersion: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MajorBuild: fn(
            self: *const IFaxServer,
            plMajorBuild: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MinorBuild: fn(
            self: *const IFaxServer,
            plMinorBuild: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        // NOTE(review): pbDebug is ?*i16, i.e. a VARIANT_BOOL out-parameter.
        get_Debug: fn(
            self: *const IFaxServer,
            pbDebug: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Activity: fn(
            self: *const IFaxServer,
            ppFaxActivity: ?*?*IFaxActivity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OutboundRouting: fn(
            self: *const IFaxServer,
            ppFaxOutboundRouting: ?*?*IFaxOutboundRouting,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ReceiptOptions: fn(
            self: *const IFaxServer,
            ppFaxReceiptOptions: ?*?*IFaxReceiptOptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Security: fn(
            self: *const IFaxServer,
            ppFaxSecurity: ?*?*IFaxSecurity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Disconnect: fn(
            self: *const IFaxServer,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetExtensionProperty: fn(
            self: *const IFaxServer,
            bstrGUID: ?BSTR,
            pvProperty: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetExtensionProperty: fn(
            self: *const IFaxServer,
            bstrGUID: ?BSTR,
            vProperty: VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ListenToServerEvents: fn(
            self: *const IFaxServer,
            EventTypes: FAX_SERVER_EVENTS_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RegisterDeviceProvider: fn(
            self: *const IFaxServer,
            bstrGUID: ?BSTR,
            bstrFriendlyName: ?BSTR,
            bstrImageName: ?BSTR,
            TspName: ?BSTR,
            lFSPIVersion: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnregisterDeviceProvider: fn(
            self: *const IFaxServer,
            bstrUniqueName: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RegisterInboundRoutingExtension: fn(
            self: *const IFaxServer,
            bstrExtensionName: ?BSTR,
            bstrFriendlyName: ?BSTR,
            bstrImageName: ?BSTR,
            vMethods: VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnregisterInboundRoutingExtension: fn(
            self: *const IFaxServer,
            bstrExtensionUniqueName: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RegisteredEvents: fn(
            self: *const IFaxServer,
            pEventTypes: ?*FAX_SERVER_EVENTS_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_APIVersion: fn(
            self: *const IFaxServer,
            pAPIVersion: ?*FAX_SERVER_APIVERSION_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T; also pulls in
    // the base IDispatch methods so T gets the full inherited interface.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_Connect(self: *const T, bstrServerName: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).Connect(@ptrCast(*const IFaxServer, self), bstrServerName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_ServerName(self: *const T, pbstrServerName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_ServerName(@ptrCast(*const IFaxServer, self), pbstrServerName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_GetDeviceProviders(self: *const T, ppFaxDeviceProviders: ?*?*IFaxDeviceProviders) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).GetDeviceProviders(@ptrCast(*const IFaxServer, self), ppFaxDeviceProviders);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_GetDevices(self: *const T, ppFaxDevices: ?*?*IFaxDevices) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).GetDevices(@ptrCast(*const IFaxServer, self), ppFaxDevices);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_InboundRouting(self: *const T, ppFaxInboundRouting: ?*?*IFaxInboundRouting) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_InboundRouting(@ptrCast(*const IFaxServer, self), ppFaxInboundRouting);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_Folders(self: *const T, pFaxFolders: ?*?*IFaxFolders) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_Folders(@ptrCast(*const IFaxServer, self), pFaxFolders);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_LoggingOptions(self: *const T, ppFaxLoggingOptions: ?*?*IFaxLoggingOptions) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_LoggingOptions(@ptrCast(*const IFaxServer, self), ppFaxLoggingOptions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_MajorVersion(self: *const T, plMajorVersion: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_MajorVersion(@ptrCast(*const IFaxServer, self), plMajorVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_MinorVersion(self: *const T, plMinorVersion: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_MinorVersion(@ptrCast(*const IFaxServer, self), plMinorVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_MajorBuild(self: *const T, plMajorBuild: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_MajorBuild(@ptrCast(*const IFaxServer, self), plMajorBuild);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_MinorBuild(self: *const T, plMinorBuild: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_MinorBuild(@ptrCast(*const IFaxServer, self), plMinorBuild);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_Debug(self: *const T, pbDebug: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_Debug(@ptrCast(*const IFaxServer, self), pbDebug);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_Activity(self: *const T, ppFaxActivity: ?*?*IFaxActivity) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_Activity(@ptrCast(*const IFaxServer, self), ppFaxActivity);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_OutboundRouting(self: *const T, ppFaxOutboundRouting: ?*?*IFaxOutboundRouting) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_OutboundRouting(@ptrCast(*const IFaxServer, self), ppFaxOutboundRouting);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_ReceiptOptions(self: *const T, ppFaxReceiptOptions: ?*?*IFaxReceiptOptions) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_ReceiptOptions(@ptrCast(*const IFaxServer, self), ppFaxReceiptOptions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_Security(self: *const T, ppFaxSecurity: ?*?*IFaxSecurity) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_Security(@ptrCast(*const IFaxServer, self), ppFaxSecurity);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_Disconnect(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).Disconnect(@ptrCast(*const IFaxServer, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_GetExtensionProperty(self: *const T, bstrGUID: ?BSTR, pvProperty: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).GetExtensionProperty(@ptrCast(*const IFaxServer, self), bstrGUID, pvProperty);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_SetExtensionProperty(self: *const T, bstrGUID: ?BSTR, vProperty: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).SetExtensionProperty(@ptrCast(*const IFaxServer, self), bstrGUID, vProperty);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_ListenToServerEvents(self: *const T, EventTypes: FAX_SERVER_EVENTS_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).ListenToServerEvents(@ptrCast(*const IFaxServer, self), EventTypes);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_RegisterDeviceProvider(self: *const T, bstrGUID: ?BSTR, bstrFriendlyName: ?BSTR, bstrImageName: ?BSTR, TspName: ?BSTR, lFSPIVersion: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).RegisterDeviceProvider(@ptrCast(*const IFaxServer, self), bstrGUID, bstrFriendlyName, bstrImageName, TspName, lFSPIVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_UnregisterDeviceProvider(self: *const T, bstrUniqueName: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).UnregisterDeviceProvider(@ptrCast(*const IFaxServer, self), bstrUniqueName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_RegisterInboundRoutingExtension(self: *const T, bstrExtensionName: ?BSTR, bstrFriendlyName: ?BSTR, bstrImageName: ?BSTR, vMethods: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).RegisterInboundRoutingExtension(@ptrCast(*const IFaxServer, self), bstrExtensionName, bstrFriendlyName, bstrImageName, vMethods);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_UnregisterInboundRoutingExtension(self: *const T, bstrExtensionUniqueName: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).UnregisterInboundRoutingExtension(@ptrCast(*const IFaxServer, self), bstrExtensionUniqueName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_RegisteredEvents(self: *const T, pEventTypes: ?*FAX_SERVER_EVENTS_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_RegisteredEvents(@ptrCast(*const IFaxServer, self), pEventTypes);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer_get_APIVersion(self: *const T, pAPIVersion: ?*FAX_SERVER_APIVERSION_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer.VTable, self.vtable).get_APIVersion(@ptrCast(*const IFaxServer, self), pAPIVersion);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxDeviceProviders_Value = @import("../zig.zig").Guid.initString("9fb76f62-4c7e-43a5-b6fd-502893f7e13e");
pub const IID_IFaxDeviceProviders = &IID_IFaxDeviceProviders_Value;
// COM binding for the IFaxDeviceProviders collection interface.
// NOTE(review): VTable field order is the COM binary layout — do not reorder.
pub const IFaxDeviceProviders = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(
            self: *const IFaxDeviceProviders,
            ppUnk: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(
            self: *const IFaxDeviceProviders,
            vIndex: VARIANT,
            pFaxDeviceProvider: ?*?*IFaxDeviceProvider,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(
            self: *const IFaxDeviceProviders,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProviders_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProviders.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxDeviceProviders, self), ppUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProviders_get_Item(self: *const T, vIndex: VARIANT, pFaxDeviceProvider: ?*?*IFaxDeviceProvider) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProviders.VTable, self.vtable).get_Item(@ptrCast(*const IFaxDeviceProviders, self), vIndex, pFaxDeviceProvider);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProviders_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProviders.VTable, self.vtable).get_Count(@ptrCast(*const IFaxDeviceProviders, self), plCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxDevices_Value = @import("../zig.zig").Guid.initString("9e46783e-f34f-482e-a360-0416becbbd96");
pub const IID_IFaxDevices = &IID_IFaxDevices_Value;
// COM binding for the IFaxDevices collection interface.
// NOTE(review): VTable field order is the COM binary layout — do not reorder.
pub const IFaxDevices = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(
            self: *const IFaxDevices,
            ppUnk: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(
            self: *const IFaxDevices,
            vIndex: VARIANT,
            pFaxDevice: ?*?*IFaxDevice,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(
            self: *const IFaxDevices,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ItemById: fn(
            self: *const IFaxDevices,
            lId: i32,
            ppFaxDevice: ?*?*IFaxDevice,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevices_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevices.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxDevices, self), ppUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevices_get_Item(self: *const T, vIndex: VARIANT, pFaxDevice: ?*?*IFaxDevice) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevices.VTable, self.vtable).get_Item(@ptrCast(*const IFaxDevices, self), vIndex, pFaxDevice);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevices_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevices.VTable, self.vtable).get_Count(@ptrCast(*const IFaxDevices, self), plCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevices_get_ItemById(self: *const T, lId: i32, ppFaxDevice: ?*?*IFaxDevice) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevices.VTable, self.vtable).get_ItemById(@ptrCast(*const IFaxDevices, self), lId, ppFaxDevice);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxInboundRouting_Value = @import("../zig.zig").Guid.initString("8148c20f-9d52-45b1-bf96-38fc12713527");
pub const IID_IFaxInboundRouting = &IID_IFaxInboundRouting_Value;
// COM binding for the IFaxInboundRouting interface.
// NOTE(review): VTable field order is the COM binary layout — do not reorder.
pub const IFaxInboundRouting = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        GetExtensions: fn(
            self: *const IFaxInboundRouting,
            pFaxInboundRoutingExtensions: ?*?*IFaxInboundRoutingExtensions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMethods: fn(
            self: *const IFaxInboundRouting,
            pFaxInboundRoutingMethods: ?*?*IFaxInboundRoutingMethods,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRouting_GetExtensions(self: *const T, pFaxInboundRoutingExtensions: ?*?*IFaxInboundRoutingExtensions) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRouting.VTable, self.vtable).GetExtensions(@ptrCast(*const IFaxInboundRouting, self), pFaxInboundRoutingExtensions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRouting_GetMethods(self: *const T, pFaxInboundRoutingMethods: ?*?*IFaxInboundRoutingMethods) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRouting.VTable, self.vtable).GetMethods(@ptrCast(*const IFaxInboundRouting, self), pFaxInboundRoutingMethods);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxFolders_Value = @import("../zig.zig").Guid.initString("dce3b2a8-a7ab-42bc-9d0a-3149457261a0");
pub const IID_IFaxFolders = &IID_IFaxFolders_Value;
// COM binding for the IFaxFolders interface (declaration continues below).
pub const IFaxFolders = extern struct { pub
const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OutgoingQueue: fn(
            self: *const IFaxFolders,
            pFaxOutgoingQueue: ?*?*IFaxOutgoingQueue,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IncomingQueue: fn(
            self: *const IFaxFolders,
            pFaxIncomingQueue: ?*?*IFaxIncomingQueue,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IncomingArchive: fn(
            self: *const IFaxFolders,
            pFaxIncomingArchive: ?*?*IFaxIncomingArchive,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OutgoingArchive: fn(
            self: *const IFaxFolders,
            pFaxOutgoingArchive: ?*?*IFaxOutgoingArchive,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxFolders_get_OutgoingQueue(self: *const T, pFaxOutgoingQueue: ?*?*IFaxOutgoingQueue) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxFolders.VTable, self.vtable).get_OutgoingQueue(@ptrCast(*const IFaxFolders, self), pFaxOutgoingQueue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxFolders_get_IncomingQueue(self: *const T, pFaxIncomingQueue: ?*?*IFaxIncomingQueue) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxFolders.VTable, self.vtable).get_IncomingQueue(@ptrCast(*const IFaxFolders, self), pFaxIncomingQueue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxFolders_get_IncomingArchive(self: *const T, pFaxIncomingArchive: ?*?*IFaxIncomingArchive) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxFolders.VTable, self.vtable).get_IncomingArchive(@ptrCast(*const IFaxFolders, self), pFaxIncomingArchive);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxFolders_get_OutgoingArchive(self: *const T, pFaxOutgoingArchive: ?*?*IFaxOutgoingArchive) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxFolders.VTable, self.vtable).get_OutgoingArchive(@ptrCast(*const IFaxFolders, self), pFaxOutgoingArchive);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxLoggingOptions_Value = @import("../zig.zig").Guid.initString("34e64fb9-6b31-4d32-8b27-d286c0c33606");
pub const IID_IFaxLoggingOptions = &IID_IFaxLoggingOptions_Value;
// COM binding for the IFaxLoggingOptions interface.
// NOTE(review): VTable field order is the COM binary layout — do not reorder.
pub const IFaxLoggingOptions = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_EventLogging: fn(
            self: *const IFaxLoggingOptions,
            pFaxEventLogging: ?*?*IFaxEventLogging,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ActivityLogging: fn(
            self: *const IFaxLoggingOptions,
            pFaxActivityLogging: ?*?*IFaxActivityLogging,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxLoggingOptions_get_EventLogging(self: *const T, pFaxEventLogging: ?*?*IFaxEventLogging) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxLoggingOptions.VTable, self.vtable).get_EventLogging(@ptrCast(*const IFaxLoggingOptions, self), pFaxEventLogging);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxLoggingOptions_get_ActivityLogging(self: *const T, pFaxActivityLogging: ?*?*IFaxActivityLogging) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxLoggingOptions.VTable, self.vtable).get_ActivityLogging(@ptrCast(*const IFaxLoggingOptions, self), pFaxActivityLogging);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxActivity_Value = @import("../zig.zig").Guid.initString("4b106f97-3df5-40f2-bc3c-44cb8115ebdf");
pub const IID_IFaxActivity = &IID_IFaxActivity_Value;
// COM binding for the IFaxActivity interface (server activity counters).
// NOTE(review): VTable field order is the COM binary layout — do not reorder.
pub const IFaxActivity = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IncomingMessages: fn(
            self: *const IFaxActivity,
            plIncomingMessages: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RoutingMessages: fn(
            self: *const IFaxActivity,
            plRoutingMessages: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OutgoingMessages: fn(
            self: *const IFaxActivity,
            plOutgoingMessages: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QueuedMessages: fn(
            self: *const IFaxActivity,
            plQueuedMessages: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(
            self: *const IFaxActivity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxActivity_get_IncomingMessages(self: *const T, plIncomingMessages: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxActivity.VTable, self.vtable).get_IncomingMessages(@ptrCast(*const IFaxActivity, self), plIncomingMessages);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxActivity_get_RoutingMessages(self: *const T, plRoutingMessages: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxActivity.VTable, self.vtable).get_RoutingMessages(@ptrCast(*const IFaxActivity, self), plRoutingMessages);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxActivity_get_OutgoingMessages(self: *const T, plOutgoingMessages: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxActivity.VTable, self.vtable).get_OutgoingMessages(@ptrCast(*const IFaxActivity, self), plOutgoingMessages);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxActivity_get_QueuedMessages(self: *const T, plQueuedMessages: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxActivity.VTable, self.vtable).get_QueuedMessages(@ptrCast(*const IFaxActivity, self), plQueuedMessages);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxActivity_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxActivity.VTable, self.vtable).Refresh(@ptrCast(*const IFaxActivity, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxOutboundRouting_Value = @import("../zig.zig").Guid.initString("25dc05a4-9909-41bd-a95b-7e5d1dec1d43");
pub const IID_IFaxOutboundRouting = &IID_IFaxOutboundRouting_Value;
// COM binding for the IFaxOutboundRouting interface.
// NOTE(review): VTable field order is the COM binary layout — do not reorder.
pub const IFaxOutboundRouting = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        GetGroups: fn(
            self: *const IFaxOutboundRouting,
            pFaxOutboundRoutingGroups: ?*?*IFaxOutboundRoutingGroups,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRules: fn(
            self: *const IFaxOutboundRouting,
            pFaxOutboundRoutingRules: ?*?*IFaxOutboundRoutingRules,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin exposing each vtable slot as an inline method on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutboundRouting_GetGroups(self: *const T, pFaxOutboundRoutingGroups: ?*?*IFaxOutboundRoutingGroups) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutboundRouting.VTable, self.vtable).GetGroups(@ptrCast(*const IFaxOutboundRouting, self), pFaxOutboundRoutingGroups);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutboundRouting_GetRules(self: *const T, pFaxOutboundRoutingRules: ?*?*IFaxOutboundRoutingRules) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutboundRouting.VTable, self.vtable).GetRules(@ptrCast(*const IFaxOutboundRouting, self), pFaxOutboundRoutingRules);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// SMTP authentication modes for fax receipt delivery.
pub const FAX_SMTP_AUTHENTICATION_TYPE_ENUM = enum(i32) {
    ANONYMOUS = 0,
    BASIC = 1,
    NTLM = 2,
};
pub const fsatANONYMOUS = FAX_SMTP_AUTHENTICATION_TYPE_ENUM.ANONYMOUS;
pub const fsatBASIC = FAX_SMTP_AUTHENTICATION_TYPE_ENUM.BASIC;
pub const
fsatNTLM = FAX_SMTP_AUTHENTICATION_TYPE_ENUM.NTLM;

/// How fax delivery receipts are returned to the submitter.
pub const FAX_RECEIPT_TYPE_ENUM = enum(i32) {
    NONE = 0,
    MAIL = 1,
    MSGBOX = 4,
};
// Unscoped aliases matching the original C enumerator names ("frt" prefix).
pub const frtNONE = FAX_RECEIPT_TYPE_ENUM.NONE;
pub const frtMAIL = FAX_RECEIPT_TYPE_ENUM.MAIL;
pub const frtMSGBOX = FAX_RECEIPT_TYPE_ENUM.MSGBOX;

// TODO: this type is limited to platform 'windows5.1.2600'
// Interface identifier (GUID) for the IFaxReceiptOptions COM interface.
const IID_IFaxReceiptOptions_Value = @import("../zig.zig").Guid.initString("378efaeb-5fcb-4afb-b2ee-e16e80614487");
pub const IID_IFaxReceiptOptions = &IID_IFaxReceiptOptions_Value;

/// COM interface IFaxReceiptOptions, derived from IDispatch.
/// The get_*/put_* vtable slots are property accessors for receipt-related
/// settings (SMTP server/port/sender/user/password, allowed receipt types).
/// NOTE(review): `VTable` field order is ABI-critical — it must match the
/// Windows interface layout exactly; never reorder or remove entries.
pub const IFaxReceiptOptions = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AuthenticationType: fn(
            self: *const IFaxReceiptOptions,
            pType: ?*FAX_SMTP_AUTHENTICATION_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_AuthenticationType: fn(
            self: *const IFaxReceiptOptions,
            Type: FAX_SMTP_AUTHENTICATION_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SMTPServer: fn(
            self: *const IFaxReceiptOptions,
            pbstrSMTPServer: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_SMTPServer: fn(
            self: *const IFaxReceiptOptions,
            bstrSMTPServer: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SMTPPort: fn(
            self: *const IFaxReceiptOptions,
            plSMTPPort: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_SMTPPort: fn(
            self: *const IFaxReceiptOptions,
            lSMTPPort: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SMTPSender: fn(
            self: *const IFaxReceiptOptions,
            pbstrSMTPSender: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_SMTPSender: fn(
            self: *const IFaxReceiptOptions,
            bstrSMTPSender: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SMTPUser: fn(
            self: *const IFaxReceiptOptions,
            pbstrSMTPUser: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_SMTPUser: fn(
            self: *const IFaxReceiptOptions,
            bstrSMTPUser: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AllowedReceipts: fn(
            self: *const IFaxReceiptOptions,
            pAllowedReceipts: ?*FAX_RECEIPT_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_AllowedReceipts: fn(
            self: *const IFaxReceiptOptions,
            AllowedReceipts: FAX_RECEIPT_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SMTPPassword: fn(
            self: *const IFaxReceiptOptions,
            pbstrSMTPPassword: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_SMTPPassword: fn(
            self: *const IFaxReceiptOptions,
            bstrSMTPPassword: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(
            self: *const IFaxReceiptOptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Save: fn(
            self: *const IFaxReceiptOptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UseForInboundRouting: fn(
            self: *const IFaxReceiptOptions,
            pbUseForInboundRouting: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_UseForInboundRouting: fn(
            self: *const IFaxReceiptOptions,
            bUseForInboundRouting: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Generates thin inline wrappers that cast `self` to the interface
    /// pointer and forward the call through the corresponding vtable slot.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_AuthenticationType(self: *const T, pType: ?*FAX_SMTP_AUTHENTICATION_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_AuthenticationType(@ptrCast(*const IFaxReceiptOptions, self), pType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_AuthenticationType(self: *const T, Type: FAX_SMTP_AUTHENTICATION_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_AuthenticationType(@ptrCast(*const IFaxReceiptOptions, self), Type);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_SMTPServer(self: *const T, pbstrSMTPServer: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_SMTPServer(@ptrCast(*const IFaxReceiptOptions, self), pbstrSMTPServer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_SMTPServer(self: *const T, bstrSMTPServer: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_SMTPServer(@ptrCast(*const IFaxReceiptOptions, self), bstrSMTPServer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_SMTPPort(self: *const T, plSMTPPort: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_SMTPPort(@ptrCast(*const IFaxReceiptOptions, self), plSMTPPort);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_SMTPPort(self: *const T, lSMTPPort: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_SMTPPort(@ptrCast(*const IFaxReceiptOptions, self), lSMTPPort);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_SMTPSender(self: *const T, pbstrSMTPSender: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_SMTPSender(@ptrCast(*const IFaxReceiptOptions, self), pbstrSMTPSender);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_SMTPSender(self: *const T, bstrSMTPSender: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_SMTPSender(@ptrCast(*const IFaxReceiptOptions, self), bstrSMTPSender);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_SMTPUser(self: *const T, pbstrSMTPUser: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_SMTPUser(@ptrCast(*const IFaxReceiptOptions, self), pbstrSMTPUser);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_SMTPUser(self: *const T, bstrSMTPUser: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_SMTPUser(@ptrCast(*const IFaxReceiptOptions, self), bstrSMTPUser);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_AllowedReceipts(self: *const T, pAllowedReceipts: ?*FAX_RECEIPT_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_AllowedReceipts(@ptrCast(*const IFaxReceiptOptions, self), pAllowedReceipts);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_AllowedReceipts(self: *const T, AllowedReceipts: FAX_RECEIPT_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_AllowedReceipts(@ptrCast(*const IFaxReceiptOptions, self), AllowedReceipts);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_SMTPPassword(self: *const T, pbstrSMTPPassword: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_SMTPPassword(@ptrCast(*const IFaxReceiptOptions, self), pbstrSMTPPassword);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_SMTPPassword(self: *const T, bstrSMTPPassword: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_SMTPPassword(@ptrCast(*const IFaxReceiptOptions, self), bstrSMTPPassword);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).Refresh(@ptrCast(*const IFaxReceiptOptions, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_Save(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).Save(@ptrCast(*const IFaxReceiptOptions, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_get_UseForInboundRouting(self: *const T, pbUseForInboundRouting: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).get_UseForInboundRouting(@ptrCast(*const IFaxReceiptOptions, self), pbUseForInboundRouting);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxReceiptOptions_put_UseForInboundRouting(self: *const T, bUseForInboundRouting: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxReceiptOptions.VTable, self.vtable).put_UseForInboundRouting(@ptrCast(*const IFaxReceiptOptions, self), bUseForInboundRouting);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

/// Fax server access rights; each enumerator is a distinct power of two.
pub const FAX_ACCESS_RIGHTS_ENUM = enum(i32) {
    SUBMIT_LOW = 1,
    SUBMIT_NORMAL = 2,
    SUBMIT_HIGH = 4,
    QUERY_JOBS = 8,
    MANAGE_JOBS = 16,
    QUERY_CONFIG = 32,
    MANAGE_CONFIG = 64,
    QUERY_IN_ARCHIVE = 128,
    MANAGE_IN_ARCHIVE = 256,
    QUERY_OUT_ARCHIVE = 512,
    MANAGE_OUT_ARCHIVE = 1024,
};
// Unscoped aliases matching the original C enumerator names ("far" prefix).
pub const farSUBMIT_LOW = FAX_ACCESS_RIGHTS_ENUM.SUBMIT_LOW;
pub const farSUBMIT_NORMAL = FAX_ACCESS_RIGHTS_ENUM.SUBMIT_NORMAL;
pub const farSUBMIT_HIGH = FAX_ACCESS_RIGHTS_ENUM.SUBMIT_HIGH;
pub const farQUERY_JOBS = FAX_ACCESS_RIGHTS_ENUM.QUERY_JOBS;
pub const farMANAGE_JOBS = FAX_ACCESS_RIGHTS_ENUM.MANAGE_JOBS;
pub const farQUERY_CONFIG = FAX_ACCESS_RIGHTS_ENUM.QUERY_CONFIG;
pub const farMANAGE_CONFIG = FAX_ACCESS_RIGHTS_ENUM.MANAGE_CONFIG;
pub const farQUERY_IN_ARCHIVE = FAX_ACCESS_RIGHTS_ENUM.QUERY_IN_ARCHIVE;
pub const farMANAGE_IN_ARCHIVE = FAX_ACCESS_RIGHTS_ENUM.MANAGE_IN_ARCHIVE;
pub const farQUERY_OUT_ARCHIVE = FAX_ACCESS_RIGHTS_ENUM.QUERY_OUT_ARCHIVE;
pub const farMANAGE_OUT_ARCHIVE = FAX_ACCESS_RIGHTS_ENUM.MANAGE_OUT_ARCHIVE;

// TODO: this type is limited to platform 'windows5.1.2600'
// Interface identifier (GUID) for the IFaxSecurity COM interface.
const IID_IFaxSecurity_Value = @import("../zig.zig").Guid.initString("77b508c1-09c0-47a2-91eb-fce7fdf2690e");
pub const IID_IFaxSecurity = &IID_IFaxSecurity_Value;
/// COM interface IFaxSecurity, derived from IDispatch.
/// NOTE(review): `VTable` field order is ABI-critical — it must match the
/// Windows interface layout exactly; never reorder or remove entries.
pub const IFaxSecurity = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
get_Descriptor: fn(
            self: *const IFaxSecurity,
            pvDescriptor: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Descriptor: fn(
            self: *const IFaxSecurity,
            vDescriptor: VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_GrantedRights: fn(
            self: *const IFaxSecurity,
            pGrantedRights: ?*FAX_ACCESS_RIGHTS_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(
            self: *const IFaxSecurity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Save: fn(
            self: *const IFaxSecurity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_InformationType: fn(
            self: *const IFaxSecurity,
            plInformationType: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_InformationType: fn(
            self: *const IFaxSecurity,
            lInformationType: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Generates thin inline wrappers that cast `self` to the interface
    /// pointer and forward the call through the corresponding vtable slot.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxSecurity_get_Descriptor(self: *const T, pvDescriptor: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxSecurity.VTable, self.vtable).get_Descriptor(@ptrCast(*const IFaxSecurity, self), pvDescriptor);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxSecurity_put_Descriptor(self: *const T, vDescriptor: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxSecurity.VTable, self.vtable).put_Descriptor(@ptrCast(*const IFaxSecurity, self), vDescriptor);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxSecurity_get_GrantedRights(self: *const T, pGrantedRights: ?*FAX_ACCESS_RIGHTS_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxSecurity.VTable, self.vtable).get_GrantedRights(@ptrCast(*const IFaxSecurity, self), pGrantedRights);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxSecurity_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxSecurity.VTable, self.vtable).Refresh(@ptrCast(*const IFaxSecurity, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxSecurity_Save(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxSecurity.VTable, self.vtable).Save(@ptrCast(*const IFaxSecurity, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxSecurity_get_InformationType(self: *const T, plInformationType: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxSecurity.VTable, self.vtable).get_InformationType(@ptrCast(*const IFaxSecurity, self), plInformationType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxSecurity_put_InformationType(self: *const T, lInformationType: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxSecurity.VTable, self.vtable).put_InformationType(@ptrCast(*const IFaxSecurity, self), lInformationType);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

/// Priority classes for a fax job.
pub const FAX_PRIORITY_TYPE_ENUM = enum(i32) {
    LOW = 0,
    NORMAL = 1,
    HIGH = 2,
};
// Unscoped aliases matching the original C enumerator names ("fpt" prefix).
pub const fptLOW = FAX_PRIORITY_TYPE_ENUM.LOW;
pub const fptNORMAL = FAX_PRIORITY_TYPE_ENUM.NORMAL;
pub const fptHIGH = FAX_PRIORITY_TYPE_ENUM.HIGH;

/// Cover-page source selector for a fax document.
pub const FAX_COVERPAGE_TYPE_ENUM = enum(i32) {
    NONE = 0,
    LOCAL = 1,
    SERVER = 2,
};
// Unscoped aliases matching the original C enumerator names ("fcpt" prefix).
pub const fcptNONE = FAX_COVERPAGE_TYPE_ENUM.NONE;
pub const fcptLOCAL = FAX_COVERPAGE_TYPE_ENUM.LOCAL;
pub const fcptSERVER = FAX_COVERPAGE_TYPE_ENUM.SERVER;

/// Scheduling mode for a fax job (immediate, at a given time, or during the
/// discount period).
pub const FAX_SCHEDULE_TYPE_ENUM = enum(i32) {
    NOW = 0,
    SPECIFIC_TIME = 1,
    DISCOUNT_PERIOD = 2,
};
// Unscoped aliases matching the original C enumerator names ("fst" prefix).
pub const fstNOW = FAX_SCHEDULE_TYPE_ENUM.NOW;
pub const fstSPECIFIC_TIME = FAX_SCHEDULE_TYPE_ENUM.SPECIFIC_TIME;
pub const fstDISCOUNT_PERIOD = FAX_SCHEDULE_TYPE_ENUM.DISCOUNT_PERIOD;

// TODO: this type is limited to platform 'windows5.1.2600'
// Interface identifier (GUID) for the IFaxDocument COM interface.
const IID_IFaxDocument_Value = @import("../zig.zig").Guid.initString("b207a246-09e3-4a4e-a7dc-fea31d29458f");
pub const IID_IFaxDocument = &IID_IFaxDocument_Value;
/// COM interface IFaxDocument, derived from IDispatch.
/// NOTE(review): `VTable` field order is ABI-critical — it must match the
/// Windows interface layout exactly; never reorder or remove entries.
pub const IFaxDocument = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Body: fn(
            self: *const IFaxDocument,
            pbstrBody: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Body: fn(
            self: *const IFaxDocument,
            bstrBody: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
get_Sender: fn(
            self: *const IFaxDocument,
            ppFaxSender: ?*?*IFaxSender,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Recipients: fn(
            self: *const IFaxDocument,
            ppFaxRecipients: ?*?*IFaxRecipients,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CoverPage: fn(
            self: *const IFaxDocument,
            pbstrCoverPage: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_CoverPage: fn(
            self: *const IFaxDocument,
            bstrCoverPage: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Subject: fn(
            self: *const IFaxDocument,
            pbstrSubject: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Subject: fn(
            self: *const IFaxDocument,
            bstrSubject: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Note: fn(
            self: *const IFaxDocument,
            pbstrNote: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Note: fn(
            self: *const IFaxDocument,
            bstrNote: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ScheduleTime: fn(
            self: *const IFaxDocument,
            pdateScheduleTime: ?*f64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ScheduleTime: fn(
            self: *const IFaxDocument,
            dateScheduleTime: f64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ReceiptAddress: fn(
            self: *const IFaxDocument,
            pbstrReceiptAddress: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ReceiptAddress: fn(
            self: *const IFaxDocument,
            bstrReceiptAddress: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_DocumentName: fn(
            self: *const IFaxDocument,
            pbstrDocumentName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_DocumentName: fn(
            self: *const IFaxDocument,
            bstrDocumentName: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CallHandle: fn(
            self: *const IFaxDocument,
            plCallHandle: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_CallHandle: fn(
            self: *const IFaxDocument,
            lCallHandle: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CoverPageType: fn(
            self: *const IFaxDocument,
            pCoverPageType: ?*FAX_COVERPAGE_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_CoverPageType: fn(
            self: *const IFaxDocument,
            CoverPageType: FAX_COVERPAGE_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ScheduleType: fn(
            self: *const IFaxDocument,
            pScheduleType: ?*FAX_SCHEDULE_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ScheduleType: fn(
            self: *const IFaxDocument,
            ScheduleType: FAX_SCHEDULE_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ReceiptType: fn(
            self: *const IFaxDocument,
            pReceiptType: ?*FAX_RECEIPT_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ReceiptType: fn(
            self: *const IFaxDocument,
            ReceiptType: FAX_RECEIPT_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_GroupBroadcastReceipts: fn(
            self: *const IFaxDocument,
            pbUseGrouping: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_GroupBroadcastReceipts: fn(
            self: *const IFaxDocument,
            bUseGrouping: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Priority: fn(
            self: *const IFaxDocument,
            pPriority: ?*FAX_PRIORITY_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Priority: fn(
            self: *const IFaxDocument,
            Priority: FAX_PRIORITY_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TapiConnection: fn(
            self: *const IFaxDocument,
            ppTapiConnection: ?*?*IDispatch,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        putref_TapiConnection: fn(
            self: *const IFaxDocument,
            pTapiConnection: ?*IDispatch,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Submit: fn(
            self: *const IFaxDocument,
            bstrFaxServerName: ?BSTR,
            pvFaxOutgoingJobIDs: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ConnectedSubmit: fn(
            self: *const IFaxDocument,
            pFaxServer: ?*IFaxServer,
            pvFaxOutgoingJobIDs: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AttachFaxToReceipt: fn(
            self: *const IFaxDocument,
            pbAttachFax: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_AttachFaxToReceipt: fn(
            self: *const IFaxDocument,
            bAttachFax: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Generates thin inline wrappers that cast `self` to the interface
    /// pointer and forward the call through the corresponding vtable slot.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_Body(self: *const T, pbstrBody: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_Body(@ptrCast(*const IFaxDocument, self), pbstrBody);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_Body(self: *const T, bstrBody: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_Body(@ptrCast(*const IFaxDocument, self), bstrBody);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_Sender(self: *const T, ppFaxSender: ?*?*IFaxSender) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_Sender(@ptrCast(*const IFaxDocument, self), ppFaxSender);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_Recipients(self: *const T, ppFaxRecipients: ?*?*IFaxRecipients) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_Recipients(@ptrCast(*const IFaxDocument, self), ppFaxRecipients);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_CoverPage(self: *const T, pbstrCoverPage: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_CoverPage(@ptrCast(*const IFaxDocument, self), pbstrCoverPage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_CoverPage(self: *const T, bstrCoverPage: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_CoverPage(@ptrCast(*const IFaxDocument, self), bstrCoverPage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_Subject(self: *const T, pbstrSubject: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_Subject(@ptrCast(*const IFaxDocument, self), pbstrSubject);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_Subject(self: *const T, bstrSubject: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_Subject(@ptrCast(*const IFaxDocument, self), bstrSubject);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_Note(self: *const T, pbstrNote: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_Note(@ptrCast(*const IFaxDocument, self), pbstrNote);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_Note(self: *const T, bstrNote: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_Note(@ptrCast(*const IFaxDocument, self), bstrNote);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_ScheduleTime(self: *const T, pdateScheduleTime: ?*f64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_ScheduleTime(@ptrCast(*const IFaxDocument, self), pdateScheduleTime);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_ScheduleTime(self: *const T, dateScheduleTime: f64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_ScheduleTime(@ptrCast(*const IFaxDocument, self), dateScheduleTime);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_ReceiptAddress(self: *const T, pbstrReceiptAddress: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_ReceiptAddress(@ptrCast(*const IFaxDocument, self), pbstrReceiptAddress);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_ReceiptAddress(self: *const T, bstrReceiptAddress: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_ReceiptAddress(@ptrCast(*const IFaxDocument, self), bstrReceiptAddress);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_DocumentName(self: *const T, pbstrDocumentName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_DocumentName(@ptrCast(*const IFaxDocument, self), pbstrDocumentName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_DocumentName(self: *const T, bstrDocumentName: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_DocumentName(@ptrCast(*const IFaxDocument, self), bstrDocumentName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_CallHandle(self: *const T, plCallHandle: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_CallHandle(@ptrCast(*const IFaxDocument, self), plCallHandle);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_CallHandle(self: *const T, lCallHandle: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_CallHandle(@ptrCast(*const IFaxDocument, self), lCallHandle);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_CoverPageType(self: *const T, pCoverPageType: ?*FAX_COVERPAGE_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_CoverPageType(@ptrCast(*const IFaxDocument, self), pCoverPageType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_CoverPageType(self: *const T, CoverPageType: FAX_COVERPAGE_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_CoverPageType(@ptrCast(*const IFaxDocument, self), CoverPageType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_ScheduleType(self: *const T, pScheduleType: ?*FAX_SCHEDULE_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_ScheduleType(@ptrCast(*const IFaxDocument, self), pScheduleType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_ScheduleType(self: *const T, ScheduleType: FAX_SCHEDULE_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_ScheduleType(@ptrCast(*const IFaxDocument, self), ScheduleType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_ReceiptType(self: *const T, pReceiptType: ?*FAX_RECEIPT_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_ReceiptType(@ptrCast(*const IFaxDocument, self), pReceiptType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_ReceiptType(self: *const T, ReceiptType: FAX_RECEIPT_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_ReceiptType(@ptrCast(*const IFaxDocument, self), ReceiptType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_GroupBroadcastReceipts(self: *const T, pbUseGrouping: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_GroupBroadcastReceipts(@ptrCast(*const IFaxDocument, self), pbUseGrouping);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_GroupBroadcastReceipts(self: *const T, bUseGrouping: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_GroupBroadcastReceipts(@ptrCast(*const IFaxDocument, self), bUseGrouping);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_Priority(self: *const T, pPriority: ?*FAX_PRIORITY_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_Priority(@ptrCast(*const IFaxDocument, self), pPriority);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_Priority(self: *const T, Priority: FAX_PRIORITY_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_Priority(@ptrCast(*const IFaxDocument, self), Priority);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_TapiConnection(self: *const T, ppTapiConnection: ?*?*IDispatch) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_TapiConnection(@ptrCast(*const IFaxDocument, self), ppTapiConnection);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_putref_TapiConnection(self: *const T, pTapiConnection: ?*IDispatch) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).putref_TapiConnection(@ptrCast(*const IFaxDocument, self), pTapiConnection);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_Submit(self: *const T, bstrFaxServerName: ?BSTR, pvFaxOutgoingJobIDs: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).Submit(@ptrCast(*const IFaxDocument, self), bstrFaxServerName, pvFaxOutgoingJobIDs);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_ConnectedSubmit(self: *const T, pFaxServer: ?*IFaxServer, pvFaxOutgoingJobIDs: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).ConnectedSubmit(@ptrCast(*const IFaxDocument, self), pFaxServer, pvFaxOutgoingJobIDs);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_get_AttachFaxToReceipt(self: *const T, pbAttachFax: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).get_AttachFaxToReceipt(@ptrCast(*const IFaxDocument, self), pbAttachFax);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument_put_AttachFaxToReceipt(self: *const T, bAttachFax: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument.VTable, self.vtable).put_AttachFaxToReceipt(@ptrCast(*const IFaxDocument, self), bAttachFax);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
// Interface identifier (GUID) for the IFaxSender COM interface.
const IID_IFaxSender_Value
= @import("../zig.zig").Guid.initString("0d879d7d-f57a-4cc6-a6f9-3ee5d527b46a");
pub const IID_IFaxSender = &IID_IFaxSender_Value;

/// COM interface exposing a fax sender's contact/identity properties as
/// get/put BSTR pairs (billing code, address fields, phone numbers, TSID, ...).
/// Derives from IDispatch (see `VTable.base`).
pub const IFaxSender = extern struct {
    /// Raw COM vtable. This is `extern struct`, so field order defines the
    /// binary slot layout of the interface — entries must never be reordered,
    /// inserted into, or removed.
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_BillingCode: fn(self: *const IFaxSender, pbstrBillingCode: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_BillingCode: fn(self: *const IFaxSender, bstrBillingCode: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_City: fn(self: *const IFaxSender, pbstrCity: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_City: fn(self: *const IFaxSender, bstrCity: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Company: fn(self: *const IFaxSender, pbstrCompany: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Company: fn(self: *const IFaxSender, bstrCompany: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Country: fn(self: *const IFaxSender, pbstrCountry: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Country: fn(self: *const IFaxSender, bstrCountry: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Department: fn(self: *const IFaxSender, pbstrDepartment: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Department: fn(self: *const IFaxSender, bstrDepartment: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Email: fn(self: *const IFaxSender, pbstrEmail: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Email: fn(self: *const IFaxSender, bstrEmail: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FaxNumber: fn(self: *const IFaxSender, pbstrFaxNumber: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_FaxNumber: fn(self: *const IFaxSender, bstrFaxNumber: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_HomePhone: fn(self: *const IFaxSender, pbstrHomePhone: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_HomePhone: fn(self: *const IFaxSender, bstrHomePhone: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Name: fn(self: *const IFaxSender, pbstrName: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Name: fn(self: *const IFaxSender, bstrName: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TSID: fn(self: *const IFaxSender, pbstrTSID: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_TSID: fn(self: *const IFaxSender, bstrTSID: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OfficePhone: fn(self: *const IFaxSender, pbstrOfficePhone: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_OfficePhone: fn(self: *const IFaxSender, bstrOfficePhone: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OfficeLocation: fn(self: *const IFaxSender, pbstrOfficeLocation: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_OfficeLocation: fn(self: *const IFaxSender, bstrOfficeLocation: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_State: fn(self: *const IFaxSender, pbstrState: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_State: fn(self: *const IFaxSender, bstrState: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_StreetAddress: fn(self: *const IFaxSender, pbstrStreetAddress: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_StreetAddress: fn(self: *const IFaxSender, bstrStreetAddress: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Title: fn(self: *const IFaxSender, pbstrTitle: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Title: fn(self: *const IFaxSender, bstrTitle: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ZipCode: fn(self: *const IFaxSender, pbstrZipCode: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ZipCode: fn(self: *const IFaxSender, bstrZipCode: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LoadDefaultSender: fn(self: *const IFaxSender,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SaveDefaultSender: fn(self: *const IFaxSender,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin that wraps each raw vtable slot in a callable method. Used by
    /// this struct itself (via `usingnamespace` below) and reusable by any
    /// type embedding this interface's vtable.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IDispatch.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_BillingCode(self: *const T, pbstrBillingCode: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_BillingCode(@ptrCast(*const IFaxSender, self), pbstrBillingCode); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_BillingCode(self: *const T, bstrBillingCode: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_BillingCode(@ptrCast(*const IFaxSender, self), bstrBillingCode); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_City(self: *const T, pbstrCity: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_City(@ptrCast(*const IFaxSender, self), pbstrCity); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_City(self: *const T, bstrCity: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_City(@ptrCast(*const IFaxSender, self), bstrCity); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_Company(self: *const T, pbstrCompany: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_Company(@ptrCast(*const IFaxSender, self), pbstrCompany); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_Company(self: *const T, bstrCompany: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_Company(@ptrCast(*const IFaxSender, self), bstrCompany); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_Country(self: *const T, pbstrCountry: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_Country(@ptrCast(*const IFaxSender, self), pbstrCountry); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_Country(self: *const T, bstrCountry: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_Country(@ptrCast(*const IFaxSender, self), bstrCountry); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_Department(self: *const T, pbstrDepartment: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_Department(@ptrCast(*const IFaxSender, self), pbstrDepartment); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_Department(self: *const T, bstrDepartment: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_Department(@ptrCast(*const IFaxSender, self), bstrDepartment); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_Email(self: *const T, pbstrEmail: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_Email(@ptrCast(*const IFaxSender, self), pbstrEmail); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_Email(self: *const T, bstrEmail: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_Email(@ptrCast(*const IFaxSender, self), bstrEmail); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_FaxNumber(self: *const T, pbstrFaxNumber: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_FaxNumber(@ptrCast(*const IFaxSender, self), pbstrFaxNumber); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_FaxNumber(self: *const T, bstrFaxNumber: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_FaxNumber(@ptrCast(*const IFaxSender, self), bstrFaxNumber); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_HomePhone(self: *const T, pbstrHomePhone: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_HomePhone(@ptrCast(*const IFaxSender, self), pbstrHomePhone); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_HomePhone(self: *const T, bstrHomePhone: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_HomePhone(@ptrCast(*const IFaxSender, self), bstrHomePhone); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_Name(self: *const T, pbstrName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_Name(@ptrCast(*const IFaxSender, self), pbstrName); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_Name(self: *const T, bstrName: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_Name(@ptrCast(*const IFaxSender, self), bstrName); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_TSID(self: *const T, pbstrTSID: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_TSID(@ptrCast(*const IFaxSender, self), pbstrTSID); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_TSID(self: *const T, bstrTSID: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_TSID(@ptrCast(*const IFaxSender, self), bstrTSID); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_OfficePhone(self: *const T, pbstrOfficePhone: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_OfficePhone(@ptrCast(*const IFaxSender, self), pbstrOfficePhone); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_OfficePhone(self: *const T, bstrOfficePhone: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_OfficePhone(@ptrCast(*const IFaxSender, self), bstrOfficePhone); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_OfficeLocation(self: *const T, pbstrOfficeLocation: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_OfficeLocation(@ptrCast(*const IFaxSender, self), pbstrOfficeLocation); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_OfficeLocation(self: *const T, bstrOfficeLocation: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_OfficeLocation(@ptrCast(*const IFaxSender, self), bstrOfficeLocation); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_State(self: *const T, pbstrState: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_State(@ptrCast(*const IFaxSender, self), pbstrState); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_State(self: *const T, bstrState: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_State(@ptrCast(*const IFaxSender, self), bstrState); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_StreetAddress(self: *const T, pbstrStreetAddress: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_StreetAddress(@ptrCast(*const IFaxSender, self), pbstrStreetAddress); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_StreetAddress(self: *const T, bstrStreetAddress: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_StreetAddress(@ptrCast(*const IFaxSender, self), bstrStreetAddress); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_Title(self: *const T, pbstrTitle: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_Title(@ptrCast(*const IFaxSender, self), pbstrTitle); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_Title(self: *const T, bstrTitle: ?BSTR) callconv(.Inline)
HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_Title(@ptrCast(*const IFaxSender, self), bstrTitle); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_get_ZipCode(self: *const T, pbstrZipCode: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).get_ZipCode(@ptrCast(*const IFaxSender, self), pbstrZipCode); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_put_ZipCode(self: *const T, bstrZipCode: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).put_ZipCode(@ptrCast(*const IFaxSender, self), bstrZipCode); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_LoadDefaultSender(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).LoadDefaultSender(@ptrCast(*const IFaxSender, self)); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxSender_SaveDefaultSender(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSender.VTable, self.vtable).SaveDefaultSender(@ptrCast(*const IFaxSender, self)); }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxRecipient_Value = @import("../zig.zig").Guid.initString("9a3da3a0-538d-42b6-9444-aaa57d0ce2bc");
pub const IID_IFaxRecipient = &IID_IFaxRecipient_Value;

/// COM interface describing a single fax recipient; exposes get/put BSTR
/// pairs for the recipient's fax number and name. Derives from IDispatch.
pub const IFaxRecipient = extern struct {
    /// Raw COM vtable. Field order defines the binary slot layout — do not
    /// reorder or remove entries.
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FaxNumber: fn(self: *const IFaxRecipient, pbstrFaxNumber: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_FaxNumber: fn(self: *const IFaxRecipient, bstrFaxNumber: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Name: fn(self: *const IFaxRecipient, pbstrName: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Name: fn(self: *const IFaxRecipient, bstrName: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin wrapping each vtable slot in a callable method; reused by this
    /// struct via the `usingnamespace` below.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IDispatch.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipient_get_FaxNumber(self: *const T, pbstrFaxNumber: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipient.VTable, self.vtable).get_FaxNumber(@ptrCast(*const IFaxRecipient, self), pbstrFaxNumber); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipient_put_FaxNumber(self: *const T, bstrFaxNumber: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipient.VTable, self.vtable).put_FaxNumber(@ptrCast(*const IFaxRecipient, self), bstrFaxNumber); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipient_get_Name(self: *const T, pbstrName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipient.VTable, self.vtable).get_Name(@ptrCast(*const IFaxRecipient, self), pbstrName); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipient_put_Name(self: *const T, bstrName: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipient.VTable, self.vtable).put_Name(@ptrCast(*const IFaxRecipient, self), bstrName); }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxRecipients_Value =
@import("../zig.zig").Guid.initString("b9c9de5a-894e-4492-9fa3-08c627c11d5d");
pub const IID_IFaxRecipients = &IID_IFaxRecipients_Value;

/// COM collection of IFaxRecipient objects: enumerable via get__NewEnum,
/// indexable via get_Item/get_Count, mutable via Add/Remove.
/// Derives from IDispatch.
pub const IFaxRecipients = extern struct {
    /// Raw COM vtable. Field order defines the binary slot layout — do not
    /// reorder or remove entries.
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(self: *const IFaxRecipients, ppUnk: ?*?*IUnknown,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(self: *const IFaxRecipients, lIndex: i32, ppFaxRecipient: ?*?*IFaxRecipient,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(self: *const IFaxRecipients, plCount: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Add: fn(self: *const IFaxRecipients, bstrFaxNumber: ?BSTR, bstrRecipientName: ?BSTR, ppFaxRecipient: ?*?*IFaxRecipient,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Remove: fn(self: *const IFaxRecipients, lIndex: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin wrapping each vtable slot in a callable method; reused by this
    /// struct via the `usingnamespace` below.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IDispatch.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipients_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipients.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxRecipients, self), ppUnk); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipients_get_Item(self: *const T, lIndex: i32, ppFaxRecipient: ?*?*IFaxRecipient) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipients.VTable, self.vtable).get_Item(@ptrCast(*const IFaxRecipients, self), lIndex, ppFaxRecipient); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipients_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipients.VTable, self.vtable).get_Count(@ptrCast(*const IFaxRecipients, self), plCount); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipients_Add(self: *const T, bstrFaxNumber: ?BSTR, bstrRecipientName: ?BSTR, ppFaxRecipient: ?*?*IFaxRecipient) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipients.VTable, self.vtable).Add(@ptrCast(*const IFaxRecipients, self), bstrFaxNumber, bstrRecipientName, ppFaxRecipient); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxRecipients_Remove(self: *const T, lIndex: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxRecipients.VTable, self.vtable).Remove(@ptrCast(*const IFaxRecipients, self), lIndex); }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxIncomingArchive_Value = @import("../zig.zig").Guid.initString("76062cc7-f714-4fbd-aa06-ed6e4a4b70f3");
pub const IID_IFaxIncomingArchive = &IID_IFaxIncomingArchive_Value;

/// COM interface over the incoming-fax archive: configuration properties
/// (UseArchive, ArchiveFolder, size quota watermarks, age limit, size),
/// Refresh/Save, and message retrieval (GetMessages/GetMessage).
/// Derives from IDispatch.
pub const IFaxIncomingArchive = extern struct {
    /// Raw COM vtable. Field order defines the binary slot layout — do not
    /// reorder or remove entries.
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UseArchive: fn(self: *const IFaxIncomingArchive, pbUseArchive: ?*i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_UseArchive: fn(self: *const IFaxIncomingArchive, bUseArchive: i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ArchiveFolder: fn(self: *const IFaxIncomingArchive, pbstrArchiveFolder: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ArchiveFolder: fn(self: *const IFaxIncomingArchive, bstrArchiveFolder: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SizeQuotaWarning: fn(self: *const IFaxIncomingArchive, pbSizeQuotaWarning: ?*i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_SizeQuotaWarning: fn(self: *const IFaxIncomingArchive, bSizeQuotaWarning: i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_HighQuotaWaterMark: fn(self: *const IFaxIncomingArchive, plHighQuotaWaterMark: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_HighQuotaWaterMark: fn(self: *const IFaxIncomingArchive, lHighQuotaWaterMark: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_LowQuotaWaterMark: fn(self: *const IFaxIncomingArchive, plLowQuotaWaterMark: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_LowQuotaWaterMark: fn(self: *const IFaxIncomingArchive, lLowQuotaWaterMark: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AgeLimit: fn(self: *const IFaxIncomingArchive, plAgeLimit: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_AgeLimit: fn(self: *const IFaxIncomingArchive, lAgeLimit: i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SizeLow: fn(self: *const IFaxIncomingArchive, plSizeLow: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SizeHigh: fn(self: *const IFaxIncomingArchive, plSizeHigh: ?*i32,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(self: *const IFaxIncomingArchive,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Save: fn(self: *const IFaxIncomingArchive,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMessages: fn(self: *const IFaxIncomingArchive, lPrefetchSize: i32, pFaxIncomingMessageIterator: ?*?*IFaxIncomingMessageIterator,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMessage: fn(self: *const IFaxIncomingArchive, bstrMessageId: ?BSTR, pFaxIncomingMessage: ?*?*IFaxIncomingMessage,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin wrapping each vtable slot in a callable method; reused by this
    /// struct via the `usingnamespace` that follows it.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IDispatch.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_UseArchive(self: *const T, pbUseArchive: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_UseArchive(@ptrCast(*const IFaxIncomingArchive, self), pbUseArchive); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_put_UseArchive(self: *const T, bUseArchive: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).put_UseArchive(@ptrCast(*const IFaxIncomingArchive, self), bUseArchive); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_ArchiveFolder(self: *const T, pbstrArchiveFolder: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_ArchiveFolder(@ptrCast(*const IFaxIncomingArchive, self), pbstrArchiveFolder); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_put_ArchiveFolder(self: *const T, bstrArchiveFolder: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).put_ArchiveFolder(@ptrCast(*const IFaxIncomingArchive, self), bstrArchiveFolder); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_SizeQuotaWarning(self: *const T, pbSizeQuotaWarning: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_SizeQuotaWarning(@ptrCast(*const IFaxIncomingArchive, self), pbSizeQuotaWarning); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_put_SizeQuotaWarning(self: *const T, bSizeQuotaWarning: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).put_SizeQuotaWarning(@ptrCast(*const IFaxIncomingArchive, self), bSizeQuotaWarning); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_HighQuotaWaterMark(self: *const T, plHighQuotaWaterMark: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_HighQuotaWaterMark(@ptrCast(*const IFaxIncomingArchive, self), plHighQuotaWaterMark); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_put_HighQuotaWaterMark(self: *const T, lHighQuotaWaterMark: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).put_HighQuotaWaterMark(@ptrCast(*const IFaxIncomingArchive, self), lHighQuotaWaterMark); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_LowQuotaWaterMark(self: *const T, plLowQuotaWaterMark: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_LowQuotaWaterMark(@ptrCast(*const IFaxIncomingArchive, self), plLowQuotaWaterMark); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_put_LowQuotaWaterMark(self: *const T, lLowQuotaWaterMark: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).put_LowQuotaWaterMark(@ptrCast(*const IFaxIncomingArchive, self), lLowQuotaWaterMark); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_AgeLimit(self: *const T, plAgeLimit: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_AgeLimit(@ptrCast(*const IFaxIncomingArchive, self), plAgeLimit); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_put_AgeLimit(self: *const T, lAgeLimit: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).put_AgeLimit(@ptrCast(*const IFaxIncomingArchive, self), lAgeLimit); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_SizeLow(self: *const T, plSizeLow: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_SizeLow(@ptrCast(*const IFaxIncomingArchive, self), plSizeLow); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_get_SizeHigh(self: *const T, plSizeHigh: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).get_SizeHigh(@ptrCast(*const IFaxIncomingArchive, self), plSizeHigh); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).Refresh(@ptrCast(*const IFaxIncomingArchive,
self)); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_Save(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).Save(@ptrCast(*const IFaxIncomingArchive, self)); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_GetMessages(self: *const T, lPrefetchSize: i32, pFaxIncomingMessageIterator: ?*?*IFaxIncomingMessageIterator) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).GetMessages(@ptrCast(*const IFaxIncomingArchive, self), lPrefetchSize, pFaxIncomingMessageIterator); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingArchive_GetMessage(self: *const T, bstrMessageId: ?BSTR, pFaxIncomingMessage: ?*?*IFaxIncomingMessage) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingArchive.VTable, self.vtable).GetMessage(@ptrCast(*const IFaxIncomingArchive, self), bstrMessageId, pFaxIncomingMessage); }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxIncomingQueue_Value = @import("../zig.zig").Guid.initString("902e64ef-8fd8-4b75-9725-6014df161545");
pub const IID_IFaxIncomingQueue = &IID_IFaxIncomingQueue_Value;

/// COM interface over the incoming-fax queue: a Blocked flag (get/put),
/// Refresh/Save, and job retrieval (GetJobs/GetJob). Derives from IDispatch.
pub const IFaxIncomingQueue = extern struct {
    /// Raw COM vtable. Field order defines the binary slot layout — do not
    /// reorder or remove entries.
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Blocked: fn(self: *const IFaxIncomingQueue, pbBlocked: ?*i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Blocked: fn(self: *const IFaxIncomingQueue, bBlocked: i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(self: *const IFaxIncomingQueue,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Save: fn(self: *const IFaxIncomingQueue,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetJobs: fn(self: *const IFaxIncomingQueue, pFaxIncomingJobs: ?*?*IFaxIncomingJobs,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetJob: fn(self: *const IFaxIncomingQueue, bstrJobId: ?BSTR, pFaxIncomingJob: ?*?*IFaxIncomingJob,) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin wrapping each vtable slot in a callable method; reused by this
    /// struct via the `usingnamespace` below.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IDispatch.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingQueue_get_Blocked(self: *const T, pbBlocked: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingQueue.VTable, self.vtable).get_Blocked(@ptrCast(*const IFaxIncomingQueue, self), pbBlocked); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingQueue_put_Blocked(self: *const T, bBlocked: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingQueue.VTable, self.vtable).put_Blocked(@ptrCast(*const IFaxIncomingQueue, self), bBlocked); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingQueue_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingQueue.VTable, self.vtable).Refresh(@ptrCast(*const IFaxIncomingQueue, self)); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingQueue_Save(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingQueue.VTable, self.vtable).Save(@ptrCast(*const IFaxIncomingQueue, self)); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingQueue_GetJobs(self: *const T, pFaxIncomingJobs: ?*?*IFaxIncomingJobs) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingQueue.VTable, self.vtable).GetJobs(@ptrCast(*const IFaxIncomingQueue, self), pFaxIncomingJobs); }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFaxIncomingQueue_GetJob(self: *const T, bstrJobId: ?BSTR, pFaxIncomingJob: ?*?*IFaxIncomingJob) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingQueue.VTable, self.vtable).GetJob(@ptrCast(*const IFaxIncomingQueue, self), bstrJobId, pFaxIncomingJob); }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxOutgoingArchive_Value = @import("../zig.zig").Guid.initString("c9c28f40-8d80-4e53-810f-9a79919b49fd");
pub const IID_IFaxOutgoingArchive = &IID_IFaxOutgoingArchive_Value;

/// COM interface over the outgoing-fax archive; mirrors IFaxIncomingArchive's
/// property surface (UseArchive, ArchiveFolder, quotas, ...). Derives from
/// IDispatch. NOTE(review): declaration continues past this point in the file.
pub const IFaxOutgoingArchive = extern struct {
    /// Raw COM vtable. Field order defines the binary slot layout — do not
    /// reorder or remove entries.
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UseArchive: fn(self: *const IFaxOutgoingArchive, pbUseArchive: ?*i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_UseArchive: fn(self: *const IFaxOutgoingArchive, bUseArchive: i16,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ArchiveFolder: fn(self: *const IFaxOutgoingArchive, pbstrArchiveFolder: ?*?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ArchiveFolder: fn(self: *const IFaxOutgoingArchive, bstrArchiveFolder: ?BSTR,) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
// NOTE(review): continuation of IFaxOutgoingArchive.VTable — slot order is ABI.
get_SizeQuotaWarning: fn(
    self: *const IFaxOutgoingArchive,
    pbSizeQuotaWarning: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_SizeQuotaWarning: fn(
    self: *const IFaxOutgoingArchive,
    bSizeQuotaWarning: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_HighQuotaWaterMark: fn(
    self: *const IFaxOutgoingArchive,
    plHighQuotaWaterMark: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_HighQuotaWaterMark: fn(
    self: *const IFaxOutgoingArchive,
    lHighQuotaWaterMark: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_LowQuotaWaterMark: fn(
    self: *const IFaxOutgoingArchive,
    plLowQuotaWaterMark: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_LowQuotaWaterMark: fn(
    self: *const IFaxOutgoingArchive,
    lLowQuotaWaterMark: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_AgeLimit: fn(
    self: *const IFaxOutgoingArchive,
    plAgeLimit: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_AgeLimit: fn(
    self: *const IFaxOutgoingArchive,
    lAgeLimit: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_SizeLow: fn(
    self: *const IFaxOutgoingArchive,
    plSizeLow: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_SizeHigh: fn(
    self: *const IFaxOutgoingArchive,
    plSizeHigh: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Refresh: fn(
    self: *const IFaxOutgoingArchive,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Save: fn(
    self: *const IFaxOutgoingArchive,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMessages: fn(
    self: *const IFaxOutgoingArchive,
    lPrefetchSize: i32,
    pFaxOutgoingMessageIterator: ?*?*IFaxOutgoingMessageIterator,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMessage: fn(
    self: *const IFaxOutgoingArchive,
    bstrMessageId: ?BSTR,
    pFaxOutgoingMessage: ?*?*IFaxOutgoingMessage,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
// Inline wrappers dispatching through the vtable above.
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_UseArchive(self: *const T, pbUseArchive: ?*i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_UseArchive(@ptrCast(*const IFaxOutgoingArchive, self), pbUseArchive);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_put_UseArchive(self: *const T, bUseArchive: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).put_UseArchive(@ptrCast(*const IFaxOutgoingArchive, self), bUseArchive);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_ArchiveFolder(self: *const T, pbstrArchiveFolder: ?*?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_ArchiveFolder(@ptrCast(*const IFaxOutgoingArchive, self), pbstrArchiveFolder);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_put_ArchiveFolder(self: *const T, bstrArchiveFolder: ?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).put_ArchiveFolder(@ptrCast(*const IFaxOutgoingArchive, self), bstrArchiveFolder);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_SizeQuotaWarning(self: *const T, pbSizeQuotaWarning: ?*i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_SizeQuotaWarning(@ptrCast(*const IFaxOutgoingArchive, self), pbSizeQuotaWarning);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_put_SizeQuotaWarning(self: *const T, bSizeQuotaWarning: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).put_SizeQuotaWarning(@ptrCast(*const IFaxOutgoingArchive, self), bSizeQuotaWarning);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_HighQuotaWaterMark(self: *const T, plHighQuotaWaterMark: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_HighQuotaWaterMark(@ptrCast(*const IFaxOutgoingArchive, self), plHighQuotaWaterMark);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_put_HighQuotaWaterMark(self: *const T, lHighQuotaWaterMark: i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).put_HighQuotaWaterMark(@ptrCast(*const IFaxOutgoingArchive, self), lHighQuotaWaterMark);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_LowQuotaWaterMark(self: *const T, plLowQuotaWaterMark: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_LowQuotaWaterMark(@ptrCast(*const IFaxOutgoingArchive, self), plLowQuotaWaterMark);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_put_LowQuotaWaterMark(self: *const T, lLowQuotaWaterMark: i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).put_LowQuotaWaterMark(@ptrCast(*const IFaxOutgoingArchive, self), lLowQuotaWaterMark);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_AgeLimit(self: *const T, plAgeLimit: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_AgeLimit(@ptrCast(*const IFaxOutgoingArchive, self), plAgeLimit);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_put_AgeLimit(self: *const T, lAgeLimit: i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).put_AgeLimit(@ptrCast(*const IFaxOutgoingArchive, self), lAgeLimit);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_SizeLow(self: *const T, plSizeLow: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_SizeLow(@ptrCast(*const IFaxOutgoingArchive, self), plSizeLow);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_get_SizeHigh(self: *const T, plSizeHigh: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).get_SizeHigh(@ptrCast(*const IFaxOutgoingArchive, self), plSizeHigh);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_Refresh(self: *const T) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).Refresh(@ptrCast(*const IFaxOutgoingArchive, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_Save(self: *const T) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).Save(@ptrCast(*const IFaxOutgoingArchive, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_GetMessages(self: *const T, lPrefetchSize: i32, pFaxOutgoingMessageIterator: ?*?*IFaxOutgoingMessageIterator) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).GetMessages(@ptrCast(*const IFaxOutgoingArchive, self), lPrefetchSize, pFaxOutgoingMessageIterator);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingArchive_GetMessage(self: *const T, bstrMessageId: ?BSTR, pFaxOutgoingMessage: ?*?*IFaxOutgoingMessage) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingArchive.VTable, self.vtable).GetMessage(@ptrCast(*const IFaxOutgoingArchive, self), bstrMessageId, pFaxOutgoingMessage);
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxOutgoingQueue_Value = @import("../zig.zig").Guid.initString("80b1df24-d9ac-4333-b373-487cedc80ce5");
pub const IID_IFaxOutgoingQueue = &IID_IFaxOutgoingQueue_Value;
// COM dual interface (extends IDispatch) for the outgoing fax queue:
// blocked/paused state, retry policy, discount-rate window, and job lookup.
pub const IFaxOutgoingQueue = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Blocked: fn(
            self: *const IFaxOutgoingQueue,
            pbBlocked: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Blocked: fn(
            self: *const IFaxOutgoingQueue,
            bBlocked: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Paused: fn(
            self: *const IFaxOutgoingQueue,
            pbPaused: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
// NOTE(review): continuation of IFaxOutgoingQueue.VTable — slot order is ABI.
put_Paused: fn(
    self: *const IFaxOutgoingQueue,
    bPaused: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_AllowPersonalCoverPages: fn(
    self: *const IFaxOutgoingQueue,
    pbAllowPersonalCoverPages: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_AllowPersonalCoverPages: fn(
    self: *const IFaxOutgoingQueue,
    bAllowPersonalCoverPages: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_UseDeviceTSID: fn(
    self: *const IFaxOutgoingQueue,
    pbUseDeviceTSID: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_UseDeviceTSID: fn(
    self: *const IFaxOutgoingQueue,
    bUseDeviceTSID: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Retries: fn(
    self: *const IFaxOutgoingQueue,
    plRetries: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Retries: fn(
    self: *const IFaxOutgoingQueue,
    lRetries: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_RetryDelay: fn(
    self: *const IFaxOutgoingQueue,
    plRetryDelay: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_RetryDelay: fn(
    self: *const IFaxOutgoingQueue,
    lRetryDelay: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
// NOTE(review): the DiscountRate* properties use f64 — a COM DATE (OLE automation
// date) per the generated parameter names; confirm against the metadata.
get_DiscountRateStart: fn(
    self: *const IFaxOutgoingQueue,
    pdateDiscountRateStart: ?*f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_DiscountRateStart: fn(
    self: *const IFaxOutgoingQueue,
    dateDiscountRateStart: f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_DiscountRateEnd: fn(
    self: *const IFaxOutgoingQueue,
    pdateDiscountRateEnd: ?*f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_DiscountRateEnd: fn(
    self: *const IFaxOutgoingQueue,
    dateDiscountRateEnd: f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_AgeLimit: fn(
    self: *const IFaxOutgoingQueue,
    plAgeLimit: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_AgeLimit: fn(
    self: *const IFaxOutgoingQueue,
    lAgeLimit: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Branding: fn(
    self: *const IFaxOutgoingQueue,
    pbBranding: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Branding: fn(
    self: *const IFaxOutgoingQueue,
    bBranding: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Refresh: fn(
    self: *const IFaxOutgoingQueue,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Save: fn(
    self: *const IFaxOutgoingQueue,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetJobs: fn(
    self: *const IFaxOutgoingQueue,
    pFaxOutgoingJobs: ?*?*IFaxOutgoingJobs,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetJob: fn(
    self: *const IFaxOutgoingQueue,
    bstrJobId: ?BSTR,
    pFaxOutgoingJob: ?*?*IFaxOutgoingJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
// Inline wrappers dispatching through the vtable above.
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_Blocked(self: *const T, pbBlocked: ?*i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_Blocked(@ptrCast(*const IFaxOutgoingQueue, self), pbBlocked);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_Blocked(self: *const T, bBlocked: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_Blocked(@ptrCast(*const IFaxOutgoingQueue, self), bBlocked);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_Paused(self: *const T, pbPaused: ?*i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_Paused(@ptrCast(*const IFaxOutgoingQueue, self), pbPaused);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_Paused(self: *const T, bPaused: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_Paused(@ptrCast(*const IFaxOutgoingQueue, self), bPaused);
}
// NOTE: method is namespaced with interface name to
// avoid conflicts for now
// NOTE(review): continuation of IFaxOutgoingQueue.MethodMixin wrappers.
pub fn IFaxOutgoingQueue_get_AllowPersonalCoverPages(self: *const T, pbAllowPersonalCoverPages: ?*i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_AllowPersonalCoverPages(@ptrCast(*const IFaxOutgoingQueue, self), pbAllowPersonalCoverPages);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_AllowPersonalCoverPages(self: *const T, bAllowPersonalCoverPages: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_AllowPersonalCoverPages(@ptrCast(*const IFaxOutgoingQueue, self), bAllowPersonalCoverPages);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_UseDeviceTSID(self: *const T, pbUseDeviceTSID: ?*i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_UseDeviceTSID(@ptrCast(*const IFaxOutgoingQueue, self), pbUseDeviceTSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_UseDeviceTSID(self: *const T, bUseDeviceTSID: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_UseDeviceTSID(@ptrCast(*const IFaxOutgoingQueue, self), bUseDeviceTSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_Retries(self: *const T, plRetries: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_Retries(@ptrCast(*const IFaxOutgoingQueue, self), plRetries);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_Retries(self: *const T, lRetries: i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_Retries(@ptrCast(*const IFaxOutgoingQueue, self), lRetries);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_RetryDelay(self: *const T, plRetryDelay: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_RetryDelay(@ptrCast(*const IFaxOutgoingQueue, self), plRetryDelay);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_RetryDelay(self: *const T, lRetryDelay: i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_RetryDelay(@ptrCast(*const IFaxOutgoingQueue, self), lRetryDelay);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_DiscountRateStart(self: *const T, pdateDiscountRateStart: ?*f64) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_DiscountRateStart(@ptrCast(*const IFaxOutgoingQueue, self), pdateDiscountRateStart);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_DiscountRateStart(self: *const T, dateDiscountRateStart: f64) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_DiscountRateStart(@ptrCast(*const IFaxOutgoingQueue, self), dateDiscountRateStart);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_DiscountRateEnd(self: *const T, pdateDiscountRateEnd: ?*f64) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_DiscountRateEnd(@ptrCast(*const IFaxOutgoingQueue, self), pdateDiscountRateEnd);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_DiscountRateEnd(self: *const T, dateDiscountRateEnd: f64) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_DiscountRateEnd(@ptrCast(*const IFaxOutgoingQueue, self), dateDiscountRateEnd);
}
// NOTE: method is namespaced with
// interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_AgeLimit(self: *const T, plAgeLimit: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_AgeLimit(@ptrCast(*const IFaxOutgoingQueue, self), plAgeLimit);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_AgeLimit(self: *const T, lAgeLimit: i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_AgeLimit(@ptrCast(*const IFaxOutgoingQueue, self), lAgeLimit);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_get_Branding(self: *const T, pbBranding: ?*i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).get_Branding(@ptrCast(*const IFaxOutgoingQueue, self), pbBranding);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_put_Branding(self: *const T, bBranding: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).put_Branding(@ptrCast(*const IFaxOutgoingQueue, self), bBranding);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_Refresh(self: *const T) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).Refresh(@ptrCast(*const IFaxOutgoingQueue, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_Save(self: *const T) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).Save(@ptrCast(*const IFaxOutgoingQueue, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_GetJobs(self: *const T, pFaxOutgoingJobs: ?*?*IFaxOutgoingJobs) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable,
self.vtable).GetJobs(@ptrCast(*const IFaxOutgoingQueue, self), pFaxOutgoingJobs);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxOutgoingQueue_GetJob(self: *const T, bstrJobId: ?BSTR, pFaxOutgoingJob: ?*?*IFaxOutgoingJob) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxOutgoingQueue.VTable, self.vtable).GetJob(@ptrCast(*const IFaxOutgoingQueue, self), bstrJobId, pFaxOutgoingJob);
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxIncomingMessageIterator_Value = @import("../zig.zig").Guid.initString("fd73ecc4-6f06-4f52-82a8-f7ba06ae3108");
pub const IID_IFaxIncomingMessageIterator = &IID_IFaxIncomingMessageIterator_Value;
// COM dual interface (extends IDispatch): forward-only cursor over incoming fax
// messages with a configurable prefetch size.
pub const IFaxIncomingMessageIterator = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Message: fn(
            self: *const IFaxIncomingMessageIterator,
            pFaxIncomingMessage: ?*?*IFaxIncomingMessage,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_PrefetchSize: fn(
            self: *const IFaxIncomingMessageIterator,
            plPrefetchSize: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_PrefetchSize: fn(
            self: *const IFaxIncomingMessageIterator,
            lPrefetchSize: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AtEOF: fn(
            self: *const IFaxIncomingMessageIterator,
            pbEOF: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveFirst: fn(
            self: *const IFaxIncomingMessageIterator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IFaxIncomingMessageIterator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline wrappers dispatching through the vtable above.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingMessageIterator_get_Message(self: *const T, pFaxIncomingMessage: ?*?*IFaxIncomingMessage) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingMessageIterator.VTable, self.vtable).get_Message(@ptrCast(*const IFaxIncomingMessageIterator, self), pFaxIncomingMessage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingMessageIterator_get_PrefetchSize(self: *const T, plPrefetchSize: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingMessageIterator.VTable, self.vtable).get_PrefetchSize(@ptrCast(*const IFaxIncomingMessageIterator, self), plPrefetchSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingMessageIterator_put_PrefetchSize(self: *const T, lPrefetchSize: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingMessageIterator.VTable, self.vtable).put_PrefetchSize(@ptrCast(*const IFaxIncomingMessageIterator, self), lPrefetchSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingMessageIterator_get_AtEOF(self: *const T, pbEOF: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingMessageIterator.VTable, self.vtable).get_AtEOF(@ptrCast(*const IFaxIncomingMessageIterator, self), pbEOF);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingMessageIterator_MoveFirst(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingMessageIterator.VTable, self.vtable).MoveFirst(@ptrCast(*const IFaxIncomingMessageIterator, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingMessageIterator_MoveNext(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingMessageIterator.VTable, self.vtable).MoveNext(@ptrCast(*const IFaxIncomingMessageIterator, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxIncomingMessage_Value = @import("../zig.zig").Guid.initString("7cab88fa-2ef9-4851-b2f3-1d148fed8447");
pub const IID_IFaxIncomingMessage = &IID_IFaxIncomingMessage_Value;
// COM dual interface (extends IDispatch): read-only view of one received fax
// message (ids, timing, CSID/TSID, caller id) plus TIFF export and deletion.
pub const IFaxIncomingMessage = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Id: fn(
            self: *const IFaxIncomingMessage,
            pbstrId: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Pages: fn(
            self: *const IFaxIncomingMessage,
            plPages: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Size: fn(
            self: *const IFaxIncomingMessage,
            plSize: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_DeviceName: fn(
            self: *const IFaxIncomingMessage,
            pbstrDeviceName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Retries: fn(
            self: *const IFaxIncomingMessage,
            plRetries: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
// NOTE(review): continuation of IFaxIncomingMessage.VTable — slot order is ABI.
// The pdate* out-params are f64; parameter naming suggests OLE automation DATE.
get_TransmissionStart: fn(
    self: *const IFaxIncomingMessage,
    pdateTransmissionStart: ?*f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_TransmissionEnd: fn(
    self: *const IFaxIncomingMessage,
    pdateTransmissionEnd: ?*f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_CSID: fn(
    self: *const IFaxIncomingMessage,
    pbstrCSID: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_TSID: fn(
    self: *const IFaxIncomingMessage,
    pbstrTSID: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_CallerId: fn(
    self: *const IFaxIncomingMessage,
    pbstrCallerId: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_RoutingInformation: fn(
    self: *const IFaxIncomingMessage,
    pbstrRoutingInformation: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CopyTiff: fn(
    self: *const IFaxIncomingMessage,
    bstrTiffPath: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Delete: fn(
    self: *const IFaxIncomingMessage,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
// Inline wrappers dispatching through the vtable above.
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_Id(self: *const T, pbstrId: ?*?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_Id(@ptrCast(*const IFaxIncomingMessage, self), pbstrId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_Pages(self: *const T, plPages: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_Pages(@ptrCast(*const IFaxIncomingMessage, self), plPages);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_Size(self: *const T, plSize: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_Size(@ptrCast(*const IFaxIncomingMessage, self), plSize);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_DeviceName(self: *const T, pbstrDeviceName: ?*?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_DeviceName(@ptrCast(*const IFaxIncomingMessage, self), pbstrDeviceName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_Retries(self: *const T, plRetries: ?*i32) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_Retries(@ptrCast(*const IFaxIncomingMessage, self), plRetries);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_TransmissionStart(self: *const T, pdateTransmissionStart: ?*f64) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_TransmissionStart(@ptrCast(*const IFaxIncomingMessage, self), pdateTransmissionStart);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_TransmissionEnd(self: *const T, pdateTransmissionEnd: ?*f64) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_TransmissionEnd(@ptrCast(*const IFaxIncomingMessage, self), pdateTransmissionEnd);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_CSID(self: *const T, pbstrCSID: ?*?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_CSID(@ptrCast(*const IFaxIncomingMessage, self), pbstrCSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_TSID(self: *const T, pbstrTSID: ?*?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_TSID(@ptrCast(*const IFaxIncomingMessage, self), pbstrTSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_CallerId(self: *const T, pbstrCallerId: ?*?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).get_CallerId(@ptrCast(*const IFaxIncomingMessage, self), pbstrCallerId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_get_RoutingInformation(self: *const T, pbstrRoutingInformation: ?*?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable,
self.vtable).get_RoutingInformation(@ptrCast(*const IFaxIncomingMessage, self), pbstrRoutingInformation); }
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_CopyTiff(self: *const T, bstrTiffPath: ?BSTR) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).CopyTiff(@ptrCast(*const IFaxIncomingMessage, self), bstrTiffPath);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFaxIncomingMessage_Delete(self: *const T) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxIncomingMessage.VTable, self.vtable).Delete(@ptrCast(*const IFaxIncomingMessage, self));
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
// NOTE(review): generated COM binding — do not hand-edit field order. IFaxOutgoingJobs
// is a COM collection interface (derives from IDispatch via `base`): get__NewEnum
// returns a COM enumerator as IUnknown, get_Item looks a job up by VARIANT index,
// get_Count reports the number of jobs. The VTable field order mirrors the COM
// vtable slot layout, so reordering fields would break every call through it.
const IID_IFaxOutgoingJobs_Value = @import("../zig.zig").Guid.initString("2c56d8e6-8c2f-4573-944c-e505f8f5aeed");
pub const IID_IFaxOutgoingJobs = &IID_IFaxOutgoingJobs_Value;
pub const IFaxOutgoingJobs = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(
            self: *const IFaxOutgoingJobs,
            ppUnk: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(
            self: *const IFaxOutgoingJobs,
            vIndex: VARIANT,
            pFaxOutgoingJob: ?*?*IFaxOutgoingJob,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(
            self: *const IFaxOutgoingJobs,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // MethodMixin generates thin inline wrappers that forward each call through the
    // vtable, casting `self` to the concrete interface pointer. Mixed into any type T
    // that embeds this interface (including this struct itself, below).
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingJobs_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingJobs.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxOutgoingJobs, self), ppUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingJobs_get_Item(self: *const T, vIndex: VARIANT, pFaxOutgoingJob: ?*?*IFaxOutgoingJob) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingJobs.VTable, self.vtable).get_Item(@ptrCast(*const IFaxOutgoingJobs, self), vIndex, pFaxOutgoingJob);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingJobs_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingJobs.VTable, self.vtable).get_Count(@ptrCast(*const IFaxOutgoingJobs, self), plCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxOutgoingJob_Value = @import("../zig.zig").Guid.initString("6356daad-6614-4583-bf7a-3ad67bbfc71c");
pub const IID_IFaxOutgoingJob = &IID_IFaxOutgoingJob_Value;
pub const IFaxOutgoingJob = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Subject: fn(
            self: *const IFaxOutgoingJob,
            pbstrSubject: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
get_DocumentName: fn( self: *const IFaxOutgoingJob, pbstrDocumentName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Pages: fn( self: *const IFaxOutgoingJob, plPages: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Size: fn( self: *const IFaxOutgoingJob, plSize: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SubmissionId: fn( self: *const IFaxOutgoingJob, pbstrSubmissionId: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Id: fn( self: *const IFaxOutgoingJob, pbstrId: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_OriginalScheduledTime: fn( self: *const IFaxOutgoingJob, pdateOriginalScheduledTime: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SubmissionTime: fn( self: *const IFaxOutgoingJob, pdateSubmissionTime: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ReceiptType: fn( self: *const IFaxOutgoingJob, pReceiptType: ?*FAX_RECEIPT_TYPE_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Priority: fn( self: *const IFaxOutgoingJob, pPriority: ?*FAX_PRIORITY_TYPE_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_Sender: fn( self: *const IFaxOutgoingJob, ppFaxSender: ?*?*IFaxSender, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Recipient: fn( self: *const IFaxOutgoingJob, ppFaxRecipient: ?*?*IFaxRecipient, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_CurrentPage: fn( self: *const IFaxOutgoingJob, plCurrentPage: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DeviceId: fn( self: *const IFaxOutgoingJob, plDeviceId: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Status: fn( self: *const IFaxOutgoingJob, pStatus: ?*FAX_JOB_STATUS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ExtendedStatusCode: fn( self: *const IFaxOutgoingJob, pExtendedStatusCode: ?*FAX_JOB_EXTENDED_STATUS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ExtendedStatus: fn( self: *const IFaxOutgoingJob, pbstrExtendedStatus: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_AvailableOperations: fn( self: *const IFaxOutgoingJob, pAvailableOperations: ?*FAX_JOB_OPERATIONS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Retries: fn( self: *const IFaxOutgoingJob, plRetries: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_ScheduledTime: fn( self: *const IFaxOutgoingJob, pdateScheduledTime: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TransmissionStart: fn( self: *const IFaxOutgoingJob, pdateTransmissionStart: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TransmissionEnd: fn( self: *const IFaxOutgoingJob, pdateTransmissionEnd: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_CSID: fn( self: *const IFaxOutgoingJob, pbstrCSID: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TSID: fn( self: *const IFaxOutgoingJob, pbstrTSID: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_GroupBroadcastReceipts: fn( self: *const IFaxOutgoingJob, pbGroupBroadcastReceipts: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Pause: fn( self: *const IFaxOutgoingJob, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Resume: fn( self: *const IFaxOutgoingJob, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Restart: fn( self: *const IFaxOutgoingJob, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CopyTiff: fn( self: *const IFaxOutgoingJob, bstrTiffPath: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxOutgoingJob, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Cancel: fn( self: *const IFaxOutgoingJob, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Subject(self: *const T, pbstrSubject: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_Subject(@ptrCast(*const IFaxOutgoingJob, self), pbstrSubject); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_DocumentName(self: *const T, pbstrDocumentName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_DocumentName(@ptrCast(*const IFaxOutgoingJob, self), pbstrDocumentName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Pages(self: *const T, plPages: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_Pages(@ptrCast(*const IFaxOutgoingJob, self), plPages); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Size(self: *const T, plSize: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const 
IFaxOutgoingJob.VTable, self.vtable).get_Size(@ptrCast(*const IFaxOutgoingJob, self), plSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_SubmissionId(self: *const T, pbstrSubmissionId: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_SubmissionId(@ptrCast(*const IFaxOutgoingJob, self), pbstrSubmissionId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Id(self: *const T, pbstrId: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_Id(@ptrCast(*const IFaxOutgoingJob, self), pbstrId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_OriginalScheduledTime(self: *const T, pdateOriginalScheduledTime: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_OriginalScheduledTime(@ptrCast(*const IFaxOutgoingJob, self), pdateOriginalScheduledTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_SubmissionTime(self: *const T, pdateSubmissionTime: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_SubmissionTime(@ptrCast(*const IFaxOutgoingJob, self), pdateSubmissionTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_ReceiptType(self: *const T, pReceiptType: ?*FAX_RECEIPT_TYPE_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_ReceiptType(@ptrCast(*const IFaxOutgoingJob, self), pReceiptType); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Priority(self: *const T, pPriority: ?*FAX_PRIORITY_TYPE_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, 
self.vtable).get_Priority(@ptrCast(*const IFaxOutgoingJob, self), pPriority); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Sender(self: *const T, ppFaxSender: ?*?*IFaxSender) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_Sender(@ptrCast(*const IFaxOutgoingJob, self), ppFaxSender); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Recipient(self: *const T, ppFaxRecipient: ?*?*IFaxRecipient) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_Recipient(@ptrCast(*const IFaxOutgoingJob, self), ppFaxRecipient); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_CurrentPage(self: *const T, plCurrentPage: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_CurrentPage(@ptrCast(*const IFaxOutgoingJob, self), plCurrentPage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_DeviceId(self: *const T, plDeviceId: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_DeviceId(@ptrCast(*const IFaxOutgoingJob, self), plDeviceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Status(self: *const T, pStatus: ?*FAX_JOB_STATUS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_Status(@ptrCast(*const IFaxOutgoingJob, self), pStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_ExtendedStatusCode(self: *const T, pExtendedStatusCode: ?*FAX_JOB_EXTENDED_STATUS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_ExtendedStatusCode(@ptrCast(*const IFaxOutgoingJob, self), 
pExtendedStatusCode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_ExtendedStatus(self: *const T, pbstrExtendedStatus: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_ExtendedStatus(@ptrCast(*const IFaxOutgoingJob, self), pbstrExtendedStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_AvailableOperations(self: *const T, pAvailableOperations: ?*FAX_JOB_OPERATIONS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_AvailableOperations(@ptrCast(*const IFaxOutgoingJob, self), pAvailableOperations); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_Retries(self: *const T, plRetries: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_Retries(@ptrCast(*const IFaxOutgoingJob, self), plRetries); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_ScheduledTime(self: *const T, pdateScheduledTime: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_ScheduledTime(@ptrCast(*const IFaxOutgoingJob, self), pdateScheduledTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_TransmissionStart(self: *const T, pdateTransmissionStart: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_TransmissionStart(@ptrCast(*const IFaxOutgoingJob, self), pdateTransmissionStart); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_TransmissionEnd(self: *const T, pdateTransmissionEnd: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_TransmissionEnd(@ptrCast(*const 
IFaxOutgoingJob, self), pdateTransmissionEnd); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_CSID(self: *const T, pbstrCSID: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_CSID(@ptrCast(*const IFaxOutgoingJob, self), pbstrCSID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_TSID(self: *const T, pbstrTSID: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_TSID(@ptrCast(*const IFaxOutgoingJob, self), pbstrTSID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_get_GroupBroadcastReceipts(self: *const T, pbGroupBroadcastReceipts: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).get_GroupBroadcastReceipts(@ptrCast(*const IFaxOutgoingJob, self), pbGroupBroadcastReceipts); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_Pause(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).Pause(@ptrCast(*const IFaxOutgoingJob, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_Resume(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).Resume(@ptrCast(*const IFaxOutgoingJob, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_Restart(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).Restart(@ptrCast(*const IFaxOutgoingJob, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_CopyTiff(self: *const T, bstrTiffPath: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const 
IFaxOutgoingJob.VTable, self.vtable).CopyTiff(@ptrCast(*const IFaxOutgoingJob, self), bstrTiffPath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).Refresh(@ptrCast(*const IFaxOutgoingJob, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob_Cancel(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob.VTable, self.vtable).Cancel(@ptrCast(*const IFaxOutgoingJob, self)); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxOutgoingMessageIterator_Value = @import("../zig.zig").Guid.initString("f5ec5d4f-b840-432f-9980-112fe42a9b7a"); pub const IID_IFaxOutgoingMessageIterator = &IID_IFaxOutgoingMessageIterator_Value; pub const IFaxOutgoingMessageIterator = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Message: fn( self: *const IFaxOutgoingMessageIterator, pFaxOutgoingMessage: ?*?*IFaxOutgoingMessage, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_AtEOF: fn( self: *const IFaxOutgoingMessageIterator, pbEOF: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_PrefetchSize: fn( self: *const IFaxOutgoingMessageIterator, plPrefetchSize: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_PrefetchSize: fn( self: *const IFaxOutgoingMessageIterator, lPrefetchSize: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveFirst: fn( self: *const IFaxOutgoingMessageIterator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveNext: fn( self: *const IFaxOutgoingMessageIterator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessageIterator_get_Message(self: *const T, pFaxOutgoingMessage: ?*?*IFaxOutgoingMessage) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessageIterator.VTable, self.vtable).get_Message(@ptrCast(*const IFaxOutgoingMessageIterator, self), pFaxOutgoingMessage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessageIterator_get_AtEOF(self: *const T, pbEOF: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessageIterator.VTable, self.vtable).get_AtEOF(@ptrCast(*const IFaxOutgoingMessageIterator, self), pbEOF); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessageIterator_get_PrefetchSize(self: *const T, plPrefetchSize: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessageIterator.VTable, self.vtable).get_PrefetchSize(@ptrCast(*const IFaxOutgoingMessageIterator, self), plPrefetchSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessageIterator_put_PrefetchSize(self: *const T, lPrefetchSize: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessageIterator.VTable, self.vtable).put_PrefetchSize(@ptrCast(*const IFaxOutgoingMessageIterator, self), lPrefetchSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IFaxOutgoingMessageIterator_MoveFirst(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessageIterator.VTable, self.vtable).MoveFirst(@ptrCast(*const IFaxOutgoingMessageIterator, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessageIterator_MoveNext(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessageIterator.VTable, self.vtable).MoveNext(@ptrCast(*const IFaxOutgoingMessageIterator, self)); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxOutgoingMessage_Value = @import("../zig.zig").Guid.initString("f0ea35de-caa5-4a7c-82c7-2b60ba5f2be2"); pub const IID_IFaxOutgoingMessage = &IID_IFaxOutgoingMessage_Value; pub const IFaxOutgoingMessage = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SubmissionId: fn( self: *const IFaxOutgoingMessage, pbstrSubmissionId: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Id: fn( self: *const IFaxOutgoingMessage, pbstrId: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Subject: fn( self: *const IFaxOutgoingMessage, pbstrSubject: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DocumentName: fn( self: *const IFaxOutgoingMessage, pbstrDocumentName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_Retries: fn( self: *const IFaxOutgoingMessage, plRetries: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Pages: fn( self: *const IFaxOutgoingMessage, plPages: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Size: fn( self: *const IFaxOutgoingMessage, plSize: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_OriginalScheduledTime: fn( self: *const IFaxOutgoingMessage, pdateOriginalScheduledTime: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SubmissionTime: fn( self: *const IFaxOutgoingMessage, pdateSubmissionTime: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Priority: fn( self: *const IFaxOutgoingMessage, pPriority: ?*FAX_PRIORITY_TYPE_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Sender: fn( self: *const IFaxOutgoingMessage, ppFaxSender: ?*?*IFaxSender, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Recipient: fn( self: *const IFaxOutgoingMessage, ppFaxRecipient: ?*?*IFaxRecipient, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DeviceName: fn( self: *const IFaxOutgoingMessage, pbstrDeviceName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_TransmissionStart: fn( self: *const IFaxOutgoingMessage, pdateTransmissionStart: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TransmissionEnd: fn( self: *const IFaxOutgoingMessage, pdateTransmissionEnd: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_CSID: fn( self: *const IFaxOutgoingMessage, pbstrCSID: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_TSID: fn( self: *const IFaxOutgoingMessage, pbstrTSID: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CopyTiff: fn( self: *const IFaxOutgoingMessage, bstrTiffPath: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Delete: fn( self: *const IFaxOutgoingMessage, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_SubmissionId(self: *const T, pbstrSubmissionId: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_SubmissionId(@ptrCast(*const IFaxOutgoingMessage, self), pbstrSubmissionId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Id(self: *const T, pbstrId: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_Id(@ptrCast(*const IFaxOutgoingMessage, self), pbstrId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Subject(self: *const T, pbstrSubject: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, 
self.vtable).get_Subject(@ptrCast(*const IFaxOutgoingMessage, self), pbstrSubject); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_DocumentName(self: *const T, pbstrDocumentName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_DocumentName(@ptrCast(*const IFaxOutgoingMessage, self), pbstrDocumentName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Retries(self: *const T, plRetries: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_Retries(@ptrCast(*const IFaxOutgoingMessage, self), plRetries); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Pages(self: *const T, plPages: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_Pages(@ptrCast(*const IFaxOutgoingMessage, self), plPages); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Size(self: *const T, plSize: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_Size(@ptrCast(*const IFaxOutgoingMessage, self), plSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_OriginalScheduledTime(self: *const T, pdateOriginalScheduledTime: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_OriginalScheduledTime(@ptrCast(*const IFaxOutgoingMessage, self), pdateOriginalScheduledTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_SubmissionTime(self: *const T, pdateSubmissionTime: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, 
self.vtable).get_SubmissionTime(@ptrCast(*const IFaxOutgoingMessage, self), pdateSubmissionTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Priority(self: *const T, pPriority: ?*FAX_PRIORITY_TYPE_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_Priority(@ptrCast(*const IFaxOutgoingMessage, self), pPriority); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Sender(self: *const T, ppFaxSender: ?*?*IFaxSender) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_Sender(@ptrCast(*const IFaxOutgoingMessage, self), ppFaxSender); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_Recipient(self: *const T, ppFaxRecipient: ?*?*IFaxRecipient) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_Recipient(@ptrCast(*const IFaxOutgoingMessage, self), ppFaxRecipient); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_DeviceName(self: *const T, pbstrDeviceName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_DeviceName(@ptrCast(*const IFaxOutgoingMessage, self), pbstrDeviceName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_TransmissionStart(self: *const T, pdateTransmissionStart: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_TransmissionStart(@ptrCast(*const IFaxOutgoingMessage, self), pdateTransmissionStart); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingMessage_get_TransmissionEnd(self: *const T, pdateTransmissionEnd: ?*f64) callconv(.Inline) HRESULT { return 
@ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_TransmissionEnd(@ptrCast(*const IFaxOutgoingMessage, self), pdateTransmissionEnd);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage_get_CSID(self: *const T, pbstrCSID: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_CSID(@ptrCast(*const IFaxOutgoingMessage, self), pbstrCSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage_get_TSID(self: *const T, pbstrTSID: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).get_TSID(@ptrCast(*const IFaxOutgoingMessage, self), pbstrTSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage_CopyTiff(self: *const T, bstrTiffPath: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).CopyTiff(@ptrCast(*const IFaxOutgoingMessage, self), bstrTiffPath);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage_Delete(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage.VTable, self.vtable).Delete(@ptrCast(*const IFaxOutgoingMessage, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxIncomingJobs_Value = @import("../zig.zig").Guid.initString("011f04e9-4fd6-4c23-9513-b6b66bb26be9");
pub const IID_IFaxIncomingJobs = &IID_IFaxIncomingJobs_Value;
/// Generated binding for the FaxIncomingJobs COM collection interface.
/// Extends IDispatch (see `base` below); exposes the standard COM
/// collection members `_NewEnum`, `Item` and `Count`. Call wrappers are
/// provided through the MethodMixin pattern used throughout this file.
pub const IFaxIncomingJobs = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(
            self: *const IFaxIncomingJobs,
            ppUnk: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(
            self: *const IFaxIncomingJobs,
            vIndex: VARIANT,
            pFaxIncomingJob: ?*?*IFaxIncomingJob,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(
            self: *const IFaxIncomingJobs,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    // Pointer to the instance's COM vtable.
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJobs_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJobs.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxIncomingJobs, self), ppUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJobs_get_Item(self: *const T, vIndex: VARIANT, pFaxIncomingJob: ?*?*IFaxIncomingJob) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJobs.VTable, self.vtable).get_Item(@ptrCast(*const IFaxIncomingJobs, self), vIndex, pFaxIncomingJob);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJobs_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJobs.VTable, self.vtable).get_Count(@ptrCast(*const IFaxIncomingJobs, self), plCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxIncomingJob_Value = @import("../zig.zig").Guid.initString("207529e6-654a-4916-9f88-4d232ee8a107");
pub const IID_IFaxIncomingJob = &IID_IFaxIncomingJob_Value;
/// Generated binding for the FaxIncomingJob COM interface: a single
/// incoming fax job. All getters write through caller-supplied out
/// pointers and return an HRESULT.
pub const IFaxIncomingJob = extern struct {
    pub const VTable = extern struct {
        base:
IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Size: fn(
            self: *const IFaxIncomingJob,
            plSize: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Id: fn(
            self: *const IFaxIncomingJob,
            pbstrId: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CurrentPage: fn(
            self: *const IFaxIncomingJob,
            plCurrentPage: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_DeviceId: fn(
            self: *const IFaxIncomingJob,
            plDeviceId: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Status: fn(
            self: *const IFaxIncomingJob,
            pStatus: ?*FAX_JOB_STATUS_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ExtendedStatusCode: fn(
            self: *const IFaxIncomingJob,
            pExtendedStatusCode: ?*FAX_JOB_EXTENDED_STATUS_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ExtendedStatus: fn(
            self: *const IFaxIncomingJob,
            pbstrExtendedStatus: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AvailableOperations: fn(
            self: *const IFaxIncomingJob,
            pAvailableOperations: ?*FAX_JOB_OPERATIONS_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Retries: fn(
            self: *const IFaxIncomingJob,
            plRetries: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TransmissionStart: fn(
            self: *const IFaxIncomingJob,
            pdateTransmissionStart: ?*f64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TransmissionEnd: fn(
            self: *const IFaxIncomingJob,
            pdateTransmissionEnd: ?*f64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CSID: fn(
            self: *const IFaxIncomingJob,
            pbstrCSID: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TSID: fn(
            self: *const IFaxIncomingJob,
            pbstrTSID: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CallerId: fn(
            self: *const IFaxIncomingJob,
            pbstrCallerId: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RoutingInformation: fn(
            self: *const IFaxIncomingJob,
            pbstrRoutingInformation: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_JobType: fn(
            self: *const IFaxIncomingJob,
            pJobType: ?*FAX_JOB_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Cancel: fn(
            self: *const IFaxIncomingJob,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(
            self: *const IFaxIncomingJob,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CopyTiff: fn(
            self: *const IFaxIncomingJob,
            bstrTiffPath: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    // Pointer to the instance's COM vtable.
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_Size(self: *const T, plSize: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_Size(@ptrCast(*const IFaxIncomingJob, self), plSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_Id(self: *const T, pbstrId: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_Id(@ptrCast(*const IFaxIncomingJob, self), pbstrId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_CurrentPage(self: *const T, plCurrentPage: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_CurrentPage(@ptrCast(*const IFaxIncomingJob, self), plCurrentPage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_DeviceId(self: *const T, plDeviceId: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_DeviceId(@ptrCast(*const IFaxIncomingJob, self), plDeviceId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_Status(self: *const T, pStatus: ?*FAX_JOB_STATUS_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_Status(@ptrCast(*const IFaxIncomingJob, self), pStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_ExtendedStatusCode(self: *const T, pExtendedStatusCode: ?*FAX_JOB_EXTENDED_STATUS_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_ExtendedStatusCode(@ptrCast(*const IFaxIncomingJob, self), pExtendedStatusCode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_ExtendedStatus(self: *const T, pbstrExtendedStatus: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_ExtendedStatus(@ptrCast(*const IFaxIncomingJob, self), pbstrExtendedStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_AvailableOperations(self: *const T, pAvailableOperations: ?*FAX_JOB_OPERATIONS_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_AvailableOperations(@ptrCast(*const IFaxIncomingJob, self), pAvailableOperations);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_Retries(self: *const T, plRetries: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_Retries(@ptrCast(*const IFaxIncomingJob, self), plRetries);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_TransmissionStart(self: *const T, pdateTransmissionStart: ?*f64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_TransmissionStart(@ptrCast(*const IFaxIncomingJob, self), pdateTransmissionStart);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_TransmissionEnd(self: *const T, pdateTransmissionEnd: ?*f64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_TransmissionEnd(@ptrCast(*const IFaxIncomingJob, self), pdateTransmissionEnd);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_CSID(self: *const T, pbstrCSID: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_CSID(@ptrCast(*const IFaxIncomingJob, self), pbstrCSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_TSID(self: *const T, pbstrTSID: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_TSID(@ptrCast(*const IFaxIncomingJob, self), pbstrTSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_CallerId(self: *const T, pbstrCallerId: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_CallerId(@ptrCast(*const IFaxIncomingJob, self), pbstrCallerId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_RoutingInformation(self: *const T, pbstrRoutingInformation: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_RoutingInformation(@ptrCast(*const IFaxIncomingJob, self), pbstrRoutingInformation);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_get_JobType(self: *const T, pJobType: ?*FAX_JOB_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).get_JobType(@ptrCast(*const IFaxIncomingJob, self), pJobType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_Cancel(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).Cancel(@ptrCast(*const IFaxIncomingJob, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).Refresh(@ptrCast(*const IFaxIncomingJob, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxIncomingJob_CopyTiff(self: *const T, bstrTiffPath: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxIncomingJob.VTable, self.vtable).CopyTiff(@ptrCast(*const IFaxIncomingJob, self), bstrTiffPath);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

/// Status of a fax service provider, as reported through
/// IFaxDeviceProvider.get_Status / get_InitErrorCode below.
pub const FAX_PROVIDER_STATUS_ENUM = enum(i32) {
    SUCCESS = 0,
    SERVER_ERROR = 1,
    BAD_GUID = 2,
    BAD_VERSION = 3,
    CANT_LOAD = 4,
    CANT_LINK = 5,
    CANT_INIT = 6,
};
// Flat aliases matching the original Win32 'fps*' constant names.
pub const fpsSUCCESS = FAX_PROVIDER_STATUS_ENUM.SUCCESS;
pub const fpsSERVER_ERROR = FAX_PROVIDER_STATUS_ENUM.SERVER_ERROR;
pub const fpsBAD_GUID = FAX_PROVIDER_STATUS_ENUM.BAD_GUID;
pub const fpsBAD_VERSION = FAX_PROVIDER_STATUS_ENUM.BAD_VERSION;
pub const fpsCANT_LOAD = FAX_PROVIDER_STATUS_ENUM.CANT_LOAD;
pub const fpsCANT_LINK = FAX_PROVIDER_STATUS_ENUM.CANT_LINK;
pub const fpsCANT_INIT = FAX_PROVIDER_STATUS_ENUM.CANT_INIT;

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxDeviceProvider_Value = @import("../zig.zig").Guid.initString("290eac63-83ec-449c-8417-f148df8c682a");
pub const IID_IFaxDeviceProvider = &IID_IFaxDeviceProvider_Value;
/// Generated binding for the FaxDeviceProvider COM interface.
/// Read-only properties describing a fax service provider (name, version,
/// status, device ids); extends IDispatch.
pub const IFaxDeviceProvider = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FriendlyName: fn(
            self: *const IFaxDeviceProvider,
            pbstrFriendlyName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
get_ImageName: fn(
            self: *const IFaxDeviceProvider,
            pbstrImageName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UniqueName: fn(
            self: *const IFaxDeviceProvider,
            pbstrUniqueName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TapiProviderName: fn(
            self: *const IFaxDeviceProvider,
            pbstrTapiProviderName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MajorVersion: fn(
            self: *const IFaxDeviceProvider,
            plMajorVersion: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MinorVersion: fn(
            self: *const IFaxDeviceProvider,
            plMinorVersion: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MajorBuild: fn(
            self: *const IFaxDeviceProvider,
            plMajorBuild: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MinorBuild: fn(
            self: *const IFaxDeviceProvider,
            plMinorBuild: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Debug: fn(
            self: *const IFaxDeviceProvider,
            pbDebug: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Status: fn(
            self: *const IFaxDeviceProvider,
            pStatus: ?*FAX_PROVIDER_STATUS_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_InitErrorCode: fn(
            self: *const IFaxDeviceProvider,
            plInitErrorCode: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_DeviceIds: fn(
            self: *const IFaxDeviceProvider,
            pvDeviceIds: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    // Pointer to the instance's COM vtable.
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_FriendlyName(self: *const T, pbstrFriendlyName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_FriendlyName(@ptrCast(*const IFaxDeviceProvider, self), pbstrFriendlyName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_ImageName(self: *const T, pbstrImageName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_ImageName(@ptrCast(*const IFaxDeviceProvider, self), pbstrImageName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_UniqueName(self: *const T, pbstrUniqueName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_UniqueName(@ptrCast(*const IFaxDeviceProvider, self), pbstrUniqueName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_TapiProviderName(self: *const T, pbstrTapiProviderName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_TapiProviderName(@ptrCast(*const IFaxDeviceProvider, self), pbstrTapiProviderName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_MajorVersion(self: *const T, plMajorVersion: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_MajorVersion(@ptrCast(*const IFaxDeviceProvider, self), plMajorVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_MinorVersion(self: *const T, plMinorVersion: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_MinorVersion(@ptrCast(*const IFaxDeviceProvider, self), plMinorVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_MajorBuild(self: *const T, plMajorBuild: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_MajorBuild(@ptrCast(*const IFaxDeviceProvider, self), plMajorBuild);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_MinorBuild(self: *const T, plMinorBuild: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_MinorBuild(@ptrCast(*const IFaxDeviceProvider, self), plMinorBuild);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_Debug(self: *const T, pbDebug: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_Debug(@ptrCast(*const IFaxDeviceProvider, self), pbDebug);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_Status(self: *const T, pStatus: ?*FAX_PROVIDER_STATUS_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_Status(@ptrCast(*const IFaxDeviceProvider, self), pStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_InitErrorCode(self: *const T, plInitErrorCode: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable,
self.vtable).get_InitErrorCode(@ptrCast(*const IFaxDeviceProvider, self), plInitErrorCode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDeviceProvider_get_DeviceIds(self: *const T, pvDeviceIds: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDeviceProvider.VTable, self.vtable).get_DeviceIds(@ptrCast(*const IFaxDeviceProvider, self), pvDeviceIds);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

/// Receive-mode values used by IFaxDevice.get_ReceiveMode / put_ReceiveMode.
pub const FAX_DEVICE_RECEIVE_MODE_ENUM = enum(i32) {
    NO_ANSWER = 0,
    AUTO_ANSWER = 1,
    MANUAL_ANSWER = 2,
};
// Flat aliases matching the original Win32 'fdrm*' constant names.
pub const fdrmNO_ANSWER = FAX_DEVICE_RECEIVE_MODE_ENUM.NO_ANSWER;
pub const fdrmAUTO_ANSWER = FAX_DEVICE_RECEIVE_MODE_ENUM.AUTO_ANSWER;
pub const fdrmMANUAL_ANSWER = FAX_DEVICE_RECEIVE_MODE_ENUM.MANUAL_ANSWER;

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxDevice_Value = @import("../zig.zig").Guid.initString("49306c59-b52e-4867-9df4-ca5841c956d0");
pub const IID_IFaxDevice = &IID_IFaxDevice_Value;
/// Generated binding for the FaxDevice COM interface: a fax device and its
/// configuration (descriptions, CSID/TSID, receive mode, routing methods).
/// Extends IDispatch; get_*/put_* pairs map to COM property accessors.
pub const IFaxDevice = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Id: fn(
            self: *const IFaxDevice,
            plId: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_DeviceName: fn(
            self: *const IFaxDevice,
            pbstrDeviceName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ProviderUniqueName: fn(
            self: *const IFaxDevice,
            pbstrProviderUniqueName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_PoweredOff: fn(
            self: *const IFaxDevice,
            pbPoweredOff: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
get_ReceivingNow: fn(
            self: *const IFaxDevice,
            pbReceivingNow: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SendingNow: fn(
            self: *const IFaxDevice,
            pbSendingNow: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UsedRoutingMethods: fn(
            self: *const IFaxDevice,
            pvUsedRoutingMethods: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Description: fn(
            self: *const IFaxDevice,
            pbstrDescription: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Description: fn(
            self: *const IFaxDevice,
            bstrDescription: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SendEnabled: fn(
            self: *const IFaxDevice,
            pbSendEnabled: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_SendEnabled: fn(
            self: *const IFaxDevice,
            bSendEnabled: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ReceiveMode: fn(
            self: *const IFaxDevice,
            pReceiveMode: ?*FAX_DEVICE_RECEIVE_MODE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ReceiveMode: fn(
            self: *const IFaxDevice,
            ReceiveMode: FAX_DEVICE_RECEIVE_MODE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RingsBeforeAnswer: fn(
            self: *const IFaxDevice,
            plRingsBeforeAnswer: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_RingsBeforeAnswer: fn(
            self: *const IFaxDevice,
            lRingsBeforeAnswer: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CSID: fn(
            self: *const IFaxDevice,
            pbstrCSID: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_CSID: fn(
            self: *const IFaxDevice,
            bstrCSID: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_TSID: fn(
            self: *const IFaxDevice,
            pbstrTSID: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_TSID: fn(
            self: *const IFaxDevice,
            bstrTSID: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(
            self: *const IFaxDevice,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Save: fn(
            self: *const IFaxDevice,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetExtensionProperty: fn(
            self: *const IFaxDevice,
            bstrGUID: ?BSTR,
            pvProperty: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetExtensionProperty: fn(
            self: *const IFaxDevice,
            bstrGUID: ?BSTR,
            vProperty: VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UseRoutingMethod: fn(
            self: *const IFaxDevice,
            bstrMethodGUID: ?BSTR,
            bUse: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RingingNow: fn(
            self: *const IFaxDevice,
            pbRingingNow: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AnswerCall: fn(
            self: *const IFaxDevice,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    // Pointer to the instance's COM vtable.
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_Id(self: *const T, plId: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_Id(@ptrCast(*const IFaxDevice, self), plId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_DeviceName(self: *const T, pbstrDeviceName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_DeviceName(@ptrCast(*const IFaxDevice, self), pbstrDeviceName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_ProviderUniqueName(self: *const T, pbstrProviderUniqueName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_ProviderUniqueName(@ptrCast(*const IFaxDevice, self), pbstrProviderUniqueName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_PoweredOff(self: *const T, pbPoweredOff: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_PoweredOff(@ptrCast(*const IFaxDevice, self), pbPoweredOff);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_ReceivingNow(self: *const T, pbReceivingNow: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_ReceivingNow(@ptrCast(*const IFaxDevice, self), pbReceivingNow);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_SendingNow(self: *const T, pbSendingNow: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_SendingNow(@ptrCast(*const IFaxDevice, self), pbSendingNow);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_UsedRoutingMethods(self: *const T, pvUsedRoutingMethods: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_UsedRoutingMethods(@ptrCast(*const IFaxDevice, self), pvUsedRoutingMethods);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_Description(self: *const T, pbstrDescription: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_Description(@ptrCast(*const IFaxDevice, self), pbstrDescription);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_put_Description(self: *const T, bstrDescription: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).put_Description(@ptrCast(*const IFaxDevice, self), bstrDescription);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_SendEnabled(self: *const T, pbSendEnabled: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_SendEnabled(@ptrCast(*const IFaxDevice, self), pbSendEnabled);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_put_SendEnabled(self: *const T, bSendEnabled: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).put_SendEnabled(@ptrCast(*const IFaxDevice, self), bSendEnabled);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_ReceiveMode(self: *const T, pReceiveMode: ?*FAX_DEVICE_RECEIVE_MODE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_ReceiveMode(@ptrCast(*const IFaxDevice, self), pReceiveMode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_put_ReceiveMode(self: *const T, ReceiveMode: FAX_DEVICE_RECEIVE_MODE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).put_ReceiveMode(@ptrCast(*const IFaxDevice, self), ReceiveMode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_RingsBeforeAnswer(self: *const T, plRingsBeforeAnswer: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_RingsBeforeAnswer(@ptrCast(*const IFaxDevice, self), plRingsBeforeAnswer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_put_RingsBeforeAnswer(self: *const T, lRingsBeforeAnswer: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).put_RingsBeforeAnswer(@ptrCast(*const IFaxDevice, self), lRingsBeforeAnswer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_CSID(self: *const T, pbstrCSID: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_CSID(@ptrCast(*const IFaxDevice, self), pbstrCSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_put_CSID(self: *const T, bstrCSID: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).put_CSID(@ptrCast(*const IFaxDevice, self), bstrCSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_TSID(self: *const T, pbstrTSID: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_TSID(@ptrCast(*const IFaxDevice, self), pbstrTSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_put_TSID(self: *const T, bstrTSID: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).put_TSID(@ptrCast(*const IFaxDevice, self), bstrTSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).Refresh(@ptrCast(*const IFaxDevice, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_Save(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).Save(@ptrCast(*const IFaxDevice, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_GetExtensionProperty(self: *const T, bstrGUID: ?BSTR, pvProperty: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).GetExtensionProperty(@ptrCast(*const IFaxDevice, self), bstrGUID, pvProperty);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_SetExtensionProperty(self: *const T, bstrGUID: ?BSTR, vProperty: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).SetExtensionProperty(@ptrCast(*const IFaxDevice, self), bstrGUID, vProperty);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_UseRoutingMethod(self: *const T, bstrMethodGUID: ?BSTR, bUse: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).UseRoutingMethod(@ptrCast(*const IFaxDevice, self), bstrMethodGUID, bUse);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDevice_get_RingingNow(self: *const T, pbRingingNow: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDevice.VTable, self.vtable).get_RingingNow(@ptrCast(*const IFaxDevice, self), pbRingingNow);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn
IFaxDevice_AnswerCall(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxDevice.VTable, self.vtable).AnswerCall(@ptrCast(*const IFaxDevice, self)); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxActivityLogging_Value = @import("../zig.zig").Guid.initString("1e29078b-5a69-497b-9592-49b7e7faddb5"); pub const IID_IFaxActivityLogging = &IID_IFaxActivityLogging_Value; pub const IFaxActivityLogging = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_LogIncoming: fn( self: *const IFaxActivityLogging, pbLogIncoming: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_LogIncoming: fn( self: *const IFaxActivityLogging, bLogIncoming: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_LogOutgoing: fn( self: *const IFaxActivityLogging, pbLogOutgoing: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_LogOutgoing: fn( self: *const IFaxActivityLogging, bLogOutgoing: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DatabasePath: fn( self: *const IFaxActivityLogging, pbstrDatabasePath: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_DatabasePath: fn( self: *const IFaxActivityLogging, bstrDatabasePath: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxActivityLogging, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Save: fn( self: *const IFaxActivityLogging, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_get_LogIncoming(self: *const T, pbLogIncoming: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxActivityLogging.VTable, self.vtable).get_LogIncoming(@ptrCast(*const IFaxActivityLogging, self), pbLogIncoming); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_put_LogIncoming(self: *const T, bLogIncoming: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxActivityLogging.VTable, self.vtable).put_LogIncoming(@ptrCast(*const IFaxActivityLogging, self), bLogIncoming); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_get_LogOutgoing(self: *const T, pbLogOutgoing: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxActivityLogging.VTable, self.vtable).get_LogOutgoing(@ptrCast(*const IFaxActivityLogging, self), pbLogOutgoing); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_put_LogOutgoing(self: *const T, bLogOutgoing: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxActivityLogging.VTable, self.vtable).put_LogOutgoing(@ptrCast(*const IFaxActivityLogging, self), bLogOutgoing); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_get_DatabasePath(self: *const T, pbstrDatabasePath: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const 
IFaxActivityLogging.VTable, self.vtable).get_DatabasePath(@ptrCast(*const IFaxActivityLogging, self), pbstrDatabasePath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_put_DatabasePath(self: *const T, bstrDatabasePath: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxActivityLogging.VTable, self.vtable).put_DatabasePath(@ptrCast(*const IFaxActivityLogging, self), bstrDatabasePath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxActivityLogging.VTable, self.vtable).Refresh(@ptrCast(*const IFaxActivityLogging, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxActivityLogging_Save(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxActivityLogging.VTable, self.vtable).Save(@ptrCast(*const IFaxActivityLogging, self)); } };} pub usingnamespace MethodMixin(@This()); }; pub const FAX_LOG_LEVEL_ENUM = enum(i32) { NONE = 0, MIN = 1, MED = 2, MAX = 3, }; pub const fllNONE = FAX_LOG_LEVEL_ENUM.NONE; pub const fllMIN = FAX_LOG_LEVEL_ENUM.MIN; pub const fllMED = FAX_LOG_LEVEL_ENUM.MED; pub const fllMAX = FAX_LOG_LEVEL_ENUM.MAX; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxEventLogging_Value = @import("../zig.zig").Guid.initString("0880d965-20e8-42e4-8e17-944f192caad4"); pub const IID_IFaxEventLogging = &IID_IFaxEventLogging_Value; pub const IFaxEventLogging = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_InitEventsLevel: fn( self: *const IFaxEventLogging, pInitEventLevel: ?*FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_InitEventsLevel: fn( self: *const IFaxEventLogging, InitEventLevel: FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_InboundEventsLevel: fn( self: *const IFaxEventLogging, pInboundEventLevel: ?*FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_InboundEventsLevel: fn( self: *const IFaxEventLogging, InboundEventLevel: FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_OutboundEventsLevel: fn( self: *const IFaxEventLogging, pOutboundEventLevel: ?*FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_OutboundEventsLevel: fn( self: *const IFaxEventLogging, OutboundEventLevel: FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_GeneralEventsLevel: fn( self: *const IFaxEventLogging, pGeneralEventLevel: ?*FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_GeneralEventsLevel: fn( self: *const IFaxEventLogging, GeneralEventLevel: FAX_LOG_LEVEL_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxEventLogging, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Save: fn( self: *const IFaxEventLogging, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_get_InitEventsLevel(self: *const T, pInitEventLevel: ?*FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).get_InitEventsLevel(@ptrCast(*const IFaxEventLogging, self), pInitEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_put_InitEventsLevel(self: *const T, InitEventLevel: FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).put_InitEventsLevel(@ptrCast(*const IFaxEventLogging, self), InitEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_get_InboundEventsLevel(self: *const T, pInboundEventLevel: ?*FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).get_InboundEventsLevel(@ptrCast(*const IFaxEventLogging, self), pInboundEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_put_InboundEventsLevel(self: *const T, InboundEventLevel: FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).put_InboundEventsLevel(@ptrCast(*const IFaxEventLogging, self), InboundEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IFaxEventLogging_get_OutboundEventsLevel(self: *const T, pOutboundEventLevel: ?*FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).get_OutboundEventsLevel(@ptrCast(*const IFaxEventLogging, self), pOutboundEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_put_OutboundEventsLevel(self: *const T, OutboundEventLevel: FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).put_OutboundEventsLevel(@ptrCast(*const IFaxEventLogging, self), OutboundEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_get_GeneralEventsLevel(self: *const T, pGeneralEventLevel: ?*FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).get_GeneralEventsLevel(@ptrCast(*const IFaxEventLogging, self), pGeneralEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_put_GeneralEventsLevel(self: *const T, GeneralEventLevel: FAX_LOG_LEVEL_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).put_GeneralEventsLevel(@ptrCast(*const IFaxEventLogging, self), GeneralEventLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).Refresh(@ptrCast(*const IFaxEventLogging, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxEventLogging_Save(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxEventLogging.VTable, self.vtable).Save(@ptrCast(*const IFaxEventLogging, self)); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const 
IID_IFaxOutboundRoutingGroups_Value = @import("../zig.zig").Guid.initString("235cbef7-c2de-4bfd-b8da-75097c82c87f"); pub const IID_IFaxOutboundRoutingGroups = &IID_IFaxOutboundRoutingGroups_Value; pub const IFaxOutboundRoutingGroups = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get__NewEnum: fn( self: *const IFaxOutboundRoutingGroups, ppUnk: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Item: fn( self: *const IFaxOutboundRoutingGroups, vIndex: VARIANT, pFaxOutboundRoutingGroup: ?*?*IFaxOutboundRoutingGroup, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Count: fn( self: *const IFaxOutboundRoutingGroups, plCount: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Add: fn( self: *const IFaxOutboundRoutingGroups, bstrName: ?BSTR, pFaxOutboundRoutingGroup: ?*?*IFaxOutboundRoutingGroup, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Remove: fn( self: *const IFaxOutboundRoutingGroups, vIndex: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroups_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingGroups.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxOutboundRoutingGroups, self), ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroups_get_Item(self: *const T, vIndex: VARIANT, pFaxOutboundRoutingGroup: ?*?*IFaxOutboundRoutingGroup) callconv(.Inline) HRESULT { return 
@ptrCast(*const IFaxOutboundRoutingGroups.VTable, self.vtable).get_Item(@ptrCast(*const IFaxOutboundRoutingGroups, self), vIndex, pFaxOutboundRoutingGroup); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroups_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingGroups.VTable, self.vtable).get_Count(@ptrCast(*const IFaxOutboundRoutingGroups, self), plCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroups_Add(self: *const T, bstrName: ?BSTR, pFaxOutboundRoutingGroup: ?*?*IFaxOutboundRoutingGroup) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingGroups.VTable, self.vtable).Add(@ptrCast(*const IFaxOutboundRoutingGroups, self), bstrName, pFaxOutboundRoutingGroup); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroups_Remove(self: *const T, vIndex: VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingGroups.VTable, self.vtable).Remove(@ptrCast(*const IFaxOutboundRoutingGroups, self), vIndex); } };} pub usingnamespace MethodMixin(@This()); }; pub const FAX_GROUP_STATUS_ENUM = enum(i32) { ALL_DEV_VALID = 0, EMPTY = 1, ALL_DEV_NOT_VALID = 2, SOME_DEV_NOT_VALID = 3, }; pub const fgsALL_DEV_VALID = FAX_GROUP_STATUS_ENUM.ALL_DEV_VALID; pub const fgsEMPTY = FAX_GROUP_STATUS_ENUM.EMPTY; pub const fgsALL_DEV_NOT_VALID = FAX_GROUP_STATUS_ENUM.ALL_DEV_NOT_VALID; pub const fgsSOME_DEV_NOT_VALID = FAX_GROUP_STATUS_ENUM.SOME_DEV_NOT_VALID; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxOutboundRoutingGroup_Value = @import("../zig.zig").Guid.initString("ca6289a1-7e25-4f87-9a0b-93365734962c"); pub const IID_IFaxOutboundRoutingGroup = &IID_IFaxOutboundRoutingGroup_Value; pub const IFaxOutboundRoutingGroup = extern struct { pub const VTable = extern struct { base: 
IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Name: fn( self: *const IFaxOutboundRoutingGroup, pbstrName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Status: fn( self: *const IFaxOutboundRoutingGroup, pStatus: ?*FAX_GROUP_STATUS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DeviceIds: fn( self: *const IFaxOutboundRoutingGroup, pFaxDeviceIds: ?*?*IFaxDeviceIds, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroup_get_Name(self: *const T, pbstrName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingGroup.VTable, self.vtable).get_Name(@ptrCast(*const IFaxOutboundRoutingGroup, self), pbstrName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroup_get_Status(self: *const T, pStatus: ?*FAX_GROUP_STATUS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingGroup.VTable, self.vtable).get_Status(@ptrCast(*const IFaxOutboundRoutingGroup, self), pStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingGroup_get_DeviceIds(self: *const T, pFaxDeviceIds: ?*?*IFaxDeviceIds) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingGroup.VTable, self.vtable).get_DeviceIds(@ptrCast(*const IFaxOutboundRoutingGroup, self), pFaxDeviceIds); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxDeviceIds_Value = 
@import("../zig.zig").Guid.initString("2f0f813f-4ce9-443e-8ca1-738cfaeee149"); pub const IID_IFaxDeviceIds = &IID_IFaxDeviceIds_Value; pub const IFaxDeviceIds = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get__NewEnum: fn( self: *const IFaxDeviceIds, ppUnk: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Item: fn( self: *const IFaxDeviceIds, lIndex: i32, plDeviceId: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Count: fn( self: *const IFaxDeviceIds, plCount: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Add: fn( self: *const IFaxDeviceIds, lDeviceId: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Remove: fn( self: *const IFaxDeviceIds, lIndex: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetOrder: fn( self: *const IFaxDeviceIds, lDeviceId: i32, lNewOrder: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxDeviceIds_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxDeviceIds.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxDeviceIds, self), ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxDeviceIds_get_Item(self: *const T, lIndex: i32, plDeviceId: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxDeviceIds.VTable, self.vtable).get_Item(@ptrCast(*const IFaxDeviceIds, self), lIndex, plDeviceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IFaxDeviceIds_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxDeviceIds.VTable, self.vtable).get_Count(@ptrCast(*const IFaxDeviceIds, self), plCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxDeviceIds_Add(self: *const T, lDeviceId: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxDeviceIds.VTable, self.vtable).Add(@ptrCast(*const IFaxDeviceIds, self), lDeviceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxDeviceIds_Remove(self: *const T, lIndex: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxDeviceIds.VTable, self.vtable).Remove(@ptrCast(*const IFaxDeviceIds, self), lIndex); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxDeviceIds_SetOrder(self: *const T, lDeviceId: i32, lNewOrder: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxDeviceIds.VTable, self.vtable).SetOrder(@ptrCast(*const IFaxDeviceIds, self), lDeviceId, lNewOrder); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxOutboundRoutingRules_Value = @import("../zig.zig").Guid.initString("dcefa1e7-ae7d-4ed6-8521-369edcca5120"); pub const IID_IFaxOutboundRoutingRules = &IID_IFaxOutboundRoutingRules_Value; pub const IFaxOutboundRoutingRules = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get__NewEnum: fn( self: *const IFaxOutboundRoutingRules, ppUnk: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_Item: fn( self: *const IFaxOutboundRoutingRules, lIndex: i32, pFaxOutboundRoutingRule: ?*?*IFaxOutboundRoutingRule, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Count: fn( self: *const IFaxOutboundRoutingRules, plCount: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ItemByCountryAndArea: fn( self: *const IFaxOutboundRoutingRules, lCountryCode: i32, lAreaCode: i32, pFaxOutboundRoutingRule: ?*?*IFaxOutboundRoutingRule, ) callconv(@import("std").os.windows.WINAPI) HRESULT, RemoveByCountryAndArea: fn( self: *const IFaxOutboundRoutingRules, lCountryCode: i32, lAreaCode: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Remove: fn( self: *const IFaxOutboundRoutingRules, lIndex: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Add: fn( self: *const IFaxOutboundRoutingRules, lCountryCode: i32, lAreaCode: i32, bUseDevice: i16, bstrGroupName: ?BSTR, lDeviceId: i32, pFaxOutboundRoutingRule: ?*?*IFaxOutboundRoutingRule, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRules_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRules.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxOutboundRoutingRules, self), ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRules_get_Item(self: *const T, lIndex: i32, pFaxOutboundRoutingRule: ?*?*IFaxOutboundRoutingRule) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRules.VTable, self.vtable).get_Item(@ptrCast(*const IFaxOutboundRoutingRules, self), lIndex, pFaxOutboundRoutingRule); } // NOTE: method is namespaced with interface 
name to avoid conflicts for now pub fn IFaxOutboundRoutingRules_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRules.VTable, self.vtable).get_Count(@ptrCast(*const IFaxOutboundRoutingRules, self), plCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRules_ItemByCountryAndArea(self: *const T, lCountryCode: i32, lAreaCode: i32, pFaxOutboundRoutingRule: ?*?*IFaxOutboundRoutingRule) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRules.VTable, self.vtable).ItemByCountryAndArea(@ptrCast(*const IFaxOutboundRoutingRules, self), lCountryCode, lAreaCode, pFaxOutboundRoutingRule); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRules_RemoveByCountryAndArea(self: *const T, lCountryCode: i32, lAreaCode: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRules.VTable, self.vtable).RemoveByCountryAndArea(@ptrCast(*const IFaxOutboundRoutingRules, self), lCountryCode, lAreaCode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRules_Remove(self: *const T, lIndex: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRules.VTable, self.vtable).Remove(@ptrCast(*const IFaxOutboundRoutingRules, self), lIndex); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRules_Add(self: *const T, lCountryCode: i32, lAreaCode: i32, bUseDevice: i16, bstrGroupName: ?BSTR, lDeviceId: i32, pFaxOutboundRoutingRule: ?*?*IFaxOutboundRoutingRule) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRules.VTable, self.vtable).Add(@ptrCast(*const IFaxOutboundRoutingRules, self), lCountryCode, lAreaCode, bUseDevice, bstrGroupName, lDeviceId, pFaxOutboundRoutingRule); } };} pub usingnamespace MethodMixin(@This()); }; pub const 
FAX_RULE_STATUS_ENUM = enum(i32) { VALID = 0, EMPTY_GROUP = 1, ALL_GROUP_DEV_NOT_VALID = 2, SOME_GROUP_DEV_NOT_VALID = 3, BAD_DEVICE = 4, }; pub const frsVALID = FAX_RULE_STATUS_ENUM.VALID; pub const frsEMPTY_GROUP = FAX_RULE_STATUS_ENUM.EMPTY_GROUP; pub const frsALL_GROUP_DEV_NOT_VALID = FAX_RULE_STATUS_ENUM.ALL_GROUP_DEV_NOT_VALID; pub const frsSOME_GROUP_DEV_NOT_VALID = FAX_RULE_STATUS_ENUM.SOME_GROUP_DEV_NOT_VALID; pub const frsBAD_DEVICE = FAX_RULE_STATUS_ENUM.BAD_DEVICE; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IFaxOutboundRoutingRule_Value = @import("../zig.zig").Guid.initString("e1f795d5-07c2-469f-b027-acacc23219da"); pub const IID_IFaxOutboundRoutingRule = &IID_IFaxOutboundRoutingRule_Value; pub const IFaxOutboundRoutingRule = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_CountryCode: fn( self: *const IFaxOutboundRoutingRule, plCountryCode: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_AreaCode: fn( self: *const IFaxOutboundRoutingRule, plAreaCode: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Status: fn( self: *const IFaxOutboundRoutingRule, pStatus: ?*FAX_RULE_STATUS_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_UseDevice: fn( self: *const IFaxOutboundRoutingRule, pbUseDevice: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_UseDevice: fn( self: *const IFaxOutboundRoutingRule, bUseDevice: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_DeviceId: fn( self: *const IFaxOutboundRoutingRule, plDeviceId: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_DeviceId: fn( self: *const IFaxOutboundRoutingRule, DeviceId: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_GroupName: fn( self: *const IFaxOutboundRoutingRule, pbstrGroupName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_GroupName: fn( self: *const IFaxOutboundRoutingRule, bstrGroupName: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxOutboundRoutingRule, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Save: fn( self: *const IFaxOutboundRoutingRule, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_get_CountryCode(self: *const T, plCountryCode: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).get_CountryCode(@ptrCast(*const IFaxOutboundRoutingRule, self), plCountryCode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_get_AreaCode(self: *const T, plAreaCode: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).get_AreaCode(@ptrCast(*const IFaxOutboundRoutingRule, self), plAreaCode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_get_Status(self: *const T, pStatus: ?*FAX_RULE_STATUS_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const 
IFaxOutboundRoutingRule.VTable, self.vtable).get_Status(@ptrCast(*const IFaxOutboundRoutingRule, self), pStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_get_UseDevice(self: *const T, pbUseDevice: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).get_UseDevice(@ptrCast(*const IFaxOutboundRoutingRule, self), pbUseDevice); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_put_UseDevice(self: *const T, bUseDevice: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).put_UseDevice(@ptrCast(*const IFaxOutboundRoutingRule, self), bUseDevice); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_get_DeviceId(self: *const T, plDeviceId: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).get_DeviceId(@ptrCast(*const IFaxOutboundRoutingRule, self), plDeviceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_put_DeviceId(self: *const T, DeviceId: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).put_DeviceId(@ptrCast(*const IFaxOutboundRoutingRule, self), DeviceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_get_GroupName(self: *const T, pbstrGroupName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).get_GroupName(@ptrCast(*const IFaxOutboundRoutingRule, self), pbstrGroupName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutboundRoutingRule_put_GroupName(self: *const T, bstrGroupName: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const 
IFaxOutboundRoutingRule.VTable, self.vtable).put_GroupName(@ptrCast(*const IFaxOutboundRoutingRule, self), bstrGroupName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutboundRoutingRule_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).Refresh(@ptrCast(*const IFaxOutboundRoutingRule, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutboundRoutingRule_Save(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutboundRoutingRule.VTable, self.vtable).Save(@ptrCast(*const IFaxOutboundRoutingRule, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxInboundRoutingExtensions_Value = @import("../zig.zig").Guid.initString("2f6c9673-7b26-42de-8eb0-915dcd2a4f4c");
pub const IID_IFaxInboundRoutingExtensions = &IID_IFaxInboundRoutingExtensions_Value;
// IDispatch-based COM collection of inbound routing extensions: the vtable
// exposes enumeration (get__NewEnum), indexed access (get_Item) and a count
// (get_Count). The MethodMixin wrappers below simply forward through vtable.
pub const IFaxInboundRoutingExtensions = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(
            self: *const IFaxInboundRoutingExtensions,
            ppUnk: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(
            self: *const IFaxInboundRoutingExtensions,
            vIndex: VARIANT,
            pFaxInboundRoutingExtension: ?*?*IFaxInboundRoutingExtension,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(
            self: *const IFaxInboundRoutingExtensions,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtensions_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtensions.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxInboundRoutingExtensions, self), ppUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtensions_get_Item(self: *const T, vIndex: VARIANT, pFaxInboundRoutingExtension: ?*?*IFaxInboundRoutingExtension) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtensions.VTable, self.vtable).get_Item(@ptrCast(*const IFaxInboundRoutingExtensions, self), vIndex, pFaxInboundRoutingExtension);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtensions_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtensions.VTable, self.vtable).get_Count(@ptrCast(*const IFaxInboundRoutingExtensions, self), plCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxInboundRoutingExtension_Value = @import("../zig.zig").Guid.initString("885b5e08-c26c-4ef9-af83-51580a750be1");
pub const IID_IFaxInboundRoutingExtension = &IID_IFaxInboundRoutingExtension_Value;
// IDispatch-based COM interface describing a single inbound routing extension
// (name/version/build metadata plus its routing methods).
pub const IFaxInboundRoutingExtension = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FriendlyName: fn(
            self: *const IFaxInboundRoutingExtension,
            pbstrFriendlyName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ImageName: fn(
            self: *const IFaxInboundRoutingExtension,
            pbstrImageName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UniqueName: fn(
            self: *const IFaxInboundRoutingExtension,
            pbstrUniqueName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MajorVersion: fn(
            self: *const IFaxInboundRoutingExtension,
            plMajorVersion: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MinorVersion: fn(
            self: *const IFaxInboundRoutingExtension,
            plMinorVersion: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MajorBuild: fn(
            self: *const IFaxInboundRoutingExtension,
            plMajorBuild: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MinorBuild: fn(
            self: *const IFaxInboundRoutingExtension,
            plMinorBuild: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Debug: fn(
            self: *const IFaxInboundRoutingExtension,
            pbDebug: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Status: fn(
            self: *const IFaxInboundRoutingExtension,
            pStatus: ?*FAX_PROVIDER_STATUS_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_InitErrorCode: fn(
            self: *const IFaxInboundRoutingExtension,
            plInitErrorCode: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Methods: fn(
            self: *const IFaxInboundRoutingExtension,
            pvMethods: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_FriendlyName(self: *const T, pbstrFriendlyName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_FriendlyName(@ptrCast(*const IFaxInboundRoutingExtension, self), pbstrFriendlyName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_ImageName(self: *const T, pbstrImageName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_ImageName(@ptrCast(*const IFaxInboundRoutingExtension, self), pbstrImageName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_UniqueName(self: *const T, pbstrUniqueName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_UniqueName(@ptrCast(*const IFaxInboundRoutingExtension, self), pbstrUniqueName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_MajorVersion(self: *const T, plMajorVersion: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_MajorVersion(@ptrCast(*const IFaxInboundRoutingExtension, self), plMajorVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_MinorVersion(self: *const T, plMinorVersion: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_MinorVersion(@ptrCast(*const IFaxInboundRoutingExtension, self), plMinorVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_MajorBuild(self: *const T, plMajorBuild: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_MajorBuild(@ptrCast(*const IFaxInboundRoutingExtension, self), plMajorBuild);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_MinorBuild(self: *const T, plMinorBuild: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_MinorBuild(@ptrCast(*const IFaxInboundRoutingExtension, self), plMinorBuild);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_Debug(self: *const T, pbDebug: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_Debug(@ptrCast(*const IFaxInboundRoutingExtension, self), pbDebug);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_Status(self: *const T, pStatus: ?*FAX_PROVIDER_STATUS_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_Status(@ptrCast(*const IFaxInboundRoutingExtension, self), pStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_InitErrorCode(self: *const T, plInitErrorCode: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_InitErrorCode(@ptrCast(*const IFaxInboundRoutingExtension, self), plInitErrorCode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingExtension_get_Methods(self: *const T, pvMethods: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingExtension.VTable, self.vtable).get_Methods(@ptrCast(*const IFaxInboundRoutingExtension, self), pvMethods);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxInboundRoutingMethods_Value = @import("../zig.zig").Guid.initString("783fca10-8908-4473-9d69-f67fbea0c6b9");
pub const IID_IFaxInboundRoutingMethods = &IID_IFaxInboundRoutingMethods_Value;
// IDispatch-based COM collection of inbound routing methods: enumeration
// (get__NewEnum), indexed access (get_Item) and a count (get_Count).
pub const IFaxInboundRoutingMethods = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(
            self: *const IFaxInboundRoutingMethods,
            ppUnk: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(
            self: *const IFaxInboundRoutingMethods,
            vIndex: VARIANT,
            pFaxInboundRoutingMethod: ?*?*IFaxInboundRoutingMethod,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(
            self: *const IFaxInboundRoutingMethods,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethods_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethods.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxInboundRoutingMethods, self), ppUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethods_get_Item(self: *const T, vIndex: VARIANT, pFaxInboundRoutingMethod: ?*?*IFaxInboundRoutingMethod) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethods.VTable, self.vtable).get_Item(@ptrCast(*const IFaxInboundRoutingMethods, self), vIndex, pFaxInboundRoutingMethod);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethods_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethods.VTable, self.vtable).get_Count(@ptrCast(*const IFaxInboundRoutingMethods, self), plCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IFaxInboundRoutingMethod_Value = @import("../zig.zig").Guid.initString("45700061-ad9d-4776-a8c4-64065492cf4b");
pub const IID_IFaxInboundRoutingMethod = &IID_IFaxInboundRoutingMethod_Value;
// IDispatch-based COM interface describing a single inbound routing method.
// Only Priority is writable here; Refresh/Save reload and persist the object.
pub const IFaxInboundRoutingMethod = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Name: fn(
            self: *const IFaxInboundRoutingMethod,
            pbstrName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_GUID: fn(
            self: *const IFaxInboundRoutingMethod,
            pbstrGUID: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FunctionName: fn(
            self: *const IFaxInboundRoutingMethod,
            pbstrFunctionName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ExtensionFriendlyName: fn(
            self: *const IFaxInboundRoutingMethod,
            pbstrExtensionFriendlyName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ExtensionImageName: fn(
            self: *const IFaxInboundRoutingMethod,
            pbstrExtensionImageName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Priority: fn(
            self: *const IFaxInboundRoutingMethod,
            plPriority: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Priority: fn(
            self: *const IFaxInboundRoutingMethod,
            lPriority: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(
            self: *const IFaxInboundRoutingMethod,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Save: fn(
            self: *const IFaxInboundRoutingMethod,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_get_Name(self: *const T, pbstrName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).get_Name(@ptrCast(*const IFaxInboundRoutingMethod, self), pbstrName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_get_GUID(self: *const T, pbstrGUID: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).get_GUID(@ptrCast(*const IFaxInboundRoutingMethod, self), pbstrGUID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_get_FunctionName(self: *const T, pbstrFunctionName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).get_FunctionName(@ptrCast(*const IFaxInboundRoutingMethod, self), pbstrFunctionName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_get_ExtensionFriendlyName(self: *const T, pbstrExtensionFriendlyName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).get_ExtensionFriendlyName(@ptrCast(*const IFaxInboundRoutingMethod, self), pbstrExtensionFriendlyName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_get_ExtensionImageName(self: *const T, pbstrExtensionImageName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).get_ExtensionImageName(@ptrCast(*const IFaxInboundRoutingMethod, self), pbstrExtensionImageName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_get_Priority(self: *const T, plPriority: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).get_Priority(@ptrCast(*const IFaxInboundRoutingMethod, self), plPriority);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_put_Priority(self: *const T, lPriority: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).put_Priority(@ptrCast(*const IFaxInboundRoutingMethod, self), lPriority);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).Refresh(@ptrCast(*const IFaxInboundRoutingMethod, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxInboundRoutingMethod_Save(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxInboundRoutingMethod.VTable, self.vtable).Save(@ptrCast(*const IFaxInboundRoutingMethod, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IFaxDocument2_Value = @import("../zig.zig").Guid.initString("e1347661-f9ef-4d6d-b4a5-c0a068b65cff");
pub const IID_IFaxDocument2 = &IID_IFaxDocument2_Value;
// Extends IFaxDocument (note base: IFaxDocument.VTable and the
// IFaxDocument.MethodMixin reuse below) with multi-body submission.
pub const IFaxDocument2 = extern struct {
    pub const VTable = extern struct {
        base: IFaxDocument.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SubmissionId: fn(
            self: *const IFaxDocument2,
            pbstrSubmissionId: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Bodies: fn(
            self: *const IFaxDocument2,
            pvBodies: ?*VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Bodies: fn(
            self: *const IFaxDocument2,
            vBodies: VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Submit2: fn(
            self: *const IFaxDocument2,
            bstrFaxServerName: ?BSTR,
            pvFaxOutgoingJobIDs: ?*VARIANT,
            plErrorBodyFile: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ConnectedSubmit2: fn(
            self: *const IFaxDocument2,
            pFaxServer: ?*IFaxServer,
            pvFaxOutgoingJobIDs: ?*VARIANT,
            plErrorBodyFile: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        // Inherits all IFaxDocument wrappers, then adds the v2 methods.
        pub usingnamespace IFaxDocument.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument2_get_SubmissionId(self: *const T, pbstrSubmissionId: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument2.VTable, self.vtable).get_SubmissionId(@ptrCast(*const IFaxDocument2, self), pbstrSubmissionId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument2_get_Bodies(self: *const T, pvBodies: ?*VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument2.VTable, self.vtable).get_Bodies(@ptrCast(*const IFaxDocument2, self), pvBodies);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument2_put_Bodies(self: *const T, vBodies: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument2.VTable, self.vtable).put_Bodies(@ptrCast(*const IFaxDocument2, self), vBodies);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument2_Submit2(self: *const T, bstrFaxServerName: ?BSTR, pvFaxOutgoingJobIDs: ?*VARIANT, plErrorBodyFile: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument2.VTable, self.vtable).Submit2(@ptrCast(*const IFaxDocument2, self), bstrFaxServerName, pvFaxOutgoingJobIDs, plErrorBodyFile);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxDocument2_ConnectedSubmit2(self: *const T, pFaxServer: ?*IFaxServer, pvFaxOutgoingJobIDs: ?*VARIANT, plErrorBodyFile: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxDocument2.VTable, self.vtable).ConnectedSubmit2(@ptrCast(*const IFaxDocument2, self), pFaxServer, pvFaxOutgoingJobIDs, plErrorBodyFile);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IFaxConfiguration_Value = @import("../zig.zig").Guid.initString("10f4d0f7-0994-4543-ab6e-506949128c40");
pub const IID_IFaxConfiguration = &IID_IFaxConfiguration_Value;
// IDispatch-based COM interface exposing fax server configuration properties.
pub const IFaxConfiguration = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UseArchive: fn(
            self: *const IFaxConfiguration,
            pbUseArchive: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_UseArchive: fn(
            self: *const IFaxConfiguration,
            bUseArchive: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ArchiveLocation: fn(
            self: *const IFaxConfiguration,
            pbstrArchiveLocation: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
put_ArchiveLocation: fn( self: *const IFaxConfiguration, bstrArchiveLocation: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SizeQuotaWarning: fn( self: *const IFaxConfiguration, pbSizeQuotaWarning: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_SizeQuotaWarning: fn( self: *const IFaxConfiguration, bSizeQuotaWarning: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_HighQuotaWaterMark: fn( self: *const IFaxConfiguration, plHighQuotaWaterMark: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_HighQuotaWaterMark: fn( self: *const IFaxConfiguration, lHighQuotaWaterMark: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_LowQuotaWaterMark: fn( self: *const IFaxConfiguration, plLowQuotaWaterMark: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_LowQuotaWaterMark: fn( self: *const IFaxConfiguration, lLowQuotaWaterMark: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ArchiveAgeLimit: fn( self: *const IFaxConfiguration, plArchiveAgeLimit: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_ArchiveAgeLimit: fn( self: *const IFaxConfiguration, lArchiveAgeLimit: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_ArchiveSizeLow: fn( self: *const IFaxConfiguration, plSizeLow: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ArchiveSizeHigh: fn( self: *const IFaxConfiguration, plSizeHigh: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_OutgoingQueueBlocked: fn( self: *const IFaxConfiguration, pbOutgoingBlocked: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_OutgoingQueueBlocked: fn( self: *const IFaxConfiguration, bOutgoingBlocked: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_OutgoingQueuePaused: fn( self: *const IFaxConfiguration, pbOutgoingPaused: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_OutgoingQueuePaused: fn( self: *const IFaxConfiguration, bOutgoingPaused: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_AllowPersonalCoverPages: fn( self: *const IFaxConfiguration, pbAllowPersonalCoverPages: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_AllowPersonalCoverPages: fn( self: *const IFaxConfiguration, bAllowPersonalCoverPages: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_UseDeviceTSID: fn( self: *const IFaxConfiguration, pbUseDeviceTSID: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_UseDeviceTSID: fn( self: *const IFaxConfiguration, bUseDeviceTSID: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Retries: fn( self: *const IFaxConfiguration, plRetries: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_Retries: fn( self: *const IFaxConfiguration, lRetries: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_RetryDelay: fn( self: *const IFaxConfiguration, plRetryDelay: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_RetryDelay: fn( self: *const IFaxConfiguration, lRetryDelay: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DiscountRateStart: fn( self: *const IFaxConfiguration, pdateDiscountRateStart: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_DiscountRateStart: fn( self: *const IFaxConfiguration, dateDiscountRateStart: f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_DiscountRateEnd: fn( self: *const IFaxConfiguration, pdateDiscountRateEnd: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_DiscountRateEnd: fn( self: *const IFaxConfiguration, dateDiscountRateEnd: f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_OutgoingQueueAgeLimit: fn( self: *const IFaxConfiguration, plOutgoingQueueAgeLimit: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_OutgoingQueueAgeLimit: fn( self: *const IFaxConfiguration, lOutgoingQueueAgeLimit: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Branding: fn( self: *const IFaxConfiguration, pbBranding: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_Branding: fn( self: *const IFaxConfiguration, bBranding: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_IncomingQueueBlocked: fn( self: *const IFaxConfiguration, pbIncomingBlocked: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_IncomingQueueBlocked: fn( self: *const IFaxConfiguration, bIncomingBlocked: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_AutoCreateAccountOnConnect: fn( self: *const IFaxConfiguration, pbAutoCreateAccountOnConnect: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_AutoCreateAccountOnConnect: fn( self: *const IFaxConfiguration, bAutoCreateAccountOnConnect: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_IncomingFaxesArePublic: fn( self: *const IFaxConfiguration, pbIncomingFaxesArePublic: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_IncomingFaxesArePublic: fn( self: *const IFaxConfiguration, bIncomingFaxesArePublic: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxConfiguration, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Save: fn( self: *const IFaxConfiguration, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_UseArchive(self: *const T, pbUseArchive: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_UseArchive(@ptrCast(*const IFaxConfiguration, self), pbUseArchive); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_UseArchive(self: *const T, bUseArchive: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_UseArchive(@ptrCast(*const IFaxConfiguration, self), bUseArchive); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_ArchiveLocation(self: *const T, pbstrArchiveLocation: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_ArchiveLocation(@ptrCast(*const IFaxConfiguration, self), pbstrArchiveLocation); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_ArchiveLocation(self: *const T, bstrArchiveLocation: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_ArchiveLocation(@ptrCast(*const IFaxConfiguration, self), bstrArchiveLocation); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_SizeQuotaWarning(self: *const T, pbSizeQuotaWarning: ?*i16) callconv(.Inline) HRESULT { return 
@ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_SizeQuotaWarning(@ptrCast(*const IFaxConfiguration, self), pbSizeQuotaWarning); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_SizeQuotaWarning(self: *const T, bSizeQuotaWarning: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_SizeQuotaWarning(@ptrCast(*const IFaxConfiguration, self), bSizeQuotaWarning); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_HighQuotaWaterMark(self: *const T, plHighQuotaWaterMark: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_HighQuotaWaterMark(@ptrCast(*const IFaxConfiguration, self), plHighQuotaWaterMark); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_HighQuotaWaterMark(self: *const T, lHighQuotaWaterMark: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_HighQuotaWaterMark(@ptrCast(*const IFaxConfiguration, self), lHighQuotaWaterMark); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_LowQuotaWaterMark(self: *const T, plLowQuotaWaterMark: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_LowQuotaWaterMark(@ptrCast(*const IFaxConfiguration, self), plLowQuotaWaterMark); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_LowQuotaWaterMark(self: *const T, lLowQuotaWaterMark: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_LowQuotaWaterMark(@ptrCast(*const IFaxConfiguration, self), lLowQuotaWaterMark); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_ArchiveAgeLimit(self: *const 
T, plArchiveAgeLimit: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_ArchiveAgeLimit(@ptrCast(*const IFaxConfiguration, self), plArchiveAgeLimit); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_ArchiveAgeLimit(self: *const T, lArchiveAgeLimit: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_ArchiveAgeLimit(@ptrCast(*const IFaxConfiguration, self), lArchiveAgeLimit); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_ArchiveSizeLow(self: *const T, plSizeLow: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_ArchiveSizeLow(@ptrCast(*const IFaxConfiguration, self), plSizeLow); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_ArchiveSizeHigh(self: *const T, plSizeHigh: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_ArchiveSizeHigh(@ptrCast(*const IFaxConfiguration, self), plSizeHigh); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_OutgoingQueueBlocked(self: *const T, pbOutgoingBlocked: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_OutgoingQueueBlocked(@ptrCast(*const IFaxConfiguration, self), pbOutgoingBlocked); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_OutgoingQueueBlocked(self: *const T, bOutgoingBlocked: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_OutgoingQueueBlocked(@ptrCast(*const IFaxConfiguration, self), bOutgoingBlocked); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IFaxConfiguration_get_OutgoingQueuePaused(self: *const T, pbOutgoingPaused: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_OutgoingQueuePaused(@ptrCast(*const IFaxConfiguration, self), pbOutgoingPaused); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_OutgoingQueuePaused(self: *const T, bOutgoingPaused: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_OutgoingQueuePaused(@ptrCast(*const IFaxConfiguration, self), bOutgoingPaused); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_AllowPersonalCoverPages(self: *const T, pbAllowPersonalCoverPages: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_AllowPersonalCoverPages(@ptrCast(*const IFaxConfiguration, self), pbAllowPersonalCoverPages); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_AllowPersonalCoverPages(self: *const T, bAllowPersonalCoverPages: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_AllowPersonalCoverPages(@ptrCast(*const IFaxConfiguration, self), bAllowPersonalCoverPages); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_UseDeviceTSID(self: *const T, pbUseDeviceTSID: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_UseDeviceTSID(@ptrCast(*const IFaxConfiguration, self), pbUseDeviceTSID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_UseDeviceTSID(self: *const T, bUseDeviceTSID: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_UseDeviceTSID(@ptrCast(*const IFaxConfiguration, self), bUseDeviceTSID); } // 
NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_Retries(self: *const T, plRetries: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_Retries(@ptrCast(*const IFaxConfiguration, self), plRetries); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_Retries(self: *const T, lRetries: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_Retries(@ptrCast(*const IFaxConfiguration, self), lRetries); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_RetryDelay(self: *const T, plRetryDelay: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_RetryDelay(@ptrCast(*const IFaxConfiguration, self), plRetryDelay); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_RetryDelay(self: *const T, lRetryDelay: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_RetryDelay(@ptrCast(*const IFaxConfiguration, self), lRetryDelay); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_DiscountRateStart(self: *const T, pdateDiscountRateStart: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_DiscountRateStart(@ptrCast(*const IFaxConfiguration, self), pdateDiscountRateStart); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_DiscountRateStart(self: *const T, dateDiscountRateStart: f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_DiscountRateStart(@ptrCast(*const IFaxConfiguration, self), dateDiscountRateStart); } // NOTE: method is namespaced with interface name to avoid 
conflicts for now pub fn IFaxConfiguration_get_DiscountRateEnd(self: *const T, pdateDiscountRateEnd: ?*f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_DiscountRateEnd(@ptrCast(*const IFaxConfiguration, self), pdateDiscountRateEnd); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_DiscountRateEnd(self: *const T, dateDiscountRateEnd: f64) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_DiscountRateEnd(@ptrCast(*const IFaxConfiguration, self), dateDiscountRateEnd); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_OutgoingQueueAgeLimit(self: *const T, plOutgoingQueueAgeLimit: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_OutgoingQueueAgeLimit(@ptrCast(*const IFaxConfiguration, self), plOutgoingQueueAgeLimit); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_OutgoingQueueAgeLimit(self: *const T, lOutgoingQueueAgeLimit: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_OutgoingQueueAgeLimit(@ptrCast(*const IFaxConfiguration, self), lOutgoingQueueAgeLimit); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_Branding(self: *const T, pbBranding: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_Branding(@ptrCast(*const IFaxConfiguration, self), pbBranding); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_Branding(self: *const T, bBranding: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_Branding(@ptrCast(*const IFaxConfiguration, self), bBranding); } // NOTE: method is namespaced with 
interface name to avoid conflicts for now pub fn IFaxConfiguration_get_IncomingQueueBlocked(self: *const T, pbIncomingBlocked: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_IncomingQueueBlocked(@ptrCast(*const IFaxConfiguration, self), pbIncomingBlocked); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_IncomingQueueBlocked(self: *const T, bIncomingBlocked: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_IncomingQueueBlocked(@ptrCast(*const IFaxConfiguration, self), bIncomingBlocked); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_AutoCreateAccountOnConnect(self: *const T, pbAutoCreateAccountOnConnect: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_AutoCreateAccountOnConnect(@ptrCast(*const IFaxConfiguration, self), pbAutoCreateAccountOnConnect); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_AutoCreateAccountOnConnect(self: *const T, bAutoCreateAccountOnConnect: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_AutoCreateAccountOnConnect(@ptrCast(*const IFaxConfiguration, self), bAutoCreateAccountOnConnect); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_get_IncomingFaxesArePublic(self: *const T, pbIncomingFaxesArePublic: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).get_IncomingFaxesArePublic(@ptrCast(*const IFaxConfiguration, self), pbIncomingFaxesArePublic); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxConfiguration_put_IncomingFaxesArePublic(self: *const T, bIncomingFaxesArePublic: i16) callconv(.Inline) HRESULT { return 
@ptrCast(*const IFaxConfiguration.VTable, self.vtable).put_IncomingFaxesArePublic(@ptrCast(*const IFaxConfiguration, self), bIncomingFaxesArePublic);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
// Thin wrapper: dispatches through the Refresh slot of the IFaxConfiguration vtable.
pub fn IFaxConfiguration_Refresh(self: *const T) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).Refresh(@ptrCast(*const IFaxConfiguration, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
// Thin wrapper: dispatches through the Save slot of the IFaxConfiguration vtable.
pub fn IFaxConfiguration_Save(self: *const T) callconv(.Inline) HRESULT {
    return @ptrCast(*const IFaxConfiguration.VTable, self.vtable).Save(@ptrCast(*const IFaxConfiguration, self));
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
// GUID value backing IID_IFaxServer2 (exposed below as a pointer constant).
const IID_IFaxServer2_Value = @import("../zig.zig").Guid.initString("571ced0f-5609-4f40-9176-547e3a72ca7c");
pub const IID_IFaxServer2 = &IID_IFaxServer2_Value;
// COM interface binding for IFaxServer2: extern vtable layout (extends IFaxServer)
// plus mixin-generated wrapper methods that forward calls through `self.vtable`.
pub const IFaxServer2 = extern struct {
    pub const VTable = extern struct {
        // Parent interface's vtable comes first to preserve COM binary layout.
        base: IFaxServer.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Configuration: fn(
            self: *const IFaxServer2,
            ppFaxConfiguration: ?*?*IFaxConfiguration,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_CurrentAccount: fn(
            self: *const IFaxServer2,
            ppCurrentAccount: ?*?*IFaxAccount,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FaxAccountSet: fn(
            self: *const IFaxServer2,
            ppFaxAccountSet: ?*?*IFaxAccountSet,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Security2: fn(
            self: *const IFaxServer2,
            ppFaxSecurity2: ?*?*IFaxSecurity2,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects IFaxServer2's wrapper methods (and, via IFaxServer.MethodMixin,
    // all inherited methods) into any type T carrying a compatible `vtable` field.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IFaxServer.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer2_get_Configuration(self: *const T, ppFaxConfiguration: ?*?*IFaxConfiguration) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer2.VTable, self.vtable).get_Configuration(@ptrCast(*const IFaxServer2, self), ppFaxConfiguration);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer2_get_CurrentAccount(self: *const T, ppCurrentAccount: ?*?*IFaxAccount) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer2.VTable, self.vtable).get_CurrentAccount(@ptrCast(*const IFaxServer2, self), ppCurrentAccount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer2_get_FaxAccountSet(self: *const T, ppFaxAccountSet: ?*?*IFaxAccountSet) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer2.VTable, self.vtable).get_FaxAccountSet(@ptrCast(*const IFaxServer2, self), ppFaxAccountSet);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxServer2_get_Security2(self: *const T, ppFaxSecurity2: ?*?*IFaxSecurity2) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxServer2.VTable, self.vtable).get_Security2(@ptrCast(*const IFaxServer2, self), ppFaxSecurity2);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
// GUID value backing IID_IFaxAccountSet.
const IID_IFaxAccountSet_Value = @import("../zig.zig").Guid.initString("7428fbae-841e-47b8-86f4-2288946dca1b");
pub const IID_IFaxAccountSet = &IID_IFaxAccountSet_Value;
// COM interface binding for IFaxAccountSet (derives from IDispatch).
pub const IFaxAccountSet = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        GetAccounts: fn(
            self: *const IFaxAccountSet,
            ppFaxAccounts: ?*?*IFaxAccounts,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAccount: fn(
            self: *const IFaxAccountSet,
            bstrAccountName: ?BSTR,
            pFaxAccount: ?*?*IFaxAccount,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddAccount: fn(
            self: *const IFaxAccountSet,
            bstrAccountName: ?BSTR,
            pFaxAccount: ?*?*IFaxAccount,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RemoveAccount: fn(
            self: *const IFaxAccountSet,
            bstrAccountName: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects IFaxAccountSet's wrapper methods (plus inherited IDispatch
    // methods) into any type T carrying a compatible `vtable` field.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountSet_GetAccounts(self: *const T, ppFaxAccounts: ?*?*IFaxAccounts) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountSet.VTable, self.vtable).GetAccounts(@ptrCast(*const IFaxAccountSet, self), ppFaxAccounts);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountSet_GetAccount(self: *const T, bstrAccountName: ?BSTR, pFaxAccount: ?*?*IFaxAccount) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountSet.VTable, self.vtable).GetAccount(@ptrCast(*const IFaxAccountSet, self), bstrAccountName, pFaxAccount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountSet_AddAccount(self: *const T, bstrAccountName: ?BSTR, pFaxAccount: ?*?*IFaxAccount) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountSet.VTable, self.vtable).AddAccount(@ptrCast(*const IFaxAccountSet, self), bstrAccountName, pFaxAccount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountSet_RemoveAccount(self: *const T, bstrAccountName: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountSet.VTable, self.vtable).RemoveAccount(@ptrCast(*const IFaxAccountSet, self), bstrAccountName);
        }
    };}
    pub
usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
// GUID value backing IID_IFaxAccounts.
const IID_IFaxAccounts_Value = @import("../zig.zig").Guid.initString("93ea8162-8be7-42d1-ae7b-ec74e2d989da");
pub const IID_IFaxAccounts = &IID_IFaxAccounts_Value;
// COM collection interface binding for IFaxAccounts (derives from IDispatch).
pub const IFaxAccounts = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get__NewEnum: fn(
            self: *const IFaxAccounts,
            ppUnk: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Item: fn(
            self: *const IFaxAccounts,
            vIndex: VARIANT,
            pFaxAccount: ?*?*IFaxAccount,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Count: fn(
            self: *const IFaxAccounts,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects IFaxAccounts' wrapper methods (plus inherited IDispatch
    // methods) into any type T carrying a compatible `vtable` field.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccounts_get__NewEnum(self: *const T, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccounts.VTable, self.vtable).get__NewEnum(@ptrCast(*const IFaxAccounts, self), ppUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccounts_get_Item(self: *const T, vIndex: VARIANT, pFaxAccount: ?*?*IFaxAccount) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccounts.VTable, self.vtable).get_Item(@ptrCast(*const IFaxAccounts, self), vIndex, pFaxAccount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccounts_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccounts.VTable, self.vtable).get_Count(@ptrCast(*const IFaxAccounts, self), plCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Bit flags selecting which fax-account event categories a caller listens to;
// values are combinable powers of two (except NONE).
pub const FAX_ACCOUNT_EVENTS_TYPE_ENUM = enum(i32) {
    NONE = 0,
    IN_QUEUE = 1,
    OUT_QUEUE = 2,
    IN_ARCHIVE = 4,
    OUT_ARCHIVE = 8,
    FXSSVC_ENDED = 16,
};
// Short-form aliases matching the original C enumerator names.
pub const faetNONE = FAX_ACCOUNT_EVENTS_TYPE_ENUM.NONE;
pub const faetIN_QUEUE = FAX_ACCOUNT_EVENTS_TYPE_ENUM.IN_QUEUE;
pub const faetOUT_QUEUE = FAX_ACCOUNT_EVENTS_TYPE_ENUM.OUT_QUEUE;
pub const faetIN_ARCHIVE = FAX_ACCOUNT_EVENTS_TYPE_ENUM.IN_ARCHIVE;
pub const faetOUT_ARCHIVE = FAX_ACCOUNT_EVENTS_TYPE_ENUM.OUT_ARCHIVE;
pub const faetFXSSVC_ENDED = FAX_ACCOUNT_EVENTS_TYPE_ENUM.FXSSVC_ENDED;

// TODO: this type is limited to platform 'windows6.0.6000'
// GUID value backing IID_IFaxAccount.
const IID_IFaxAccount_Value = @import("../zig.zig").Guid.initString("68535b33-5dc4-4086-be26-b76f9b711006");
pub const IID_IFaxAccount = &IID_IFaxAccount_Value;
// COM interface binding for IFaxAccount (derives from IDispatch).
pub const IFaxAccount = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AccountName: fn(
            self: *const IFaxAccount,
            pbstrAccountName: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Folders: fn(
            self: *const IFaxAccount,
            ppFolders: ?*?*IFaxAccountFolders,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ListenToAccountEvents: fn(
            self: *const IFaxAccount,
            EventTypes: FAX_ACCOUNT_EVENTS_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RegisteredEvents: fn(
            self: *const IFaxAccount,
            pRegisteredEvents: ?*FAX_ACCOUNT_EVENTS_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects IFaxAccount's wrapper methods (plus inherited IDispatch
    // methods) into any type T carrying a compatible `vtable` field.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccount_get_AccountName(self: *const T, pbstrAccountName: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccount.VTable, self.vtable).get_AccountName(@ptrCast(*const IFaxAccount, self), pbstrAccountName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccount_get_Folders(self: *const T, ppFolders: ?*?*IFaxAccountFolders) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccount.VTable, self.vtable).get_Folders(@ptrCast(*const IFaxAccount, self), ppFolders);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccount_ListenToAccountEvents(self: *const T, EventTypes: FAX_ACCOUNT_EVENTS_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccount.VTable, self.vtable).ListenToAccountEvents(@ptrCast(*const IFaxAccount, self), EventTypes);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccount_get_RegisteredEvents(self: *const T, pRegisteredEvents: ?*FAX_ACCOUNT_EVENTS_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccount.VTable, self.vtable).get_RegisteredEvents(@ptrCast(*const IFaxAccount, self), pRegisteredEvents);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
// GUID value backing IID_IFaxOutgoingJob2.
const IID_IFaxOutgoingJob2_Value = @import("../zig.zig").Guid.initString("418a8d96-59a0-4789-b176-edf3dc8fa8f7");
pub const IID_IFaxOutgoingJob2 = &IID_IFaxOutgoingJob2_Value;
// COM interface binding for IFaxOutgoingJob2 (extends IFaxOutgoingJob).
pub const IFaxOutgoingJob2 = extern struct {
    pub const VTable = extern struct {
        // Parent interface's vtable comes first to preserve COM binary layout.
        base: IFaxOutgoingJob.VTable,
        // TODO: this
function has a "SpecialName", should Zig do anything with this? get_HasCoverPage: fn( self: *const IFaxOutgoingJob2, pbHasCoverPage: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ReceiptAddress: fn( self: *const IFaxOutgoingJob2, pbstrReceiptAddress: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ScheduleType: fn( self: *const IFaxOutgoingJob2, pScheduleType: ?*FAX_SCHEDULE_TYPE_ENUM, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IFaxOutgoingJob.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob2_get_HasCoverPage(self: *const T, pbHasCoverPage: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob2.VTable, self.vtable).get_HasCoverPage(@ptrCast(*const IFaxOutgoingJob2, self), pbHasCoverPage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob2_get_ReceiptAddress(self: *const T, pbstrReceiptAddress: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob2.VTable, self.vtable).get_ReceiptAddress(@ptrCast(*const IFaxOutgoingJob2, self), pbstrReceiptAddress); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxOutgoingJob2_get_ScheduleType(self: *const T, pScheduleType: ?*FAX_SCHEDULE_TYPE_ENUM) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxOutgoingJob2.VTable, self.vtable).get_ScheduleType(@ptrCast(*const IFaxOutgoingJob2, self), pScheduleType); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IFaxAccountFolders_Value = @import("../zig.zig").Guid.initString("6463f89d-23d8-46a9-8f86-c47b77ca7926"); 
pub const IID_IFaxAccountFolders = &IID_IFaxAccountFolders_Value;
// COM interface binding for IFaxAccountFolders (derives from IDispatch):
// accessors for an account's four fax folders (queues and archives).
pub const IFaxAccountFolders = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OutgoingQueue: fn(
            self: *const IFaxAccountFolders,
            pFaxOutgoingQueue: ?*?*IFaxAccountOutgoingQueue,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IncomingQueue: fn(
            self: *const IFaxAccountFolders,
            pFaxIncomingQueue: ?*?*IFaxAccountIncomingQueue,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IncomingArchive: fn(
            self: *const IFaxAccountFolders,
            pFaxIncomingArchive: ?*?*IFaxAccountIncomingArchive,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_OutgoingArchive: fn(
            self: *const IFaxAccountFolders,
            pFaxOutgoingArchive: ?*?*IFaxAccountOutgoingArchive,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects IFaxAccountFolders' wrapper methods (plus inherited
    // IDispatch methods) into any type T carrying a compatible `vtable` field.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountFolders_get_OutgoingQueue(self: *const T, pFaxOutgoingQueue: ?*?*IFaxAccountOutgoingQueue) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountFolders.VTable, self.vtable).get_OutgoingQueue(@ptrCast(*const IFaxAccountFolders, self), pFaxOutgoingQueue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountFolders_get_IncomingQueue(self: *const T, pFaxIncomingQueue: ?*?*IFaxAccountIncomingQueue) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountFolders.VTable, self.vtable).get_IncomingQueue(@ptrCast(*const IFaxAccountFolders, self), pFaxIncomingQueue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountFolders_get_IncomingArchive(self: *const T, pFaxIncomingArchive: ?*?*IFaxAccountIncomingArchive) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountFolders.VTable, self.vtable).get_IncomingArchive(@ptrCast(*const IFaxAccountFolders, self), pFaxIncomingArchive);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountFolders_get_OutgoingArchive(self: *const T, pFaxOutgoingArchive: ?*?*IFaxAccountOutgoingArchive) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountFolders.VTable, self.vtable).get_OutgoingArchive(@ptrCast(*const IFaxAccountFolders, self), pFaxOutgoingArchive);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
// GUID value backing IID_IFaxAccountIncomingQueue.
const IID_IFaxAccountIncomingQueue_Value = @import("../zig.zig").Guid.initString("dd142d92-0186-4a95-a090-cbc3eadba6b4");
pub const IID_IFaxAccountIncomingQueue = &IID_IFaxAccountIncomingQueue_Value;
// COM interface binding for IFaxAccountIncomingQueue (derives from IDispatch).
pub const IFaxAccountIncomingQueue = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        GetJobs: fn(
            self: *const IFaxAccountIncomingQueue,
            pFaxIncomingJobs: ?*?*IFaxIncomingJobs,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetJob: fn(
            self: *const IFaxAccountIncomingQueue,
            bstrJobId: ?BSTR,
            pFaxIncomingJob: ?*?*IFaxIncomingJob,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects IFaxAccountIncomingQueue's wrapper methods (plus inherited
    // IDispatch methods) into any type T carrying a compatible `vtable` field.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxAccountIncomingQueue_GetJobs(self: *const T, pFaxIncomingJobs: ?*?*IFaxIncomingJobs) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxAccountIncomingQueue.VTable, self.vtable).GetJobs(@ptrCast(*const IFaxAccountIncomingQueue, self), pFaxIncomingJobs);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts
for now pub fn IFaxAccountIncomingQueue_GetJob(self: *const T, bstrJobId: ?BSTR, pFaxIncomingJob: ?*?*IFaxIncomingJob) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountIncomingQueue.VTable, self.vtable).GetJob(@ptrCast(*const IFaxAccountIncomingQueue, self), bstrJobId, pFaxIncomingJob); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IFaxAccountOutgoingQueue_Value = @import("../zig.zig").Guid.initString("0f1424e9-f22d-4553-b7a5-0d24bd0d7e46"); pub const IID_IFaxAccountOutgoingQueue = &IID_IFaxAccountOutgoingQueue_Value; pub const IFaxAccountOutgoingQueue = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, GetJobs: fn( self: *const IFaxAccountOutgoingQueue, pFaxOutgoingJobs: ?*?*IFaxOutgoingJobs, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetJob: fn( self: *const IFaxAccountOutgoingQueue, bstrJobId: ?BSTR, pFaxOutgoingJob: ?*?*IFaxOutgoingJob, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountOutgoingQueue_GetJobs(self: *const T, pFaxOutgoingJobs: ?*?*IFaxOutgoingJobs) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountOutgoingQueue.VTable, self.vtable).GetJobs(@ptrCast(*const IFaxAccountOutgoingQueue, self), pFaxOutgoingJobs); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountOutgoingQueue_GetJob(self: *const T, bstrJobId: ?BSTR, pFaxOutgoingJob: ?*?*IFaxOutgoingJob) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountOutgoingQueue.VTable, self.vtable).GetJob(@ptrCast(*const IFaxAccountOutgoingQueue, self), bstrJobId, pFaxOutgoingJob); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const 
IID_IFaxOutgoingMessage2_Value = @import("../zig.zig").Guid.initString("b37df687-bc88-4b46-b3be-b458b3ea9e7f");
pub const IID_IFaxOutgoingMessage2 = &IID_IFaxOutgoingMessage2_Value;
// COM interface binding for IFaxOutgoingMessage2 (extends IFaxOutgoingMessage).
pub const IFaxOutgoingMessage2 = extern struct {
    pub const VTable = extern struct {
        // Parent interface's vtable comes first to preserve COM binary layout.
        base: IFaxOutgoingMessage.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_HasCoverPage: fn(
            self: *const IFaxOutgoingMessage2,
            pbHasCoverPage: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ReceiptType: fn(
            self: *const IFaxOutgoingMessage2,
            pReceiptType: ?*FAX_RECEIPT_TYPE_ENUM,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ReceiptAddress: fn(
            self: *const IFaxOutgoingMessage2,
            pbstrReceiptAddress: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Read: fn(
            self: *const IFaxOutgoingMessage2,
            pbRead: ?*i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Read: fn(
            self: *const IFaxOutgoingMessage2,
            bRead: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Save: fn(
            self: *const IFaxOutgoingMessage2,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Refresh: fn(
            self: *const IFaxOutgoingMessage2,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects IFaxOutgoingMessage2's wrapper methods (plus all methods
    // inherited from IFaxOutgoingMessage) into any type T with a compatible `vtable`.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IFaxOutgoingMessage.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage2_get_HasCoverPage(self: *const T, pbHasCoverPage: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage2.VTable, self.vtable).get_HasCoverPage(@ptrCast(*const IFaxOutgoingMessage2, self), pbHasCoverPage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage2_get_ReceiptType(self: *const T, pReceiptType: ?*FAX_RECEIPT_TYPE_ENUM) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage2.VTable, self.vtable).get_ReceiptType(@ptrCast(*const IFaxOutgoingMessage2, self), pReceiptType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage2_get_ReceiptAddress(self: *const T, pbstrReceiptAddress: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage2.VTable, self.vtable).get_ReceiptAddress(@ptrCast(*const IFaxOutgoingMessage2, self), pbstrReceiptAddress);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage2_get_Read(self: *const T, pbRead: ?*i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage2.VTable, self.vtable).get_Read(@ptrCast(*const IFaxOutgoingMessage2, self), pbRead);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage2_put_Read(self: *const T, bRead: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const
IFaxOutgoingMessage2.VTable, self.vtable).put_Read(@ptrCast(*const IFaxOutgoingMessage2, self), bRead);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage2_Save(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage2.VTable, self.vtable).Save(@ptrCast(*const IFaxOutgoingMessage2, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFaxOutgoingMessage2_Refresh(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFaxOutgoingMessage2.VTable, self.vtable).Refresh(@ptrCast(*const IFaxOutgoingMessage2, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
// GUID value backing IID_IFaxAccountIncomingArchive.
const IID_IFaxAccountIncomingArchive_Value = @import("../zig.zig").Guid.initString("a8a5b6ef-e0d6-4aee-955c-91625bec9db4");
pub const IID_IFaxAccountIncomingArchive = &IID_IFaxAccountIncomingArchive_Value;
// COM interface binding for IFaxAccountIncomingArchive (derives from IDispatch).
// NOTE(review): definition continues past this chunk of the file.
pub const IFaxAccountIncomingArchive = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_SizeLow: fn(
            self: *const IFaxAccountIncomingArchive,
            plSizeLow: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
get_SizeHigh: fn( self: *const IFaxAccountIncomingArchive, plSizeHigh: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxAccountIncomingArchive, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMessages: fn( self: *const IFaxAccountIncomingArchive, lPrefetchSize: i32, pFaxIncomingMessageIterator: ?*?*IFaxIncomingMessageIterator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMessage: fn( self: *const IFaxAccountIncomingArchive, bstrMessageId: ?BSTR, pFaxIncomingMessage: ?*?*IFaxIncomingMessage, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountIncomingArchive_get_SizeLow(self: *const T, plSizeLow: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountIncomingArchive.VTable, self.vtable).get_SizeLow(@ptrCast(*const IFaxAccountIncomingArchive, self), plSizeLow); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountIncomingArchive_get_SizeHigh(self: *const T, plSizeHigh: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountIncomingArchive.VTable, self.vtable).get_SizeHigh(@ptrCast(*const IFaxAccountIncomingArchive, self), plSizeHigh); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountIncomingArchive_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountIncomingArchive.VTable, self.vtable).Refresh(@ptrCast(*const IFaxAccountIncomingArchive, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountIncomingArchive_GetMessages(self: *const T, lPrefetchSize: i32, pFaxIncomingMessageIterator: ?*?*IFaxIncomingMessageIterator) callconv(.Inline) HRESULT { return @ptrCast(*const 
IFaxAccountIncomingArchive.VTable, self.vtable).GetMessages(@ptrCast(*const IFaxAccountIncomingArchive, self), lPrefetchSize, pFaxIncomingMessageIterator); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountIncomingArchive_GetMessage(self: *const T, bstrMessageId: ?BSTR, pFaxIncomingMessage: ?*?*IFaxIncomingMessage) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountIncomingArchive.VTable, self.vtable).GetMessage(@ptrCast(*const IFaxAccountIncomingArchive, self), bstrMessageId, pFaxIncomingMessage); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IFaxAccountOutgoingArchive_Value = @import("../zig.zig").Guid.initString("5463076d-ec14-491f-926e-b3ceda5e5662"); pub const IID_IFaxAccountOutgoingArchive = &IID_IFaxAccountOutgoingArchive_Value; pub const IFaxAccountOutgoingArchive = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SizeLow: fn( self: *const IFaxAccountOutgoingArchive, plSizeLow: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_SizeHigh: fn( self: *const IFaxAccountOutgoingArchive, plSizeHigh: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxAccountOutgoingArchive, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMessages: fn( self: *const IFaxAccountOutgoingArchive, lPrefetchSize: i32, pFaxOutgoingMessageIterator: ?*?*IFaxOutgoingMessageIterator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMessage: fn( self: *const IFaxAccountOutgoingArchive, bstrMessageId: ?BSTR, pFaxOutgoingMessage: ?*?*IFaxOutgoingMessage, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountOutgoingArchive_get_SizeLow(self: *const T, plSizeLow: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountOutgoingArchive.VTable, self.vtable).get_SizeLow(@ptrCast(*const IFaxAccountOutgoingArchive, self), plSizeLow); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountOutgoingArchive_get_SizeHigh(self: *const T, plSizeHigh: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountOutgoingArchive.VTable, self.vtable).get_SizeHigh(@ptrCast(*const IFaxAccountOutgoingArchive, self), plSizeHigh); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountOutgoingArchive_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountOutgoingArchive.VTable, self.vtable).Refresh(@ptrCast(*const IFaxAccountOutgoingArchive, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountOutgoingArchive_GetMessages(self: *const T, lPrefetchSize: i32, pFaxOutgoingMessageIterator: ?*?*IFaxOutgoingMessageIterator) callconv(.Inline) HRESULT { return @ptrCast(*const 
IFaxAccountOutgoingArchive.VTable, self.vtable).GetMessages(@ptrCast(*const IFaxAccountOutgoingArchive, self), lPrefetchSize, pFaxOutgoingMessageIterator); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxAccountOutgoingArchive_GetMessage(self: *const T, bstrMessageId: ?BSTR, pFaxOutgoingMessage: ?*?*IFaxOutgoingMessage) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxAccountOutgoingArchive.VTable, self.vtable).GetMessage(@ptrCast(*const IFaxAccountOutgoingArchive, self), bstrMessageId, pFaxOutgoingMessage); } };} pub usingnamespace MethodMixin(@This()); }; pub const FAX_ACCESS_RIGHTS_ENUM_2 = enum(i32) { SUBMIT_LOW = 1, SUBMIT_NORMAL = 2, SUBMIT_HIGH = 4, QUERY_OUT_JOBS = 8, MANAGE_OUT_JOBS = 16, QUERY_CONFIG = 32, MANAGE_CONFIG = 64, QUERY_ARCHIVES = 128, MANAGE_ARCHIVES = 256, MANAGE_RECEIVE_FOLDER = 512, }; pub const far2SUBMIT_LOW = FAX_ACCESS_RIGHTS_ENUM_2.SUBMIT_LOW; pub const far2SUBMIT_NORMAL = FAX_ACCESS_RIGHTS_ENUM_2.SUBMIT_NORMAL; pub const far2SUBMIT_HIGH = FAX_ACCESS_RIGHTS_ENUM_2.SUBMIT_HIGH; pub const far2QUERY_OUT_JOBS = FAX_ACCESS_RIGHTS_ENUM_2.QUERY_OUT_JOBS; pub const far2MANAGE_OUT_JOBS = FAX_ACCESS_RIGHTS_ENUM_2.MANAGE_OUT_JOBS; pub const far2QUERY_CONFIG = FAX_ACCESS_RIGHTS_ENUM_2.QUERY_CONFIG; pub const far2MANAGE_CONFIG = FAX_ACCESS_RIGHTS_ENUM_2.MANAGE_CONFIG; pub const far2QUERY_ARCHIVES = FAX_ACCESS_RIGHTS_ENUM_2.QUERY_ARCHIVES; pub const far2MANAGE_ARCHIVES = FAX_ACCESS_RIGHTS_ENUM_2.MANAGE_ARCHIVES; pub const far2MANAGE_RECEIVE_FOLDER = FAX_ACCESS_RIGHTS_ENUM_2.MANAGE_RECEIVE_FOLDER; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IFaxSecurity2_Value = @import("../zig.zig").Guid.initString("17d851f4-d09b-48fc-99c9-8f24c4db9ab1"); pub const IID_IFaxSecurity2 = &IID_IFaxSecurity2_Value; pub const IFaxSecurity2 = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_Descriptor: fn( self: *const IFaxSecurity2, pvDescriptor: ?*VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_Descriptor: fn( self: *const IFaxSecurity2, vDescriptor: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_GrantedRights: fn( self: *const IFaxSecurity2, pGrantedRights: ?*FAX_ACCESS_RIGHTS_ENUM_2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxSecurity2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Save: fn( self: *const IFaxSecurity2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_InformationType: fn( self: *const IFaxSecurity2, plInformationType: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_InformationType: fn( self: *const IFaxSecurity2, lInformationType: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxSecurity2_get_Descriptor(self: *const T, pvDescriptor: ?*VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSecurity2.VTable, self.vtable).get_Descriptor(@ptrCast(*const IFaxSecurity2, self), pvDescriptor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxSecurity2_put_Descriptor(self: *const T, vDescriptor: VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSecurity2.VTable, self.vtable).put_Descriptor(@ptrCast(*const IFaxSecurity2, self), vDescriptor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxSecurity2_get_GrantedRights(self: *const T, pGrantedRights: ?*FAX_ACCESS_RIGHTS_ENUM_2) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSecurity2.VTable, self.vtable).get_GrantedRights(@ptrCast(*const IFaxSecurity2, self), pGrantedRights); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxSecurity2_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSecurity2.VTable, self.vtable).Refresh(@ptrCast(*const IFaxSecurity2, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxSecurity2_Save(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSecurity2.VTable, self.vtable).Save(@ptrCast(*const IFaxSecurity2, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxSecurity2_get_InformationType(self: *const T, plInformationType: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSecurity2.VTable, 
self.vtable).get_InformationType(@ptrCast(*const IFaxSecurity2, self), plInformationType); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxSecurity2_put_InformationType(self: *const T, lInformationType: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxSecurity2.VTable, self.vtable).put_InformationType(@ptrCast(*const IFaxSecurity2, self), lInformationType); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IFaxIncomingMessage2_Value = @import("../zig.zig").Guid.initString("f9208503-e2bc-48f3-9ec0-e6236f9b509a"); pub const IID_IFaxIncomingMessage2 = &IID_IFaxIncomingMessage2_Value; pub const IFaxIncomingMessage2 = extern struct { pub const VTable = extern struct { base: IFaxIncomingMessage.VTable, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Subject: fn( self: *const IFaxIncomingMessage2, pbstrSubject: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_Subject: fn( self: *const IFaxIncomingMessage2, bstrSubject: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SenderName: fn( self: *const IFaxIncomingMessage2, pbstrSenderName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_SenderName: fn( self: *const IFaxIncomingMessage2, bstrSenderName: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_SenderFaxNumber: fn( self: *const IFaxIncomingMessage2, pbstrSenderFaxNumber: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_SenderFaxNumber: fn( self: *const IFaxIncomingMessage2, bstrSenderFaxNumber: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_HasCoverPage: fn( self: *const IFaxIncomingMessage2, pbHasCoverPage: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_HasCoverPage: fn( self: *const IFaxIncomingMessage2, bHasCoverPage: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Recipients: fn( self: *const IFaxIncomingMessage2, pbstrRecipients: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_Recipients: fn( self: *const IFaxIncomingMessage2, bstrRecipients: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_WasReAssigned: fn( self: *const IFaxIncomingMessage2, pbWasReAssigned: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_Read: fn( self: *const IFaxIncomingMessage2, pbRead: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
put_Read: fn( self: *const IFaxIncomingMessage2, bRead: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ReAssign: fn( self: *const IFaxIncomingMessage2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Save: fn( self: *const IFaxIncomingMessage2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Refresh: fn( self: *const IFaxIncomingMessage2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IFaxIncomingMessage.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_get_Subject(self: *const T, pbstrSubject: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).get_Subject(@ptrCast(*const IFaxIncomingMessage2, self), pbstrSubject); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_put_Subject(self: *const T, bstrSubject: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).put_Subject(@ptrCast(*const IFaxIncomingMessage2, self), bstrSubject); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_get_SenderName(self: *const T, pbstrSenderName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).get_SenderName(@ptrCast(*const IFaxIncomingMessage2, self), pbstrSenderName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_put_SenderName(self: *const T, bstrSenderName: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).put_SenderName(@ptrCast(*const IFaxIncomingMessage2, self), bstrSenderName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_get_SenderFaxNumber(self: 
*const T, pbstrSenderFaxNumber: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).get_SenderFaxNumber(@ptrCast(*const IFaxIncomingMessage2, self), pbstrSenderFaxNumber); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_put_SenderFaxNumber(self: *const T, bstrSenderFaxNumber: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).put_SenderFaxNumber(@ptrCast(*const IFaxIncomingMessage2, self), bstrSenderFaxNumber); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_get_HasCoverPage(self: *const T, pbHasCoverPage: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).get_HasCoverPage(@ptrCast(*const IFaxIncomingMessage2, self), pbHasCoverPage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_put_HasCoverPage(self: *const T, bHasCoverPage: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).put_HasCoverPage(@ptrCast(*const IFaxIncomingMessage2, self), bHasCoverPage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_get_Recipients(self: *const T, pbstrRecipients: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).get_Recipients(@ptrCast(*const IFaxIncomingMessage2, self), pbstrRecipients); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_put_Recipients(self: *const T, bstrRecipients: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).put_Recipients(@ptrCast(*const IFaxIncomingMessage2, self), bstrRecipients); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IFaxIncomingMessage2_get_WasReAssigned(self: *const T, pbWasReAssigned: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).get_WasReAssigned(@ptrCast(*const IFaxIncomingMessage2, self), pbWasReAssigned); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_get_Read(self: *const T, pbRead: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).get_Read(@ptrCast(*const IFaxIncomingMessage2, self), pbRead); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_put_Read(self: *const T, bRead: i16) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).put_Read(@ptrCast(*const IFaxIncomingMessage2, self), bRead); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_ReAssign(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).ReAssign(@ptrCast(*const IFaxIncomingMessage2, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_Save(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).Save(@ptrCast(*const IFaxIncomingMessage2, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFaxIncomingMessage2_Refresh(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFaxIncomingMessage2.VTable, self.vtable).Refresh(@ptrCast(*const IFaxIncomingMessage2, self)); } };} pub usingnamespace MethodMixin(@This()); }; pub const FAX_ROUTING_RULE_CODE_ENUM = enum(i32) { E = 0, }; pub const frrcANY_CODE = FAX_ROUTING_RULE_CODE_ENUM.E; const IID_IFaxServerNotify_Value = @import("../zig.zig").Guid.initString("2e037b27-cf8a-4abd-b1e0-5704943bea6f"); pub const IID_IFaxServerNotify = 
&IID_IFaxServerNotify_Value; pub const IFaxServerNotify = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID__IFaxServerNotify2_Value = @import("../zig.zig").Guid.initString("ec9c69b9-5fe7-4805-9467-82fcd96af903"); pub const IID__IFaxServerNotify2 = &IID__IFaxServerNotify2_Value; pub const _IFaxServerNotify2 = extern struct { pub const VTable = extern struct { base: IDispatch.VTable, OnIncomingJobAdded: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnIncomingJobRemoved: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnIncomingJobChanged: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR, pJobStatus: ?*IFaxJobStatus, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutgoingJobAdded: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutgoingJobRemoved: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutgoingJobChanged: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR, pJobStatus: ?*IFaxJobStatus, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnIncomingMessageAdded: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnIncomingMessageRemoved: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, 
OnOutgoingMessageAdded: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutgoingMessageRemoved: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnReceiptOptionsChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnActivityLoggingConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnSecurityConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnEventLoggingConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutgoingQueueConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutgoingArchiveConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnIncomingArchiveConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnDevicesConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutboundRoutingGroupsConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnOutboundRoutingRulesConfigChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnServerActivityChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, lIncomingMessages: i32, lRoutingMessages: i32, lOutgoingMessages: i32, lQueuedMessages: i32, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, OnQueuesStatusChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, bOutgoingQueueBlocked: i16, bOutgoingQueuePaused: i16, bIncomingQueueBlocked: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnNewCall: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, lCallId: i32, lDeviceId: i32, bstrCallerId: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnServerShutDown: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnDeviceStatusChange: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, lDeviceId: i32, bPoweredOff: i16, bSending: i16, bReceiving: i16, bRinging: i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnGeneralServerConfigChanged: fn( self: *const _IFaxServerNotify2, pFaxServer: ?*IFaxServer2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDispatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnIncomingJobAdded(self: *const T, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnIncomingJobAdded(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrJobId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnIncomingJobRemoved(self: *const T, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnIncomingJobRemoved(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrJobId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnIncomingJobChanged(self: *const T, pFaxServer: ?*IFaxServer2, bstrJobId: 
?BSTR, pJobStatus: ?*IFaxJobStatus) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnIncomingJobChanged(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrJobId, pJobStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutgoingJobAdded(self: *const T, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutgoingJobAdded(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrJobId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutgoingJobRemoved(self: *const T, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutgoingJobRemoved(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrJobId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutgoingJobChanged(self: *const T, pFaxServer: ?*IFaxServer2, bstrJobId: ?BSTR, pJobStatus: ?*IFaxJobStatus) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutgoingJobChanged(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrJobId, pJobStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnIncomingMessageAdded(self: *const T, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnIncomingMessageAdded(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrMessageId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnIncomingMessageRemoved(self: *const T, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const 
_IFaxServerNotify2.VTable, self.vtable).OnIncomingMessageRemoved(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrMessageId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutgoingMessageAdded(self: *const T, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutgoingMessageAdded(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrMessageId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutgoingMessageRemoved(self: *const T, pFaxServer: ?*IFaxServer2, bstrMessageId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutgoingMessageRemoved(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bstrMessageId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnReceiptOptionsChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnReceiptOptionsChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnActivityLoggingConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnActivityLoggingConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnSecurityConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnSecurityConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid 
conflicts for now pub fn _IFaxServerNotify2_OnEventLoggingConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnEventLoggingConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutgoingQueueConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutgoingQueueConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutgoingArchiveConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutgoingArchiveConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnIncomingArchiveConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnIncomingArchiveConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnDevicesConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnDevicesConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutboundRoutingGroupsConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, 
self.vtable).OnOutboundRoutingGroupsConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnOutboundRoutingRulesConfigChange(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnOutboundRoutingRulesConfigChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnServerActivityChange(self: *const T, pFaxServer: ?*IFaxServer2, lIncomingMessages: i32, lRoutingMessages: i32, lOutgoingMessages: i32, lQueuedMessages: i32) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnServerActivityChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, lIncomingMessages, lRoutingMessages, lOutgoingMessages, lQueuedMessages); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnQueuesStatusChange(self: *const T, pFaxServer: ?*IFaxServer2, bOutgoingQueueBlocked: i16, bOutgoingQueuePaused: i16, bIncomingQueueBlocked: i16) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnQueuesStatusChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, bOutgoingQueueBlocked, bOutgoingQueuePaused, bIncomingQueueBlocked); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnNewCall(self: *const T, pFaxServer: ?*IFaxServer2, lCallId: i32, lDeviceId: i32, bstrCallerId: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnNewCall(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, lCallId, lDeviceId, bstrCallerId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn _IFaxServerNotify2_OnServerShutDown(self: *const 
T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT {
    return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnServerShutDown(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn _IFaxServerNotify2_OnDeviceStatusChange(self: *const T, pFaxServer: ?*IFaxServer2, lDeviceId: i32, bPoweredOff: i16, bSending: i16, bReceiving: i16, bRinging: i16) callconv(.Inline) HRESULT {
    return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnDeviceStatusChange(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer, lDeviceId, bPoweredOff, bSending, bReceiving, bRinging);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn _IFaxServerNotify2_OnGeneralServerConfigChanged(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT {
    return @ptrCast(*const _IFaxServerNotify2.VTable, self.vtable).OnGeneralServerConfigChanged(@ptrCast(*const _IFaxServerNotify2, self), pFaxServer);
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IFaxServerNotify2_Value = @import("../zig.zig").Guid.initString("616ca8d6-a77a-4062-abfd-0e471241c7aa");
pub const IID_IFaxServerNotify2 = &IID_IFaxServerNotify2_Value;
// Marker interface: only inherits the IDispatch vtable, adds no methods of its own.
pub const IFaxServerNotify2 = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID__IFaxAccountNotify_Value = @import("../zig.zig").Guid.initString("b9b3bc81-ac1b-46f3-b39d-0adc30e1b788");
pub const IID__IFaxAccountNotify = &IID__IFaxAccountNotify_Value;
// IDispatch-based notification sink for per-account fax job/message events.
// `extern struct` with `base` as the first field keeps the vtable layout
// ABI-compatible with the COM interface it mirrors.
pub const _IFaxAccountNotify = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        OnIncomingJobAdded: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrJobId: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnIncomingJobRemoved: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrJobId: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnIncomingJobChanged: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrJobId: ?BSTR,
            pJobStatus: ?*IFaxJobStatus,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnOutgoingJobAdded: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrJobId: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnOutgoingJobRemoved: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrJobId: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnOutgoingJobChanged: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrJobId: ?BSTR,
            pJobStatus: ?*IFaxJobStatus,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnIncomingMessageAdded: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrMessageId: ?BSTR,
            fAddedToReceiveFolder: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnIncomingMessageRemoved: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrMessageId: ?BSTR,
            fRemovedFromReceiveFolder: i16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnOutgoingMessageAdded: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrMessageId: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnOutgoingMessageRemoved: fn(
            self: *const _IFaxAccountNotify,
            pFaxAccount: ?*IFaxAccount,
            bstrMessageId: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnServerShutDown: fn(
            self: *const _IFaxAccountNotify,
            pFaxServer: ?*IFaxServer2,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that forwards each wrapper call through the vtable slot of the same name.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnIncomingJobAdded(self: *const T, pFaxAccount: ?*IFaxAccount, bstrJobId: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnIncomingJobAdded(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrJobId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnIncomingJobRemoved(self: *const T, pFaxAccount: ?*IFaxAccount, bstrJobId: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnIncomingJobRemoved(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrJobId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnIncomingJobChanged(self: *const T, pFaxAccount: ?*IFaxAccount, bstrJobId: ?BSTR, pJobStatus: ?*IFaxJobStatus) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnIncomingJobChanged(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrJobId, pJobStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnOutgoingJobAdded(self: *const T, pFaxAccount: ?*IFaxAccount, bstrJobId: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnOutgoingJobAdded(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrJobId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnOutgoingJobRemoved(self: *const T, pFaxAccount: ?*IFaxAccount, bstrJobId: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnOutgoingJobRemoved(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrJobId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnOutgoingJobChanged(self: *const T, pFaxAccount: ?*IFaxAccount, bstrJobId: ?BSTR, pJobStatus: ?*IFaxJobStatus) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnOutgoingJobChanged(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrJobId, pJobStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnIncomingMessageAdded(self: *const T, pFaxAccount: ?*IFaxAccount, bstrMessageId: ?BSTR, fAddedToReceiveFolder: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnIncomingMessageAdded(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrMessageId, fAddedToReceiveFolder);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnIncomingMessageRemoved(self: *const T, pFaxAccount: ?*IFaxAccount, bstrMessageId: ?BSTR, fRemovedFromReceiveFolder: i16) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnIncomingMessageRemoved(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrMessageId, fRemovedFromReceiveFolder);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnOutgoingMessageAdded(self: *const T, pFaxAccount: ?*IFaxAccount, bstrMessageId: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnOutgoingMessageAdded(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrMessageId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnOutgoingMessageRemoved(self: *const T, pFaxAccount: ?*IFaxAccount, bstrMessageId: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnOutgoingMessageRemoved(@ptrCast(*const _IFaxAccountNotify, self), pFaxAccount, bstrMessageId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn _IFaxAccountNotify_OnServerShutDown(self: *const T, pFaxServer: ?*IFaxServer2) callconv(.Inline) HRESULT {
            return @ptrCast(*const _IFaxAccountNotify.VTable, self.vtable).OnServerShutDown(@ptrCast(*const _IFaxAccountNotify, self), pFaxServer);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IFaxAccountNotify_Value = @import("../zig.zig").Guid.initString("0b5e5bd1-b8a9-47a0-a323-ef4a293ba06a");
pub const IID_IFaxAccountNotify = &IID_IFaxAccountNotify_Value;
// Marker interface: only inherits the IDispatch vtable, adds no methods of its own.
pub const IFaxAccountNotify = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
    };}
    pub usingnamespace MethodMixin(@This());
};

// Callback function-pointer types handed to fax routing extensions.
pub const PFAXROUTEADDFILE = fn(
    JobId: u32,
    FileName: ?[*:0]const u16,
    Guid: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) i32;
pub const PFAXROUTEDELETEFILE = fn(
    JobId: u32,
    FileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) i32;
pub const PFAXROUTEGETFILE = fn(
    JobId: u32,
    Index: u32,
    // TODO: what to do with BytesParamIndex 3?
    FileNameBuffer: ?PWSTR,
    RequiredSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTEENUMFILE = fn(
    JobId: u32,
    GuidOwner: ?*Guid,
    GuidCaller: ?*Guid,
    FileName: ?[*:0]const u16,
    Context: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTEENUMFILES = fn(
    JobId: u32,
    Guid: ?*Guid,
    FileEnumerator: ?PFAXROUTEENUMFILE,
    Context: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTEMODIFYROUTINGDATA = fn(
    JobId: u32,
    RoutingGuid: ?[*:0]const u16,
    RoutingData: ?*u8,
    RoutingDataSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// Table of service-provided callbacks passed to a routing extension at
// initialization. NOTE(review): SizeOfStruct presumably carries
// @sizeOf(FAX_ROUTE_CALLBACKROUTINES) for versioning — confirm against winfax.h.
pub const FAX_ROUTE_CALLBACKROUTINES = extern struct {
    SizeOfStruct: u32,
    FaxRouteAddFile: ?PFAXROUTEADDFILE,
    FaxRouteDeleteFile: ?PFAXROUTEDELETEFILE,
    FaxRouteGetFile: ?PFAXROUTEGETFILE,
    FaxRouteEnumFiles: ?PFAXROUTEENUMFILES,
    FaxRouteModifyRoutingData: ?PFAXROUTEMODIFYROUTINGDATA,
};
// Description of a received fax handed to each routing method.
pub const FAX_ROUTE = extern struct {
    SizeOfStruct: u32,
    JobId: u32,
    ElapsedTime: u64,
    ReceiveTime: u64,
    PageCount: u32,
    Csid: ?[*:0]const u16,
    Tsid: ?[*:0]const u16,
    CallerId: ?[*:0]const u16,
    RoutingInfo: ?[*:0]const u16,
    ReceiverName: ?[*:0]const u16,
    ReceiverNumber: ?[*:0]const u16,
    DeviceName: ?[*:0]const u16,
    DeviceId: u32,
    RoutingInfoData: ?*u8,
    RoutingInfoDataSize: u32,
};
pub const FAXROUTE_ENABLE = enum(i32) {
    QUERY_STATUS = -1,
    STATUS_DISABLE = 0,
    STATUS_ENABLE = 1,
};
pub const QUERY_STATUS = FAXROUTE_ENABLE.QUERY_STATUS;
pub const STATUS_DISABLE = FAXROUTE_ENABLE.STATUS_DISABLE;
pub const STATUS_ENABLE = FAXROUTE_ENABLE.STATUS_ENABLE;
// Entry points a routing-extension DLL exports (param names not preserved by the generator).
pub const PFAXROUTEINITIALIZE = fn(
    param0: ?HANDLE,
    param1: ?*FAX_ROUTE_CALLBACKROUTINES,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTEMETHOD = fn(
    param0: ?*const FAX_ROUTE,
    param1: ?*?*c_void,
    param2: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTEDEVICEENABLE = fn(
    param0: ?[*:0]const u16,
    param1: u32,
    param2: i32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTEDEVICECHANGENOTIFICATION = fn(
    param0: u32,
    param1: BOOL,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTEGETROUTINGINFO = fn(
    param0: ?[*:0]const u16,
    param1: u32,
    param2: ?*u8,
    param3: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub const PFAXROUTESETROUTINGINFO = fn(
    param0: ?[*:0]const u16,
    param1: u32,
    param2: ?*const u8,
    param3: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// Whether a device id came from the fax service or from TAPI.
pub const FAX_ENUM_DEVICE_ID_SOURCE = enum(i32) {
    FAX = 0,
    TAPI = 1,
};
pub const DEV_ID_SRC_FAX = FAX_ENUM_DEVICE_ID_SOURCE.FAX;
pub const DEV_ID_SRC_TAPI = FAX_ENUM_DEVICE_ID_SOURCE.TAPI;
// Fax extension configuration entry points (PFAX_EXT_*).
pub const PFAX_EXT_GET_DATA = fn(
    param0: u32,
    param1: FAX_ENUM_DEVICE_ID_SOURCE,
    param2: ?[*:0]const u16,
    param3: ?*?*u8,
    param4: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const PFAX_EXT_SET_DATA = fn(
    param0: ?HINSTANCE,
    param1: u32,
    param2: FAX_ENUM_DEVICE_ID_SOURCE,
    param3: ?[*:0]const u16,
    param4: ?*u8,
    param5: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const PFAX_EXT_CONFIG_CHANGE = fn(
    param0: u32,
    param1: ?[*:0]const u16,
    param2: ?*u8,
    param3: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub const PFAX_EXT_REGISTER_FOR_EVENTS = fn(
    param0: ?HINSTANCE,
    param1: u32,
    param2: FAX_ENUM_DEVICE_ID_SOURCE,
    param3: ?[*:0]const u16,
    param4: ?PFAX_EXT_CONFIG_CHANGE,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;
pub const PFAX_EXT_UNREGISTER_FOR_EVENTS = fn(
    param0: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const PFAX_EXT_FREE_BUFFER = fn(
    param0: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) void;
pub const PFAX_EXT_INITIALIZE_CONFIG = fn(
    param0: ?PFAX_EXT_GET_DATA,
    param1: ?PFAX_EXT_SET_DATA,
    param2: ?PFAX_EXT_REGISTER_FOR_EVENTS,
    param3: ?PFAX_EXT_UNREGISTER_FOR_EVENTS,
    param4: ?PFAX_EXT_FREE_BUFFER,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// Single-member enum; its only value is re-exported below under its Win32 name.
pub const SendToMode = enum(i32) {
    T = 0,
};
pub const SEND_TO_FAX_RECIPIENT_ATTACHMENT = SendToMode.T;
//--------------------------------------------------------------------------------
// Section: Functions (58)
//--------------------------------------------------------------------------------
// Thin extern declarations into WINFAX.DLL. The A/W suffix pairs are the
// ANSI (u8) and UTF-16 (u16) string variants of the same call; buffers
// returned through ?*?* out-params are documented by Win32 to be released
// with FaxFreeBuffer.
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxConnectFaxServerA(
    MachineName: ?[*:0]const u8,
    FaxHandle: ?*?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxConnectFaxServerW(
    MachineName: ?[*:0]const u16,
    FaxHandle: ?*?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub extern "WINFAX" fn FaxClose(
    FaxHandle: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub extern "WINFAX" fn FaxOpenPort(
    FaxHandle: ?HANDLE,
    DeviceId: u32,
    Flags: u32,
    FaxPortHandle: ?*?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxCompleteJobParamsA(
    JobParams: ?*?*FAX_JOB_PARAMA,
    CoverpageInfo: ?*?*FAX_COVERPAGE_INFOA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxCompleteJobParamsW(
    JobParams: ?*?*FAX_JOB_PARAMW,
    CoverpageInfo: ?*?*FAX_COVERPAGE_INFOW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSendDocumentA(
    FaxHandle: ?HANDLE,
    FileName: ?[*:0]const u8,
    JobParams: ?*FAX_JOB_PARAMA,
    CoverpageInfo: ?*const FAX_COVERPAGE_INFOA,
    FaxJobId: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSendDocumentW(
    FaxHandle: ?HANDLE,
    FileName: ?[*:0]const u16,
    JobParams: ?*FAX_JOB_PARAMW,
    CoverpageInfo: ?*const FAX_COVERPAGE_INFOW,
    FaxJobId: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSendDocumentForBroadcastA(
    FaxHandle: ?HANDLE,
    FileName: ?[*:0]const u8,
    FaxJobId: ?*u32,
    FaxRecipientCallback: ?PFAX_RECIPIENT_CALLBACKA,
    Context: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSendDocumentForBroadcastW(
    FaxHandle: ?HANDLE,
    FileName: ?[*:0]const u16,
    FaxJobId: ?*u32,
    FaxRecipientCallback: ?PFAX_RECIPIENT_CALLBACKW,
    Context: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumJobsA(
    FaxHandle: ?HANDLE,
    JobEntry: ?*?*FAX_JOB_ENTRYA,
    JobsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumJobsW(
    FaxHandle: ?HANDLE,
    JobEntry: ?*?*FAX_JOB_ENTRYW,
    JobsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetJobA(
    FaxHandle: ?HANDLE,
    JobId: u32,
    JobEntry: ?*?*FAX_JOB_ENTRYA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetJobW(
    FaxHandle: ?HANDLE,
    JobId: u32,
    JobEntry: ?*?*FAX_JOB_ENTRYW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetJobA(
    FaxHandle: ?HANDLE,
    JobId: u32,
    Command: u32,
    JobEntry: ?*const FAX_JOB_ENTRYA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetJobW(
    FaxHandle: ?HANDLE,
    JobId: u32,
    Command: u32,
    JobEntry: ?*const FAX_JOB_ENTRYW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub extern "WINFAX" fn FaxGetPageData(
    FaxHandle: ?HANDLE,
    JobId: u32,
    Buffer: ?*?*u8,
    BufferSize: ?*u32,
    ImageWidth: ?*u32,
    ImageHeight: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetDeviceStatusA(
    FaxPortHandle: ?HANDLE,
    DeviceStatus: ?*?*FAX_DEVICE_STATUSA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetDeviceStatusW(
    FaxPortHandle: ?HANDLE,
    DeviceStatus: ?*?*FAX_DEVICE_STATUSW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub extern "WINFAX" fn FaxAbort(
    FaxHandle: ?HANDLE,
    JobId: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetConfigurationA(
    FaxHandle: ?HANDLE,
    FaxConfig: ?*?*FAX_CONFIGURATIONA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetConfigurationW(
    FaxHandle: ?HANDLE,
    FaxConfig: ?*?*FAX_CONFIGURATIONW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetConfigurationA(
    FaxHandle: ?HANDLE,
    FaxConfig: ?*const FAX_CONFIGURATIONA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetConfigurationW(
    FaxHandle: ?HANDLE,
    FaxConfig: ?*const FAX_CONFIGURATIONW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetLoggingCategoriesA(
    FaxHandle: ?HANDLE,
    Categories: ?*?*FAX_LOG_CATEGORYA,
    NumberCategories: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetLoggingCategoriesW(
    FaxHandle: ?HANDLE,
    Categories: ?*?*FAX_LOG_CATEGORYW,
    NumberCategories: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetLoggingCategoriesA(
    FaxHandle: ?HANDLE,
    Categories: ?*const FAX_LOG_CATEGORYA,
    NumberCategories: u32,
)
callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetLoggingCategoriesW(
    FaxHandle: ?HANDLE,
    Categories: ?*const FAX_LOG_CATEGORYW,
    NumberCategories: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumPortsA(
    FaxHandle: ?HANDLE,
    PortInfo: ?*?*FAX_PORT_INFOA,
    PortsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumPortsW(
    FaxHandle: ?HANDLE,
    PortInfo: ?*?*FAX_PORT_INFOW,
    PortsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetPortA(
    FaxPortHandle: ?HANDLE,
    PortInfo: ?*?*FAX_PORT_INFOA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetPortW(
    FaxPortHandle: ?HANDLE,
    PortInfo: ?*?*FAX_PORT_INFOW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetPortA(
    FaxPortHandle: ?HANDLE,
    PortInfo: ?*const FAX_PORT_INFOA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetPortW(
    FaxPortHandle: ?HANDLE,
    PortInfo: ?*const FAX_PORT_INFOW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumRoutingMethodsA(
    FaxPortHandle: ?HANDLE,
    RoutingMethod: ?*?*FAX_ROUTING_METHODA,
    MethodsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumRoutingMethodsW(
    FaxPortHandle: ?HANDLE,
    RoutingMethod: ?*?*FAX_ROUTING_METHODW,
    MethodsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnableRoutingMethodA(
    FaxPortHandle: ?HANDLE,
    RoutingGuid: ?[*:0]const u8,
    Enabled: BOOL,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnableRoutingMethodW(
    FaxPortHandle: ?HANDLE,
    RoutingGuid: ?[*:0]const u16,
    Enabled: BOOL,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumGlobalRoutingInfoA(
    FaxHandle: ?HANDLE,
    RoutingInfo: ?*?*FAX_GLOBAL_ROUTING_INFOA,
    MethodsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxEnumGlobalRoutingInfoW(
    FaxHandle: ?HANDLE,
    RoutingInfo: ?*?*FAX_GLOBAL_ROUTING_INFOW,
    MethodsReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetGlobalRoutingInfoA(
    FaxHandle: ?HANDLE,
    RoutingInfo: ?*const FAX_GLOBAL_ROUTING_INFOA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetGlobalRoutingInfoW(
    FaxHandle: ?HANDLE,
    RoutingInfo: ?*const FAX_GLOBAL_ROUTING_INFOW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetRoutingInfoA(
    FaxPortHandle: ?HANDLE,
    RoutingGuid: ?[*:0]const u8,
    RoutingInfoBuffer: ?*?*u8,
    RoutingInfoBufferSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxGetRoutingInfoW(
    FaxPortHandle: ?HANDLE,
    RoutingGuid: ?[*:0]const u16,
    RoutingInfoBuffer: ?*?*u8,
    RoutingInfoBufferSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetRoutingInfoA(
    FaxPortHandle: ?HANDLE,
    RoutingGuid: ?[*:0]const u8,
    RoutingInfoBuffer: ?*const u8,
    RoutingInfoBufferSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxSetRoutingInfoW(
    FaxPortHandle: ?HANDLE,
    RoutingGuid: ?[*:0]const u16,
    RoutingInfoBuffer: ?*const u8,
    RoutingInfoBufferSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub extern "WINFAX" fn FaxInitializeEventQueue(
    FaxHandle: ?HANDLE,
    CompletionPort: ?HANDLE,
    CompletionKey: usize,
    hWnd: ?HWND,
    MessageStart: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// Releases buffers returned by the Fax* enumeration/get calls above.
pub extern "WINFAX" fn FaxFreeBuffer(
    Buffer: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxStartPrintJobA(
    PrinterName: ?[*:0]const u8,
    PrintInfo: ?*const FAX_PRINT_INFOA,
    FaxJobId: ?*u32,
    FaxContextInfo: ?*FAX_CONTEXT_INFOA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxStartPrintJobW(
    PrinterName: ?[*:0]const u16,
    PrintInfo: ?*const FAX_PRINT_INFOW,
    FaxJobId: ?*u32,
    FaxContextInfo: ?*FAX_CONTEXT_INFOW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxPrintCoverPageA(
    FaxContextInfo: ?*const FAX_CONTEXT_INFOA,
    CoverPageInfo: ?*const FAX_COVERPAGE_INFOA,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxPrintCoverPageW(
    FaxContextInfo: ?*const FAX_CONTEXT_INFOW,
    CoverPageInfo: ?*const FAX_COVERPAGE_INFOW,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxRegisterServiceProviderW(
    DeviceProvider: ?[*:0]const u16,
    FriendlyName: ?[*:0]const u16,
    ImageName: ?[*:0]const u16,
    TspName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub extern "WINFAX" fn FaxUnregisterServiceProviderW(
    DeviceProvider: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "WINFAX" fn FaxRegisterRoutingExtensionW(
    FaxHandle: ?HANDLE,
    ExtensionName: ?[*:0]const u16,
    FriendlyName: ?[*:0]const u16,
    ImageName: ?[*:0]const u16,
    CallBack: ?PFAX_ROUTING_INSTALLATION_CALLBACKW,
    Context: ?*c_void,
) callconv(@import("std").os.windows.WINAPI) BOOL;
pub extern "WINFAX" fn FaxAccessCheck(
    FaxHandle: ?HANDLE,
    AccessMask: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "fxsutility" fn CanSendToFaxRecipient(
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "fxsutility" fn SendToFaxRecipient(
    sndMode: SendToMode,
    lpFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (59)
//--------------------------------------------------------------------------------
// Re-exports the A or W flavor of each symbol under its suffix-free name,
// chosen at compile time by the project-wide `unicode_mode` switch.
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {
        pub const FAX_LOG_CATEGORY = thismodule.FAX_LOG_CATEGORYA;
        pub const FAX_CONFIGURATION = thismodule.FAX_CONFIGURATIONA;
        pub const FAX_DEVICE_STATUS = thismodule.FAX_DEVICE_STATUSA;
        pub const FAX_JOB_ENTRY = thismodule.FAX_JOB_ENTRYA;
        pub const FAX_PORT_INFO = thismodule.FAX_PORT_INFOA;
        pub const FAX_ROUTING_METHOD = thismodule.FAX_ROUTING_METHODA;
        pub const FAX_GLOBAL_ROUTING_INFO = thismodule.FAX_GLOBAL_ROUTING_INFOA;
        pub const FAX_COVERPAGE_INFO = thismodule.FAX_COVERPAGE_INFOA;
        pub const FAX_JOB_PARAM = thismodule.FAX_JOB_PARAMA;
        pub const FAX_EVENT = thismodule.FAX_EVENTA;
        pub const FAX_PRINT_INFO = thismodule.FAX_PRINT_INFOA;
        pub const FAX_CONTEXT_INFO =
thismodule.FAX_CONTEXT_INFOA; pub const PFAXCONNECTFAXSERVER = thismodule.PFAXCONNECTFAXSERVERA; pub const PFAXCOMPLETEJOBPARAMS = thismodule.PFAXCOMPLETEJOBPARAMSA; pub const PFAXSENDDOCUMENT = thismodule.PFAXSENDDOCUMENTA; pub const PFAX_RECIPIENT_CALLBACK = thismodule.PFAX_RECIPIENT_CALLBACKA; pub const PFAXSENDDOCUMENTFORBROADCAST = thismodule.PFAXSENDDOCUMENTFORBROADCASTA; pub const PFAXENUMJOBS = thismodule.PFAXENUMJOBSA; pub const PFAXGETJOB = thismodule.PFAXGETJOBA; pub const PFAXSETJOB = thismodule.PFAXSETJOBA; pub const PFAXGETDEVICESTATUS = thismodule.PFAXGETDEVICESTATUSA; pub const PFAXGETCONFIGURATION = thismodule.PFAXGETCONFIGURATIONA; pub const PFAXSETCONFIGURATION = thismodule.PFAXSETCONFIGURATIONA; pub const PFAXGETLOGGINGCATEGORIES = thismodule.PFAXGETLOGGINGCATEGORIESA; pub const PFAXSETLOGGINGCATEGORIES = thismodule.PFAXSETLOGGINGCATEGORIESA; pub const PFAXENUMPORTS = thismodule.PFAXENUMPORTSA; pub const PFAXGETPORT = thismodule.PFAXGETPORTA; pub const PFAXSETPORT = thismodule.PFAXSETPORTA; pub const PFAXENUMROUTINGMETHODS = thismodule.PFAXENUMROUTINGMETHODSA; pub const PFAXENABLEROUTINGMETHOD = thismodule.PFAXENABLEROUTINGMETHODA; pub const PFAXENUMGLOBALROUTINGINFO = thismodule.PFAXENUMGLOBALROUTINGINFOA; pub const PFAXSETGLOBALROUTINGINFO = thismodule.PFAXSETGLOBALROUTINGINFOA; pub const PFAXGETROUTINGINFO = thismodule.PFAXGETROUTINGINFOA; pub const PFAXSETROUTINGINFO = thismodule.PFAXSETROUTINGINFOA; pub const PFAXSTARTPRINTJOB = thismodule.PFAXSTARTPRINTJOBA; pub const PFAXPRINTCOVERPAGE = thismodule.PFAXPRINTCOVERPAGEA; pub const FaxConnectFaxServer = thismodule.FaxConnectFaxServerA; pub const FaxCompleteJobParams = thismodule.FaxCompleteJobParamsA; pub const FaxSendDocument = thismodule.FaxSendDocumentA; pub const FaxSendDocumentForBroadcast = thismodule.FaxSendDocumentForBroadcastA; pub const FaxEnumJobs = thismodule.FaxEnumJobsA; pub const FaxGetJob = thismodule.FaxGetJobA; pub const FaxSetJob = thismodule.FaxSetJobA; pub const 
FaxGetDeviceStatus = thismodule.FaxGetDeviceStatusA; pub const FaxGetConfiguration = thismodule.FaxGetConfigurationA; pub const FaxSetConfiguration = thismodule.FaxSetConfigurationA; pub const FaxGetLoggingCategories = thismodule.FaxGetLoggingCategoriesA; pub const FaxSetLoggingCategories = thismodule.FaxSetLoggingCategoriesA; pub const FaxEnumPorts = thismodule.FaxEnumPortsA; pub const FaxGetPort = thismodule.FaxGetPortA; pub const FaxSetPort = thismodule.FaxSetPortA; pub const FaxEnumRoutingMethods = thismodule.FaxEnumRoutingMethodsA; pub const FaxEnableRoutingMethod = thismodule.FaxEnableRoutingMethodA; pub const FaxEnumGlobalRoutingInfo = thismodule.FaxEnumGlobalRoutingInfoA; pub const FaxSetGlobalRoutingInfo = thismodule.FaxSetGlobalRoutingInfoA; pub const FaxGetRoutingInfo = thismodule.FaxGetRoutingInfoA; pub const FaxSetRoutingInfo = thismodule.FaxSetRoutingInfoA; pub const FaxStartPrintJob = thismodule.FaxStartPrintJobA; pub const FaxPrintCoverPage = thismodule.FaxPrintCoverPageA; }, .wide => struct { pub const FAX_LOG_CATEGORY = thismodule.FAX_LOG_CATEGORYW; pub const FAX_CONFIGURATION = thismodule.FAX_CONFIGURATIONW; pub const FAX_DEVICE_STATUS = thismodule.FAX_DEVICE_STATUSW; pub const FAX_JOB_ENTRY = thismodule.FAX_JOB_ENTRYW; pub const FAX_PORT_INFO = thismodule.FAX_PORT_INFOW; pub const FAX_ROUTING_METHOD = thismodule.FAX_ROUTING_METHODW; pub const FAX_GLOBAL_ROUTING_INFO = thismodule.FAX_GLOBAL_ROUTING_INFOW; pub const FAX_COVERPAGE_INFO = thismodule.FAX_COVERPAGE_INFOW; pub const FAX_JOB_PARAM = thismodule.FAX_JOB_PARAMW; pub const FAX_EVENT = thismodule.FAX_EVENTW; pub const FAX_PRINT_INFO = thismodule.FAX_PRINT_INFOW; pub const FAX_CONTEXT_INFO = thismodule.FAX_CONTEXT_INFOW; pub const PFAXCONNECTFAXSERVER = thismodule.PFAXCONNECTFAXSERVERW; pub const PFAXCOMPLETEJOBPARAMS = thismodule.PFAXCOMPLETEJOBPARAMSW; pub const PFAXSENDDOCUMENT = thismodule.PFAXSENDDOCUMENTW; pub const PFAX_RECIPIENT_CALLBACK = thismodule.PFAX_RECIPIENT_CALLBACKW; pub 
const PFAXSENDDOCUMENTFORBROADCAST = thismodule.PFAXSENDDOCUMENTFORBROADCASTW; pub const PFAXENUMJOBS = thismodule.PFAXENUMJOBSW; pub const PFAXGETJOB = thismodule.PFAXGETJOBW; pub const PFAXSETJOB = thismodule.PFAXSETJOBW; pub const PFAXGETDEVICESTATUS = thismodule.PFAXGETDEVICESTATUSW; pub const PFAXGETCONFIGURATION = thismodule.PFAXGETCONFIGURATIONW; pub const PFAXSETCONFIGURATION = thismodule.PFAXSETCONFIGURATIONW; pub const PFAXGETLOGGINGCATEGORIES = thismodule.PFAXGETLOGGINGCATEGORIESW; pub const PFAXSETLOGGINGCATEGORIES = thismodule.PFAXSETLOGGINGCATEGORIESW; pub const PFAXENUMPORTS = thismodule.PFAXENUMPORTSW; pub const PFAXGETPORT = thismodule.PFAXGETPORTW; pub const PFAXSETPORT = thismodule.PFAXSETPORTW; pub const PFAXENUMROUTINGMETHODS = thismodule.PFAXENUMROUTINGMETHODSW; pub const PFAXENABLEROUTINGMETHOD = thismodule.PFAXENABLEROUTINGMETHODW; pub const PFAXENUMGLOBALROUTINGINFO = thismodule.PFAXENUMGLOBALROUTINGINFOW; pub const PFAXSETGLOBALROUTINGINFO = thismodule.PFAXSETGLOBALROUTINGINFOW; pub const PFAXGETROUTINGINFO = thismodule.PFAXGETROUTINGINFOW; pub const PFAXSETROUTINGINFO = thismodule.PFAXSETROUTINGINFOW; pub const PFAXSTARTPRINTJOB = thismodule.PFAXSTARTPRINTJOBW; pub const PFAXPRINTCOVERPAGE = thismodule.PFAXPRINTCOVERPAGEW; pub const FaxConnectFaxServer = thismodule.FaxConnectFaxServerW; pub const FaxCompleteJobParams = thismodule.FaxCompleteJobParamsW; pub const FaxSendDocument = thismodule.FaxSendDocumentW; pub const FaxSendDocumentForBroadcast = thismodule.FaxSendDocumentForBroadcastW; pub const FaxEnumJobs = thismodule.FaxEnumJobsW; pub const FaxGetJob = thismodule.FaxGetJobW; pub const FaxSetJob = thismodule.FaxSetJobW; pub const FaxGetDeviceStatus = thismodule.FaxGetDeviceStatusW; pub const FaxGetConfiguration = thismodule.FaxGetConfigurationW; pub const FaxSetConfiguration = thismodule.FaxSetConfigurationW; pub const FaxGetLoggingCategories = thismodule.FaxGetLoggingCategoriesW; pub const FaxSetLoggingCategories = 
thismodule.FaxSetLoggingCategoriesW; pub const FaxEnumPorts = thismodule.FaxEnumPortsW; pub const FaxGetPort = thismodule.FaxGetPortW; pub const FaxSetPort = thismodule.FaxSetPortW; pub const FaxEnumRoutingMethods = thismodule.FaxEnumRoutingMethodsW; pub const FaxEnableRoutingMethod = thismodule.FaxEnableRoutingMethodW; pub const FaxEnumGlobalRoutingInfo = thismodule.FaxEnumGlobalRoutingInfoW; pub const FaxSetGlobalRoutingInfo = thismodule.FaxSetGlobalRoutingInfoW; pub const FaxGetRoutingInfo = thismodule.FaxGetRoutingInfoW; pub const FaxSetRoutingInfo = thismodule.FaxSetRoutingInfoW; pub const FaxStartPrintJob = thismodule.FaxStartPrintJobW; pub const FaxPrintCoverPage = thismodule.FaxPrintCoverPageW; }, .unspecified => if (@import("builtin").is_test) struct { pub const FAX_LOG_CATEGORY = *opaque{}; pub const FAX_CONFIGURATION = *opaque{}; pub const FAX_DEVICE_STATUS = *opaque{}; pub const FAX_JOB_ENTRY = *opaque{}; pub const FAX_PORT_INFO = *opaque{}; pub const FAX_ROUTING_METHOD = *opaque{}; pub const FAX_GLOBAL_ROUTING_INFO = *opaque{}; pub const FAX_COVERPAGE_INFO = *opaque{}; pub const FAX_JOB_PARAM = *opaque{}; pub const FAX_EVENT = *opaque{}; pub const FAX_PRINT_INFO = *opaque{}; pub const FAX_CONTEXT_INFO = *opaque{}; pub const PFAXCONNECTFAXSERVER = *opaque{}; pub const PFAXCOMPLETEJOBPARAMS = *opaque{}; pub const PFAXSENDDOCUMENT = *opaque{}; pub const PFAX_RECIPIENT_CALLBACK = *opaque{}; pub const PFAXSENDDOCUMENTFORBROADCAST = *opaque{}; pub const PFAXENUMJOBS = *opaque{}; pub const PFAXGETJOB = *opaque{}; pub const PFAXSETJOB = *opaque{}; pub const PFAXGETDEVICESTATUS = *opaque{}; pub const PFAXGETCONFIGURATION = *opaque{}; pub const PFAXSETCONFIGURATION = *opaque{}; pub const PFAXGETLOGGINGCATEGORIES = *opaque{}; pub const PFAXSETLOGGINGCATEGORIES = *opaque{}; pub const PFAXENUMPORTS = *opaque{}; pub const PFAXGETPORT = *opaque{}; pub const PFAXSETPORT = *opaque{}; pub const PFAXENUMROUTINGMETHODS = *opaque{}; pub const PFAXENABLEROUTINGMETHOD = 
*opaque{}; pub const PFAXENUMGLOBALROUTINGINFO = *opaque{}; pub const PFAXSETGLOBALROUTINGINFO = *opaque{}; pub const PFAXGETROUTINGINFO = *opaque{}; pub const PFAXSETROUTINGINFO = *opaque{}; pub const PFAXSTARTPRINTJOB = *opaque{}; pub const PFAXPRINTCOVERPAGE = *opaque{}; pub const FaxConnectFaxServer = *opaque{}; pub const FaxCompleteJobParams = *opaque{}; pub const FaxSendDocument = *opaque{}; pub const FaxSendDocumentForBroadcast = *opaque{}; pub const FaxEnumJobs = *opaque{}; pub const FaxGetJob = *opaque{}; pub const FaxSetJob = *opaque{}; pub const FaxGetDeviceStatus = *opaque{}; pub const FaxGetConfiguration = *opaque{}; pub const FaxSetConfiguration = *opaque{}; pub const FaxGetLoggingCategories = *opaque{}; pub const FaxSetLoggingCategories = *opaque{}; pub const FaxEnumPorts = *opaque{}; pub const FaxGetPort = *opaque{}; pub const FaxSetPort = *opaque{}; pub const FaxEnumRoutingMethods = *opaque{}; pub const FaxEnableRoutingMethod = *opaque{}; pub const FaxEnumGlobalRoutingInfo = *opaque{}; pub const FaxSetGlobalRoutingInfo = *opaque{}; pub const FaxGetRoutingInfo = *opaque{}; pub const FaxSetRoutingInfo = *opaque{}; pub const FaxStartPrintJob = *opaque{}; pub const FaxPrintCoverPage = *opaque{}; } else struct { pub const FAX_LOG_CATEGORY = @compileError("'FAX_LOG_CATEGORY' requires that UNICODE be set to true or false in the root module"); pub const FAX_CONFIGURATION = @compileError("'FAX_CONFIGURATION' requires that UNICODE be set to true or false in the root module"); pub const FAX_DEVICE_STATUS = @compileError("'FAX_DEVICE_STATUS' requires that UNICODE be set to true or false in the root module"); pub const FAX_JOB_ENTRY = @compileError("'FAX_JOB_ENTRY' requires that UNICODE be set to true or false in the root module"); pub const FAX_PORT_INFO = @compileError("'FAX_PORT_INFO' requires that UNICODE be set to true or false in the root module"); pub const FAX_ROUTING_METHOD = @compileError("'FAX_ROUTING_METHOD' requires that UNICODE be set to true or 
false in the root module"); pub const FAX_GLOBAL_ROUTING_INFO = @compileError("'FAX_GLOBAL_ROUTING_INFO' requires that UNICODE be set to true or false in the root module"); pub const FAX_COVERPAGE_INFO = @compileError("'FAX_COVERPAGE_INFO' requires that UNICODE be set to true or false in the root module"); pub const FAX_JOB_PARAM = @compileError("'FAX_JOB_PARAM' requires that UNICODE be set to true or false in the root module"); pub const FAX_EVENT = @compileError("'FAX_EVENT' requires that UNICODE be set to true or false in the root module"); pub const FAX_PRINT_INFO = @compileError("'FAX_PRINT_INFO' requires that UNICODE be set to true or false in the root module"); pub const FAX_CONTEXT_INFO = @compileError("'FAX_CONTEXT_INFO' requires that UNICODE be set to true or false in the root module"); pub const PFAXCONNECTFAXSERVER = @compileError("'PFAXCONNECTFAXSERVER' requires that UNICODE be set to true or false in the root module"); pub const PFAXCOMPLETEJOBPARAMS = @compileError("'PFAXCOMPLETEJOBPARAMS' requires that UNICODE be set to true or false in the root module"); pub const PFAXSENDDOCUMENT = @compileError("'PFAXSENDDOCUMENT' requires that UNICODE be set to true or false in the root module"); pub const PFAX_RECIPIENT_CALLBACK = @compileError("'PFAX_RECIPIENT_CALLBACK' requires that UNICODE be set to true or false in the root module"); pub const PFAXSENDDOCUMENTFORBROADCAST = @compileError("'PFAXSENDDOCUMENTFORBROADCAST' requires that UNICODE be set to true or false in the root module"); pub const PFAXENUMJOBS = @compileError("'PFAXENUMJOBS' requires that UNICODE be set to true or false in the root module"); pub const PFAXGETJOB = @compileError("'PFAXGETJOB' requires that UNICODE be set to true or false in the root module"); pub const PFAXSETJOB = @compileError("'PFAXSETJOB' requires that UNICODE be set to true or false in the root module"); pub const PFAXGETDEVICESTATUS = @compileError("'PFAXGETDEVICESTATUS' requires that UNICODE be set to true or false in 
the root module"); pub const PFAXGETCONFIGURATION = @compileError("'PFAXGETCONFIGURATION' requires that UNICODE be set to true or false in the root module"); pub const PFAXSETCONFIGURATION = @compileError("'PFAXSETCONFIGURATION' requires that UNICODE be set to true or false in the root module"); pub const PFAXGETLOGGINGCATEGORIES = @compileError("'PFAXGETLOGGINGCATEGORIES' requires that UNICODE be set to true or false in the root module"); pub const PFAXSETLOGGINGCATEGORIES = @compileError("'PFAXSETLOGGINGCATEGORIES' requires that UNICODE be set to true or false in the root module"); pub const PFAXENUMPORTS = @compileError("'PFAXENUMPORTS' requires that UNICODE be set to true or false in the root module"); pub const PFAXGETPORT = @compileError("'PFAXGETPORT' requires that UNICODE be set to true or false in the root module"); pub const PFAXSETPORT = @compileError("'PFAXSETPORT' requires that UNICODE be set to true or false in the root module"); pub const PFAXENUMROUTINGMETHODS = @compileError("'PFAXENUMROUTINGMETHODS' requires that UNICODE be set to true or false in the root module"); pub const PFAXENABLEROUTINGMETHOD = @compileError("'PFAXENABLEROUTINGMETHOD' requires that UNICODE be set to true or false in the root module"); pub const PFAXENUMGLOBALROUTINGINFO = @compileError("'PFAXENUMGLOBALROUTINGINFO' requires that UNICODE be set to true or false in the root module"); pub const PFAXSETGLOBALROUTINGINFO = @compileError("'PFAXSETGLOBALROUTINGINFO' requires that UNICODE be set to true or false in the root module"); pub const PFAXGETROUTINGINFO = @compileError("'PFAXGETROUTINGINFO' requires that UNICODE be set to true or false in the root module"); pub const PFAXSETROUTINGINFO = @compileError("'PFAXSETROUTINGINFO' requires that UNICODE be set to true or false in the root module"); pub const PFAXSTARTPRINTJOB = @compileError("'PFAXSTARTPRINTJOB' requires that UNICODE be set to true or false in the root module"); pub const PFAXPRINTCOVERPAGE = 
@compileError("'PFAXPRINTCOVERPAGE' requires that UNICODE be set to true or false in the root module"); pub const FaxConnectFaxServer = @compileError("'FaxConnectFaxServer' requires that UNICODE be set to true or false in the root module"); pub const FaxCompleteJobParams = @compileError("'FaxCompleteJobParams' requires that UNICODE be set to true or false in the root module"); pub const FaxSendDocument = @compileError("'FaxSendDocument' requires that UNICODE be set to true or false in the root module"); pub const FaxSendDocumentForBroadcast = @compileError("'FaxSendDocumentForBroadcast' requires that UNICODE be set to true or false in the root module"); pub const FaxEnumJobs = @compileError("'FaxEnumJobs' requires that UNICODE be set to true or false in the root module"); pub const FaxGetJob = @compileError("'FaxGetJob' requires that UNICODE be set to true or false in the root module"); pub const FaxSetJob = @compileError("'FaxSetJob' requires that UNICODE be set to true or false in the root module"); pub const FaxGetDeviceStatus = @compileError("'FaxGetDeviceStatus' requires that UNICODE be set to true or false in the root module"); pub const FaxGetConfiguration = @compileError("'FaxGetConfiguration' requires that UNICODE be set to true or false in the root module"); pub const FaxSetConfiguration = @compileError("'FaxSetConfiguration' requires that UNICODE be set to true or false in the root module"); pub const FaxGetLoggingCategories = @compileError("'FaxGetLoggingCategories' requires that UNICODE be set to true or false in the root module"); pub const FaxSetLoggingCategories = @compileError("'FaxSetLoggingCategories' requires that UNICODE be set to true or false in the root module"); pub const FaxEnumPorts = @compileError("'FaxEnumPorts' requires that UNICODE be set to true or false in the root module"); pub const FaxGetPort = @compileError("'FaxGetPort' requires that UNICODE be set to true or false in the root module"); pub const FaxSetPort = 
@compileError("'FaxSetPort' requires that UNICODE be set to true or false in the root module"); pub const FaxEnumRoutingMethods = @compileError("'FaxEnumRoutingMethods' requires that UNICODE be set to true or false in the root module"); pub const FaxEnableRoutingMethod = @compileError("'FaxEnableRoutingMethod' requires that UNICODE be set to true or false in the root module"); pub const FaxEnumGlobalRoutingInfo = @compileError("'FaxEnumGlobalRoutingInfo' requires that UNICODE be set to true or false in the root module"); pub const FaxSetGlobalRoutingInfo = @compileError("'FaxSetGlobalRoutingInfo' requires that UNICODE be set to true or false in the root module"); pub const FaxGetRoutingInfo = @compileError("'FaxGetRoutingInfo' requires that UNICODE be set to true or false in the root module"); pub const FaxSetRoutingInfo = @compileError("'FaxSetRoutingInfo' requires that UNICODE be set to true or false in the root module"); pub const FaxStartPrintJob = @compileError("'FaxStartPrintJob' requires that UNICODE be set to true or false in the root module"); pub const FaxPrintCoverPage = @compileError("'FaxPrintCoverPage' requires that UNICODE be set to true or false in the root module"); }, }; //-------------------------------------------------------------------------------- // Section: Imports (17) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const BOOL = @import("../foundation.zig").BOOL; const BSTR = @import("../foundation.zig").BSTR; const CHAR = @import("../system/system_services.zig").CHAR; const FILETIME = @import("../foundation.zig").FILETIME; const HANDLE = @import("../foundation.zig").HANDLE; const HDC = @import("../graphics/gdi.zig").HDC; const HINSTANCE = @import("../foundation.zig").HINSTANCE; const HPROPSHEETPAGE = @import("../ui/controls.zig").HPROPSHEETPAGE; const HRESULT = @import("../foundation.zig").HRESULT; const HWND = @import("../foundation.zig").HWND; const IDispatch = 
@import("../system/ole_automation.zig").IDispatch; const IUnknown = @import("../system/com.zig").IUnknown; const PSTR = @import("../foundation.zig").PSTR; const PWSTR = @import("../foundation.zig").PWSTR; const SYSTEMTIME = @import("../foundation.zig").SYSTEMTIME; const VARIANT = @import("../system/ole_automation.zig").VARIANT; test { // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476 if (@hasDecl(@This(), "PFAXCONNECTFAXSERVERA")) { _ = PFAXCONNECTFAXSERVERA; } if (@hasDecl(@This(), "PFAXCONNECTFAXSERVERW")) { _ = PFAXCONNECTFAXSERVERW; } if (@hasDecl(@This(), "PFAXCLOSE")) { _ = PFAXCLOSE; } if (@hasDecl(@This(), "PFAXOPENPORT")) { _ = PFAXOPENPORT; } if (@hasDecl(@This(), "PFAXCOMPLETEJOBPARAMSA")) { _ = PFAXCOMPLETEJOBPARAMSA; } if (@hasDecl(@This(), "PFAXCOMPLETEJOBPARAMSW")) { _ = PFAXCOMPLETEJOBPARAMSW; } if (@hasDecl(@This(), "PFAXSENDDOCUMENTA")) { _ = PFAXSENDDOCUMENTA; } if (@hasDecl(@This(), "PFAXSENDDOCUMENTW")) { _ = PFAXSENDDOCUMENTW; } if (@hasDecl(@This(), "PFAX_RECIPIENT_CALLBACKA")) { _ = PFAX_RECIPIENT_CALLBACKA; } if (@hasDecl(@This(), "PFAX_RECIPIENT_CALLBACKW")) { _ = PFAX_RECIPIENT_CALLBACKW; } if (@hasDecl(@This(), "PFAXSENDDOCUMENTFORBROADCASTA")) { _ = PFAXSENDDOCUMENTFORBROADCASTA; } if (@hasDecl(@This(), "PFAXSENDDOCUMENTFORBROADCASTW")) { _ = PFAXSENDDOCUMENTFORBROADCASTW; } if (@hasDecl(@This(), "PFAXENUMJOBSA")) { _ = PFAXENUMJOBSA; } if (@hasDecl(@This(), "PFAXENUMJOBSW")) { _ = PFAXENUMJOBSW; } if (@hasDecl(@This(), "PFAXGETJOBA")) { _ = PFAXGETJOBA; } if (@hasDecl(@This(), "PFAXGETJOBW")) { _ = PFAXGETJOBW; } if (@hasDecl(@This(), "PFAXSETJOBA")) { _ = PFAXSETJOBA; } if (@hasDecl(@This(), "PFAXSETJOBW")) { _ = PFAXSETJOBW; } if (@hasDecl(@This(), "PFAXGETPAGEDATA")) { _ = PFAXGETPAGEDATA; } if (@hasDecl(@This(), "PFAXGETDEVICESTATUSA")) { _ = PFAXGETDEVICESTATUSA; } if (@hasDecl(@This(), "PFAXGETDEVICESTATUSW")) { _ = PFAXGETDEVICESTATUSW; } if (@hasDecl(@This(), 
"PFAXABORT")) { _ = PFAXABORT; } if (@hasDecl(@This(), "PFAXGETCONFIGURATIONA")) { _ = PFAXGETCONFIGURATIONA; } if (@hasDecl(@This(), "PFAXGETCONFIGURATIONW")) { _ = PFAXGETCONFIGURATIONW; } if (@hasDecl(@This(), "PFAXSETCONFIGURATIONA")) { _ = PFAXSETCONFIGURATIONA; } if (@hasDecl(@This(), "PFAXSETCONFIGURATIONW")) { _ = PFAXSETCONFIGURATIONW; } if (@hasDecl(@This(), "PFAXGETLOGGINGCATEGORIESA")) { _ = PFAXGETLOGGINGCATEGORIESA; } if (@hasDecl(@This(), "PFAXGETLOGGINGCATEGORIESW")) { _ = PFAXGETLOGGINGCATEGORIESW; } if (@hasDecl(@This(), "PFAXSETLOGGINGCATEGORIESA")) { _ = PFAXSETLOGGINGCATEGORIESA; } if (@hasDecl(@This(), "PFAXSETLOGGINGCATEGORIESW")) { _ = PFAXSETLOGGINGCATEGORIESW; } if (@hasDecl(@This(), "PFAXENUMPORTSA")) { _ = PFAXENUMPORTSA; } if (@hasDecl(@This(), "PFAXENUMPORTSW")) { _ = PFAXENUMPORTSW; } if (@hasDecl(@This(), "PFAXGETPORTA")) { _ = PFAXGETPORTA; } if (@hasDecl(@This(), "PFAXGETPORTW")) { _ = PFAXGETPORTW; } if (@hasDecl(@This(), "PFAXSETPORTA")) { _ = PFAXSETPORTA; } if (@hasDecl(@This(), "PFAXSETPORTW")) { _ = PFAXSETPORTW; } if (@hasDecl(@This(), "PFAXENUMROUTINGMETHODSA")) { _ = PFAXENUMROUTINGMETHODSA; } if (@hasDecl(@This(), "PFAXENUMROUTINGMETHODSW")) { _ = PFAXENUMROUTINGMETHODSW; } if (@hasDecl(@This(), "PFAXENABLEROUTINGMETHODA")) { _ = PFAXENABLEROUTINGMETHODA; } if (@hasDecl(@This(), "PFAXENABLEROUTINGMETHODW")) { _ = PFAXENABLEROUTINGMETHODW; } if (@hasDecl(@This(), "PFAXENUMGLOBALROUTINGINFOA")) { _ = PFAXENUMGLOBALROUTINGINFOA; } if (@hasDecl(@This(), "PFAXENUMGLOBALROUTINGINFOW")) { _ = PFAXENUMGLOBALROUTINGINFOW; } if (@hasDecl(@This(), "PFAXSETGLOBALROUTINGINFOA")) { _ = PFAXSETGLOBALROUTINGINFOA; } if (@hasDecl(@This(), "PFAXSETGLOBALROUTINGINFOW")) { _ = PFAXSETGLOBALROUTINGINFOW; } if (@hasDecl(@This(), "PFAXGETROUTINGINFOA")) { _ = PFAXGETROUTINGINFOA; } if (@hasDecl(@This(), "PFAXGETROUTINGINFOW")) { _ = PFAXGETROUTINGINFOW; } if (@hasDecl(@This(), "PFAXSETROUTINGINFOA")) { _ = PFAXSETROUTINGINFOA; } if 
(@hasDecl(@This(), "PFAXSETROUTINGINFOW")) { _ = PFAXSETROUTINGINFOW; } if (@hasDecl(@This(), "PFAXINITIALIZEEVENTQUEUE")) { _ = PFAXINITIALIZEEVENTQUEUE; } if (@hasDecl(@This(), "PFAXFREEBUFFER")) { _ = PFAXFREEBUFFER; } if (@hasDecl(@This(), "PFAXSTARTPRINTJOBA")) { _ = PFAXSTARTPRINTJOBA; } if (@hasDecl(@This(), "PFAXSTARTPRINTJOBW")) { _ = PFAXSTARTPRINTJOBW; } if (@hasDecl(@This(), "PFAXPRINTCOVERPAGEA")) { _ = PFAXPRINTCOVERPAGEA; } if (@hasDecl(@This(), "PFAXPRINTCOVERPAGEW")) { _ = PFAXPRINTCOVERPAGEW; } if (@hasDecl(@This(), "PFAXREGISTERSERVICEPROVIDERW")) { _ = PFAXREGISTERSERVICEPROVIDERW; } if (@hasDecl(@This(), "PFAXUNREGISTERSERVICEPROVIDERW")) { _ = PFAXUNREGISTERSERVICEPROVIDERW; } if (@hasDecl(@This(), "PFAX_ROUTING_INSTALLATION_CALLBACKW")) { _ = PFAX_ROUTING_INSTALLATION_CALLBACKW; } if (@hasDecl(@This(), "PFAXREGISTERROUTINGEXTENSIONW")) { _ = PFAXREGISTERROUTINGEXTENSIONW; } if (@hasDecl(@This(), "PFAXACCESSCHECK")) { _ = PFAXACCESSCHECK; } if (@hasDecl(@This(), "PFAX_SERVICE_CALLBACK")) { _ = PFAX_SERVICE_CALLBACK; } if (@hasDecl(@This(), "PFAX_LINECALLBACK")) { _ = PFAX_LINECALLBACK; } if (@hasDecl(@This(), "PFAX_SEND_CALLBACK")) { _ = PFAX_SEND_CALLBACK; } if (@hasDecl(@This(), "PFAXDEVINITIALIZE")) { _ = PFAXDEVINITIALIZE; } if (@hasDecl(@This(), "PFAXDEVVIRTUALDEVICECREATION")) { _ = PFAXDEVVIRTUALDEVICECREATION; } if (@hasDecl(@This(), "PFAXDEVSTARTJOB")) { _ = PFAXDEVSTARTJOB; } if (@hasDecl(@This(), "PFAXDEVENDJOB")) { _ = PFAXDEVENDJOB; } if (@hasDecl(@This(), "PFAXDEVSEND")) { _ = PFAXDEVSEND; } if (@hasDecl(@This(), "PFAXDEVRECEIVE")) { _ = PFAXDEVRECEIVE; } if (@hasDecl(@This(), "PFAXDEVREPORTSTATUS")) { _ = PFAXDEVREPORTSTATUS; } if (@hasDecl(@This(), "PFAXDEVABORTOPERATION")) { _ = PFAXDEVABORTOPERATION; } if (@hasDecl(@This(), "PFAXDEVCONFIGURE")) { _ = PFAXDEVCONFIGURE; } if (@hasDecl(@This(), "PFAXDEVSHUTDOWN")) { _ = PFAXDEVSHUTDOWN; } if (@hasDecl(@This(), "PFAXROUTEADDFILE")) { _ = PFAXROUTEADDFILE; } if (@hasDecl(@This(), 
"PFAXROUTEDELETEFILE")) { _ = PFAXROUTEDELETEFILE; } if (@hasDecl(@This(), "PFAXROUTEGETFILE")) { _ = PFAXROUTEGETFILE; } if (@hasDecl(@This(), "PFAXROUTEENUMFILE")) { _ = PFAXROUTEENUMFILE; } if (@hasDecl(@This(), "PFAXROUTEENUMFILES")) { _ = PFAXROUTEENUMFILES; } if (@hasDecl(@This(), "PFAXROUTEMODIFYROUTINGDATA")) { _ = PFAXROUTEMODIFYROUTINGDATA; } if (@hasDecl(@This(), "PFAXROUTEINITIALIZE")) { _ = PFAXROUTEINITIALIZE; } if (@hasDecl(@This(), "PFAXROUTEMETHOD")) { _ = PFAXROUTEMETHOD; } if (@hasDecl(@This(), "PFAXROUTEDEVICEENABLE")) { _ = PFAXROUTEDEVICEENABLE; } if (@hasDecl(@This(), "PFAXROUTEDEVICECHANGENOTIFICATION")) { _ = PFAXROUTEDEVICECHANGENOTIFICATION; } if (@hasDecl(@This(), "PFAXROUTEGETROUTINGINFO")) { _ = PFAXROUTEGETROUTINGINFO; } if (@hasDecl(@This(), "PFAXROUTESETROUTINGINFO")) { _ = PFAXROUTESETROUTINGINFO; } if (@hasDecl(@This(), "PFAX_EXT_GET_DATA")) { _ = PFAX_EXT_GET_DATA; } if (@hasDecl(@This(), "PFAX_EXT_SET_DATA")) { _ = PFAX_EXT_SET_DATA; } if (@hasDecl(@This(), "PFAX_EXT_CONFIG_CHANGE")) { _ = PFAX_EXT_CONFIG_CHANGE; } if (@hasDecl(@This(), "PFAX_EXT_REGISTER_FOR_EVENTS")) { _ = PFAX_EXT_REGISTER_FOR_EVENTS; } if (@hasDecl(@This(), "PFAX_EXT_UNREGISTER_FOR_EVENTS")) { _ = PFAX_EXT_UNREGISTER_FOR_EVENTS; } if (@hasDecl(@This(), "PFAX_EXT_FREE_BUFFER")) { _ = PFAX_EXT_FREE_BUFFER; } if (@hasDecl(@This(), "PFAX_EXT_INITIALIZE_CONFIG")) { _ = PFAX_EXT_INITIALIZE_CONFIG; } @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/devices/fax.zig
const layout = @import("layout.zig");
const x86 = @import("x86.zig");
const fmt = @import("std").fmt;
// Pre-0.4 Zig: `use` imports the tty namespace (presumably VGA, Color,
// VGA_WIDTH, VRAM_ADDR, disableCursor, ... -- declared in lib, not visible
// here) directly into this file's scope.
use @import("lib").tty;

// Hold the VGA status.
// NOTE(review): single shared mutable instance -- every print routine below
// writes through this one VGA mapped at VRAM_ADDR.
var vga = VGA.init(VRAM_ADDR);

////
// Initialize the terminal.
//
// Disables the hardware cursor and clears the screen.
//
pub fn initialize() void {
    disableCursor();
    vga.clear();
}

////
// Print a formatted string to screen.
//
// Arguments:
//     format: Format string.
//     args: Parameters for format specifiers.
//
// The error set is empty (writing to VGA memory cannot fail), so the
// result of fmt.format is explicitly discarded.
const Errors = error{};
pub fn print(comptime format: []const u8, args: ...) void {
    _ = fmt.format({}, Errors, printCallback, format, args);
}

// Callback for print.
// fmt.format invokes this with each formatted chunk; the chunk is forwarded
// to the VGA writer. `context` is unused ({} is passed above).
fn printCallback(context: void, string: []const u8) Errors!void {
    vga.writeString(string);
}

////
// Print a string in the given foreground color.
//
// Arguments:
//     fg: Color of the text.
//     format: Format string.
//     args: Parameters for format specifiers.
//
// The previous foreground color is saved and restored, so only this one
// print is affected.
pub fn colorPrint(fg: Color, comptime format: []const u8, args: ...) void {
    const save_foreground = vga.foreground;
    vga.foreground = fg;
    print(format, args);
    vga.foreground = save_foreground;
}

////
// Align the cursor so that it is offset characters from the left border.
//
// Arguments:
//     offset: Number of characters from the left border.
//
// Pads with spaces until the cursor's column equals offset.
// NOTE(review): never terminates if offset >= VGA_WIDTH -- callers are
// expected to pass in-range offsets.
pub fn alignLeft(offset: usize) void {
    while (vga.cursor % VGA_WIDTH != offset) {
        vga.writeChar(' ');
    }
}

////
// Align the cursor so that it is offset characters from the right border.
//
// Arguments:
//     offset: Number of characters from the right border.
//
pub fn alignRight(offset: usize) void {
    alignLeft(VGA_WIDTH - offset);
}

////
// Align the cursor to horizontally center a string.
//
// Arguments:
//     str_len: Length of the string to be centered.
//
pub fn alignCenter(str_len: usize) void {
    alignLeft((VGA_WIDTH - str_len) / 2);
}

////
// Signal an unrecoverable error and hang the computer.
//
// Arguments:
//     format: Format string.
//     args: Parameters for format specifiers.
//
pub fn panic(comptime format: []const u8, args: ...) noreturn {
    // We may be interrupting user mode, so we disable the hardware cursor
    // and fetch its current position, and start writing from there.
    disableCursor();
    vga.fetchCursor();
    vga.writeChar('\n');

    // White-on-red banner; the background is never restored because we
    // hang immediately after printing.
    vga.background = Color.Red;
    colorPrint(Color.White, "KERNEL PANIC: " ++ format ++ "\n", args);

    x86.hang();
}

////
// Print a loading step.
//
// Arguments:
//     format: Format string.
//     args: Parameters for format specifiers.
//
// Emits a ">> " prefix followed by the message and "...". stepOK() is
// expected to complete the line.
pub fn step(comptime format: []const u8, args: ...) void {
    colorPrint(Color.LightBlue, ">> ");
    print(format ++ "...", args);
}

////
// Signal that a loading step completed successfully.
//
// Right-aligns a green " [ OK ]" marker on the current line.
pub fn stepOK() void {
    const ok = " [ OK ]";
    alignRight(ok.len);
    colorPrint(Color.LightGreen, ok);
}
kernel/tty.zig
const std = @import("std");
const concepts = @import("../../lib.zig").concepts;

const assert = std.debug.assert;

/// Returns an implementation of the `getty.de.Visitor` interface.
///
/// `Context` is the namespace that owns the provided method implementations
/// and `Value` is the type produced by a successful visit. Each `visit*`
/// parameter's type is derived (via `@TypeOf` on a placeholder function) so
/// that implementations must match the interface's signatures exactly; the
/// placeholder bodies are never called.
pub fn Visitor(
    comptime Context: type,
    comptime Value: type,
    comptime visitBool: @TypeOf(struct {
        fn f(self: Context, comptime Error: type, input: bool) Error!Value {
            _ = self;
            _ = input;

            unreachable;
        }
    }.f),
    comptime visitEnum: @TypeOf(struct {
        fn f(self: Context, comptime Error: type, input: anytype) Error!Value {
            _ = self;
            _ = input;

            unreachable;
        }
    }.f),
    comptime visitFloat: @TypeOf(struct {
        fn f(self: Context, comptime Error: type, input: anytype) Error!Value {
            _ = self;
            _ = input;

            unreachable;
        }
    }.f),
    comptime visitInt: @TypeOf(struct {
        fn f(self: Context, comptime Error: type, input: anytype) Error!Value {
            _ = self;
            _ = input;

            unreachable;
        }
    }.f),
    comptime visitMap: @TypeOf(struct {
        fn f(self: Context, mapAccess: anytype) @TypeOf(mapAccess).Error!Value {
            _ = self;
            _ = mapAccess;

            unreachable;
        }
    }.f),
    comptime visitNull: @TypeOf(struct {
        fn f(self: Context, comptime Error: type) Error!Value {
            _ = self;

            unreachable;
        }
    }.f),
    comptime visitSequence: @TypeOf(struct {
        fn f(self: Context, sequenceAccess: anytype) @TypeOf(sequenceAccess).Error!Value {
            _ = self;
            _ = sequenceAccess;

            unreachable;
        }
    }.f),
    comptime visitString: @TypeOf(struct {
        fn f(self: Context, comptime E: type, input: anytype) E!Value {
            _ = self;
            _ = input;

            unreachable;
        }
    }.f),
    comptime visitSome: @TypeOf(struct {
        fn f(self: Context, deserializer: anytype) @TypeOf(deserializer).Error!Value {
            _ = self;
            // Discard the parameter like the sibling placeholders
            // (visitMap/visitSequence) do; it is otherwise only used in the
            // return type expression.
            _ = deserializer;

            unreachable;
        }
    }.f),
    comptime visitVoid: @TypeOf(struct {
        fn f(self: Context, comptime Error: type) Error!Value {
            _ = self;

            unreachable;
        }
    }.f),
) type {
    return struct {
        pub const @"getty.de.Visitor" = struct {
            context: Context,

            const Self = @This();

            /// The type produced by this visitor.
            pub const Value = Value;

            pub fn visitBool(self: Self, comptime Error: type, input: bool) Error!Value {
                return try visitBool(self.context, Error, input);
            }

            /// `input` must be an enum value or an enum literal; anything
            /// else is rejected at compile time.
            pub fn visitEnum(self: Self, comptime Error: type, input: anytype) Error!Value {
                comptime {
                    switch (@typeInfo(@TypeOf(input))) {
                        .Enum, .EnumLiteral => {},
                        else => @compileError("expected enum or enum literal, found `" ++ @typeName(@TypeOf(input)) ++ "`"),
                    }
                }

                return try visitEnum(self.context, Error, input);
            }

            /// `input` must be a runtime or comptime float.
            pub fn visitFloat(self: Self, comptime Error: type, input: anytype) Error!Value {
                comptime {
                    switch (@typeInfo(@TypeOf(input))) {
                        .Float, .ComptimeFloat => {},
                        else => @compileError("expected floating-point, found `" ++ @typeName(@TypeOf(input)) ++ "`"),
                    }
                }

                return try visitFloat(self.context, Error, input);
            }

            /// `input` must be a runtime or comptime integer.
            pub fn visitInt(self: Self, comptime Error: type, input: anytype) Error!Value {
                comptime {
                    switch (@typeInfo(@TypeOf(input))) {
                        .Int, .ComptimeInt => {},
                        else => @compileError("expected integer, found `" ++ @typeName(@TypeOf(input)) ++ "`"),
                    }
                }

                return try visitInt(self.context, Error, input);
            }

            /// `mapAccess` must satisfy the `getty.de.MapAccess` concept
            /// (checked in the return-type block).
            pub fn visitMap(self: Self, mapAccess: anytype) blk: {
                concepts.@"getty.de.MapAccess"(@TypeOf(mapAccess));
                break :blk @TypeOf(mapAccess).Error!Value;
            } {
                return try visitMap(self.context, mapAccess);
            }

            pub fn visitNull(self: Self, comptime Error: type) Error!Value {
                return try visitNull(self.context, Error);
            }

            /// `sequenceAccess` must satisfy the `getty.de.SequenceAccess`
            /// concept (checked in the return-type block).
            ///
            /// The visitor is responsible for visiting the entire sequence. Note
            /// that this implies that `sequenceAccess` must be able to identify
            /// the end of a sequence when it is encountered.
            pub fn visitSequence(self: Self, sequenceAccess: anytype) blk: {
                concepts.@"getty.de.SequenceAccess"(@TypeOf(sequenceAccess));
                break :blk @TypeOf(sequenceAccess).Error!Value;
            } {
                return try visitSequence(self.context, sequenceAccess);
            }

            /// `deserializer` must satisfy the `getty.Deserializer` concept
            /// (checked in the return-type block).
            pub fn visitSome(self: Self, deserializer: anytype) blk: {
                concepts.@"getty.Deserializer"(@TypeOf(deserializer));
                break :blk @TypeOf(deserializer).Error!Value;
            } {
                return try visitSome(self.context, deserializer);
            }

            /// `input` must be a Zig string type.
            ///
            /// The visitor is responsible for visiting the entire slice.
            pub fn visitString(self: Self, comptime Error: type, input: anytype) Error!Value {
                comptime {
                    if (!std.meta.trait.isZigString(@TypeOf(input))) {
                        @compileError("expected string, found `" ++ @typeName(@TypeOf(input)) ++ "`");
                    }
                }

                return try visitString(self.context, Error, input);
            }

            pub fn visitVoid(self: Self, comptime Error: type) Error!Value {
                return try visitVoid(self.context, Error);
            }
        };

        /// Wraps `ctx` in a `getty.de.Visitor` interface value.
        pub fn visitor(ctx: Context) @"getty.de.Visitor" {
            return .{ .context = ctx };
        }
    };
}
src/de/interface/visitor.zig
const std = @import("std"); const stdx = @import("stdx"); const algo = stdx.algo; const log = stdx.log.scoped(.grammar); const ds = stdx.ds; const parser = @import("parser.zig"); const NodeTag = parser.NodeTag; const NodePtr = parser.NodePtr; const builder = @import("builder.zig"); pub const Grammar = struct { const Self = @This(); alloc: std.mem.Allocator, token_decls: std.ArrayList(TokenDecl), // The decls used in the first pass to start matching each char. // After a match, the matched string can be replaced by another decl with @replace. token_main_decls: std.ArrayList(TokenDecl), // Maps a literal str to its tag. Tag is then used for fast comparisons. literal_tag_map: stdx.ds.OwnedKeyStringHashMap(LiteralTokenTag), next_literal_tag: LiteralTokenTag, decls: std.ArrayList(RuleDecl), ops: std.ArrayList(MatchOp), token_ops: std.ArrayList(TokenMatchOp), root_rule_name: []const u8, root_rule_id: RuleId, // String buf for dupes and unescaped strings. str_buf: std.ArrayList(u8), // Prevent allocating duplicate strings. str_buf_map: std.StringHashMap(CharSlice), // Buffer for charset ranges. charset_range_buf: std.ArrayList(CharSetRange), // Special node tags. decl_tag_end: NodeTag, // Exclusive. // TODO: Make these start at predefined values starting at 0 so we get comptime branching. // Would require realloc on readonly decls list since tags are used to index into them. null_node_tag: NodeTag, // Represents a null node. Used when setting node child fields. node_list_tag: NodeTag, string_value_tag: NodeTag, token_value_tag: NodeTag, char_value_tag: NodeTag, // Transient vars. 
// Transient op/bit buffers reused while analyzing the grammar; owned by
// this struct and freed in deinit like the other containers.
    token_match_op_buf: std.ArrayList(*TokenMatchOp),
    match_op_buf: std.ArrayList(*MatchOp),
    bit_buf: ds.BitArrayList,

    // Initialize in place. Every container field is created against `alloc`;
    // tag fields and root_rule_id stay `undefined` until Grammar.build
    // resolves them. `root_rule_name` is borrowed, not copied -- the caller
    // must keep it alive for the Grammar's lifetime.
    pub fn init(self: *Self, alloc: std.mem.Allocator, root_rule_name: []const u8) void {
        self.* = .{
            .alloc = alloc,
            .token_decls = std.ArrayList(TokenDecl).init(alloc),
            .token_main_decls = std.ArrayList(TokenDecl).init(alloc),
            .literal_tag_map = stdx.ds.OwnedKeyStringHashMap(LiteralTokenTag).init(alloc),
            // Literal tags start just past the reserved null tag.
            .next_literal_tag = NullLiteralTokenTag + 1,
            .decls = std.ArrayList(RuleDecl).init(alloc),
            .ops = std.ArrayList(MatchOp).init(alloc),
            .token_ops = std.ArrayList(TokenMatchOp).init(alloc),
            .root_rule_name = root_rule_name,
            // Resolved later by Grammar.build; reading them before that is
            // undefined behavior.
            .root_rule_id = undefined,
            .node_list_tag = undefined,
            .string_value_tag = undefined,
            .char_value_tag = undefined,
            .token_value_tag = undefined,
            .decl_tag_end = undefined,
            .null_node_tag = undefined,
            .token_match_op_buf = std.ArrayList(*TokenMatchOp).init(alloc),
            .match_op_buf = std.ArrayList(*MatchOp).init(alloc),
            .bit_buf = ds.BitArrayList.init(alloc),
            .str_buf = std.ArrayList(u8).init(alloc),
            .str_buf_map = std.StringHashMap(CharSlice).init(alloc),
            .charset_range_buf = std.ArrayList(CharSetRange).init(alloc),
        };
    }

    // Release every container created in init. Deallocation cannot fail.
    // Note: str_buf_map keys are slices owned elsewhere (interned via
    // addString into str_buf), so only the map itself is freed here.
    pub fn deinit(self: *Self) void {
        self.literal_tag_map.deinit();
        self.token_decls.deinit();
        self.token_main_decls.deinit();
        self.token_ops.deinit();
        self.ops.deinit();
        self.decls.deinit();
        self.token_match_op_buf.deinit();
        self.match_op_buf.deinit();
        self.bit_buf.deinit();
        self.str_buf.deinit();
        self.str_buf_map.deinit();
        self.charset_range_buf.deinit();
    }

    // Should only be used before Grammar.build.
    /// Interns `str` into str_buf and returns its relative slice.
    /// Duplicate strings share one slice via str_buf_map, so a CharSlice
    /// can be used as a unique key for the string contents.
    fn addString(self: *Self, str: []const u8) CharSlice {
        const item = self.str_buf_map.getOrPut(str) catch unreachable;
        if (!item.found_existing) {
            const start = self.str_buf.items.len;
            self.str_buf.appendSlice(str) catch unreachable;
            const slice = CharSlice{ .start = @intCast(u32, start), .end = @intCast(u32, self.str_buf.items.len) };
            item.value_ptr.* = slice;
        }
        return item.value_ptr.*;
    }

    pub fn getString(self: *Self, slice: CharSlice) []const u8 {
        return self.str_buf.items[slice.start..slice.end];
    }

    /// Assigns the next literal tag to `str` and returns it.
    /// Note the defer runs after the return value is computed, so the
    /// pre-increment tag is the one returned and stored in the map.
    fn addLiteralTokenTag(self: *Self, str: []const u8) LiteralTokenTag {
        if (self.literal_tag_map.get(str)) |_| {
            // For now prevent multiple declarations of the same literal.
            stdx.panicFmt("already added literal '{s}'", .{str});
        }
        self.literal_tag_map.put(str, self.next_literal_tag) catch unreachable;
        defer self.next_literal_tag += 1;
        return self.next_literal_tag;
    }

    // Build config before it can be used by the Parser.
    pub fn build(self: *Self, alloc: std.mem.Allocator) void {
        self.root_rule_id = self.findRuleDeclId(self.root_rule_name) orelse stdx.panicFmt("Couldn't find {s}", .{self.root_rule_name});

        // Assumes CharSlice is a unique key to the same string.
        var name_to_token_tag = std.AutoHashMap(CharSlice, TokenTag).init(alloc);
        defer name_to_token_tag.deinit();
        var name_to_rule = std.AutoHashMap(CharSlice, RuleId).init(alloc);
        defer name_to_rule.deinit();

        for (self.token_decls.items) |decl, idx| {
            name_to_token_tag.put(decl.name, @intCast(u32, idx)) catch unreachable;
            // Generate Rules that wrap TokenRules with a string value as the only field.
            const name = self.getString(decl.name);
            self.addRule(name, self.matchTokCap(name));
            if (decl.replace_name != null) {
                // Link the @replace target back to this decl.
                const target_tag = self.findTokenDecl(decl.replace_name.?) orelse stdx.panicFmt("Couldn't find {s}", .{decl.replace_name.?});
                const target_decl = &self.token_decls.items[target_tag];
                target_decl.replace_with = @intCast(u32, idx);
            }
        }
        for (self.decls.items) |it, idx| {
            name_to_rule.put(it.name, @intCast(u32, idx)) catch unreachable;
        }

        prepareTokenMatchOps(self, &name_to_token_tag);

        // Token processing after op resolving.
        for (self.token_decls.items) |decl| {
            if (decl.is_literal) {
                const op = self.token_ops.items[decl.op_id];
                // For now, just look for MatchChoice op and assign literal tag to the immediate MatchText children.
                if (op == .MatchChoice) {
                    var i = op.MatchChoice.ops.start;
                    while (i < op.MatchChoice.ops.end) : (i += 1) {
                        const child_op = self.token_ops.items[i];
                        if (child_op == .MatchText) {
                            const str = self.getString(child_op.MatchText.str);
                            _ = self.addLiteralTokenTag(str);
                        } else if (child_op == .MatchExactChar) {
                            _ = self.addLiteralTokenTag(&[_]u8{child_op.MatchExactChar.ch});
                        }
                    }
                } else if (op == .MatchText) {
                    const str = self.getString(op.MatchText.str);
                    _ = self.addLiteralTokenTag(str);
                } else stdx.panicFmt("unsupported {s}", .{@tagName(op)});
            }
        }

        prepareMatchOps(self, &name_to_token_tag, &name_to_rule);

        // Compute node data sizes after we resolved all capture vars in op_ids.
        for (self.decls.items) |*it| {
            var num_child_items: u32 = 0;
            var i = it.ops.start;
            while (i < it.ops.end) : (i += 1) {
                const op = self.ops.items[i];
                const capture = self.shouldCaptureRule(&op);
                if (capture) {
                    num_child_items += 1;
                }
            }
            it.num_child_items = num_child_items;
            it.data_size = num_child_items * @sizeOf(NodePtr);
        }

        // For each rule, check if it can do left term recursion.
        var visited_map = std.AutoHashMap(RuleId, void).init(alloc);
        defer visited_map.deinit();
        for (self.decls.items) |*decl, idx| {
            const rule_id = @intCast(u32, idx);
            decl.is_left_recursive = self.isLeftRecursive(rule_id, &visited_map);
        }

        // Special tags start after decls so that decls can be accessed by their tags directly.
        self.decl_tag_end = @intCast(NodeTag, self.decls.items.len);
        self.node_list_tag = self.decl_tag_end;
        self.string_value_tag = self.decl_tag_end + 1;
        self.char_value_tag = self.decl_tag_end + 2;
        self.null_node_tag = self.decl_tag_end + 3;
        self.token_value_tag = self.decl_tag_end + 4;

        // Generate the main token decls after they have computed values set.
        for (self.token_decls.items) |it| {
            if (it.replace_name == null) {
                self.token_main_decls.append(it) catch unreachable;
            }
        }
    }

    // Checks against the first term of a match op.
    // We need to track visited sub rules or the walking will be recursive.
    fn isLeftRecursive(self: *Self, rule_id: RuleId, visited_map: *std.AutoHashMap(RuleId, void)) bool {
        const Result = enum {
            NotLeftRecursive,
            LeftRecursive,
            // Indicates that a subbranch contains a MatchRule that matches the root.
            // A parent MatchSeq still needs to determine if it is a LeftRecursive.
            FoundSameMatchRule,
        };
        const S = struct {
            rule_id: RuleId,
            config: *Grammar,
            visited_map: *std.AutoHashMap(RuleId, void),

            // Recursively inspects the left-most term of `op_id`.
            fn visitFirstOpTerm(ctx: *@This(), op_id: MatchOpId) Result {
                const op = ctx.config.getMatchOp(op_id);
                switch (op) {
                    .MatchSeq => |inner| {
                        const res = visitFirstOpTerm(ctx, inner.ops.start);
                        if (res == .FoundSameMatchRule) {
                            // A self-match is only left recursion when terms follow it.
                            const len = inner.ops.len();
                            if (len == 1) {
                                return res;
                            } else if (len > 1) {
                                return .LeftRecursive;
                            } else {
                                unreachable;
                            }
                        } else {
                            return res;
                        }
                    },
                    .MatchOptional => |inner| {
                        return visitFirstOpTerm(ctx, inner.op_id);
                    },
                    .MatchRule => |inner| {
                        if (ctx.rule_id == inner.rule_id) {
                            return .FoundSameMatchRule;
                        } else {
                            if (ctx.visited_map.contains(inner.rule_id)) {
                                return .NotLeftRecursive;
                            }
                            ctx.visited_map.put(inner.rule_id, {}) catch unreachable;
                            const rule = ctx.config.getRule(inner.rule_id);
                            const res = visitFirstOpTerm(ctx, rule.ops.start);
                            if (res == .FoundSameMatchRule) {
                                const num_ops = rule.ops.len();
                                if (num_ops == 1) {
                                    return res;
                                } else if (num_ops > 1) {
                                    return .LeftRecursive;
                                } else {
                                    unreachable;
                                }
                            } else {
                                return res;
                            }
                        }
                    },
                    .MatchChoice => |inner| {
                        // Any branch that is left recursive makes the choice left recursive.
                        var i = inner.ops.start;
                        while (i < inner.ops.end) : (i += 1) {
                            const res = visitFirstOpTerm(ctx, i);
                            if (res == .LeftRecursive or res == .FoundSameMatchRule) {
                                return res;
                            }
                        }
                        return .NotLeftRecursive;
                    },
                    .MatchNegLookahead, .MatchPosLookahead, .MatchLiteral, .MatchToken, .MatchTokenText, .MatchOneOrMore, .MatchZeroOrMore => {
                        return .NotLeftRecursive;
                    },
                    // else => stdx.panicFmt("unsupported {s}", .{@tagName(op)}),
                }
            }
        };
        var ctx = S{ .config = self, .rule_id = rule_id, .visited_map = visited_map };
        const rule = self.getRule(rule_id);
        visited_map.clearRetainingCapacity();
        return S.visitFirstOpTerm(&ctx, rule.ops.start) == .LeftRecursive;
    }

    /// Whether matching `rule` produces a child node/value in the parsed ast.
    pub fn shouldCaptureRule(self: *Self, rule: *const MatchOp) bool {
        _ = self;
        return switch (rule.*) {
            .MatchToken => |inner| inner.capture,
            .MatchTokenText => |inner| inner.capture,
            .MatchRule => |inner| !inner.skip,
            .MatchZeroOrMore => |inner| !inner.skip,
            .MatchOneOrMore => |_| true,
            .MatchSeq => |inner| inner.computed_capture,
            .MatchChoice => |inner| inner.computed_capture,
            .MatchOptional => |inner| inner.computed_capture,
            .MatchNegLookahead => false,
            .MatchPosLookahead => false,
            .MatchLiteral => |inner| inner.capture,
        };
    }

    pub fn getNumChildFields(self: *Self, tag: NodeTag) u32 {
        if (tag < self.node_list_tag) {
            return self.decls.items[tag].num_child_items;
        } else if (tag == self.node_list_tag) {
            return 1;
        } else {
            stdx.panicFmt("unsupported tag {}", .{tag});
        }
    }

    pub fn getNodeDataSize(self: *Self, tag: NodeTag) u32 {
        if (tag < self.node_list_tag) {
            return self.decls.items[tag].data_size;
        } else if (tag == self.node_list_tag) {
            return @sizeOf([]const NodePtr);
        } else {
            stdx.panicFmt("unsupported tag {}", .{tag});
        }
    }

    pub fn getMatchOp(self: *Self, id: MatchOpId) MatchOp {
        return self.ops.items[id];
    }

    pub fn getMatchOpName(self: *Self, id: MatchOpId) []const u8 {
        return @tagName(self.ops.items[id]);
    }

    // Formats "<OpName> [literal]" into a shared static buffer; the returned
    // slice is overwritten by the next call (debug/diagnostic use only).
    pub fn getMatchOpDesc(self: *Self, id: MatchOpId) []const u8 {
        const static = struct {
            var buf:
[128]u8 = undefined;
        };
        const op = self.ops.items[id];
        var fbs = std.io.fixedBufferStream(&static.buf);
        var writer = fbs.writer();
        std.fmt.format(writer, "{s} ", .{@tagName(op)}) catch {};
        if (op == .MatchLiteral) {
            const str = self.getString(op.MatchLiteral.str);
            std.fmt.format(writer, "{s} ", .{str}) catch {};
        }
        return fbs.getWritten();
    }

    pub fn getRule(self: *Self, id: RuleId) RuleDecl {
        return self.decls.items[id];
    }

    pub fn getRuleName(self: *Self, id: RuleId) []const u8 {
        return self.getString(self.decls.items[id].name);
    }

    pub fn getTokenName(self: *Self, tag: TokenTag) []const u8 {
        return self.getString(self.token_decls.items[tag].name);
    }

    /// Debug name for any node tag, including the special tags set in `build`.
    pub fn getNodeTagName(self: *Self, id: NodeTag) []const u8 {
        if (id < self.decl_tag_end) {
            return self.getRuleName(id);
        } else if (id == self.node_list_tag) {
            return "NodeList";
        } else if (id == self.string_value_tag) {
            return "String";
        } else if (id == self.char_value_tag) {
            return "Char";
        } else if (id == self.null_node_tag) {
            return "Null";
        } else if (id == self.token_value_tag) {
            return "TokenString";
        } else {
            stdx.panicFmt("unsupported {}", .{id});
        }
    }

    // Linear scan over token decls by name.
    fn findTokenDecl(self: *Self, name: []const u8) ?TokenTag {
        for (self.token_decls.items) |it, idx| {
            const token_name = self.getString(it.name);
            if (std.mem.eql(u8, token_name, name)) {
                return @intCast(u32, idx);
            }
        }
        return null;
    }

    // Linear scan over rule decls by name.
    pub fn findRuleDeclId(self: *Self, name: []const u8) ?RuleId {
        for (self.decls.items) |it, idx| {
            const rule_name = self.getString(it.name);
            if (std.mem.eql(u8, rule_name, name)) {
                return @intCast(u32, idx);
            }
        }
        return null;
    }

    pub fn findRuleDecl(self: *Self, name: []const u8) ?RuleDecl {
        const mb_id = self.findRuleDeclId(name);
        return if (mb_id) |id| self.getRule(id) else null;
    }

    pub fn addTokenOp(self: *Self, op: TokenMatchOp) TokenMatchOpId {
        self.token_ops.append(op) catch unreachable;
        return @intCast(TokenMatchOpId, self.token_ops.items.len - 1);
    }

    /// Appends `ops` contiguously and returns the slice covering them.
    /// Asserts at runtime that `ops` is non-empty (indexes ops[0]).
    pub fn addTokenOps(self: *Self, ops: []const TokenMatchOp) TokenMatchOpSlice {
        const first = self.addTokenOp(ops[0]);
        for (ops[1..]) |it| {
            _ = self.addTokenOp(it);
        }
        return .{ .start = first, .end = first + @intCast(u32, ops.len) };
    }

    pub fn addTokenRule(self: *Self, name: []const u8, op: TokenMatchOp) void {
        self.addTokenRuleExt(name, op, false, false, null);
    }

    pub fn addTokenRuleExt(self: *Self, name: []const u8, op: TokenMatchOp, is_literal: bool, skip: bool, replace: ?[]const u8) void {
        const name_slice = self.addString(name);
        const op_id = self.addTokenOp(op);
        // The decl's tag is simply its index in token_decls.
        const tag = @intCast(u32, self.token_decls.items.len);
        self.token_decls.append(TokenDecl.init(tag, name_slice, op_id, is_literal, skip, replace)) catch unreachable;
    }

    pub fn addRuleExt(self: *Self, name: []const u8, op: MatchOp, is_inline: bool) void {
        const name_slice = self.addString(name);
        const op_id = self.addOp(op);
        if (op == .MatchSeq) {
            // A top-level seq's ops become the rule's ops directly.
            self.decls.append(RuleDecl.init(name_slice, op.MatchSeq.ops, is_inline)) catch unreachable;
        } else {
            const slice = MatchOpSlice{ .start = op_id, .end = op_id + 1 };
            self.decls.append(RuleDecl.init(name_slice, slice, is_inline)) catch unreachable;
        }
    }

    pub fn addRule(self: *Self, name: []const u8, op: MatchOp) void {
        self.addRuleExt(name, op, false);
    }

    pub fn addInlineRule(self: *Self, name: []const u8, op: MatchOp) void {
        self.addRuleExt(name, op, true);
    }

    pub fn addOp(self: *Self, op: MatchOp) MatchOpId {
        self.ops.append(op) catch unreachable;
        return @intCast(MatchOpId, self.ops.items.len - 1);
    }

    /// Appends `ops` contiguously and returns the slice covering them.
    pub fn addOps(self: *Self, ops: []const MatchOp) MatchOpSlice {
        const first = self.addOp(ops[0]);
        for (ops[1..]) |it| {
            _ = self.addOp(it);
        }
        return .{ .start = first, .end = first + @intCast(u32, ops.len) };
    }

    /// Builder: match token by tag name and capture its value.
    pub fn matchTokCap(self: *Self, tag_name: []const u8) MatchOp {
        const slice = self.addString(tag_name);
        return .{
            .MatchToken = .{
                .capture = true,
                .tag_name = slice,
                .tag = undefined,
            },
        };
    }

    /// Builder: match a token with an exact text value (not captured).
    pub fn matchTokText(self: *Self, tag_name: []const u8, str: []const u8) MatchOp {
        return .{
            .MatchTokenText = .{
                .capture = false,
                .tag_name =
self.addString(tag_name),
                .tag = undefined,
                .str = self.addString(str),
            },
        };
    }

    /// Builder: match a sub rule by name (rule_id resolved in build).
    pub fn matchRule(self: *Self, name: []const u8) MatchOp {
        const slice = self.addString(name);
        return .{
            .MatchRule = .{
                .skip = false,
                .name = slice,
                .rule_id = undefined,
            },
        };
    }

    /// Builder: match a literal token (not captured).
    pub fn matchLiteral(self: *Self, str: []const u8) MatchOp {
        const slice = self.addString(str);
        return .{
            .MatchLiteral = .{
                .capture = false,
                .computed_literal_tag = undefined,
                .str = slice,
            },
        };
    }

    /// Builder: match a literal token and capture it.
    pub fn matchLiteralCap(self: *Self, str: []const u8) MatchOp {
        const slice = self.addString(str);
        return .{
            .MatchLiteral = .{
                .capture = true,
                .computed_literal_tag = undefined,
                .str = slice,
            },
        };
    }

    /// Builder: token op matching exact text.
    pub fn tokMatchText(self: *Self, str: []const u8) TokenMatchOp {
        const slice = self.addString(str);
        return .{ .MatchText = .{
            .str = slice,
        } };
    }
};

// Returns a Walker that enumerates the direct child ops of a TokenMatchOp,
// used by the prepare passes to traverse each token decl's op tree.
fn initTokenMatchOpWalker(op_ids: []TokenMatchOp) algo.Walker([]TokenMatchOp, *TokenMatchOp) {
    const S = struct {
        fn _walk(ctx: *algo.WalkerContext(*TokenMatchOp), ops: []TokenMatchOp, op: *TokenMatchOp) void {
            // NOTE(review): ctx is discarded here yet used in branches below;
            // accepted by the Zig version this file targets.
            _ = ctx;
            switch (op.*) {
                .MatchText, .MatchUntilChar, .MatchExactChar, .MatchNotChar, .MatchRangeChar, .MatchAsciiLetter, .MatchDigit, .MatchRegexChar, .MatchCharSet, .MatchNotCharSet, .MatchRule => {
                    // Nop: leaf ops have no children.
                },
                .MatchOneOrMore => |inner| {
                    ctx.beginAddNode(1);
                    ctx.addNode(&ops[inner.op_id]);
                },
                .MatchZeroOrMore => |inner| {
                    ctx.beginAddNode(1);
                    ctx.addNode(&ops[inner.op_id]);
                },
                .MatchChoice => |inner| {
                    var i = inner.ops.start;
                    ctx.beginAddNode(inner.ops.len());
                    while (i < inner.ops.end) : (i += 1) {
                        var _op = &ops[i];
                        ctx.addNode(_op);
                    }
                },
                .MatchOptional => |m| {
                    ctx.beginAddNode(1);
                    ctx.addNode(&ops[m.op_id]);
                },
                .MatchNegLookahead => |m| {
                    ctx.beginAddNode(1);
                    ctx.addNode(&ops[m.op_id]);
                },
                .MatchPosLookahead => |m| {
                    ctx.beginAddNode(1);
                    ctx.addNode(&ops[m.op_id]);
                },
                .MatchSeq => |inner| {
                    var i = inner.ops.start;
                    ctx.beginAddNode(inner.ops.len());
                    while (i < inner.ops.end) : (i += 1) {
                        var _op = &ops[i];
                        ctx.addNode(_op);
                    }
                },
            }
        }
    };
    return algo.Walker([]TokenMatchOp, *TokenMatchOp).init(op_ids, S._walk);
}

// Same as initTokenMatchOpWalker but over the parse-rule MatchOps.
fn initMatchOpWalker(op_ids: []MatchOp) algo.Walker([]MatchOp, *MatchOp) {
    const S = struct {
        fn _walk(ctx: *algo.WalkerContext(*MatchOp), ops: []MatchOp, op: *MatchOp) void {
            switch (op.*) {
                .MatchLiteral, .MatchToken, .MatchRule, .MatchTokenText => {
                    // Nop.
                },
                .MatchNegLookahead => |m| {
                    var inner = &ops[m.op_id];
                    ctx.beginAddNode(1);
                    ctx.addNode(inner);
                },
                .MatchPosLookahead => |m| {
                    var inner = &ops[m.op_id];
                    ctx.beginAddNode(1);
                    ctx.addNode(inner);
                },
                .MatchOptional => |inner| {
                    var _op = &ops[inner.op_id];
                    ctx.beginAddNode(1);
                    ctx.addNode(_op);
                },
                .MatchSeq => |inner| {
                    var i = inner.ops.start;
                    ctx.beginAddNode(inner.ops.len());
                    while (i < inner.ops.end) : (i += 1) {
                        var _op = &ops[i];
                        ctx.addNode(_op);
                    }
                },
                .MatchChoice => |inner| {
                    var i = inner.ops.start;
                    ctx.beginAddNode(inner.ops.len());
                    while (i < inner.ops.end) : (i += 1) {
                        var _op = &ops[i];
                        ctx.addNode(_op);
                    }
                },
                .MatchOneOrMore => |inner| {
                    var _op = &ops[inner.op_id];
                    ctx.beginAddNode(1);
                    ctx.addNode(_op);
                },
                .MatchZeroOrMore => |inner| {
                    var _op = &ops[inner.op_id];
                    ctx.beginAddNode(1);
                    ctx.addNode(_op);
                },
            }
        }
    };
    return algo.Walker([]MatchOp, *MatchOp).init(op_ids, S._walk);
}

// Resolves token-op name references to tags and materializes charset slices.
// Runs a post-order walk over each token decl's op tree.
fn prepareTokenMatchOps(config: *Grammar, name_to_token_tag: *const std.AutoHashMap(CharSlice, TokenTag)) void {
    const S = struct {
        config: *Grammar,
        name_to_token_tag: *const std.AutoHashMap(CharSlice, TokenTag),

        fn resolve(_: *algo.VisitContext(.{}), c: *@This(), op: *TokenMatchOp) void {
            switch (op.*) {
                .MatchRule => |*inner| {
                    inner.tag = c.config.findTokenDecl(inner.name).?;
                },
                .MatchCharSet => |*inner| {
                    // TODO: Remove resolved fields, it's probably slower than referencing str_buf directly.
                    inner.resolved_charset = c.config.str_buf.items[inner.charset.start..inner.charset.end];
                },
                .MatchNotCharSet => |*inner| {
                    inner.resolved_charset = c.config.str_buf.items[inner.charset.start..inner.charset.end];
                },
                .MatchOptional,
                .MatchText,
                .MatchUntilChar,
                .MatchExactChar,
                .MatchNotChar,
                .MatchRangeChar,
                .MatchAsciiLetter,
                .MatchDigit,
                .MatchRegexChar,
                .MatchZeroOrMore,
                .MatchOneOrMore,
                .MatchChoice,
                .MatchNegLookahead,
                .MatchPosLookahead,
                .MatchSeq,
                => {},
            }
        }
    };
    var ctx = S{
        .config = config,
        .name_to_token_tag = name_to_token_tag,
    };
    var walker = initTokenMatchOpWalker(config.token_ops.items);
    for (config.token_decls.items) |decl| {
        const op = &config.token_ops.items[decl.op_id];
        algo.walkPost(.{}, *S, &ctx, *TokenMatchOp, op, walker.getIface(), S.resolve, &config.token_match_op_buf, &config.bit_buf);
    }
}

// Resolves tag/rule-id name references in parse-rule ops and computes the
// `computed_capture` flags bottom-up (children are visited before parents
// via the post-order walk below).
fn prepareMatchOps(g: *Grammar, name_to_token_tag: *const std.AutoHashMap(CharSlice, TokenTag), name_to_rule: *const std.AutoHashMap(CharSlice, RuleId)) void {
    const S = struct {
        g: *Grammar,
        name_to_token_tag: *const std.AutoHashMap(CharSlice, TokenTag),
        name_to_rule: *const std.AutoHashMap(CharSlice, RuleId),

        fn resolve(_: *algo.VisitContext(.{}), self: *@This(), op: *MatchOp) void {
            // Set match op tags.
            // Compute capture.
            switch (op.*) {
                .MatchToken => |*inner| {
                    inner.tag = self.name_to_token_tag.get(inner.tag_name) orelse stdx.panicFmt("Couldn't find {s}", .{self.g.getString(inner.tag_name)});
                },
                .MatchTokenText => |*inner| {
                    inner.tag = self.name_to_token_tag.get(inner.tag_name) orelse stdx.panicFmt("Couldn't find {s}", .{self.g.getString(inner.tag_name)});
                },
                .MatchRule => |*inner| {
                    inner.rule_id = self.name_to_rule.get(inner.name) orelse stdx.panicFmt("Couldn't find {s}", .{self.g.getString(inner.name)});
                },
                .MatchOptional => |*inner| {
                    const child_rule = self.g.ops.items[inner.op_id];
                    inner.computed_capture = self.g.shouldCaptureRule(&child_rule);
                },
                .MatchSeq => |*inner| b: {
                    // Captures if any child captures.
                    var i = inner.ops.start;
                    while (i < inner.ops.end) : (i += 1) {
                        const child_op = self.g.ops.items[i];
                        if (self.g.shouldCaptureRule(&child_op)) {
                            inner.computed_capture = true;
                            break :b;
                        }
                    }
                    inner.computed_capture = false;
                },
                .MatchChoice => |*inner| b: {
                    // Captures if any child captures.
                    var i = inner.ops.start;
                    while (i < inner.ops.end) : (i += 1) {
                        const child_op = self.g.ops.items[i];
                        if (self.g.shouldCaptureRule(&child_op)) {
                            inner.computed_capture = true;
                            break :b;
                        }
                    }
                    inner.computed_capture = false;
                },
                .MatchLiteral => |*inner| {
                    const str = self.g.getString(inner.str);
                    if (self.g.literal_tag_map.get(str)) |literal_tag| {
                        inner.computed_literal_tag = literal_tag;
                    } else {
                        log.warn("literal tags: {}", .{self.g.literal_tag_map.count()});
                        stdx.panicFmt("expected literal tag for '{s}'", .{self.g.getString(inner.str)});
                    }
                },
                .MatchNegLookahead, .MatchPosLookahead, .MatchOneOrMore, .MatchZeroOrMore => {
                    // Nop.
                },
            }
        }
    };
    var ctx = S{
        .g = g,
        .name_to_token_tag = name_to_token_tag,
        .name_to_rule = name_to_rule,
    };
    var walker = initMatchOpWalker(g.ops.items);
    for (g.decls.items) |decl| {
        var i = decl.ops.start;
        while (i < decl.ops.end) : (i += 1) {
            const op = &g.ops.items[i];
            // Visit the leaf nodes first so we can compute whether op_ids need to be captured by looking at its children.
            algo.walkPost(.{}, *S, &ctx, *MatchOp, op, walker.getIface(), S.resolve, &g.match_op_buf, &g.bit_buf);
        }
    }
}

pub const RuleId = u32;

pub const RuleDecl = struct {
    name: CharSlice,

    // These ops match like a seq and parses into child fields of the root node.
    ops: MatchOpSlice,

    // Skip creating an AST node for this rule and return child node instead.
    // Useful for Statement, Declaration, Expression decls.
    is_inline: bool,

    // This is computed in Grammar.build.
    // If true, there exists a sub expression where the left term is itself and additional terms follow.
    // Before returning the first match result, it will try to recursively match itself against those recursive expressions.
    is_left_recursive: bool,

    // Computed in Grammar.build.
    num_child_items: u32,
    data_size: u32,

    fn init(name: CharSlice, ops: MatchOpSlice, is_inline: bool) @This() {
        return .{
            .name = name,
            .is_inline = is_inline,
            .ops = ops,
            .num_child_items = undefined,
            .data_size = undefined,
            .is_left_recursive = undefined,
        };
    }
};

pub const MatchOpId = u32;
pub const MatchOpSlice = ds.RelSlice(MatchOpId);

pub const MatchOp = union(enum) {
    MatchOneOrMore: struct {
        op_id: MatchOpId,
    },
    MatchZeroOrMore: struct {
        skip: bool,
        op_id: MatchOpId,
    },
    MatchToken: struct {
        capture: bool,
        tag_name: CharSlice,

        // Tag is set by Grammar.build.
        tag: TokenTag,
    },
    // Match any literal token with text value.
    // Literal tokens are created if there is a TokenDecl with a top level TokenMatchOp.MatchLiteral op.
    MatchLiteral: struct {
        capture: bool,
        computed_literal_tag: LiteralTokenTag,
        str: CharSlice,
    },
    // Match token and text value.
    MatchTokenText: struct {
        // By default matching exact token text is not included in the parsed ast.
        // Setting capture to true would include it.
        // TODO: Should be captureName: ?[]const u8
        capture: bool,
        tag_name: CharSlice,
        tag: TokenTag,
        str: CharSlice,
    },
    MatchRule: struct {
        // By default matching other rules is included into the parsed ast.
        skip: bool,
        name: CharSlice,
        rule_id: RuleId,
    },
    // Matches all and returns last matching child up to parent.
    // TODO: multiple matching children should be returned as NodeList
    MatchSeq: struct {
        computed_capture: bool,
        ops: MatchOpSlice,
    },
    // Returns matching child up to parent.
    MatchChoice: struct {
        // capture is computed by looking at it's child op_ids.
        computed_capture: bool,
        ops: MatchOpSlice,
    },
    // Returns matching child up to parent.
    MatchOptional: struct {
        // Instead of returning the child, return a bool value
        capture_bool: bool,
        computed_capture: bool,
        op_id: MatchOpId,
    },
    MatchNegLookahead: struct {
        op_id: MatchOpId,
    },
    MatchPosLookahead: struct {
        op_id: MatchOpId,
    },
};

// Index into the decls array.
pub const TokenTag = u32;

// Id starting from 1 assigned to each unique literal token. Useful for fast comparisons.
pub const LiteralTokenTag = u32;

// Id 0 is reserved for no literal token tag.
pub const NullLiteralTokenTag: LiteralTokenTag = 0;

pub const TokenDecl = struct {
    tag: TokenTag,
    name: CharSlice,
    op_id: TokenMatchOpId,

    // Match but skip adding to token list.
    skip: bool,

    // The matches from this rule should also be assigned a literal tag.
    is_literal: bool,

    // Which token name to replace.
    replace_name: ?[]const u8,

    // Set by config prepare step.
    replace_with: ?TokenTag,

    pub fn init(tag: TokenTag, name: CharSlice, op_id: TokenMatchOpId, is_literal: bool, skip: bool, replace_name: ?[]const u8) @This() {
        return .{
            .tag = tag,
            .name = name,
            .op_id = op_id,
            .skip = skip,
            .is_literal = is_literal,
            .replace_name = replace_name,
            .replace_with = null,
        };
    }
};

pub const TokenMatchOpId = u32;
pub const TokenMatchOpSlice = ds.RelSlice(TokenMatchOpId);

// e.g. [a-zA-Z0-9]
pub const CharSetRange = struct {
    start: u8,
    end_incl: u8,
};
const CharSetRangeId = u32;
pub const CharSetRangeSlice = ds.RelSlice(CharSetRangeId);

const CharId = u32;
pub const CharSlice = ds.RelSlice(CharId);

pub const TokenMatchOp = union(enum) {
    MatchRule: struct {
        name: []const u8,
        tag: TokenTag,
    },
    MatchCharSet: struct {
        // When building, a CharSlice is added but is resolved to a slice in Grammar.build
        // TODO: remove this, its faster with ptr from the buffer
        resolved_charset: []const u8,
        charset: CharSlice,
        ranges: CharSetRangeSlice,
    },
    MatchNotCharSet: struct {
        resolved_charset: []const u8,
        charset: CharSlice,
        ranges: CharSetRangeSlice,
    },
    MatchZeroOrMore: struct {
        op_id: TokenMatchOpId,
    },
    MatchOneOrMore: struct {
        op_id: TokenMatchOpId,
    },
    MatchText: struct {
        str: CharSlice,
    },
    MatchUntilChar: struct {
        ch: u8,
    },
    MatchExactChar: struct {
        ch: u8,
    },
    MatchNotChar: struct {
        ch: u8,
    },
    MatchRangeChar: struct {
        start: u8,

        // Inclusive.
        end: u8,
        mod: MatchModifier,
    },
    MatchAsciiLetter: void,
    MatchDigit: MatchModifier,
    MatchRegexChar: struct {
        expr: []const u8,
    },
    // Tokenizer needs to save the state before trying each path.
    MatchChoice: struct {
        ops: TokenMatchOpSlice,
    },
    MatchSeq: struct {
        ops: TokenMatchOpSlice,
    },
    MatchOptional: struct {
        op_id: TokenMatchOpId,
    },
    MatchPosLookahead: struct {
        op_id: TokenMatchOpId,
    },
    MatchNegLookahead: struct {
        op_id: TokenMatchOpId,
    },
};

const MatchModifier = enum {
    One,
    OneOrMore,
};
parser/grammar.zig
const std = @import("std");
const builtin = @import("builtin");
const wasi = std.os.wasi;
const Allocator = std.mem.Allocator;
const unexpectedErrno = std.os.unexpectedErrno;
const GetCommandLineW = std.os.windows.kernel32.GetCommandLineW;

/// An iterator over an array of null-terminated argument strings.
pub const ArgvIterator = struct {
    argv: []const [*:0]const u8,

    /// NOTE: This method is only intended on POSIX systems or in applications that link libc.
    /// On Windows use WindowsArgIterator and on WASI use WasiArgs.iterator();
    pub fn init() ArgvIterator {
        return .{ .argv = std.os.argv };
    }

    /// Retrieve the next argument passed to the process.
    /// Returns null once all arguments are consumed.
    pub fn next(self: *ArgvIterator) ?[:0]const u8 {
        if (self.argv.len == 0) return null;
        // Advance after the head pointer has been captured for the return value.
        defer self.argv = self.argv[1..];
        return std.mem.span(self.argv[0]);
    }
};

/// A structure that manages allocation, retrieval, and deallocation of WASI arguments.
/// Construct using `var wasi_args = WasiArgs{}`, then access the argv slice via
/// `wasi_args.argv()` or `wasi_args.iterator()`. Make sure to call `wasi_args.deinit()`.
pub const WasiArgs = struct {
    /// The buffer that stores the argument strings and pointers into them.
    buf: []align(buf_align) u8 = &[_]u8{},

    /// The initialization error, taken from the stdlib WASI iterator.
    pub const InitError = error{OutOfMemory} || std.os.UnexpectedError;

    /// The alignment required to store the array of pointers or slices.
    const buf_align = std.math.max(@alignOf([*:0]u8), @alignOf([:0]u8));

    /// Initialize the argv buffer using the WASI argument retrieval API.
    /// Caller is responsible for calling `WasiArgs.deinit()` to free memory.
    /// Do not free the returned slice separately from `WasiArgs`; the slice
    /// is contained within the same buffer as the argument data.
    /// If `span` is false, return value is a slice of [*:0]u8.
    /// If `span` is true, return value is a slice of [:0]u8.
    pub fn argv(
        self: *WasiArgs,
        allocator: Allocator,
        comptime span: bool,
    ) InitError!(if (span) [][:0]u8 else [][*:0]u8) {
        if (builtin.os.tag != .wasi) @compileError("Cannot initialize WASI argument buffer on non-WASI target.");
        // Retrieve argument count and required buffer size.
        var argc: usize = undefined;
        var argv_size: usize = undefined;
        var err = wasi.args_sizes_get(&argc, &argv_size);
        if (err != wasi.errno_t.SUCCESS) return unexpectedErrno(err);
        // Allocate the buffer with enough space for argv slice and argument data.
        const buf = try self.allocBuf(allocator, argc, argv_size, span);
        errdefer self.deinit(allocator);
        // Retrieve the argument data, storing it in the newly-allocated buffer.
        err = wasi.args_get(buf.argv.ptr, buf.buf.ptr);
        if (err != wasi.errno_t.SUCCESS) return unexpectedErrno(err);
        // Return the argv buffer, spanning it if requested.
        return if (span) buf.span() else buf.argv;
    }

    /// Retrieve the process arguments and return an iterator over them. The
    /// returned iterator is an ArgvIterator, which is used on POSIX systems.
    /// Caller is responsible for calling `WasiArgs.deinit()` to free memory.
    pub fn iterator(self: *WasiArgs, allocator: Allocator) InitError!ArgvIterator {
        return ArgvIterator{
            .argv = try self.argv(allocator, false),
        };
    }

    /// Free the memory allocated by `argv()` or `iterator()`.
    /// The struct is poisoned with `undefined` after the free.
    pub fn deinit(self: *WasiArgs, allocator: Allocator) void {
        defer self.* = undefined;
        allocator.free(self.buf);
    }

    /// Allocate memory for `buf` and `argv` based on argument count and number
    /// of bytes required to store the concatenated, null-terminated `argv`. This
    /// should usually not be called directly as it serves little purpose outside
    /// of implementing `WasiArgs.argv()` and `WasiArgs.iterator()`.
    pub fn allocBuf(
        self: *WasiArgs,
        allocator: Allocator,
        argc: usize,
        argv_size: usize,
        comptime span: bool,
    ) error{OutOfMemory}!AllocBuf {
        const ArgvT = if (span) [:0]u8 else [*:0]u8;
        // Reserve the larger element size so the pointer table can later be
        // widened into slices in place (see AllocBuf.span).
        const item_size = comptime std.math.max(@sizeOf(ArgvT), @sizeOf([*:0]u8));
        const buf_len = argc * item_size + argv_size;
        self.buf = try allocator.allocWithOptions(u8, buf_len, buf_align, null);
        return AllocBuf{
            .argv = @ptrCast([*][*:0]u8, self.buf.ptr)[0..argc],
            .buf = self.buf[(item_size * argc)..],
        };
    }

    /// Return value of `allocBuf`, storing the requested allocation.
    pub const AllocBuf = struct {
        /// A slice of argument strings, indexing into the underlying buffer.
        argv: [][*:0]u8,

        /// The buffer of argument data indexed by the `argv` slice.
        buf: []u8,

        /// Return the result of spanning each pointer in the argv slice.
        /// Caller asserts that the underlying allocation has space for this.
        pub fn span(self: AllocBuf) [][:0]u8 {
            comptime std.debug.assert(@sizeOf([:0]u8) >= @sizeOf([*:0]u8));
            var source = self.argv;
            const dest = @ptrCast([*][:0]u8, source.ptr)[0..source.len];
            // Iterate backwards so each pointer is read before its slot is
            // overwritten by the wider slice value.
            while (source.len > 0) : (source.len -= 1)
                dest[source.len - 1] = std.mem.span(source[source.len - 1]);
            return dest;
        }
    };
};

/// An iterator over arguments specified in a Windows command line string.
pub const WindowsArgIterator = struct {
    command_line: [*:0]const u16,

    /// Initialize the iterator using the kernel32 function GetCommandLineW.
    pub fn init() WindowsArgIterator {
        if (builtin.os.tag != .windows) @compileError("Cannot initialize Windows argument iterator on non-Windows target.");
        return .{ .command_line = GetCommandLineW() };
    }

    /// Retrieve the next argument in the command line. The text is unprocessed
    /// other than being split according to the Windows backslash and quoting
    /// rules, so to actually determine the contents of the argument it will
    /// likely be useful to use the other methods provided by this library.
pub fn next(self: *WindowsArgIterator) ?[]const u16 { return self.parseInternal(.raw, null, {}, {}) catch unreachable; } /// Parse the argument from the command line string into the provided buffer. /// Returns an error if the buffer does not have room for the parsed argument. /// Note that Windows command lines are limited to 32768 WTF-16 code units in /// length, including quotes and escape sequences, so the buffer need only be /// [32768]u16 or [98304]u8, depending on encoding choice. pub fn decodeNext( self: *WindowsArgIterator, comptime encoding: Encoding, buf: encoding.v(null).Param(), ) error{OutOfMemory}!?encoding.v(null).Ret() { return self.parseInternal(comptime encoding.v(null), null, {}, buf); } /// Parse the argument from the command line string into a newly allocated buffer. /// Windows argument decoding can be done with a fixed buffer; see `decodeNext`. pub fn decodeAlloc( self: *WindowsArgIterator, allocator: Allocator, comptime encoding: Encoding, ) error{OutOfMemory}!?encoding.v(null).Ret() { return self.parseAlloc(allocator, encoding, null); } /// Parse the argument from the command line string into the provided buffer, /// additionally appending the requested sentinel. See `decodeNext` for more /// information. pub fn decodeNextZ( self: *WindowsArgIterator, comptime encoding: Encoding, buf: encoding.v(null).Param(), comptime sentinel: encoding.Sentinel(), ) error{OutOfMemory}!?encoding.v(sentinel).Ret() { return self.parseInternal(comptime encoding.v(sentinel), null, {}, buf); } /// Parse the argument from the command line string into a newly allocated buffer. /// Windows argument decoding can be done with a fixed buffer; see `decodeNextZ`. 
pub fn decodeAllocZ( self: *WindowsArgIterator, allocator: Allocator, comptime encoding: Encoding, comptime sentinel: encoding.Sentinel(), ) error{OutOfMemory}!?encoding.v(sentinel).Ret() { return self.parseAlloc(allocator, encoding, sentinel); } /// Return a decoder which tracks state and thus can be resumed after out of /// memory errors instead of failing with no chance of recovery. pub fn advancedDecoder( self: *WindowsArgIterator, comptime encoding: Encoding, comptime sentinel: ?encoding.Sentinel(), ) AdvancedDecoder(encoding, sentinel) { return .{ .iterator = self }; } pub const AdvancedDecoder = AdvancedDecoder_; /// The encoding to parse command line arguments in, used by `decodeNext`. pub const Encoding = enum { /// The native encoding used by Windows to represent non-ASCII characters. /// This is the recommended encoding to read command line arguments in, but /// requires platform-specific code as non-Windows OSes do not use this. /// However, using WTF-8 entails a series of back-and-forth conversions for /// winapi calls and file IO so it is highly recommended to use WTF-16. wtf16, /// An encoding roughly compatible with UTF-8, but additionally allowing /// unpaired surrogate codepoints to be encoded. This encoding should not /// be used unless truly required; WTF-16 is the format used by winapi, /// and for network transmission, display to user, and file IO it should /// be converted to validated UTF-8 (possibly with U+FFFD) instead. To /// reiterate: using this encoding is almost always incorrect. 
wtf8, fn Sentinel(comptime self: Encoding) type { return switch (self) { .wtf16 => u16, .wtf8 => u8, }; } fn v(comptime self: Encoding, comptime sentinel: ?self.Sentinel()) OutputVariant { return @unionInit(OutputVariant, @tagName(self), sentinel); } }; fn parseAlloc( self: *WindowsArgIterator, allocator: Allocator, comptime encoding: Encoding, comptime sentinel: ?encoding.Sentinel(), ) !?encoding.v(sentinel).Ret() { var decoder = self.advancedDecoder(encoding, sentinel); var buf = std.ArrayListUnmanaged(encoding.Sentinel()){}; errdefer buf.deinit(allocator); while (true) { if (decoder.decodeNext(buf.items)) |result| { if (result) |str| { const new_len = str.len + @boolToInt(sentinel != null); _ = allocator.shrink(buf.items, new_len); } return result; } else |_| { try buf.ensureTotalCapacity(allocator, buf.capacity + 1); buf.items.len = buf.capacity; } } } fn parseInternal( self: *WindowsArgIterator, comptime v: OutputVariant, comptime Advanced: ?type, advanced: if (Advanced) |A| *A else void, buf: v.Param(), ) !?v.Ret() { // Initialize variables used in the parser loop. var c = self.command_line; if (Advanced != null) c += advanced.cmd_offset; // Null terminator means there is nothing to iterate, so return null. if (c[0] == 0 and (Advanced == null or advanced.cmd_offset == 0)) return null; // Set up output parsing utility. var out = ParseOutput(v).init(if (v == .raw) c else buf); if (Advanced != null) { out.buf.len = advanced.buf_offset; out.prev = advanced.prev; } // The MSDN docs say that if the command line string starts with a space // or tab, then the first argument shall be parsed as a 0-length string. 
if (c[0] != ' ' and c[0] != '\t' or (Advanced != null and advanced.cmd_offset > 0)) { var in_quotes = false; var num_backslashes: usize = 0; if (Advanced != null) { in_quotes = advanced.in_quotes; num_backslashes = advanced.num_backslashes; } while (c[0] != 0 and (in_quotes or (c[0] != ' ' and c[0] != '\t'))) : (c += 1) { switch (c[0]) { '\\' => { if (v == .raw) try out.add({}); num_backslashes += 1; }, '"' => { if (num_backslashes % 2 == 0) in_quotes = !in_quotes; if (v != .raw) { if (num_backslashes > 0) try out.addBackslashes(num_backslashes / 2); if (num_backslashes % 2 == 1) try out.add('"'); } else try out.add({}); num_backslashes = 0; }, else => { if (v != .raw) { if (num_backslashes > 0) try out.addBackslashes(num_backslashes); try out.add(c[0]); } else try out.add({}); num_backslashes = 0; }, } if (Advanced != null) { advanced.cmd_offset += 1; advanced.buf_offset = out.buf.len; advanced.prev = out.prev; advanced.in_quotes = in_quotes; advanced.num_backslashes = num_backslashes; } } if (v != .raw and num_backslashes > 0) { try out.addBackslashes(num_backslashes); } } // Extract the final result from the parser state and strip trailing whitespace. const result = try out.finalize(); while (c[0] == ' ' or c[0] == '\t') : (c += 1) {} self.command_line = c; if (Advanced != null) advanced.* = .{ .iterator = self }; return result; } /// Internal structure that represents the buffer. Performs bounds checks /// and, when applicable, converts WTF-16 code units into WTF-8 bytes. fn ParseOutput(comptime v: OutputVariant) type { return struct { cap: if (v == .raw) void else usize, prev: if (v == .wtf8) u16 else u0 = 0, buf: v.Buf(), /// Initialize the ParseOutput using the relevant buffer type. 
fn init(buf: if (v == .raw) [*]const u16 else v.Buf()) @This() { const buf2: v.Buf() = buf[0..0]; return switch (v) { .raw => .{ .cap = {}, .buf = buf2, }, .wtf16 => .{ .cap = buf.len, .buf = buf2, }, .wtf8 => .{ .cap = buf.len, .buf = buf2, }, }; } /// Encode a WTF-16 code unit into a 1-byte WTF-8 sequence. fn encode1(self: *@This(), c: u16) !void { if (self.cap == self.buf.len) return error.OutOfMemory; self.buf.len += 1; self.buf[self.buf.len - 1] = @intCast(u8, c); } /// Encode a WTF-16 code unit into a 2-byte WTF-8 sequence. fn encode2(self: *@This(), c: u16) !void { if (self.cap < self.buf.len + 2) return error.OutOfMemory; self.buf.len += 2; self.buf[(self.buf.len - 2)..][0..2].* = [_]u8{ 0xc0 | @intCast(u8, c >> 6), 0x80 | @intCast(u8, c & 0x3f), }; } /// Encode a WTF-16 code unit into a 3-byte WTF-8 sequence. fn encode3(self: *@This(), c: u16) !void { if (self.cap < self.buf.len + 3) return error.OutOfMemory; self.buf.len += 3; self.buf[(self.buf.len - 3)..][0..3].* = [_]u8{ 0xe0 | @intCast(u8, c >> 12), 0x80 | @intCast(u8, (c >> 6) & 0x3f), 0x80 | @intCast(u8, c & 0x3f), }; } /// Encode a WTF-16 surrogate pair into a 4-byte WTF-8 sequence. fn encode4(self: *@This(), lead: u16, trail: u16) !void { if (self.cap < self.buf.len + 4) return error.OutOfMemory; const c = 0x10000 + (@as(u21, lead - 0xd800) << 10) + (trail - 0xdc00); self.buf.len += 4; self.buf[(self.buf.len - 4)..][0..4].* = [_]u8{ 0xf0 | @intCast(u8, c >> 18), 0x80 | @intCast(u8, (c >> 12) & 0x3f), 0x80 | @intCast(u8, (c >> 6) & 0x3f), 0x80 | @intCast(u8, c & 0x3f), }; } /// Append the provided WTF-16 code unit to the buffer, returning an /// error if the buffer is out of room. Performs WTF-16 to WTF-8 /// conversion if ParseOutput is in WTF-8 mode. fn add(self: *@This(), c: if (v == .raw) void else u16) !void { switch (v) { .raw => self.buf.len += 1, .wtf16 => { // Verify buffer capacity, then append the code unit. 
if (self.cap == self.buf.len) return error.OutOfMemory; self.buf.len += 1; self.buf[self.buf.len - 1] = c; }, .wtf8 => { // Perform WTF-16 to WTF-8 conversion. switch (c) { 0xd800...0xdbff => { if (self.prev != 0) { // Encode unpaired leading surrogate. try self.encode3(self.prev); } // Store this leading surrogate for later. self.prev = c; }, 0xdc00...0xdfff => { if (self.prev == 0) { // Encode unpaired trailing surrogate. try self.encode3(c); } else { // Encode supplementary codepoint. try self.encode4(self.prev, c); self.prev = 0; } }, else => { if (self.prev != 0) { // Encode unpaired leading surrogate. try self.encode3(self.prev); self.prev = 0; } // Encode basic multilingual plane codepoint. switch (c) { 0x0000...0x007f => try self.encode1(c), 0x0080...0x07ff => try self.encode2(c), 0x0800...0xffff => try self.encode3(c), } }, } }, } } /// Append the specified number of backslashes to end of the buffer. fn addBackslashes(self: *@This(), count: usize) !void { comptime std.debug.assert(v != .raw); if (v == .wtf8 and self.prev != 0) { try self.encode3(self.prev); self.prev = 0; } if (self.cap < self.buf.len + count) return error.OutOfMemory; self.buf.len += count; for (self.buf[(self.buf.len - count)..]) |*target| target.* = '\\'; } /// Returns the buffer that was filled via repeated `add()` calls. /// This will fail if there is not enough capacity to encode the /// final codepoint and/or the null terminator. fn finalize(self: *@This()) !v.Ret() { if (v == .wtf8 and self.prev != 0) try self.encode3(self.prev); if (comptime v.sentinel()) |s| { if (self.cap == self.buf.len) return error.OutOfMemory; self.buf.len += 1; self.buf[self.buf.len - 1] = s; return self.buf[0..(self.buf.len - 1) :s]; } else return self.buf; } }; } /// Helper enum to make the above code a bit less verbose. 
const OutputVariant = union(enum) { raw: void, wtf16: ?u16, wtf8: ?u8, fn sentinel(comptime v: OutputVariant) switch (v) { .wtf16 => ?u16, .wtf8 => ?u8, else => ?void, } { if (v == .raw) return null; return @field(v, @tagName(v)); } fn Param(comptime v: OutputVariant) type { return switch (v) { .raw => void, .wtf16 => []u16, .wtf8 => []u8, }; } fn Buf(comptime v: OutputVariant) type { return switch (v) { .raw => []const u16, .wtf16 => []u16, .wtf8 => []u8, }; } fn Ret(comptime v: OutputVariant) type { return switch (v) { .raw => []const u16, .wtf16 => |s_| if (s_) |s| [:s]u16 else []u16, .wtf8 => |s_| if (s_) |s| [:s]u8 else []u8, }; } }; }; fn AdvancedDecoder_(comptime encoding: WindowsArgIterator.Encoding, comptime sentinel: ?encoding.Sentinel()) type { const v = encoding.v(sentinel); return struct { iterator: *WindowsArgIterator, in_quotes: bool = false, num_backslashes: usize = 0, prev: if (v == .wtf8) u16 else u0 = 0, cmd_offset: usize = 0, buf_offset: usize = 0, /// Decode the next argument. This function can be retried with a reallocated /// buffer when an error is returned, but will otherwise return the new buffer. pub fn decodeNext(self: *@This(), buf: v.Param()) error{OutOfMemory}!?v.Ret() { return self.iterator.parseInternal(v, @This(), self, buf); } }; } test "zlaap.ArgvIterator" { // Ensure [][*:0]u8 can cast to []const [*:0]const u8. _ = @as([]const [*:0]const u8, std.os.argv); // Random argv buffer to test in a platform-independent environment. const argv_str = [_][:0]const u8{ "abc, xyz, 123", "592392 jgjsk l2ql1", "al2laktrwalktl", }; const argv_ptr = [_][*:0]const u8{ argv_str[0].ptr, argv_str[1].ptr, argv_str[2].ptr, }; // The iterator should return exactly 3 arguments. 
var iterator = ArgvIterator{ .argv = &argv_ptr }; const arg_1 = iterator.next() orelse return error.ExpectedArgument; const arg_2 = iterator.next() orelse return error.ExpectedArgument; const arg_3 = iterator.next() orelse return error.ExpectedArgument; // Validate the returned arguments and ensure no 4th argument is returned. try std.testing.expect(iterator.next() == null); try std.testing.expectEqualStrings(argv_str[0], arg_1); try std.testing.expectEqualStrings(argv_str[1], arg_2); try std.testing.expectEqualStrings(argv_str[2], arg_3); } test "zlaap.WasiArgs" { // Random argv buffer to test in a platform-independent environment. const test_buf: []const u8 = "abcxyz123456000+++\x00111222333444---555---+++\x00999___212___622\x00"; const test_argv = [_][]const u8{ test_buf[0..18], test_buf[19..][0..24], test_buf[19..][25..][0..15], }; // Run the test with both spanned argv and unspanned argv. inline for ([_]bool{ false, true }) |span| { // Simulate WASI argument retrieval but for any target. var wasi_args = WasiArgs{}; const buf = try wasi_args.allocBuf( std.testing.allocator, test_argv.len, test_buf.len, span, ); defer wasi_args.deinit(std.testing.allocator); // Ensure the allocation was performed correctly. 
const ArgvT = if (span) [:0]u8 else [*:0]u8; const ptr_size = comptime std.math.max(@sizeOf(ArgvT), @sizeOf([*:0]u8)); try std.testing.expectEqual(test_buf.len, buf.buf.len); try std.testing.expectEqual(test_argv.len, buf.argv.len); try std.testing.expectEqual(test_buf.len + test_argv.len * ptr_size, wasi_args.buf.len); std.mem.copy(u8, buf.buf, test_buf); buf.argv[0] = @ptrCast([*:0]u8, buf.buf.ptr); buf.argv[1] = @ptrCast([*:0]u8, buf.buf.ptr) + 19; buf.argv[2] = @ptrCast([*:0]u8, buf.buf.ptr) + 19 + 25; var argv: [3][:0]const u8 = undefined; if (span) { const argv_ = buf.span(); try std.testing.expectEqual(test_argv.len, argv_.len); argv = argv_[0..test_argv.len].*; } else { var iterator = ArgvIterator{ .argv = buf.argv }; for (argv) |*arg| arg.* = iterator.next() orelse return error.ExpectedArgument; try std.testing.expect(iterator.next() == null); } for (argv) |arg, i| try std.testing.expectEqualStrings(test_argv[i], arg); } } test "zlaap.WindowsArgIterator" { inline for (.{ windows_test_ascii, windows_test_emoji, windows_test_unpaired, }) |test_case| { try test_case.execute(); } } /// A Windows test case and its expected outputs. const WindowsTest = struct { command_line: [:0]const u16, argv_raw: [3][]const u16, argv_wtf16: [3][]const u16, argv_wtf8: [3][]const u8, /// Convert a UTF-8 command line and expected results into a test case /// that supports WTF-16 testing as well. 
fn fromUtf8( comptime command_line: []const u8, comptime argv_raw: [3][]const u8, comptime argv_wtf8: [3][]const u8, ) WindowsTest { @setEvalBranchQuota(100 * (command_line.len + argv_raw[0].len + argv_raw[1].len + argv_raw[2].len + argv_wtf8[0].len + argv_wtf8[1].len + argv_wtf8[2].len)); comptime return .{ .command_line = std.unicode.utf8ToUtf16LeStringLiteral(command_line), .argv_wtf8 = argv_wtf8, .argv_raw = [_][]const u16{ std.unicode.utf8ToUtf16LeStringLiteral(argv_raw[0]), std.unicode.utf8ToUtf16LeStringLiteral(argv_raw[1]), std.unicode.utf8ToUtf16LeStringLiteral(argv_raw[2]), }, .argv_wtf16 = [_][]const u16{ std.unicode.utf8ToUtf16LeStringLiteral(argv_wtf8[0]), std.unicode.utf8ToUtf16LeStringLiteral(argv_wtf8[1]), std.unicode.utf8ToUtf16LeStringLiteral(argv_wtf8[2]), }, }; } fn execute(comptime self: WindowsTest) !void { const iterator_init = WindowsArgIterator{ .command_line = self.command_line }; { // Test case for `iterator.next()`. var iterator = iterator_init; for (self.argv_raw) |str| { const result = iterator.next() orelse return error.ExpectedArgument; try std.testing.expectEqualSlices(u16, str, result); } // Ensure that there is no fourth argument. try std.testing.expect(iterator.next() == null); try std.testing.expect((try iterator.decodeNext(.wtf8, &[_]u8{})) == null); try std.testing.expect((try iterator.decodeNext(.wtf16, &[_]u16{})) == null); try std.testing.expect((try iterator.decodeNextZ(.wtf8, &[_]u8{}, 0)) == null); try std.testing.expect((try iterator.decodeNextZ(.wtf16, &[_]u16{}, 0)) == null); } const allocator = std.testing.allocator; inline for (.{ "wtf16", "wtf8" }) |encoding_| { // Test case for the `iterator.decode` family of functions. var iterators = [_]WindowsArgIterator{iterator_init} ** 4; const encoding = @field(WindowsArgIterator.Encoding, encoding_); const S = encoding.Sentinel(); inline for (@field(self, "argv_" ++ encoding_)) |str| { // Create buffers for `decodeNext` and `decodeNextZ`. 
var buf: [str.len]S = undefined; var buf_z: [str.len + 1]S = undefined; // Execute each function to be tested and store the results in an array. var results: std.meta.Tuple(&[_]type{[]S, [:0]S, []S, [:0]S}) = undefined; results[0] = (try iterators[0].decodeNext(encoding, &buf)) orelse return error.ExpectedArgument; results[1] = (try iterators[1].decodeNextZ(encoding, &buf_z, 0)) orelse return error.ExpectedArgument; results[2] = (try iterators[2].decodeAlloc(allocator, encoding)) orelse return error.ExpectedArgument; defer allocator.free(results[2]); results[3] = (try iterators[3].decodeAllocZ(allocator, encoding, 0)) orelse return error.ExpectedArgument; defer allocator.free(results[3]); // Ensure that the returned argument matches what is expected. inline for ([_]void{{}} ** results.len) |_, i| try std.testing.expectEqualSlices(S, str, results[i]); } // Ensure that there is no fourth argument. for (iterators) |*iterator| { try std.testing.expect(iterator.next() == null); try std.testing.expect((try iterator.decodeNext(.wtf8, &[_]u8{})) == null); try std.testing.expect((try iterator.decodeNext(.wtf16, &[_]u16{})) == null); try std.testing.expect((try iterator.decodeNextZ(.wtf8, &[_]u8{}, 0)) == null); try std.testing.expect((try iterator.decodeNextZ(.wtf16, &[_]u16{}, 0)) == null); } } } }; /// An ASCII command line that contains the "gotchas" that one may run into when /// implementing a Windows command line argument parser. Useful to isolate errors /// caused by parsing from errors caused by Unicode conversion. 
const windows_test_ascii = WindowsTest.fromUtf8(
    // Raw command line: leading whitespace, embedded quotes, and runs of
    // backslashes placed directly before quote characters.
    " \t \t \t HELLO\"WORLD\\\" \t \\\\\"\\\\\" \\\t \\\\ \" \t \tfoo\\\\\\\\\"bar\" ",
    // Expected arguments with quoting/backslash processing left undone (raw mode).
    [_][]const u8{ "", "HELLO\"WORLD\\\" \t \\\\\"\\\\\" \\\t \\\\ \"", "foo\\\\\\\\\"bar\"" },
    // Expected arguments after full Windows quoting rules are applied.
    [_][]const u8{ "", "HELLOWORLD\" \t \\\\ \\\t \\\\ ", "foo\\\\bar" },
);

/// A Unicode command line with UTF-8 byte sequences ranging from 1 to 4 bytes
/// in length; additionally, when converted to UTF-16, there are both surrogate
/// and non-surrogate code units.
const windows_test_emoji = WindowsTest.fromUtf8(
    "_ⱵSƎƎⱵƎ \t \"Z😜☺_\\\" \" \t T☺v😜\\Ⱶ \t ",
    [_][]const u8{ "_ⱵSƎƎⱵƎ", "\"Z😜☺_\\\" \"", "T☺v😜\\Ⱶ" },
    [_][]const u8{ "_ⱵSƎƎⱵƎ", "Z😜☺_\" ", "T☺v😜\\Ⱶ" },
);

// A Unicode command line with unpaired surrogate codepoints.
// Built by hand (not via fromUtf8) because unpaired surrogates cannot be
// expressed as UTF-8 string literals.
const windows_test_unpaired = WindowsTest{
    .command_line = blk: {
        // Trailing 0x0 is the null terminator; sliced off with a :0 sentinel below.
        const buf = [_]u16{ ' ', 0xDC10, 0xD810, '"', ' ', 0xD810, 0xDC10, 's', 0xDC10, '\t', '"', ' ', 0xD810, ' ', 0x0 };
        break :blk buf[0..(buf.len - 1) :0];
    },
    .argv_raw = [_][]const u16{
        &[_]u16{},
        &[_]u16{ 0xDC10, 0xD810, '"', ' ', 0xD810, 0xDC10, 's', 0xDC10, '\t', '"' },
        &[_]u16{0xD810},
    },
    .argv_wtf16 = [_][]const u16{
        &[_]u16{},
        &[_]u16{ 0xDC10, 0xD810, ' ', 0xD810, 0xDC10, 's', 0xDC10, '\t' },
        &[_]u16{0xD810},
    },
    // WTF-8 expectations are computed at comptime from the surrogate values:
    // paired() encodes a surrogate pair as 4 bytes, unpaired() encodes a lone
    // surrogate as a 3-byte sequence (WTF-8 allows this, UTF-8 does not).
    .argv_wtf8 = struct {
        const result = [_][]const u8{
            &[_]u8{},
            unpaired(0xDC10) ++ unpaired(0xD810) ++ " " ++ paired(0xD810, 0xDC10) ++ "s" ++ unpaired(0xDC10) ++ "\t",
            &unpaired(0xD810),
        };
        fn paired(lead: u16, trail: u16) [4]u8 {
            const codepoint = 0x10000 + (@as(u21, lead - 0xD800) << 10) + (trail - 0xDC00);
            return [_]u8{
                0xF0 | @intCast(u8, codepoint >> 18),
                0x80 | @intCast(u8, (codepoint >> 12) & 0x3F),
                0x80 | @intCast(u8, (codepoint >> 6) & 0x3F),
                0x80 | @intCast(u8, codepoint & 0x3F),
            };
        }
        fn unpaired(codepoint: u16) [3]u8 {
            return [_]u8{
                0xE0 | @intCast(u8, codepoint >> 12),
                0x80 | @intCast(u8, (codepoint >> 6) & 0x3F),
                0x80 | @intCast(u8, codepoint & 0x3F),
            };
        }
    }.result,
};
zlaap.zig
const std = @import("std");
const print = std.debug.print;
usingnamespace @import("value.zig");
usingnamespace @import("chunk.zig");
usingnamespace @import("compiler.zig");
usingnamespace @import("heap.zig");

// Global VM instance; several methods below reference `vm` directly
// instead of `self`.
pub var vm: VM = undefined;

pub const ObjStringHashOfValue = std.hash_map.AutoHashMap(*ObjString, Value);
pub const ObjStringSet = std.hash_map.StringHashMap(*ObjString);
pub const ArrayListOfObj = std.ArrayList(*Obj);

/// One activation record: the closure being executed, its instruction
/// pointer, a window into the value stack, and the stack top to restore
/// on return.
const CallFrame = struct {
    closure: *ObjClosure,
    ip: usize,
    slots: []Value,
    lastStackTop: usize,

    // Fetch the byte at ip and advance ip by one.
    fn readByte(self: *CallFrame) u8 {
        const instrction = self.closure.fun.chunk.code.items[self.ip];
        self.ip += 1;
        return instrction;
    }

    // Fetch a big-endian u16 operand (two code bytes) and advance ip by two.
    fn readShort(self: *CallFrame) u16 {
        var offset = @intCast(u16, self.closure.fun.chunk.code.items[self.ip]) << 8;
        offset |= self.closure.fun.chunk.code.items[self.ip + 1];
        self.ip += 2;
        return offset;
    }

    // Read a one-byte constant index and look it up in the chunk's constant pool.
    fn readConstant(self: *CallFrame) Value {
        const instrction = self.readByte();
        return self.closure.fun.chunk.constants.items[instrction];
    }

    // Read a constant and interpret it as an ObjString.
    fn readString(self: *CallFrame) *ObjString {
        const value = self.readConstant();
        return asString(value);
    }
};

const FRAMES_MAX = 64;
const STACK_MAX = std.math.maxInt(u8) * FRAMES_MAX;

/// Bytecode virtual machine: call frames, a value stack, interned strings,
/// globals, and GC bookkeeping.
pub const VM = struct {
    frames: [FRAMES_MAX]CallFrame,
    frameCount: usize,
    openUpvalue: ?*ObjUpvalue,
    // Objects created while the VM is running; freed via heap.freeObjects in deinit().
    objects: ?*Obj,
    strings: ObjStringSet,
    stack: [STACK_MAX]Value,
    stackTop: usize,
    globals: ObjStringHashOfValue,
    grayStack: ArrayListOfObj,

    /// Initialize the hash maps / gray stack and reset the stack state.
    pub fn init(self: *VM, allocator: *std.mem.Allocator) void {
        self.globals = ObjStringHashOfValue.init(allocator);
        self.strings = ObjStringSet.init(allocator);
        self.grayStack = ArrayListOfObj.init(allocator);
        self.restStack();
    }

    pub fn deinit(self: *VM) void {
        self.globals.deinit();
        heap.freeObjects(self.objects);
        self.grayStack.deinit();
    }

    // Reset stack pointer, frame count and the open-upvalue list.
    fn restStack(self: *VM) void {
        self.stackTop = 0;
        self.frameCount = 0;
        self.openUpvalue = null;
    }

    /// Main interpreter loop: dispatch one opcode per iteration until the
    /// top-level frame returns. Prints a stack trace and disassembly for
    /// each instruction (debug tracing is always on here).
    fn run(self: *VM) !InterpretResult {
        const start = std.time.milliTimestamp();
        var frame = &self.frames[self.frameCount - 1];
        while (true) {
            // Dump the current stack contents (debug trace).
            print(" ", .{});
            for (self.stack[0..self.stackTop]) |item| {
                print("[ ", .{});
                try printValue(item);
                print(" ]", .{});
            }
            print("\n", .{});
            _ = try frame.closure.fun.chunk.disassembleInstruction(frame.ip);
            const instrction = @intToEnum(OpCode, frame.readByte());
            switch (instrction) {
                OpCode.OP_PRINT => {
                    try printValue(self.pop());
                    print("\n", .{});
                },
                OpCode.OP_RETURN => {
                    const result = self.pop();
                    // Close any upvalues that still point into this frame's slots.
                    self.closeUpvalues(&self.stack[frame.lastStackTop]);
                    vm.frameCount -= 1;
                    if (vm.frameCount == 0) {
                        // Top-level return: report elapsed wall time and exit.
                        const i: f64 = 1000.0;
                        print("执行花费时间: {d:.5}秒\n", .{std.math.divFloor(f64, @intToFloat(f64, (std.time.milliTimestamp() - start)), i)});
                        return InterpretResult.INTERPRET_OK;
                    }
                    // Discard the callee's window and push its result for the caller.
                    vm.stackTop = frame.lastStackTop;
                    self.push(result);
                    frame = &vm.frames[vm.frameCount - 1];
                },
                OpCode.OP_CONSTANT => {
                    const constant = frame.readConstant();
                    self.push(constant);
                },
                OpCode.OP_NIL => self.push(nil2Value()),
                OpCode.OP_TRUE => self.push(bool2Value(true)),
                OpCode.OP_FALSE => self.push(bool2Value(false)),
                OpCode.OP_NOT => self.push(bool2Value(isFalsey(self.pop()))),
                OpCode.OP_POP => _ = self.pop(),
                OpCode.OP_GET_LOCAL => {
                    const slot = frame.readByte();
                    self.push(frame.slots[slot]);
                },
                OpCode.OP_SET_LOCAL => {
                    // Assignment is an expression: the value stays on the stack.
                    const slot = frame.readByte();
                    frame.slots[slot] = self.peek(0);
                },
                OpCode.OP_GET_GLOBAL => {
                    const name = frame.readString();
                    if (self.globals.get(name)) |value| {
                        self.push(value);
                    } else {
                        // NOTE(review): formats `name` (the ObjString pointer) while
                        // OP_SET_GLOBAL formats `name.chars` — likely intended to match.
                        self.runtimeError("Undefined variable '{}'.", .{name});
                        return InterpretResult.INTERPRET_RUNTIME_ERROR;
                    }
                },
                OpCode.OP_SET_GLOBAL => {
                    // Only assigns if the global already exists; otherwise it is an error.
                    const name = frame.readString();
                    if (self.globals.get(name)) |_| {
                        try self.globals.put(name, self.peek(0));
                    } else {
                        self.runtimeError("Undefined variable '{}'.", .{name.chars});
                        return InterpretResult.INTERPRET_RUNTIME_ERROR;
                    }
                },
                OpCode.OP_DEFINE_GLOBAL => {
                    const name = frame.readString();
                    try self.globals.put(name, self.peek(0));
                    _ = self.pop();
                },
                OpCode.OP_EQUAL => {
                    const b = self.pop();
                    const a = self.pop();
                    self.push(bool2Value(valuesEqual(a, b)));
                },
                OpCode.OP_ADD,
                OpCode.OP_SUBTRACT,
                OpCode.OP_MULTIPLY,
                OpCode.OP_DIVIDE,
                OpCode.OP_GREATER,
                OpCode.OP_LESS,
                => {
                    // All binary numeric ops share one type check and dispatch helper.
                    if (!isNumber(self.peek(0)) or !isNumber(self.peek(1))) {
                        self.runtimeError("Operands must be numbers.", .{});
                        return InterpretResult.INTERPRET_RUNTIME_ERROR;
                    }
                    self.binaryOp(instrction);
                },
                OpCode.OP_NEGATE => {
                    if (!isNumber(self.peek(0))) {
                        self.runtimeError("Operand must be a number.", .{});
                        return InterpretResult.INTERPRET_RUNTIME_ERROR;
                    }
                    self.push(number2Value(-asNumber(self.pop())));
                },
                OpCode.OP_JUMP_IF_FALSE => {
                    const offset = frame.readShort();
                    if (isFalsey(self.peek(0))) frame.ip += offset;
                },
                OpCode.OP_JUMP => {
                    const offset = frame.readShort();
                    frame.ip += offset;
                },
                OpCode.OP_LOOP => {
                    // Backward jump.
                    const offset = frame.readShort();
                    frame.ip -= offset;
                },
                OpCode.OP_CALL => {
                    const argCount = frame.readByte();
                    if (!self.callValue(self.peek(argCount), argCount)) {
                        return InterpretResult.INTERPRET_RUNTIME_ERROR;
                    }
                    // callValue pushed a new frame; resume in it.
                    frame = &self.frames[vm.frameCount - 1];
                },
                OpCode.OP_CLOSURE => {
                    const fun = asFunction(frame.readConstant());
                    const closure = try heap.newClosure(fun);
                    self.push(objClosure2Value(closure));
                    // Each upvalue is encoded as (isLocal, index) byte pairs.
                    var i: usize = 0;
                    while (i < closure.fun.upvalueCount) : (i += 1) {
                        const isLocal = frame.readByte();
                        const index = frame.readByte();
                        if (isLocal == 1) {
                            try closure.upvalues.append((try self.captureUpvalue(&frame.slots[index])));
                        } else {
                            try closure.upvalues.append(frame.closure.upvalues.items[index]);
                        }
                    }
                },
                OpCode.OP_SET_UPVALUE => {
                    const slot = frame.readByte();
                    frame.closure.upvalues.items[slot].location.* = self.peek(0);
                },
                OpCode.OP_GET_UPVALUE => {
                    const slot = frame.readByte();
                    self.push(frame.closure.upvalues.items[slot].location.*);
                },
                OpCode.OP_CLOSE_UPVALUE => {
                    // NOTE(review): this opcode currently does nothing — the close
                    // call is commented out and the slot is not popped; verify
                    // against the compiler's emission of OP_CLOSE_UPVALUE.
                    //self.closeUpvalues(&self.peek(0));
                },
            }
        }
    }

    /// Close every open upvalue at or above `last` on the stack by copying
    /// the stack value into the upvalue and repointing it at its own storage.
    fn closeUpvalues(self: *VM, last: *Value) void {
        while (self.openUpvalue != null and @ptrToInt(self.openUpvalue.?.location) >= @ptrToInt(last)) {
            var upvalue = self.openUpvalue.?;
            upvalue.closed = upvalue.location.*;
            upvalue.location = &upvalue.closed;
            self.openUpvalue = upvalue.next;
        }
    }

    /// Find an existing open upvalue for `local`, or create one and insert it
    /// into the sorted (by stack address, descending) open-upvalue list.
    fn captureUpvalue(self: *VM, local: *Value) !*ObjUpvalue {
        var preUpvalue: ?*ObjUpvalue = null;
        var upvalue = vm.openUpvalue;
        while (upvalue != null and @ptrToInt(upvalue.?.location) > @ptrToInt(local)) {
            preUpvalue = upvalue;
            upvalue = upvalue.?.next;
        }
        if (upvalue != null and upvalue.?.location == local) {
            // Reuse: two closures capturing the same variable share one upvalue.
            return upvalue.?;
        }
        var createdUpvalue = try heap.newUpvalue(local);
        createdUpvalue.next = upvalue;
        if (preUpvalue == null) {
            vm.openUpvalue = createdUpvalue;
        } else {
            preUpvalue.?.next = createdUpvalue;
        }
        return createdUpvalue;
    }

    /// Dispatch a call on `callee`; only closures are callable.
    /// Returns false (after reporting a runtime error) otherwise.
    fn callValue(self: *VM, callee: Value, argCount: u8) bool {
        if (isClosure(callee)) {
            return self.call(asClosure(callee), argCount);
        }
        self.runtimeError("Can only call functions and classes.", .{});
        return false;
    }

    /// Push a new call frame for `closure`, checking arity and frame depth.
    fn call(self: *VM, closure: *ObjClosure, argCount: u8) bool {
        if (argCount != closure.fun.arity) {
            self.runtimeError("Expected {} arguments but got {}.", .{ closure.fun.arity, argCount });
            return false;
        }
        if (vm.frameCount == FRAMES_MAX) {
            self.runtimeError("Stack overflow.", .{});
            return false;
        }
        var frame = &vm.frames[vm.frameCount];
        frame.closure = closure;
        frame.ip = 0;
        // Slot 0 holds the callee itself; arguments follow.
        frame.slots = self.stack[self.stackTop - argCount - 1 ..];
        frame.lastStackTop = self.stackTop - argCount - 1;
        vm.frameCount += 1;
        return true;
    }

    // Pops two numbers and pushes the result of the arithmetic/comparison op.
    // Caller must have verified both operands are numbers.
    fn binaryOp(self: *VM, opCode: OpCode) void {
        const b = asNumber(self.pop());
        const a = asNumber(self.pop());
        switch (opCode) {
            OpCode.OP_ADD => self.push(number2Value(a + b)),
            OpCode.OP_SUBTRACT => self.push(number2Value(a - b)),
            OpCode.OP_MULTIPLY => self.push(number2Value(a * b)),
            OpCode.OP_DIVIDE => self.push(number2Value(a / b)),
            OpCode.OP_GREATER => self.push(bool2Value(a > b)),
            OpCode.OP_LESS => self.push(bool2Value(a < b)),
            else => unreachable,
        }
    }

    fn push(self: *VM, value: Value) void {
        self.stack[self.stackTop] = value;
        self.stackTop += 1;
    }

    fn pop(self: *VM) Value {
        self.stackTop -= 1;
        return self.stack[self.stackTop];
    }

    // Peek at a value `distance` slots below the top without popping.
    fn peek(self: *VM, distance: usize) Value {
        return self.stack[self.stackTop - 1 - distance];
    }

    /// Report a runtime error with a call-frame backtrace, then reset the stack.
    // NOTE(review): assumes frameCount >= 1; `frameCount - 1` would underflow
    // if called before any frame is pushed — confirm callers guarantee this.
    fn runtimeError(self: *VM, comptime msg: []const u8, format: anytype) void {
        print(msg, format);
        print("\n", .{});
        var i = vm.frameCount - 1;
        while (i >= 0) {
            const frame = vm.frames[i];
            const function = frame.closure.fun;
            const instruction = frame.ip;
            print("[line {}]", .{function.chunk.lines.items[instruction]});
            if (function.name) |name| {
                print("{}()\n", .{name.chars});
            } else {
                print("script\n", .{});
            }
            // usize can't go below 0; overflowing subtraction terminates the walk.
            if (@subWithOverflow(usize, i, 1, &i)) {
                break;
            }
        }
        self.restStack();
    }
};

pub const InterpretResult = enum(u2) { INTERPRET_OK, INTERPRET_COMPILE_ERROR, INTERPRET_RUNTIME_ERROR };

/// Compile `source` and run it on the global VM.
/// The push/pop around newClosure keeps `fun` reachable while allocating.
pub fn interpret(allocator: *std.mem.Allocator, source: []const u8) !InterpretResult {
    heap = Heap.init(allocator);
    const function = try compile(allocator, source);
    if (function) |fun| {
        vm.push(objFunction2Value(fun));
        const closure = try heap.newClosure(fun);
        _ = vm.pop();
        vm.push(objClosure2Value(closure));
        _ = vm.callValue(objClosure2Value(closure), 0);
        return try vm.run();
    } else {
        return InterpretResult.INTERPRET_COMPILE_ERROR;
    }
}
zvm/src/vm.zig
/// Driver for an Arm CMSDK timer peripheral accessed through four MMIO
/// registers at fixed offsets from `base`.
pub const CmsdkTimer = struct {
    base: usize,

    const Self = @This();

    /// Construct a `CmsdkTimer` object using the specified MMIO base address.
    pub fn withBase(base: usize) Self {
        return Self{ .base = base };
    }

    /// Control register (offset 0x00).
    pub fn regCtrl(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base);
    }

    /// Counter enable bit.
    pub const CTRL_EN: u32 = 1 << 0;
    /// Start the counter from the external input.
    pub const CTRL_EXT_ENABLE: u32 = 1 << 1;
    /// Clock the counter from the external clock input.
    pub const CTRL_EXT_CLOCK: u32 = 1 << 2;
    /// Interrupt enable bit.
    pub const CTRL_INT_EN: u32 = 1 << 3;

    /// Current counter value register (offset 0x04).
    pub fn regValue(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x04);
    }

    /// Reload value register (offset 0x08).
    pub fn regReload(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x08);
    }

    /// Interrupt status / clear register (offset 0x0c).
    pub fn regIntStatusClear(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x0c);
    }

    // Bug fix for all four setters below: the original wrote `self.regCtrl.*`,
    // which tries to dereference the method declaration itself (a compile
    // error) instead of calling the accessor and dereferencing the returned
    // register pointer: `self.regCtrl().*`.

    /// Enable or disable the counter (CTRL_EN).
    pub fn setEnable(self: Self, x: bool) void {
        if (x) {
            self.regCtrl().* |= CTRL_EN;
        } else {
            self.regCtrl().* &= ~CTRL_EN;
        }
    }

    /// Select the external clock (true) or the system clock (false) (CTRL_EXT_CLOCK).
    pub fn setClockSource(self: Self, x: bool) void {
        if (x) {
            self.regCtrl().* |= CTRL_EXT_CLOCK;
        } else {
            self.regCtrl().* &= ~CTRL_EXT_CLOCK;
        }
    }

    /// Gate the counter by the external input signal (CTRL_EXT_ENABLE).
    pub fn setStartByExternalInput(self: Self, x: bool) void {
        if (x) {
            self.regCtrl().* |= CTRL_EXT_ENABLE;
        } else {
            self.regCtrl().* &= ~CTRL_EXT_ENABLE;
        }
    }

    /// Enable or disable the timer interrupt (CTRL_INT_EN).
    pub fn setInterruptEnable(self: Self, x: bool) void {
        if (x) {
            self.regCtrl().* |= CTRL_INT_EN;
        } else {
            self.regCtrl().* &= ~CTRL_INT_EN;
        }
    }

    /// Read the current counter value.
    pub fn getValue(self: Self) u32 {
        return self.regValue().*;
    }

    /// Set the current counter value.
    pub fn setValue(self: Self, x: u32) void {
        self.regValue().* = x;
    }

    /// Read the reload (period) value.
    pub fn getReloadValue(self: Self) u32 {
        return self.regReload().*;
    }

    /// Set the reload (period) value.
    pub fn setReloadValue(self: Self, x: u32) void {
        self.regReload().* = x;
    }

    /// Acknowledge the timer interrupt by writing 1 to the clear register.
    pub fn clearInterruptFlag(self: Self) void {
        self.regIntStatusClear().* = 1;
    }
};
examples/drivers/cmsdk_timer.zig
const std = @import("std");
const os = std.os;
const warn = std.debug.warn;
const expectEqual = std.testing.expectEqual;

const readv = @import("c.zig").readv;
const MemSegments = @import("read_map.zig").Segments;
const NeedleType = @import("input.zig").NeedleType;

// TODO@Performance: A linked list is likely to offer better performance in the most common scenarios
pub const Addresses = std.ArrayList(usize);

/// Looks through a dereferenced segment of memory for values that match our needle.
/// Returns the index of the next match, or null if there are no remaining matches.
///
/// "byte_alignment" helps determine how much padding is placed between each item in memory.
/// Or for our purposes, how many bytes we can skip after each checked item.
/// If the alignment is its minimum value of 1, items are placed contiguously
/// in memory with no padding between them.
/// If the alignment, N, is greater than 1, there will exist padding between
/// items such that the starting byte of each item will be at
/// an index evenly divisible by N.
/// Example: [ u16, pad_byte, pad_byte, u24, pad_byte, u32 ]
///
/// On a match, `pos` is advanced to one byte past the match start, so
/// repeated calls walk through all matches. Note that alignment steps are
/// taken relative to `pos`, not to the start of `haystack`.
fn findMatches(pos: *usize, haystack: []const u8, needle: []const u8, byte_alignment: u8) ?usize {
    // Example situation: We're looking for a needle of 2 bytes in a haystack of 5 bytes.
    // The outer index will start at 0 and iterate through until it has finished an inner loop with the (5-2)nd byte.
    // The inner index will be iterated through upon each iteration of the outer index.
    // Visually it would look like:
    // [[1], 2, 3, 4, 5]   outer = 0, inner = 0
    // [[1, 2], 3, 4, 5]   outer = 0, inner = 1
    // [1, [2], 3, 4, 5]   outer = 1, inner = 0
    // ...
    // [1, 2, 3, [4, 5]]   outer = 3, inner = 1
    const new_haystack = haystack[pos.*..];
    var outer_index: usize = 0;
    while (outer_index < new_haystack.len) : (outer_index += byte_alignment) {
        // No more room left in haystack for needle matches.
        if (new_haystack.len < outer_index + needle.len) break;
        // Compare the needle byte-by-byte at this candidate offset.
        var is_match: bool = false;
        for (needle) |i, inner_index| {
            is_match = i == new_haystack[outer_index + inner_index];
            if (!is_match) {
                break;
            }
        }
        if (is_match) {
            // Report an absolute index into `haystack`, then advance `pos`
            // just past the match start so the next call resumes after it.
            const abs_index = outer_index + pos.*;
            pos.* += outer_index + 1;
            return abs_index;
        }
    }
    return null;
}

/// Takes a list of segments. Looks through each address for a match of our expected value.
/// The expected value is represented as a series of bytes.
/// Finally, returns a list of all memory addresses that contain the value we are looking for.
/// Returns null when no address matched; caller owns the returned list.
pub fn parseSegments(allocator: *std.mem.Allocator, pid: os.pid_t, segments: *MemSegments, expected_value: []const u8, byte_alignment: u8) !?Addresses {
    var potential_addresses = Addresses.init(allocator);
    errdefer potential_addresses.deinit();

    // One scratch buffer, regrown per segment to avoid per-segment alloc/free.
    var buffer = try allocator.alloc(u8, 0);
    defer allocator.free(buffer);

    for (segments.items) |segment| {
        buffer = try allocator.realloc(buffer, segment.len);
        const read_amount = readv(pid, buffer, segment.start) catch |err| {
            warn("Failed reading from segment: 0x{x}-0x{x} name \"{s}\"\n", .{ segment.start, segment.start + segment.len, segment.name });
            return err;
        };
        if (read_amount != buffer.len) {
            // Partial reads are reported but non-fatal: skip this segment.
            warn("Partially read from segment: 0x{x}-0x{x} name \"{s}\"\n", .{ segment.start, segment.start + segment.len, segment.name });
            warn("Expected to read {} bytes, instead read {} bytes\n", .{ segment.len, read_amount });
            continue;
        }
        // Collect every match in the segment as an absolute remote address.
        var pos: usize = 0;
        while (findMatches(&pos, buffer[0..read_amount], expected_value, byte_alignment)) |match_pos| {
            try potential_addresses.append(segment.start + match_pos);
        }
    }

    if (potential_addresses.items.len == 0) {
        return null;
    } else {
        return potential_addresses;
    }
}

// TODO@Performance
// it might be faster to simply readv the entire memory segment from the target process, at least when we're dealing with large haystacks.
// A single larger than necessary read will be faster than many small reads

/// Removes from `haystack` every address whose current remote value no
/// longer equals `needle`. Iterates backwards so `orderedRemove` never
/// shifts an element that has not yet been visited.
pub fn pruneAddresses(pid: os.pid_t, needle: []const u8, haystack: *Addresses) !void {
    var buffer = [_]u8{0} ** 256;
    // Bug fix: the original unconditionally computed `items.len - 1`, which
    // underflows a usize (and panics / wraps) when the list is empty.
    if (haystack.items.len == 0) return;
    var pos: usize = haystack.items.len - 1;
    while (true) {
        const ptr = haystack.items[pos];
        if (!isMatch(pid, buffer[0..], needle, ptr)) {
            _ = haystack.orderedRemove(pos);
        }
        if (pos == 0) break;
        pos -= 1;
    }
}

/// Reads `expected.len` bytes from the remote process at `ptr` and compares
/// them with `expected`. Read failures are logged and treated as non-matches.
fn isMatch(pid: os.pid_t, buffer: []u8, expected: []const u8, ptr: usize) bool {
    std.debug.assert(buffer.len >= expected.len);
    const read_amount = readv(pid, buffer[0..expected.len], ptr) catch |err| {
        warn("{} reading ptr: {x}\n", .{ err, ptr });
        return false;
    };
    std.debug.assert(read_amount == expected.len);
    return std.mem.eql(u8, expected, buffer[0..read_amount]);
}

/// Reads from a remote process at the given address.
/// Reads as much as the buffer will allow.
/// Errors if the full buffer could not be filled.
pub fn readRemote(buffer: []u8, pid: os.pid_t, address: usize) !usize {
    const read = try readv(pid, buffer[0..], address);
    if (read != buffer.len) return error.RemoteReadAmountMismatch;
    return read;
}

/// Reads the value located at an address.
/// Prints that value to the buffer as a string.
/// Returns the number of bytes written into `buffer`.
pub fn readToBufferAs(comptime T: type, buffer: []u8, pid: os.pid_t, address: usize) !usize {
    if (@typeInfo(T) != .Int and @typeInfo(T) != .Float) return @compileError("readToBufferAs requires an int or float type\n");
    var result_buffer = [_]u8{0} ** @sizeOf(T);
    const read_amount = try readRemote(result_buffer[0..], pid, address);
    // Reinterpret the raw bytes as T; align(1) because the stack buffer
    // carries no alignment guarantee for T.
    const result = @ptrCast(*align(1) T, result_buffer[0..read_amount]).*;
    return (try std.fmt.bufPrint(buffer, "{}", .{result})).len;
}

test "find matches with 1 byte alignment" {
    const haystack = &[_]u8{ 11, 12, 13, 14, 15 };
    var pos: usize = 0;

    // Normal use.
    try expectEqual(@as(?usize, 0), findMatches(&pos, haystack, &[_]u8{11}, 1));
    try expectEqual(@as(?usize, 1), pos);
    pos = 0;
    try expectEqual(@as(?usize, 0), findMatches(&pos, haystack, &[_]u8{ 11, 12, 13, 14, 15 }, 1));
    pos = 0;
    try expectEqual(@as(?usize, 2), findMatches(&pos, haystack, &[_]u8{ 13, 14 }, 1));
    try expectEqual(@as(?usize, 3), pos);
    pos = 0;
    try expectEqual(@as(?usize, 4), findMatches(&pos, haystack, &[_]u8{15}, 1));
    try expectEqual(@as(?usize, 5), pos);

    // Calling function after a simluated match, where our needle is not in the adjusted buffer.
    pos = 1;
    try expectEqual(@as(?usize, null), findMatches(&pos, haystack, &[_]u8{ 11, 12, 13, 14, 15 }, 1));
    try expectEqual(@as(?usize, 1), pos);
    try expectEqual(@as(?usize, null), findMatches(&pos, haystack, &[_]u8{ 11, 12, 13, 14 }, 1));
    pos = 0;

    // Calling function after a simluated match, where our needle is in the adjusted buffer.
    pos = 2;
    try expectEqual(@as(?usize, 2), findMatches(&pos, haystack, &[_]u8{ 13, 14 }, 1));

    // Needle is not in buffer
    pos = 0;
    try expectEqual(@as(?usize, null), findMatches(&pos, haystack, &[_]u8{ 9, 5, 8, 7, 3 }, 1));
    try expectEqual(@as(?usize, 0), pos);
    try expectEqual(@as(?usize, null), findMatches(&pos, haystack, &[_]u8{ 11, 12, 13, 14, 17 }, 1));
    try expectEqual(@as(?usize, 0), pos);
    try expectEqual(@as(?usize, null), findMatches(&pos, haystack, &[_]u8{ 13, 13, 14 }, 1));
    try expectEqual(@as(?usize, 0), pos);

    // Needle is larger than adjusted buffer, but not full buffer.
    pos = 3;
    try expectEqual(@as(?usize, null), findMatches(&pos, haystack, &[_]u8{ 14, 15, 16 }, 1));
}

test "find matches with variable byte alignment" {
    var haystack: [100]u8 = undefined;
    for (haystack) |*i, index| i.* = @intCast(u8, index);
    const hay = haystack[0..];

    var pos: usize = 0;
    try expectEqual(@as(?usize, 4), findMatches(&pos, hay, &[_]u8{ 4, 5, 6 }, 4));
    try expectEqual(@as(?usize, null), findMatches(&pos, hay, &[_]u8{ 4, 5, 6 }, 4));
    pos = 0;
    try expectEqual(@as(?usize, 0), findMatches(&pos, hay, hay, 30));
    pos = 0;
    try expectEqual(@as(?usize, null), findMatches(&pos, hay, &[_]u8{1}, 2));
    try expectEqual(@as(?usize, 0), pos);
    pos = 0;
    try expectEqual(@as(?usize, 0), findMatches(&pos, hay, &[_]u8{0}, 4));
    try expectEqual(@as(?usize, 1), pos);
    pos = 0;
    try expectEqual(@as(?usize, 8), findMatches(&pos, hay, &[_]u8{ 8, 9, 10, 11, 12, 13, 14, 15, 16 }, 8));
    pos = 0;
}
src/memory.zig
//! C library bindings for Haiku: libroot symbols, Haiku-specific kernel
//! syscalls (`_kern_*`), pthread/semaphore ABI structs, and `getaddrinfo`
//! error codes.
const std = @import("../std.zig");
const builtin = std.builtin;
usingnamespace std.c;

// Haiku has no global `errno` variable; libroot exposes it via a function
// returning a pointer to the thread's errno slot.
extern "c" fn _errnop() *c_int;
pub const _errno = _errnop;

pub extern "c" fn find_directory(which: c_int, volume: i32, createIt: bool, path_ptr: [*]u8, length: i32) u64;

// NOTE(review): the parameter is typed `?*c_void` but named `thread_name` —
// confirm against the Haiku headers whether this should be a name string.
pub extern "c" fn find_thread(thread_name: ?*c_void) i32;

pub extern "c" fn get_system_info(system_info: *system_info) usize;

// TODO revisit if abi changes or better option becomes apparent
pub extern "c" fn _get_next_image_info(team: c_int, cookie: *i32, image_info: *image_info) usize;

pub extern "c" fn _kern_read_dir(fd: c_int, buf_ptr: [*]u8, nbytes: usize, maxcount: u32) usize;

pub extern "c" fn _kern_read_stat(fd: c_int, path_ptr: [*]u8, traverse_link: bool, libc_stat: *libc_stat, stat_size: i32) usize;

pub extern "c" fn _kern_get_current_team() i32;

// ABI-compatible mirror of Haiku's sem_t; field layout must match libroot.
pub const sem_t = extern struct {
    _magic: u32,
    _kern: extern struct {
        _count: u32,
        _flags: u32,
    },
    _padding: u32,
};

pub const pthread_attr_t = extern struct {
    __detach_state: i32,
    __sched_priority: i32,
    __stack_size: i32,
    __guard_size: i32,
    __stack_address: ?*c_void,
};

// Default field values mirror Haiku's static initializers for these types.
pub const pthread_mutex_t = extern struct {
    flags: u32 = 0,
    lock: i32 = 0,
    unused: i32 = -42,
    owner: i32 = -1,
    owner_count: i32 = 0,
};

pub const pthread_cond_t = extern struct {
    flags: u32 = 0,
    unused: i32 = -42,
    mutex: ?*c_void = null,
    waiter_count: i32 = 0,
    lock: i32 = 0,
};

pub const pthread_rwlock_t = extern struct {
    flags: u32 = 0,
    owner: i32 = -1,
    lock_sem: i32 = 0,
    lock_count: i32 = 0,
    reader_count: i32 = 0,
    writer_count: i32 = 0,
    waiters: [2]?*c_void = [_]?*c_void{ null, null },
};

/// `getaddrinfo` error codes (EAI_*).
pub const EAI = extern enum(c_int) {
    /// address family for hostname not supported
    ADDRFAMILY = 1,

    /// name could not be resolved at this time
    AGAIN = 2,

    /// flags parameter had an invalid value
    BADFLAGS = 3,

    /// non-recoverable failure in name resolution
    FAIL = 4,

    /// address family not recognized
    FAMILY = 5,

    /// memory allocation failure
    MEMORY = 6,

    /// no address associated with hostname
    NODATA = 7,

    /// name does not resolve
    NONAME = 8,

    /// service not recognized for socket type
    SERVICE = 9,

    /// intended socket type was not recognized
    SOCKTYPE = 10,

    /// system error returned in errno
    SYSTEM = 11,

    /// invalid value for hints
    BADHINTS = 12,

    /// resolved protocol is unknown
    PROTOCOL = 13,

    /// argument buffer overflow
    OVERFLOW = 14,

    _,
};

pub const EAI_MAX = 15;
lib/std/c/haiku.zig
/// Puzzle input data as a compile-time array of entries.
/// NOTE(review): the values cluster around 2020, which suggests this is an
/// Advent of Code 2020 day 1 expense report ("find entries summing to 2020")
/// — confirm against the consumer of src/input01.zig.
pub const numbers = [_]usize{
    1539, 1914, 1866, 1407, 1706, 1423, 1834, 1700, 1573, 1486,
    1743, 1394, 1693, 1705, 1530, 1811, 1626, 1473, 1901, 1481,
    1527, 1841, 1891, 1750, 1343, 1899, 401,  1896, 1627, 1593,
    1541, 874,  1484, 1210, 1692, 1963, 1964, 1780, 671,  1862,
    1393, 1309, 1740, 1831, 1932, 1185, 1979, 1504, 1663, 1610,
    1494, 1511, 1103, 1738, 1816, 1871, 1545, 1595, 1784, 1412,
    1815, 1998, 1783, 1770, 1426, 1699, 1416, 1880, 1612, 1989,
    1360, 1869, 1762, 1690, 1999, 1990, 1521, 1730, 703,  1463,
    1670, 1472, 1413, 1669, 1502, 1548, 1475, 1694, 1314, 1980,
    980,  1667, 890,  1569, 1456, 1406, 1924, 1973, 1965, 1533,
    1827, 2000, 1847, 1520, 1729, 1512, 1555, 1566, 1505, 1672,
    1169, 1835, 1850, 1493, 1861, 1288, 1675, 1676, 1556, 1320,
    1757, 1870, 1642, 1903, 1372, 1967, 1894, 176,  1908, 1418,
    1535, 1487, 1496, 1491, 1611, 1970, 1758, 1563, 1766, 1629,
    1937, 1763, 1829, 1772, 1632, 1517, 1736, 1971, 1721, 1716,
    1429, 1408, 1560, 1958, 1359, 1890, 1825, 1536, 1819, 1697,
    1887, 1832, 2005, 892,  1471, 1425, 1677, 1673, 1128, 1878,
    1062, 1470, 1875, 1854, 1518, 1568, 1919, 256,  1532, 1711,
    1944, 1344, 1330, 1636, 1957, 1709, 1551, 1983, 1674, 1671,
    1959, 1760, 1689, 1767, 1477, 1589, 1897, 1144, 1982, 1544,
};
src/input01.zig
//! Command: a named CLI command holding its own arguments, nested
//! subcommands and parse-time settings. Actual parsing is delegated to
//! `parser.zig`.
const Command = @This();
const std = @import("std");
const parser = @import("parser.zig");
const Arg = @import("Arg.zig");
const ArgMatches = @import("arg_matches.zig").ArgMatches;

const mem = std.mem;
const ArrayList = std.ArrayList;
const Allocator = mem.Allocator;

/// Flags that influence how this command is parsed.
const Setting = struct {
    takes_value: bool,
    arg_required: bool,
    subcommand_required: bool,

    /// All flags start out disabled.
    pub fn initDefault() Setting {
        return Setting{
            .takes_value = false,
            .arg_required = false,
            .subcommand_required = false,
        };
    }
};

allocator: Allocator,
name: []const u8,
about: ?[]const u8,
args: ArrayList(Arg),
subcommands: ArrayList(Command),
setting: Setting,

/// Create a command named `name` with no help text.
/// `name` is not copied; the caller keeps ownership of the slice.
pub fn new(allocator: Allocator, name: []const u8) Command {
    return Command{
        .allocator = allocator,
        .name = name,
        .about = null,
        .args = ArrayList(Arg).init(allocator),
        .subcommands = ArrayList(Command).init(allocator),
        .setting = Setting.initDefault(),
    };
}

/// Same as `new`, but also sets the one-line help/about text.
pub fn newWithHelpTxt(allocator: Allocator, name: []const u8, about: []const u8) Command {
    var self = Command.new(allocator, name);
    self.about = about;
    return self;
}

/// Release the argument list and recursively deinitialize all subcommands.
pub fn deinit(self: *Command) void {
    self.args.deinit();
    for (self.subcommands.items) |*subcommand| {
        subcommand.deinit();
    }
    self.subcommands.deinit();
}

/// Register an argument on this command.
pub fn addArg(self: *Command, new_arg: Arg) !void {
    return self.args.append(new_arg);
}

/// Register a nested subcommand. Ownership transfers to this command;
/// it is released by `deinit`.
pub fn addSubcommand(self: *Command, new_subcommand: Command) !void {
    return self.subcommands.append(new_subcommand);
}

/// Declare an argument `arg_name` that takes exactly one value.
pub fn takesSingleValue(self: *Command, arg_name: []const u8) !void {
    try self.takesNValues(arg_name, 1);
}

/// Declare an argument `arg_name` that takes between 1 and `n` values.
/// When more than one value is allowed, values are comma-delimited.
pub fn takesNValues(self: *Command, arg_name: []const u8, n: usize) !void {
    var arg = Arg.new(arg_name);
    arg.minValues(1);
    arg.maxValues(n);
    if (n > 1) arg.valuesDelimiter(",");

    try self.addArg(arg);
    self.setting.takes_value = true;
}

/// Require at least one argument when this command is invoked.
pub fn argRequired(self: *Command, boolean: bool) void {
    self.setting.arg_required = boolean;
}

/// Require a subcommand when this command is invoked.
pub fn subcommandRequired(self: *Command, boolean: bool) void {
    self.setting.subcommand_required = boolean;
}

/// Parse the current process's command line (the program name, argv[0],
/// is skipped).
pub fn parseProcess(self: *Command) parser.Error!ArgMatches {
    const process_args = try std.process.argsAlloc(self.allocator);
    // Bug fix: this previously had BOTH `defer` and `errdefer` freeing
    // `process_args`, so the error path freed the slice twice. A plain
    // `defer` already runs on every exit, success or error.
    defer std.process.argsFree(self.allocator, process_args);
    // NOTE(review): `process_args` is freed when this function returns;
    // confirm that `parser.parse` copies any argv slices it stores into the
    // returned ArgMatches, otherwise those slices dangle for the caller.

    if (process_args.len > 1) {
        return self.parseFrom(process_args[1..]);
    }
    return self.parseFrom(&[_][:0]const u8{});
}

/// Parse an explicit argv slice (without the program name).
pub fn parseFrom(self: *Command, argv: []const [:0]const u8) parser.Error!ArgMatches {
    return parser.parse(self.allocator, argv, self);
}
src/Command.zig
// Advent of Code 2020, day 16: "Ticket Translation".
const std = @import("std");
const input = @embedFile("data/input16");
// Provides `print` and `lines` (a line iterator) used below.
usingnamespace @import("util.zig");

pub fn main() !void {
    // Arena allocator: all ticket/class allocations are freed in one shot.
    var allocator_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer allocator_state.deinit();
    const allocator = &allocator_state.allocator;
    const result = scanTickets(allocator, input);
    print("[Part1] Error rate: {}", .{result.error_rate});
    print("[Part2] Departure product: {}", .{result.departure_product});
}

/// Inclusive range of valid field values.
const Range = struct {
    min: u32,
    max: u32,
    pub fn contains(self: Range, n: u32) bool {
        return self.min <= n and n <= self.max;
    }
};

/// One field rule: a name plus two valid value ranges.
const Class = struct {
    name: []const u8,
    range1: Range,
    range2: Range,
    // Bitset of ticket positions this class could still occupy. Starts as
    // "all positions possible" and is narrowed while scanning tickets.
    position_bits: u32 = ~@as(u32, 0),
    pub fn isValid(self: Class, n: u32) bool {
        return self.range1.contains(n) or self.range2.contains(n);
    }
};

const Ticket = struct { values: []u32 };

/// error_rate is the part-1 answer; departure_product the part-2 answer.
const Result = struct { error_rate: u32, departure_product: u64 };

/// Parses the field rules, "your ticket" and the nearby tickets from
/// `input_str`, then solves both parts. Panics on malformed input.
/// All allocations come from `allocator` (an arena in `main`).
fn scanTickets(allocator: *std.mem.Allocator, input_str: []const u8) Result {
    var classes = std.ArrayList(Class).init(allocator);
    defer classes.deinit();
    var result = Result{ .error_rate = 0, .departure_product = 1 };
    var reader = lines(input_str);

    // Phase 1: parse rule lines of the form "name: a-b or c-d" until the
    // "your ticket:" header is reached.
    while (reader.next()) |line| {
        const class_name = line[0..std.mem.indexOf(u8, line, ":").?];
        if (std.mem.eql(u8, class_name, "your ticket")) {
            break;
        } else {
            // Skip "name: " then split the rest on '-', space, 'o', 'r'.
            var rule_tokens = std.mem.tokenize(line[class_name.len+2..], " -or");
            classes.append(.{
                .name = class_name,
                .range1 = .{
                    .min = std.fmt.parseUnsigned(u32, rule_tokens.next().?, 10) catch @panic("range1.min failed"),
                    .max = std.fmt.parseUnsigned(u32, rule_tokens.next().?, 10) catch @panic("range1.max failed"),
                },
                .range2 = .{
                    .min = std.fmt.parseUnsigned(u32, rule_tokens.next().?, 10) catch @panic("range2.min failed"),
                    .max = std.fmt.parseUnsigned(u32, rule_tokens.next().?, 10) catch @panic("range2.max failed"),
                },
            }) catch @panic("append failed");
        }
    }

    const my_ticket = parseTicket(allocator, classes.items.len, reader.next().?);
    _ = reader.next(); // skip "nearby tickets" header

    // Phase 2 (part 1): sum values valid for no class at all, and drop
    // tickets containing such a value from the candidate list.
    var tickets = std.ArrayList(Ticket).init(allocator);
    while (reader.next()) |line| {
        const ticket = parseTicket(allocator, classes.items.len, line);
        tickets.append(ticket) catch @panic("tickets append failed");
        for (ticket.values) |v| {
            for (classes.items) |class| {
                if (class.isValid(v)) {
                    break;
                }
            } else {
                // No class accepts v: count it and discard the ticket
                // (it was just appended, so pop removes exactly it).
                result.error_rate += v;
                _ = tickets.pop();
                break;
            }
        }
    }

    // Phase 3: for every remaining ticket, clear the position bit of any
    // class that rejects the value seen at that position.
    for (tickets.items) |ticket| {
        for (ticket.values) |v, i| {
            const position_bit = @as(u32, 1) << @intCast(u5, i);
            for (classes.items) |*class| {
                if ((class.position_bits & position_bit) != 0 and !class.isValid(v)) {
                    class.position_bits &= ~position_bit;
                }
            }
        }
    }

    // Phase 4 (part 2): elimination. Repeatedly find a class with exactly
    // one possible position, assign it, and clear that position everywhere.
    // Only the low `classes.items.len` bits are meaningful.
    var useful_bits_mask: u32 = (@as(u32, 1) << @intCast(u5, classes.items.len)) - 1;
    var num_matched: usize = 0;
    while (num_matched < classes.items.len) : (num_matched += 1) {
        // find a class that only matches 1 field
        const class_index = for (classes.items) |c, i| {
            if (@popCount(u32, c.position_bits & useful_bits_mask) == 1) {
                break i;
            }
        } else @panic("multiple classes match the same fields");
        const class = classes.items[class_index];
        // Lowest remaining set bit is the assigned field position.
        const field_index = @ctz(u32, class.position_bits);
        if (std.mem.startsWith(u8, class.name, "departure")) {
            result.departure_product *= my_ticket.values[field_index];
        }
        // remove that position bit from all the classes
        // (including the matched one, whose masked popcount drops to 0)
        for (classes.items) |*c, i| {
            c.position_bits &= ~(@as(u32, 1) << @intCast(u5, field_index));
        }
    }
    return result;
}

/// Parse one comma-separated ticket line into exactly `class_count` values.
/// The returned slice is owned by `allocator` (arena-freed by the caller).
fn parseTicket(allocator: *std.mem.Allocator, class_count: usize, line: []const u8) Ticket {
    const ticket = Ticket{ .values = allocator.alloc(u32, class_count) catch @panic("alloc failed") };
    var tokens = std.mem.tokenize(line, ",");
    var i: usize = 0;
    while (tokens.next()) |number_str| : (i += 1) {
        ticket.values[i] = std.fmt.parseUnsigned(u32, number_str, 10) catch @panic("ticket number failed");
    }
    return ticket;
}

const expectEqual = std.testing.expectEqual;

// Checks the part-1 error rate against the puzzle's worked example (71).
test "findTicketScanningErrorRate" {
    var allocator_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer allocator_state.deinit();
    const allocator = &allocator_state.allocator;
    const result = scanTickets(allocator, testInput);
    expectEqual(@as(u32, 71), result.error_rate);
}

// Example input from the puzzle statement.
const testInput =
    \\class: 1-3 or 5-7
    \\row: 6-11 or 33-44
    \\seat: 13-40 or 45-50
    \\
    \\your ticket:
    \\7,1,14
    \\
    \\nearby tickets:
    \\7,3,47
    \\40,4,50
    \\55,2,20
    \\38,6,12
;
src/day16.zig
const std = @import("std"); const c = @import("internal/c.zig"); const internal = @import("internal/internal.zig"); const log = std.log.scoped(.git); const git = @import("git.zig"); /// This type bundles all functionality that does not act on an instance of an object pub const Handle = struct { /// De-initialize the libraries global state. /// *NOTE*: should be called as many times as `init` was called. pub fn deinit(self: Handle) void { _ = self; log.debug("Handle.deinit called", .{}); const number = internal.wrapCallWithReturn("git_libgit2_shutdown", .{}) catch unreachable; if (number == 0) { log.debug("libgit2 shutdown successful", .{}); } else { log.debug("{} initializations have not been shutdown (after this one)", .{number}); } } /// Create a new bare Git index object as a memory representation of the Git index file in `path`, without a repository to /// back it. /// /// ## Parameters /// * `path` - the path to the index pub fn indexOpen(self: Handle, path: [:0]const u8) !*git.Index { _ = self; log.debug("Handle.indexOpen called, path={s}", .{path}); var index: *git.Index = undefined; try internal.wrapCall("git_index_open", .{ @ptrCast(*?*c.git_index, &index), path.ptr, }); log.debug("index opened successfully", .{}); return index; } /// Create an in-memory index object. /// /// This index object cannot be read/written to the filesystem, but may be used to perform in-memory index operations. pub fn indexNew(self: Handle) !*git.Index { _ = self; log.debug("Handle.indexInit called", .{}); var index: *git.Index = undefined; try internal.wrapCall("git_index_new", .{ @ptrCast(*?*c.git_index, &index), }); log.debug("index created successfully", .{}); return index; } /// Create a new repository in the given directory. /// /// ## Parameters /// * `path` - the path to the repository /// * `is_bare` - If true, a Git repository without a working directory is created at the pointed path. 
/// If false, provided path will be considered as the working directory into which the .git directory will be /// created. pub fn repositoryInit(self: Handle, path: [:0]const u8, is_bare: bool) !*git.Repository { _ = self; log.debug("Handle.repositoryInit called, path={s}, is_bare={}", .{ path, is_bare }); var repo: *git.Repository = undefined; try internal.wrapCall("git_repository_init", .{ @ptrCast(*?*c.git_repository, &repo), path.ptr, @boolToInt(is_bare), }); log.debug("repository created successfully", .{}); return repo; } /// Create a new repository in the given directory with extended options. /// /// ## Parameters /// * `path` - the path to the repository /// * `options` - The options to use during the creation of the repository pub fn repositoryInitExtended(self: Handle, path: [:0]const u8, options: RepositoryInitOptions) !*git.Repository { _ = self; log.debug("Handle.repositoryInitExtended called, path={s}, options={}", .{ path, options }); var repo: *git.Repository = undefined; var c_options = options.makeCOptionObject(); try internal.wrapCall("git_repository_init_ext", .{ @ptrCast(*?*c.git_repository, &repo), path.ptr, &c_options, }); log.debug("repository created successfully", .{}); return repo; } pub const RepositoryInitOptions = struct { flags: RepositoryInitExtendedFlags = .{}, mode: InitMode = .shared_umask, /// The path to the working dir or `null` for default (i.e. repo_path parent on non-bare repos). /// *NOTE*: if this is a relative path, it must be relative to the repository path. /// If this is not the "natural" working directory, a .git gitlink file will be created linking to the repository path. workdir_path: ?[:0]const u8 = null, /// A "description" file to be used in the repository, instead of using the template content. description: ?[:0]const u8 = null, /// When `RepositoryInitExtendedFlags.external_template` is set, this must contain the path to use for the template /// directory. 
If this is `null`, the config or default directory options will be used instead. template_path: ?[:0]const u8 = null, /// The name of the head to point HEAD at. If `null`, then this will be treated as "master" and the HEAD ref will be set /// to "refs/heads/master". /// If this begins with "refs/" it will be used verbatim; otherwise "refs/heads/" will be prefixed. initial_head: ?[:0]const u8 = null, /// If this is non-`null`, then after the rest of the repository initialization is completed, an "origin" remote will be /// added pointing to this URL. origin_url: ?[:0]const u8 = null, pub const RepositoryInitExtendedFlags = packed struct { /// Create a bare repository with no working directory. bare: bool = false, /// Return an `GitError.EXISTS` error if the path appears to already be an git repository. no_reinit: bool = false, /// Normally a "/.git/" will be appended to the repo path for non-bare repos (if it is not already there), but passing /// this flag prevents that behavior. no_dotgit_dir: bool = false, /// Make the repo_path (and workdir_path) as needed. Init is always willing to create the ".git" directory even /// without this flag. This flag tells init to create the trailing component of the repo and workdir paths as needed. mkdir: bool = false, /// Recursively make all components of the repo and workdir paths as necessary. mkpath: bool = false, /// libgit2 normally uses internal templates to initialize a new repo. /// This flag enables external templates, looking at the "template_path" from the options if set, or the /// `init.templatedir` global config if not, or falling back on "/usr/share/git-core/templates" if it exists. external_template: bool = false, /// If an alternate workdir is specified, use relative paths for the gitdir and core.worktree. 
relative_gitlink: bool = false, z_padding: std.meta.Int(.unsigned, @bitSizeOf(c_uint) - 7) = 0, pub fn toInt(self: RepositoryInitExtendedFlags) c_uint { return @bitCast(c_uint, self); } pub fn format( value: RepositoryInitExtendedFlags, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { _ = fmt; return internal.formatWithoutFields( value, options, writer, &.{"z_padding"}, ); } test { try std.testing.expectEqual(@sizeOf(c_uint), @sizeOf(RepositoryInitExtendedFlags)); try std.testing.expectEqual(@bitSizeOf(c_uint), @bitSizeOf(RepositoryInitExtendedFlags)); } comptime { std.testing.refAllDecls(@This()); } }; pub const InitMode = union(enum) { /// Use permissions configured by umask - the default. shared_umask: void, /// Use "--shared=group" behavior, chmod'ing the new repo to be group writable and "g+sx" for sticky group assignment. shared_group: void, /// Use "--shared=all" behavior, adding world readability. shared_all: void, custom: c_uint, pub fn toInt(self: InitMode) c_uint { return switch (self) { .shared_umask => 0, .shared_group => 0o2775, .shared_all => 0o2777, .custom => |custom| custom, }; } }; pub fn makeCOptionObject(self: RepositoryInitOptions) c.git_repository_init_options { return .{ .version = c.GIT_REPOSITORY_INIT_OPTIONS_VERSION, .flags = self.flags.toInt(), .mode = self.mode.toInt(), .workdir_path = if (self.workdir_path) |slice| slice.ptr else null, .description = if (self.description) |slice| slice.ptr else null, .template_path = if (self.template_path) |slice| slice.ptr else null, .initial_head = if (self.initial_head) |slice| slice.ptr else null, .origin_url = if (self.origin_url) |slice| slice.ptr else null, }; } comptime { std.testing.refAllDecls(@This()); } }; /// Open a repository. 
/// /// ## Parameters /// * `path` - the path to the repository pub fn repositoryOpen(self: Handle, path: [:0]const u8) !*git.Repository { _ = self; log.debug("Handle.repositoryOpen called, path={s}", .{path}); var repo: *git.Repository = undefined; try internal.wrapCall("git_repository_open", .{ @ptrCast(*?*c.git_repository, &repo), path.ptr, }); log.debug("repository opened successfully", .{}); return repo; } /// Find and open a repository with extended options. /// /// *NOTE*: `path` can only be null if the `open_from_env` option is used. /// /// ## Parameters /// * `path` - the path to the repository /// * `flags` - options controlling how the repository is opened /// * `ceiling_dirs` - A `PATH_LIST_SEPARATOR` delimited list of path prefixes at which the search for a containing /// repository should terminate. pub fn repositoryOpenExtended( self: Handle, path: ?[:0]const u8, flags: RepositoryOpenOptions, ceiling_dirs: ?[:0]const u8, ) !*git.Repository { _ = self; log.debug("Handle.repositoryOpenExtended called, path={s}, flags={}, ceiling_dirs={s}", .{ path, flags, ceiling_dirs }); var repo: *git.Repository = undefined; const path_temp: [*c]const u8 = if (path) |slice| slice.ptr else null; const ceiling_dirs_temp: [*c]const u8 = if (ceiling_dirs) |slice| slice.ptr else null; try internal.wrapCall("git_repository_open_ext", .{ @ptrCast(*?*c.git_repository, &repo), path_temp, flags.toInt(), ceiling_dirs_temp, }); log.debug("repository opened successfully", .{}); return repo; } pub const RepositoryOpenOptions = packed struct { /// Only open the repository if it can be immediately found in the path. Do not walk up the directory tree to look for it. no_search: bool = false, /// Unless this flag is set, open will not search across filesystem boundaries. cross_fs: bool = false, /// Open repository as a bare repo regardless of core.bare config. 
bare: bool = false, /// Do not check for a repository by appending /.git to the path; only open the repository if path itself points to the /// git directory. no_dotgit: bool = false, /// Find and open a git repository, respecting the environment variables used by the git command-line tools. If set, /// `Handle.repositoryOpenExtended` will ignore the other flags and the `ceiling_dirs` argument, and will allow a `null` /// `path` to use `GIT_DIR` or search from the current directory. open_from_env: bool = false, z_padding: std.meta.Int(.unsigned, @bitSizeOf(c_uint) - 5) = 0, pub fn toInt(self: RepositoryOpenOptions) c_uint { return @bitCast(c_uint, self); } pub fn format( value: RepositoryOpenOptions, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { _ = fmt; return internal.formatWithoutFields( value, options, writer, &.{"z_padding"}, ); } test { try std.testing.expectEqual(@sizeOf(c_uint), @sizeOf(RepositoryOpenOptions)); try std.testing.expectEqual(@bitSizeOf(c_uint), @bitSizeOf(RepositoryOpenOptions)); } comptime { std.testing.refAllDecls(@This()); } }; /// Open a bare repository. /// /// ## Parameters /// * `path` - the path to the repository pub fn repositoryOpenBare(self: Handle, path: [:0]const u8) !*git.Repository { _ = self; log.debug("Handle.repositoryOpenBare called, path={s}", .{path}); var repo: *git.Repository = undefined; try internal.wrapCall("git_repository_open_bare", .{ @ptrCast(*?*c.git_repository, &repo), path.ptr, }); log.debug("repository opened successfully", .{}); return repo; } /// Look for a git repository and return its path. /// /// The lookup starts from `start_path` and walks the directory tree until the first repository is found, or when reaching a /// directory referenced in `ceiling_dirs` or when the filesystem changes (when `across_fs` is false). /// /// ## Parameters /// * `start_path` - The path where the lookup starts. 
/// * `across_fs` - If true, then the lookup will not stop when a filesystem device change is encountered. /// * `ceiling_dirs` - A `PATH_LIST_SEPARATOR` separated list of absolute symbolic link free paths. The lookup will stop /// when any of this paths is reached. pub fn repositoryDiscover(self: Handle, start_path: [:0]const u8, across_fs: bool, ceiling_dirs: ?[:0]const u8) !git.Buf { _ = self; log.debug( "Handle.repositoryDiscover called, start_path={s}, across_fs={}, ceiling_dirs={s}", .{ start_path, across_fs, ceiling_dirs }, ); var buf: git.Buf = .{}; const ceiling_dirs_temp: [*c]const u8 = if (ceiling_dirs) |slice| slice.ptr else null; try internal.wrapCall("git_repository_discover", .{ @ptrCast(*c.git_buf, &buf), start_path.ptr, @boolToInt(across_fs), ceiling_dirs_temp, }); log.debug("repository discovered - {s}", .{buf.toSlice()}); return buf; } pub const CloneOptions = struct { /// Options to pass to the checkout step. checkout_options: git.Repository.CheckoutOptions = .{}, // options which control the fetch, including callbacks. Callbacks are for reporting fetch progress, and for // acquiring credentials in the event they are needed. fetch_options: git.Remote.FetchOptions = .{}, /// Set false (default) to create a standard repo or true for a bare repo. bare: bool = false, /// Whether to use a fetch or a copy of the object database. local: LocalType = .LOCAL_AUTO, /// Branch of the remote repository to checkout. `null` means the default. checkout_branch: ?[:0]const u8 = null, /// A callback used to create the new repository into which to clone. If `null` the `bare` field will be used to /// determine whether to create a bare repository. /// /// Return 0, or a negative value to indicate error /// /// ## Parameters /// * `out` - the resulting repository /// * `path` - path in which to create the repository /// * `bare` - whether the repository is bare. 
This is the value from the clone options /// * `payload` - payload specified by the options repository_cb: ?fn ( out: **git.Repository, path: [*:0]const u8, bare: bool, payload: *anyopaque, ) callconv(.C) void = null, /// An opaque payload to pass to the `repository_cb` creation callback. /// This parameter is ignored unless repository_cb is non-`null`. repository_cb_payload: ?*anyopaque = null, /// A callback used to create the git remote, prior to its being used to perform the clone option. /// This parameter may be `null`, indicating that `Handle.clone` should provide default behavior. /// /// Return 0, or an error code /// /// ## Parameters /// * `out` - the resulting remote /// * `repo` - the repository in which to create the remote /// * `name` - the remote's name /// * `url` - the remote's url /// * `payload` - an opaque payload remote_cb: ?fn ( out: **git.Remote, repo: *git.Repository, name: [*:0]const u8, url: [*:0]const u8, payload: ?*anyopaque, ) callconv(.C) void = null, remote_cb_payload: ?*anyopaque = null, /// Options for bypassing the git-aware transport on clone. Bypassing it means that instead of a fetch, /// libgit2 will copy the object database directory instead of figuring out what it needs, which is faster. pub const LocalType = enum(c_uint) { /// Auto-detect (default), libgit2 will bypass the git-aware transport for local paths, but use a normal fetch for /// `file://` urls. LOCAL_AUTO, /// Bypass the git-aware transport even for a `file://` url. LOCAL, /// Do no bypass the git-aware transport NO_LOCAL, /// Bypass the git-aware transport, but do not try to use hardlinks. 
LOCAL_NO_LINKS, }; fn makeCOptionsObject(self: CloneOptions) c.git_clone_options { return c.git_clone_options{ .version = c.GIT_CHECKOUT_OPTIONS_VERSION, .checkout_opts = self.checkout_options.makeCOptionObject(), .fetch_opts = self.fetch_options.makeCOptionsObject(), .bare = @boolToInt(self.bare), .local = @enumToInt(self.local), .checkout_branch = if (self.checkout_branch) |b| @as(?[*]const u8, b.ptr) else null, .repository_cb = @ptrCast(c.git_repository_create_cb, self.repository_cb), .repository_cb_payload = self.repository_cb_payload, .remote_cb = @ptrCast(c.git_remote_create_cb, self.remote_cb), .remote_cb_payload = self.remote_cb_payload, }; } }; /// Clone a remote repository. /// /// By default this creates its repository and initial remote to match git's defaults. /// You can use the options in the callback to customize how these are created. /// /// ## Parameters /// * `url` - URL of the remote repository to clone. /// * `local_path` - Directory to clone the repository into. /// * `options` - Customize how the repository is created. 
pub fn clone(self: Handle, url: [:0]const u8, local_path: [:0]const u8, options: CloneOptions) !*git.Repository { _ = self; log.debug("Handle.clone called, url={s}, local_path={s}", .{ url, local_path }); var repo: *git.Repository = undefined; const c_options = options.makeCOptionsObject(); try internal.wrapCall("git_clone", .{ @ptrCast(*?*c.git_repository, &repo), url.ptr, local_path.ptr, &c_options, }); log.debug("repository cloned successfully", .{}); return repo; } pub fn optionGetMaximumMmapWindowSize(self: Handle) !usize { _ = self; log.debug("Handle.optionGetMmapWindowSize called", .{}); var result: usize = undefined; try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_GET_MWINDOW_SIZE, &result }); log.debug("maximum mmap window size: {}", .{result}); return result; } pub fn optionSetMaximumMmapWindowSize(self: Handle, value: usize) !void { _ = self; log.debug("Handle.optionSetMaximumMmapWindowSize called, value={}", .{value}); try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_MWINDOW_SIZE, value }); log.debug("successfully set maximum mmap window size", .{}); } pub fn optionGetMaximumMmapLimit(self: Handle) !usize { _ = self; log.debug("Handle.optionGetMaximumMmapLimit called", .{}); var result: usize = undefined; try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_GET_MWINDOW_MAPPED_LIMIT, &result }); log.debug("maximum mmap limit: {}", .{result}); return result; } pub fn optionSetMaximumMmapLimit(self: Handle, value: usize) !void { _ = self; log.debug("Handle.optionSetMaximumMmapLimit called, value={}", .{value}); try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_MWINDOW_MAPPED_LIMIT, value }); log.debug("successfully set maximum mmap limit", .{}); } /// zero means unlimited pub fn optionGetMaximumMappedFiles(self: Handle) !usize { _ = self; log.debug("Handle.optionGetMaximumMappedFiles called", .{}); var result: usize = undefined; try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_GET_MWINDOW_FILE_LIMIT, &result }); 
log.debug("maximum mapped files: {}", .{result}); return result; } /// zero means unlimited pub fn optionSetMaximumMmapFiles(self: Handle, value: usize) !void { _ = self; log.debug("Handle.optionSetMaximumMmapFiles called, value={}", .{value}); try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_MWINDOW_FILE_LIMIT, value }); log.debug("successfully set maximum mapped files", .{}); } pub fn optionGetSearchPath(self: Handle, level: git.Config.Level) !git.Buf { _ = self; log.debug("Handle.optionGetSearchPath called, level={s}", .{@tagName(level)}); var buf: git.Buf = .{}; try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_GET_SEARCH_PATH, @enumToInt(level), @ptrCast(*c.git_buf, &buf), }); log.debug("got search path: {s}", .{buf.toSlice()}); return buf; } /// `path` should be a list of directories delimited by PATH_LIST_SEPARATOR. /// Pass `null` to reset to the default (generally based on environment variables). Use magic path `$PATH` to include the old /// value of the path (if you want to prepend or append, for instance). 
pub fn optionSetSearchPath(self: Handle, level: git.Config.Level, path: ?[:0]const u8) !void {
    _ = self;

    log.debug("Handle.optionSetSearchPath called, path={s}", .{path});

    // libgit2 expects a C string or NULL; unwrap the optional slice here.
    const path_c: [*c]const u8 = if (path) |slice| slice.ptr else null;

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_SEARCH_PATH, @enumToInt(level), path_c });

    log.debug("successfully set search path", .{});
}

/// Set the per-object-type limit on objects kept in libgit2's cache.
pub fn optionSetCacheObjectLimit(self: Handle, object_type: git.ObjectType, value: usize) !void {
    _ = self;

    log.debug("Handle.optionSetCacheObjectLimit called, object_type={s}, value={}", .{ @tagName(object_type), value });

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_CACHE_OBJECT_LIMIT, @enumToInt(object_type), value });

    log.debug("successfully set cache object limit", .{});
}

/// Set the maximum total cache size (GIT_OPT_SET_CACHE_MAX_SIZE).
pub fn optionSetMaximumCacheSize(self: Handle, value: usize) !void {
    _ = self;

    // FIX: log message previously said "optionSetCacheMaximumSize", which is
    // not this function's name.
    log.debug("Handle.optionSetMaximumCacheSize called, value={}", .{value});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_CACHE_MAX_SIZE, value });

    log.debug("successfully set maximum cache size", .{});
}

/// Enable or disable libgit2's object caching.
pub fn optionSetCaching(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetCaching called, enabled={}", .{enabled});

    // FIX: pass an integer through the C varargs call, matching every other
    // GIT_OPT_ENABLE_* wrapper in this struct; previously the raw Zig `bool`
    // was forwarded.
    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_CACHING, @boolToInt(enabled) });

    log.debug("successfully set caching status", .{});
}

/// Query the current and allowed cached-memory figures.
pub fn optionGetCachedMemory(self: Handle) !CachedMemory {
    _ = self;

    log.debug("Handle.optionGetCachedMemory called", .{});

    var result: CachedMemory = undefined;
    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_GET_CACHED_MEMORY, &result.current, &result.allowed });

    log.debug("cached memory: {}", .{result});

    return result;
}

pub const CachedMemory = struct {
    current: usize,
    allowed: usize,
};

/// Get the default template path. Caller owns the returned buffer.
pub fn optionGetTemplatePath(self: Handle) !git.Buf {
    _ = self;

    log.debug("Handle.optionGetTemplatePath called", .{});

    var result: git.Buf = .{};
    try internal.wrapCall("git_libgit2_opts", .{
        c.GIT_OPT_GET_TEMPLATE_PATH,
        @ptrCast(*c.git_buf, &result),
    });

    log.debug("got template path: {s}", .{result.toSlice()});

    return result;
}

pub fn optionSetTemplatePath(self: Handle, path: [:0]const u8) !void {
    _ = self;

    log.debug("Handle.optionSetTemplatePath called, path={s}", .{path});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_TEMPLATE_PATH, path.ptr });

    log.debug("successfully set template path", .{});
}

/// Either parameter may be `null`, but not both.
pub fn optionSetSslCertLocations(self: Handle, file: ?[:0]const u8, path: ?[:0]const u8) !void {
    _ = self;

    log.debug("Handle.optionSetSslCertLocations called, file={s}, path={s}", .{ file, path });

    const file_c: [*c]const u8 = if (file) |ptr| ptr.ptr else null;
    const path_c: [*c]const u8 = if (path) |ptr| ptr.ptr else null;

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_SSL_CERT_LOCATIONS, file_c, path_c });

    log.debug("successfully set ssl certificate location", .{});
}

pub fn optionSetUserAgent(self: Handle, user_agent: [:0]const u8) !void {
    _ = self;

    log.debug("Handle.optionSetUserAgent called, user_agent={s}", .{user_agent});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_USER_AGENT, user_agent.ptr });

    log.debug("successfully set user agent", .{});
}

/// Get the current user agent. Caller owns the returned buffer.
pub fn optionGetUserAgent(self: Handle) !git.Buf {
    _ = self;

    log.debug("Handle.optionGetUserAgent called", .{});

    var result: git.Buf = .{};
    try internal.wrapCall("git_libgit2_opts", .{
        c.GIT_OPT_GET_USER_AGENT,
        @ptrCast(*c.git_buf, &result),
    });

    log.debug("got user agent: {s}", .{result.toSlice()});

    return result;
}

pub fn optionSetWindowsSharemode(self: Handle, value: c_uint) !void {
    _ = self;

    log.debug("Handle.optionSetWindowsSharemode called, value={}", .{value});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_WINDOWS_SHAREMODE, value });

    log.debug("successfully set windows share mode", .{});
}

pub fn optionGetWindowSharemode(self: Handle) !c_uint {
    _ = self;

    log.debug("Handle.optionGetWindowSharemode called", .{});

    var result: c_uint = undefined;
    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_GET_WINDOWS_SHAREMODE, &result });

    log.debug("got windows share mode: {}", .{result});

    return result;
}

pub fn optionSetStrictObjectCreation(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetStrictObjectCreation called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_STRICT_OBJECT_CREATION, @boolToInt(enabled) });

    log.debug("successfully set strict object creation mode", .{});
}

pub fn optionSetStrictSymbolicRefCreations(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetStrictSymbolicRefCreations called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_STRICT_SYMBOLIC_REF_CREATION, @boolToInt(enabled) });

    log.debug("successfully set strict symbolic ref creation mode", .{});
}

pub fn optionSetSslCiphers(self: Handle, ciphers: [:0]const u8) !void {
    _ = self;

    log.debug("Handle.optionSetSslCiphers called, ciphers={s}", .{ciphers});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_SSL_CIPHERS, ciphers.ptr });

    log.debug("successfully set SSL ciphers", .{});
}

pub fn optionSetOffsetDeltas(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetOffsetDeltas called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_OFS_DELTA, @boolToInt(enabled) });

    log.debug("successfully set offset deltas mode", .{});
}

pub fn optionSetFsyncDir(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetFsyncDir called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_FSYNC_GITDIR, @boolToInt(enabled) });

    log.debug("successfully set fsync dir mode", .{});
}

pub fn optionSetStrictHashVerification(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetStrictHashVerification called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION, @boolToInt(enabled) });

    log.debug("successfully set strict hash verification mode", .{});
}

/// If the given `allocator` is `null`, then the system default will be restored.
pub fn optionSetAllocator(self: Handle, allocator: ?*git.GitAllocator) !void {
    _ = self;

    log.debug("Handle.optionSetAllocator called, allocator={*}", .{allocator});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_ALLOCATOR, allocator });

    log.debug("successfully set allocator", .{});
}

// NOTE(review): "Unsafed" looks like a typo for "Unsaved" (the C constant is
// GIT_OPT_ENABLE_UNSAVED_INDEX_SAFETY), but renaming would break callers.
pub fn optionSetUnsafedIndexSafety(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetUnsafedIndexSafety called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_UNSAVED_INDEX_SAFETY, @boolToInt(enabled) });

    log.debug("successfully set unsaved index safety mode", .{});
}

pub fn optionGetMaximumPackObjects(self: Handle) !usize {
    _ = self;

    log.debug("Handle.optionGetMaximumPackObjects called", .{});

    var result: usize = undefined;
    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_GET_PACK_MAX_OBJECTS, &result });

    log.debug("maximum pack objects: {}", .{result});

    return result;
}

pub fn optionSetMaximumPackObjects(self: Handle, value: usize) !void {
    _ = self;

    log.debug("Handle.optionSetMaximumPackObjects called, value={}", .{value});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_PACK_MAX_OBJECTS, value });

    log.debug("successfully set maximum pack objects", .{});
}

pub fn optionSetDisablePackKeepFileChecks(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetDisablePackKeepFileChecks called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, @boolToInt(enabled) });

    // FIX: success message previously said "unsaved index safety mode",
    // copy-pasted from optionSetUnsafedIndexSafety.
    log.debug("successfully set disable pack keep file checks mode", .{});
}

pub fn optionSetHTTPExpectContinue(self: Handle, enabled: bool) !void {
    _ = self;

    log.debug("Handle.optionSetHTTPExpectContinue called, enabled={}", .{enabled});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_ENABLE_HTTP_EXPECT_CONTINUE, @boolToInt(enabled) });

    log.debug("successfully set HTTP expect continue mode", .{});
}

/// Returns `true` if `name` is a valid branch name.
pub fn branchNameIsValid(name: [:0]const u8) !bool {
    log.debug("Handle.branchNameIsValid, name={s}", .{name});

    var valid: c_int = undefined;
    try internal.wrapCall("git_branch_name_is_valid", .{ &valid, name.ptr });

    const ret = valid == 1;

    log.debug("branch name valid: {}", .{ret});

    return ret;
}

pub fn optionSetOdbPackedPriority(self: Handle, value: usize) !void {
    _ = self;

    log.debug("Handle.optionSetOdbPackedPriority called, value={}", .{value});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_ODB_PACKED_PRIORITY, value });

    log.debug("successfully set odb packed priority", .{});
}

pub fn optionSetOdbLoosePriority(self: Handle, value: usize) !void {
    _ = self;

    log.debug("Handle.optionSetOdbLoosePriority called, value={}", .{value});

    try internal.wrapCall("git_libgit2_opts", .{ c.GIT_OPT_SET_ODB_LOOSE_PRIORITY, value });

    log.debug("successfully set odb loose priority", .{});
}

/// Clean up excess whitespace and make sure there is a trailing newline in the message.
///
/// Optionally, it can remove lines which start with the comment character.
///
/// ## Parameters
/// * `message` - the message to be prettified.
/// * `strip_comment_char` - if non-`null` lines starting with this character are considered to be comments and removed
pub fn messagePrettify(self: Handle, message: [:0]const u8, strip_comment_char: ?u8) !git.Buf {
    _ = self;

    log.debug("Handle.messagePrettify called, message={s}, strip_comment_char={}", .{ message, strip_comment_char });

    var ret: git.Buf = .{};

    // The C API takes (strip_comments: int, comment_char: char); translate the
    // optional into that pair.
    if (strip_comment_char) |char| {
        try internal.wrapCall("git_message_prettify", .{
            @ptrCast(*c.git_buf, &ret),
            message.ptr,
            1,
            char,
        });
    } else {
        try internal.wrapCall("git_message_prettify", .{
            @ptrCast(*c.git_buf, &ret),
            message.ptr,
            0,
            0,
        });
    }

    log.debug("prettified message: {s}", .{ret.toSlice()});

    return ret;
}

/// Parse trailers out of a message
///
/// Trailers are key/value pairs in the last paragraph of a message, not including any patches or conflicts that may
/// be present.
pub fn messageParseTrailers(self: Handle, message: [:0]const u8) !git.MessageTrailerArray {
    _ = self;

    log.debug("Handle.messageParseTrailers called, message={s}", .{message});

    var ret: git.MessageTrailerArray = undefined;

    try internal.wrapCall("git_message_trailers", .{
        @ptrCast(*c.git_message_trailer_array, &ret),
        message.ptr,
    });

    log.debug("successfully parsed {} message trailers", .{ret.count});

    return ret;
}

comptime {
    std.testing.refAllDecls(@This());
}
};

comptime {
    std.testing.refAllDecls(@This());
}
src/handle.zig
const std = @import("std");
const os = std.os;
const fs = std.fs;
const linux = os.linux;
const io = std.io;
const fmt = std.fmt;
const mem = std.mem;
const FixedBufferAllocator = std.heap.FixedBufferAllocator;
const assert = std.debug.assert;
const termios = @import("termios.zig");

// Single-byte ASCII control codes used to build VT100 escape sequences.
const BELL = [_]u8{7}; // Bell
const BS = [_]u8{8}; // Moves cursor back one column
const HT = [_]u8{9}; // Moves the cursor to next tab stop
const LF = [_]u8{10}; // Moves the cursor down one row
const CR = [_]u8{13}; // Move the cursor to column one
const CAN = [_]u8{24}; // Cancels an escape sequence
const ESC = [_]u8{27}; // Starts an escape sequence

const std_in = std.io.getStdIn();
const std_out = std.io.getStdOut();
const std_err = std.io.getStdErr();

const VTError = error{
    UnexpectedResponse,
    UnableToDetermineTerminalDimensions,
};

// Length of the decimal rendering of maxInt(u64); bounds all format buffers below.
const max_usize_str_len = "18446744073709551615".len;

const unsupported_term = [_][]const u8{
    "dumb",
    "cons25",
    "emacs",
};

pub const CursorPos = struct {
    row: usize,
    col: usize,
};

pub const TerminalDimensions = struct {
    width: usize,
    height: usize,
};

/// True when $TERM is unset or names a terminal known not to handle escapes.
pub fn isUnsupportedTerm() bool {
    const TERM = os.getenv("TERM") orelse return true;
    for (unsupported_term) |comp| {
        if (mem.eql(u8, TERM, comp)) return true;
    }
    return false;
}

pub fn eraseCursorToEndOfLine() !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#EL
    try std_out.write(ESC ++ "[0K");
}

pub fn eraseStartOfLineToCursor() !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#EL
    try std_out.write(ESC ++ "[1K");
}

pub fn eraseEntireLine() !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#EL
    try std_out.write(ESC ++ "[2K");
}

pub fn eraseCursorToEndOfDisplay() !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#ED
    try std_out.write(ESC ++ "[0J");
}

pub fn eraseStartOfDisplayToCursor() !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#ED
    try std_out.write(ESC ++ "[1J");
}

pub fn eraseEntireDisplay() !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#ED
    try std_out.write(ESC ++ "[2J");
}

/// Move the cursor to the 1-based (row, col) position.
pub fn setCursorPos(cpos: CursorPos) !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#CUP
    var formatting_buf: [(max_usize_str_len * 2) + 4]u8 = undefined;
    const esc_seq = try fmt.bufPrint(formatting_buf[0..], ESC ++ "[{};{}H", .{ cpos.row, cpos.col });
    try std_out.write(esc_seq);
}

pub fn cursorHome() !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#CUP
    try std_out.write(ESC ++ "[H");
}

pub fn clearScreen() !void {
    try cursorHome();
    try eraseEntireDisplay();
}

pub fn cursorForward(num_chars: usize) !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#CUF
    var formatting_buf: [max_usize_str_len + 3]u8 = undefined;
    const esc_seq = try fmt.bufPrint(formatting_buf[0..], ESC ++ "[{}C", .{num_chars});
    try std_out.write(esc_seq);
}

pub fn cursorBackward(num_chars: usize) !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#CUB
    var formatting_buf: [max_usize_str_len + 3]u8 = undefined;
    const esc_seq = try fmt.bufPrint(formatting_buf[0..], ESC ++ "[{}D", .{num_chars});
    try std_out.write(esc_seq);
}

pub fn cursorUp(num_chars: usize) !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#CUU
    var formatting_buf: [max_usize_str_len + 3]u8 = undefined;
    const esc_seq = try fmt.bufPrint(formatting_buf[0..], ESC ++ "[{}A", .{num_chars});
    try std_out.write(esc_seq);
}

pub fn cursorDown(num_chars: usize) !void {
    //https://vt100.net/docs/vt100-ug/chapter3.html#CUD
    var formatting_buf: [max_usize_str_len + 3]u8 = undefined;
    const esc_seq = try fmt.bufPrint(formatting_buf[0..], ESC ++ "[{}B", .{num_chars});
    try std_out.write(esc_seq);
}

/// Query the terminal for the current cursor position (DSR/CPR exchange).
/// Sends ESC[6n and parses the ESC[row;colR reply.
pub fn getCursorPos() !CursorPos {
    var buf_arr: [(max_usize_str_len * 2) + 4]u8 = undefined;
    const buf = buf_arr[0..];

    //https://vt100.net/docs/vt100-ug/chapter3.html#DSR
    try std_out.write(ESC ++ "[6n");

    var esc_index: usize = 0;
    var char_R_index: usize = 0;
    for (buf) |_, i| {
        // FIX: the terminal delivers the CPR reply on standard *input*; the
        // original read from std_out, which only works by accident when fd 1
        // is the same read/write tty.
        if ((try std_in.read(buf[i .. i + 1])) == 1) {
            switch (buf[i]) {
                ESC[0] => {
                    esc_index = i;
                },
                'R' => {
                    char_R_index = i;
                    break;
                },
                else => {},
            }
        } else {
            break;
        }
    }

    if (char_R_index > 0) {
        // Parse everything between the last ESC seen and the terminating 'R'.
        return try scanCursorPositionReport(buf[esc_index..char_R_index]);
    } else {
        return error.CursorPosResponseNotFound;
    }
}

pub fn getCursorColumn() !usize {
    const cursor_pos = try getCursorPos();
    return cursor_pos.col;
}

pub fn getCursorRow() !usize {
    const cursor_pos = try getCursorPos();
    return cursor_pos.row;
}

/// Best-effort terminal size; falls back to 80x24 when the ioctl fails.
pub fn getTerminalSize() TerminalDimensions {
    if (ttyWinSize()) |win_size| {
        return TerminalDimensions{
            .width = win_size.ws_col,
            .height = win_size.ws_row,
        };
    } else |_| {
        return TerminalDimensions{
            .width = 80,
            .height = 24,
        };
    }
}

/// Switch stdin's tty to raw mode. Returns the previous settings so the
/// caller can restore them with `setTerminalMode`.
pub fn enableRawTerminalMode() !termios.Termios {
    if (!std_in.isTty()) return error.IsNotTty;

    var orig: termios.Termios = undefined;
    try termios.tcgetattr(std_in.handle, &orig);

    var raw = orig;
    termios.cfmakeraw(&raw);
    try termios.tcsetattr(std_in.handle, termios.TCSAFLUSH, &raw);
    return orig;
}

pub fn setTerminalMode(tio: *const termios.Termios) !void {
    try termios.tcsetattr(std_in.handle, termios.TCSAFLUSH, tio);
}

pub fn beep() !void {
    try std_err.write(&BELL);
}

fn ttyWinSize() !linux.winsize {
    var wsz: linux.winsize = undefined;
    if (os.linux.syscall3(linux.SYS_ioctl, std_out.handle, linux.TIOCGWINSZ, @ptrToInt(&wsz)) == 0 and wsz.ws_col != 0) {
        return wsz;
    } else {
        return VTError.UnableToDetermineTerminalDimensions;
    }
}

/// Parse a cursor-position report of the form ESC[row;col (no trailing 'R').
fn scanCursorPositionReport(response: []const u8) !CursorPos {
    //https://vt100.net/docs/vt100-ug/chapter3.html#CPR
    if (!mem.eql(u8, response[0..2], ESC ++ "[")) {
        return VTError.UnexpectedResponse;
    }

    const delimiter_index = mem.indexOf(u8, response, ";") orelse return VTError.UnexpectedResponse;
    const row = try fmt.parseUnsigned(usize, response[2..delimiter_index], 10);
    const col = try fmt.parseUnsigned(usize, response[delimiter_index + 1 ..], 10);

    return CursorPos{
        .row = row,
        .col = col,
    };
}

test "vt-term.zig: scan row/column position response" {
    // SUCCESS CASES
    const ret1 = scanCursorPositionReport((ESC ++ "[20;30")[0..]) catch unreachable;
    assert(ret1.row == 20 and ret1.col == 30);

    const ret2 = scanCursorPositionReport((ESC ++ "[18446744073709551615;18446744073709551615")[0..]) catch unreachable;
    assert(ret2.row == 18446744073709551615 and ret2.col == 18446744073709551615);

    // FAILURE CASES
    const catch_val = CursorPos{
        .row = 127,
        .col = 255,
    };

    // parseUnsigned failure, num too large
    const err1 = scanCursorPositionReport((ESC ++ "[18446744073709551615;18446744073709551616")[0..]) catch catch_val;
    assert(err1.row == catch_val.row and err1.col == catch_val.col);

    const err2 = scanCursorPositionReport((ESC ++ "[18446744073709551616;18446744073709551615")[0..]) catch catch_val;
    assert(err2.row == catch_val.row and err2.col == catch_val.col);

    // malformed response
    // missing semicolon
    const err3 = scanCursorPositionReport((ESC ++ "[20:30")[0..]) catch catch_val;
    assert(err3.row == catch_val.row and err3.col == catch_val.col);

    // missing [
    const err4 = scanCursorPositionReport((ESC ++ "{20;30")[0..]) catch catch_val;
    assert(err4.row == catch_val.row and err4.col == catch_val.col);

    // extra character at start
    const err5 = scanCursorPositionReport((BELL ++ ESC ++ "[20;30")[0..]) catch catch_val;
    assert(err5.row == catch_val.row and err5.col == catch_val.col);
}

test "vt-term.zig: use functions" {
    var cursor: CursorPos = undefined;

    assert(!isUnsupportedTerm());

    var non_raw = try enableRawTerminalMode();
    defer setTerminalMode(&non_raw) catch {}; // best effort clean up

    try beep();
    try eraseCursorToEndOfLine();
    try eraseStartOfLineToCursor();
    try eraseEntireLine();
    try eraseCursorToEndOfDisplay();
    try eraseStartOfDisplayToCursor();
    try eraseEntireDisplay();

    const term_size = getTerminalSize();
    assert(term_size.width >= 15 and term_size.height >= 12);

    try cursorHome();
    cursor = try getCursorPos();
    assert(cursor.row == 1 and cursor.col == 1);

    try std_out.write("123");
    cursor = try getCursorPos();
    assert(cursor.row == 1 and cursor.col == 4);

    try clearScreen();
    cursor = try getCursorPos();
    assert(cursor.row == 1 and cursor.col == 1);

    try cursorForward(10);
    cursor = try getCursorPos();
    assert(cursor.row == 1 and cursor.col == 11);

    try cursorDown(2);
    cursor = try getCursorPos();
    assert(cursor.row == 3 and cursor.col == 11);

    try cursorBackward(10);
    cursor = try getCursorPos();
    assert(cursor.row == 3 and cursor.col == 1);

    try cursorUp(2);
    cursor = try getCursorPos();
    assert(cursor.row == 1 and cursor.col == 1);

    try setCursorPos(CursorPos{ .row = std.math.maxInt(usize), .col = std.math.maxInt(usize) });
    cursor = try getCursorPos();
    assert(cursor.row == term_size.height and cursor.col == term_size.width);

    try setCursorPos(CursorPos{ .row = 12, .col = 15 });
    cursor = try getCursorPos();
    assert(cursor.row == 12 and cursor.col == 15);

    assert((try getCursorRow()) == 12);
    assert((try getCursorColumn()) == 15);

    try cursorHome();
}
src/vt-term.zig
pub const PROCESSOR_NUMBER_PKEY = PROPERTYKEY { .fmtid = @import("../zig.zig").Guid.initString("5724c81d-d5af-4c1f-a103-a06e28f204c6"), .pid = 1 }; pub const GUID_DEVICE_BATTERY = Guid.initString("72631e54-78a4-11d0-bcf7-00aa00b7b32a"); pub const GUID_DEVICE_APPLICATIONLAUNCH_BUTTON = Guid.initString("629758ee-986e-4d9e-8e47-de27f8ab054d"); pub const GUID_DEVICE_SYS_BUTTON = Guid.initString("4afa3d53-74a7-11d0-be5e-00a0c9062857"); pub const GUID_DEVICE_LID = Guid.initString("4afa3d52-74a7-11d0-be5e-00a0c9062857"); pub const GUID_DEVICE_THERMAL_ZONE = Guid.initString("4afa3d51-74a7-11d0-be5e-00a0c9062857"); pub const GUID_DEVICE_FAN = Guid.initString("05ecd13d-81da-4a2a-8a4c-524f23dd4dc9"); pub const GUID_DEVICE_PROCESSOR = Guid.initString("97fadb10-4e33-40ae-359c-8bef029dbdd0"); pub const GUID_DEVICE_MEMORY = Guid.initString("3fd0f03d-92e0-45fb-b75c-5ed8ffb01021"); pub const GUID_DEVICE_ACPI_TIME = Guid.initString("97f99bf6-4497-4f18-bb22-4b9fb2fbef9c"); pub const GUID_DEVICE_MESSAGE_INDICATOR = Guid.initString("cd48a365-fa94-4ce2-a232-a1b764e5d8b4"); pub const GUID_CLASS_INPUT = Guid.initString("4d1e55b2-f16f-11cf-88cb-001111000030"); pub const GUID_DEVINTERFACE_THERMAL_COOLING = Guid.initString("dbe4373d-3c81-40cb-ace4-e0e5d05f0c9f"); pub const GUID_DEVINTERFACE_THERMAL_MANAGER = Guid.initString("927ec093-69a4-4bc0-bd02-711664714463"); pub const BATTERY_UNKNOWN_CAPACITY = @as(u32, 4294967295); pub const UNKNOWN_CAPACITY = @as(u32, 4294967295); pub const BATTERY_SYSTEM_BATTERY = @as(u32, 2147483648); pub const BATTERY_CAPACITY_RELATIVE = @as(u32, 1073741824); pub const BATTERY_IS_SHORT_TERM = @as(u32, 536870912); pub const BATTERY_SEALED = @as(u32, 268435456); pub const BATTERY_SET_CHARGE_SUPPORTED = @as(u32, 1); pub const BATTERY_SET_DISCHARGE_SUPPORTED = @as(u32, 2); pub const BATTERY_SET_CHARGINGSOURCE_SUPPORTED = @as(u32, 4); pub const BATTERY_SET_CHARGER_ID_SUPPORTED = @as(u32, 8); pub const BATTERY_UNKNOWN_TIME = @as(u32, 4294967295); pub const 
BATTERY_UNKNOWN_CURRENT = @as(u32, 4294967295); pub const UNKNOWN_CURRENT = @as(u32, 4294967295); pub const BATTERY_USB_CHARGER_STATUS_FN_DEFAULT_USB = @as(u32, 1); pub const BATTERY_USB_CHARGER_STATUS_UCM_PD = @as(u32, 2); pub const BATTERY_UNKNOWN_VOLTAGE = @as(u32, 4294967295); pub const BATTERY_UNKNOWN_RATE = @as(u32, 2147483648); pub const UNKNOWN_RATE = @as(u32, 2147483648); pub const UNKNOWN_VOLTAGE = @as(u32, 4294967295); pub const BATTERY_POWER_ON_LINE = @as(u32, 1); pub const BATTERY_DISCHARGING = @as(u32, 2); pub const BATTERY_CHARGING = @as(u32, 4); pub const BATTERY_CRITICAL = @as(u32, 8); pub const MAX_BATTERY_STRING_SIZE = @as(u32, 128); pub const IOCTL_BATTERY_QUERY_TAG = @as(u32, 2703424); pub const IOCTL_BATTERY_QUERY_INFORMATION = @as(u32, 2703428); pub const IOCTL_BATTERY_SET_INFORMATION = @as(u32, 2719816); pub const IOCTL_BATTERY_QUERY_STATUS = @as(u32, 2703436); pub const IOCTL_BATTERY_CHARGING_SOURCE_CHANGE = @as(u32, 2703440); pub const BATTERY_TAG_INVALID = @as(u32, 0); pub const MAX_ACTIVE_COOLING_LEVELS = @as(u32, 10); pub const ACTIVE_COOLING = @as(u32, 0); pub const PASSIVE_COOLING = @as(u32, 1); pub const TZ_ACTIVATION_REASON_THERMAL = @as(u32, 1); pub const TZ_ACTIVATION_REASON_CURRENT = @as(u32, 2); pub const THERMAL_POLICY_VERSION_1 = @as(u32, 1); pub const THERMAL_POLICY_VERSION_2 = @as(u32, 2); pub const IOCTL_THERMAL_QUERY_INFORMATION = @as(u32, 2703488); pub const IOCTL_THERMAL_SET_COOLING_POLICY = @as(u32, 2719876); pub const IOCTL_RUN_ACTIVE_COOLING_METHOD = @as(u32, 2719880); pub const IOCTL_THERMAL_SET_PASSIVE_LIMIT = @as(u32, 2719884); pub const IOCTL_THERMAL_READ_TEMPERATURE = @as(u32, 2703504); pub const IOCTL_THERMAL_READ_POLICY = @as(u32, 2703508); pub const IOCTL_QUERY_LID = @as(u32, 2703552); pub const IOCTL_NOTIFY_SWITCH_EVENT = @as(u32, 2703616); pub const IOCTL_GET_SYS_BUTTON_CAPS = @as(u32, 2703680); pub const IOCTL_GET_SYS_BUTTON_EVENT = @as(u32, 2703684); pub const SYS_BUTTON_POWER = @as(u32, 1); pub const 
SYS_BUTTON_SLEEP = @as(u32, 2); pub const SYS_BUTTON_LID = @as(u32, 4); pub const SYS_BUTTON_WAKE = @as(u32, 2147483648); pub const SYS_BUTTON_LID_STATE_MASK = @as(u32, 196608); pub const SYS_BUTTON_LID_OPEN = @as(u32, 65536); pub const SYS_BUTTON_LID_CLOSED = @as(u32, 131072); pub const SYS_BUTTON_LID_INITIAL = @as(u32, 262144); pub const SYS_BUTTON_LID_CHANGED = @as(u32, 524288); pub const IOCTL_GET_PROCESSOR_OBJ_INFO = @as(u32, 2703744); pub const THERMAL_COOLING_INTERFACE_VERSION = @as(u32, 1); pub const THERMAL_DEVICE_INTERFACE_VERSION = @as(u32, 1); pub const IOCTL_SET_SYS_MESSAGE_INDICATOR = @as(u32, 2720192); pub const IOCTL_SET_WAKE_ALARM_VALUE = @as(u32, 2720256); pub const IOCTL_SET_WAKE_ALARM_POLICY = @as(u32, 2720260); pub const IOCTL_GET_WAKE_ALARM_VALUE = @as(u32, 2736648); pub const IOCTL_GET_WAKE_ALARM_POLICY = @as(u32, 2736652); pub const ACPI_TIME_ADJUST_DAYLIGHT = @as(u32, 1); pub const ACPI_TIME_IN_DAYLIGHT = @as(u32, 2); pub const ACPI_TIME_ZONE_UNKNOWN = @as(u32, 2047); pub const IOCTL_ACPI_GET_REAL_TIME = @as(u32, 2703888); pub const IOCTL_ACPI_SET_REAL_TIME = @as(u32, 2720276); pub const IOCTL_GET_WAKE_ALARM_SYSTEM_POWERSTATE = @as(u32, 2703896); pub const BATTERY_STATUS_WMI_GUID = Guid.initString("fc4670d1-ebbf-416e-87ce-374a4ebc111a"); pub const BATTERY_RUNTIME_WMI_GUID = Guid.initString("535a3767-1ac2-49bc-a077-3f7a02e40aec"); pub const BATTERY_TEMPERATURE_WMI_GUID = Guid.initString("1a52a14d-adce-4a44-9a3e-c8d8f15ff2c2"); pub const BATTERY_FULL_CHARGED_CAPACITY_WMI_GUID = Guid.initString("40b40565-96f7-4435-8694-97e0e4395905"); pub const BATTERY_CYCLE_COUNT_WMI_GUID = Guid.initString("ef98db24-0014-4c25-a50b-c724ae5cd371"); pub const BATTERY_STATIC_DATA_WMI_GUID = Guid.initString("05e1e463-e4e2-4ea9-80cb-9bd4b3ca0655"); pub const BATTERY_STATUS_CHANGE_WMI_GUID = Guid.initString("cddfa0c3-7c5b-4e43-a034-059fa5b84364"); pub const BATTERY_TAG_CHANGE_WMI_GUID = Guid.initString("5e1f6e19-8786-4d23-94fc-9e746bd5d888"); pub const 
BATTERY_MINIPORT_UPDATE_DATA_VER_1 = @as(u32, 1); pub const BATTERY_MINIPORT_UPDATE_DATA_VER_2 = @as(u32, 2); pub const BATTERY_CLASS_MAJOR_VERSION = @as(u32, 1); pub const BATTERY_CLASS_MINOR_VERSION = @as(u32, 0); pub const BATTERY_CLASS_MINOR_VERSION_1 = @as(u32, 1); pub const GUID_DEVICE_ENERGY_METER = Guid.initString("45bd8344-7ed6-49cf-a440-c276c933b053"); pub const IOCTL_EMI_GET_VERSION = @as(u32, 2244608); pub const IOCTL_EMI_GET_METADATA_SIZE = @as(u32, 2244612); pub const IOCTL_EMI_GET_METADATA = @as(u32, 2244616); pub const IOCTL_EMI_GET_MEASUREMENT = @as(u32, 2244620); pub const EMI_NAME_MAX = @as(u32, 16); pub const EMI_VERSION_V1 = @as(u32, 1); pub const EMI_VERSION_V2 = @as(u32, 2); pub const EFFECTIVE_POWER_MODE_V1 = @as(u32, 1); pub const EFFECTIVE_POWER_MODE_V2 = @as(u32, 2); pub const EnableSysTrayBatteryMeter = @as(u32, 1); pub const EnableMultiBatteryDisplay = @as(u32, 2); pub const EnablePasswordLogon = @as(u32, 4); pub const EnableWakeOnRing = @as(u32, 8); pub const EnableVideoDimDisplay = @as(u32, 16); pub const POWER_ATTRIBUTE_HIDE = @as(u32, 1); pub const POWER_ATTRIBUTE_SHOW_AOAC = @as(u32, 2); pub const DEVICEPOWER_HARDWAREID = @as(u32, 2147483648); pub const DEVICEPOWER_AND_OPERATION = @as(u32, 1073741824); pub const DEVICEPOWER_FILTER_DEVICES_PRESENT = @as(u32, 536870912); pub const DEVICEPOWER_FILTER_HARDWARE = @as(u32, 268435456); pub const DEVICEPOWER_FILTER_WAKEENABLED = @as(u32, 134217728); pub const DEVICEPOWER_FILTER_WAKEPROGRAMMABLE = @as(u32, 67108864); pub const DEVICEPOWER_FILTER_ON_NAME = @as(u32, 33554432); pub const DEVICEPOWER_SET_WAKEENABLED = @as(u32, 1); pub const DEVICEPOWER_CLEAR_WAKEENABLED = @as(u32, 2); pub const PDCAP_S0_SUPPORTED = @as(u32, 65536); pub const PDCAP_S1_SUPPORTED = @as(u32, 131072); pub const PDCAP_S2_SUPPORTED = @as(u32, 262144); pub const PDCAP_S3_SUPPORTED = @as(u32, 524288); pub const PDCAP_WAKE_FROM_S0_SUPPORTED = @as(u32, 1048576); pub const PDCAP_WAKE_FROM_S1_SUPPORTED = @as(u32, 2097152); 
pub const PDCAP_WAKE_FROM_S2_SUPPORTED = @as(u32, 4194304); pub const PDCAP_WAKE_FROM_S3_SUPPORTED = @as(u32, 8388608); pub const PDCAP_S4_SUPPORTED = @as(u32, 16777216); pub const PDCAP_S5_SUPPORTED = @as(u32, 33554432); pub const THERMAL_EVENT_VERSION = @as(u32, 1); //-------------------------------------------------------------------------------- // Section: Types (70) //-------------------------------------------------------------------------------- pub const POWER_PLATFORM_ROLE_VERSION = enum(u32) { @"1" = 1, @"2" = 2, }; // TODO: enum 'POWER_PLATFORM_ROLE_VERSION' has known issues with its value aliases pub const POWER_SETTING_REGISTER_NOTIFICATION_FLAGS = enum(u32) { SERVICE_HANDLE = 1, CALLBACK = 2, WINDOW_HANDLE = 0, }; pub const DEVICE_NOTIFY_SERVICE_HANDLE = POWER_SETTING_REGISTER_NOTIFICATION_FLAGS.SERVICE_HANDLE; pub const DEVICE_NOTIFY_CALLBACK = POWER_SETTING_REGISTER_NOTIFICATION_FLAGS.CALLBACK; pub const DEVICE_NOTIFY_WINDOW_HANDLE = POWER_SETTING_REGISTER_NOTIFICATION_FLAGS.WINDOW_HANDLE; pub const EXECUTION_STATE = enum(u32) { AWAYMODE_REQUIRED = 64, CONTINUOUS = 2147483648, DISPLAY_REQUIRED = 2, SYSTEM_REQUIRED = 1, USER_PRESENT = 4, _, pub fn initFlags(o: struct { AWAYMODE_REQUIRED: u1 = 0, CONTINUOUS: u1 = 0, DISPLAY_REQUIRED: u1 = 0, SYSTEM_REQUIRED: u1 = 0, USER_PRESENT: u1 = 0, }) EXECUTION_STATE { return @intToEnum(EXECUTION_STATE, (if (o.AWAYMODE_REQUIRED == 1) @enumToInt(EXECUTION_STATE.AWAYMODE_REQUIRED) else 0) | (if (o.CONTINUOUS == 1) @enumToInt(EXECUTION_STATE.CONTINUOUS) else 0) | (if (o.DISPLAY_REQUIRED == 1) @enumToInt(EXECUTION_STATE.DISPLAY_REQUIRED) else 0) | (if (o.SYSTEM_REQUIRED == 1) @enumToInt(EXECUTION_STATE.SYSTEM_REQUIRED) else 0) | (if (o.USER_PRESENT == 1) @enumToInt(EXECUTION_STATE.USER_PRESENT) else 0) ); } }; pub const ES_AWAYMODE_REQUIRED = EXECUTION_STATE.AWAYMODE_REQUIRED; pub const ES_CONTINUOUS = EXECUTION_STATE.CONTINUOUS; pub const ES_DISPLAY_REQUIRED = EXECUTION_STATE.DISPLAY_REQUIRED; pub const 
ES_SYSTEM_REQUIRED = EXECUTION_STATE.SYSTEM_REQUIRED; pub const ES_USER_PRESENT = EXECUTION_STATE.USER_PRESENT; pub const POWER_ACTION_POLICY_EVENT_CODE = enum(u32) { FORCE_TRIGGER_RESET = 2147483648, LEVEL_USER_NOTIFY_EXEC = 4, LEVEL_USER_NOTIFY_SOUND = 2, LEVEL_USER_NOTIFY_TEXT = 1, USER_NOTIFY_BUTTON = 8, USER_NOTIFY_SHUTDOWN = 16, _, pub fn initFlags(o: struct { FORCE_TRIGGER_RESET: u1 = 0, LEVEL_USER_NOTIFY_EXEC: u1 = 0, LEVEL_USER_NOTIFY_SOUND: u1 = 0, LEVEL_USER_NOTIFY_TEXT: u1 = 0, USER_NOTIFY_BUTTON: u1 = 0, USER_NOTIFY_SHUTDOWN: u1 = 0, }) POWER_ACTION_POLICY_EVENT_CODE { return @intToEnum(POWER_ACTION_POLICY_EVENT_CODE, (if (o.FORCE_TRIGGER_RESET == 1) @enumToInt(POWER_ACTION_POLICY_EVENT_CODE.FORCE_TRIGGER_RESET) else 0) | (if (o.LEVEL_USER_NOTIFY_EXEC == 1) @enumToInt(POWER_ACTION_POLICY_EVENT_CODE.LEVEL_USER_NOTIFY_EXEC) else 0) | (if (o.LEVEL_USER_NOTIFY_SOUND == 1) @enumToInt(POWER_ACTION_POLICY_EVENT_CODE.LEVEL_USER_NOTIFY_SOUND) else 0) | (if (o.LEVEL_USER_NOTIFY_TEXT == 1) @enumToInt(POWER_ACTION_POLICY_EVENT_CODE.LEVEL_USER_NOTIFY_TEXT) else 0) | (if (o.USER_NOTIFY_BUTTON == 1) @enumToInt(POWER_ACTION_POLICY_EVENT_CODE.USER_NOTIFY_BUTTON) else 0) | (if (o.USER_NOTIFY_SHUTDOWN == 1) @enumToInt(POWER_ACTION_POLICY_EVENT_CODE.USER_NOTIFY_SHUTDOWN) else 0) ); } }; pub const POWER_FORCE_TRIGGER_RESET = POWER_ACTION_POLICY_EVENT_CODE.FORCE_TRIGGER_RESET; pub const POWER_LEVEL_USER_NOTIFY_EXEC = POWER_ACTION_POLICY_EVENT_CODE.LEVEL_USER_NOTIFY_EXEC; pub const POWER_LEVEL_USER_NOTIFY_SOUND = POWER_ACTION_POLICY_EVENT_CODE.LEVEL_USER_NOTIFY_SOUND; pub const POWER_LEVEL_USER_NOTIFY_TEXT = POWER_ACTION_POLICY_EVENT_CODE.LEVEL_USER_NOTIFY_TEXT; pub const POWER_USER_NOTIFY_BUTTON = POWER_ACTION_POLICY_EVENT_CODE.USER_NOTIFY_BUTTON; pub const POWER_USER_NOTIFY_SHUTDOWN = POWER_ACTION_POLICY_EVENT_CODE.USER_NOTIFY_SHUTDOWN; // TODO: this type has a FreeFunc 'UnregisterPowerSettingNotification', what can Zig do with this information? 
// NOTE(review): generated Win32 power-management bindings. Newlines were lost
// (whole declaration runs collapsed onto single lines), which made the inline
// `// TODO` generator comments swallow the declarations following them.
// Restored here to one declaration per statement; token stream unchanged.
pub const HPOWERNOTIFY = *opaque{};

pub const EFFECTIVE_POWER_MODE = enum(i32) {
    BatterySaver = 0,
    BetterBattery = 1,
    Balanced = 2,
    HighPerformance = 3,
    MaxPerformance = 4,
    GameMode = 5,
    MixedReality = 6,
};
pub const EffectivePowerModeBatterySaver = EFFECTIVE_POWER_MODE.BatterySaver;
pub const EffectivePowerModeBetterBattery = EFFECTIVE_POWER_MODE.BetterBattery;
pub const EffectivePowerModeBalanced = EFFECTIVE_POWER_MODE.Balanced;
pub const EffectivePowerModeHighPerformance = EFFECTIVE_POWER_MODE.HighPerformance;
pub const EffectivePowerModeMaxPerformance = EFFECTIVE_POWER_MODE.MaxPerformance;
pub const EffectivePowerModeGameMode = EFFECTIVE_POWER_MODE.GameMode;
pub const EffectivePowerModeMixedReality = EFFECTIVE_POWER_MODE.MixedReality;

// TODO: this type is limited to platform 'windows10.0.17763'
pub const EFFECTIVE_POWER_MODE_CALLBACK = fn(
    Mode: EFFECTIVE_POWER_MODE,
    Context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void;

pub const GLOBAL_MACHINE_POWER_POLICY = extern struct {
    Revision: u32,
    LidOpenWakeAc: SYSTEM_POWER_STATE,
    LidOpenWakeDc: SYSTEM_POWER_STATE,
    BroadcastCapacityResolution: u32,
};
pub const GLOBAL_USER_POWER_POLICY = extern struct {
    Revision: u32,
    PowerButtonAc: POWER_ACTION_POLICY,
    PowerButtonDc: POWER_ACTION_POLICY,
    SleepButtonAc: POWER_ACTION_POLICY,
    SleepButtonDc: POWER_ACTION_POLICY,
    LidCloseAc: POWER_ACTION_POLICY,
    LidCloseDc: POWER_ACTION_POLICY,
    DischargePolicy: [4]SYSTEM_POWER_LEVEL,
    GlobalFlags: u32,
};
pub const GLOBAL_POWER_POLICY = extern struct {
    user: GLOBAL_USER_POWER_POLICY,
    mach: GLOBAL_MACHINE_POWER_POLICY,
};
pub const MACHINE_POWER_POLICY = extern struct {
    Revision: u32,
    MinSleepAc: SYSTEM_POWER_STATE,
    MinSleepDc: SYSTEM_POWER_STATE,
    ReducedLatencySleepAc: SYSTEM_POWER_STATE,
    ReducedLatencySleepDc: SYSTEM_POWER_STATE,
    DozeTimeoutAc: u32,
    DozeTimeoutDc: u32,
    DozeS4TimeoutAc: u32,
    DozeS4TimeoutDc: u32,
    MinThrottleAc: u8,
    MinThrottleDc: u8,
    pad1: [2]u8,
    OverThrottledAc: POWER_ACTION_POLICY,
    OverThrottledDc: POWER_ACTION_POLICY,
};
pub const MACHINE_PROCESSOR_POWER_POLICY = extern struct {
    Revision: u32,
    ProcessorPolicyAc: PROCESSOR_POWER_POLICY,
    ProcessorPolicyDc: PROCESSOR_POWER_POLICY,
};
pub const USER_POWER_POLICY = extern struct {
    Revision: u32,
    IdleAc: POWER_ACTION_POLICY,
    IdleDc: POWER_ACTION_POLICY,
    IdleTimeoutAc: u32,
    IdleTimeoutDc: u32,
    IdleSensitivityAc: u8,
    IdleSensitivityDc: u8,
    ThrottlePolicyAc: u8,
    ThrottlePolicyDc: u8,
    MaxSleepAc: SYSTEM_POWER_STATE,
    MaxSleepDc: SYSTEM_POWER_STATE,
    Reserved: [2]u32,
    VideoTimeoutAc: u32,
    VideoTimeoutDc: u32,
    SpindownTimeoutAc: u32,
    SpindownTimeoutDc: u32,
    OptimizeForPowerAc: BOOLEAN,
    OptimizeForPowerDc: BOOLEAN,
    FanThrottleToleranceAc: u8,
    FanThrottleToleranceDc: u8,
    ForcedThrottleAc: u8,
    ForcedThrottleDc: u8,
};
pub const POWER_POLICY = extern struct {
    user: USER_POWER_POLICY,
    mach: MACHINE_POWER_POLICY,
};
pub const PWRSCHEMESENUMPROC_V1 = fn(
    Index: u32,
    NameSize: u32,
    // TODO: what to do with BytesParamIndex 1?
    Name: ?*i8,
    DescriptionSize: u32,
    // TODO: what to do with BytesParamIndex 3?
    Description: ?*i8,
    Policy: ?*POWER_POLICY,
    Context: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
pub const PWRSCHEMESENUMPROC = fn(
    Index: u32,
    NameSize: u32,
    // TODO: what to do with BytesParamIndex 1?
    Name: ?PWSTR,
    DescriptionSize: u32,
    // TODO: what to do with BytesParamIndex 3?
    Description: ?PWSTR,
    Policy: ?*POWER_POLICY,
    Context: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
pub const POWER_DATA_ACCESSOR = enum(i32) {
    AC_POWER_SETTING_INDEX = 0,
    DC_POWER_SETTING_INDEX = 1,
    FRIENDLY_NAME = 2,
    DESCRIPTION = 3,
    POSSIBLE_POWER_SETTING = 4,
    POSSIBLE_POWER_SETTING_FRIENDLY_NAME = 5,
    POSSIBLE_POWER_SETTING_DESCRIPTION = 6,
    DEFAULT_AC_POWER_SETTING = 7,
    DEFAULT_DC_POWER_SETTING = 8,
    POSSIBLE_VALUE_MIN = 9,
    POSSIBLE_VALUE_MAX = 10,
    POSSIBLE_VALUE_INCREMENT = 11,
    POSSIBLE_VALUE_UNITS = 12,
    ICON_RESOURCE = 13,
    DEFAULT_SECURITY_DESCRIPTOR = 14,
    ATTRIBUTES = 15,
    SCHEME = 16,
    SUBGROUP = 17,
    INDIVIDUAL_SETTING = 18,
    ACTIVE_SCHEME = 19,
    CREATE_SCHEME = 20,
    AC_POWER_SETTING_MAX = 21,
    DC_POWER_SETTING_MAX = 22,
    AC_POWER_SETTING_MIN = 23,
    DC_POWER_SETTING_MIN = 24,
    PROFILE = 25,
    OVERLAY_SCHEME = 26,
    ACTIVE_OVERLAY_SCHEME = 27,
};
pub const ACCESS_AC_POWER_SETTING_INDEX = POWER_DATA_ACCESSOR.AC_POWER_SETTING_INDEX;
pub const ACCESS_DC_POWER_SETTING_INDEX = POWER_DATA_ACCESSOR.DC_POWER_SETTING_INDEX;
pub const ACCESS_FRIENDLY_NAME = POWER_DATA_ACCESSOR.FRIENDLY_NAME;
pub const ACCESS_DESCRIPTION = POWER_DATA_ACCESSOR.DESCRIPTION;
pub const ACCESS_POSSIBLE_POWER_SETTING = POWER_DATA_ACCESSOR.POSSIBLE_POWER_SETTING;
pub const ACCESS_POSSIBLE_POWER_SETTING_FRIENDLY_NAME = POWER_DATA_ACCESSOR.POSSIBLE_POWER_SETTING_FRIENDLY_NAME;
pub const ACCESS_POSSIBLE_POWER_SETTING_DESCRIPTION = POWER_DATA_ACCESSOR.POSSIBLE_POWER_SETTING_DESCRIPTION;
pub const ACCESS_DEFAULT_AC_POWER_SETTING = POWER_DATA_ACCESSOR.DEFAULT_AC_POWER_SETTING;
pub const ACCESS_DEFAULT_DC_POWER_SETTING = POWER_DATA_ACCESSOR.DEFAULT_DC_POWER_SETTING;
pub const ACCESS_POSSIBLE_VALUE_MIN = POWER_DATA_ACCESSOR.POSSIBLE_VALUE_MIN;
pub const ACCESS_POSSIBLE_VALUE_MAX = POWER_DATA_ACCESSOR.POSSIBLE_VALUE_MAX;
pub const ACCESS_POSSIBLE_VALUE_INCREMENT = POWER_DATA_ACCESSOR.POSSIBLE_VALUE_INCREMENT;
pub const ACCESS_POSSIBLE_VALUE_UNITS = POWER_DATA_ACCESSOR.POSSIBLE_VALUE_UNITS;
pub const
ACCESS_ICON_RESOURCE = POWER_DATA_ACCESSOR.ICON_RESOURCE;
// NOTE(review): collapsed generated bindings re-expanded to one declaration
// per statement; the first line above completes the `pub const` started on
// the previous source line. Token stream unchanged.
pub const ACCESS_DEFAULT_SECURITY_DESCRIPTOR = POWER_DATA_ACCESSOR.DEFAULT_SECURITY_DESCRIPTOR;
pub const ACCESS_ATTRIBUTES = POWER_DATA_ACCESSOR.ATTRIBUTES;
pub const ACCESS_SCHEME = POWER_DATA_ACCESSOR.SCHEME;
pub const ACCESS_SUBGROUP = POWER_DATA_ACCESSOR.SUBGROUP;
pub const ACCESS_INDIVIDUAL_SETTING = POWER_DATA_ACCESSOR.INDIVIDUAL_SETTING;
pub const ACCESS_ACTIVE_SCHEME = POWER_DATA_ACCESSOR.ACTIVE_SCHEME;
pub const ACCESS_CREATE_SCHEME = POWER_DATA_ACCESSOR.CREATE_SCHEME;
pub const ACCESS_AC_POWER_SETTING_MAX = POWER_DATA_ACCESSOR.AC_POWER_SETTING_MAX;
pub const ACCESS_DC_POWER_SETTING_MAX = POWER_DATA_ACCESSOR.DC_POWER_SETTING_MAX;
pub const ACCESS_AC_POWER_SETTING_MIN = POWER_DATA_ACCESSOR.AC_POWER_SETTING_MIN;
pub const ACCESS_DC_POWER_SETTING_MIN = POWER_DATA_ACCESSOR.DC_POWER_SETTING_MIN;
pub const ACCESS_PROFILE = POWER_DATA_ACCESSOR.PROFILE;
pub const ACCESS_OVERLAY_SCHEME = POWER_DATA_ACCESSOR.OVERLAY_SCHEME;
pub const ACCESS_ACTIVE_OVERLAY_SCHEME = POWER_DATA_ACCESSOR.ACTIVE_OVERLAY_SCHEME;
pub const PDEVICE_NOTIFY_CALLBACK_ROUTINE = fn(
    Context: ?*anyopaque,
    Type: u32,
    Setting: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS = extern struct {
    Callback: ?PDEVICE_NOTIFY_CALLBACK_ROUTINE,
    Context: ?*anyopaque,
};
pub const THERMAL_EVENT = extern struct {
    Version: u32,
    Size: u32,
    Type: u32,
    Temperature: u32,
    TripPointTemperature: u32,
    Initiator: ?PWSTR,
};
pub const BATTERY_QUERY_INFORMATION_LEVEL = enum(i32) {
    Information = 0,
    GranularityInformation = 1,
    Temperature = 2,
    EstimatedTime = 3,
    DeviceName = 4,
    ManufactureDate = 5,
    ManufactureName = 6,
    UniqueID = 7,
    SerialNumber = 8,
};
pub const BatteryInformation = BATTERY_QUERY_INFORMATION_LEVEL.Information;
pub const BatteryGranularityInformation = BATTERY_QUERY_INFORMATION_LEVEL.GranularityInformation;
pub const BatteryTemperature = BATTERY_QUERY_INFORMATION_LEVEL.Temperature;
pub const BatteryEstimatedTime = BATTERY_QUERY_INFORMATION_LEVEL.EstimatedTime;
pub const BatteryDeviceName = BATTERY_QUERY_INFORMATION_LEVEL.DeviceName;
pub const BatteryManufactureDate = BATTERY_QUERY_INFORMATION_LEVEL.ManufactureDate;
pub const BatteryManufactureName = BATTERY_QUERY_INFORMATION_LEVEL.ManufactureName;
pub const BatteryUniqueID = BATTERY_QUERY_INFORMATION_LEVEL.UniqueID;
pub const BatterySerialNumber = BATTERY_QUERY_INFORMATION_LEVEL.SerialNumber;
pub const BATTERY_QUERY_INFORMATION = extern struct {
    BatteryTag: u32,
    InformationLevel: BATTERY_QUERY_INFORMATION_LEVEL,
    AtRate: u32,
};
pub const BATTERY_INFORMATION = extern struct {
    Capabilities: u32,
    Technology: u8,
    Reserved: [3]u8,
    Chemistry: [4]u8,
    DesignedCapacity: u32,
    FullChargedCapacity: u32,
    DefaultAlert1: u32,
    DefaultAlert2: u32,
    CriticalBias: u32,
    CycleCount: u32,
};
pub const BATTERY_CHARGING_SOURCE_TYPE = enum(i32) {
    AC = 1,
    USB = 2,
    Wireless = 3,
    Max = 4,
};
pub const BatteryChargingSourceType_AC = BATTERY_CHARGING_SOURCE_TYPE.AC;
pub const BatteryChargingSourceType_USB = BATTERY_CHARGING_SOURCE_TYPE.USB;
pub const BatteryChargingSourceType_Wireless = BATTERY_CHARGING_SOURCE_TYPE.Wireless;
pub const BatteryChargingSourceType_Max = BATTERY_CHARGING_SOURCE_TYPE.Max;
pub const BATTERY_CHARGING_SOURCE = extern struct {
    Type: BATTERY_CHARGING_SOURCE_TYPE,
    MaxCurrent: u32,
};
pub const BATTERY_CHARGING_SOURCE_INFORMATION = extern struct {
    Type: BATTERY_CHARGING_SOURCE_TYPE,
    SourceOnline: BOOLEAN,
};
pub const USB_CHARGER_PORT = enum(i32) {
    Legacy = 0,
    TypeC = 1,
    Max = 2,
};
pub const UsbChargerPort_Legacy = USB_CHARGER_PORT.Legacy;
pub const UsbChargerPort_TypeC = USB_CHARGER_PORT.TypeC;
pub const UsbChargerPort_Max = USB_CHARGER_PORT.Max;
pub const BATTERY_SET_INFORMATION_LEVEL = enum(i32) {
    CriticalBias = 0,
    Charge = 1,
    Discharge = 2,
    ChargingSource = 3,
    ChargerId = 4,
    ChargerStatus = 5,
};
pub const BatteryCriticalBias = BATTERY_SET_INFORMATION_LEVEL.CriticalBias;
pub const BatteryCharge = BATTERY_SET_INFORMATION_LEVEL.Charge;
pub const BatteryDischarge = BATTERY_SET_INFORMATION_LEVEL.Discharge;
pub const BatteryChargingSource = BATTERY_SET_INFORMATION_LEVEL.ChargingSource;
pub const BatteryChargerId = BATTERY_SET_INFORMATION_LEVEL.ChargerId;
pub const BatteryChargerStatus = BATTERY_SET_INFORMATION_LEVEL.ChargerStatus;
pub const BATTERY_SET_INFORMATION = extern struct {
    BatteryTag: u32,
    InformationLevel: BATTERY_SET_INFORMATION_LEVEL,
    Buffer: [1]u8,
};
pub const BATTERY_CHARGER_STATUS = extern struct {
    Type: BATTERY_CHARGING_SOURCE_TYPE,
    VaData: [1]u32,
};
pub const BATTERY_USB_CHARGER_STATUS = extern struct {
    Type: BATTERY_CHARGING_SOURCE_TYPE,
    Reserved: u32,
    Flags: u32,
    MaxCurrent: u32,
    Voltage: u32,
    PortType: USB_CHARGER_PORT,
    PortId: u64,
    PowerSourceInformation: ?*anyopaque,
    OemCharger: Guid,
};
pub const BATTERY_WAIT_STATUS = extern struct {
    BatteryTag: u32,
    Timeout: u32,
    PowerState: u32,
    LowCapacity: u32,
    HighCapacity: u32,
};
pub const BATTERY_STATUS = extern struct {
    PowerState: u32,
    Capacity: u32,
    Voltage: u32,
    Rate: i32,
};
pub const BATTERY_MANUFACTURE_DATE = extern struct {
    Day: u8,
    Month: u8,
    Year: u16,
};
pub const THERMAL_INFORMATION = extern struct {
    ThermalStamp: u32,
    ThermalConstant1: u32,
    ThermalConstant2: u32,
    Processors: usize,
    SamplingPeriod: u32,
    CurrentTemperature: u32,
    PassiveTripPoint: u32,
    CriticalTripPoint: u32,
    ActiveTripPointCount: u8,
    ActiveTripPoint: [10]u32,
};
pub const THERMAL_WAIT_READ = extern struct {
    Timeout: u32,
    LowTemperature: u32,
    HighTemperature: u32,
};
pub const THERMAL_POLICY = extern struct {
    Version: u32,
    WaitForUpdate: BOOLEAN,
    Hibernate: BOOLEAN,
    Critical: BOOLEAN,
    ThermalStandby: BOOLEAN,
    ActivationReasons: u32,
    PassiveLimit: u32,
    ActiveLevel: u32,
    OverThrottled: BOOLEAN,
};
pub const PROCESSOR_OBJECT_INFO = extern struct {
    PhysicalID: u32,
    PBlkAddress: u32,
    PBlkLength: u8,
};
pub const PROCESSOR_OBJECT_INFO_EX = extern struct {
    PhysicalID: u32,
    PBlkAddress: u32,
    PBlkLength: u8,
    InitialApicId: u32,
};
// NOTE(review): collapsed generated bindings re-expanded; the `};` above
// closes the struct begun on the previous source line. Token stream unchanged.
pub const WAKE_ALARM_INFORMATION = extern struct {
    TimerIdentifier: u32,
    Timeout: u32,
};
pub const ACPI_REAL_TIME = extern struct {
    Year: u16,
    Month: u8,
    Day: u8,
    Hour: u8,
    Minute: u8,
    Second: u8,
    Valid: u8,
    Milliseconds: u16,
    TimeZone: i16,
    DayLight: u8,
    Reserved1: [3]u8,
};
pub const EMI_MEASUREMENT_UNIT = enum(i32) {
    s = 0,
};
pub const EmiMeasurementUnitPicowattHours = EMI_MEASUREMENT_UNIT.s;
pub const EMI_VERSION = extern struct {
    EmiVersion: u16,
};
pub const EMI_METADATA_SIZE = extern struct {
    MetadataSize: u32,
};
pub const EMI_CHANNEL_MEASUREMENT_DATA = extern struct {
    AbsoluteEnergy: u64,
    AbsoluteTime: u64,
};
pub const EMI_METADATA_V1 = extern struct {
    MeasurementUnit: EMI_MEASUREMENT_UNIT,
    HardwareOEM: [16]u16,
    HardwareModel: [16]u16,
    HardwareRevision: u16,
    MeteredHardwareNameSize: u16,
    MeteredHardwareName: [1]u16,
};
pub const EMI_CHANNEL_V2 = extern struct {
    MeasurementUnit: EMI_MEASUREMENT_UNIT,
    ChannelNameSize: u16,
    ChannelName: [1]u16,
};
pub const EMI_METADATA_V2 = extern struct {
    HardwareOEM: [16]u16,
    HardwareModel: [16]u16,
    HardwareRevision: u16,
    ChannelCount: u16,
    Channels: [1]EMI_CHANNEL_V2,
};
pub const EMI_MEASUREMENT_DATA_V2 = extern struct {
    ChannelData: [1]EMI_CHANNEL_MEASUREMENT_DATA,
};
pub const SYSTEM_POWER_STATE = enum(i32) {
    Unspecified = 0,
    Working = 1,
    Sleeping1 = 2,
    Sleeping2 = 3,
    Sleeping3 = 4,
    Hibernate = 5,
    Shutdown = 6,
    Maximum = 7,
};
pub const PowerSystemUnspecified = SYSTEM_POWER_STATE.Unspecified;
pub const PowerSystemWorking = SYSTEM_POWER_STATE.Working;
pub const PowerSystemSleeping1 = SYSTEM_POWER_STATE.Sleeping1;
pub const PowerSystemSleeping2 = SYSTEM_POWER_STATE.Sleeping2;
pub const PowerSystemSleeping3 = SYSTEM_POWER_STATE.Sleeping3;
pub const PowerSystemHibernate = SYSTEM_POWER_STATE.Hibernate;
pub const PowerSystemShutdown = SYSTEM_POWER_STATE.Shutdown;
pub const PowerSystemMaximum = SYSTEM_POWER_STATE.Maximum;
pub const POWER_ACTION = enum(i32) {
    None = 0,
    Reserved = 1,
    Sleep = 2,
    Hibernate = 3,
    Shutdown = 4,
    ShutdownReset = 5,
    ShutdownOff = 6,
    WarmEject = 7,
    DisplayOff = 8,
};
pub const PowerActionNone = POWER_ACTION.None;
pub const PowerActionReserved = POWER_ACTION.Reserved;
pub const PowerActionSleep = POWER_ACTION.Sleep;
pub const PowerActionHibernate = POWER_ACTION.Hibernate;
pub const PowerActionShutdown = POWER_ACTION.Shutdown;
pub const PowerActionShutdownReset = POWER_ACTION.ShutdownReset;
pub const PowerActionShutdownOff = POWER_ACTION.ShutdownOff;
pub const PowerActionWarmEject = POWER_ACTION.WarmEject;
pub const PowerActionDisplayOff = POWER_ACTION.DisplayOff;
pub const DEVICE_POWER_STATE = enum(i32) {
    Unspecified = 0,
    D0 = 1,
    D1 = 2,
    D2 = 3,
    D3 = 4,
    Maximum = 5,
};
pub const PowerDeviceUnspecified = DEVICE_POWER_STATE.Unspecified;
pub const PowerDeviceD0 = DEVICE_POWER_STATE.D0;
pub const PowerDeviceD1 = DEVICE_POWER_STATE.D1;
pub const PowerDeviceD2 = DEVICE_POWER_STATE.D2;
pub const PowerDeviceD3 = DEVICE_POWER_STATE.D3;
pub const PowerDeviceMaximum = DEVICE_POWER_STATE.Maximum;
pub const LATENCY_TIME = enum(i32) {
    DONT_CARE = 0,
    LOWEST_LATENCY = 1,
};
pub const LT_DONT_CARE = LATENCY_TIME.DONT_CARE;
pub const LT_LOWEST_LATENCY = LATENCY_TIME.LOWEST_LATENCY;
pub const POWER_REQUEST_TYPE = enum(i32) {
    DisplayRequired = 0,
    SystemRequired = 1,
    AwayModeRequired = 2,
    ExecutionRequired = 3,
};
pub const PowerRequestDisplayRequired = POWER_REQUEST_TYPE.DisplayRequired;
pub const PowerRequestSystemRequired = POWER_REQUEST_TYPE.SystemRequired;
pub const PowerRequestAwayModeRequired = POWER_REQUEST_TYPE.AwayModeRequired;
pub const PowerRequestExecutionRequired = POWER_REQUEST_TYPE.ExecutionRequired;
pub const CM_POWER_DATA = extern struct {
    PD_Size: u32,
    PD_MostRecentPowerState: DEVICE_POWER_STATE,
    PD_Capabilities: u32,
    PD_D1Latency: u32,
    PD_D2Latency: u32,
    PD_D3Latency: u32,
    PD_PowerStateMapping: [7]DEVICE_POWER_STATE,
    PD_DeepestSystemWake: SYSTEM_POWER_STATE,
};
pub const POWER_INFORMATION_LEVEL = enum(i32) {
    SystemPowerPolicyAc = 0,
    SystemPowerPolicyDc = 1,
VerifySystemPolicyAc = 2,
    // NOTE(review): continuation of the POWER_INFORMATION_LEVEL enum started
    // on the previous source line; collapsed bindings re-expanded, tokens unchanged.
    VerifySystemPolicyDc = 3,
    SystemPowerCapabilities = 4,
    SystemBatteryState = 5,
    SystemPowerStateHandler = 6,
    ProcessorStateHandler = 7,
    SystemPowerPolicyCurrent = 8,
    AdministratorPowerPolicy = 9,
    SystemReserveHiberFile = 10,
    ProcessorInformation = 11,
    SystemPowerInformation = 12,
    ProcessorStateHandler2 = 13,
    LastWakeTime = 14,
    LastSleepTime = 15,
    SystemExecutionState = 16,
    SystemPowerStateNotifyHandler = 17,
    ProcessorPowerPolicyAc = 18,
    ProcessorPowerPolicyDc = 19,
    VerifyProcessorPowerPolicyAc = 20,
    VerifyProcessorPowerPolicyDc = 21,
    ProcessorPowerPolicyCurrent = 22,
    SystemPowerStateLogging = 23,
    SystemPowerLoggingEntry = 24,
    SetPowerSettingValue = 25,
    NotifyUserPowerSetting = 26,
    PowerInformationLevelUnused0 = 27,
    SystemMonitorHiberBootPowerOff = 28,
    SystemVideoState = 29,
    TraceApplicationPowerMessage = 30,
    TraceApplicationPowerMessageEnd = 31,
    ProcessorPerfStates = 32,
    ProcessorIdleStates = 33,
    ProcessorCap = 34,
    SystemWakeSource = 35,
    SystemHiberFileInformation = 36,
    TraceServicePowerMessage = 37,
    ProcessorLoad = 38,
    PowerShutdownNotification = 39,
    MonitorCapabilities = 40,
    SessionPowerInit = 41,
    SessionDisplayState = 42,
    PowerRequestCreate = 43,
    PowerRequestAction = 44,
    GetPowerRequestList = 45,
    ProcessorInformationEx = 46,
    NotifyUserModeLegacyPowerEvent = 47,
    GroupPark = 48,
    ProcessorIdleDomains = 49,
    WakeTimerList = 50,
    SystemHiberFileSize = 51,
    ProcessorIdleStatesHv = 52,
    ProcessorPerfStatesHv = 53,
    ProcessorPerfCapHv = 54,
    ProcessorSetIdle = 55,
    LogicalProcessorIdling = 56,
    UserPresence = 57,
    PowerSettingNotificationName = 58,
    GetPowerSettingValue = 59,
    IdleResiliency = 60,
    SessionRITState = 61,
    SessionConnectNotification = 62,
    SessionPowerCleanup = 63,
    SessionLockState = 64,
    SystemHiberbootState = 65,
    PlatformInformation = 66,
    PdcInvocation = 67,
    MonitorInvocation = 68,
    FirmwareTableInformationRegistered = 69,
    SetShutdownSelectedTime = 70,
    SuspendResumeInvocation = 71,
    PlmPowerRequestCreate = 72,
    ScreenOff = 73,
    CsDeviceNotification = 74,
    PlatformRole = 75,
    LastResumePerformance = 76,
    DisplayBurst = 77,
    ExitLatencySamplingPercentage = 78,
    RegisterSpmPowerSettings = 79,
    PlatformIdleStates = 80,
    ProcessorIdleVeto = 81,
    PlatformIdleVeto = 82,
    SystemBatteryStatePrecise = 83,
    ThermalEvent = 84,
    PowerRequestActionInternal = 85,
    BatteryDeviceState = 86,
    PowerInformationInternal = 87,
    ThermalStandby = 88,
    SystemHiberFileType = 89,
    PhysicalPowerButtonPress = 90,
    QueryPotentialDripsConstraint = 91,
    EnergyTrackerCreate = 92,
    EnergyTrackerQuery = 93,
    UpdateBlackBoxRecorder = 94,
    SessionAllowExternalDmaDevices = 95,
    SendSuspendResumeNotification = 96,
    PowerInformationLevelMaximum = 97,
};
pub const SystemPowerPolicyAc = POWER_INFORMATION_LEVEL.SystemPowerPolicyAc;
pub const SystemPowerPolicyDc = POWER_INFORMATION_LEVEL.SystemPowerPolicyDc;
pub const VerifySystemPolicyAc = POWER_INFORMATION_LEVEL.VerifySystemPolicyAc;
pub const VerifySystemPolicyDc = POWER_INFORMATION_LEVEL.VerifySystemPolicyDc;
pub const SystemPowerCapabilities = POWER_INFORMATION_LEVEL.SystemPowerCapabilities;
pub const SystemBatteryState = POWER_INFORMATION_LEVEL.SystemBatteryState;
pub const SystemPowerStateHandler = POWER_INFORMATION_LEVEL.SystemPowerStateHandler;
pub const ProcessorStateHandler = POWER_INFORMATION_LEVEL.ProcessorStateHandler;
pub const SystemPowerPolicyCurrent = POWER_INFORMATION_LEVEL.SystemPowerPolicyCurrent;
pub const AdministratorPowerPolicy = POWER_INFORMATION_LEVEL.AdministratorPowerPolicy;
pub const SystemReserveHiberFile = POWER_INFORMATION_LEVEL.SystemReserveHiberFile;
pub const ProcessorInformation = POWER_INFORMATION_LEVEL.ProcessorInformation;
pub const SystemPowerInformation = POWER_INFORMATION_LEVEL.SystemPowerInformation;
pub const ProcessorStateHandler2 = POWER_INFORMATION_LEVEL.ProcessorStateHandler2;
pub const LastWakeTime = POWER_INFORMATION_LEVEL.LastWakeTime;
pub const LastSleepTime = POWER_INFORMATION_LEVEL.LastSleepTime;
pub const SystemExecutionState = POWER_INFORMATION_LEVEL.SystemExecutionState;
pub const SystemPowerStateNotifyHandler = POWER_INFORMATION_LEVEL.SystemPowerStateNotifyHandler;
pub const ProcessorPowerPolicyAc = POWER_INFORMATION_LEVEL.ProcessorPowerPolicyAc;
pub const ProcessorPowerPolicyDc = POWER_INFORMATION_LEVEL.ProcessorPowerPolicyDc;
pub const VerifyProcessorPowerPolicyAc = POWER_INFORMATION_LEVEL.VerifyProcessorPowerPolicyAc;
pub const VerifyProcessorPowerPolicyDc = POWER_INFORMATION_LEVEL.VerifyProcessorPowerPolicyDc;
pub const ProcessorPowerPolicyCurrent = POWER_INFORMATION_LEVEL.ProcessorPowerPolicyCurrent;
pub const SystemPowerStateLogging = POWER_INFORMATION_LEVEL.SystemPowerStateLogging;
pub const SystemPowerLoggingEntry = POWER_INFORMATION_LEVEL.SystemPowerLoggingEntry;
pub const SetPowerSettingValue = POWER_INFORMATION_LEVEL.SetPowerSettingValue;
pub const NotifyUserPowerSetting = POWER_INFORMATION_LEVEL.NotifyUserPowerSetting;
pub const PowerInformationLevelUnused0 = POWER_INFORMATION_LEVEL.PowerInformationLevelUnused0;
pub const SystemMonitorHiberBootPowerOff = POWER_INFORMATION_LEVEL.SystemMonitorHiberBootPowerOff;
pub const SystemVideoState = POWER_INFORMATION_LEVEL.SystemVideoState;
pub const TraceApplicationPowerMessage = POWER_INFORMATION_LEVEL.TraceApplicationPowerMessage;
pub const TraceApplicationPowerMessageEnd = POWER_INFORMATION_LEVEL.TraceApplicationPowerMessageEnd;
pub const ProcessorPerfStates = POWER_INFORMATION_LEVEL.ProcessorPerfStates;
pub const ProcessorIdleStates = POWER_INFORMATION_LEVEL.ProcessorIdleStates;
pub const ProcessorCap = POWER_INFORMATION_LEVEL.ProcessorCap;
pub const SystemWakeSource = POWER_INFORMATION_LEVEL.SystemWakeSource;
pub const SystemHiberFileInformation = POWER_INFORMATION_LEVEL.SystemHiberFileInformation;
pub const TraceServicePowerMessage = POWER_INFORMATION_LEVEL.TraceServicePowerMessage;
pub const ProcessorLoad = POWER_INFORMATION_LEVEL.ProcessorLoad;
pub const PowerShutdownNotification =
POWER_INFORMATION_LEVEL.PowerShutdownNotification;
// NOTE(review): the first line above completes the `pub const` started on the
// previous source line; collapsed bindings re-expanded, tokens unchanged.
pub const MonitorCapabilities = POWER_INFORMATION_LEVEL.MonitorCapabilities;
pub const SessionPowerInit = POWER_INFORMATION_LEVEL.SessionPowerInit;
pub const SessionDisplayState = POWER_INFORMATION_LEVEL.SessionDisplayState;
pub const PowerRequestCreate = POWER_INFORMATION_LEVEL.PowerRequestCreate;
pub const PowerRequestAction = POWER_INFORMATION_LEVEL.PowerRequestAction;
pub const GetPowerRequestList = POWER_INFORMATION_LEVEL.GetPowerRequestList;
pub const ProcessorInformationEx = POWER_INFORMATION_LEVEL.ProcessorInformationEx;
pub const NotifyUserModeLegacyPowerEvent = POWER_INFORMATION_LEVEL.NotifyUserModeLegacyPowerEvent;
pub const GroupPark = POWER_INFORMATION_LEVEL.GroupPark;
pub const ProcessorIdleDomains = POWER_INFORMATION_LEVEL.ProcessorIdleDomains;
pub const WakeTimerList = POWER_INFORMATION_LEVEL.WakeTimerList;
pub const SystemHiberFileSize = POWER_INFORMATION_LEVEL.SystemHiberFileSize;
pub const ProcessorIdleStatesHv = POWER_INFORMATION_LEVEL.ProcessorIdleStatesHv;
pub const ProcessorPerfStatesHv = POWER_INFORMATION_LEVEL.ProcessorPerfStatesHv;
pub const ProcessorPerfCapHv = POWER_INFORMATION_LEVEL.ProcessorPerfCapHv;
pub const ProcessorSetIdle = POWER_INFORMATION_LEVEL.ProcessorSetIdle;
pub const LogicalProcessorIdling = POWER_INFORMATION_LEVEL.LogicalProcessorIdling;
pub const UserPresence = POWER_INFORMATION_LEVEL.UserPresence;
pub const PowerSettingNotificationName = POWER_INFORMATION_LEVEL.PowerSettingNotificationName;
pub const GetPowerSettingValue = POWER_INFORMATION_LEVEL.GetPowerSettingValue;
pub const IdleResiliency = POWER_INFORMATION_LEVEL.IdleResiliency;
pub const SessionRITState = POWER_INFORMATION_LEVEL.SessionRITState;
pub const SessionConnectNotification = POWER_INFORMATION_LEVEL.SessionConnectNotification;
pub const SessionPowerCleanup = POWER_INFORMATION_LEVEL.SessionPowerCleanup;
pub const SessionLockState = POWER_INFORMATION_LEVEL.SessionLockState;
pub const SystemHiberbootState = POWER_INFORMATION_LEVEL.SystemHiberbootState;
pub const PlatformInformation = POWER_INFORMATION_LEVEL.PlatformInformation;
pub const PdcInvocation = POWER_INFORMATION_LEVEL.PdcInvocation;
pub const MonitorInvocation = POWER_INFORMATION_LEVEL.MonitorInvocation;
pub const FirmwareTableInformationRegistered = POWER_INFORMATION_LEVEL.FirmwareTableInformationRegistered;
pub const SetShutdownSelectedTime = POWER_INFORMATION_LEVEL.SetShutdownSelectedTime;
pub const SuspendResumeInvocation = POWER_INFORMATION_LEVEL.SuspendResumeInvocation;
pub const PlmPowerRequestCreate = POWER_INFORMATION_LEVEL.PlmPowerRequestCreate;
pub const ScreenOff = POWER_INFORMATION_LEVEL.ScreenOff;
pub const CsDeviceNotification = POWER_INFORMATION_LEVEL.CsDeviceNotification;
pub const PlatformRole = POWER_INFORMATION_LEVEL.PlatformRole;
pub const LastResumePerformance = POWER_INFORMATION_LEVEL.LastResumePerformance;
pub const DisplayBurst = POWER_INFORMATION_LEVEL.DisplayBurst;
pub const ExitLatencySamplingPercentage = POWER_INFORMATION_LEVEL.ExitLatencySamplingPercentage;
pub const RegisterSpmPowerSettings = POWER_INFORMATION_LEVEL.RegisterSpmPowerSettings;
pub const PlatformIdleStates = POWER_INFORMATION_LEVEL.PlatformIdleStates;
pub const ProcessorIdleVeto = POWER_INFORMATION_LEVEL.ProcessorIdleVeto;
pub const PlatformIdleVeto = POWER_INFORMATION_LEVEL.PlatformIdleVeto;
pub const SystemBatteryStatePrecise = POWER_INFORMATION_LEVEL.SystemBatteryStatePrecise;
pub const ThermalEvent = POWER_INFORMATION_LEVEL.ThermalEvent;
pub const PowerRequestActionInternal = POWER_INFORMATION_LEVEL.PowerRequestActionInternal;
pub const BatteryDeviceState = POWER_INFORMATION_LEVEL.BatteryDeviceState;
pub const PowerInformationInternal = POWER_INFORMATION_LEVEL.PowerInformationInternal;
pub const ThermalStandby = POWER_INFORMATION_LEVEL.ThermalStandby;
pub const SystemHiberFileType = POWER_INFORMATION_LEVEL.SystemHiberFileType;
pub const PhysicalPowerButtonPress = POWER_INFORMATION_LEVEL.PhysicalPowerButtonPress;
pub const QueryPotentialDripsConstraint = POWER_INFORMATION_LEVEL.QueryPotentialDripsConstraint;
pub const EnergyTrackerCreate = POWER_INFORMATION_LEVEL.EnergyTrackerCreate;
pub const EnergyTrackerQuery = POWER_INFORMATION_LEVEL.EnergyTrackerQuery;
pub const UpdateBlackBoxRecorder = POWER_INFORMATION_LEVEL.UpdateBlackBoxRecorder;
pub const SessionAllowExternalDmaDevices = POWER_INFORMATION_LEVEL.SessionAllowExternalDmaDevices;
pub const SendSuspendResumeNotification = POWER_INFORMATION_LEVEL.SendSuspendResumeNotification;
pub const PowerInformationLevelMaximum = POWER_INFORMATION_LEVEL.PowerInformationLevelMaximum;
pub const SYSTEM_POWER_CONDITION = enum(i32) {
    Ac = 0,
    Dc = 1,
    Hot = 2,
    ConditionMaximum = 3,
};
pub const PoAc = SYSTEM_POWER_CONDITION.Ac;
pub const PoDc = SYSTEM_POWER_CONDITION.Dc;
pub const PoHot = SYSTEM_POWER_CONDITION.Hot;
pub const PoConditionMaximum = SYSTEM_POWER_CONDITION.ConditionMaximum;
pub const SET_POWER_SETTING_VALUE = extern struct {
    Version: u32,
    Guid: Guid,
    PowerCondition: SYSTEM_POWER_CONDITION,
    DataLength: u32,
    Data: [1]u8,
};
pub const POWER_PLATFORM_ROLE = enum(i32) {
    Unspecified = 0,
    Desktop = 1,
    Mobile = 2,
    Workstation = 3,
    EnterpriseServer = 4,
    SOHOServer = 5,
    AppliancePC = 6,
    PerformanceServer = 7,
    Slate = 8,
    Maximum = 9,
};
pub const PlatformRoleUnspecified = POWER_PLATFORM_ROLE.Unspecified;
pub const PlatformRoleDesktop = POWER_PLATFORM_ROLE.Desktop;
pub const PlatformRoleMobile = POWER_PLATFORM_ROLE.Mobile;
pub const PlatformRoleWorkstation = POWER_PLATFORM_ROLE.Workstation;
pub const PlatformRoleEnterpriseServer = POWER_PLATFORM_ROLE.EnterpriseServer;
pub const PlatformRoleSOHOServer = POWER_PLATFORM_ROLE.SOHOServer;
pub const PlatformRoleAppliancePC = POWER_PLATFORM_ROLE.AppliancePC;
pub const PlatformRolePerformanceServer = POWER_PLATFORM_ROLE.PerformanceServer;
pub const PlatformRoleSlate = POWER_PLATFORM_ROLE.Slate;
pub const PlatformRoleMaximum =
POWER_PLATFORM_ROLE.Maximum;
// NOTE(review): the first line above completes the `pub const` started on the
// previous source line; collapsed bindings re-expanded, tokens unchanged.
pub const BATTERY_REPORTING_SCALE = extern struct {
    Granularity: u32,
    Capacity: u32,
};
pub const POWER_ACTION_POLICY = extern struct {
    Action: POWER_ACTION,
    Flags: u32,
    EventCode: POWER_ACTION_POLICY_EVENT_CODE,
};
pub const SYSTEM_POWER_LEVEL = extern struct {
    Enable: BOOLEAN,
    Spare: [3]u8,
    BatteryLevel: u32,
    PowerPolicy: POWER_ACTION_POLICY,
    MinSystemState: SYSTEM_POWER_STATE,
};
pub const SYSTEM_POWER_POLICY = extern struct {
    Revision: u32,
    PowerButton: POWER_ACTION_POLICY,
    SleepButton: POWER_ACTION_POLICY,
    LidClose: POWER_ACTION_POLICY,
    LidOpenWake: SYSTEM_POWER_STATE,
    Reserved: u32,
    Idle: POWER_ACTION_POLICY,
    IdleTimeout: u32,
    IdleSensitivity: u8,
    DynamicThrottle: u8,
    Spare2: [2]u8,
    MinSleep: SYSTEM_POWER_STATE,
    MaxSleep: SYSTEM_POWER_STATE,
    ReducedLatencySleep: SYSTEM_POWER_STATE,
    WinLogonFlags: u32,
    Spare3: u32,
    DozeS4Timeout: u32,
    BroadcastCapacityResolution: u32,
    DischargePolicy: [4]SYSTEM_POWER_LEVEL,
    VideoTimeout: u32,
    VideoDimDisplay: BOOLEAN,
    VideoReserved: [3]u32,
    SpindownTimeout: u32,
    OptimizeForPower: BOOLEAN,
    FanThrottleTolerance: u8,
    ForcedThrottle: u8,
    MinThrottle: u8,
    OverThrottled: POWER_ACTION_POLICY,
};
pub const PROCESSOR_POWER_POLICY_INFO = extern struct {
    TimeCheck: u32,
    DemoteLimit: u32,
    PromoteLimit: u32,
    DemotePercent: u8,
    PromotePercent: u8,
    Spare: [2]u8,
    _bitfield: u32,
};
pub const PROCESSOR_POWER_POLICY = extern struct {
    Revision: u32,
    DynamicThrottle: u8,
    Spare: [3]u8,
    _bitfield: u32,
    PolicyCount: u32,
    Policy: [3]PROCESSOR_POWER_POLICY_INFO,
};
pub const ADMINISTRATOR_POWER_POLICY = extern struct {
    MinSleep: SYSTEM_POWER_STATE,
    MaxSleep: SYSTEM_POWER_STATE,
    MinVideoTimeout: u32,
    MaxVideoTimeout: u32,
    MinSpindownTimeout: u32,
    MaxSpindownTimeout: u32,
};
pub const SYSTEM_POWER_CAPABILITIES = extern struct {
    PowerButtonPresent: BOOLEAN,
    SleepButtonPresent: BOOLEAN,
    LidPresent: BOOLEAN,
    SystemS1: BOOLEAN,
    SystemS2: BOOLEAN,
    SystemS3: BOOLEAN,
    SystemS4: BOOLEAN,
    SystemS5: BOOLEAN,
    HiberFilePresent: BOOLEAN,
    FullWake: BOOLEAN,
    VideoDimPresent: BOOLEAN,
    ApmPresent: BOOLEAN,
    UpsPresent: BOOLEAN,
    ThermalControl: BOOLEAN,
    ProcessorThrottle: BOOLEAN,
    ProcessorMinThrottle: u8,
    ProcessorMaxThrottle: u8,
    FastSystemS4: BOOLEAN,
    Hiberboot: BOOLEAN,
    WakeAlarmPresent: BOOLEAN,
    AoAc: BOOLEAN,
    DiskSpinDown: BOOLEAN,
    HiberFileType: u8,
    AoAcConnectivitySupported: BOOLEAN,
    spare3: [6]u8,
    SystemBatteriesPresent: BOOLEAN,
    BatteriesAreShortTerm: BOOLEAN,
    BatteryScale: [3]BATTERY_REPORTING_SCALE,
    AcOnLineWake: SYSTEM_POWER_STATE,
    SoftLidWake: SYSTEM_POWER_STATE,
    RtcWake: SYSTEM_POWER_STATE,
    MinDeviceWakeState: SYSTEM_POWER_STATE,
    DefaultLowLatencyWake: SYSTEM_POWER_STATE,
};
pub const SYSTEM_BATTERY_STATE = extern struct {
    AcOnLine: BOOLEAN,
    BatteryPresent: BOOLEAN,
    Charging: BOOLEAN,
    Discharging: BOOLEAN,
    Spare1: [3]BOOLEAN,
    Tag: u8,
    MaxCapacity: u32,
    RemainingCapacity: u32,
    Rate: u32,
    EstimatedTime: u32,
    DefaultAlert1: u32,
    DefaultAlert2: u32,
};
pub const POWERBROADCAST_SETTING = extern struct {
    PowerSetting: Guid,
    DataLength: u32,
    Data: [1]u8,
};
pub const SYSTEM_POWER_STATUS = extern struct {
    ACLineStatus: u8,
    BatteryFlag: u8,
    BatteryLifePercent: u8,
    SystemStatusFlag: u8,
    BatteryLifeTime: u32,
    BatteryFullLifeTime: u32,
};

//--------------------------------------------------------------------------------
// Section: Functions (97)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn CallNtPowerInformation(
    InformationLevel: POWER_INFORMATION_LEVEL,
    // TODO: what to do with BytesParamIndex 2?
    InputBuffer: ?*anyopaque,
    InputBufferLength: u32,
    // TODO: what to do with BytesParamIndex 4?
OutputBuffer: ?*anyopaque,
    OutputBufferLength: u32,
) callconv(@import("std").os.windows.WINAPI) i32;
// NOTE(review): the lines above complete the declaration started on the
// previous source line; collapsed bindings re-expanded, tokens unchanged.
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn GetPwrCapabilities(
    lpspc: ?*SYSTEM_POWER_CAPABILITIES,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows8.0'
pub extern "POWRPROF" fn PowerDeterminePlatformRoleEx(
    Version: POWER_PLATFORM_ROLE_VERSION,
) callconv(@import("std").os.windows.WINAPI) POWER_PLATFORM_ROLE;
// TODO: this type is limited to platform 'windows8.0'
pub extern "POWRPROF" fn PowerRegisterSuspendResumeNotification(
    Flags: u32,
    Recipient: ?HANDLE,
    RegistrationHandle: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows8.0'
pub extern "POWRPROF" fn PowerUnregisterSuspendResumeNotification(
    RegistrationHandle: ?HPOWERNOTIFY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerReadACValue(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    Type: ?*u32,
    // TODO: what to do with BytesParamIndex 6?
    Buffer: ?*u8,
    BufferSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerReadDCValue(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    Type: ?*u32,
    // TODO: what to do with BytesParamIndex 6?
    Buffer: ?*u8,
    BufferSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerWriteACValueIndex(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    AcValueIndex: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerWriteDCValueIndex(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    DcValueIndex: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerGetActiveScheme(
    UserRootPowerKey: ?HKEY,
    ActivePolicyGuid: ?*?*Guid,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerSetActiveScheme(
    UserRootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "POWRPROF" fn PowerSettingRegisterNotification(
    SettingGuid: ?*const Guid,
    Flags: POWER_SETTING_REGISTER_NOTIFICATION_FLAGS,
    Recipient: ?HANDLE,
    RegistrationHandle: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "POWRPROF" fn PowerSettingUnregisterNotification(
    RegistrationHandle: ?HPOWERNOTIFY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows10.0.17763'
pub extern "POWRPROF" fn PowerRegisterForEffectivePowerModeNotifications(
    Version: u32,
    Callback: ?EFFECTIVE_POWER_MODE_CALLBACK,
    Context: ?*anyopaque,
    RegistrationHandle: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows10.0.17763'
pub extern "POWRPROF" fn PowerUnregisterFromEffectivePowerModeNotifications(
    RegistrationHandle: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn GetPwrDiskSpindownRange(
    puiMax: ?*u32,
    puiMin: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn EnumPwrSchemes(
    lpfn: ?PWRSCHEMESENUMPROC,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn ReadGlobalPwrPolicy(
    pGlobalPowerPolicy: ?*GLOBAL_POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn ReadPwrScheme(
    uiID: u32,
    pPowerPolicy: ?*POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn WritePwrScheme(
    puiID: ?*u32,
    lpszSchemeName: ?[*:0]const u16,
    lpszDescription: ?[*:0]const u16,
    lpScheme: ?*POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn WriteGlobalPwrPolicy(
    pGlobalPowerPolicy: ?*GLOBAL_POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn DeletePwrScheme(
    uiID: u32,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn GetActivePwrScheme(
    puiID: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn SetActivePwrScheme(
    uiID: u32,
    pGlobalPowerPolicy: ?*GLOBAL_POWER_POLICY,
    pPowerPolicy: ?*POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn IsPwrSuspendAllowed() callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn IsPwrHibernateAllowed() callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn IsPwrShutdownAllowed() callconv(@import("std").os.windows.WINAPI) BOOLEAN;
pub extern "POWRPROF" fn IsAdminOverrideActive(
    papp: ?*ADMINISTRATOR_POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn SetSuspendState(
    bHibernate: BOOLEAN,
    bForce: BOOLEAN,
    bWakeupEventsDisabled: BOOLEAN,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn GetCurrentPowerPolicies(
    pGlobalPowerPolicy: ?*GLOBAL_POWER_POLICY,
    pPowerPolicy: ?*POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn CanUserWritePwrScheme() callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn ReadProcessorPwrScheme(
    uiID: u32,
    pMachineProcessorPowerPolicy: ?*MACHINE_PROCESSOR_POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "POWRPROF" fn WriteProcessorPwrScheme(
    uiID: u32,
    pMachineProcessorPowerPolicy: ?*MACHINE_PROCESSOR_POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
pub extern "POWRPROF" fn ValidatePowerPolicies(
    pGlobalPowerPolicy: ?*GLOBAL_POWER_POLICY,
    pPowerPolicy: ?*POWER_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows6.1'
pub extern "POWRPROF" fn PowerIsSettingRangeDefined(
    SubKeyGuid: ?*const Guid,
    SettingGuid: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) BOOLEAN;
// TODO: this type is limited to platform 'windows6.1'
pub extern "POWRPROF" fn PowerSettingAccessCheckEx(
    AccessFlags: POWER_DATA_ACCESSOR,
    PowerGuid: ?*const Guid,
    AccessType: REG_SAM_FLAGS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerSettingAccessCheck(
    AccessFlags: POWER_DATA_ACCESSOR,
    PowerGuid: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerReadACValueIndex(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    AcValueIndex: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerReadDCValueIndex(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    DcValueIndex: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerReadFriendlyName(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    // TODO: what to do with BytesParamIndex 5?
    Buffer: ?*u8,
    BufferSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "POWRPROF" fn PowerReadDescription(
    RootPowerKey: ?HKEY,
    SchemeGuid: ?*const Guid,
    SubGroupOfPowerSettingsGuid: ?*const Guid,
    PowerSettingGuid: ?*const Guid,
    // TODO: what to do with BytesParamIndex 5?
Buffer: ?*u8, BufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadPossibleValue( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, Type: ?*u32, PossibleSettingIndex: u32, // TODO: what to do with BytesParamIndex 6? Buffer: ?*u8, BufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadPossibleFriendlyName( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, PossibleSettingIndex: u32, // TODO: what to do with BytesParamIndex 5? Buffer: ?*u8, BufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadPossibleDescription( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, PossibleSettingIndex: u32, // TODO: what to do with BytesParamIndex 5? 
Buffer: ?*u8, BufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadValueMin( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ValueMinimum: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadValueMax( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ValueMaximum: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadValueIncrement( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ValueIncrement: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadValueUnitsSpecifier( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, // TODO: what to do with BytesParamIndex 4? 
Buffer: ?*u8, BufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadACDefaultIndex( RootPowerKey: ?HKEY, SchemePersonalityGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, AcDefaultIndex: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadDCDefaultIndex( RootPowerKey: ?HKEY, SchemePersonalityGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, DcDefaultIndex: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadIconResourceSpecifier( RootPowerKey: ?HKEY, SchemeGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, // TODO: what to do with BytesParamIndex 5? Buffer: ?*u8, BufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReadSettingAttributes( SubGroupGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteFriendlyName( RootPowerKey: ?HKEY, SchemeGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, // TODO: what to do with BytesParamIndex 5? Buffer: ?*u8, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteDescription( RootPowerKey: ?HKEY, SchemeGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, // TODO: what to do with BytesParamIndex 5? 
Buffer: ?*u8, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWritePossibleValue( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, Type: u32, PossibleSettingIndex: u32, // TODO: what to do with BytesParamIndex 6? Buffer: ?*u8, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWritePossibleFriendlyName( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, PossibleSettingIndex: u32, // TODO: what to do with BytesParamIndex 5? Buffer: ?*u8, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWritePossibleDescription( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, PossibleSettingIndex: u32, // TODO: what to do with BytesParamIndex 5? 
Buffer: ?*u8, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteValueMin( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ValueMinimum: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteValueMax( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ValueMaximum: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteValueIncrement( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ValueIncrement: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteValueUnitsSpecifier( RootPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, // TODO: what to do with BytesParamIndex 4? 
Buffer: ?*u8, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteACDefaultIndex( RootSystemPowerKey: ?HKEY, SchemePersonalityGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, DefaultAcIndex: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteDCDefaultIndex( RootSystemPowerKey: ?HKEY, SchemePersonalityGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, DefaultDcIndex: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteIconResourceSpecifier( RootPowerKey: ?HKEY, SchemeGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, // TODO: what to do with BytesParamIndex 5? 
Buffer: ?*u8, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerWriteSettingAttributes( SubGroupGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, Attributes: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerDuplicateScheme( RootPowerKey: ?HKEY, SourceSchemeGuid: ?*const Guid, DestinationSchemeGuid: ?*?*Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerImportPowerScheme( RootPowerKey: ?HKEY, ImportFileNamePath: ?[*:0]const u16, DestinationSchemeGuid: ?*?*Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerDeleteScheme( RootPowerKey: ?HKEY, SchemeGuid: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerRemovePowerSetting( PowerSettingSubKeyGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerCreateSetting( RootSystemPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerCreatePossibleSetting( RootSystemPowerKey: ?HKEY, SubGroupOfPowerSettingsGuid: ?*const Guid, PowerSettingGuid: ?*const Guid, PossibleSettingIndex: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerEnumerate( RootPowerKey: ?HKEY, SchemeGuid: ?*const Guid, SubGroupOfPowerSettingsGuid: ?*const Guid, AccessFlags: 
POWER_DATA_ACCESSOR, Index: u32, // TODO: what to do with BytesParamIndex 6? Buffer: ?*u8, BufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "POWRPROF" fn PowerOpenUserPowerKey( phUserPowerKey: ?*?HKEY, Access: u32, OpenExisting: BOOL, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "POWRPROF" fn PowerOpenSystemPowerKey( phSystemPowerKey: ?*?HKEY, Access: u32, OpenExisting: BOOL, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerCanRestoreIndividualDefaultPowerScheme( SchemeGuid: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerRestoreIndividualDefaultPowerScheme( SchemeGuid: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerRestoreDefaultPowerSchemes( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerReplaceDefaultPowerSchemes( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn PowerDeterminePlatformRole( ) callconv(@import("std").os.windows.WINAPI) POWER_PLATFORM_ROLE; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn DevicePowerEnumDevices( QueryIndex: u32, QueryInterpretationFlags: u32, QueryFlags: u32, // TODO: what to do with BytesParamIndex 4? 
pReturnBuffer: ?*u8, pBufferSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOLEAN; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn DevicePowerSetDeviceState( DeviceDescription: ?[*:0]const u16, SetFlags: u32, SetData: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn DevicePowerOpen( DebugMask: u32, ) callconv(@import("std").os.windows.WINAPI) BOOLEAN; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "POWRPROF" fn DevicePowerClose( ) callconv(@import("std").os.windows.WINAPI) BOOLEAN; // TODO: this type is limited to platform 'windows8.1' pub extern "POWRPROF" fn PowerReportThermalEvent( Event: ?*THERMAL_EVENT, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "USER32" fn RegisterPowerSettingNotification( hRecipient: ?HANDLE, PowerSettingGuid: ?*const Guid, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) ?HPOWERNOTIFY; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "USER32" fn UnregisterPowerSettingNotification( Handle: ?HPOWERNOTIFY, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows8.0' pub extern "USER32" fn RegisterSuspendResumeNotification( hRecipient: ?HANDLE, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) ?HPOWERNOTIFY; // TODO: this type is limited to platform 'windows8.0' pub extern "USER32" fn UnregisterSuspendResumeNotification( Handle: ?HPOWERNOTIFY, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn RequestWakeupLatency( latency: LATENCY_TIME, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn IsSystemResumeAutomatic( ) 
callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn SetThreadExecutionState( esFlags: EXECUTION_STATE, ) callconv(@import("std").os.windows.WINAPI) EXECUTION_STATE; // TODO: this type is limited to platform 'windows6.1' pub extern "KERNEL32" fn PowerCreateRequest( Context: ?*REASON_CONTEXT, ) callconv(@import("std").os.windows.WINAPI) ?HANDLE; // TODO: this type is limited to platform 'windows6.1' pub extern "KERNEL32" fn PowerSetRequest( PowerRequest: ?HANDLE, RequestType: POWER_REQUEST_TYPE, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows6.1' pub extern "KERNEL32" fn PowerClearRequest( PowerRequest: ?HANDLE, RequestType: POWER_REQUEST_TYPE, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetDevicePowerState( hDevice: ?HANDLE, pfOn: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn SetSystemPowerState( fSuspend: BOOL, fForce: BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetSystemPowerStatus( lpSystemPowerStatus: ?*SYSTEM_POWER_STATUS, ) callconv(@import("std").os.windows.WINAPI) BOOL; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (11) 
//--------------------------------------------------------------------------------
// Type aliases pulled from sibling generated modules; paths are relative to
// the win32 bindings root.
const Guid = @import("../zig.zig").Guid;
const BOOL = @import("../foundation.zig").BOOL;
const BOOLEAN = @import("../foundation.zig").BOOLEAN;
const HANDLE = @import("../foundation.zig").HANDLE;
const HKEY = @import("../system/registry.zig").HKEY;
const HRESULT = @import("../foundation.zig").HRESULT;
const LPARAM = @import("../foundation.zig").LPARAM;
const PROPERTYKEY = @import("../ui/shell/properties_system.zig").PROPERTYKEY;
const PWSTR = @import("../foundation.zig").PWSTR;
const REASON_CONTEXT = @import("../system/threading.zig").REASON_CONTEXT;
const REG_SAM_FLAGS = @import("../system/registry.zig").REG_SAM_FLAGS;

// Generated smoke test: forces semantic analysis of every public declaration
// in this module so type errors in the bindings surface under `zig test`.
test {
    // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476
    if (@hasDecl(@This(), "EFFECTIVE_POWER_MODE_CALLBACK")) { _ = EFFECTIVE_POWER_MODE_CALLBACK; }
    if (@hasDecl(@This(), "PWRSCHEMESENUMPROC_V1")) { _ = PWRSCHEMESENUMPROC_V1; }
    if (@hasDecl(@This(), "PWRSCHEMESENUMPROC")) { _ = PWRSCHEMESENUMPROC; }
    if (@hasDecl(@This(), "PDEVICE_NOTIFY_CALLBACK_ROUTINE")) { _ = PDEVICE_NOTIFY_CALLBACK_ROUTINE; }

    // Budget scales with declaration count so the inline loop below can
    // finish comptime evaluation even for large generated modules.
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );

    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
win32/system/power.zig
//! Build script for zig-on-rpi-using-ultibo: compiles a Zig object, links it
//! into an Ultibo (FreePascal) kernel image per Raspberry Pi model, and
//! packages/uploads a GitHub release.
//! NOTE(review): written against the ~2019 (pre-0.4) std.build API — varargs
//! `args: ...`, `Builder.addCommand`, bare `[]T{...}` array literals and `{}`
//! format specifiers. It only builds with a compiler of that era.
const std = @import("std");
const builtin = @import("builtin");
const Builder = std.build.Builder;
const CommandStep = std.build.CommandStep;

// Host location of the GCC ARM cross toolchain expected by FPC.
const gcc_arm = "/usr/gcc-arm-none-eabi-8-2018-q4-major";
// Repository name and release tag; together they name the release zip.
const repoName = "zig-on-rpi-using-ultibo";
const version = "v20190307";

/// Per-model build parameters: FPC define (`conf`), lowercase suffix used for
/// the renamed image (`lower`), target architecture and processor flags, and
/// the image filename FPC emits for that model (`kernel`).
const Config = struct {
    conf: []const u8,
    lower: []const u8,
    arch: []const u8,
    proc: []const u8,
    kernel: []const u8,

    /// Builds the FPC/Ultibo program for this model and renames the produced
    /// image to `kernel-<lower>.img`. Returns the rename step; it depends on
    /// the FPC step, which in turn depends on the Zig object build.
    pub fn fpcCommands (it: Config, b: *Builder, zigmain: *CommandStep, programName: []const u8) !*CommandStep {
        const home = try std.os.getEnvVarOwned(b.allocator, "HOME");
        defer b.allocator.free(home);
        const bin = try s(b, "{}/ultibo/core/fpc/bin", home);
        defer b.allocator.free(bin);
        const rtl = try s(b, "{}/root/ultibo/core/source/rtl", home);
        defer b.allocator.free(rtl);
        // Run FPC with the Ultibo RTL; all compiler output is captured in
        // errors.log (the `>& ` redirection).
        // NOTE(review): `bin`/`rtl` are formatted into the command string
        // before their `defer` frees run, so the frees are safe here.
        const fpc = try bash(b, "PATH={}:{}/bin:$PATH fpc -dBUILD_{} -B -O2 -Tultibo -Parm -Cp{} -Wp{} -Fi{}/ultibo/extras -Fi{}/ultibo/core @{}/{}.CFG {}.pas >& errors.log", bin, gcc_arm, it.conf, it.arch, it.proc, rtl, rtl, bin, it.conf, programName);
        fpc.step.dependOn(&zigmain.step);
        // Rename the model-specific image so images for all models coexist.
        const rename = try bash(b, "mv {} kernel-{}.img", it.kernel, it.lower);
        rename.step.dependOn(&fpc.step);
        return rename;
    }
};

// The three supported Raspberry Pi models. Index 1 (RPI2) is wired to the
// default build step below.
const configs = []Config {
    Config {
        .conf = "RPI",
        .lower = "rpi",
        .arch = "ARMV6",
        .proc = "RPIB",
        .kernel = "kernel.img",
    },
    Config {
        .conf = "RPI2",
        .lower = "rpi2",
        .arch = "ARMV7a",
        .proc = "RPI2B",
        .kernel = "kernel7.img",
    },
    Config {
        .conf = "RPI3",
        .lower = "rpi3",
        .arch = "ARMV7a",
        .proc = "RPI3B",
        .kernel = "kernel7.img",
    },
};

/// Build entry point. Defines steps: `clean`, `kernels` (all models),
/// `create-release` (zip) and `upload-draft-release` (GitHub draft via hub).
pub fn build(b: *Builder) !void {
    const zipFileName = try s(b, "{}-{}.zip", repoName, version);
    defer b.allocator.free(zipFileName);

    // `clean` removes every generated artifact, including the release zip.
    const clean_command = try bash(b, "rm -rf lib/ release/ zig-cache/ errors.log {} *.o *.a *.elf *.img", zipFileName);
    const clean = b.step("clean", "remove output files");
    clean.dependOn(&clean_command.step);

    // Compile the Zig side to an object for the bare-metal ARM target; the
    // -isystem paths point at the Ultibo userland headers.
    const zigmain = b.addCommand(null, b.env_map, [][]const u8 {
        "zig", "build-obj",
        "-target", "armv7-freestanding-gnueabihf",
        "-isystem", "subtree/ultibohub/API/include",
        "-isystem", "subtree/ultibohub/Userland",
        "-isystem", "subtree/ultibohub/Userland/host_applications/ultibo/libs/bcm_host/include",
        "-isystem", "subtree/ultibohub/Userland/interface",
        "-isystem", "subtree/ultibohub/Userland/interface/vcos/ultibo",
        "-isystem", "subtree/ultibohub/Userland/interface/vmcs_host/ultibo",
        "-isystem", "subtree/ultibohub/Userland/middleware/dlloader",
        "-isystem", "/usr/lib/gcc/arm-none-eabi",
        "-isystem", "/usr/gcc-arm-none-eabi-8-2018-q4-major/arm-none-eabi/include",
        "zigmain.zig"
    });
    zigmain.step.dependOn(clean);

    const kernels = b.step("kernels", "build kernel images for all rpi models");
    const programName = "ultibomain";
    for (configs) |config, i| {
        var ultibomain = try config.fpcCommands(b, zigmain, programName);
        kernels.dependOn(&ultibomain.step);
        // Default `zig build` produces only the RPI2 image (index 1).
        if (i == 1) {
            b.default_step.dependOn(&ultibomain.step);
        }
    }

    // Prepend "<repo> <version>" as a title line to the release message.
    const release_message_command = try bash(b, "mkdir -p release/ && echo \"{} {}\" >> release/release-message.md && echo >> release/release-message.md && cat release-message.md >> release/release-message.md", repoName, version);
    // Collect images, firmware and boot config into release/ and zip it.
    const zip_command = try bash(b, "mkdir -p release/ && cp -a *.img firmware/* config.txt cmdline.txt release/ && zip -jqr {} release/", zipFileName);
    zip_command.step.dependOn(kernels);
    zip_command.step.dependOn(&release_message_command.step);
    const create_release = b.step("create-release", "create release zip file");
    create_release.dependOn(&zip_command.step);

    // Uses the `hub` CLI; the release stays a draft until published manually.
    const upload_command = try bash(b, "hub release create --draft -F release/release-message.md -a {} {} && echo && echo this is an unpublished draft release", zipFileName, version);
    upload_command.step.dependOn(create_release);
    const upload_release = b.step("upload-draft-release", "upload draft github release");
    upload_release.dependOn(&upload_command.step);
}

/// Formats a shell command and wraps it in a `bash -c` command step.
/// NOTE(review): `command` is freed by the defer at configure time, before
/// the step executes — this assumes addCommand copies its argv; verify.
fn bash(b: *Builder, comptime fmt: []const u8, args: ...) !*CommandStep {
    var command = try std.fmt.allocPrint(b.allocator, fmt, args);
    defer b.allocator.free(command);
    return b.addCommand(null, b.env_map, [][]const u8 {
        "bash",
        "-c",
        command,
    });
}

/// allocPrint shorthand using the builder's allocator. Caller frees.
fn s(b: *Builder, comptime fmt: []const u8, args: ...) ![]const u8 {
    return std.fmt.allocPrint(b.allocator, fmt, args);
}
build.zig
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;

/// Test fixture: a struct with mutable state, a function-pointer field, and
/// methods taking self by value, by pointer, and by const pointer — covering
/// every receiver form the field-call tests below exercise.
const HasFuncs = struct {
    state: u32,
    func_field: *const fn (u32) u32,

    fn inc(self: *HasFuncs) void {
        self.state += 1;
    }
    fn get(self: HasFuncs) u32 {
        return self.state;
    }
    fn getPtr(self: *const HasFuncs) *const u32 {
        return &self.state;
    }
    fn one(_: u32) u32 {
        return 1;
    }
    fn two(_: u32) u32 {
        return 2;
    }
};

// Checks method dispatch and function-pointer-field calls through the normal
// `x.f(...)` syntax, on a value, a mutable pointer, and a const pointer.
test "standard field calls" {
    // Backends that have not implemented this behavior yet are skipped.
    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    // Plain namespace calls — no receiver involved.
    try expect(HasFuncs.one(0) == 1);
    try expect(HasFuncs.two(0) == 2);

    var v: HasFuncs = undefined;
    v.state = 0;
    v.func_field = HasFuncs.one;
    const pv = &v;
    const pcv: *const HasFuncs = pv;

    // Method calls through value, pointer, and const pointer receivers all
    // observe/mutate the same underlying state.
    try expect(v.get() == 0);
    v.inc();
    try expect(v.state == 1);
    try expect(v.get() == 1);
    pv.inc();
    try expect(v.state == 2);
    try expect(pv.get() == 2);
    try expect(v.getPtr().* == 2);
    try expect(pcv.get() == 2);
    try expect(pcv.getPtr().* == 2);

    // Calling through the function-pointer *field* must invoke the stored
    // pointer (not a namespace function of the same shape).
    v.func_field = HasFuncs.one;
    try expect(v.func_field(0) == 1);
    try expect(pv.func_field(0) == 1);
    try expect(pcv.func_field(0) == 1);
    // Evaluation-order check: the callee is loaded from func_field before the
    // argument block reassigns it, so `one` (== 1) must still be called.
    try expect(pcv.func_field(blk: {
        pv.func_field = HasFuncs.two;
        break :blk 0;
    }) == 1);

    v.func_field = HasFuncs.two;
    try expect(v.func_field(0) == 2);
    try expect(pv.func_field(0) == 2);
    try expect(pcv.func_field(0) == 2);
}

// Mirror of the test above using `@field(...)` access instead of `x.f` —
// the two access forms must behave identically.
test "@field field calls" {
    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    try expect(@field(HasFuncs, "one")(0) == 1);
    try expect(@field(HasFuncs, "two")(0) == 2);

    var v: HasFuncs = undefined;
    v.state = 0;
    v.func_field = HasFuncs.one;
    const pv = &v;
    const pcv: *const HasFuncs = pv;

    try expect(@field(v, "get")() == 0);
    @field(v, "inc")();
    try expect(v.state == 1);
    try expect(@field(v, "get")() == 1);
    @field(pv, "inc")();
    try expect(v.state == 2);
    try expect(@field(pv, "get")() == 2);
    try expect(@field(v, "getPtr")().* == 2);
    try expect(@field(pcv, "get")() == 2);
    try expect(@field(pcv, "getPtr")().* == 2);

    v.func_field = HasFuncs.one;
    try expect(@field(v, "func_field")(0) == 1);
    try expect(@field(pv, "func_field")(0) == 1);
    try expect(@field(pcv, "func_field")(0) == 1);
    // Same evaluation-order check as the standard-syntax test.
    try expect(@field(pcv, "func_field")(blk: {
        pv.func_field = HasFuncs.two;
        break :blk 0;
    }) == 1);

    v.func_field = HasFuncs.two;
    try expect(@field(v, "func_field")(0) == 2);
    try expect(@field(pv, "func_field")(0) == 2);
    try expect(@field(pcv, "func_field")(0) == 2);
}
test/behavior/member_func.zig
pub const std = @import("std");
pub const imr = @import("markups/imr.zig");
const Url = @import("url.zig").Url;
const Document = imr.Document;
const Tag = imr.Tag;
const Allocator = std.mem.Allocator;

// Input events delivered to processTag. Coordinates are in document space
// (cursor position minus the component origin and scroll offset).
const Event = union(enum) {
    MouseButton: struct {
        pressed: bool,
        x: f64,
        y: f64
    }
};

/// Renders an IMR document onto a graphics backend of type T and routes
/// mouse input back to the document (link clicks, scrolling).
/// T is duck-typed: it must provide drawing calls (setFontFace, text, fill,
/// ...), getWidth/getHeight, cursor queries, and a `userData` usize used to
/// recover the context from backend callbacks.
pub fn RenderContext(comptime T: type) type {
    return struct {
        allocator: *Allocator,
        /// Instance of type T.
        graphics: *T,
        document: Document,
        width: f64 = 0,
        height: f64 = 0,
        // Current scroll offset (<= 0) and the target it animates toward.
        offsetY: f64 = 0,
        /// Used for smooth scrolling
        offsetYTarget: f64 = 0,
        // Component position to use if drawing on the same surface as the render context
        x: f64 = 0,
        y: f64 = 0,
        // Invoked when a link is activated; must be .Async when the program
        // is compiled in async I/O mode (see CallbackCallConv).
        linkCallback: ?fn(ctx: *Self, url: Url) callconv(CallbackCallConv) anyerror!void = null,
        // When true, render() re-runs layout before drawing the next frame.
        layout_requested: bool = false,

        pub const CallbackCallConv: std.builtin.CallingConvention = if (std.io.is_async) .Async else .Unspecified;
        const Self = @This();

        /// Registers this context with the backend by stashing its address
        /// in `userData`, so the static callbacks below can recover `self`.
        pub fn setup(self: *const Self) void {
            //self.graphics.mouseScrollCb = Self.mouseScrollCallback;
            self.graphics.userData = @ptrToInt(self);
        }

        /// Recomputes layoutX/layoutY for every tag, top to bottom.
        pub fn layout(self: *Self) void {
            var y: f64 = 0;
            for (self.document.tags.items) |*tag| {
                self.layoutTag(tag, &y);
            }
        }

        // Lays out one tag; `y` is the running vertical cursor, advanced by
        // each text tag's margin and measured height.
        fn layoutTag(self: *Self, tag: *Tag, y: *f64) void {
            const style = tag.style;
            const g = self.graphics;
            const margin = style.margin;
            switch (tag.data) {
                .text => |text| {
                    // Font must be set before measuring, since metrics
                    // depend on face/size/wrap width.
                    g.setFontFace(style.fontFace);
                    g.setFontSize(style.fontSize);
                    g.setTextWrap(g.getWidth());
                    const metrics = g.getTextMetrics(text);
                    // Margins may be relative to the viewport; a margin that
                    // does not resolve contributes 0.
                    tag.layoutX = margin.x.get(self.width, self.height) orelse 0;
                    y.* += margin.y.get(self.width, self.height) orelse 0;
                    tag.layoutY = y.*;
                    y.* += metrics.height + (margin.height.get(self.width, self.height) orelse 0);
                },
                .container => |childrens| {
                    for (childrens.items) |*child| {
                        self.layoutTag(child, y);
                    }
                }
            }
        }

        // Applies an 8-bit-per-channel color as the backend source color.
        fn setColor(self: *const Self, color: imr.Color) void {
            const red = @intToFloat(f64, color.red) / 255.0;
            const green = @intToFloat(f64, color.green) / 255.0;
            const blue = @intToFloat(f64, color.blue) / 255.0;
            self.graphics.setSourceRGB(red, green, blue);
        }

        // Linear interpolation between a and b at parameter t in [0, 1].
        fn lerp(a: f64, b: f64, t: f64) f64 {
            return a * (1-t) + b * t;
        }

        /// Draws one frame: optional re-layout, smooth-scroll animation step,
        /// then tag rendering (stopping at the first tag below the viewport).
        pub fn render(self: *Self) void {
            if (self.layout_requested) {
                self.layout();
                self.layout_requested = false;
            }
            self.width = self.graphics.getWidth();
            self.height = self.graphics.getHeight();

            // Smooth scrolling
            // Ease offsetY toward its target; request another frame until
            // they converge (within 0.01).
            if (!std.math.approxEqAbs(f64, self.offsetY, self.offsetYTarget, 0.01)) {
                self.offsetY = lerp(self.offsetY, self.offsetYTarget, 0.4);
                self.graphics.request_next_frame = true;
            }

            for (self.document.tags.items) |tag| {
                if (self.renderTag(tag)) break;
            }
        }

        // Draws one text run at its laid-out position, offset by scroll and
        // the component origin.
        fn renderText(self: *const Self, tag: Tag, text: [:0]const u8) void {
            const g = self.graphics;
            g.moveTo(tag.layoutX, tag.layoutY + self.offsetY + self.y);
            g.text(text);
            g.fill();
        }

        // Renders a tag (and, for containers, its children). Returns true
        // once a tag starts below the viewport, so callers can stop early —
        // tags are in document order, so everything after it is off-screen too.
        fn renderTag(self: *const Self, tag: Tag) bool {
            const style = tag.style;
            const g = self.graphics;
            switch (tag.data) {
                .text => |text| {
                    g.setFontFace(style.fontFace);
                    g.setFontSize(style.fontSize);
                    g.setTextWrap(self.width - tag.layoutX);
                    const metrics = g.getTextMetrics(text);
                    const y = tag.layoutY;
                    // Below the visible area: cull this and all later tags.
                    if (y > self.height-self.offsetY) {
                        return true;
                    }
                    // Skip tags scrolled above the viewport; draw the rest.
                    if (y + metrics.height >= -self.offsetY) {
                        if (style.textColor) |color| {
                            self.setColor(color);
                        } else {
                            // Default to black when no color is styled.
                            g.setSourceRGB(0, 0, 0);
                        }
                        self.renderText(tag, text);
                    }
                },
                .container => |childrens| {
                    for (childrens.items) |child| {
                        if (self.renderTag(child)) return true;
                    }
                }
            }
            return false;
        }

        /// Backend scroll callback: moves the scroll target (35 px per unit)
        /// and clamps it so the document cannot be scrolled above its top.
        pub fn mouseScrollCallback(backend: *T, yOffset: f64) void {
            const self = @intToPtr(*Self, backend.userData);
            self.offsetYTarget += yOffset * 35.0;
            if (self.offsetYTarget > 0) self.offsetYTarget = 0;
            backend.frame_requested = true;
        }

        /// Backend mouse-button callback: converts the cursor position into
        /// document space and dispatches the event through the tag tree.
        pub fn mouseButtonCallback(backend: *T, button: T.MouseButton, pressed: bool) void {
            _ = button;
            const self = @intToPtr(*Self, backend.userData);
            const event = Event {
                .MouseButton = .{
                    .pressed = pressed,
                    .x = backend.getCursorX() - self.x,
                    .y = backend.getCursorY() - self.y - self.offsetY
                }
            };
            // Ignore clicks left of / above the component's own area.
            if (backend.getCursorX() < self.x or backend.getCursorY() < self.y) return;
            for (self.document.tags.items) |tag| {
                const stopLoop = self.processTag(tag, &event) catch |err| {
                    // Log and abort dispatch on callback failure.
                    std.debug.warn("error: {}\n", .{err});
                    if (@errorReturnTrace()) |trace| {
                        std.debug.dumpStackTrace(trace.*);
                    }
                    return;
                };
                if (stopLoop) break;
            }
        }

        // Hit-tests one tag against an event. On a button *release* inside a
        // link's text box, fires linkCallback (via @asyncCall with a heap
        // frame when compiled async). Returns true to stop further dispatch.
        fn processTag(self: *Self, tag: Tag, event: *const Event) anyerror!bool {
            const style = tag.style;
            const g = self.graphics;
            switch (tag.data) {
                .text => |text| {
                    // Re-measure with current font state to get the hit box.
                    g.setFontSize(style.fontSize);
                    g.setTextWrap(self.width);
                    const metrics = g.getTextMetrics(text);
                    const x = tag.layoutX;
                    const y = tag.layoutY;
                    switch (event.*) {
                        .MouseButton => |evt| {
                            if (tag.href) |href| {
                                const cx = evt.x;
                                const cy = evt.y;
                                // Activate on release inside the text bounds.
                                if (!evt.pressed and cx > x and cx <= x + metrics.width and cy > y and cy <= y + metrics.height) {
                                    if (self.linkCallback) |callback| {
                                        if (std.io.is_async) {
                                            const frameSize = @frameSize(callback); // Get the size of the callback's frame
                                            var frame = try self.allocator.alignedAlloc(u8, 16, frameSize); // Allocate a 16-byte aligned frame
                                            defer self.allocator.free(frame);
                                            var result: anyerror!void = undefined;
                                            var f = @asyncCall(frame, &result, callback, .{self, href});
                                            try await f;
                                        } else {
                                            try callback(self, href);
                                        }
                                        g.*.frame_requested = true;
                                        return true;
                                    }
                                }
                            }
                        }
                    }
                },
                .container => |childrens| {
                    for (childrens.items) |child| {
                        if (std.io.is_async) {
                            // Recursing into an async-capable function needs
                            // an explicit frame allocation as well.
                            const frameSize = @frameSize(processTag);
                            var frame = try self.allocator.alignedAlloc(u8, 16, frameSize);
                            defer self.allocator.free(frame);
                            var result: anyerror!bool = undefined;
                            var f = @asyncCall(frame, &result, processTag, .{self, child, event});
                            if (try await f) return true;
                        } else {
                            if (try self.processTag(child, event)) return true;
                        }
                    }
                }
            }
            return false;
        }
    };
}
zervo/renderer.zig
//! A Brainfuck interpreter over a fixed 30_000-cell tape.
//! The file itself is the Interpreter struct (file-as-struct pattern).
const Interpreter = @This();

const std = @import("std");
const utils = @import("utils.zig");

/// Number of tape cells (the classic Brainfuck default).
const MEMORY_SIZE = 30_000;

const SyntaxError = error{
    OpenBracketNotFound,
    CloseBracketNotFound,
};

/// Program source being executed.
src: []const u8,
/// Index of the command currently being executed.
src_current_index: usize = 0,
/// Index of the next command to fetch.
src_next_index: usize = 0,
/// The tape; every cell starts at zero.
mem: [MEMORY_SIZE]u8 = [_]u8{0} ** MEMORY_SIZE,
/// Current cell index (the Brainfuck data pointer).
mem_index: usize = 0,
/// Source indices of the '[' commands whose loops are currently open.
loop_stack: std.ArrayList(usize),

/// Runs `src` to completion. Prints a diagnostic and returns false on a
/// syntax or I/O error; returns true otherwise.
pub fn interpret(allocator: std.mem.Allocator, src: []const u8) bool {
    var interpreter = Interpreter.init(allocator, src);
    defer interpreter.deinit();
    while (interpreter.nextCommand()) |cmd| {
        switch (cmd) {
            '>' => interpreter.increasePtr(),
            '<' => interpreter.decreasePtr(),
            '+' => interpreter.increaseValue(),
            '-' => interpreter.decreaseValue(),
            '[' => interpreter.startLoop() catch |err| switch (err) {
                error.CloseBracketNotFound => {
                    std.debug.print("Syntax Error: matching ']' not found at index '{d}'\n", .{interpreter.src_current_index + 1});
                    return false;
                },
                // Only remaining possibility is an allocation failure while
                // growing the loop stack.
                else => {
                    std.debug.print("Interpreter Error: some error occurred while creating loop stack\n{s}\n", .{@errorName(err)});
                    return false;
                },
            },
            ']' => interpreter.endLoop() catch |err| switch (err) {
                error.OpenBracketNotFound => {
                    std.debug.print("Syntax Error: matching '[' not found at index '{d}'\n", .{interpreter.src_current_index + 1});
                    return false;
                },
                // endLoop only ever returns OpenBracketNotFound, but its
                // declared error set (SyntaxError) also contains
                // CloseBracketNotFound, so a catch-all branch is required.
                else => continue,
            },
            ',' => interpreter.readChar() catch |err| {
                std.debug.print("Interpreter Error: failed to read byte from stdin\n{s}\n", .{@errorName(err)});
                return false;
            },
            '.' => interpreter.writeChar() catch |err| {
                std.debug.print("Interpreter Error: failed to print byte on stdout\n{s}\n", .{@errorName(err)});
                return false;
            },
            // Every byte that is not one of the eight commands is a comment.
            else => continue,
        }
    }
    return true;
}

pub fn init(allocator: std.mem.Allocator, src: []const u8) Interpreter {
    return Interpreter{
        .src = src,
        .loop_stack = std.ArrayList(usize).init(allocator),
    };
}

pub fn deinit(self: Interpreter) void {
    self.loop_stack.deinit();
}

/// Fetches the next command byte and advances the cursor; null at end of source.
pub fn nextCommand(self: *Interpreter) ?u8 {
    if (self.src_next_index >= self.src.len) return null;
    self.src_current_index = self.src_next_index;
    self.src_next_index += 1;
    return self.src[self.src_current_index];
}

/// '>' — moves the data pointer right, saturating at the last cell.
pub fn increasePtr(self: *Interpreter) void {
    // Bug fix: the previous bound (mem_index < mem.len) let mem_index reach
    // mem.len, so the next cell access indexed out of bounds. Saturate at the
    // last valid cell instead, mirroring decreasePtr's clamp at 0.
    if (self.mem_index + 1 < self.mem.len) self.mem_index += 1;
}

/// '<' — moves the data pointer left, saturating at cell 0.
pub fn decreasePtr(self: *Interpreter) void {
    if (self.mem_index > 0) self.mem_index -= 1;
}

/// '+' — increments the current cell with wrap-around.
pub fn increaseValue(self: *Interpreter) void {
    self.mem[self.mem_index] +%= 1;
}

/// '-' — decrements the current cell with wrap-around.
pub fn decreaseValue(self: *Interpreter) void {
    self.mem[self.mem_index] -%= 1;
}

/// '[' — if the current cell is zero, jumps past the matching ']';
/// otherwise records this position so the matching ']' can jump back.
pub fn startLoop(self: *Interpreter) !void {
    const loop_end_index = self.findEndOfLoop() orelse {
        return SyntaxError.CloseBracketNotFound;
    };
    if (self.mem[self.mem_index] == 0) {
        // jump to just after the matching ']' (end of loop)
        self.src_next_index = loop_end_index + 1;
    } else {
        try self.loop_stack.append(self.src_current_index);
    }
}

/// ']' — pops the matching '[' and jumps back to it when the current cell
/// is non-zero; errors when no '[' is open.
pub fn endLoop(self: *Interpreter) SyntaxError!void {
    const loop_start_index = self.loop_stack.popOrNull() orelse {
        return SyntaxError.OpenBracketNotFound;
    };
    // No need to check the zero case explicitly: leaving src_next_index
    // untouched already continues with the next instruction.
    if (self.mem[self.mem_index] != 0) {
        // jump back to the matching '[' (start of loop)
        self.src_next_index = loop_start_index;
    }
}

/// ',' — reads one byte from stdin into the current cell, discarding the
/// rest of the input line.
pub fn readChar(self: *Interpreter) !void {
    const char = try utils.stdin.readByte();
    try utils.stdin.skipUntilDelimiterOrEof('\n');
    self.mem[self.mem_index] = char;
}

/// '.' — writes the current cell to stdout.
pub fn writeChar(self: *Interpreter) !void {
    try utils.stdout.writeByte(self.mem[self.mem_index]);
}

/// Scans forward from the current '[' and returns the source index of its
/// matching ']' (handling nesting), or null when unbalanced.
fn findEndOfLoop(self: *Interpreter) ?usize {
    var nums_open_bracket: usize = 0;
    var nums_close_bracket: usize = 0;
    const src_slice = self.src[self.src_current_index..];
    for (src_slice) |c, i| {
        if (c == '[') {
            nums_open_bracket += 1;
        } else if (c == ']') {
            nums_close_bracket += 1;
        }
        // Balanced counts after at least one '[' means `i` is the match.
        if (nums_open_bracket > 0) {
            if (nums_open_bracket == nums_close_bracket) {
                return self.src_current_index + i;
            }
        }
    }
    return null;
}
src/Interpreter.zig
//! Interpreter for the "stitch" scripting language: parses a command into
//! nodes (via parse.zig) and executes them through caller-supplied hooks for
//! error reporting and output, plus test scaffolding driving it.
// NOTE(review): this span ends mid test-block; the trailing `try` is
// intentionally left dangling and continues on the following source lines.
const std = @import("std");
const testing = std.testing;
const ArrayList = std.ArrayList;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const StringHashMapUnmanaged = std.StringHashMapUnmanaged;
const lex = @import("lex.zig");
const parse = @import("parse.zig");

// NOTE(review): not referenced anywhere in this chunk — presumably consumed
// elsewhere in the file; verify before removing.
const assert_one_match = true;

/// Which output stream a write targets.
const OutKind = enum { out, err };

/// Caller-supplied callbacks for error reporting and output, plus a map of
/// extra builtin objects. Everything the interpreter emits goes through here.
const RunHooks = struct {
    allocator: *std.mem.Allocator,
    onError: fn(base: *RunHooks, pos: [*]const u8, msg: []const u8) void,
    onWrite: fn(base: *RunHooks, kind: OutKind, bytes: []const u8) WriteError!usize,
    builtin_map: StringHashMapUnmanaged(StitchObj),

    pub fn init(
        allocator: *std.mem.Allocator,
        onError: fn(base: *RunHooks, pos: [*]const u8, msg: []const u8) void,
        onWrite: fn(base: *RunHooks, kind: OutKind, bytes: []const u8) WriteError!usize,
    ) RunHooks {
        return .{
            .allocator = allocator,
            .onError = onError,
            .onWrite = onWrite,
            .builtin_map = StringHashMapUnmanaged(StitchObj) { },
        };
    }

    pub fn deinit(self: *RunHooks) void {
        self.builtin_map.deinit(self.allocator);
    }

    const WriteError = error {OutOfMemory};

    /// Adapts the onWrite callback to the std.io.Writer interface.
    const WriterContext = struct {
        hooks: *RunHooks,
        kind: OutKind,
        fn write(self: WriterContext, bytes: []const u8) WriteError!usize {
            return self.hooks.onWrite(self.hooks, self.kind, bytes);
        }
    };
    pub const Writer = std.io.Writer(WriterContext, WriteError, WriterContext.write);

    /// Formats `fmt`/`args` onto the given output stream via onWrite.
    fn print(self: *RunHooks, out_kind: OutKind, comptime fmt: []const u8, args: anytype) !void {
        return std.fmt.format(Writer {
            .context = .{
                .hooks = self,
                .kind = out_kind,
            },
        }, fmt, args);
    }

    /// Formats an error message into a fixed 200-byte buffer and hands it to
    /// onError; falls back to a generic message if it does not fit.
    fn reportError(self: *RunHooks, pos: [*]const u8, comptime fmt: []const u8, args: anytype) void {
        var msg_buf: [200]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&msg_buf);
        std.fmt.format(fbs.writer(), fmt, args) catch |e| switch (e) {
            error.NoSpaceLeft => {
                //std.debug.panic("error message exceeded {} bytes", .{msg_buf.len});
                self.onError(self, pos, "error message too long");
                return;
            },
        };
        self.onError(self, pos, fbs.getWritten());
    }
};

/// Per-script state: user variables.
const ScriptContext = struct {
    var_map: StringHashMapUnmanaged(StitchObj) = .{},
};

/// Per-command state threaded through the run functions.
const CommandContext = struct {
    hooks: *RunHooks,
    script_ctx: *ScriptContext,
    // Number of builtin-program prefixes (e.g. @noop) preceding this command.
    builtin_program_prefix_count: u16,
    // Depth of (..inline command..) nesting; 0 at top level.
    inline_cmd_depth: u16,
};

/// Builtins available to every script.
const universal_builtin_map = std.ComptimeStringMap(StitchObj, .{
    .{ "false", .{ .bool = false } },
    .{ "true", .{ .bool = true } },
    .{ "echo", .builtin_program },
    .{ "noop", .builtin_program },
});

/// Resolves a builtin name to its object, or null when unknown.
fn lookupBuiltin(ctx: *CommandContext, name: []const u8) ?StitchObj {
    _ = ctx;
    //if (ctx.script_ctx.script_specific_builtin_objects.get(name)) |obj|
    //    return obj;
    if (universal_builtin_map.get(name)) |obj|
        return obj;
    return null;
}

/// Called when the lexer rejects input: inspects the bad bytes to produce a
/// specific diagnostic. Unterminated paren/quote cases are still unimplemented
/// (the commented Python below is the reference implementation being ported).
fn reportUnknownToken(hooks: *RunHooks, pos: [*]const u8, limit: [*]const u8) void {
    // figure out what went wrong
    // # time to try to figure out what went wrong
    // c = next.charAt(0)
    // if c == ord('('):
    //     raise SyntaxError(pos, "missing close paren for: {}".format(previewStringPtr(next, limit, 30)))
    // if c == ord('"'):
    //     raise SyntaxError(pos, "missing double-quote to close: {}".format(previewStringPtr(next, limit, 30)))
    // next_str = next.toStringWithLimit(limit)
    // for seq in (b"''''''", b"'''''", b"''''", b"'''", b"''", b"'"):
    //     if next_str.startswith(seq):
    //         phrase = "single-quote" if (len(seq) == 1) else "{} single-quote sequence".format(len(seq))
    //         raise SyntaxError(pos, "missing {} to close: {}".format(phrase, previewStringPtr(next, limit, 30)))
    //
    // # I think we need at most 2 characters to see what went wrong
    // bad_str = next_str[:min(limit.subtract(next), 2)]
    // raise SyntaxError(pos, "unrecognized character sequence '{}'".format(bad_str.decode('ascii')))
    //
    if (pos[0] == '(') {
        // //hooks.onSyntaxError("missing close paren for: {}", .{previewStringPtr(pos, limit, 30)});
        // hooks.onSyntaxError(SyntaxError.MissingCloseParen
        @panic("here");
    }
    if (pos[0] == '"') {
        @panic("here");
    }
    if (pos[0] == '\'') {
        @panic("here");
    }
    // At most 2 bytes are needed to show what went wrong.
    const bad_str = pos[0..std.math.min(2, @ptrToInt(limit)-@ptrToInt(pos))];
    hooks.reportError(pos, "unrecognized character sequence '{s}'", .{bad_str});
}

const RunFailedError = error { RunFailed } || std.mem.Allocator.Error;

/// Convenience wrapper: runs a command given as a slice.
pub fn runSlice(ctx: *CommandContext, s: []const u8) RunFailedError![*]const u8 {
    return runLimitSlice(ctx, s.ptr, s.ptr + s.len);
}

/// Parses one command in [start, limit) into nodes and executes it.
/// Returns a pointer just past the command on success.
pub fn runLimitSlice(ctx: *CommandContext, start: [*]const u8, limit: [*]const u8) RunFailedError![*]const u8 {
    std.debug.print("[DEBUG] run '{s}'\n", .{start[0..@ptrToInt(limit)-@ptrToInt(start)]});
    // Here we parse the command into a list of nodes.
    // This requires allocating space for the node data, however, this
    // also makes the code simpler because it provides a clean separation
    // between the parsing stage and the interpretation stage.
    var nodes = parse.NodeBuilder { .allocator = ctx.hooks.allocator };
    defer nodes.deinit();
    // TODO: make this overrideable (probably in hooks somewhere)
    const allstringliterals = false;
    const cmd_end = blk: {
        switch (parse.parseCommand(&nodes, start, limit, allstringliterals)) {
            .success => |end| break :blk end,
            .unknown_token => |pos| {
                reportUnknownToken(ctx.hooks, pos, limit);
                return error.RunFailed;
            },
            .err => |e| return e,
        }
    };
    var it = nodes.iterator();
    try runNodes(ctx, &it, limit);
    return cmd_end;
}

/// Peeks (without consuming) whether the next node joins the previous token.
fn firstNodeJoinPrev(nodes: *const parse.NodeBuilder.Iterator) bool {
    var nodes_it = nodes.*;
    return if (nodes_it.next()) |next| next.join_prev else false;
}

/// Executes a parsed node list: dispatches builtin programs, assignments and
/// binary expressions; reports everything else as unimplemented.
pub fn runNodes(
    ctx: *CommandContext,
    nodes: *parse.NodeBuilder.Iterator,
    limit: [*]const u8, // TODO: this might be removed, for now it's used to find out which strings are inside the script source memory
) RunFailedError!void {
    const first_node = nodes.next() orelse return;
    switch (first_node.data) {
        .builtin_id => {
            if (!firstNodeJoinPrev(nodes)) {
                if (lookupBuiltin(ctx, first_node.getIdSlice())) |builtin| {
                    switch (builtin) {
                        .builtin_program => |prog| {
                            _ = prog; // TODO: pass this
                            return runBuiltinProgram(ctx, first_node.getIdSlice(), nodes, limit);
                        },
                        else => {},
                    }
                }
            }
        },
        else => {},
    }
    {
        // Look ahead on a copy: the second node decides between assignment
        // and binary expression forms.
        var nodes2 = nodes.*;
        if (nodes2.next()) |second_node| {
            switch (second_node.data) {
                .assign => return runAssign(ctx, first_node, second_node, &nodes2),
                .binary_op => return runBinaryExpression(ctx, first_node, second_node, &nodes2),
                else => {},
            }
        }
    }
    ctx.hooks.reportError(first_node.pos, "have not implemented a first token kind of {s} with no binary operator", .{@tagName(first_node.data)});
    return RunFailedError.RunFailed;
}

/// Extracts the name from a builtin token: strips the leading '@' and an
/// optional trailing '@' (the explicit-terminator form, e.g. "@echo@").
fn builtinIdFromToken(start: [*]const u8, end: [*]const u8) []const u8 {
    std.debug.assert(start[0] == '@');
    var len = @ptrToInt(end) - @ptrToInt(start);
    if (@intToPtr([*]const u8, @ptrToInt(end) - 1)[0] == '@') {
        len -= 1;
    }
    return start[1..len];
}

/// A runtime value in the stitch language.
const StitchObj = union(enum) {
    bool: bool,
    string: void,//parse.Node,
    builtin_program: void,
    array: void,

    /// Type name as shown to script authors in error messages.
    pub fn userTypeDescription(self: StitchObj) []const u8 {
        return switch (self) {
            .bool => "Bool",
            .string => "String",
            .builtin_program => "BuiltinProgram",
            .array => "Array",
        };
    }
};

/// Executes a builtin program (@echo, @noop, ...) with the remaining nodes as
/// its arguments.
fn runBuiltinProgram(
    ctx: *CommandContext,
    builtin_id: []const u8,
    nodes: *parse.NodeBuilder.Iterator,
    limit: [*]const u8, // might be removed, used to find out which strings are within script source memory
) RunFailedError!void {
    if (std.mem.eql(u8, builtin_id, "echo")) {
        var args = try ArgStrings.init(ctx, nodes, limit);
        defer args.deinit(ctx, builtin_id.ptr, limit);
        if (args.al.items.len == 0) {
            try ctx.hooks.print(.out, "\n", .{});
        } else {
            for (args.al.items[0 .. args.al.items.len - 1]) |arg| {
                try ctx.hooks.print(.out, "{s}", .{arg});
            }
            try ctx.hooks.print(.out, "{s}\n", .{args.al.items[args.al.items.len-1]});
        }
    // @noop is a temporary builtin for initial development
    } else if (std.mem.eql(u8, builtin_id, "noop")) {
        // Re-run the rest of the command with the prefix count bumped.
        var next_ctx = CommandContext {
            .hooks = ctx.hooks,
            .script_ctx = ctx.script_ctx,
            .inline_cmd_depth = ctx.inline_cmd_depth,
            .builtin_program_prefix_count = ctx.builtin_program_prefix_count + 1,
        };
        return runNodes(&next_ctx, nodes, limit);
    } else {
        std.debug.panic("builtin '@{s}' not impl", .{builtin_id});
    }
}

/// Placeholder: binary operators are not implemented yet.
fn runBinaryExpression(
    ctx: *CommandContext,
    first_node: parse.Node,
    op_node: parse.Node,
    nodes: *parse.NodeBuilder.Iterator,
) RunFailedError!void {
    _ = first_node;
    _ = op_node;
    _ = nodes;
    ctx.hooks.reportError(first_node.pos, "runBinaryExpression op={s} not implemented", .{@tagName(op_node.data)});
    return RunFailedError.RunFailed;
}

/// Handles `NAME = VALUE`: validates the context (no builtin prefix, not in
/// an inline command) and the left-hand side; the actual assignment is still
/// unimplemented.
fn runAssign(
    ctx: *CommandContext,
    first_node: parse.Node,
    op_node: parse.Node,
    nodes: *parse.NodeBuilder.Iterator,
) RunFailedError!void {
    std.debug.assert(op_node.data == .assign);
    if (ctx.builtin_program_prefix_count > 0) {
        ctx.hooks.reportError(op_node.pos, "unexpected '='", .{});
        return RunFailedError.RunFailed;
    }
    // TODO: test this!!!!!!!!!!!!!!!!!!!!!!!!!!!
    if (ctx.inline_cmd_depth > 0) {
        ctx.hooks.reportError(op_node.pos, "assignment '=' is forbidden inside an inline command", .{});
        return RunFailedError.RunFailed;
    }
    var stitch_objects = ArrayListUnmanaged(StitchObj) { };
    defer stitch_objects.deinit(ctx.hooks.allocator);
    const first_obj = try expandNode(ctx, first_node);
    switch (first_obj) {
        .string => {},
        else => {
            ctx.hooks.reportError(first_node.pos, "expected a String before '=' but got {s}", .{first_obj.userTypeDescription()});
            return RunFailedError.RunFailed;
        },
    }
    _ = op_node;
    _ = nodes;
    // var nodes: [1]Node = undefined;
    // const nodes_len = parseNodes(next, limit, &nodes) catch |e| switch (e) {
    //     error.RunFailed, error.OutOfMemory => |e2| return e2,
    //     error.TooManyNodes => {
    //         @panic("here");
    //     }
    // };
    // _ = nodes_len;
    ctx.hooks.reportError(first_node.pos, "runAssign not implemented", .{});
    return RunFailedError.RunFailed;
}

/// Evaluates a single node to a StitchObj (string literal or builtin lookup).
fn expandNode(
    ctx: *CommandContext,
    node: parse.Node,
    //objects: *ArrayListUnmanaged(StitchObj),
    // we might need an ExpandNodeErrorContext for better error messages
) RunFailedError!StitchObj {
    switch (node.getKind()) {
        .string => {
            return StitchObj.string;
        },
        .id => switch (node.data) {
            .user_id => @panic("user_id not impl"),
            .builtin_id => {
                const id = node.getIdSlice();
                if (lookupBuiltin(ctx, id)) |obj| {
                    return obj;
                }
                ctx.hooks.reportError(node.pos, "'@{s}' is undefined", .{id});
                return RunFailedError.RunFailed;
            },
            else => unreachable,
        },
        else => std.debug.panic("expandNode kind '{}' not implemented", .{node.getKind()}),
    }
}

/// True when pointer `s` falls inside [start, limit).
fn ptrIntersects(s: [*]const u8, start: [*]const u8, limit: [*]const u8) bool {
    return @ptrToInt(s) < @ptrToInt(limit) and @ptrToInt(s) >= @ptrToInt(start);
}

/// Accumulates adjacent (join_prev) node fragments into a single argument
/// string, avoiding allocation until two fragments must be joined.
const ArgBuilder = struct {
    state: union(enum) {
        empty: void,
        // A single fragment borrowed from the script source (not allocated).
        source: []const u8,
        // Multiple fragments concatenated into allocated memory.
        building: ArrayListUnmanaged(u8),
    } = .empty,

    /// Cleanup used on the error path only (normal path hands memory to args).
    pub fn errDeinit(self: *ArgBuilder, allocator: *std.mem.Allocator) void {
        switch (self.state) {
            .empty, .source => {},
            .building => |*b| b.deinit(allocator),
        }
    }

    /// Finishes the current argument (if any) and appends it to `args`,
    /// shrinking allocated buffers to their exact length.
    pub fn flush(self: *ArgBuilder, allocator: *std.mem.Allocator, args: *ArrayListUnmanaged([]const u8)) !void {
        switch (self.state) {
            .empty => {},
            .source => |s| {
                try args.append(allocator, s);
                self.state = .empty;
            },
            .building => |al| {
                const result = allocator.shrink(al.items.ptr[0..al.capacity], al.items.len);
                std.debug.assert(result.len == al.items.len);
                try args.append(allocator, result);
                self.state = .empty;
            },
        }
    }

    /// Appends one fragment, upgrading from borrowed to allocated state on
    /// the second fragment.
    pub fn add(self: *ArgBuilder, allocator: *std.mem.Allocator, arg: []const u8) !void {
        switch (self.state) {
            .empty => {
                self.state = .{ .source = arg };
            },
            .source => |s| {
                const min_size = s.len + arg.len;
                var mem = try allocator.allocAdvanced(u8, null, min_size, .at_least);
                @memcpy(mem.ptr, s.ptr, s.len);
                @memcpy(mem.ptr + s.len, arg.ptr, arg.len);
                self.state = .{ .building = ArrayListUnmanaged(u8) {
                    .items = mem[0..min_size],
                    .capacity = mem.len,
                }};
            },
            .building => |*al| {
                try al.appendSlice(allocator, arg);
            },
        }
    }
};

/// Owns a list of argument strings; strings that point into the script
/// source are borrowed, all others are freed on deinit.
const ArgStrings = struct {
    al: ArrayListUnmanaged([]const u8),

    pub fn init(ctx: *CommandContext, nodes: *parse.NodeBuilder.Iterator, src_limit: [*]const u8) RunFailedError!ArgStrings {
        var result = ArgStrings { .al = .{} };
        const first_node = nodes.front() orelse return result;
        const start = first_node.pos;
        errdefer result.deinit(ctx, start, src_limit);
        try allocArgStrings(ctx, &result.al, nodes);
        return result;
    }

    pub fn deinit(self: *ArgStrings, ctx: *CommandContext, src_start: [*]const u8, src_limit: [*]const u8) void {
        for (self.al.items) |arg| {
            // Only free strings that were allocated (not views into source).
            if (!ptrIntersects(arg.ptr, src_start, src_limit)) {
                ctx.hooks.allocator.free(arg);
            }
        }
        self.al.deinit(ctx.hooks.allocator);
    }
};

/// Converts the remaining nodes into argument strings, joining fragments
/// whose join_prev flag is set.
fn allocArgStrings(ctx: *CommandContext, args: *ArrayListUnmanaged([]const u8), nodes: *parse.NodeBuilder.Iterator) RunFailedError!void {
    var arg_builder = ArgBuilder { };
    errdefer arg_builder.errDeinit(ctx.hooks.allocator);
    while (nodes.next()) |node| {
        if (!node.join_prev) {
            try arg_builder.flush(ctx.hooks.allocator, args);
        }
        switch (node.data) {
            //.arg_sep => try arg_builder.flush(ctx.hooks.allocator, args),
            .arg => |end| try arg_builder.add(ctx.hooks.allocator, node.pos[0 .. @ptrToInt(end) - @ptrToInt(node.pos)]),
            .assign => @panic("not impl"),
            .user_id => @panic("not impl"),
            .builtin_id => @panic("not impl"),
            .binary_op => @panic("not impl"),
            // Strip the surrounding double quotes.
            .double_quoted_string => |end| try arg_builder.add(ctx.hooks.allocator, node.pos[1 .. @ptrToInt(end) - @ptrToInt(node.pos) - 1]),
            .single_quoted_string => @panic("not impl"),
            // The byte after the escape character.
            .escape_sequence => try arg_builder.add(ctx.hooks.allocator, node.pos[1..2]),
        }
    }
    try arg_builder.flush(ctx.hooks.allocator, args);
}

/// Test double for RunHooks: captures stdout/stderr and the first reported
/// error so runTest can assert on them.
const TestHooks = struct {
    hooks: RunHooks,
    stdout: ArrayList(u8),
    stderr: ArrayList(u8),
    result: union(enum) {
        none: void,
        err: struct {
            pos: [*]const u8,
            msg: []const u8,
        },
    },

    pub fn init() TestHooks {
        return .{
            .hooks = RunHooks.init(testing.allocator, onError, onWrite),
            .stdout = ArrayList(u8).init(testing.allocator),
            .stderr = ArrayList(u8).init(testing.allocator),
            .result = .none,
        };
    }

    pub fn deinit(self: *TestHooks) void {
        self.stdout.deinit();
        self.stderr.deinit();
        switch (self.result) {
            .none => {},
            .err => |err| {
                self.hooks.allocator.free(err.msg);
            },
        }
    }

    /// RunHooks.onError implementation; duplicates the message because the
    /// caller's buffer is stack-allocated.
    fn onError(base: *RunHooks, pos: [*]const u8, msg: []const u8) void {
        const self = @fieldParentPtr(TestHooks, "hooks", base);
        std.debug.assert(self.result == .none);
        self.result = .{ .err = .{
            .pos = pos,
            .msg = self.hooks.allocator.dupe(u8, msg) catch |e| switch (e) {
                error.OutOfMemory => {
                    std.debug.panic("failed to allocate memory for the error message '{s}'", .{msg});
                },
            },
        }};
    }

    /// RunHooks.onWrite implementation: appends to the captured stream.
    fn onWrite(base: *RunHooks, out_kind: OutKind, bytes: []const u8) RunHooks.WriteError!usize {
        const self = @fieldParentPtr(TestHooks, "hooks", base);
        const out = switch (out_kind) { .out => &self.stdout, .err => &self.stderr };
        try out.appendSlice(bytes);
        return bytes.len;
    }
};

/// Expected outcome of a runTest invocation; null fields mean "expect none".
const TestConfig = struct {
    err: ?[]const u8 = null,
    stdout: ?[]const u8 = null,
    stderr: ?[]const u8 = null,
    stdin: ?[]const u8 = null,
};

/// Runs `src` under TestHooks and checks the outcome against `config`.
fn runTest(src: []const u8, config: TestConfig) !void {
    var hooks = TestHooks.init();
    defer hooks.deinit();
    var script_ctx = ScriptContext { };
    var ctx = CommandContext {
        .hooks = &hooks.hooks,
        .script_ctx = &script_ctx,
        .inline_cmd_depth = 0,
        .builtin_program_prefix_count = 0,
    };
    if (config.stdin) |stdin| {
        _ = stdin;
        @panic("not impl");
    }
    const end = runSlice(&ctx, src) catch |e| switch (e) {
        error.OutOfMemory => return e,
        error.RunFailed => {
            const err = switch (hooks.result) {
                .none => @panic("RunFailed but no error was reported, this should be impossible"),
                .err => |err| err,
            };
            if (config.err) |expected_error| {
                if (!std.mem.eql(u8, expected_error, err.msg)) {
                    std.debug.print("\nerror: error message mismatch\nexpected: '{s}'\nactual : '{s}'\n", .{expected_error, err.msg});
                    return error.TestUnexpectedResult;
                }
            } else {
                std.debug.print("error: unexpected failure: {s}\n", .{err.msg});
                return error.TestUnexpectedResult;
            }
            if (config.stdout) |stdout| {
                _ = stdout;
                @panic("not impl");
            }
            if (config.stderr) |stderr| {
                if (!std.mem.eql(u8, stderr, hooks.stderr.items)) {
                    std.debug.print("\nerror: stderr mismatch\nexpected: '{s}'\nactual : '{s}'\n", .{stderr, hooks.stderr.items});
                    return error.TestUnexpectedResult;
                }
            }
            //@panic("not impl");
            return;
        },
    };
    _ = end;
    if (config.err) |err| {
        std.debug.print("\ndid not get expected error '{s}'\n", .{err});
        return error.TestUnexpectedResult;
    }
    if (config.stdout) |stdout| {
        if (!std.mem.eql(u8, stdout, hooks.stdout.items)) {
            std.debug.print("\nerror: stdout mismatch\nexpected: '{s}'\nactual : '{s}'\n", .{stdout, hooks.stdout.items});
            return error.TestUnexpectedResult;
        }
    } else {
        try testing.expect(hooks.stdout.items.len == 0);
    }
    if (config.stderr) |stderr| {
        _ = stderr;
        @panic("todo");
    } else if (hooks.stderr.items.len > 0) {
        @panic("todo");
    }
}

test {
    try runTest("@echo", .{.stdout = "\n"});
    try runTest("@echo@", .{.stdout = "\n"});
    try runTest("@echo hello", .{.stdout = "hello\n"});
    try runTest("@echo@ hello", .{.stdout = "hello\n"});
    try
runTest("@echo \"a string!\"", .{.stdout = "a string!\n"}); try runTest("@echo \"string and \"arg\" put together\"", .{.stdout = "string and arg put together\n"}); try runTest("@noop @echo hello", .{.stdout = "hello\n"}); try runTest("@noop a = b", .{.err = "unexpected '='"}); //try runTest("@echo@hello", .{}); } test "ported from python test" { // testSyntaxError(b"(", "missing close paren for: (") // testSyntaxError(b"(foo", "missing close paren for: (foo") // testSyntaxError(b"(a long command that demonstrates we shouldnt print this whole thing when we show an error", // "missing close paren for: (a long command that demonstra[..snip..]") try runTest("$$", .{.err = "unrecognized character sequence '$$'"}); try runTest("@`", .{.err = "unrecognized character sequence '@`'"}); try runTest("@echo @@", .{.stdout = "@\n"}); try runTest("@echo @#", .{.stdout = "#\n"}); try runTest("@echo @$", .{.stdout = "$\n"}); try runTest("@echo @)", .{.stdout = ")\n"}); try runTest("@echo @(", .{.stdout = "(\n"}); try runTest("@echo @=", .{.stdout = "=\n"}); try runTest("@echo @\"", .{.stdout = "\"\n"}); try runTest("@echo @'", .{.stdout = "'\n"}); // // testSemanticError(b"@multiline", "@multiline requires at least 1 argument") // testSemanticError(b"@multiline @true", "the @multiline builtin is only supported within an (..inline command..)") // testSemanticError(b"(@multiline @true @and @true)", "@multiline does not accept Bool") // // testSemanticError(b"@false", "unhandled Bool") // testSemanticError(b"@true", "unhandled Bool") // // testSemanticError(b"@true dat", "unexpected Bool at the start of a command") // try runTest("@echo", .{.stdout = "\n"}); try runTest("@echo foo", .{.stdout = "foo\n"}); try runTest("@echo \"foo\"", .{.stdout = "foo\n"}); // // # // # double-quoted string literals // # // testSyntaxError(b'"', 'missing double-quote to close: "') // testSyntaxError(b'@echo "foo', 'missing double-quote to close: "foo') // testSyntaxError(b'@echo 
"01234567890123456789012345678', // 'missing double-quote to close: "01234567890123456789012345678') // testSyntaxError(b'@echo "012345678901234567890123456789', // 'missing double-quote to close: "01234567890123456789012345678[..snip..]') // testCommand(b'@echo @"', 0, b'"\n') // testCommand(b'@echo "#@$()"', 0, b'#@$()\n') // // # // # single-quoted string literals // # // testSyntaxError(b"'", "missing single-quote to close: '") // testSyntaxError(b"''", "missing 2 single-quote sequence to close: ''") // testSyntaxError(b"'''", "missing 3 single-quote sequence to close: '''") // testSyntaxError(b"''''", "missing 4 single-quote sequence to close: ''''") // testSyntaxError(b"'''''", "missing 5 single-quote sequence to close: '''''") // testSyntaxError(b"''''''", "missing 6 single-quote sequence to close: ''''''") // testSyntaxError(b"'''''''", "missing 6 single-quote sequence to close: '''''''") // testSyntaxError(b"'a", "missing single-quote to close: 'a") // testSyntaxError(b"''a", "missing 2 single-quote sequence to close: ''a") // testSyntaxError(b"'''a", "missing 3 single-quote sequence to close: '''a") // testSyntaxError(b"''''a", "missing 4 single-quote sequence to close: ''''a") // testSyntaxError(b"@echo 'foo", "missing single-quote to close: 'foo") // testSyntaxError(b"@echo '01234567890123456789012345678", // "missing single-quote to close: '01234567890123456789012345678") // testSyntaxError(b"@echo '012345678901234567890123456789", // "missing single-quote to close: '01234567890123456789012345678[..snip..]") // testCommand(b'@echo "#@$()"', 0, b"#@$()\n") // // testSyntaxError(b"@echo '\n", "missing single-quote to close: '") // testSyntaxError(b"@echo ''\n''", "missing 2 single-quote sequence to close: ''") // testCommand(b"@echo '''\n'''", 0, b"") // testCommand(b"@echo '''\nhello'''", 0, b"hello\n") // testCommand(b"@echo '''\nhello\nworld'''", 0, b"hello\nworld\n") // testCommand(b"@echo '''\n\"hello\"\n'world''''", 0, b"\"hello\"\n'world'\n") // 
// testCommand(b"@echo 'hello\"'", 0, b"hello\"\n") // testCommand(b"@echo ''hello\"'''", 0, b"hello\"'\n") // testCommand(b"@echo '''hello\"''''", 0, b"hello\"'\n") // testCommand(b"@echo ''''hello\"'''''", 0, b"hello\"'\n") // testCommand(b"@echo '''''hello\"''''''", 0, b"hello\"'\n") // testCommand(b"@echo ''''''hello\"'''''''", 0, b"hello\"'\n") // // # should be a syntax error because there are no quotes! // testSyntaxError(b"@echo 'foo'", "got a single-quote string literal without double-quotes nor newlines, use double quotes instead or invoke @allstringliterals") // testCommand(b"@allstringliterals\n@echo 'foo'", 0, b"foo\n") //try runTest("foo = bar", .{}); //try runTest( // \\name = joe // \\age = 64 // \\@echo '''\nhello '''$name''', you're\n64!!!''' // , .{ .stdout = "hello joe, you're\n64!!!\n" } //); // // #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // # TODO: test all the inline command prefixes // #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // testSemanticError(b"@exitcode true", "the @exitcode builtin is only supported within an (..inline command..)") // testSemanticError(b"@assert (@exitcode)", "@exitcode requires at least 1 argument") // testSemanticError(b"@assert (@exitcode @exitcode true)", "@exitcode is not compatible with @exitcode") // testSemanticError(b"@assert (@exitcode true @exitcode)", "cannot coerce Builtin '@exitcode' to String") // testSemanticError(b"@assert (@exitcode @true)", "unhandled Bool") // testSemanticError(b"@assert (@exitcode @multiline true)", "got @multiline but stdout is not being captured? 
what's going on?") // testCommand(b"@assert (@exitcode true)", 0, b"") // testCommand(b"@assert @not (@exitcode false)", 0, b"") // // testSyntaxError(b"1@and", "'@and' requires space separation") // testSyntaxError(b"@and=", "'@and' requires space separation") // testSyntaxError(b"=@and", "'=' requires space separation") // testSemanticError(b"@and", "unexpected '@and'") // testSemanticError(b"@or", "unexpected '@or'") // testSemanticError(b"true @and true", "'@and' does not accept objects of type String") // testSemanticError(b"(@array) @and @true", "'@and' does not accept objects of type Array") // testSemanticError(b"@true @and (@array)", "'@and' does not accept objects of type Array") // testError(b"@false @and", // SemanticError, "missing operand after '@and'", // SemanticError, "unhandled Bool whose value is False") // testSemanticError(b"(true) @and", "'@and' does not accept objects of type String") // testError(b"(false) @and", // SemanticError, "'@and' does not accept objects of type String", // NonZeroExitCodeError, "command failed with exit code 1") // testSemanticError(b"false false @or", "unexpected '@or'") // testError(b"(false) @or", // SemanticError, "'@or' does not accept objects of type String", // NonZeroExitCodeError, "command failed with exit code 1") // testSemanticError(b"(@multiline true) @or", "'@or' does not accept objects of type String") // testSemanticError(b"(@and)", "unexpected '@and'") // testSemanticError(b"(@and) @or", "unexpected '@and'") // testError(b"@false @and false false", // SemanticError, "'@and' does not accept objects of type String", // SemanticError, "unhandled Bool whose value is False") // testError(b"@false @and @false false", // SemanticError, "expected '@and' operator but got token 'false'; commands must be wrapped with (...)", // SemanticError, "unhandled Bool whose value is False") // testError(b"@false @and @false @or @false", // SemanticError, "'@and' and '@or' cannot be chained", // SemanticError, "unhandled 
Bool whose value is False") // testSemanticError(b"true @and true", "'@and' does not accept objects of type String") // testSemanticError(b"@true @and true", "'@and' does not accept objects of type String") // testSemanticError(b"true @and (true)", "'@and' does not accept objects of type String") // testSemanticError(b"@false @or @false @and @false", "'@or' and '@and' cannot be chained") // testError(b"@true @or @true @and @true", // SemanticError, "'@or' and '@and' cannot be chained", // SemanticError, "unhandled Bool whose value is True") // testSemanticError(b"@true @and @true @or @true", "'@and' and '@or' cannot be chained") // testSemanticError(b"@false @or @false @or false", "'@or' does not accept objects of type String") // // # TODO: implement stderr error messages // testCommand(b"false", 1, b"") // testError(b"@false @or (false)", // SemanticError, "'@or' does not accept objects of type String", // NonZeroExitCodeError, "command failed with exit code 1") // testError(b"(false) @or @false", // SemanticError, "'@or' does not accept objects of type String", // NonZeroExitCodeError, "command failed with exit code 1") // testError(b"(false) @or (false)", // SemanticError, "'@or' does not accept objects of type String", // NonZeroExitCodeError, "command failed with exit code 1") // testSemanticError(b"(true) @or (true)", "'@or' does not accept objects of type String") // testError(b"(false) @or (true)", // SemanticError, "'@or' does not accept objects of type String", // NonZeroExitCodeError, "command failed with exit code 1") // testBoolExpression(b"(@exitcode false) @or (@exitcode false)", False) // testBoolExpression(b"(@exitcode false) @or (@exitcode true)", True) // testBoolExpression(b"@true @or @undefined", True) // testBoolExpression(b"@true @or $undefined", True) // testBoolExpression(b"(@exitcode true) @or (@exitcode @undefined)", True) // testSemanticError(b"@true @and ($missing)", "'$missing' is undefined") // testSemanticError(b"(true) @and 
(false)", "'@and' does not accept objects of type String") // testBoolExpression(b"(@exitcode true) @and (@exitcode false)", False) // testSemanticError(b"(true) @and (true)", "'@and' does not accept objects of type String") // testBoolExpression(b"(@exitcode true) @and (@exitcode true)", True) // testBoolExpression(b"(@exitcode false) @or (@exitcode false) @or (@exitcode false)", False) // testBoolExpression(b"(@exitcode false) @or (@exitcode false) @or (@exitcode true)", True) // testBoolExpression(b"(@exitcode true) @and (@exitcode true) @and (@exitcode false)", False) // testBoolExpression(b"(@exitcode true) @and (@exitcode true) @and (@exitcode true)", True) // // testBoolExpression(b"@false @and @false", False) // testBoolExpression(b"@false @and @true", False) // testBoolExpression(b"@true @and @false", False) // testBoolExpression(b"@true @and @true", True) // // testBoolExpression(b"@false @or @false", False) // testBoolExpression(b"@false @or @true", True) // testBoolExpression(b"@true @or @false", True) // testBoolExpression(b"@true @or @true", True) // // testCommand(b"(@echo true)", 0, b"") // // testSemanticError(b"$missing @and @true", "'$missing' is undefined") // testSemanticError(b"(@echo hello) @and @true", "'@and' does not accept objects of type String") // testCommand(b"@assert (@exitcode @echo hello) @and @true", 0, b"hello\n") // // testSemanticError(b"((@echo true)) @and @true", "'@and' does not accept objects of type String") // testBoolExpression(b"(@exitcode (@echo true)) @and @true", True) // // testSemanticError(b"abc@false @and @true", "cannot coerce Bool to String") // testSemanticError(b"abc@scriptfile @and @true", "'@and' does not accept objects of type String") // // testSemanticError(b"@assert (@exitcode @true @and @true)", "@exitcode is not compatible with binary expressions") // # todo: update this test when @stderr is defined // testSemanticError(b"@assert (@stderr @true @and @true)", "'@stderr' is undefined") // // 
testSemanticError(b"@assert (@exitcode @haveprog foo)", "@exitcode is not compatible with @haveprog") // // testBoolExpression(b"((@exitcode true) @and (@exitcode true)) @and (@exitcode true)", True) // testBoolExpression(b"((@exitcode true) @and (@exitcode true)) @and ((@exitcode true) @and (@exitcode false))", False) // // testBoolExpression(b"foo @eq foo", True) // testBoolExpression(b"foo @eq bar", False) // testSemanticError(b"foo @eq @false", "'@eq' does not accept objects of type Bool") // testBoolExpression(b"(@echo foo) @eq foo", True) // testBoolExpression(b"(@echo foo) @eq bar", False) // // testSemanticError(b"@assert", "@assert requires at least 1 argument") // testError(b"@assert (@array)", // SemanticError, "@assert expects a Bool but got a CommandResult", // CommandWithNoArgumentsError, "got a command with no arguments") // testAssertError(b"@assert @false") // testCommand(b"@assert @true", 0, b"") // testSemanticError(b"@assert true", "@assert expects a Bool but got a CommandResult") // // testSemanticError(b"@not", "@not requires at least 1 argument") // testSemanticError(b"@not @true @and @false", "got binary expression inside ambiguous operator '@not', wrap inside (..parenthesis..)") // # TODO: improve this error // testError(b"@not (@array)", // SemanticError, "unhandled Bool", // CommandWithNoArgumentsError, "got a command with no arguments") // testBoolExpression(b"@not @false", True) // // testSemanticError(b"@if", "@if requires at least 1 argument") // testSemanticError(b"@if @false", "need more '@end'") // testSemanticError(b"@if @true", "need more '@end'") // testCommand(b"@if @false\n@end", 0, b"") // testCommand(b"@if @true\n@end", 0, b"") // testCommand(b"\n".join([ // b"@if @false", // b" @echo hey", // b"@end", // ]), 0, b"") // testCommand(b"\n".join([ // b"@if @true", // b" @echo hey", // b"@end", // ]), 0, b"hey\n") // // testSemanticError(b"@end", "too many '@end'") // testSemanticError(b"@assert @end", "too many '@end'") // 
testSemanticError(b"@not @end", "too many '@end'") // // testExecError(b"(@array)", CommandWithNoArgumentsError, "got a command with no arguments") // // testSemanticError(b"@haveprog", "@haveprog takes 1 argument but got 0") // testSemanticError(b"@haveprog a b", "@haveprog takes 1 argument but got 2") // // # // # The assign '=' operator // # // testSyntaxError(b"=foo = foo", "'=' requires space separation") // testSyntaxError(b"foo = =foo", "'=' requires space separation") // testSyntaxError(b"foo= bar", "'=' requires space separation") // testSyntaxError(b"foo =bar", "'=' requires space separation") // testSyntaxError(b"foo=bar", "'=' requires space separation") // testSemanticError(b"=", "unexpected '='") // testSemanticError(b"foo =", "expected 1 argument after '=' but got 0") // testSemanticError(b"= foo", "unexpected '='") // testSemanticError(b"= = foo", "unexpected '='") // testSemanticError(b"foo = =", "unexpected '='") // testSemanticError(b"foo = bar baz", "expected 1 argument after '=' but got 2") // testSemanticError(b"@not = bar", "unexpected '='") // testSemanticError(b"@echo =", "unexpected '='") // testSemanticError(b"foo = @not", "expected a String, Bool or Array after '=' but got Builtin") try runTest("@true = bar", .{.err = "expected a String before '=' but got Bool"}); try runTest("@missing = bar", .{.err = "'@missing' is undefined"}); // testSemanticError(b"foo = @missing", "'@missing' is undefined") // testSemanticError(b"@not foo = bat", "unexpected '='") // testSemanticError(b"(foo = bar)", "assignment '=' is not allowed inside an inline command") // testCommand(b"foo = bar", 0, b"") // // testCommand(b"(@echo foo) = bar", 0, b"") // testCommand(b"foo = @false", 0, b"") // // testSemanticError(b"@settmp", "@settmp requires at least 3 arguments but got 0") // testSemanticError(b"@settmp a", "@settmp requires at least 3 arguments but got 1") // testSemanticError(b"@settmp a b", "@settmp requires at least 3 arguments but got 2") // // 
testSemanticError(b"@isfile", "@isfile takes 1 argument but got 0") // testSemanticError(b"@isfile foo bar", "@isfile takes 1 argument but got 2") // testSemanticError(b"@isdir", "@isdir takes 1 argument but got 0") // testSemanticError(b"@isdir foo bar", "@isdir takes 1 argument but got 2") // // testAssertError(b"\n".join([ // b"foo = bar", // b"@assert $foo @eq bar", // b"@assert @not ($foo @eq bar)", // ])) // // testCommand(b"@echo hello", 0, b"hello\n") // testCommand(b"@assert (@exitcode @echo hello)", 0, b"hello\n") // # todo: test the same things but with a real external program, I could use the stitch intepreter itself... // testCommand(b"@assert (@exitcode @echo hello)", 0, b"hello\n") // #testCommand(b"@assert ("" @eq (@stderr @echo hello))", 0, b"hello") // #testCommand(b"@assert ("" @eq (@stderr @echo hello)", b"hello") // // // testSemanticError(b"@setenv", "@setenv takes 2 arguments but got 0") // testSemanticError(b"@setenv FOO", "@setenv takes 2 arguments but got 1") // testSemanticError(b"@setenv FOO BAR BAZ", "@setenv takes 2 arguments but got 3") // testSemanticError(b"@setenv @false BAR", "@setenv requires 2 Strings but got Bool") // testSemanticError(b"@setenv FOO @false", "@setenv requires 2 Strings but got Bool") // testCommand(b"@setenv FOO BAR", 0, b"") // # TODO: remove this if/when environment variables are no longer "sticky" across multiple ScriptContexts // testCommand(b"@unsetenv FOO", 0, b"") // // testSemanticError(b"@env", "@env takes 1 argument but got 0") // testSemanticError(b"@env PWD PWD", "@env takes 1 argument but got 2") // testSemanticError(b"@env @false", "@env requires a String but got Bool") // testExecError(b"@env THIS_IS_NOT_DEFINED", prototype.UndefinedEnvironmentVariableError, // "undefined environment variable 'THIS_IS_NOT_DEFINED'") // // testSemanticError(b"@envdefault", "@envdefault takes 2 arguments but got 0") // testSemanticError(b"@envdefault foo", "@envdefault takes 2 arguments but got 1") // 
testSemanticError(b"@envdefault foo bar baz", "@envdefault takes 2 arguments but got 3") // testSemanticError(b"@envdefault @false foo", "@envdefault requires a String for its first argument but got Bool") // testSemanticError(b"@envdefault foo @false", "@envdefault requires a String for its second argument but got Bool") // testCommand(b"@envdefault THIS_IS_NOT_DEFINED foo", 0, b"foo") // // # test lazy default semantics // testSemanticError(b"@echo (@envdefault anything @a_semantic_error)", "'@a_semantic_error' is undefined") // testSemanticError(b"@echo (@envdefault anything $does_not_exist)", "'$does_not_exist' is undefined") // testCommand(b"\n".join([ // b"@setenv FOO bar", // b"@echo (@envdefault FOO $does_not_exist)", // b"@unsetenv FOO", // ]), 0, b"bar\n") // // testSemanticError(b"@unsetenv", "@unsetenv takes 1 argument but got 0") // testSemanticError(b"@unsetenv PWD PWD", "@unsetenv takes 1 argument but got 2") // testSemanticError(b"@unsetenv @false", "@unsetenv requires a String but got Bool") // testExecError(b"@unsetenv THIS_IS_NOT_DEFINED", UndefinedEnvironmentVariableError, // "undefined environment variable 'THIS_IS_NOT_DEFINED'") // // testExecError(b"@unsetenv FOO", UndefinedEnvironmentVariableError, // "undefined environment variable 'FOO'") // testAssertError(b"\n".join([ // b"@setenv FOO bar", // b"@assert (@env FOO) @eq bar", // b"@assert @not ((@env FOO) @eq bar)", // ])) // # cleanup since environment variables are global right now // testCommand(b"@unsetenv FOO", 0, b"") // testCommand(b"\n".join([ // b"@setenv FOO bar", // b"@assert (@env FOO) @eq bar", // # cleanup since environment variables are global right now // b"@unsetenv FOO", // b"@assert (@envdefault FOO bar) @eq bar", // ]), 0, b"") // // testSemanticError(b"@call", "@call requires at least one argument") // testSemanticError(b"@call @false", "@call requires Strings but got Bool") // testExecError(b"@call this-file-does-not-exist", MissingStitchScriptError, // "stitch script 
'this-file-does-not-exist' does not exist") // // testSemanticError(b"@cat a b", "'@cat' takes 0 or 1 arguments but got 2") // testCommand(b"@cat", 0, b"what", Options(stdin=b"what")) // // # // # Arrays // # // testSemanticError(b"@array @false", "@array requires Strings but got Bool") // testSemanticError(b"@array @echo", "@array requires Strings but got Builtin '@echo'") // testArrayExpression(b"@array", prototype.Array([])) // testArrayExpression(b"@array a", prototype.Array(["a"])) // testArrayExpression(b"@array a b", prototype.Array(["a", "b"])) // testArrayExpression(b"@array a b car", prototype.Array(["a", "b", "car"])) // testCommand(b"a = (@array)", 0, b"") // testCommand(b"a = (@array a)", 0, b"") // testCommand(b"a = (@array a b c)", 0, b"") // testCommand(b"@echo (@array)", 0, b"") // testCommand(b"@echo (@array a b c)", 0, b"a b c\n") // testCommand(b'@assert "" @eq (@echo (@array))', 0, b"") // testCommand(b'@assert "a b c" @eq (@echo (@array a b c))', 0, b"") // testCommand(b"@echo foo(@array bar baz)buz", 0, b"foobarbazbuz\n") // // testSemanticError(b"@len", "@len takes 1 argument but got 0") // testSemanticError(b"@len a", "@len requires an Array but got 'String'") // testSemanticError(b"@len @array", "@len requires an Array but got 'Builtin'") // testCommand(b"@len (@array)", 0, b"0") // testCommand(b"@len (@array a)", 0, b"1") // testCommand(b"a = (@array)\n@len $a", 0, b"0") // testCommand(b"a = (@array a b c)\n@len $a", 0, b"3") // testCommand(b"a = (@array a b c)\n@assert (@len $a) @eq 3", 0, b"") // testCommand(b"a = (@array a b c)\n@assert (@len (@array 1 2 3 4)) @eq 4", 0, b"") // // testSemanticError(b"@index", "@index takes 2 arguments but got 0") // testSemanticError(b"@index a", "@index takes 2 arguments but got 1") // testSemanticError(b"@index a b c", "@index takes 2 arguments but got 3") // testSemanticError(b"@index a 0", "@index requires an Array for argument 1 but got 'String'") // testSemanticError(b"@index (@array) @true", 
"@index requires a number String for argument 2 but got 'Bool'") // testSemanticError(b"@index (@array) a", "@index requires a number for argument 2 but got 'a'") // testSemanticError(b"@index (@array) -1", "@index -1 cannot be negative") // testSemanticError(b"@index (@array) 0", "@index 0 is out of bounds (length=0)") // testSemanticError(b"@index (@array a) 4", "@index 4 is out of bounds (length=1)") // testCommand(b"@index (@array a) 0", 0, b"a") // testCommand(b"@index (@array a b c d e f g) 3", 0, b"d") // // # // # Pipes // # // testSyntaxError(b"0@pipe", "'@pipe' requires space separation") // testSemanticError(b"@pipe", "unexpected '@pipe'") // def testBadPipeNode(node_str, after_only): // error = "'@pipe' requires an inline command but got '{}'".format(node_str) // if not after_only: // testSemanticError("{} @pipe (@echo)".format(node_str).encode('utf8'), error) // testSemanticError("(@echo) @pipe {}".format(node_str).encode('utf8'), error) // // testBadPipeNode("a", after_only=False) // testBadPipeNode("$foo", after_only=False) // testBadPipeNode("@false", after_only=False) // testBadPipeNode("@or", after_only=False) // testBadPipeNode("@not", after_only=True) // // testSemanticError(b"@not @pipe b", "unexpected '@pipe'") // testSemanticError(b"@not (@echo) @pipe (@echo)", "@not expects a Bool but got an ExitCode") // testSemanticError(b"(@echo) @pipe (@echo) @false", "expected '@pipe' but got '@false'") // testSemanticError(b"(@true) @pipe (@echo)", "unexpected Bool within @pipe expression") // testSemanticError(b"(@echo) @pipe (@true)", "unexpected Bool within @pipe expression") // testSemanticError(b"(@true @and @true) @pipe (@echo)", "unexpected binary expression within @pipe expression") // testError(b"((@array)) @pipe (@echo)", CommandWithNoArgumentsError, "got a command with no arguments") // testError(b"(@echo) @pipe ((@array))", CommandWithNoArgumentsError, "got a command with no arguments") // // testCommand(b"(@echo hello) @pipe (@cat)", 0, 
b"hello\n") // // testSemanticError(b"(@exitcode (@echo hello) @pipe (@cat))", "unhandled Bool") // testCommand(b"@assert (@exitcode (@echo hello) @pipe (@cat))", 0, b"hello\n") // testSemanticError(b"a @pipe b", "'@pipe' requires an inline command but got 'a'") // testCommand(b"(@echo hello) @pipe (@echo pipes)", 0, b"pipes\n") // testCommand(b"@assert (@exitcode (@echo) @pipe (@echo))", 0, b"") // // testCommand(b"(@assert (@exitcode @echo)) @pipe (@cat)", 0, b"") // testSemanticError(b"(@isfile this_does_not_exist) @pipe (@cat)", "commands with @isfile cannot be piped") // testSemanticError(b"(@exitcode @echo hello) @pipe (@cat)", "commands with @exitcode cannot be piped") // testSemanticError(b"(@echo hello) @pipe (@exitcode @false)", "unhandled Bool") // testSemanticError(b"(@echo hello) @pipe (@exitcode @true @and @true)", // "@exitcode is not compatible with binary expressions") // testError(b"(@echo hello) @pipe (@exitcode this_program_does_not_exist)", // SemanticError, "unhandled Bool", // MissingProgramError, "unable to find program 'this_program_does_not_exist' in PATH") // testError(b"(@echo hello) @pipe (@exitcode (@array))", // SemanticError, "unhandled Bool", // CommandWithNoArgumentsError, "got a command with no arguments") // testSemanticError(b"(@echo hello) @pipe (@exitcode @cat)", "unhandled Bool whose value is True") // testCommand(b"@assert (@echo hello) @pipe (@exitcode @cat)", 0, b"hello\n") // testSemanticError(b"@assert ((@echo hello) @pipe (@exitcode @cat))", // "this inline command captured multiple objects, a Bool and stdout") // # TODO: implement captureobj and add this test // # or maybe look into the last command of a pipe chain and limit it somehow? 
// #testCommand(b"@assert (@captureobj (@echo hello) @pipe (@exitcode @cat))", 0, b"hello\n") // testCommand(b"@assert (@exitcode (@echo hello) @pipe (@cat))", 0, b"hello\n") // // testCommand(b"cat @callerworkdir/assetsfortest/hellomsg", 0, b"Hello Stitch!\n") // testCommand(b"(@echo hello) @pipe (cat)", 0, b"hello\n") // testCommand(b"(@echo hello) @pipe (@cat) @pipe (@cat)", 0, b"hello\n") // testCommand(b"(@echo hello) @pipe (@cat) @pipe (cat)", 0, b"hello\n") // testCommand(b"(@echo hello) @pipe (cat) @pipe (@cat)", 0, b"hello\n") // testCommand(b"(@echo hello) @pipe (cat) @pipe (cat)", 0, b"hello\n") // testCommand(b"(@echo hello) @pipe (cat) @pipe (cat) @pipe (@cat) @pipe (cat) @pipe (@cat)", 0, b"hello\n") // // testExecError(b"@unreachable", UnreachableError, "reached @unreachable") // // if os.name == "nt": // testSemanticError(b"@getuid", "@getuid not supported on Windows") // else: // testSemanticError(b"@getuid a", "@getuid takes 0 arguments but got 1") // testCommand(b"@getuid", 0, str(os.getuid()).encode('ascii')) // // testSemanticError(b"@exit", "@exit takes 1 argument but got 0") // testSemanticError(b"@exit foo", "@exit requires an integer but got 'foo'") // # TODO: this should be an error because @exit returns NoReturn // #testSemanticError(b"@exit 0\n@echo foo", "") // // testSemanticError(b"@stdin2file", "@stdin2file takes 1 argument but got 0") // testSemanticError(b"@stdin2file a b", "@stdin2file takes 1 argument but got 2") // // def verifyAndRemoveFile(filename, content: bytes): // with open(filename, "rb") as file: // expectMatch(file.read(), content) // os.remove(filename) // // testfile = os.path.join(outdir, "testfile") // testCommand(b"@stdin2file @callerworkdir/out/testfile", 0, b"", Options(stdin=b"what")) // verifyAndRemoveFile(testfile, b"what") // testCommand(b"(@echo another) @pipe (@stdin2file @callerworkdir/out/testfile)", 0, b"") // verifyAndRemoveFile(testfile, b"another\n") // // testCommand(b"@stdin2file 
@callerworkdir/this-dir-does-not-exist/foo", 1, b"", Options(stdin=b"what")) // // testSemanticError(b"foo = (uname)\n$foo = baz", "variable names must be known at verification time") // // runStitchTests() // // print("test: success") // }
src/run.zig
const std = @import("std");
const pike = @import("pike.zig");
const windows = @import("os/windows.zig");
const PackedWaker = @import("waker.zig").PackedWaker;

const math = std.math;
const meta = std.meta;

/// Bit-set of signal categories a `Signal` instance is interested in.
/// Packed so the whole set fits in `MaskInt` and can be stored/updated
/// atomically inside the shared `mask` word.
pub const SignalType = packed struct {
    terminate: bool = false,
    interrupt: bool = false,
    quit: bool = false,
    hup: bool = false,
};

/// Windows implementation of async signal waiting: bridges the Win32 console
/// ctrl handler (SetConsoleCtrlHandler) to suspended async frames via a
/// `PackedWaker`. All instances share one process-wide handler, refcounted in
/// `refs`; the union of all instances' interests lives in `mask`.
pub const Signal = struct {
    // Unsigned integer wide enough to hold a `SignalType` as raw bits.
    const MaskInt = meta.Int(.unsigned, @bitSizeOf(SignalType));
    const Self = @This();

    // Number of live Signal instances; the console handler is installed when
    // this goes 0 -> 1 and removed when it returns to 0 (see init/deinit).
    var refs: u64 = 0;
    // Union of every live instance's `current_signal` bits (only the low
    // `MaskInt` bits are meaningful). Updated with atomic RMW ops.
    var mask: u64 = 0;
    // Guards `waker`'s intrusive lists; the console handler runs on a
    // separate thread, so every waker access is under this lock.
    var lock: std.Thread.Mutex = .{};
    // Parks suspended `wait()` frames keyed by the SignalType they wait on.
    var waker: PackedWaker(pike.Task, SignalType) = .{};

    // The signal set this instance waits for.
    current_signal: SignalType,
    // Snapshot of `mask` from before this instance OR'd its bits in;
    // restored on deinit. NOTE(review): a plain store of the snapshot can
    // also clear bits that a *later* instance added — assumes nested
    // init/deinit lifetimes; confirm with callers.
    previous_signal: u64,

    /// Console control handler invoked by Windows on its own thread.
    /// Returns TRUE when the event was consumed (a waiter existed for it),
    /// FALSE to let the default handling proceed.
    fn handler(signal: windows.DWORD) callconv(.C) windows.BOOL {
        // Snapshot the currently-subscribed signal bits.
        const current = @bitCast(SignalType, @truncate(MaskInt, @atomicLoad(u64, &mask, .SeqCst)));
        return blk: {
            switch (signal) {
                // Ctrl+C / Ctrl+Break map to interrupt/terminate.
                windows.CTRL_C_EVENT, windows.CTRL_BREAK_EVENT => {
                    if (!current.interrupt and !current.terminate) break :blk windows.FALSE;
                    const held = lock.acquire();
                    const next_node = waker.wake(.{ .interrupt = true, .terminate = true });
                    held.release();
                    // Dispatch outside the lock to avoid re-entrancy into waker state.
                    if (next_node) |node| pike.dispatch(&node.data, .{});
                    break :blk windows.TRUE;
                },
                // Console window closed -> hangup.
                windows.CTRL_CLOSE_EVENT => {
                    if (!current.hup) break :blk windows.FALSE;
                    const held = lock.acquire();
                    const next_node = waker.wake(.{ .hup = true });
                    held.release();
                    if (next_node) |node| pike.dispatch(&node.data, .{});
                    break :blk windows.TRUE;
                },
                // Logoff/shutdown -> quit.
                windows.CTRL_LOGOFF_EVENT, windows.CTRL_SHUTDOWN_EVENT => {
                    if (!current.quit) break :blk windows.FALSE;
                    const held = lock.acquire();
                    const next_node = waker.wake(.{ .quit = true });
                    held.release();
                    if (next_node) |node| pike.dispatch(&node.data, .{});
                    break :blk windows.TRUE;
                },
                else => break :blk windows.FALSE,
            }
        };
    }

    /// Registers interest in `current_signal`. Installs the process-wide
    /// console handler on the first live instance. Errors only if
    /// SetConsoleCtrlHandler fails (refcount is rolled back via errdefer).
    pub fn init(current_signal: SignalType) !Self {
        errdefer _ = @atomicRmw(u64, &refs, .Sub, 1, .SeqCst);

        // First instance installs the shared handler.
        if (@atomicRmw(u64, &refs, .Add, 1, .SeqCst) == 0) {
            try windows.SetConsoleCtrlHandler(handler, true);
        }

        // OR our bits into the shared mask, remembering the prior value so
        // deinit can restore it.
        const previous_signal = @atomicRmw(u64, &mask, .Or, @intCast(u64, @bitCast(MaskInt, current_signal)), .SeqCst);

        return Self{
            .current_signal = current_signal,
            .previous_signal = previous_signal,
        };
    }

    /// Drops this instance's interest. The last instance removes the console
    /// handler and wakes every remaining parked waiter (with all bits set)
    /// so no frame stays suspended forever.
    pub fn deinit(self: *const Self) void {
        @atomicStore(u64, &mask, self.previous_signal, .SeqCst);

        if (@atomicRmw(u64, &refs, .Sub, 1, .SeqCst) == 1) {
            windows.SetConsoleCtrlHandler(handler, false) catch unreachable;

            const held = lock.acquire();
            // Drain all waiters regardless of which signal they waited for.
            while (waker.wake(@bitCast(SignalType, @as(MaskInt, math.maxInt(MaskInt))))) |node| {
                pike.dispatch(&node.data, .{});
            }
            held.release();
        }
    }

    /// Suspends the calling async frame until one of `current_signal` fires.
    /// If the waker already has a pending wake for these bits, returns
    /// immediately. Note the lock is intentionally held across the start of
    /// `suspend` and released inside it, so enqueueing the frame and
    /// suspending are atomic with respect to `handler`.
    pub fn wait(self: *const Self) callconv(.Async) !void {
        const held = lock.acquire();
        if (waker.wait(self.current_signal)) {
            // A wake was already pending; consume it and return.
            held.release();
        } else {
            suspend {
                // Park this frame in the waker's list, then release the lock
                // from inside the suspend block (frame is safely enqueued).
                var node = @TypeOf(waker).FrameNode{ .data = pike.Task.init(@frame()) };
                @TypeOf(waker).FrameList.append(&waker.heads, self.current_signal, &node);
                held.release();
            }
            // Resumed: chain-dispatch the next waiter for the same bits, if any.
            const next_held = lock.acquire();
            const next_node = waker.next(self.current_signal);
            next_held.release();
            if (next_node) |node| {
                pike.dispatch(&node.data, .{});
            }
        }
    }
};
signal_windows.zig
/// Framebuffer pixel formats accepted by sceDisplaySetFrameBuf.
pub const PspDisplayPixelFormats = extern enum(c_int) {
    Format565 = 0,
    Format5551 = 1,
    Format4444 = 2,
    Format8888 = 3,
};

/// When a framebuffer change submitted via sceDisplaySetFrameBuf takes effect.
pub const PspDisplaySetBufSync = extern enum(c_int) {
    Immediate = 0,
    Nextframe = 1,
};

/// Error codes returned by the sceDisplay* firmware calls (0 == success).
pub const PspDisplayErrorCodes = extern enum(c_int) {
    Ok = 0,
    Pointer = 2147483907,
    Argument = 2147483911,
};

// Set display mode
//
// @param mode - Display mode, normally 0.
// @param width - Width of screen in pixels.
// @param height - Height of screen in pixels.
//
// @return ???
pub extern fn sceDisplaySetMode(mode: c_int, width: c_int, height: c_int) c_int;

/// Convenience wrapper around sceDisplaySetMode that discards the result.
pub fn displaySetMode(mode: c_int, width: c_int, height: c_int) void {
    _ = sceDisplaySetMode(mode, width, height);
}

// Get display mode
//
// @param pmode - Pointer to an integer to receive the current mode.
// @param pwidth - Pointer to an integer to receive the current width.
// @param pheight - Pointer to an integer to receive the current height,
//
// @return 0 on success
pub extern fn sceDisplayGetMode(pmode: *c_int, pwidth: *c_int, pheight: *c_int) c_int;

/// Convenience wrapper around sceDisplayGetMode.
/// Returns true on success (firmware returned 0).
pub fn displayGetMode(pmode: *c_int, pwidth: *c_int, pheight: *c_int) bool {
    const res = sceDisplayGetMode(pmode, pwidth, pheight);
    return res == 0;
}

// Display set framebuf
//
// @param topaddr - address of start of framebuffer
// @param bufferwidth - buffer width (must be power of 2)
// @param pixelformat - One of ::PspDisplayPixelFormats.
// @param sync - One of ::PspDisplaySetBufSync
//
// @return 0 on success
pub extern fn sceDisplaySetFrameBuf(topaddr: ?*c_void, bufferwidth: c_int, pixelformat: c_int, sync: c_int) c_int;

/// Convenience wrapper around sceDisplaySetFrameBuf.
/// Returns true on success (firmware returned 0).
pub fn displaySetFrameBuf(topaddr: ?*c_void, bufferwidth: c_int, pixelformat: c_int, sync: c_int) bool {
    const res = sceDisplaySetFrameBuf(topaddr, bufferwidth, pixelformat, sync);
    // Fixed: previously returned the raw c_int result from a bool function;
    // success is indicated by a 0 return, matching the sibling wrappers.
    return res == 0;
}

// Get Display Framebuffer information
//
// @param topaddr - pointer to void* to receive address of start of framebuffer
// @param bufferwidth - pointer to int to receive buffer width (must be power of 2)
// @param pixelformat - pointer to int to receive one of ::PspDisplayPixelFormats.
// @param sync - One of ::PspDisplaySetBufSync
//
// @return 0 on success
// NOTE(review): `sync` is taken by value here despite the out-parameter style of
// the others — this matches the existing binding; confirm against the PSPSDK header.
pub extern fn sceDisplayGetFrameBuf(topaddr: **c_void, bufferwidth: *c_int, pixelformat: *c_int, sync: c_int) c_int;

/// Convenience wrapper around sceDisplayGetFrameBuf.
/// Returns true on success (firmware returned 0).
pub fn displayGetFrameBuf(topaddr: **c_void, bufferwidth: *c_int, pixelformat: *c_int, sync: c_int) bool {
    const res = sceDisplayGetFrameBuf(topaddr, bufferwidth, pixelformat, sync);
    return res == 0;
}

// Number of vertical blank pulses up to now
pub extern fn sceDisplayGetVcount() c_uint;

// Wait for vertical blank start
pub extern fn sceDisplayWaitVblankStart() c_int;

/// Blocks until the start of the next vertical blank; discards the result.
pub fn displayWaitVblankStart() void {
    _ = sceDisplayWaitVblankStart();
}

// Wait for vertical blank start with callback
pub extern fn sceDisplayWaitVblankStartCB() c_int;

/// Callback-enabled variant of displayWaitVblankStart; discards the result.
pub fn displayWaitVblankStartCB() void {
    _ = sceDisplayWaitVblankStartCB();
}

// Wait for vertical blank
pub extern fn sceDisplayWaitVblank() c_int;

/// Blocks until vertical blank; discards the result.
pub fn displayWaitVblank() void {
    _ = sceDisplayWaitVblank();
}

// Wait for vertical blank with callback
pub extern fn sceDisplayWaitVblankCB() c_int;

/// Callback-enabled variant of displayWaitVblank; discards the result.
pub fn displayWaitVblankCB() void {
    _ = sceDisplayWaitVblankCB();
}

// Get accumlated HSYNC count
pub extern fn sceDisplayGetAccumulatedHcount() c_int;

// Get current HSYNC count
pub extern fn sceDisplayGetCurrentHcount() c_int;

// Get number of frames per second
pub extern fn sceDisplayGetFramePerSec() f32;

// Get whether or not frame buffer is being displayed
pub extern fn sceDisplayIsForeground() c_int;

// Test whether VBLANK is active
pub extern fn sceDisplayIsVblank() c_int;
src/psp/sdk/pspdisplay.zig
const std = @import("std"); const stdx = @import("stdx"); const build_options = @import("build_options"); const Backend = build_options.GraphicsBackend; const t = stdx.testing; const Vec2 = stdx.math.Vec2; const ds = stdx.ds; const graphics = @import("graphics"); const sdl = @import("sdl"); const curl = @import("curl"); const uv = @import("uv"); const h2o = @import("h2o"); const v8 = @import("v8"); const platform = @import("platform"); const gl = @import("gl"); const builtin = @import("builtin"); const v8x = @import("v8x.zig"); const js_env = @import("js_env.zig"); const log = stdx.log.scoped(.runtime); const api = @import("api.zig"); const cs_graphics = @import("api_graphics.zig").cs_graphics; const gen = @import("gen.zig"); const audio = @import("audio.zig"); const work_queue = @import("work_queue.zig"); const TaskOutput = work_queue.TaskOutput; const tasks = @import("tasks.zig"); const WorkQueue = work_queue.WorkQueue; const UvPoller = @import("uv_poller.zig").UvPoller; const HttpServer = @import("server.zig").HttpServer; const Timer = @import("timer.zig").Timer; const EventDispatcher = stdx.events.EventDispatcher; const NullId = stdx.ds.CompactNull(u32); const devmode = @import("devmode.zig"); const DevModeContext = devmode.DevModeContext; const adapter = @import("adapter.zig"); const PromiseSkipJsGen = adapter.PromiseSkipJsGen; const FuncData = adapter.FuncData; const FuncDataUserPtr = adapter.FuncDataUserPtr; pub const Environment = @import("env.zig").Environment; pub const PromiseId = u32; // Js init scripts. const api_init = @embedFile("snapshots/api_init.js"); const gen_api_init = @embedFile("snapshots/gen_api.js"); // Generated. Not tracked by git. const test_init = @embedFile("snapshots/test_init.js"); // Keep a global rt for debugging and prototyping. pub var global: *RuntimeContext = undefined; // Manages runtime resources. // Used by V8 callback functions. 
// TODO: Rename to Runtime pub const RuntimeContext = struct { const Self = @This(); alloc: std.mem.Allocator, str_buf: std.ArrayList(u8), window_class: v8.Persistent(v8.FunctionTemplate), graphics_class: v8.Persistent(v8.FunctionTemplate), http_response_class: v8.Persistent(v8.FunctionTemplate), http_server_class: v8.Persistent(v8.FunctionTemplate), http_response_writer: v8.Persistent(v8.ObjectTemplate), image_class: v8.Persistent(v8.FunctionTemplate), color_class: v8.Persistent(v8.FunctionTemplate), transform_class: v8.Persistent(v8.FunctionTemplate), sound_class: v8.Persistent(v8.ObjectTemplate), random_class: v8.Persistent(v8.ObjectTemplate), handle_class: v8.Persistent(v8.ObjectTemplate), rt_ctx_tmpl: v8.Persistent(v8.ObjectTemplate), default_obj_t: v8.Persistent(v8.ObjectTemplate), /// Collection of mappings from id to resource handles. /// Resources of similar type are linked together. /// Resources can be deinited by js but the resource id slot won't be freed until a js finalizer callback. resources: ds.CompactManySinglyLinkedList(ResourceListId, ResourceId, ResourceHandle), /// Weak handles are like resources except they aren't grouped together by type. /// A weak handle can be deinited but the slot won't be freed until a js finalizer callback. /// Since the finalizer callback relies on the garbage collector, the handles should be light in memory /// and have a pointer to the inner struct which can be deinited explicitly /// either through the runtime or user request. weak_handles: ds.CompactUnorderedList(WeakHandleId, WeakHandle), generic_resource_list: ResourceListId, generic_resource_list_last: ResourceId, window_resource_list: ResourceListId, window_resource_list_last: ResourceId, // Keep track of active windows so we know when to stop the app. num_windows: u32, // Window that has keyboard focus and will receive swap buffer. // Note: This is only safe if the allocation doesn't change. 
active_window: *CsWindow, /// Only one renderer exists for drawing to all windows and targets. renderer: graphics.Renderer, inited_renderer: bool, // Absolute path of the main script. main_script_path: ?[]const u8, // This is used to store native string slices copied from v8.String for use in the immediate native callback functions. // It will automatically clear at the pre callback step if the current size is too large. // Native callback functions that have []const u8 in their params should assume they only live until end of function scope. cb_str_buf: std.ArrayList(u8), cb_f32_buf: std.ArrayList(f32), vec2_buf: std.ArrayList(Vec2), // Whether this was invoked from "cosmic test" // TODO: Rename to is_test_runner is_test_env: bool, // Test runner. num_tests: u32, // Includes sync and async tests. num_tests_passed: u32, num_async_tests: u32, num_async_tests_finished: u32, num_async_tests_passed: u32, num_isolated_tests_finished: u32, isolated_tests: std.ArrayList(IsolatedTest), // Main thread waits for a wakeup call before running event loop. main_wakeup: std.Thread.ResetEvent, work_queue: WorkQueue, promises: ds.CompactUnorderedList(PromiseId, v8.Persistent(v8.PromiseResolver)), last_err: CsError, // uv_loop_t is quite large, so allocate on heap. uv_loop: *uv.uv_loop_t, uv_dummy_async: *uv.uv_async_t, uv_poller: UvPoller, received_uncaught_exception: bool, // Used in test callbacks to shutdown the runtime. requested_shutdown: bool, timer: Timer, dev_mode: bool, dev_ctx: DevModeContext, event_dispatcher: EventDispatcher, // V8. platform: v8.Platform, create_params: v8.CreateParams, isolate: v8.Isolate, context: v8.Persistent(v8.Context), hscope: v8.HandleScope, global: v8.Persistent(v8.Object), // Store locally for quick access. js_undefined: v8.Primitive, js_null: v8.Primitive, js_false: v8.Boolean, js_true: v8.Boolean, modules: std.AutoHashMap(u32, ModuleInfo), // Holds the result of running the main script. 
run_main_script_res: ?RunModuleScriptResult, // Whether the main script is done with top level awaits. // This doesn't mean that the process is done since some resources can keep it alive (eg. a window) main_script_done: bool, get_native_val_err: anyerror, env: *Environment, pub fn init(self: *Self, alloc: std.mem.Allocator, platform_: v8.Platform, config: RuntimeConfig, env: *Environment, ) void { self.* = .{ .alloc = alloc, .str_buf = std.ArrayList(u8).init(alloc), .window_class = undefined, .color_class = undefined, .transform_class = undefined, .graphics_class = undefined, .http_response_class = undefined, .http_response_writer = undefined, .http_server_class = undefined, .image_class = undefined, .handle_class = undefined, .rt_ctx_tmpl = undefined, .sound_class = undefined, .random_class = undefined, .default_obj_t = undefined, .resources = ds.CompactManySinglyLinkedList(ResourceListId, ResourceId, ResourceHandle).init(alloc), .weak_handles = ds.CompactUnorderedList(u32, WeakHandle).init(alloc), .generic_resource_list = undefined, .generic_resource_list_last = undefined, .window_resource_list = undefined, .window_resource_list_last = undefined, .num_windows = 0, .active_window = undefined, .global = undefined, .main_script_path = null, .cb_str_buf = std.ArrayList(u8).init(alloc), .cb_f32_buf = std.ArrayList(f32).init(alloc), .vec2_buf = std.ArrayList(Vec2).init(alloc), .renderer = undefined, .inited_renderer = false, .js_undefined = undefined, .js_null = undefined, .js_false = undefined, .js_true = undefined, .is_test_env = config.is_test_runner, .num_tests = 0, .num_tests_passed = 0, .num_async_tests = 0, .num_async_tests_finished = 0, .num_async_tests_passed = 0, .num_isolated_tests_finished = 0, .isolated_tests = std.ArrayList(IsolatedTest).init(alloc), .main_wakeup = undefined, .work_queue = undefined, .promises = ds.CompactUnorderedList(PromiseId, v8.Persistent(v8.PromiseResolver)).init(alloc), .uv_loop = undefined, .uv_dummy_async = undefined, .uv_poller 
= undefined, .received_uncaught_exception = false, .requested_shutdown = false, .last_err = error.NoError, .timer = undefined, .dev_mode = config.is_dev_mode, .dev_ctx = undefined, .event_dispatcher = undefined, .platform = platform_, .isolate = undefined, .context = undefined, .create_params = undefined, .hscope = undefined, .modules = std.AutoHashMap(u32, ModuleInfo).init(alloc), .run_main_script_res = null, .main_script_done = false, .get_native_val_err = undefined, .env = env, }; self.main_wakeup.reset(); self.initUv(); self.work_queue = WorkQueue.init(alloc, self.uv_loop, &self.main_wakeup); self.work_queue.createAndRunWorker(); // Insert dummy head so we can set last. const dummy: ResourceHandle = .{ .ptr = undefined, .tag = .Dummy, .external_handle = undefined, .deinited = true, .on_deinit_cb = null }; self.window_resource_list = self.resources.addListWithHead(dummy) catch unreachable; self.window_resource_list_last = self.resources.getListHead(self.window_resource_list).?; self.generic_resource_list = self.resources.addListWithHead(dummy) catch unreachable; self.generic_resource_list_last = self.resources.getListHead(self.generic_resource_list).?; if (builtin.os.tag == .linux or builtin.os.tag == .macos) { // Ignore sigpipe for writes to sockets that have already closed and let it return as an error to callers. const SIG_IGN = @intToPtr(fn(c_int, *const std.os.siginfo_t, ?*const anyopaque) callconv(.C) void, 1); const act = std.os.Sigaction{ .handler = .{ .sigaction = SIG_IGN }, .mask = std.os.empty_sigset, .flags = 0, }; std.os.sigaction(std.os.SIG.PIPE, &act, null) catch unreachable; } self.initJs(); // Set up timer. Needs v8 context. 
self.timer.init(self) catch unreachable; global = self; } fn initJs(self: *Self) void { self.create_params = v8.initCreateParams(); self.create_params.array_buffer_allocator = v8.createDefaultArrayBufferAllocator(); var iso = v8.Isolate.init(&self.create_params); self.isolate = iso; iso.enter(); defer iso.exit(); self.hscope.init(iso); defer self.hscope.deinit(); self.js_undefined = iso.initUndefined(); self.js_null = iso.initNull(); self.js_false = iso.initFalse(); self.js_true = iso.initTrue(); // Set up uncaught promise rejection handler. iso.setPromiseRejectCallback(promiseRejectCallback); // By default, scripts will automatically run microtasks when call depth returns to zero. // It also allows us to use performMicrotasksCheckpoint in cases where we need to sooner. iso.setMicrotasksPolicy(v8.MicrotasksPolicy.kAuto); // Receive the first uncaught exceptions and find the next opportunity to shutdown. const external = iso.initExternal(self).toValue(); iso.setCaptureStackTraceForUncaughtExceptions(true, 10); _ = iso.addMessageListenerWithErrorLevel(v8MessageCallback, v8.MessageErrorLevel.kMessageError, external); self.context = v8.Persistent(v8.Context).init(iso, js_env.initContext(self, iso)); self.global = iso.initPersistent(v8.Object, self.context.inner.getGlobal()); const ctx = self.getContext(); ctx.enter(); defer ctx.exit(); // Attach user context from json string. 
if (self.env.user_ctx_json) |json| { const json_str = iso.initStringUtf8(json); const json_val = v8.Json.parse(ctx, json_str) catch unreachable; _ = self.global.inner.setValue(ctx, iso.initStringUtf8("user"), json_val); } // Run api_init.js self.runScript("api_init.js", api_init) catch unreachable; // Run gen_api.js self.runScript("gen_api.js", gen_api_init) catch unreachable; if (self.is_test_env or builtin.is_test or self.env.include_test_api) { // Run test_init.js self.runScript("test_init.js", test_init) catch unreachable; } } fn initUv(self: *Self) void { // Ensure we're using the right headers and the linked uv has patches applied. std.debug.assert(uv.uv_loop_size() == @sizeOf(uv.uv_loop_t)); // Create libuv evloop instance. self.uv_loop = self.alloc.create(uv.uv_loop_t) catch unreachable; var res = uv.uv_loop_init(self.uv_loop); uv.assertNoError(res); // Make sure iocp allows 2 concurrent threads on windows (Main thread and uv poller thread). // If set to 1 (libuv default), the first thread that calls GetQueuedCompletionStatus will attach to the iocp and other threads won't be able to receive events. if (builtin.os.tag == .windows) { std.os.windows.CloseHandle(self.uv_loop.iocp.?); self.uv_loop.iocp = std.os.windows.CreateIoCompletionPort(std.os.windows.INVALID_HANDLE_VALUE, null, 0, 2) catch unreachable; } const S = struct { fn onWatcherQueueChanged(_loop: [*c]uv.uv_loop_t) callconv(.C) void { // log.debug("on queue changed", .{}); const loop = @ptrCast(*uv.uv_loop_t, _loop); const rt = stdx.mem.ptrCastAlign(*RuntimeContext, loop.data.?); const res_ = uv.uv_async_send(rt.uv_dummy_async); uv.assertNoError(res_); } }; // Once this is merged: https://github.com/libuv/libuv/pull/3308, // we can remove patches/libuv_on_watcher_queue_updated.patch and use the better method. 
self.uv_loop.data = self; if (builtin.os.tag == .linux or builtin.os.tag == .macos) { self.uv_loop.on_watcher_queue_updated = S.onWatcherQueueChanged; } // Add dummy handle or UvPoller/uv_backend_timeout will think there is nothing to wait for. self.uv_dummy_async = self.alloc.create(uv.uv_async_t) catch unreachable; res = uv.uv_async_init(self.uv_loop, self.uv_dummy_async, null); uv.assertNoError(res); self.event_dispatcher = EventDispatcher.init(self.uv_dummy_async); stdx.http.curlm_uvloop = self.uv_loop; stdx.http.dispatcher = self.event_dispatcher; // uv needs to run once to initialize or UvPoller will never get the first event. // TODO: Revisit this again. _ = uv.uv_run(self.uv_loop, uv.UV_RUN_NOWAIT); // Start uv poller thread. self.uv_poller = UvPoller.init(self.uv_loop, &self.main_wakeup); const thread = std.Thread.spawn(.{}, UvPoller.run, .{&self.uv_poller}) catch unreachable; _ = thread.setName("UV Poller") catch {}; } /// Isolate should not be entered when calling this. fn deinit(self: *Self) void { self.enter(); self.str_buf.deinit(); self.cb_str_buf.deinit(); self.cb_f32_buf.deinit(); self.vec2_buf.deinit(); if (self.dev_mode and !self.dev_ctx.restart_requested) { self.dev_ctx.deinit(); } if (self.inited_renderer) { self.renderer.deinit(self.alloc); } { var iter = self.weak_handles.iterator(); while (iter.nextPtr()) |handle| { handle.deinit(self); } self.weak_handles.deinit(); } { var iter = self.resources.nodes.iterator(); while (iter.nextPtr()) |_| { const res_id = iter.cur_id; self.destroyResourceHandle(res_id); } self.resources.deinit(); } self.work_queue.deinit(); { var iter = self.promises.iterator(); while (iter.nextPtr()) |p| { p.deinit(); } } self.promises.deinit(); for (self.isolated_tests.items) |*case| { case.deinit(self.alloc); } self.isolated_tests.deinit(); self.alloc.destroy(self.uv_dummy_async); self.alloc.destroy(self.uv_loop); if (self.main_script_path) |path| { self.alloc.free(path); } { var iter = self.modules.valueIterator(); 
while (iter.next()) |it| { it.deinit(self.alloc); } self.modules.deinit(); } self.timer.deinit(); self.window_class.deinit(); self.graphics_class.deinit(); self.http_response_class.deinit(); self.http_server_class.deinit(); self.http_response_writer.deinit(); self.image_class.deinit(); self.color_class.deinit(); self.transform_class.deinit(); self.handle_class.deinit(); self.rt_ctx_tmpl.deinit(); self.sound_class.deinit(); self.random_class.deinit(); self.default_obj_t.deinit(); self.global.deinit(); if (self.run_main_script_res) |*res| { res.deinit(self.alloc); } // Deinit isolate after exiting. self.exit(); self.context.deinit(); self.isolate.deinit(); v8.destroyArrayBufferAllocator(self.create_params.array_buffer_allocator.?); } /// No other v8 isolate should execute js until exit is called. fn enter(self: *Self) void { self.isolate.enter(); self.hscope.init(self.isolate); self.getContext().enter(); } fn exit(self: *Self) void { self.getContext().exit(); self.hscope.deinit(); self.isolate.exit(); } pub inline fn getContext(self: Self) v8.Context { return self.context.inner; } fn runModuleScriptFile(self: *Self, abs_path: []const u8) !RunModuleScriptResult { if (self.env.main_script_override) |src_override| { return self.runModuleScript(abs_path, self.env.main_script_origin orelse abs_path, src_override); } else { const src = try std.fs.cwd().readFileAlloc(self.alloc, abs_path, 1e9); defer self.alloc.free(src); return self.runModuleScript(abs_path, abs_path, src); } } fn getRenderer(self: *Self, win: *platform.Window) *graphics.Renderer { if (self.inited_renderer) { // Lazy load renderer. self.renderer.init(self.alloc, win); self.inited_renderer = true; } return &self.renderer; } /// origin_str is an identifier for this script and is what is displayed in stack traces. /// Normally it is set to the abs_path but somtimes it can be different (eg. 
for in memory scripts for tests) /// Even though the src is provided, abs_path is still needed to set up import path resolving. /// Returns a result with a success flag. /// If a js exception was thrown, the stack trace is printed to stderr and also attached to the result. fn runModuleScript(self: *Self, abs_path: []const u8, origin_str: []const u8, src: []const u8) !RunModuleScriptResult { const iso = self.isolate; const js_origin_str = iso.initStringUtf8(origin_str); const js_src = iso.initStringUtf8(src); var try_catch: v8.TryCatch = undefined; try_catch.init(iso); defer try_catch.deinit(); var origin = v8.ScriptOrigin.init(iso, js_origin_str.toValue(), 0, 0, false, -1, null, false, false, true, null, ); var mod_src: v8.ScriptCompilerSource = undefined; // TODO: Look into CachedData. mod_src.init(js_src, origin, null); defer mod_src.deinit(); const mod = v8.ScriptCompiler.compileModule(self.isolate, &mod_src, .kNoCompileOptions, .kNoCacheNoReason) catch { const trace_str = v8x.allocPrintTryCatchStackTrace(self.alloc, self.isolate, self.getContext(), try_catch).?; self.env.errorFmt("{s}", .{trace_str}); return RunModuleScriptResult{ .state = .Failed, .mod = null, .eval = null, .js_err_trace = trace_str, }; }; std.debug.assert(mod.getStatus() == .kUninstantiated); const mod_info = ModuleInfo{ .dir = self.alloc.dupe(u8, std.fs.path.dirname(abs_path).?) 
catch unreachable, }; self.modules.put(mod.getScriptId(), mod_info) catch unreachable; // const reqs = mod.getModuleRequests(); // log.debug("reqs: {}", .{ reqs.length() }); // const req = reqs.get(self.getContext(), 0).castTo(v8.ModuleRequest); // const spec = v8x.allocPrintValueAsUtf8(self.alloc, self.isolate, self.getContext(), req.getSpecifier()); // defer self.alloc.free(spec); // log.debug("import: {s}", .{spec}); const S = struct { fn resolveModule( ctx_ptr: ?*const v8.C_Context, spec_: ?*const v8.C_Data, import_assertions: ?*const v8.C_FixedArray, referrer: ?*const v8.C_Module ) callconv(.C) ?*const v8.C_Module { _ = import_assertions; const ctx = v8.Context{ .handle = ctx_ptr.? }; const rt = stdx.mem.ptrCastAlign(*RuntimeContext, ctx.getEmbedderData(0).castTo(v8.External).get()); const js_spec = v8.String{ .handle = spec_.? }; const iso_ = ctx.getIsolate(); var origin_ = v8.ScriptOrigin.init(iso_, js_spec.toValue(), 0, 0, false, -1, null, false, false, true, null, ); const spec_str = v8x.allocStringAsUtf8(rt.alloc, iso_, js_spec); defer rt.alloc.free(spec_str); var abs_path_: []const u8 = undefined; var abs_path_needs_free = false; defer { if (abs_path_needs_free) { rt.alloc.free(abs_path_); } } if (std.fs.path.isAbsolute(spec_str)) { abs_path_ = spec_str; } else { // Build path from referrer's script dir. const referrer_mod = v8.Module{ .handle = referrer.? 
}; const referrer_info = rt.modules.get(referrer_mod.getScriptId()).?; abs_path_ = std.fmt.allocPrint(rt.alloc, "{s}/{s}", .{ referrer_info.dir, spec_str }) catch unreachable; abs_path_needs_free = true; } const src_ = std.fs.cwd().readFileAlloc(rt.alloc, abs_path_, 1e9) catch { v8x.throwErrorExceptionFmt(rt.alloc, iso_, "Failed to load module: {s}", .{spec_str}); return null; }; defer rt.alloc.free(src_); const js_src_ = iso_.initStringUtf8(src_); var mod_src_: v8.ScriptCompilerSource = undefined; mod_src_.init(js_src_, origin_, null); defer mod_src_.deinit(); var try_catch_: v8.TryCatch = undefined; try_catch_.init(iso_); defer try_catch_.deinit(); const mod_ = v8.ScriptCompiler.compileModule(iso_, &mod_src_, .kNoCompileOptions, .kNoCacheNoReason) catch { _ = try_catch_.rethrow(); return null; }; const mod_info_ = ModuleInfo{ .dir = rt.alloc.dupe(u8, std.fs.path.dirname(abs_path_).?) catch unreachable, }; rt.modules.put(mod_.getScriptId(), mod_info_) catch unreachable; return mod_.handle; } }; const success = mod.instantiate(self.getContext(), S.resolveModule) catch { const trace_str = v8x.allocPrintTryCatchStackTrace(self.alloc, self.isolate, self.getContext(), try_catch).?; self.env.errorFmt("{s}", .{trace_str}); return RunModuleScriptResult{ .state = .Failed, .mod = iso.initPersistent(v8.Module, mod), .eval = null, .js_err_trace = trace_str, }; }; if (!success) { stdx.panic("TODO: Did not expect !success."); } std.debug.assert(mod.getStatus() == .kInstantiated); const res = mod.evaluate(self.getContext()) catch { const trace_str = v8x.allocPrintTryCatchStackTrace(self.alloc, self.isolate, self.getContext(), try_catch).?; self.env.errorFmt("{s}", .{trace_str}); return RunModuleScriptResult{ .state = .Failed, .mod = iso.initPersistent(v8.Module, mod), .eval = null, .js_err_trace = trace_str, }; }; // res is a promise that resolves to undefined if successful and rejects to an exception object on error. 
_ = res; switch (mod.getStatus()) { .kErrored => { const trace_str = allocExceptionJsStackTraceString(self, mod.getException()); self.env.errorFmt("{s}", .{trace_str}); return RunModuleScriptResult{ .state = .Failed, .mod = iso.initPersistent(v8.Module, mod), .eval = iso.initPersistent(v8.Promise, res.castTo(v8.Promise)), .js_err_trace = trace_str, }; }, .kEvaluated => { const res_p = res.castTo(v8.Promise); switch (res_p.getState()) { .kFulfilled => { return RunModuleScriptResult{ .state = .Success, .mod = iso.initPersistent(v8.Module, mod), .eval = iso.initPersistent(v8.Promise, res_p), .js_err_trace = null, }; }, .kPending => { // Attempt to pump the v8 event loop once to see if it can finish the script. // If not, the script is using the worker or evented io and needs to continue with the main event loop. processV8EventLoop(self); switch (res_p.getState()) { .kRejected => { const trace_str = allocExceptionJsStackTraceString(self, mod.getException()); self.env.errorFmt("{s}", .{trace_str}); return RunModuleScriptResult{ .state = .Failed, .mod = iso.initPersistent(v8.Module, mod), .eval = iso.initPersistent(v8.Promise, res_p), .js_err_trace = trace_str, }; }, .kFulfilled => { return RunModuleScriptResult{ .state = .Success, .mod = iso.initPersistent(v8.Module, mod), .eval = iso.initPersistent(v8.Promise, res_p), .js_err_trace = null, }; }, .kPending => { return RunModuleScriptResult{ .state = .Pending, .mod = iso.initPersistent(v8.Module, mod), .eval = iso.initPersistent(v8.Promise, res_p), .js_err_trace = null, }; }, } }, else => unreachable, } }, else => unreachable, } } fn runScriptFile(self: *Self, abs_path: []const u8) !void { const src = try std.fs.cwd().readFileAlloc(self.alloc, abs_path, 1e9); defer self.alloc.free(src); return self.runScript(abs_path, src); } fn runScript(self: *Self, origin: []const u8, src: []const u8) !void { const res = self.runScriptGetResult(origin, src); defer res.deinit(); if (!res.success) { self.env.errorFmt("{s}", 
.{res.err.?}); return error.RunScriptError; } } pub fn runScriptGetResult(self: *Self, origin: []const u8, src: []const u8) v8x.ExecuteResult { const js_origin = v8.String.initUtf8(self.isolate, origin); var res: v8x.ExecuteResult = undefined; v8x.executeString(self.alloc, self.isolate, self.getContext(), src, js_origin, &res); return res; } fn runMainScript(self: *Self, abs_path: []const u8) !void { self.main_script_path = self.alloc.dupe(u8, abs_path) catch unreachable; if (self.dev_mode) { // Start watching the main script. self.dev_ctx.initWatcher(self, abs_path); } const res = try self.runModuleScriptFile(self.main_script_path.?); self.run_main_script_res = res; switch (res.state) { .Failed => { self.finishMainScript(); if (!self.dev_mode) { return error.MainScriptError; } else { self.dev_ctx.enterJsErrorState(self, res.js_err_trace.?); } }, .Success => { self.finishMainScript(); if (self.dev_mode) { self.dev_ctx.enterJsSuccessState(); } }, .Pending => { // Since module has a handler for top level async calls, it won't trigger the uncaught exception callback. // Attach then and catch handlers to handle the final outcome. self.main_script_done = false; const data = self.isolate.initExternal(self); const on_fulfill = v8.Function.initWithData(self.getContext(), gen.genJsFuncSync(handleMainModuleScriptSuccess), data); const on_reject = v8.Function.initWithData(self.getContext(), gen.genJsFuncSync(handleMainModuleScriptError), data); _ = res.eval.?.inner.thenAndCatch(self.getContext(), on_fulfill, on_reject) catch unreachable; if (self.dev_mode) { self.dev_ctx.enterJsSuccessState(); } } } } pub fn wakeUpEventPoller(self: Self) void { const res = uv.uv_async_send(self.uv_dummy_async); uv.assertNoError(res); } /// Destroys the resource owned by the handle and marks it as deinited. /// If the resource can't be deinited immediately, the final deinitResourceHandle call will be deferred. 
    pub fn startDeinitResourceHandle(self: *Self, id: ResourceId) void {
        const handle = self.resources.getPtrNoCheck(id);
        if (handle.deinited) {
            log.debug("Already deinited", .{});
            unreachable;
        }
        switch (handle.tag) {
            .CsWindow => {
                // TODO: This should do cleanup like deleteCsWindowBySdlId
                const window = stdx.mem.ptrCastAlign(*CsWindow, handle.ptr);
                if (self.dev_mode and self.dev_ctx.restart_requested) {
                    // Skip deiniting the window for a dev mode restart.
                    window.deinit(self, self.dev_ctx.dev_window == window);
                } else {
                    window.deinit(self, false);
                }
                // Update current vars.
                self.num_windows -= 1;
                if (self.num_windows > 0) {
                    if (self.active_window == stdx.mem.ptrCastAlign(*CsWindow, handle.ptr)) {
                        // TODO: Revisit this. For now just pick the last available window.
                        const list_id = self.getResourceListId(handle.tag);
                        if (self.resources.findInList(list_id, {}, findFirstActiveResource)) |res_id| {
                            self.active_window = stdx.mem.ptrCastAlign(*CsWindow, self.resources.getNoCheck(res_id).ptr);
                        }
                    }
                } else {
                    // No windows remain; active_window is intentionally left unset.
                    self.active_window = undefined;
                }
                self.deinitResourceHandleInternal(id);
            },
            .CsHttpServer => {
                const server = stdx.mem.ptrCastAlign(*HttpServer, handle.ptr);
                if (server.closed) {
                    self.deinitResourceHandleInternal(id);
                } else {
                    // Server still open: request shutdown and defer the final deinit
                    // to the shutdown callback.
                    const S = struct {
                        fn onShutdown(ptr: *anyopaque, _: *HttpServer) void {
                            const ctx = stdx.mem.ptrCastAlign(*ExternalResourceHandle, ptr);
                            ctx.rt.deinitResourceHandleInternal(ctx.res_id);
                        }
                    };
                    // TODO: Should set cb atomically with shutdown op.
                    const cb = stdx.Callback(*anyopaque, *HttpServer).init(handle.external_handle, S.onShutdown);
                    server.on_shutdown_cb = cb;
                    server.requestShutdown();
                    server.deinitPreClosing();
                }
            },
            .Dummy => {},
        }
        handle.deinited = true;
    }

    // Internal func. Called when ready to actually free the handle
    fn deinitResourceHandleInternal(self: *Self, id: ResourceId) void {
        const handle = self.resources.getNoCheck(id);
        // Fire callback.
        if (handle.on_deinit_cb) |cb| {
            cb.call(id);
        }
        // Free the native object behind the handle.
        switch (handle.tag) {
            .CsWindow => {
                self.alloc.destroy(stdx.mem.ptrCastAlign(*CsWindow, handle.ptr));
            },
            .CsHttpServer => {
                self.alloc.destroy(stdx.mem.ptrCastAlign(*HttpServer, handle.ptr));
            },
            else => unreachable,
        }
    }

    // v8 message listener: prints the first uncaught js exception and flags the
    // runtime so it can look for the next opportunity to shut down.
    fn v8MessageCallback(message: ?*const v8.C_Message, value: ?*const v8.C_Value) callconv(.C) void {
        // `value` carries the external that was registered with the listener (the RuntimeContext).
        const val = v8.Value{.handle = value.?};
        const rt = stdx.mem.ptrCastAlign(*RuntimeContext, val.castTo(v8.External).get());

        // Only interested in the first uncaught exception.
        if (!rt.received_uncaught_exception) {
            // Print the stack trace immediately.
            const js_msg = v8.Message{ .handle = message.? };
            const err_str = v8x.allocPrintMessageStackTrace(rt.alloc, rt.isolate, rt.getContext(), js_msg, "Uncaught Exception");
            defer rt.alloc.free(err_str);
            rt.env.errorFmt("\n{s}", .{err_str});
            rt.received_uncaught_exception = true;
            if (rt.dev_mode) {
                rt.dev_ctx.enterJsErrorState(rt, err_str);
            }
        }
    }

    /// Collects the ids of all resources with the given tag.
    /// Caller owns the returned slice (allocated with self.alloc).
    pub fn allocResourceIdsByTag(self: Self, tag: ResourceTag) []const ResourceId {
        const list = self.getResourceListId(tag);
        var cur_res = self.resources.getListHead(list).?;
        // Skip the dummy head node inserted at list creation.
        cur_res = self.resources.getNextIdNoCheck(cur_res);
        var res = std.ArrayList(ResourceId).init(self.alloc);
        while (cur_res != NullId) {
            res.append(cur_res) catch unreachable;
            cur_res = self.resources.getNextIdNoCheck(cur_res);
        }
        return res.toOwnedSlice();
    }

    /// Returns a typed pointer to the resource, or null if the id is unknown
    /// or its tag doesn't match.
    pub fn getResourcePtr(self: *Self, comptime Tag: ResourceTag, res_id: ResourceId) ?*Resource(Tag) {
        if (self.resources.has(res_id)) {
            const item = self.resources.getNoCheck(res_id);
            if (item.tag == Tag) {
                return stdx.mem.ptrCastAlign(*Resource(Tag), item.ptr);
            }
        }
        return null;
    }

    /// Deinits the weak handle's native side (unless already nulled),
    /// releases its js object, and removes it from the map.
    pub fn destroyWeakHandle(self: *Self, id: WeakHandleId) void {
        const handle = self.weak_handles.getPtr(id).?;
        if (handle.tag != .Null) {
            handle.deinit(self);
        }
        handle.obj.deinit();
        self.weak_handles.remove(id);
    }

    /// Allocates an HttpServer, links it at the tail of the generic resource list,
    /// and pairs it with an ExternalResourceHandle used by js finalizer callbacks.
    pub fn createCsHttpServerResource(self: *Self) CreatedResource(HttpServer) {
        const ptr = self.alloc.create(HttpServer) catch unreachable;
        self.generic_resource_list_last = self.resources.insertAfter(self.generic_resource_list_last, .{
            .ptr = ptr,
            .tag = .CsHttpServer,
            .external_handle = undefined,
            .deinited = false,
            .on_deinit_cb = null,
        }) catch unreachable;
        const res_id = self.generic_resource_list_last;

        // Back-pointer from the resource to the runtime + id, for finalizers.
        const external = self.alloc.create(ExternalResourceHandle) catch unreachable;
        external.* = .{
            .rt = self,
            .res_id = res_id,
        };
        self.resources.getPtrNoCheck(res_id).external_handle = external;
        return .{
            .ptr = ptr,
            .id = res_id,
            .external = external,
        };
    }

    /// Same shape as createCsHttpServerResource but for windows; also bumps num_windows.
    pub fn createCsWindowResource(self: *Self) CreatedResource(CsWindow) {
        const ptr = self.alloc.create(CsWindow) catch unreachable;
        self.window_resource_list_last = self.resources.insertAfter(self.window_resource_list_last, .{
            .ptr = ptr,
            .tag = .CsWindow,
            .external_handle = undefined,
            .deinited = false,
            .on_deinit_cb = null,
        }) catch unreachable;
        const res_id = self.window_resource_list_last;

        // Back-pointer from the resource to the runtime + id, for finalizers.
        const external = self.alloc.create(ExternalResourceHandle) catch unreachable;
        external.* = .{
            .rt = self,
            .res_id = res_id,
        };
        self.resources.getPtrNoCheck(res_id).external_handle = external;

        self.num_windows += 1;
        return .{
            .ptr = ptr,
            .id = res_id,
            .external = external,
        };
    }

    /// Destroys the ResourceHandle and removes it from the runtime.
    /// Doing so also frees the resource slot for reuse.
    /// This is called when the js handle invokes the weak finalizer.
    /// At that point no js handle still references the id so it is safe to remove the native handle.
    pub fn destroyResourceHandle(self: *Self, res_id: ResourceId) void {
        if (!self.resources.has(res_id)) {
            log.err("Expected resource id: {}", .{res_id});
            unreachable;
        }
        const res = self.resources.getPtrNoCheck(res_id);
        if (!res.deinited) {
            self.startDeinitResourceHandle(res_id);
        }
        // The external handle is kept alive after the deinit step,
        // since it's needed by a finalizer callback.
if (res.tag != .Dummy) { self.alloc.destroy(res.external_handle); const list_id = self.getResourceListId(res.tag); if (self.resources.findInList(list_id, res_id, findPrevResource)) |prev_id| { // Remove from resources. _ = self.resources.removeNext(prev_id) catch unreachable; if (res.tag == .CsWindow) { if (self.window_resource_list_last == res_id) { self.window_resource_list_last = prev_id; } } else if (res.tag == .CsHttpServer) { if (self.generic_resource_list == res_id) { self.generic_resource_list = prev_id; } } else unreachable; } else unreachable; } } fn getResourceListId(self: Self, tag: ResourceTag) ResourceListId { switch (tag) { .CsWindow => return self.window_resource_list, .CsHttpServer => return self.generic_resource_list, else => unreachable, } } fn findFirstActiveResource(_: void, buf: ds.CompactManySinglyLinkedList(ResourceListId, ResourceId, ResourceHandle), item_id: ResourceId) bool { return !buf.getNoCheck(item_id).deinited; } fn findPrevResource(target: ResourceId, buf: ds.CompactManySinglyLinkedList(ResourceListId, ResourceId, ResourceHandle), item_id: ResourceId) bool { return buf.getNextIdNoCheck(item_id) == target; } fn getCsWindowResourceBySdlId(self: *Self, sdl_win_id: u32) ?ResourceId { if (Backend != .OpenGL) { @panic("unsupported"); } const S = struct { fn pred(_sdl_win_id: u32, buf: ds.CompactManySinglyLinkedList(ResourceListId, ResourceId, ResourceHandle), item_id: ResourceId) bool { const res = buf.getNoCheck(item_id); // Skip dummy head. if (res.tag == .Dummy) { return false; } const cs_window = stdx.mem.ptrCastAlign(*CsWindow, res.ptr); return cs_window.window.inner.id == _sdl_win_id; } }; return self.resources.findInList(self.window_resource_list, sdl_win_id, S.pred) orelse return null; } pub fn getJsValue(self: Self, native_val: anytype) v8.Value { return .{ .handle = self.getJsValuePtr(native_val), }; } /// Returns raw value pointer so we don't need to convert back to a v8.Value. 
pub fn getJsValuePtr(self: Self, native_val: anytype) *const v8.C_Value { const Type = @TypeOf(native_val); const iso = self.isolate; const ctx = self.context.inner; switch (Type) { void => return self.js_undefined.handle, i16 => return iso.initIntegerI32(native_val).handle, u8 => return iso.initIntegerU32(native_val).handle, u16 => return iso.initIntegerU32(native_val).handle, u32 => return iso.initIntegerU32(native_val).handle, F64SafeUint => return iso.initNumber(@intToFloat(f64, native_val)).handle, u64 => return iso.initBigIntU64(native_val).handle, f32 => return iso.initNumber(native_val).handle, f64 => return iso.initNumber(native_val).handle, bool => return iso.initBoolean(native_val).handle, stdx.http.Response => { const headers_buf = self.alloc.alloc(v8.Value, native_val.headers.len) catch unreachable; defer self.alloc.free(headers_buf); for (native_val.headers) |header, i| { const js_header = self.default_obj_t.inner.initInstance(ctx); _ = js_header.setValue(ctx, iso.initStringUtf8("key"), iso.initStringUtf8(native_val.header[header.key.start..header.key.end])); _ = js_header.setValue(ctx, iso.initStringUtf8("value"), iso.initStringUtf8(native_val.header[header.value.start..header.value.end])); headers_buf[i] = .{ .handle = js_header.handle }; } const new = self.http_response_class.inner.getFunction(ctx).initInstance(ctx, &.{}).?; _ = new.setValue(ctx, iso.initStringUtf8("status"), iso.initIntegerU32(native_val.status_code)); _ = new.setValue(ctx, iso.initStringUtf8("headers"), iso.initArrayElements(headers_buf)); _ = new.setValue(ctx, iso.initStringUtf8("body"), iso.initStringUtf8(native_val.body)); return new.handle; }, graphics.Image => { const new = self.image_class.inner.getFunction(ctx).initInstance(ctx, &.{}).?; new.setInternalField(0, iso.initIntegerU32(native_val.id)); _ = new.setValue(ctx, iso.initStringUtf8("width"), iso.initIntegerU32(@intCast(u32, native_val.width))); _ = new.setValue(ctx, iso.initStringUtf8("height"), 
iso.initIntegerU32(@intCast(u32, native_val.height))); return new.handle; }, cs_graphics.Color => { const new = self.color_class.inner.getFunction(ctx).initInstance(ctx, &.{}).?; _ = new.setValue(ctx, iso.initStringUtf8("r"), iso.initIntegerU32(native_val.r)); _ = new.setValue(ctx, iso.initStringUtf8("g"), iso.initIntegerU32(native_val.g)); _ = new.setValue(ctx, iso.initStringUtf8("b"), iso.initIntegerU32(native_val.b)); _ = new.setValue(ctx, iso.initStringUtf8("a"), iso.initIntegerU32(native_val.a)); return new.handle; }, cs_graphics.Transform => { const new = self.transform_class.inner.getFunction(ctx).initInstance(ctx, &.{}).?; var buf: [16]v8.Value = undefined; for (native_val.mat) |it, i| { buf[i] = iso.initNumber(it).toValue(); } _ = new.setValue(ctx, iso.initStringUtf8("mat"), iso.initArrayElements(&buf)); return new.handle; }, Uint8Array => { const store = v8.BackingStore.init(iso, native_val.buf.len); if (store.getData()) |ptr| { const buf = @ptrCast([*]u8, ptr); std.mem.copy(u8, buf[0..native_val.buf.len], native_val.buf); } var shared = store.toSharedPtr(); defer v8.BackingStore.sharedPtrReset(&shared); const array_buffer = v8.ArrayBuffer.initWithBackingStore(iso, &shared); const js_uint8arr = v8.Uint8Array.init(array_buffer, 0, native_val.buf.len); return js_uint8arr.handle; }, v8.Value, v8.Boolean, v8.Object, v8.Array, v8.Promise => return native_val.handle, PromiseSkipJsGen => return native_val.inner.handle, []const u8 => { return iso.initStringUtf8(native_val).handle; }, []const api.cs_files.FileEntry => { const buf = self.alloc.alloc(v8.Value, native_val.len) catch unreachable; defer self.alloc.free(buf); for (native_val) |it, i| { const obj = self.default_obj_t.inner.initInstance(ctx); _ = obj.setValue(ctx, iso.initStringUtf8("name"), iso.initStringUtf8(it.name)); _ = obj.setValue(ctx, iso.initStringUtf8("kind"), iso.initStringUtf8(it.kind)); buf[i] = obj.toValue(); } return iso.initArrayElements(buf).handle; }, ds.Box([]const u8) => { return 
iso.initStringUtf8(native_val.slice).handle; }, anyerror => { // TODO: Should this be an Error/Exception object instead? const str = std.fmt.allocPrint(self.alloc, "{}", .{native_val}) catch unreachable; defer self.alloc.free(str); return iso.initStringUtf8(str).handle; }, *const anyopaque => { return @ptrCast(*const v8.C_Value, native_val); }, v8.Persistent(v8.Object) => { return native_val.inner.handle; }, else => { if (@typeInfo(Type) == .Optional) { if (native_val) |child_val| { return self.getJsValuePtr(child_val); } else { return self.js_null.handle; } } else if (@typeInfo(Type) == .Pointer) { if (@typeInfo(Type).Pointer.size == .Slice) { const buf = self.alloc.alloc(v8.Value, native_val.len) catch unreachable; defer self.alloc.free(buf); for (native_val) |child_val, i| { buf[i] = self.getJsValue(child_val); } return iso.initArrayElements(buf).handle; } } else if (@typeInfo(Type) == .Struct) { if (@hasDecl(Type, "ManagedSlice")) { return self.getJsValuePtr(native_val.slice); } else if (@hasDecl(Type, "ManagedStruct")) { return self.getJsValuePtr(native_val.val); } else if (@hasDecl(Type, "RtTempStruct")) { return self.getJsValuePtr(native_val.inner); } else { // Generic struct to js object. // TODO: Is it more performant to initialize from an object template if we know the fields beforehand? const obj = iso.initObject(); const Fields = std.meta.fields(Type); inline for (Fields) |Field| { _ = obj.setValue(ctx, iso.initStringUtf8(Field.name), self.getJsValue(@field(native_val, Field.name))); } return obj.handle; } } else if (@typeInfo(Type) == .Enum) { if (@hasDecl(Type, "IsStringSumType")) { // string value. return iso.initStringUtf8(@tagName(native_val)).handle; } else { // int value. 
return iso.initIntegerU32(@enumToInt(native_val)).handle; } } comptime @compileError(std.fmt.comptimePrint("Unsupported conversion from {s} to js.", .{@typeName(Type)})); }, } } /// functions with error returns have problems being inside inlines so a quick hack is to return an optional /// and set a temporary error var. pub inline fn getNativeValue2(self: *Self, comptime T: type, val: anytype) ?T { return self.getNativeValue(T, val) catch |err| { self.get_native_val_err = err; return null; }; } // TODO: Rename to getNativeArgValue to indicate it's meant to be used with converting from js callback args. /// Converts a js value to a target native type. /// Slice-like types depend on temporary buffers. /// This can't easily reuse runtime.getNativeValue since we are using temporary buffers, and objects/arrays can have nested children. /// Returns an error if conversion failed. pub fn getNativeValue(self: *Self, comptime T: type, val: anytype) !T { const ctx = self.getContext(); switch (T) { []const f32 => { if (val.isArray()) { const len = val.castTo(v8.Array).length(); var i: u32 = 0; const obj = val.castTo(v8.Object); const start = self.cb_f32_buf.items.len; self.cb_f32_buf.resize(start + len) catch unreachable; while (i < len) : (i += 1) { const child_val = obj.getAtIndex(ctx, i) catch return error.CantConvert; self.cb_f32_buf.items[start + i] = child_val.toF32(ctx) catch return error.CantConvert; } return self.cb_f32_buf.items[start..]; } else return error.CantConvert; }, []const u8 => { if (@TypeOf(val) == SizedJsString) { return appendSizedJsStringAssumeCap(&self.cb_str_buf, self.isolate, val); } else { return v8x.appendValueAsUtf8(&self.cb_str_buf, self.isolate, ctx, val); } }, bool => return val.toBool(self.isolate), i32 => return val.toI32(ctx), u8 => return @intCast(u8, val.toU32(ctx) catch return error.CantConvert), u16 => return @intCast(u16, val.toU32(ctx) catch return error.CantConvert), u32 => return val.toU32(ctx), f32 => return val.toF32(ctx), u64 => { 
if (val.isBigInt()) { return val.castTo(v8.BigInt).getUint64(); } else { return @intCast(u64, val.toU32(ctx) catch return error.CantConvert); } }, graphics.Image => { if (val.isObject()) { const obj = val.castTo(v8.Object); if (obj.toValue().instanceOf(ctx, self.image_class.inner.getFunction(ctx).toObject()) catch return error.CantConvert) { const image_id = obj.getInternalField(0).toU32(ctx) catch return error.CantConvert; return graphics.Image{ .id = image_id, .width = 0, .height = 0 }; } } return error.CantConvert; }, Uint8Array => { if (val.isUint8Array()) { var shared_store = val.castTo(v8.ArrayBufferView).getBuffer().getBackingStore(); defer v8.BackingStore.sharedPtrReset(&shared_store); const store = v8.BackingStore.sharedPtrGet(&shared_store); const len = store.getByteLength(); if (len > 0) { const buf = @ptrCast([*]u8, store.getData().?); return Uint8Array{ .buf = buf[0..len] }; } else return Uint8Array{ .buf = "" }; } else return error.CantConvert; }, v8.Uint8Array => { if (val.isUint8Array()) { return val.castTo(v8.Uint8Array); } else return error.CantConvert; }, v8.Function => { if (val.isFunction()) { return val.castTo(v8.Function); } else return error.CantConvert; }, v8.Object => { if (val.isObject()) { return val.castTo(v8.Object); } else return error.CantConvert; }, v8.Value => return val, std.StringHashMap([]const u8) => { if (val.isObject()) { const obj = val.castTo(v8.Object); var native_val = std.StringHashMap([]const u8).init(self.alloc); const keys = obj.getOwnPropertyNames(ctx); const keys_obj = keys.castTo(v8.Object); const num_keys = keys.length(); var i: u32 = 0; while (i < num_keys) { const key = keys_obj.getAtIndex(ctx, i) catch return error.CantConvert; const key_str = v8x.allocValueAsUtf8(self.alloc, self.isolate, ctx, key); if (self.getNativeValue([]const u8, key)) |child_native_val| { native_val.put(key_str, child_native_val) catch unreachable; } else |_| {} } return native_val; } else return error.CantConvert; }, else => { if 
(@typeInfo(T) == .Struct) { if (val.isObject()) { if (@hasDecl(T, "Handle")) { const Ptr = stdx.meta.FieldType(T, .ptr); const handle_id = @intCast(u32, @ptrToInt(val.castTo(v8.Object).getInternalField(0).castTo(v8.External).get())); const handle = self.weak_handles.getNoCheck(handle_id); if (handle.tag != .Null) { return T{ .ptr = stdx.mem.ptrCastAlign(Ptr, handle.ptr), .id = handle_id, .obj = val.castTo(v8.Object), }; } else { return error.HandleExpired; } } else { const obj = val.castTo(v8.Object); var native_val: T = undefined; if (comptime hasAllOptionalFields(T)) { native_val = .{}; } const Fields = std.meta.fields(T); inline for (Fields) |Field| { if (@typeInfo(Field.field_type) == .Optional) { const child_val = obj.getValue(ctx, self.isolate.initStringUtf8(Field.name)) catch return error.CantConvert; const Child = comptime @typeInfo(Field.field_type).Optional.child; if (child_val.isNullOrUndefined()) { @field(native_val, Field.name) = null; } else { @field(native_val, Field.name) = self.getNativeValue2(Child, child_val); } } else { const js_val = obj.getValue(ctx, self.isolate.initStringUtf8(Field.name)) catch return error.CantConvert; if (self.getNativeValue2(Field.field_type, js_val)) |child_value| { @field(native_val, Field.name) = child_value; } } } return native_val; } } else return error.CantConvert; } else if (@typeInfo(T) == .Array) { const ArrayInfo = @typeInfo(T).Array; var native_val: [ArrayInfo.len]ArrayInfo.child = undefined; if (val.isArray()) { const len = val.castTo(v8.Array).length(); if (len < ArrayInfo.len) { return error.CantConvert; } else { var i: u32 = 0; const obj = val.castTo(v8.Object); while (i < len) : (i += 1) { const child_val = obj.getAtIndex(ctx, i) catch return error.CantConvert; native_val[i] = self.getNativeValue(ArrayInfo.child, child_val) catch return error.CantConvert; } return native_val; } } else return error.CantConvert; } else if (@typeInfo(T) == .Enum) { if (@hasDecl(T, "IsStringSumType")) { // String to enum 
conversion. const lower = v8x.appendValueAsUtf8Lower(&self.cb_str_buf, self.isolate, ctx, val); const Fields = @typeInfo(T).Enum.fields; inline for (Fields) |Field| { // Compare with lower case. if (std.mem.eql(u8, lower, comptime ctLower(Field.name))) { return @intToEnum(T, Field.value); } } return error.CantConvert; } else { // Integer to enum conversion. const ival = val.toU32(ctx) catch return error.CantConvert; return std.meta.intToEnum(T, ival) catch { if (@hasDecl(T, "Default")) { return T.Default; } else return error.CantConvert; }; } } else { comptime @compileError(std.fmt.comptimePrint("Unsupported conversion from {s} to {s}", .{ @typeName(@TypeOf(val)), @typeName(T) })); } }, } } fn handleMouseDownEvent(self: *Self, e: api.cs_input.MouseDownEvent, comptime DevMode: bool) void { if (DevMode and self.dev_ctx.has_error) { return; } const ctx = self.getContext(); if (self.active_window.on_mouse_down_cb) |cb| { const js_event = self.getJsValue(e); _ = cb.inner.call(ctx, self.active_window.js_window, &.{ js_event }); } } fn handleMouseUpEvent(self: *Self, e: api.cs_input.MouseUpEvent, comptime DevMode: bool) void { if (DevMode and self.dev_ctx.has_error) { return; } const ctx = self.getContext(); if (self.active_window.on_mouse_up_cb) |cb| { const js_event = self.getJsValue(e); _ = cb.inner.call(ctx, self.active_window.js_window, &.{ js_event }); } } fn handleMouseMoveEvent(self: *Self, e: api.cs_input.MouseMoveEvent, comptime DevMode: bool) void { if (DevMode and self.dev_ctx.has_error) { return; } const ctx = self.getContext(); if (self.active_window.on_mouse_move_cb) |cb| { const js_event = self.getJsValue(e); _ = cb.inner.call(ctx, self.active_window.js_window, &.{ js_event }); } } fn handleKeyUpEvent(self: *Self, e: api.cs_input.KeyUpEvent, comptime DevMode: bool) void { if (DevMode) { // Manual restart hotkey. 
if (e.key == .f5) { self.dev_ctx.requestRestart(); } if (self.dev_ctx.has_error) { return; } } const ctx = self.getContext(); if (self.active_window.on_key_up_cb) |cb| { const js_event = self.getJsValue(e); _ = cb.inner.call(ctx, self.active_window.js_window, &.{ js_event }); } } fn handleKeyDownEvent(self: *Self, e: api.cs_input.KeyDownEvent, comptime DevMode: bool) void { if (DevMode and self.dev_ctx.has_error) { return; } const ctx = self.getContext(); if (self.active_window.on_key_down_cb) |cb| { const js_event = self.getJsValue(e); _ = cb.inner.call(ctx, self.active_window.js_window, &.{ js_event }); } } pub fn evalModuleScript(self: *Self, js: []const u8) !RunModuleScriptResult { return self.runModuleScript("/eval", "eval", js); } pub fn attachPromiseHandlers( self: *Self, p: v8.Promise, ctx_ptr: anytype, comptime on_success: fn (@TypeOf(ctx_ptr), *RuntimeContext, v8.Value) void, comptime on_failure: fn (@TypeOf(ctx_ptr), *RuntimeContext, v8.Value) void, ) !void { const Ptr = @TypeOf(ctx_ptr); const S = struct { fn onSuccess(rt: *RuntimeContext, ctx: FuncDataUserPtr(Ptr), val: v8.Value) void { on_success(ctx.ptr, rt, val); } fn onFailure(rt: *RuntimeContext, ctx: FuncDataUserPtr(Ptr), val: v8.Value) void { on_failure(ctx.ptr, rt, val); } }; const rt_val = self.isolate.initExternal(self); const data = self.rt_ctx_tmpl.inner.initInstance(self.getContext()); data.setInternalField(0, rt_val); const ctx_val = self.isolate.initExternal(ctx_ptr); data.setInternalField(1, ctx_val); const js_on_success = v8.Function.initWithData(self.getContext(), gen.genJsFunc(S.onSuccess, .{ .asyncify = false, .is_data_rt = false, }), data); const js_on_failure = v8.Function.initWithData(self.getContext(), gen.genJsFunc(S.onFailure, .{ .asyncify = false, .is_data_rt = false, }), data); _ = try p.thenAndCatch(self.getContext(), js_on_success, js_on_failure); } /// Currently only used in test env where a callback wants to end the runtime. 
/// Requests a runtime shutdown. Only takes effect in the test build:
/// sets the flag and pings the dummy uv async handle so the main poller
/// wakes up and can observe the request.
pub fn requestShutdown(self: *Self) void {
    if (builtin.is_test) {
        self.requested_shutdown = true;
        const res = uv.uv_async_send(self.uv_dummy_async);
        uv.assertNoError(res);
    }
}

/// Marks the main script as done executing. In the test build this also
/// fires the environment's optional on_main_script_done hook.
pub fn finishMainScript(self: *Self) void {
    self.main_script_done = true;
    if (builtin.is_test) {
        if (self.env.on_main_script_done) |handler| {
            // Test hooks are not expected to fail.
            handler(self.env.on_main_script_done_ctx, self) catch unreachable;
        }
    }
}

/// Spawns a child process on the runtime's uv loop.
/// cmd[0] is the executable; `cb` receives the child's captured stdout when it
/// exits. The created ProcessHandle owns itself and frees itself after both of
/// its uv handles have closed (see ProcessHandle below).
pub fn spawnProcess(self: *Self, ctx: ?*anyopaque, cmd: []const []const u8, cb: ProcessEndCallback) !void {
    const new = try ProcessHandle.create(self.alloc, self.uv_loop, cmd);
    new.user_ctx = ctx;
    new.user_cb = cb;
}
};

/// Invoked once when a spawned process exits; `output` is the accumulated
/// stdout. The slice is owned by the handle and freed right after the call.
const ProcessEndCallback = fn (ctx: ?*anyopaque, output: []const u8) void;

/// Self-owning wrapper around a libuv child process with a stdout pipe.
/// Lifecycle: create -> uv callbacks accumulate stdout -> onExit invokes the
/// user callback -> startDestroy closes both uv handles -> the second close
/// callback frees the allocation.
const ProcessHandle = struct {
    handle: uv.uv_process_t,
    // Read end of the child's stdout pipe.
    out: uv.uv_pipe_t,
    // Accumulated stdout; handed to user_cb on exit.
    out_buf: std.ArrayList(u8),
    user_cb: ProcessEndCallback,
    user_ctx: ?*anyopaque,
    alloc: std.mem.Allocator,
    // Counts fired close callbacks (out pipe + process handle). The callback
    // that sees it already at 1 is the second one and destroys self.
    closed_handles: u1,

    const Self = @This();

    /// Allocates the handle, wires up the stdout pipe, and spawns the process.
    /// Returns error.MissingBin if the executable is not found.
    fn create(alloc: std.mem.Allocator, loop: *uv.uv_loop_t, cmd: []const []const u8) !*Self {
        const new = alloc.create(ProcessHandle) catch @panic("error");
        new.out_buf = std.ArrayList(u8).init(alloc);
        new.alloc = alloc;
        new.closed_handles = 0;
        // On spawn failure the uv handles must still be closed (see comment in
        // startDestroy); this also frees out_buf.
        errdefer new.startDestroy();
        var res = uv.uv_pipe_init(loop, &new.out, 0);
        uv.assertNoError(res);
        const cargs = stdx.cstr.allocCStrings(alloc, cmd) catch @panic("error");
        defer alloc.free(cargs);
        const cfile = std.cstr.addNullByte(alloc, cmd[0]) catch @panic("error");
        defer alloc.free(cfile);
        var opts: uv.uv_process_options_t = undefined;
        opts.file = cfile;
        opts.args = stdx.mem.ptrCastAlign([*c][*c]u8, cargs.ptr);
        opts.exit_cb = onExit;
        opts.flags = 0;
        opts.env = null;
        opts.cwd = null;
        opts.stdio_count = 3;
        // Stack-local is fine: libuv copies the options during uv_spawn.
        var stdio: [3]uv.uv_stdio_container_t = undefined;
        opts.stdio = &stdio;
        opts.stdio[0].flags = uv.UV_IGNORE;
        // Child's stdout is piped into new.out.
        opts.stdio[1].flags = uv.UV_CREATE_PIPE | uv.UV_WRITABLE_PIPE;
        opts.stdio[1].data.stream = @ptrCast(*uv.uv_stream_t, &new.out);
        opts.stdio[2].flags = uv.UV_IGNORE;
        opts.uid = 0;
        opts.gid = 0;
        res = uv.uv_spawn(loop, &new.handle, &opts);
        switch (res) {
            uv.UV_ENOENT => {
                return error.MissingBin;
            },
            else => uv.assertNoError(res),
        }
        res = uv.uv_read_start(@ptrCast(*uv.uv_stream_t, &new.out), onAlloc, onRead);
        uv.assertNoError(res);
        return new;
    }

    /// uv read callback for the stdout pipe. Appends the chunk to out_buf and
    /// frees the buffer that onAlloc handed to libuv.
    fn onRead(stream: [*c]uv.uv_stream_t, nread: isize, buf: [*c]const uv.uv_buf_t) callconv(.C) void {
        const self = @fieldParentPtr(Self, "out", @ptrCast(*uv.uv_pipe_t, stream));
        // buf was allocated in onAlloc with len == allocation size, so this
        // free slice matches the original allocation.
        defer self.alloc.free(buf[0].base[0..buf[0].len]);
        if (nread < 0) {
            if (nread == uv.UV_EOF) {
                return;
            } else {
                @panic("Handle error");
            }
        }
        const str = buf[0].base[0..@intCast(usize, nread)];
        self.out_buf.appendSlice(str) catch @panic("error");
    }

    /// uv exit callback: delivers the accumulated stdout to the user callback,
    /// then begins tearing the handle down.
    fn onExit(ptr: [*c]uv.uv_process_t, exit_status: i64, term_signal: c_int) callconv(.C) void {
        _ = exit_status;
        _ = term_signal;
        // `handle` is the first field, so the process pointer is also self.
        const self = @ptrCast(*ProcessHandle, ptr);
        self.user_cb(self.user_ctx, self.out_buf.items);
        self.startDestroy();
    }

    /// This handle is not destroyed until all close callbacks are fired.
    fn startDestroy(self: *Self) void {
        self.out_buf.deinit();
        uv.uv_close(@ptrCast(*uv.uv_handle_t, &self.out), onCloseOut);
        // Must call close if uv_spawn failed.
        uv.uv_close(@ptrCast(*uv.uv_handle_t, &self.handle), onClose);
    }

    /// uv alloc callback: hands libuv a fresh buffer for the next read.
    /// Freed in onRead once the read completes.
    fn onAlloc(handle: [*c]uv.uv_handle_t, suggested_size: usize, out_buf: [*c]uv.uv_buf_t) callconv(.C) void {
        const self = @fieldParentPtr(Self, "out", @ptrCast(*uv.uv_pipe_t, handle));
        const buf = self.alloc.alloc(u8, suggested_size) catch @panic("error");
        out_buf[0].base = buf.ptr;
        out_buf[0].len = buf.len;
    }

    /// Close callback for the stdout pipe. Second close callback to fire
    /// destroys the handle.
    fn onCloseOut(ptr: [*c]uv.uv_handle_t) callconv(.C) void {
        const self = @fieldParentPtr(Self, "out", @ptrCast(*uv.uv_pipe_t, ptr));
        if (self.closed_handles == 1) {
            self.alloc.destroy(self);
        } else {
            self.closed_handles += 1;
        }
    }

    /// Close callback for the process handle. Mirrors onCloseOut.
    fn onClose(ptr: [*c]uv.uv_handle_t) callconv(.C) void {
        const self = @ptrCast(*Self, ptr);
        if (self.closed_handles == 1) {
            self.alloc.destroy(self);
        } else {
            self.closed_handles += 1;
        }
    }
};

/// Comptime check: true if every field of T has a default value, i.e. the
/// struct can be initialized with `.{}`.
fn hasAllOptionalFields(comptime T: type) bool {
    const Fields = comptime std.meta.fields(T);
    inline for (Fields) |Field| {
        if (Field.default_value == null) {
            return false;
        }
    }
    return true;
}

/// Comptime ASCII lower-casing of a string; used for case-insensitive
/// string-to-enum matching.
pub fn ctLower(comptime str: []const u8) []const u8 {
    return comptime blk: {
        var lower: []const u8 = &.{};
        for (str) |ch| {
            lower = lower ++ &[_]u8{std.ascii.toLower(ch)};
        }
        break :blk lower;
    };
}

/// To be converted to v8.Uint8Array.
pub const Uint8Array = struct { buf: []const u8, pub fn deinit(self: @This(), alloc: std.mem.Allocator) void { alloc.free(self.buf); } }; var galloc: std.mem.Allocator = undefined; var uncaught_promise_errors: std.AutoHashMap(u32, []const u8) = undefined; fn initGlobal(alloc: std.mem.Allocator) void { galloc = alloc; uncaught_promise_errors = std.AutoHashMap(u32, []const u8).init(alloc); } fn deinitGlobal() void { var iter = uncaught_promise_errors.valueIterator(); while (iter.next()) |err_str| { galloc.free(err_str.*); } uncaught_promise_errors.deinit(); } fn promiseRejectCallback(c_msg: v8.C_PromiseRejectMessage) callconv(.C) void { const msg = v8.PromiseRejectMessage.initFromC(c_msg); // TODO: Use V8_PROMISE_INTERNAL_FIELD_COUNT and PromiseHook to set rt handle on every promise so we have proper context. const promise = msg.getPromise(); const iso = promise.toObject().getIsolate(); const ctx = promise.toObject().getCreationContext(); switch (msg.getEvent()) { v8.PromiseRejectEvent.kPromiseRejectWithNoHandler => { // Record this uncaught incident since a follow up kPromiseHandlerAddedAfterReject can remove the record. // At a later point reportUncaughtPromiseRejections will list all of them. const str = v8x.allocValueAsUtf8(galloc, iso, ctx, msg.getValue()); const key = promise.toObject().getIdentityHash(); uncaught_promise_errors.put(key, str) catch unreachable; }, v8.PromiseRejectEvent.kPromiseHandlerAddedAfterReject => { // Remove the record. const key = promise.toObject().getIdentityHash(); const value = uncaught_promise_errors.get(key).?; galloc.free(value); _ = uncaught_promise_errors.remove(key); }, else => {}, } } fn reportUncaughtPromiseRejections(env: *Environment) void { var iter = uncaught_promise_errors.valueIterator(); while (iter.next()) |err_str| { env.errorFmt("Uncaught promise rejection: {s}\n", .{err_str.*}); } } // Main loop for running user apps. 
/// Realtime main loop for user apps: pumps SDL events, dispatches them to the
/// runtime's input handlers, renders active windows, and services the main
/// event loop when the uv poller signals work. Returns when the last window
/// closes, an uncaught exception occurs (non-dev mode), or a dev-mode restart
/// is requested.
pub fn runUserLoop(rt: *RuntimeContext, comptime DevMode: bool) void {
    const iso = rt.isolate;

    var try_catch: v8.TryCatch = undefined;
    try_catch.init(iso);
    // Allow uncaught exceptions to reach message listener.
    try_catch.setVerbose(true);
    defer try_catch.deinit();

    while (true) {
        var event: sdl.SDL_Event = undefined;
        // Drain all pending SDL events before rendering this frame.
        while (sdl.SDL_PollEvent(&event) != 0) {
            switch (event.@"type") {
                sdl.SDL_WINDOWEVENT => {
                    switch (event.window.event) {
                        sdl.SDL_WINDOWEVENT_CLOSE => {
                            if (rt.getCsWindowResourceBySdlId(event.window.windowID)) |res_id| {
                                rt.startDeinitResourceHandle(res_id);
                            }
                        },
                        sdl.SDL_WINDOWEVENT_RESIZED => handleSdlWindowResized(rt, event.window),
                        else => {},
                    }
                },
                sdl.SDL_KEYDOWN => {
                    const std_event = platform.initSdlKeyDownEvent(event.key);
                    rt.handleKeyDownEvent(api.fromStdKeyDownEvent(std_event), DevMode);
                },
                sdl.SDL_KEYUP => {
                    const std_event = platform.initSdlKeyUpEvent(event.key);
                    rt.handleKeyUpEvent(api.fromStdKeyUpEvent(std_event), DevMode);
                },
                sdl.SDL_MOUSEBUTTONDOWN => {
                    const std_event = platform.initSdlMouseDownEvent(event.button);
                    rt.handleMouseDownEvent(api.fromStdMouseDownEvent(std_event), DevMode);
                },
                sdl.SDL_MOUSEBUTTONUP => {
                    const std_event = platform.initSdlMouseUpEvent(event.button);
                    rt.handleMouseUpEvent(api.fromStdMouseUpEvent(std_event), DevMode);
                },
                sdl.SDL_MOUSEMOTION => {
                    // Skip conversion work when no JS handler is registered.
                    if (rt.active_window.on_mouse_move_cb != null) {
                        const std_event = platform.initSdlMouseMoveEvent(event.motion);
                        rt.handleMouseMoveEvent(api.fromStdMouseMoveEvent(std_event), DevMode);
                    }
                },
                sdl.SDL_QUIT => {
                    // This can fire if the last window was closed or we received a sigint.
                    // If we created a window, it will capture the keyboard input so the terminal won't detect ctrl+c.
                    return;
                },
                else => {},
            }
        }

        // Receiving an uncaught exception exits in normal mode.
        // In dev mode, dev_ctx.has_error should also be set and continue to display a dev window.
        const exitFromUncaughtError = !DevMode and rt.received_uncaught_exception;

        const should_update = rt.num_windows > 0 and !exitFromUncaughtError;
        if (!should_update) {
            return;
        }

        if (rt.num_windows == 1) {
            updateSingleWindow(rt, DevMode);
        } else {
            updateMultipleWindows(rt, DevMode);
        }

        // The uv poller sets `polled` when uv work is ready; process it on the
        // main thread between frames.
        if (rt.uv_poller.polled) {
            processMainEventLoop(rt);
        }

        if (DevMode) {
            if (rt.dev_ctx.restart_requested) {
                return;
            }
        }
    }
}

/// Renders every live window resource in the window list, calling each
/// window's JS onUpdate callback, then sleeps by the smallest frame delay.
fn updateMultipleWindows(rt: *RuntimeContext, comptime DevMode: bool) void {
    _ = DevMode;
    const ctx = rt.getContext();

    // Currently, we just use the smallest delay. This forces larger target fps to be update more frequently.
    // TODO: Make windows with varying target fps work.
    var min_delay: u64 = std.math.maxInt(u64);

    var cur_res = rt.resources.getListHead(rt.window_resource_list).?;
    // Skip the list-head node itself.
    cur_res = rt.resources.getNextIdNoCheck(cur_res);
    while (cur_res != NullId) {
        const res = rt.resources.getNoCheck(cur_res);
        if (res.deinited) {
            cur_res = rt.resources.getNextIdNoCheck(cur_res);
            continue;
        }
        const win = stdx.mem.ptrCastAlign(*CsWindow, res.ptr);

        win.window.makeCurrent();
        var cam: graphics.Camera = undefined;
        cam.init2D(win.window.getWidth(), win.window.getHeight());
        rt.renderer.beginFrame(cam);

        // Start frame timer after beginFrame since it could delay to sync with OpenGL pipeline.
        win.fps_limiter.beginFrame();

        if (win.on_update_cb) |cb| {
            const g_ctx = win.js_graphics.toValue();
            _ = cb.inner.call(ctx, win.js_window, &.{ g_ctx }) orelse {
                // const trace = v8x.allocPrintTryCatchStackTrace(rt.alloc, iso, ctx, try_catch).?;
                // defer rt.alloc.free(trace);
                // errorFmt("{s}", .{trace});
                // return;
            };
        }

        rt.renderer.endFrame();
        const delay = win.fps_limiter.endFrame();
        if (delay < min_delay) {
            min_delay = delay;
        }

        cur_res = rt.resources.getNextIdNoCheck(cur_res);
    }

    platform.delay(min_delay);

    // TODO: Run any queued micro tasks.
}

/// Renders the single active window, including dev-mode HUD overlays, then
/// sleeps by the window's frame-limiter delay.
fn updateSingleWindow(rt: *RuntimeContext, comptime DevMode: bool) void {
    const ctx = rt.getContext();
    var cam: graphics.Camera = undefined;
    cam.init2D(rt.active_window.window.getWidth(), rt.active_window.window.getHeight());
    rt.renderer.beginFrame(cam);

    // Start frame timer after beginFrame since it could delay to sync with OpenGL pipeline.
    rt.active_window.fps_limiter.beginFrame();

    // Don't call user's onUpdate if dev mode has an error.
    if (!DevMode or !rt.dev_ctx.has_error) {
        if (rt.active_window.on_update_cb) |cb| {
            const g_ctx = rt.active_window.js_graphics.toValue();
            _ = cb.inner.call(ctx, rt.active_window.js_window, &.{ g_ctx }) orelse {
                // const trace = v8x.allocPrintTryCatchStackTrace(rt.alloc, iso, ctx, try_catch).?;
                // defer rt.alloc.free(trace);
                // errorFmt("{s}", .{trace});
                // return;
            };
        }
    }

    if (DevMode) {
        if (rt.dev_ctx.dev_window != null) {
            // No user windows are active. Draw a default background.
            const g = rt.active_window.graphics;
            g.pushState();
            defer g.popState();
            g.resetTransform();
            // Background.
            const Background = graphics.Color.init(30, 30, 30, 255);
            g.setFillColor(Background);
            g.fillRect(0, 0, @intToFloat(f32, rt.active_window.window.impl.width), @intToFloat(f32, rt.active_window.window.impl.height));
            devmode.renderDevHud(rt, rt.active_window);
        } else if (rt.active_window.show_dev_mode) {
            const g = rt.active_window.graphics;
            g.pushState();
            defer g.popState();
            g.resetTransform();
            devmode.renderDevHud(rt, rt.active_window);
        }
    }

    rt.renderer.endFrame();
    const delay = rt.active_window.fps_limiter.endFrame();
    if (delay > 0) {
        platform.delay(delay);
    }

    // TODO: Run any queued micro tasks.
}

const ResourceListId = u32;
pub const ResourceId = u32;

/// Discriminates what a ResourceHandle's opaque ptr points at.
pub const ResourceTag = enum {
    CsWindow,
    CsHttpServer,
    Dummy,
};

/// Maps a resource tag to its concrete resource type at comptime.
pub fn Resource(comptime Tag: ResourceTag) type {
    switch (Tag) {
        .CsWindow => return CsWindow,
        .CsHttpServer => return HttpServer,
        else => unreachable,
    }
}

/// Maps a resource pointer type back to its tag at comptime.
pub fn GetResourceTag(comptime T: type) ResourceTag {
    switch (T) {
        *HttpServer => return .CsHttpServer,
        else => @compileError("unreachable"),
    }
}

const ResourceHandle = struct {
    ptr: *anyopaque,
    tag: ResourceTag,

    // Passed into a weak finalizer callback.
    external_handle: *ExternalResourceHandle,

    // Whether the underlying resource has been deinited.
    // The handle can still remain until the js handle is no longer used.
    deinited: bool,

    on_deinit_cb: ?stdx.Callback(*anyopaque, ResourceId),
};

/// Result bundle returned when a resource of type T is created.
fn CreatedResource(comptime T: type) type {
    return struct {
        ptr: *T,
        id: ResourceId,
        external: *ExternalResourceHandle,
    };
}

/// A scripting window resource: wraps a platform window plus the JS-side
/// window/graphics objects and per-event persistent JS callbacks.
pub const CsWindow = struct {
    const Self = @This();

    window: platform.Window,
    on_update_cb: ?v8.Persistent(v8.Function),
    on_mouse_up_cb: ?v8.Persistent(v8.Function),
    on_mouse_down_cb: ?v8.Persistent(v8.Function),
    on_mouse_move_cb: ?v8.Persistent(v8.Function),
    on_key_up_cb: ?v8.Persistent(v8.Function),
    on_key_down_cb: ?v8.Persistent(v8.Function),
    on_resize_cb: ?v8.Persistent(v8.Function),
    js_window: v8.Persistent(v8.Object),

    // Managed by window handle.
    graphics: *graphics.Graphics,
    js_graphics: v8.Persistent(v8.Object),

    fps_limiter: graphics.DefaultFpsLimiter,

    show_dev_mode: bool,

    /// Initializes the resource: creates the JS window/graphics instances and
    /// links them to this native window via internal fields.
    pub fn init(self: *Self, rt: *RuntimeContext, window: platform.Window, window_id: ResourceId) void {
        // Assign early so rt.getRenderer(&self.window) below sees the window.
        self.window = window;
        const iso = rt.isolate;
        const ctx = rt.getContext();
        const js_window = rt.window_class.inner.getFunction(ctx).initInstance(ctx, &.{}).?;
        const js_window_id = iso.initIntegerU32(window_id);
        js_window.setInternalField(0, js_window_id);

        const g = rt.getRenderer(&self.window).getGraphics();
        const js_graphics = rt.graphics_class.inner.getFunction(ctx).initInstance(ctx, &.{}).?;
        js_graphics.setInternalField(0, iso.initExternal(g));

        self.* = .{
            .window = window,
            .on_update_cb = null,
            .on_mouse_up_cb = null,
            .on_mouse_down_cb = null,
            .on_mouse_move_cb = null,
            .on_key_up_cb = null,
            .on_key_down_cb = null,
            .on_resize_cb = null,
            .js_window = iso.initPersistent(v8.Object, js_window),
            .js_graphics = iso.initPersistent(v8.Object, js_graphics),
            .graphics = g,
            .fps_limiter = graphics.DefaultFpsLimiter.init(60),
            .show_dev_mode = false,
        };
    }

    /// Releases all persistent JS handles (and the window itself unless
    /// skip_window is set, eg. when the dev window is being reused).
    pub fn deinit(self: *Self, rt: *RuntimeContext, skip_window: bool) void {
        if (!skip_window) {
            self.window.deinit();
        }

        if (self.on_update_cb) |*cb| {
            cb.deinit();
        }
        if (self.on_mouse_up_cb) |*cb| {
            cb.deinit();
        }
        if (self.on_mouse_down_cb) |*cb| {
            cb.deinit();
        }
        if (self.on_mouse_move_cb) |*cb| {
            cb.deinit();
        }
        if (self.on_key_up_cb) |*cb| {
            cb.deinit();
        }
        if (self.on_key_down_cb) |*cb| {
            cb.deinit();
        }
        if (self.on_resize_cb) |*cb| {
            cb.deinit();
        }

        self.js_window.deinit();

        // Invalidate graphics ptr.
        const iso = rt.isolate;
        const zero = iso.initNumberBitCastedU64(0);
        self.js_graphics.castToObject().setInternalField(0, zero);
        self.js_graphics.deinit();
    }

    /// Resizes the native window. If the window manager clamps the requested
    /// size, a synthetic SDL resize event is pushed so JS onResize still fires.
    pub fn resize(self: *Self, width: u32, height: u32) void {
        self.window.resize(width, height);
        // SDL is designed to not fire SDL_WINDOWEVENT_RESIZED for resizes invoked from code,
        // so the user onResize handler wouldn't be called.
        // However, if the window was adjusted by the os window manager,
        // we should fire SDL_WINDOWEVENT_RESIZED just like it does for window creation.
        const final_width = self.window.getWidth();
        const final_height = self.window.getHeight();
        if (final_width != width or final_height != height) {
            if (Backend == .OpenGL) {
                var e = sdl.SDL_Event{
                    .window = sdl.SDL_WindowEvent{
                        .type = sdl.SDL_WINDOWEVENT,
                        .event = sdl.SDL_WINDOWEVENT_RESIZED,
                        .data1 = @intCast(c_int, final_width),
                        .data2 = @intCast(c_int, final_height),
                        .windowID = self.window.inner.id,
                        .timestamp = undefined,
                        .padding1 = undefined,
                        .padding2 = undefined,
                        .padding3 = undefined,
                    },
                };
                _ = sdl.SDL_PushEvent(&e);
            }
        }
    }

    /// Handles an OS-driven resize: updates the backend buffer and invokes the
    /// JS onResize callback (suppressed while dev mode shows an error).
    fn handleResizeEvent(self: *Self, rt: *RuntimeContext, e: api.cs_input.ResizeEvent) void {
        // Update the backend buffer.
        self.window.handleResize(e.width, e.height);

        if (rt.dev_mode and rt.dev_ctx.has_error) {
            return;
        }

        if (self.on_resize_cb) |cb| {
            const js_event = rt.getJsValue(e);
            _ = cb.inner.call(rt.getContext(), self.js_window, &.{ js_event });
        }
    }
};

/// V8 weak-callback finalizer for resources: when the JS object is GC'd,
/// destroy the corresponding native resource handle.
pub fn onFreeResource(c_info: ?*const v8.C_WeakCallbackInfo) callconv(.C) void {
    const info = v8.WeakCallbackInfo.initFromC(c_info);
    const ptr = info.getParameter();
    const external = stdx.mem.ptrCastAlign(*ExternalResourceHandle, ptr);
    external.rt.destroyResourceHandle(external.res_id);
}

/// Entry point for the test runner: runs the script at src_path, waits for
/// async and isolated tests, reports results, and returns whether all passed.
pub fn runTestMain(alloc: std.mem.Allocator, src_path: []const u8, env: *Environment) !bool {
    // Measure total time.
    var timer = try std.time.Timer.start();
    defer {
        const duration = timer.read();
        env.printFmt("time: {}ms\n", .{duration / @floatToInt(u64, 1e6)});
    }

    const abs_path = try std.fs.cwd().realpathAlloc(alloc, src_path);
    defer alloc.free(abs_path);

    const config = RuntimeConfig{
        .is_test_runner = true,
        .is_dev_mode = false,
    };

    var rt: RuntimeContext = undefined;
    initGlobalRuntime(alloc, &rt, config, env);
    defer deinitGlobalRuntime(alloc, &rt);

    try rt.runMainScript(abs_path);

    // Pump the event loop until all async tests have reported.
    while (rt.num_async_tests_finished < rt.num_async_tests) {
        if (pollMainEventLoop(&rt)) {
            processMainEventLoop(&rt);
            continue;
        } else break;
    }

    if (rt.num_isolated_tests_finished < rt.isolated_tests.items.len) {
        runIsolatedTests(&rt);
    }

    reportUncaughtPromiseRejections(rt.env);

    // Test results.
    rt.env.printFmt("Passed: {d}\n", .{rt.num_tests_passed});
    rt.env.printFmt("Tests: {d}\n", .{rt.num_tests});

    return rt.num_tests_passed == rt.num_tests;
}

/// Performs a restart for dev mode.
fn restart(rt: *RuntimeContext) !void {
    // log.debug("restart", .{});

    // Save context.
    const alloc = rt.alloc;
    const platform_ = rt.platform;
    var main_script_path: ?[]const u8 = null;
    if (rt.main_script_path) |path| {
        main_script_path = alloc.dupe(u8, path) catch unreachable;
    }
    defer {
        if (main_script_path) |path| {
            alloc.free(path);
        }
    }
    rt.dev_ctx.dev_window = rt.active_window; // Set dev_window so deinit skips this window resource.
    const win = rt.dev_ctx.dev_window.?.window;
    const dev_ctx = rt.dev_ctx;
    const env = rt.env;

    // Shutdown runtime.
    shutdownRuntime(rt);
    rt.exit();
    rt.deinit();

    // Start runtime again with saved context.
    const config = RuntimeConfig{
        .is_test_runner = false,
        .is_dev_mode = true,
    };
    rt.init(alloc, platform_, config, env);
    rt.enter();

    // Reuse dev context.
    rt.dev_ctx = dev_ctx;
    rt.dev_ctx.restart_requested = false;

    // Reuse window.
    const res = rt.createCsWindowResource();
    res.ptr.init(rt, win, res.id);
    rt.active_window = res.ptr;
    rt.active_window.show_dev_mode = true;
    rt.dev_ctx.cmdLog("Restarted.");
    rt.dev_ctx.dev_window = res.ptr;

    if (main_script_path) |path| {
        try rt.runMainScript(path);
    }
}

/// Shutdown other threads gracefully before starting deinit.
fn shutdownRuntime(rt: *RuntimeContext) void {
    if (rt.dev_mode) {
        rt.dev_ctx.close();
    }

    // Start deiniting resources so they queue up their final events.
    // Resources like the http server will need some time to close out their connections.
    var iter = rt.resources.nodes.iterator();
    while (iter.nextPtr()) |it| {
        const res_id = iter.cur_id;
        if (!it.data.deinited) {
            rt.startDeinitResourceHandle(res_id);
        }
    }

    if (rt.env.pump_rt_on_graceful_shutdown) {
        // Pump events for 3 seconds before force closing everything.
        pumpMainEventLoopFor(rt, 3000);
    }

    rt.timer.close();

    // Signal the uv poller thread to stop, then wake it up.
    rt.uv_poller.close_flag.store(true, .Release);

    // Make uv poller wake up with dummy update.
    var res = uv.uv_async_send(rt.uv_dummy_async);
    uv.assertNoError(res);

    // uv poller might be waiting for wakeup.
    rt.uv_poller.setPollReady();

    // Busy wait.
    while (rt.uv_poller.close_flag.load(.Acquire)) {}

    // Request workers to close.
    // On MacOS, it's especially important to make sure semaphores (eg. std.Thread.ResetEvent)
    // are not in use (their counters should be reset to the original value) or we'll get an error from libdispatch.
    for (rt.work_queue.workers.items) |worker| {
        worker.close_flag.store(true, .Release);
        worker.wakeup.set();
    }

    uv.uv_stop(rt.uv_loop);
    // Walk and close every handle.
    const S = struct {
        fn closeHandle(ptr: [*c]uv.uv_handle_t, ctx: ?*anyopaque) callconv(.C) void {
            _ = ctx;
            const handle = @ptrCast(*uv.uv_handle_t, ptr);
            // Don't close if it's already in a closing state.
            if (uv.uv_is_closing(handle) == 0) {
                uv.uv_close(handle, null);
            }
        }
    };
    uv.uv_walk(rt.uv_loop, S.closeHandle, null);
    // Run the loop until all close callbacks have fired.
    while (uv.uv_run(rt.uv_loop, uv.UV_RUN_NOWAIT) > 0) {}
    res = uv.uv_loop_close(rt.uv_loop);
    if (res == uv.UV_EBUSY) {
        @panic("Did not expect more work.");
    }

    // Wait for workers to close.
    for (rt.work_queue.workers.items) |worker| {
        while (worker.close_flag.load(.Acquire)) {}
    }

    // Wait for worker queue to finish.
    while (rt.work_queue.hasUnfinishedTasks()) {
        rt.main_wakeup.wait();
        rt.main_wakeup.reset();
        rt.work_queue.processDone();
    }

    // log.debug("shutdown runtime", .{});
}

/// Isolated tests are stored to be run later.
const IsolatedTest = struct {
    const Self = @This();

    name: []const u8,
    js_fn: v8.Persistent(v8.Function),

    fn deinit(self: *Self, alloc: std.mem.Allocator) void {
        alloc.free(self.name);
        self.js_fn.deinit();
    }
};

/// Returns whether there are pending events in libuv or the work queue.
inline fn hasPendingEvents(rt: *RuntimeContext) bool {
    // log.debug("hasPending {} {} {} {}", .{rt.uv_loop.active_handles, rt.uv_loop.active_reqs.count, rt.uv_loop.closing_handles !=null, rt.work_queue.hasUnfinishedTasks()});

    // Always return true if main script is still pending (eg. top level await new Promise(() => {}))
    if (!rt.main_script_done) {
        return true;
    }

    // There will at least be 1 active handle (the dummy async handle used to do interrupts from main thread).
    // uv handle checks is based on uv_loop_alive():
    if (builtin.os.tag == .windows) {
        return rt.uv_loop.active_handles > 1 or
            rt.uv_loop.active_reqs.count > 0 or
            rt.uv_loop.endgame_handles != null or
            rt.work_queue.hasUnfinishedTasks();
    } else {
        return rt.uv_loop.active_handles > 1 or
            rt.uv_loop.active_reqs.count > 0 or
            rt.uv_loop.closing_handles != null or
            rt.work_queue.hasUnfinishedTasks();
    }
}

/// Pumps the main event loop for a given period in milliseconds.
/// This is useful if the runtime needs to shutdown gracefully and still meet a deadline to exit the process.
fn pumpMainEventLoopFor(rt: *RuntimeContext, max_ms: u32) void {
    var timer = std.time.Timer.start() catch unreachable;

    // The implementation is very similar to pollMainEventLoop/processMainEventLoop,
    // except we check against a timer during the poll step.
    while (hasPendingEvents(rt)) {
        // Timer.read() is in nanoseconds; convert to ms for the deadline check.
        const elapsed_ms = timer.read()/1000000;
        if (elapsed_ms > max_ms) {
            return;
        }

        // Keep timeout low (200ms) so we can return and check against the timer.
        const Timeout = 200 * 1e6;
        const wait_res = rt.main_wakeup.timedWait(Timeout);
        rt.main_wakeup.reset();
        if (wait_res) |_| {
            // Nop.
        } else |err| {
            if (err == error.Timeout) {
                continue;
            } else {
                stdx.panicFmt("unknown error: {}", .{err});
            }
        }
        processMainEventLoop(rt);
    }
}

/// Waits until there is work to process.
/// If true, a follow up processMainEventLoop should be called to do the work and reset the poller.
/// If false, there are no more pending tasks, and the caller should exit the loop.
pub fn pollMainEventLoop(rt: *RuntimeContext) bool {
    while (hasPendingEvents(rt)) {
        // Wait for events.
        // log.debug("main thread wait", .{});
        // 4 second wait; on timeout just re-check for pending events.
        const Timeout = 4 * 1e9;
        const wait_res = rt.main_wakeup.timedWait(Timeout);
        rt.main_wakeup.reset();
        if (wait_res) |_| {
            // Nop.
        } else |err| {
            if (err == error.Timeout) {
                continue;
            } else {
                stdx.panicFmt("unknown error: {}", .{err});
            }
        }
        return true;
    }
    return false;
}

/// Drains all ready work: finished worker tasks, one non-blocking uv loop
/// pass, the V8 microtask/event loop, and finally re-arms the uv poller.
pub fn processMainEventLoop(rt: *RuntimeContext) void {
    // Resolve done tasks.
    rt.work_queue.processDone();

    // Run uv loop tasks.
    // [uv] Poll for i/o once but don’t block if there are no pending callbacks.
    //      Returns zero if done (no active handles or requests left),
    //      or non-zero if more callbacks are expected (meaning you should run the event loop again sometime in the future).
    _ = uv.uv_run(rt.uv_loop, uv.UV_RUN_NOWAIT);
    // log.debug("uv run {}", .{res});

    // After callbacks and js executions are done, process V8 event loop.
    processV8EventLoop(rt);

    // Re-arm the poller thread so it resumes waiting on uv i/o.
    rt.uv_poller.polled = false;
    rt.uv_poller.setPollReady();
}

/// If there are too many promises to execute for a js execution, v8 will defer the rest into it's event loop.
/// This is usually called right after a js execution.
fn processV8EventLoop(rt: *RuntimeContext) void {
    while (rt.platform.pumpMessageLoop(rt.isolate, false)) {}
}

/// Runs the queued isolated tests one at a time: each test's JS function is
/// expected to return a promise; pass/fail handlers are attached and the main
/// event loop is pumped until all tests have reported.
fn runIsolatedTests(rt: *RuntimeContext) void {
    const iso = rt.isolate;
    const ctx = rt.getContext();

    var hscope: v8.HandleScope = undefined;
    hscope.init(iso);
    defer hscope.deinit();

    var try_catch: v8.TryCatch = undefined;
    try_catch.init(iso);
    defer try_catch.deinit();

    var next_test: u32 = 0;

    while (rt.num_isolated_tests_finished < rt.isolated_tests.items.len) {
        if (rt.num_isolated_tests_finished == next_test) {
            // Start the next test.
            // Assume async test, should have already validated.
            const case = rt.isolated_tests.items[next_test];
            // log.debug("run isolated: {}/{} {s}", .{next_test, rt.isolated_tests.items.len, case.name});
            if (case.js_fn.inner.call(ctx, rt.js_undefined, &.{})) |val| {
                const promise = val.castTo(v8.Promise);

                const data = iso.initExternal(rt);
                const on_fulfilled = v8.Function.initWithData(ctx, gen.genJsFuncSync(passIsolatedTest), data);

                // Failure handler gets both the rt pointer and the test name.
                const extra_data = rt.rt_ctx_tmpl.inner.initInstance(ctx);
                extra_data.setInternalField(0, data);
                extra_data.setInternalField(1, iso.initStringUtf8(case.name));
                const on_rejected = v8.Function.initWithData(ctx, gen.genJsFunc(reportIsolatedTestFailure, .{
                    .asyncify = false,
                    .is_data_rt = false,
                }), extra_data);

                _ = promise.thenAndCatch(ctx, on_fulfilled, on_rejected) catch unreachable;

                if (promise.getState() == .kRejected or promise.getState() == .kFulfilled) {
                    // If the initial async call is already fullfilled or rejected,
                    // we'll need to run microtasks manually to run our handlers.
                    iso.performMicrotasksCheckpoint();
                }
            } else {
                const err_str = v8x.allocPrintTryCatchStackTrace(rt.alloc, iso, ctx, try_catch).?;
                defer rt.alloc.free(err_str);
                rt.env.errorFmt("Test: {s}\n{s}", .{ case.name, err_str });
                break;
            }
            next_test += 1;
        }

        if (pollMainEventLoop(rt)) {
            processMainEventLoop(rt);
            continue;
        } else {
            // Nothing in event queue.
            // Check if we're done or need to go to the next test.
            if (rt.num_isolated_tests_finished == rt.isolated_tests.items.len) {
                break;
            } else if (rt.num_isolated_tests_finished == next_test) {
                continue;
            }
            break;
        }
    }

    // Check for any js uncaught exceptions from calling into js.
    if (v8x.allocPrintTryCatchStackTrace(rt.alloc, iso, ctx, try_catch)) |err_str| {
        defer rt.alloc.free(err_str);
        rt.env.errorFmt("Uncaught Exception:\n{s}", .{ err_str });
    }
}

pub const RuntimeConfig = struct {
    is_test_runner: bool = false,
    is_dev_mode: bool = false,
};

/// Initialize libs, deps, globals, and the runtime assumed to be global.
/// This is intended to be the common setup for one global runtime.
pub fn initGlobalRuntime(alloc: std.mem.Allocator, rt: *RuntimeContext, config: RuntimeConfig, env: *Environment) void {
    _ = curl.initDefault();
    stdx.http.init(alloc);
    h2o.init();
    initGlobal(alloc);
    const platform_ = ensureV8Platform();
    rt.init(alloc, platform_, config, env);
    rt.enter();
}

/// Tears down the global runtime in reverse order of initGlobalRuntime.
pub fn deinitGlobalRuntime(_: std.mem.Allocator, rt: *RuntimeContext) void {
    shutdownRuntime(rt);
    rt.exit();
    rt.deinit();
    deinitGlobal();
    stdx.http.deinit();
    curl.deinit();
}

/// src_path is absolute or relative to the cwd.
/// Entry point for running a user app script. Sets up the global runtime,
/// runs the main script, then enters either the realtime loop (when windows
/// exist or dev mode is on) or a plain event loop until no work remains.
pub fn runUserMain(alloc: std.mem.Allocator, src_path: []const u8, dev_mode: bool, env: *Environment) !void {
    const abs_path = try std.fs.path.resolve(alloc, &.{ src_path });
    defer alloc.free(abs_path);

    const config = RuntimeConfig{
        .is_test_runner = false,
        .is_dev_mode = dev_mode,
    };

    var rt: RuntimeContext = undefined;
    initGlobalRuntime(alloc, &rt, config, env);
    defer deinitGlobalRuntime(alloc, &rt);

    if (dev_mode) {
        rt.dev_ctx.init(alloc, .{});

        // Create the dev mode window.
        // The first window created by the user script will take over this window.
        const win = platform.Window.init(rt.alloc, .{
            .width = 800,
            .height = 600,
            .title = "Dev Mode",
            .high_dpi = true,
            .resizable = true,
            .mode = .Windowed,
        }) catch unreachable;
        const res = rt.createCsWindowResource();
        res.ptr.init(&rt, win, res.id);
        rt.active_window = res.ptr;
        rt.active_window.show_dev_mode = true;

        rt.dev_ctx.cmdLog("Dev Mode started.");
        rt.dev_ctx.dev_window = res.ptr;
    }

    try rt.runMainScript(abs_path);

    // Check whether to start off with a realtime loop or event loop.
    if (!dev_mode) {
        if (rt.num_windows > 0) {
            runUserLoop(&rt, false);
        } else {
            // TODO: Detect need for realtime loop (eg. on creation of a window) and switch to runUserLoop.
            while (true) {
                if (builtin.is_test and rt.requested_shutdown) {
                    break;
                }
                if (pollMainEventLoop(&rt)) {
                    processMainEventLoop(&rt);
                    continue;
                } else break;
            }
        }
    } else {
        // Dev mode: the realtime loop returns on restart requests; keep
        // restarting until it returns without one.
        while (true) {
            runUserLoop(&rt, true);
            if (rt.dev_ctx.restart_requested) {
                try restart(&rt);
                continue;
            } else break;
        }
    }
}

pub const WeakHandleId = u32;

/// A native object exposed to JS through a weak reference; deinit dispatches
/// on the tag to free the underlying native object.
const WeakHandle = struct {
    const Self = @This();

    ptr: *anyopaque,
    tag: WeakHandleTag,
    obj: v8.Persistent(v8.Object),

    fn deinit(self: *Self, rt: *RuntimeContext) void {
        switch (self.tag) {
            .DrawCommandList => {
                const ptr = stdx.mem.ptrCastAlign(*graphics.DrawCommandList, self.ptr);
                ptr.deinit();
                rt.alloc.destroy(ptr);
            },
            .Sound => {
                const ptr = stdx.mem.ptrCastAlign(*audio.Sound, self.ptr);
                ptr.deinit(rt.alloc);
                rt.alloc.destroy(ptr);
            },
            .Random => {
                const ptr = stdx.mem.ptrCastAlign(*Random, self.ptr);
                rt.alloc.destroy(ptr);
            },
            // Null marks an expired/empty slot; nothing to free.
            .Null => {},
        }
    }
};

pub const WeakHandleTag = enum {
    DrawCommandList,
    Sound,
    Random,
    Null,
};

/// Maps a weak-handle tag to the concrete pointer type it stores.
pub fn WeakHandlePtr(comptime Tag: WeakHandleTag) type {
    return switch (Tag) {
        .DrawCommandList => *graphics.DrawCommandList,
        .Sound => *audio.Sound,
        .Random => *Random,
        else => unreachable,
    };
}

/// Convenience wrapper around v8 when constructing the v8.Context.
pub const ContextBuilder = struct {
    const Self = @This();

    rt: *RuntimeContext,
    isolate: v8.Isolate,

    /// Registers a synchronous native function on `tmpl` under `key`.
    /// The runtime context is attached as external callback data.
    pub fn setFuncT(self: Self, tmpl: anytype, key: []const u8, comptime native_cb: anytype) void {
        const data = self.isolate.initExternal(self.rt);
        self.setProp(tmpl, key, v8.FunctionTemplate.initCallbackData(self.isolate, gen.genJsFuncSync(native_cb), data));
    }

    /// Same as setFuncT but the property is read-only.
    pub fn setConstFuncT(self: Self, tmpl: anytype, key: []const u8, comptime native_cb: anytype) void {
        const data = self.isolate.initExternal(self.rt);
        self.setConstProp(tmpl, key, v8.FunctionTemplate.initCallbackData(self.isolate, gen.genJsFuncSync(native_cb), data));
    }

    /// Registers an async (promise-returning) native function as a read-only property.
    pub fn setConstAsyncFuncT(self: Self, tmpl: anytype, key: []const u8, comptime native_cb: anytype) void {
        const data = self.isolate.initExternal(self.rt);
        self.setConstProp(tmpl, key, v8.FunctionTemplate.initCallbackData(self.isolate, gen.genJsFuncAsync(native_cb), data));
    }

    /// Sets a writable property on `tmpl`. u32 values are wrapped as js integers;
    /// anything else is passed to v8 as-is.
    pub fn setProp(self: Self, tmpl: anytype, key: []const u8, value: anytype) void {
        const js_key = v8.String.initUtf8(self.isolate, key);
        switch (@TypeOf(value)) {
            u32 => {
                tmpl.set(js_key, v8.Integer.initU32(self.isolate, value), v8.PropertyAttribute.None);
            },
            else => {
                tmpl.set(js_key, value, v8.PropertyAttribute.None);
            },
        }
    }

    /// Sets a getter on a FunctionTemplate that yields a fixed native value.
    /// Passing a callback function is not implemented yet.
    pub fn setFuncGetter(self: Self, tmpl: v8.FunctionTemplate, key: []const u8, comptime native_val_or_cb: anytype) void {
        const js_key = v8.String.initUtf8(self.isolate, key);
        if (@typeInfo(@TypeOf(native_val_or_cb)) == .Fn) {
            @compileError("TODO");
        } else {
            const data = self.isolate.initExternal(self.rt);
            tmpl.setGetter(js_key, v8.FunctionTemplate.initCallbackData(self.isolate, gen.genJsFuncGetValue(native_val_or_cb), data));
        }
    }

    /// Sets a computed getter backed by a native callback.
    pub fn setGetter(self: Self, tmpl: v8.ObjectTemplate, key: []const u8, comptime native_cb: anytype) void {
        const js_key = v8.String.initUtf8(self.isolate, key);
        tmpl.setGetter(js_key, gen.genJsGetter(native_cb));
    }

    /// Sets a getter/setter pair backed by native callbacks.
    pub fn setAccessor(self: Self, tmpl: v8.ObjectTemplate, key: []const u8, comptime native_getter_cb: anytype, comptime native_setter_cb: anytype) void {
        const js_key = self.isolate.initStringUtf8(key);
        tmpl.setGetterAndSetter(js_key, gen.genJsGetter(native_getter_cb), gen.genJsSetter(native_setter_cb));
    }

    /// Like setProp but the property is read-only.
    pub fn setConstProp(self: Self, tmpl: anytype, key: []const u8, value: anytype) void {
        const iso = self.isolate;
        const js_key = iso.initStringUtf8(key);
        switch (@TypeOf(value)) {
            u32 => {
                tmpl.set(js_key, iso.initIntegerU32(value), v8.PropertyAttribute.ReadOnly);
            },
            else => {
                tmpl.set(js_key, value, v8.PropertyAttribute.ReadOnly);
            },
        }
    }

    /// Creates a new FunctionTemplate with its js class name set to `name`.
    pub fn initFuncT(self: Self, name: []const u8) v8.FunctionTemplate {
        const iso = self.isolate;
        const new = iso.initFunctionTemplateDefault();
        new.setClassName(iso.initStringUtf8(name));
        return new;
    }
};

/// A js string paired with its utf8 byte length.
pub const SizedJsString = struct {
    str: v8.String,
    len: u32,
};

/// Appends `val`'s utf8 bytes to `arr`. The caller must have reserved
/// capacity for `val.len` bytes. Returns the slice of the appended bytes.
pub fn appendSizedJsStringAssumeCap(arr: *std.ArrayList(u8), isolate: v8.Isolate, val: SizedJsString) []const u8 {
    const start = arr.items.len;
    arr.items.len = start + val.len;
    _ = val.str.writeUtf8(isolate, arr.items[start..arr.items.len]);
    return arr.items[start..];
}

/// Rejects the pending promise `promise_id` with `native_val` converted to a js value.
pub fn rejectPromise(rt: *RuntimeContext, promise_id: PromiseId, native_val: anytype) void {
    const js_val_ptr = rt.getJsValuePtr(native_val);
    const resolver = rt.promises.getNoCheck(promise_id);
    _ = resolver.inner.reject(rt.getContext(), .{ .handle = js_val_ptr });
}

/// Resolves the pending promise `promise_id` with `native_val` converted to a js value.
pub fn resolvePromise(rt: *RuntimeContext, promise_id: PromiseId, native_val: anytype) void {
    const js_val_ptr = rt.getJsValuePtr(native_val);
    const resolver = rt.promises.getNoCheck(promise_id);
    _ = resolver.inner.resolve(rt.getContext(), .{ .handle = js_val_ptr });
}

/// A struct that also has the runtime context.
pub fn RuntimeValue(comptime T: type) type {
    return struct {
        rt: *RuntimeContext,
        inner: T,
    };
}

/// Holds the rt and resource id for passing into a callback.
const ExternalResourceHandle = struct {
    rt: *RuntimeContext,
    res_id: ResourceId,
};

/// Js callback invoked when an isolated test's promise rejects.
/// `data` is an object whose internal fields hold the rt (field 0) and the
/// test name (field 1); `val` is the rejection value.
fn reportIsolatedTestFailure(data: FuncData, val: v8.Value) void {
    const obj = data.val.castTo(v8.Object);
    const rt = stdx.mem.ptrCastAlign(*RuntimeContext, obj.getInternalField(0).castTo(v8.External).get());

    const test_name = v8x.allocValueAsUtf8(rt.alloc, rt.isolate, rt.getContext(), obj.getInternalField(1));
    defer rt.alloc.free(test_name);

    rt.num_isolated_tests_finished += 1;
    const trace_str = allocExceptionJsStackTraceString(rt, val);
    defer rt.alloc.free(trace_str);
    rt.env.printFmt("Test Failed: \"{s}\"\n{s}", .{test_name, trace_str});
}

/// Marks one isolated test as finished and passed.
fn passIsolatedTest(rt: *RuntimeContext) void {
    rt.num_isolated_tests_finished += 1;
    rt.num_tests_passed += 1;
}

const Promise = struct {
    task_id: u32,
};

// TODO: Since Cosmic uses the js stack trace api,
// it might be faster to return a plain object with the code and msg.
/// Builds a js Error for `err` with a numeric `code` property taken from the
/// api.cs_core.CsError enum with the same name.
pub fn createPromiseError(rt: *RuntimeContext, err: CsError) v8.Value {
    const iso = rt.isolate;
    const api_err = std.meta.stringToEnum(api.cs_core.CsError, @errorName(err)).?;
    const err_msg = api.cs_core.errString(api_err);
    const js_err = v8.Exception.initError(iso.initStringUtf8(err_msg));
    _ = js_err.castTo(v8.Object).setValue(rt.getContext(), iso.initStringUtf8("code"), iso.initIntegerU32(@enumToInt(api_err)));
    return js_err;
}

/// Schedules `func(args)` on the runtime's work queue and returns a js promise
/// that resolves with the task's result or rejects with its error.
pub fn invokeFuncAsync(rt: *RuntimeContext, comptime func: anytype, args: std.meta.ArgsTuple(@TypeOf(func))) v8.Promise {
    const ClosureTask = tasks.ClosureTask(func);
    const task = ClosureTask{
        .alloc = rt.alloc,
        .args = args,
    };
    const iso = rt.isolate;
    const ctx = rt.getContext();
    const resolver = iso.initPersistent(v8.PromiseResolver, v8.PromiseResolver.init(ctx));
    const promise = resolver.inner.getPromise();
    const promise_id = rt.promises.add(resolver) catch unreachable;
    const S = struct {
        fn onSuccess(_ctx: RuntimeValue(PromiseId), _res: TaskOutput(ClosureTask)) void {
            const _promise_id = _ctx.inner;
            resolvePromise(_ctx.rt, _promise_id, _res);
        }
        fn onFailure(ctx_: RuntimeValue(PromiseId), err_: anyerror) void {
            const _promise_id = ctx_.inner;
            if (std.meta.stringToEnum(api.cs_core.CsError, @errorName(err_))) |_| {
                // Known cs error: reject with a js Error carrying a `code` property.
                const js_err = createPromiseError(ctx_.rt, @errSetCast(CsError, err_));
                rejectPromise(ctx_.rt, _promise_id, js_err);
            } else {
                rejectPromise(ctx_.rt, _promise_id, err_);
            }
        }
    };
    const task_ctx = RuntimeValue(PromiseId){
        .rt = rt,
        .inner = promise_id,
    };
    _ = rt.work_queue.addTaskWithCb(task, task_ctx, S.onSuccess, S.onFailure);
    return promise;
}

/// Error set surfaced to js through promise rejections (see createPromiseError).
pub const CsError = error {
    NoError,
    FileNotFound,
    PathExists,
    IsDir,
    InvalidFormat,
    ConnectFailed,
    CertVerify,
    CertBadFile,
    CantResolveHost,
    Unsupported,
    Unknown,
};

/// Double precision can represent a 53 bit significand.
pub const F64SafeUint = u53;
pub const F64SafeInt = i54;

test "F64SafeUint, F64SafeInt" {
    const uint: F64SafeUint = std.math.maxInt(F64SafeUint);
    var double = @intToFloat(f64, uint);
    try t.eq(@floatToInt(F64SafeUint, double), uint);

    const int: F64SafeInt = std.math.maxInt(F64SafeInt);
    double = @intToFloat(f64, int);
    try t.eq(@floatToInt(F64SafeInt, double), int);
}

/// Used in c code to log synchronously.
const c_log = stdx.log.scoped(.c);

/// Exported so c code can log synchronously through zig's scoped logger.
pub export fn cosmic_log(buf: [*c]const u8) void {
    c_log.debug("{s}", .{ buf });
}

const ModuleInfo = struct {
    const Self = @This();

    // Owned; freed by deinit with the same allocator.
    dir: []const u8,

    pub fn deinit(self: Self, alloc: std.mem.Allocator) void {
        alloc.free(self.dir);
    }
};

/// v8 weak-callback finalizer for objects made by createWeakHandle.
/// Recovers the handle id from the second internal field and destroys the handle.
pub fn finalizeHandle(c_info: ?*const v8.C_WeakCallbackInfo) callconv(.C) void {
    const info = v8.WeakCallbackInfo.initFromC(c_info);
    const rt = stdx.mem.ptrCastAlign(*RuntimeContext, info.getParameter());
    // The id was stored doubled (see createWeakHandle); halve to recover it.
    const id = @intCast(u32, @ptrToInt(info.getInternalField(1)) / 2);
    rt.destroyWeakHandle(id);
}

/// Wraps a native resource in a js object. When the js object is garbage
/// collected, finalizeHandle runs and frees the native resource.
pub fn createWeakHandle(rt: *RuntimeContext, comptime Tag: WeakHandleTag, ptr: WeakHandlePtr(Tag)) v8.Object {
    const ctx = rt.getContext();
    const iso = rt.isolate;
    const template = switch (Tag) {
        .DrawCommandList => rt.handle_class,
        .Sound => rt.sound_class,
        .Random => rt.random_class,
        else => unreachable,
    };
    const new = template.inner.initInstance(ctx);
    var new_p = iso.initPersistent(v8.Object, new);

    const id = rt.weak_handles.add(.{
        .ptr = ptr,
        .tag = Tag,
        .obj = new_p,
    }) catch unreachable;

    const js_id = iso.initExternal(@intToPtr(?*anyopaque, id));
    new.setInternalField(0, js_id);
    // id is doubled then halved on callback.
    // Set on the second internal field since the first is already used for the original id.
    new.setAlignedPointerInInternalField(1, @intToPtr(?*anyopaque, @intCast(u64, id) * 2));

    new_p.setWeakFinalizer(rt, finalizeHandle, .kInternalFields);

    return new_p.inner;
}

/// Outcome of compiling and evaluating a js module.
const RunModuleScriptResult = struct {
    const Self = @This();

    const State = enum {
        Pending,
        Success,
        Failed,
    };

    // This only reflects the state after returning from runModuleScript.
    // To query the module eval state, check the eval promise.
    state: State,

    // If Failed, mod can be null if it failed on the compile step.
    mod: ?v8.Persistent(v8.Module),

    // The eval promise. Will resolve to undefined or reject with error.
    eval: ?v8.Persistent(v8.Promise),

    // If Failed, js_err_trace will be present.
    js_err_trace: ?[]const u8,

    pub fn deinit(self: *Self, alloc: std.mem.Allocator) void {
        if (self.js_err_trace) |trace| {
            alloc.free(trace);
        }
        if (self.mod) |*mod_| {
            mod_.deinit();
        }
        if (self.eval) |*eval_| {
            eval_.deinit();
        }
    }
};

/// Forwards an SDL window-resized event to the matching CsWindow resource, if any.
fn handleSdlWindowResized(rt: *RuntimeContext, event: sdl.SDL_WindowEvent) void {
    if (rt.getCsWindowResourceBySdlId(event.windowID)) |res_id| {
        if (rt.getResourcePtr(.CsWindow, res_id)) |win| {
            win.handleResizeEvent(rt, .{
                .width = @intCast(u32, event.data1),
                .height = @intCast(u32, event.data2),
            });
        }
    }
}

// The v8 platform is stored as a global since after it's deinited,
// we can no longer reinit v8. See v8.deinitV8/v8.deinitV8Platform.
var g_platform: ?v8.Platform = null;

/// Returns global v8 platform. Initializes if needed.
pub fn ensureV8Platform() v8.Platform {
    if (g_platform == null) {
        const platform_ = v8.Platform.initDefault(0, true);
        v8.initV8Platform(platform_);
        v8.initV8();
        const S = struct {
            fn handleDcheck(file: [*c]const u8, line: c_int, msg: [*c]const u8) callconv(.C) void {
                log.debug("v8 dcheck {s}:{} {s}", .{file, line, msg});
                // Just panic and print zig's stack trace.
                unreachable;
            }
        };
        // Override v8 debug assert reporting.
        v8.setDcheckFunction(S.handleDcheck);
        g_platform = platform_;
    }
    return g_platform.?;
}

/// This should only be called at the end of the program or when v8 is no longer needed.
/// V8 can't be reinited after this.
fn deinitV8() void {
    if (g_platform) |platform_| {
        v8.deinitV8();
        v8.deinitV8Platform();
        platform_.deinit();
        g_platform = null;
    }
}

/// v8.StackTrace/v8.StackFrame are limited and not as rich as the js stack trace API.
/// JsStackTrace will contain data from CallSiteInfos passed into js Error.prepareStackTrace.
/// See api_init.js on how Error.prepareStackTrace is set up.
/// Parsed js stack trace; frames and the frames slice are owned and freed by deinit.
const JsStackTrace = struct {
    const Self = @This();

    frames: []const JsStackFrame,

    fn deinit(self: Self, alloc: std.mem.Allocator) void {
        for (self.frames) |frame| {
            frame.deinit(alloc);
        }
        alloc.free(self.frames);
    }
};

/// One call-site entry of a js stack trace (mirrors the js CallSiteInfo fields).
const JsStackFrame = struct {
    const Self = @This();

    url: []const u8,
    func_name: ?[]const u8,
    line_num: u32,
    col_num: u32,
    is_constructor: bool,
    is_async: bool,

    fn deinit(self: Self, alloc: std.mem.Allocator) void {
        alloc.free(self.url);
        if (self.func_name) |name| {
            alloc.free(name);
        }
    }
};

/// Appends a human readable "at <func> <url>:<line>:<col>" listing for each frame.
pub fn appendJsStackTraceString(buf: *std.ArrayList(u8), trace: JsStackTrace) void {
    const writer = buf.writer();
    for (trace.frames) |frame| {
        writer.writeAll(" at ") catch unreachable;
        if (frame.is_async) {
            writer.writeAll("async ") catch unreachable;
        }
        if (frame.func_name) |name| {
            writer.print("{s} ", .{ name }) catch unreachable;
        }
        writer.print("{s}:{}:{}\n", .{ frame.url, frame.line_num, frame.col_num }) catch unreachable;
    }
}

/// Formats `exception` plus its js stack trace into an allocated string.
/// Caller owns the returned slice (alloc'ed with rt.alloc).
pub fn allocExceptionJsStackTraceString(rt: *RuntimeContext, exception: v8.Value) []const u8 {
    const alloc = rt.alloc;
    const iso = rt.isolate;
    const ctx = rt.getContext();
    var hscope: v8.HandleScope = undefined;
    hscope.init(iso);
    defer hscope.deinit();

    var buf = std.ArrayList(u8).init(alloc);
    const writer = buf.writer();

    _ = v8x.appendValueAsUtf8(&buf, iso, ctx, exception);
    writer.writeAll("\n") catch unreachable;

    // Access js stack property to invoke Error.prepareStackTrace
    if (exception.isObject()) {
        const exception_o = exception.castTo(v8.Object);
        if (exception_o.getValue(ctx, iso.initStringUtf8("stack"))) |stack| {
            if (stack.isString()) {
                // `__frames` is presumably populated by the Error.prepareStackTrace
                // hook set up in api_init.js — confirm against that file.
                if (exception_o.getValue(ctx, iso.initStringUtf8("__frames"))) |frames| {
                    if (frames.isArray()) {
                        // Convert to JsStackTrace
                        const trace = JsStackTrace{
                            .frames = getNativeValue(alloc, iso, ctx, []const JsStackFrame, frames) catch &.{},
                        };
                        defer trace.deinit(alloc);
                        appendJsStackTraceString(&buf, trace);
                    }
                } else |_| {}
            }
        } else |_| {}
    }
    return buf.toOwnedSlice();
}

/// Converts a js value to a target native type without a RuntimeContext.
/// Supports bool, u32, []const u8, structs (field-by-field), and slices of
/// convertible element types; anything else is a compile error.
fn getNativeValue(alloc: std.mem.Allocator, iso: v8.Isolate, ctx: v8.Context, comptime Target: type, val: v8.Value) !Target {
    switch (Target) {
        bool => return val.toBool(iso),
        u32 => return val.toU32(ctx),
        []const u8 => {
            return v8x.allocValueAsUtf8(alloc, iso, ctx, val);
        },
        else => {
            if (@typeInfo(Target) == .Struct) {
                if (val.isObject()) {
                    const obj = val.castTo(v8.Object);
                    var res: Target = undefined;
                    if (comptime hasAllOptionalFields(Target)) {
                        // All fields have defaults, so start from them.
                        res = .{};
                    }
                    const Fields = std.meta.fields(Target);
                    inline for (Fields) |Field| {
                        if (@typeInfo(Field.field_type) == .Optional) {
                            const child_val = obj.getValue(ctx, iso.initStringUtf8(Field.name)) catch return error.CantConvert;
                            const ChildType = comptime @typeInfo(Field.field_type).Optional.child;
                            if (child_val.isNullOrUndefined()) {
                                @field(res, Field.name) = null;
                            } else {
                                @field(res, Field.name) = getNativeValue2(alloc, iso, ctx, ChildType, child_val);
                            }
                        } else {
                            const js_val = obj.getValue(ctx, iso.initStringUtf8(Field.name)) catch return error.CantConvert;
                            // NOTE(review): if the child conversion fails, the field is
                            // left as-is (undefined unless all fields are optional).
                            if (getNativeValue2(alloc, iso, ctx, Field.field_type, js_val)) |child_value| {
                                @field(res, Field.name) = child_value;
                            }
                        }
                    }
                    return res;
                } else return error.CantConvert;
            } else if (@typeInfo(Target) == .Pointer) {
                if (@typeInfo(Target).Pointer.size == .Slice) {
                    const Child = @typeInfo(Target).Pointer.child;
                    if (val.isArray()) {
                        const len = val.castTo(v8.Array).length();
                        const buf = alloc.alloc(Child, len) catch unreachable;
                        errdefer alloc.free(buf);
                        const val_o = val.castTo(v8.Object);
                        var i: u32 = 0;
                        while (i < len) : (i += 1) {
                            const child_val = val_o.getAtIndex(ctx, i) catch return error.CantConvert;
                            buf[i] = getNativeValue(alloc, iso, ctx, Child, child_val) catch return error.CantConvert;
                        }
                        return buf;
                    } else return error.CantConvert;
                }
            }
            comptime @compileError(std.fmt.comptimePrint("Unsupported conversion from {s} to {s}", .{ @typeName(@TypeOf(val)), @typeName(Target) }));
        },
    }
}

/// Like getNativeValue but flattens the error into null.
fn getNativeValue2(alloc: std.mem.Allocator, iso: v8.Isolate, ctx: v8.Context, comptime Target: type, val: v8.Value) ?Target {
    return getNativeValue(alloc, iso, ctx, Target, val) catch return null;
}

/// Prints the main module's js error trace, marks the script done,
/// and wakes the uv event loop (enters the dev error state in dev mode).
fn handleMainModuleScriptError(rt: *RuntimeContext, val: v8.Value) void {
    const err_str = allocExceptionJsStackTraceString(rt, val);
    defer rt.alloc.free(err_str);
    rt.env.errorFmt("{s}", .{err_str});
    rt.main_script_done = true;
    if (rt.dev_mode) {
        rt.dev_ctx.enterJsErrorState(rt, err_str);
    }
    // Wake the event loop so the done flag is observed.
    const res = uv.uv_async_send(rt.uv_dummy_async);
    uv.assertNoError(res);
}

/// Marks the main script as done and wakes the uv event loop
/// (enters the dev success state in dev mode).
fn handleMainModuleScriptSuccess(rt: *RuntimeContext) void {
    rt.main_script_done = true;
    if (rt.dev_mode) {
        rt.dev_ctx.enterJsSuccessState();
    }
    const res = uv.uv_async_send(rt.uv_dummy_async);
    uv.assertNoError(res);
}

/// Override libc assert fail handler to abort with zig stack trace.
/// Some dependencies like libuv use it.
export fn __assert_fail(assertion: [*c]const u8, file: [*c]const u8, line: c_uint, function: [*c]const u8) callconv(.C) void {
    log.debug("libc assert failed: {s} {s}:{}, Assertion: {s}", .{function, file, line, assertion});
    unreachable;
}

/// PRNG exposed to js behind a weak handle (see WeakHandleTag.Random).
pub const Random = struct {
    impl: std.rand.DefaultPrng,
    iface: std.rand.Random,
};
runtime/runtime.zig
const std = @import("std");
const assert = @import("std").debug.assert;
const misc = @import("misc.zig");
usingnamespace @import("c.zig");
const warn = std.debug.warn;

pub const List = std.ArrayList(Entry);
pub const Allocator = std.mem.Allocator;

fn log(comptime arg: []const u8) void {
    warn(arg, .{});
}

/// Loads a todo list from `file_path`.
/// Each line is "time_added time_started time_complete state text" with the
/// times in hex, the state in decimal, and the text as wide chars.
/// Cleanup: Must call deinit on result. Entry.text is allocated by wcsdup
/// (C heap), so freeing it is the caller's concern as well.
pub fn loadList(allocator: *Allocator, file_path: [*c]const u8) !List {
    var result = List.init(allocator);
    // Was leaked on every error return below.
    errdefer result.deinit();
    //std.debug.warn("Loading Todo List: '{}'\n", .{file_path[0..strlen(&file_path[0])]});
    // FIXME: Use zig std io
    var fd = open(file_path, O_RDONLY);
    defer _ = close(fd);
    // FIXME: assert is too heavy handed
    assert(fd != -1);
    var offset_end: off_t = lseek(fd, 0, SEEK_END);
    assert(offset_end != -1);
    if (lseek(fd, 0, SEEK_SET) == -1) return error.LSeekError;

    // +1 so the buffer is always NUL terminated for swprintf below.
    // (Was unchecked while the wide buffer below was checked.)
    var file_contents = calloc(@intCast(c_ulong, offset_end) + 1, 1) orelse return error.OutOfMem;
    defer free(file_contents);
    if (read(fd, file_contents, @intCast(usize, offset_end)) != offset_end) return error.FileReadFailed;

    var wide_file_contents = calloc(@intCast(c_ulong, offset_end) + 1, @sizeOf(wchar_t)) orelse return error.OutOfMem;
    defer free(wide_file_contents);

    // Widen the raw bytes via swprintf's %s conversion.
    var start_ptr: [*c]wchar_t = std.meta.cast([*c]wchar_t, wide_file_contents);
    _ = swprintf(start_ptr, @intCast(usize, offset_end) + 1, &([_]c_int{ '%', 's', 0 })[0], file_contents);

    var save_ptr: [*c]wchar_t = undefined;
    while (true) {
        // wcstok takes the buffer on the first call and null afterwards.
        var line = wcstok(start_ptr, &([_]c_int{ '\n', 0 })[0], &save_ptr);
        start_ptr = null;
        if (line == null) break;

        var time_added: time_t = wcstoll(line, &line, 16);
        var time_started: time_t = wcstoll(line, &line, 16);
        var time_complete: time_t = wcstoll(line, &line, 16);
        var statell: c_longlong = wcstoll(line, &line, 10);
        const state = @intToEnum(State, statell);
        line += wcsspn(line, &([_]c_int{ ' ', 0 })[0]); // reject spaces until actual text
        try result.append(.{
            .time_added = time_added,
            .time_started = time_started,
            .time_complete = time_complete,
            .state = state,
            .text = wcsdup(line),
        });
    }
    return result;
}

/// Sort comparator ("less than" for std.sort): entries with the same state are
/// ordered most-recent-first by the timestamp relevant to that state; across
/// states the display order is In_Review, Priority, Doing, Not_Started, Done,
/// Discarded.
fn compare(context: void, first: Entry, secnd: Entry) bool {
    if (first.state == secnd.state) {
        // identical states, compare times
        switch (first.state) {
            .Discarded, .Priority, .Doing, .In_Review => return first.time_started > secnd.time_started,
            .Not_Started => return first.time_added > secnd.time_added,
            .Done => return first.time_complete > secnd.time_complete,
        }
        // Note: the switch above is exhaustive and every prong returns,
        // so there is deliberately no code after it.
    }
    if (first.state == .In_Review) {
        return true;
    } else if (secnd.state == .In_Review) return false;
    if (first.state == .Priority) {
        return true;
    } else if (secnd.state == .Priority) return false;
    if (first.state == .Doing) {
        return true;
    } else if (secnd.state == .Doing) return false;
    if (first.state == .Not_Started) {
        return true;
    } else if (secnd.state == .Not_Started) return false;
    if (first.state == .Done) {
        return true;
    } else if (secnd.state == .Done) return false;
    if (first.state == .Discarded) {
        return true;
    } else if (secnd.state == .Discarded) return false;
    return true;
}

/// Sorts entries in place using the display order described on `compare`.
pub fn sort(items: []Entry) void {
    std.sort.sort(Entry, items, {}, compare);
}

/// Persisted as its integer value in the save file (decimal column 4).
pub const State = enum(c_longlong) {
    Not_Started = 0,
    Priority = 1,
    Doing = 2,
    Done = 3,
    In_Review = 4,
    Discarded = 5,
};

pub const Entry = struct {
    time_added: time_t,
    time_started: time_t,
    time_complete: time_t,
    state: State,
    text: *wchar_t,
};

/// Writes the list to `file_path` in the same format loadList reads.
/// Silently returns if the file can't be opened for writing.
pub fn save(list: List, file_path: []u8) void {
    const file = fopen(&file_path[0], "w");
    // Was missing: fclose(null) is undefined behavior when fopen fails.
    if (file == null) return;
    defer _ = fclose(file);
    const format = misc.u8ToWideString("%X %X %X %X %ls\n");
    defer free(format);
    for (list.items) |entry| {
        _ = fwprintf(
            file,
            &format[0],
            entry.time_added,
            entry.time_started,
            entry.time_complete,
            entry.state,
            entry.text,
        );
    }
}
src/todo.zig
const subo = @import("subo.zig");
const std = @import("std");
const testing = std.testing;
const math = std.math;

/// Checks __subodi4 against the reference implementation below,
/// for both the difference and the overflow flag.
fn test__subodi4(a: i64, b: i64) !void {
    var result_ov: c_int = undefined;
    var expected_ov: c_int = undefined;
    const result = subo.__subodi4(a, b, &result_ov);
    const expected: i64 = simple_subodi4(a, b, &expected_ov);
    try testing.expectEqual(expected, result);
    try testing.expectEqual(expected_ov, result_ov);
}

// 2 cases on evaluating `a-b`:
// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a<min+b
// 2. `a-b` may overflow, iff b<0 && a>0 and a-b > max <=> a>max+b
// `-b` evaluation may overflow, iff b==min, but this is handled by the hardware
/// Reference implementation: returns the wrapped difference `a -% b` and sets
/// `overflow` to 1 when the true difference is not representable in i64, else 0.
pub fn simple_subodi4(a: i64, b: i64, overflow: *c_int) i64 {
    overflow.* = 0;
    const min: i64 = math.minInt(i64);
    const max: i64 = math.maxInt(i64);
    if (((b > 0) and (a < min + b)) or
        ((b < 0) and (a > max + b)))
        overflow.* = 1;
    return a -% b;
}

// Was misnamed "subodi3"; this file tests __subodi4.
test "subodi4" {
    const min: i64 = math.minInt(i64);
    const max: i64 = math.maxInt(i64);
    var i: i64 = 1;
    while (i < max) : (i *|= 2) {
        try test__subodi4(i, i);
        try test__subodi4(-i, -i);
        try test__subodi4(i, -i);
        try test__subodi4(-i, i);
    }

    // edge cases
    // 0 - 0 = 0
    // MIN - MIN = 0
    // MAX - MAX = 0
    // 0 - MIN overflow
    // 0 - MAX = MIN+1
    // MIN - 0 = MIN
    // MAX - 0 = MAX
    // MIN - MAX overflow
    // MAX - MIN overflow
    try test__subodi4(0, 0);
    try test__subodi4(min, min);
    try test__subodi4(max, max);
    try test__subodi4(0, min);
    try test__subodi4(0, max);
    try test__subodi4(min, 0);
    try test__subodi4(max, 0);
    try test__subodi4(min, max);
    try test__subodi4(max, min);

    // derived edge cases
    // MIN+1 - MIN = 1
    // MAX-1 - MAX = -1
    // 1 - MIN overflow
    // -1 - MIN = MAX
    // -1 - MAX = MIN
    // +1 - MAX = MIN+2
    // MIN - 1 overflow
    // MIN - -1 = MIN+1
    // MAX - 1 = MAX-1
    // MAX - -1 overflow
    try test__subodi4(min + 1, min);
    try test__subodi4(max - 1, max);
    try test__subodi4(1, min);
    try test__subodi4(-1, min);
    try test__subodi4(-1, max);
    try test__subodi4(1, max);
    try test__subodi4(min, 1);
    try test__subodi4(min, -1);
    try test__subodi4(max, -1);
    try test__subodi4(max, 1);
}
lib/std/special/compiler_rt/subodi4_test.zig
const std = @import("std");

/// A generation-checked handle into a SlotMap: slot index plus the slot's
/// version at insertion time. A stale key (slot reused) no longer matches.
pub fn Key(comptime S: type) type {
    return struct {
        const Self = @This();

        index: S,
        version: S,

        pub fn equals(lhs: Self, rhs: Self) bool {
            return lhs.index == rhs.index and lhs.version == rhs.version;
        }
    };
}

/// Internal storage cell. The version increments on every insert/remove,
/// so an odd version means the slot currently holds a live value.
fn Slot(comptime S: type, comptime T: type) type {
    return struct {
        const Self = @This();

        version: S,
        next_free: S,
        value: T,

        fn new(version: S, next_free: S, value: T) Self {
            return Self{ .version = version, .next_free = next_free, .value = value };
        }

        // Odd version == occupied.
        fn occupied(self: Self) bool {
            return self.version % 2 > 0;
        }
    };
}

/// Slot map keyed by generational keys: O(1) insert/get/remove, stable keys,
/// slots recycled through an intrusive free list.
pub fn SlotMap(comptime S: type, comptime T: type) type {
    return struct {
        const Self = @This();
        const SlotType = Slot(S, T);

        pub const Error = error{
            OverflowError,
            InvalidKey,
        };

        /// Iterates occupied slots in index order. Returns null at the end
        /// and resets itself so it can be reused.
        pub const Iterator = struct {
            map: *const Self,
            index: S,

            pub fn keys(self: *Iterator) ?Key(S) {
                const slots = self.map.slots.items;
                // Skip free slots. Bug fix: the previous bound (`map.len`, the
                // live count) let the scan run past the end of `slots` when the
                // map was full or had trailing free slots.
                while (self.index < slots.len and !slots[self.index].occupied()) : (self.index += 1) {}
                if (self.index >= slots.len) {
                    self.reset();
                    return null;
                }
                self.index += 1;
                return Key(S){
                    .index = self.index - 1,
                    .version = slots[self.index - 1].version,
                };
            }

            pub fn values(self: *Iterator) ?T {
                const slots = self.map.slots.items;
                // Same bounds fix as keys().
                while (self.index < slots.len and !slots[self.index].occupied()) : (self.index += 1) {}
                if (self.index >= slots.len) {
                    self.reset();
                    return null;
                }
                self.index += 1;
                return slots[self.index - 1].value;
            }

            fn reset(self: *Iterator) void {
                self.index = 0;
            }
        };

        slots: std.ArrayList(SlotType),
        // Index of the first recycled slot, or slots.items.len when none.
        free_head: S,
        // Number of occupied slots.
        len: S,

        pub fn init(allocator: *std.mem.Allocator, size: S) !Self {
            var result = Self{
                .slots = try std.ArrayList(SlotType).initCapacity(allocator, @intCast(usize, size)),
                .free_head = 0,
                .len = 0,
            };
            return result;
        }

        pub fn deinit(self: Self) void {
            self.slots.deinit();
        }

        pub fn count(self: Self) usize {
            return @intCast(usize, self.len);
        }

        pub fn capacity(self: Self) usize {
            return self.slots.capacity;
        }

        pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
            try self.slots.ensureTotalCapacity(new_capacity);
        }

        /// True if `key` still refers to the live value it was created for.
        pub fn hasKey(self: Self, key: Key(S)) bool {
            if (key.index < self.slots.items.len) {
                const slot = self.slots.items[key.index];
                return slot.version == key.version;
            } else {
                return false;
            }
        }

        /// Stores `value`, reusing a free slot if one exists, and returns its key.
        pub fn insert(self: *Self, value: T) !Key(S) {
            const new_len = self.len + 1;
            if (new_len == std.math.maxInt(S)) {
                return error.OverflowError;
            }
            const idx = self.free_head;
            if (idx < self.slots.items.len) {
                // Recycle a previously removed slot; odd version marks it live.
                const occupied_version = self.slots.items[idx].version | 1;
                const result = Key(S){ .index = idx, .version = occupied_version };
                self.slots.items[idx].value = value;
                self.slots.items[idx].version = occupied_version;
                self.free_head = self.slots.items[idx].next_free;
                self.len = new_len;
                return result;
            } else {
                const result = Key(S){ .index = idx, .version = 1 };
                try self.slots.append(SlotType.new(1, 0, value));
                self.free_head = @intCast(S, self.slots.items.len);
                self.len = new_len;
                return result;
            }
        }

        // TODO: find out how to do this correctly
        fn reserve(self: *Self) !Key(S) {
            const default: T = undefined;
            return try self.insert(default);
        }

        /// Frees slot `idx` (must be occupied), pushing it on the free list.
        /// Returns the removed value.
        fn removeFromSlot(self: *Self, idx: S) T {
            self.slots.items[idx].next_free = self.free_head;
            self.slots.items[idx].version += 1;
            self.free_head = idx;
            self.len -= 1;
            return self.slots.items[idx].value;
        }

        pub fn remove(self: *Self, key: Key(S)) !T {
            if (self.hasKey(key)) {
                return self.removeFromSlot(key.index);
            } else {
                return error.InvalidKey;
            }
        }

        pub fn delete(self: *Self, key: Key(S)) !void {
            if (self.hasKey(key)) {
                _ = self.removeFromSlot(key.index);
            } else {
                return error.InvalidKey;
            }
        }

        // TODO: zig closures
        /// Keeps only entries for which `filter` returns true.
        /// (Previously did not compile: wrong len field, untyped index,
        /// `Key` instead of `Key(S)`, `occupied` not called, undefined `value` —
        /// hidden because nothing referenced it.)
        fn retain(self: *Self, filter: fn (key: Key(S), value: T) bool) void {
            const len = self.slots.items.len;
            var idx: S = 0;
            while (idx < len) : (idx += 1) {
                const slot = self.slots.items[idx];
                const key = Key(S){ .index = idx, .version = slot.version };
                if (slot.occupied() and !filter(key, slot.value)) {
                    _ = self.removeFromSlot(idx);
                }
            }
        }

        /// Removes all entries and releases the slot storage
        /// (old keys become invalid).
        pub fn clear(self: *Self) void {
            // Bug fix: previously called removeFromSlot(self.len), which indexes
            // one past the last live slot and is out of bounds on a full map.
            var idx: S = 0;
            while (idx < self.slots.items.len) : (idx += 1) {
                if (self.slots.items[idx].occupied()) {
                    _ = self.removeFromSlot(idx);
                }
            }
            self.slots.shrinkRetainingCapacity(0);
            self.free_head = 0;
        }

        pub fn get(self: *const Self, key: Key(S)) !T {
            if (self.hasKey(key)) {
                return self.slots.items[key.index].value;
            } else {
                return error.InvalidKey;
            }
        }

        pub fn getPtr(self: *const Self, key: Key(S)) !*T {
            if (self.hasKey(key)) {
                return &self.slots.items[key.index].value;
            } else {
                return error.InvalidKey;
            }
        }

        pub fn set(self: *Self, key: Key(S), value: T) !void {
            if (self.hasKey(key)) {
                self.slots.items[key.index].value = value;
            } else {
                return error.InvalidKey;
            }
        }

        pub fn iterator(self: *const Self) Iterator {
            return Iterator{
                .map = self,
                .index = 0,
            };
        }
    };
}

test "slotmap" {
    // const debug = std.debug;
    const mem = std.mem;
    const expect = std.testing.expect;
    const expectError = std.testing.expectError;
    const data = [_][]const u8{ "foo", "bar", "cat", "zag" };
    var map = try SlotMap(u16, []const u8).init(std.testing.allocator, 3);
    const K = Key(u16);
    var keys = [_]K{K{ .index = 0, .version = 0 }} ** 3;
    var iter = map.iterator();
    var idx: usize = 0;
    defer map.deinit();

    for (data[0..3]) |word, i| {
        keys[i] = try map.insert(word);
    }

    try expect(mem.eql(u8, try map.get(keys[0]), data[0]));
    try expect(mem.eql(u8, try map.get(keys[1]), data[1]));
    try expect(mem.eql(u8, try map.get(keys[2]), data[2]));

    try map.set(keys[0], data[3]);
    try expect(mem.eql(u8, try map.get(keys[0]), data[3]));

    try map.delete(keys[0]);
    try expectError(error.InvalidKey, map.get(keys[0]));

    while (iter.values()) |value| : (idx += 1) {
        try expect(mem.eql(u8, value, data[idx + 1]));
    }
    idx = 0;
    while (iter.keys()) |key| : (idx += 1) {
        try expect(mem.eql(u8, try map.get(key), data[idx + 1]));
    }

    map.clear();
    // std.debug.warn("\n");
    for (keys) |key| {
        try expectError(error.InvalidKey, map.get(key));
    }
    while (iter.values()) |_| {
        try expect(iter.index == 0);
    }
}
src/slotmap.zig
const std = @import("std");
const panic = std.debug.panic;
const assert = std.debug.assert;
const bufPrint = std.fmt.bufPrint;
const c = @import("c.zig");
const debug_gl = @import("debug_gl.zig");
const AllShaders = @import("all_shaders.zig").AllShaders;
const StaticGeometry = @import("static_geometry.zig").StaticGeometry;

// Global render state, initialized once in main() and shared by the draw
// helpers below.
var window: *c.GLFWwindow = undefined;
var all_shaders: AllShaders = undefined;
var static_geometry: StaticGeometry = undefined;
//var font: Spritesheet = undefined;

/// GLFW error callback: abort with the human-readable description.
fn errorCallback(err: c_int, description: [*c]const u8) callconv(.C) void {
    // `{s}` is required to print a string-like pointer; `{}` on a
    // `[*:0]const u8` is rejected by std.fmt ("cannot format pointer
    // without a specifier").
    panic("Error: {s}\n", .{@as([*:0]const u8, description)});
}

/// GLFW key callback: currently only filters for key-press events;
/// the per-key handling is still to be ported (see commented code).
fn keyCallback(win: ?*c.GLFWwindow, key: c_int, scancode: c_int, action: c_int, mods: c_int) callconv(.C) void {
    if (action != c.GLFW_PRESS) return;
    //const t = @ptrCast(*Tetris, @alignCast(@alignOf(Tetris), c.glfwGetWindowUserPointer(win).?));
    switch (key) {
        else => {},
    }
}

//const font_png = @embedFile("../assets/font.png");

pub const window_width = 100;
pub const window_height = 100;

/// Entry point: creates the GLFW window, sets up a GL 3.2 core context,
/// loads the shaders/geometry, and runs the clear-and-swap event loop
/// until the window is closed.
pub fn main() !void {
    _ = c.glfwSetErrorCallback(errorCallback);

    if (c.glfwInit() == c.GL_FALSE) {
        panic("GLFW init failure\n", .{});
    }
    defer c.glfwTerminate();

    c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MAJOR, 3);
    c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MINOR, 2);
    c.glfwWindowHint(c.GLFW_OPENGL_FORWARD_COMPAT, c.GL_TRUE);
    c.glfwWindowHint(c.GLFW_OPENGL_DEBUG_CONTEXT, debug_gl.is_on);
    c.glfwWindowHint(c.GLFW_OPENGL_PROFILE, c.GLFW_OPENGL_CORE_PROFILE);
    c.glfwWindowHint(c.GLFW_DEPTH_BITS, 0);
    c.glfwWindowHint(c.GLFW_STENCIL_BITS, 8);
    c.glfwWindowHint(c.GLFW_RESIZABLE, c.GL_FALSE);

    window = c.glfwCreateWindow(window_width, window_height, "zigui", null, null) orelse {
        panic("unable to create window\n", .{});
    };
    defer c.glfwDestroyWindow(window);

    _ = c.glfwSetKeyCallback(window, keyCallback);
    c.glfwMakeContextCurrent(window);
    c.glfwSwapInterval(1);

    // create and bind exactly one vertex array per context and use
    // glVertexAttribPointer etc every frame.
    var vertex_array_object: c.GLuint = undefined;
    c.glGenVertexArrays(1, &vertex_array_object);
    c.glBindVertexArray(vertex_array_object);
    defer c.glDeleteVertexArrays(1, &vertex_array_object);

    // The framebuffer may be larger than the window (HiDPI), never smaller.
    var framebuffer_width: c_int = window_width;
    var framebuffer_height: c_int = window_height;
    c.glfwGetFramebufferSize(window, &framebuffer_width, &framebuffer_height);
    assert(framebuffer_width >= window_width);
    assert(framebuffer_height >= window_height);

    all_shaders = try AllShaders.create();
    defer all_shaders.destroy();

    static_geometry = StaticGeometry.create();
    defer static_geometry.destroy();

    //font.init(font_png, font_char_width, font_char_height) catch {
    //    panic("unable to read assets\n", .{});
    //};
    //defer font.deinit();

    var seed_bytes: [@sizeOf(u64)]u8 = undefined;
    std.crypto.randomBytes(seed_bytes[0..]) catch |err| {
        panic("unable to seed random number generator: {}", .{err});
    };
    //t.prng = std.rand.DefaultPrng.init(std.mem.readIntNative(u64, &seed_bytes));
    //t.rand = &t.prng.random;

    //resetProjection(t);
    //restartGame(t);

    c.glClearColor(0.0, 0.0, 0.0, 1.0);
    c.glEnable(c.GL_BLEND);
    c.glBlendFunc(c.GL_SRC_ALPHA, c.GL_ONE_MINUS_SRC_ALPHA);
    c.glPixelStorei(c.GL_UNPACK_ALIGNMENT, 1);

    c.glViewport(0, 0, framebuffer_width, framebuffer_height);
    //c.glfwSetWindowUserPointer(window, @ptrCast(*c_void, t));

    debug_gl.assertNoError();

    const start_time = c.glfwGetTime();
    var prev_time = start_time;

    while (c.glfwWindowShouldClose(window) == c.GL_FALSE) {
        c.glClear(c.GL_COLOR_BUFFER_BIT | c.GL_DEPTH_BUFFER_BIT | c.GL_STENCIL_BUFFER_BIT);

        const now_time = c.glfwGetTime();
        const elapsed = now_time - prev_time;
        prev_time = now_time;

        //nextFrame(t, elapsed);
        //draw(t, @This());
        c.glfwSwapBuffers(window);
        c.glfwPollEvents();
    }

    debug_gl.assertNoError();
}

// NOTE(review): the three draw helpers below reference `Tetris`, `Vec4`,
// `Mat4x4`, `Particle`, `mat4x4_identity` and `font_char_width`, none of
// which are declared or imported in this file — presumably leftovers from
// the zig-tetris port these were copied from. Confirm where those
// declarations are meant to come from before calling these functions.

/// Draw the shared 2D rect geometry with a flat color under `mvp`.
pub fn fillRectMvp(t: *Tetris, color: Vec4, mvp: Mat4x4) void {
    all_shaders.primitive.bind();
    all_shaders.primitive.setUniformVec4(all_shaders.primitive_uniform_color, color);
    all_shaders.primitive.setUniformMat4x4(all_shaders.primitive_uniform_mvp, mvp);

    c.glBindBuffer(c.GL_ARRAY_BUFFER, static_geometry.rect_2d_vertex_buffer);
    c.glEnableVertexAttribArray(@intCast(c.GLuint, all_shaders.primitive_attrib_position));
    c.glVertexAttribPointer(@intCast(c.GLuint, all_shaders.primitive_attrib_position), 3, c.GL_FLOAT, c.GL_FALSE, 0, null);

    c.glDrawArrays(c.GL_TRIANGLE_STRIP, 0, 4);
}

/// Draw one particle as a translated/rotated/scaled triangle.
pub fn drawParticle(t: *Tetris, p: Particle) void {
    const model = mat4x4_identity.translateByVec(p.pos).rotate(p.angle, p.axis).scale(p.scale_w, p.scale_h, 0.0);
    const mvp = t.projection.mult(model);

    all_shaders.primitive.bind();
    all_shaders.primitive.setUniformVec4(all_shaders.primitive_uniform_color, p.color);
    all_shaders.primitive.setUniformMat4x4(all_shaders.primitive_uniform_mvp, mvp);

    c.glBindBuffer(c.GL_ARRAY_BUFFER, static_geometry.triangle_2d_vertex_buffer);
    c.glEnableVertexAttribArray(@intCast(c.GLuint, all_shaders.primitive_attrib_position));
    c.glVertexAttribPointer(@intCast(c.GLuint, all_shaders.primitive_attrib_position), 3, c.GL_FLOAT, c.GL_FALSE, 0, null);

    c.glDrawArrays(c.GL_TRIANGLE_STRIP, 0, 3);
}

/// Lay out `text` left-to-right starting at (left, top), scaled by `size`.
/// Only printable ASCII (<= '~') is accepted; anything else is an
/// invariant violation. The actual glyph draw is still commented out.
pub fn drawText(t: *Tetris, text: []const u8, left: i32, top: i32, size: f32) void {
    for (text) |col, i| {
        if (col <= '~') {
            const char_left = @intToFloat(f32, left) + @intToFloat(f32, i * font_char_width) * size;
            const model = mat4x4_identity.translate(char_left, @intToFloat(f32, top), 0.0).scale(size, size, 0.0);
            const mvp = t.projection.mult(model);
            //font.draw(all_shaders, col, mvp);
        } else {
            unreachable;
        }
    }
}
src/main.zig
const std = @import("std");
const warn = std.debug.print;
const Allocator = std.mem.Allocator;

const toot_lib = @import("./toot.zig");
const util = @import("./util.zig");

/// Linked list of toots, kept sorted by creation date (newest first)
/// via `sortedInsert`.
pub const TootList = SomeList(*toot_lib.Type);

/// Thin wrapper around std.TailQueue(T) with dedup lookup, author
/// filtering, and date-sorted insertion.
pub fn SomeList(comptime T: type) type {
    return struct {
        list: ListType,

        const Self = @This();
        const ListType = std.TailQueue(T);

        pub fn init() Self {
            return Self{
                .list = ListType{},
            };
        }

        /// Number of items, as tracked by the queue itself (O(1)).
        pub fn len(self: *Self) usize {
            return self.list.len;
        }

        pub fn first(self: *Self) ?*ListType.Node {
            return self.list.first;
        }

        /// True if an item with the same hash id is already present.
        pub fn contains(self: *Self, item: T) bool {
            var node = self.list.first;
            while (node) |listItem| : (node = listItem.next) {
                if (util.hashIdSame(T, listItem.data, item)) {
                    return true;
                }
            }
            return false;
        }

        /// Collect every toot whose author matches `acct`.
        /// Caller owns the returned slice and frees it with `allocator`.
        pub fn author(self: *Self, acct: []const u8, allocator: *Allocator) []T {
            var winners = std.ArrayList(T).init(allocator);
            var node = self.list.first;
            while (node) |listItem| : (node = listItem.next) {
                const toot = listItem.data;
                if (toot.author(acct)) {
                    winners.append(toot) catch unreachable;
                }
            }
            // toOwnedSlice shrinks the allocation to the exact element
            // count; returning `winners.items` directly would leak the
            // ArrayList's unused capacity.
            return winners.toOwnedSlice();
        }

        /// Insert `item` keeping the list ordered by its "created_at"
        /// string, newest (lexicographically greatest) first. The node is
        /// allocated from `allocator` and owned by the list.
        pub fn sortedInsert(self: *Self, item: T, allocator: *Allocator) void {
            const itemDate = item.get("created_at").?.String;
            const node = allocator.create(ListType.Node) catch unreachable;
            node.data = item;

            var current = self.list.first;
            while (current) |listItem| : (current = listItem.next) {
                const listItemDate = listItem.data.get("created_at").?.String;
                if (std.mem.order(u8, itemDate, listItemDate) == .gt) {
                    self.list.insertBefore(listItem, node);
                    return;
                }
            }
            // Older than everything already present (or list empty).
            self.list.append(node);
        }

        /// O(n) recount by walking the nodes; useful as a cross-check
        /// against `len()`.
        pub fn count(self: *Self) usize {
            var counter: usize = 0;
            var current = self.list.first;
            while (current) |item| : (current = item.next) {
                counter += 1;
            }
            return counter;
        }
    };
}
src/toot_list.zig
const std = @import("std");

pub fn build(b: *std.build.Builder) void {
    // Standard target options allows the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native.
    const target = b.standardTargetOptions(.{});

    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const mode = b.standardReleaseOptions();

    const exe = b.addExecutable("zoltan", "src/lua.zig");
    addLuaLibrary(exe, "");
    exe.setTarget(target);
    exe.setBuildMode(mode);
    exe.install();

    const run_cmd = exe.run();
    run_cmd.step.dependOn(b.getInstallStep());
    if (b.args) |args| {
        run_cmd.addArgs(args);
    }

    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);

    const exe_tests = b.addTest("src/tests.zig");
    // Lua
    addLuaLibrary(exe_tests, "");
    // exe_tests.setBuildMode(mode);

    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&exe_tests.step);
}

/// Add the vendored Lua 5.4.3 headers and C sources to `exe`.
/// `installPath` is prepended to every path, so a dependent project can
/// point at its own copy of this package (pass "" for an in-tree build).
pub fn addLuaLibrary(exe: *std.build.LibExeObjStep, installPath: []const u8) void {
    const builder = exe.builder;

    // Builder.fmt dupes each formatted path into the builder's arena, so
    // every string handed to the build steps stays valid. The previous
    // version printed all paths into one shared stack buffer, clobbering
    // each slice as the next path was formatted — it only worked because
    // the build steps happen to copy their argument immediately.
    exe.addIncludeDir(builder.fmt("{s}{s}", .{ installPath, "src/lua-5.4.3/src" }));

    // C compile flags
    const flags = [_][]const u8{
        "-std=c99",
        "-O2",
    };
    for (luaFiles) |luaFile| {
        exe.addCSourceFile(builder.fmt("{s}{s}", .{ installPath, luaFile }), &flags);
    }
    exe.linkLibC();
}

const luaFiles = [_][]const u8{
    "src/lua-5.4.3/src/lapi.c",
    "src/lua-5.4.3/src/lauxlib.c",
    "src/lua-5.4.3/src/lbaselib.c",
    "src/lua-5.4.3/src/lcode.c",
    "src/lua-5.4.3/src/lcorolib.c",
    "src/lua-5.4.3/src/lctype.c",
    "src/lua-5.4.3/src/ldblib.c",
    "src/lua-5.4.3/src/ldebug.c",
    "src/lua-5.4.3/src/ldo.c",
    "src/lua-5.4.3/src/ldump.c",
    "src/lua-5.4.3/src/lfunc.c",
    "src/lua-5.4.3/src/lgc.c",
    "src/lua-5.4.3/src/linit.c",
    "src/lua-5.4.3/src/liolib.c",
    "src/lua-5.4.3/src/llex.c",
    "src/lua-5.4.3/src/lmathlib.c",
    "src/lua-5.4.3/src/lmem.c",
    "src/lua-5.4.3/src/loadlib.c",
    "src/lua-5.4.3/src/lobject.c",
    "src/lua-5.4.3/src/lopcodes.c",
    "src/lua-5.4.3/src/loslib.c",
    "src/lua-5.4.3/src/lparser.c",
    "src/lua-5.4.3/src/lstate.c",
    "src/lua-5.4.3/src/lstring.c",
    "src/lua-5.4.3/src/lstrlib.c",
    "src/lua-5.4.3/src/ltable.c",
    "src/lua-5.4.3/src/ltablib.c",
    "src/lua-5.4.3/src/ltm.c",
    "src/lua-5.4.3/src/lundump.c",
    "src/lua-5.4.3/src/lutf8lib.c",
    "src/lua-5.4.3/src/lvm.c",
    "src/lua-5.4.3/src/lzio.c",
};
build.zig
test "zig fmt: change @typeOf to @TypeOf" { try testTransform( \\const a = @typeOf(@as(usize, 10)); \\ , \\const a = @TypeOf(@as(usize, 10)); \\ ); } test "zig fmt: comptime struct field" { try testCanonical( \\const Foo = struct { \\ a: i32, \\ comptime b: i32 = 1234, \\}; \\ ); } test "zig fmt: c pointer type" { try testCanonical( \\pub extern fn repro() [*c]const u8; \\ ); } test "zig fmt: asm expression with comptime content" { try testCanonical( \\comptime { \\ asm ("foo" ++ "bar"); \\} \\pub fn main() void { \\ asm volatile ("foo" ++ "bar"); \\ asm volatile ("foo" ++ "bar" \\ : [_] "" (x) \\ ); \\ asm volatile ("foo" ++ "bar" \\ : [_] "" (x) \\ : [_] "" (y) \\ ); \\ asm volatile ("foo" ++ "bar" \\ : [_] "" (x) \\ : [_] "" (y) \\ : "h", "e", "l", "l", "o" \\ ); \\} \\ ); } test "zig fmt: var struct field" { try testCanonical( \\pub const Pointer = struct { \\ sentinel: var, \\}; \\ ); } test "zig fmt: sentinel-terminated array type" { try testCanonical( \\pub fn cStrToPrefixedFileW(s: [*:0]const u8) ![PATH_MAX_WIDE:0]u16 { \\ return sliceToPrefixedFileW(mem.toSliceConst(u8, s)); \\} \\ ); } test "zig fmt: sentinel-terminated slice type" { try testCanonical( \\pub fn toSlice(self: Buffer) [:0]u8 { \\ return self.list.toSlice()[0..self.len()]; \\} \\ ); } test "zig fmt: anon literal in array" { try testCanonical( \\var arr: [2]Foo = .{ \\ .{ .a = 2 }, \\ .{ .b = 3 }, \\}; \\ ); } test "zig fmt: anon struct literal syntax" { try testCanonical( \\const x = .{ \\ .a = b, \\ .c = d, \\}; \\ ); } test "zig fmt: anon list literal syntax" { try testCanonical( \\const x = .{ a, b, c }; \\ ); } test "zig fmt: async function" { try testCanonical( \\pub const Server = struct { \\ handleRequestFn: async fn (*Server, *const std.net.Address, File) void, \\}; \\test "hi" { \\ var ptr = @ptrCast(async fn (i32) void, other); \\} \\ ); } test "zig fmt: whitespace fixes" { try testTransform("test \"\" {\r\n\tconst hi = x;\r\n}\n// zig fmt: off\ntest \"\"{\r\n\tconst a = b;}\r\n", 
\\test "" { \\ const hi = x; \\} \\// zig fmt: off \\test ""{ \\ const a = b;} \\ ); } test "zig fmt: while else err prong with no block" { try testCanonical( \\test "" { \\ const result = while (returnError()) |value| { \\ break value; \\ } else |err| @as(i32, 2); \\ expect(result == 2); \\} \\ ); } test "zig fmt: tagged union with enum values" { try testCanonical( \\const MultipleChoice2 = union(enum(u32)) { \\ Unspecified1: i32, \\ A: f32 = 20, \\ Unspecified2: void, \\ B: bool = 40, \\ Unspecified3: i32, \\ C: i8 = 60, \\ Unspecified4: void, \\ D: void = 1000, \\ Unspecified5: i32, \\}; \\ ); } test "zig fmt: allowzero pointer" { try testCanonical( \\const T = [*]allowzero const u8; \\ ); } test "zig fmt: enum literal" { try testCanonical( \\const x = .hi; \\ ); } test "zig fmt: enum literal inside array literal" { try testCanonical( \\test "enums in arrays" { \\ var colors = []Color{.Green}; \\ colors = []Colors{ .Green, .Cyan }; \\ colors = []Colors{ \\ .Grey, \\ .Green, \\ .Cyan, \\ }; \\} \\ ); } test "zig fmt: character literal larger than u8" { try testCanonical( \\const x = '\u{01f4a9}'; \\ ); } test "zig fmt: infix operator and then multiline string literal" { try testCanonical( \\const x = "" ++ \\ \\ hi \\; \\ ); } test "zig fmt: C pointers" { try testCanonical( \\const Ptr = [*c]i32; \\ ); } test "zig fmt: threadlocal" { try testCanonical( \\threadlocal var x: i32 = 1234; \\ ); } test "zig fmt: linksection" { try testCanonical( \\export var aoeu: u64 linksection(".text.derp") = 1234; \\export nakedcc fn _start() linksection(".text.boot") noreturn {} \\ ); } test "zig fmt: correctly move doc comments on struct fields" { try testTransform( \\pub const section_64 = extern struct { \\ sectname: [16]u8, /// name of this section \\ segname: [16]u8, /// segment this section goes in \\}; , \\pub const section_64 = extern struct { \\ /// name of this section \\ sectname: [16]u8, \\ /// segment this section goes in \\ segname: [16]u8, \\}; \\ ); } test "zig 
fmt: doc comments on param decl" { try testCanonical( \\pub const Allocator = struct { \\ shrinkFn: fn ( \\ self: *Allocator, \\ /// Guaranteed to be the same as what was returned from most recent call to \\ /// `allocFn`, `reallocFn`, or `shrinkFn`. \\ old_mem: []u8, \\ /// Guaranteed to be the same as what was returned from most recent call to \\ /// `allocFn`, `reallocFn`, or `shrinkFn`. \\ old_alignment: u29, \\ /// Guaranteed to be less than or equal to `old_mem.len`. \\ new_byte_count: usize, \\ /// Guaranteed to be less than or equal to `old_alignment`. \\ new_alignment: u29, \\ ) []u8, \\}; \\ ); } test "zig fmt: aligned struct field" { try testCanonical( \\pub const S = struct { \\ f: i32 align(32), \\}; \\ ); try testCanonical( \\pub const S = struct { \\ f: i32 align(32) = 1, \\}; \\ ); } test "zig fmt: preserve space between async fn definitions" { try testCanonical( \\async fn a() void {} \\ \\async fn b() void {} \\ ); } test "zig fmt: comment to disable/enable zig fmt first" { try testCanonical( \\// Test trailing comma syntax \\// zig fmt: off \\ \\const struct_trailing_comma = struct { x: i32, y: i32, }; ); } test "zig fmt: comment to disable/enable zig fmt" { try testTransform( \\const a = b; \\// zig fmt: off \\const c = d; \\// zig fmt: on \\const e = f; , \\const a = b; \\// zig fmt: off \\const c = d; \\// zig fmt: on \\const e = f; \\ ); } test "zig fmt: line comment following 'zig fmt: off'" { try testCanonical( \\// zig fmt: off \\// Test \\const e = f; ); } test "zig fmt: doc comment following 'zig fmt: off'" { try testCanonical( \\// zig fmt: off \\/// test \\const e = f; ); } test "zig fmt: line and doc comment following 'zig fmt: off'" { try testCanonical( \\// zig fmt: off \\// test 1 \\/// test 2 \\const e = f; ); } test "zig fmt: doc and line comment following 'zig fmt: off'" { try testCanonical( \\// zig fmt: off \\/// test 1 \\// test 2 \\const e = f; ); } test "zig fmt: alternating 'zig fmt: off' and 'zig fmt: on'" { try 
testCanonical( \\// zig fmt: off \\// zig fmt: on \\// zig fmt: off \\const e = f; \\// zig fmt: off \\// zig fmt: on \\// zig fmt: off \\const a = b; \\// zig fmt: on \\const c = d; \\// zig fmt: on \\ ); } test "zig fmt: line comment following 'zig fmt: on'" { try testCanonical( \\// zig fmt: off \\const e = f; \\// zig fmt: on \\// test \\const e = f; \\ ); } test "zig fmt: doc comment following 'zig fmt: on'" { try testCanonical( \\// zig fmt: off \\const e = f; \\// zig fmt: on \\/// test \\const e = f; \\ ); } test "zig fmt: line and doc comment following 'zig fmt: on'" { try testCanonical( \\// zig fmt: off \\const e = f; \\// zig fmt: on \\// test1 \\/// test2 \\const e = f; \\ ); } test "zig fmt: doc and line comment following 'zig fmt: on'" { try testCanonical( \\// zig fmt: off \\const e = f; \\// zig fmt: on \\/// test1 \\// test2 \\const e = f; \\ ); } test "zig fmt: pointer of unknown length" { try testCanonical( \\fn foo(ptr: [*]u8) void {} \\ ); } test "zig fmt: spaces around slice operator" { try testCanonical( \\var a = b[c..d]; \\var a = b[c + 1 .. d]; \\var a = b[c + 1 ..]; \\var a = b[c .. 
d + 1]; \\var a = b[c.a..d.e]; \\ ); } test "zig fmt: async call in if condition" { try testCanonical( \\comptime { \\ if (async b()) { \\ a(); \\ } \\} \\ ); } test "zig fmt: 2nd arg multiline string" { try testCanonical( \\comptime { \\ cases.addAsm("hello world linux x86_64", \\ \\.text \\ , "Hello, world!\n"); \\} \\ ); } test "zig fmt: if condition wraps" { try testTransform( \\comptime { \\ if (cond and \\ cond) { \\ return x; \\ } \\ while (cond and \\ cond) { \\ return x; \\ } \\ if (a == b and \\ c) { \\ a = b; \\ } \\ while (a == b and \\ c) { \\ a = b; \\ } \\ if ((cond and \\ cond)) { \\ return x; \\ } \\ while ((cond and \\ cond)) { \\ return x; \\ } \\ var a = if (a) |*f| x: { \\ break :x &a.b; \\ } else |err| err; \\} , \\comptime { \\ if (cond and \\ cond) \\ { \\ return x; \\ } \\ while (cond and \\ cond) \\ { \\ return x; \\ } \\ if (a == b and \\ c) \\ { \\ a = b; \\ } \\ while (a == b and \\ c) \\ { \\ a = b; \\ } \\ if ((cond and \\ cond)) \\ { \\ return x; \\ } \\ while ((cond and \\ cond)) \\ { \\ return x; \\ } \\ var a = if (a) |*f| x: { \\ break :x &a.b; \\ } else |err| err; \\} \\ ); } test "zig fmt: if condition has line break but must not wrap" { try testCanonical( \\comptime { \\ if (self.user_input_options.put(name, UserInputOption{ \\ .name = name, \\ .used = false, \\ }) catch unreachable) |*prev_value| { \\ foo(); \\ bar(); \\ } \\ if (put( \\ a, \\ b, \\ )) { \\ foo(); \\ } \\} \\ ); } test "zig fmt: same-line doc comment on variable declaration" { try testTransform( \\pub const MAP_ANONYMOUS = 0x1000; /// allocated from memory, swap space \\pub const MAP_FILE = 0x0000; /// map from file (default) \\ \\pub const EMEDIUMTYPE = 124; /// Wrong medium type \\ \\// nameserver query return codes \\pub const ENSROK = 0; /// DNS server returned answer with no data , \\/// allocated from memory, swap space \\pub const MAP_ANONYMOUS = 0x1000; \\/// map from file (default) \\pub const MAP_FILE = 0x0000; \\ \\/// Wrong medium type \\pub const 
EMEDIUMTYPE = 124; \\ \\// nameserver query return codes \\/// DNS server returned answer with no data \\pub const ENSROK = 0; \\ ); } test "zig fmt: if-else with comment before else" { try testCanonical( \\comptime { \\ // cexp(finite|nan +- i inf|nan) = nan + i nan \\ if ((hx & 0x7fffffff) != 0x7f800000) { \\ return Complex(f32).new(y - y, y - y); \\ } // cexp(-inf +- i inf|nan) = 0 + i0 \\ else if (hx & 0x80000000 != 0) { \\ return Complex(f32).new(0, 0); \\ } // cexp(+inf +- i inf|nan) = inf + i nan \\ else { \\ return Complex(f32).new(x, y - y); \\ } \\} \\ ); } test "zig fmt: if nested" { try testCanonical( \\pub fn foo() void { \\ return if ((aInt & bInt) >= 0) \\ if (aInt < bInt) \\ GE_LESS \\ else if (aInt == bInt) \\ GE_EQUAL \\ else \\ GE_GREATER \\ else if (aInt > bInt) \\ GE_LESS \\ else if (aInt == bInt) \\ GE_EQUAL \\ else \\ GE_GREATER; \\} \\ ); } test "zig fmt: respect line breaks in if-else" { try testCanonical( \\comptime { \\ return if (cond) a else b; \\ return if (cond) \\ a \\ else \\ b; \\ return if (cond) \\ a \\ else if (cond) \\ b \\ else \\ c; \\} \\ ); } test "zig fmt: respect line breaks after infix operators" { try testCanonical( \\comptime { \\ self.crc = \\ lookup_tables[0][p[7]] ^ \\ lookup_tables[1][p[6]] ^ \\ lookup_tables[2][p[5]] ^ \\ lookup_tables[3][p[4]] ^ \\ lookup_tables[4][@truncate(u8, self.crc >> 24)] ^ \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^ \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^ \\ lookup_tables[7][@truncate(u8, self.crc >> 0)]; \\} \\ ); } test "zig fmt: fn decl with trailing comma" { try testTransform( \\fn foo(a: i32, b: i32,) void {} , \\fn foo( \\ a: i32, \\ b: i32, \\) void {} \\ ); } test "zig fmt: enum decl with no trailing comma" { try testTransform( \\const StrLitKind = enum {Normal, C}; , \\const StrLitKind = enum { \\ Normal, \\ C, \\}; \\ ); } test "zig fmt: switch comment before prong" { try testCanonical( \\comptime { \\ switch (a) { \\ // hi \\ 0 => {}, \\ } \\} \\ ); } test 
"zig fmt: struct literal no trailing comma" { try testTransform( \\const a = foo{ .x = 1, .y = 2 }; \\const a = foo{ .x = 1, \\ .y = 2 }; , \\const a = foo{ .x = 1, .y = 2 }; \\const a = foo{ \\ .x = 1, \\ .y = 2, \\}; \\ ); } test "zig fmt: struct literal containing a multiline expression" { try testTransform( \\const a = A{ .x = if (f1()) 10 else 20 }; \\const a = A{ .x = if (f1()) 10 else 20, }; \\const a = A{ .x = if (f1()) \\ 10 else 20 }; \\const a = A{ .x = if (f1()) 10 else 20, .y = f2() + 100 }; \\const a = A{ .x = if (f1()) 10 else 20, .y = f2() + 100, }; \\const a = A{ .x = if (f1()) \\ 10 else 20}; \\const a = A{ .x = switch(g) {0 => "ok", else => "no"} }; \\ , \\const a = A{ .x = if (f1()) 10 else 20 }; \\const a = A{ \\ .x = if (f1()) 10 else 20, \\}; \\const a = A{ \\ .x = if (f1()) \\ 10 \\ else \\ 20, \\}; \\const a = A{ .x = if (f1()) 10 else 20, .y = f2() + 100 }; \\const a = A{ \\ .x = if (f1()) 10 else 20, \\ .y = f2() + 100, \\}; \\const a = A{ \\ .x = if (f1()) \\ 10 \\ else \\ 20, \\}; \\const a = A{ \\ .x = switch (g) { \\ 0 => "ok", \\ else => "no", \\ }, \\}; \\ ); } test "zig fmt: array literal with hint" { try testTransform( \\const a = []u8{ \\ 1, 2, // \\ 3, \\ 4, \\ 5, \\ 6, \\ 7 }; \\const a = []u8{ \\ 1, 2, // \\ 3, \\ 4, \\ 5, \\ 6, \\ 7, 8 }; \\const a = []u8{ \\ 1, 2, // \\ 3, \\ 4, \\ 5, \\ 6, // blah \\ 7, 8 }; \\const a = []u8{ \\ 1, 2, // \\ 3, // \\ 4, \\ 5, \\ 6, \\ 7 }; \\const a = []u8{ \\ 1, \\ 2, \\ 3, 4, // \\ 5, 6, // \\ 7, 8, // \\}; , \\const a = []u8{ \\ 1, 2, \\ 3, 4, \\ 5, 6, \\ 7, \\}; \\const a = []u8{ \\ 1, 2, \\ 3, 4, \\ 5, 6, \\ 7, 8, \\}; \\const a = []u8{ \\ 1, 2, \\ 3, 4, \\ 5, 6, // blah \\ 7, 8, \\}; \\const a = []u8{ \\ 1, 2, \\ 3, // \\ 4, \\ 5, 6, \\ 7, \\}; \\const a = []u8{ \\ 1, \\ 2, \\ 3, \\ 4, \\ 5, \\ 6, \\ 7, \\ 8, \\}; \\ ); } test "zig fmt: array literal veritical column alignment" { try testTransform( \\const a = []u8{ \\ 1000, 200, \\ 30, 4, \\ 50000, 60 \\}; \\const a = []u8{0, 1, 2, 3, 
40, \\ 4,5,600,7, \\ 80, \\ 9, 10, 11, 0, 13, 14, 15}; \\ , \\const a = []u8{ \\ 1000, 200, \\ 30, 4, \\ 50000, 60, \\}; \\const a = []u8{ \\ 0, 1, 2, 3, 40, \\ 4, 5, 600, 7, 80, \\ 9, 10, 11, 0, 13, \\ 14, 15, \\}; \\ ); } test "zig fmt: multiline string with backslash at end of line" { try testCanonical( \\comptime { \\ err( \\ \\\ \\ ); \\} \\ ); } test "zig fmt: multiline string parameter in fn call with trailing comma" { try testCanonical( \\fn foo() void { \\ try stdout.print( \\ \\ZIG_CMAKE_BINARY_DIR {} \\ \\ZIG_C_HEADER_FILES {} \\ \\ZIG_DIA_GUIDS_LIB {} \\ \\ \\ , \\ std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR), \\ std.cstr.toSliceConst(c.ZIG_CXX_COMPILER), \\ std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB), \\ ); \\} \\ ); } test "zig fmt: trailing comma on fn call" { try testCanonical( \\comptime { \\ var module = try Module.create( \\ allocator, \\ zig_lib_dir, \\ full_cache_dir, \\ ); \\} \\ ); } test "zig fmt: multi line arguments without last comma" { try testTransform( \\pub fn foo( \\ a: usize, \\ b: usize, \\ c: usize, \\ d: usize \\) usize { \\ return a + b + c + d; \\} \\ , \\pub fn foo(a: usize, b: usize, c: usize, d: usize) usize { \\ return a + b + c + d; \\} \\ ); } test "zig fmt: empty block with only comment" { try testCanonical( \\comptime { \\ { \\ // comment \\ } \\} \\ ); } test "zig fmt: no trailing comma on struct decl" { try testTransform( \\const RoundParam = struct { \\ k: usize, s: u32, t: u32 \\}; , \\const RoundParam = struct { \\ k: usize, \\ s: u32, \\ t: u32, \\}; \\ ); } test "zig fmt: extra newlines at the end" { try testTransform( \\const a = b; \\ \\ \\ , \\const a = b; \\ ); } test "zig fmt: simple asm" { try testTransform( \\comptime { \\ asm volatile ( \\ \\.globl aoeu; \\ \\.type aoeu, @function; \\ \\.set aoeu, derp; \\ ); \\ \\ asm ("not real assembly" \\ :[a] "x" (x),); \\ asm ("not real assembly" \\ :[a] "x" (->i32),:[a] "x" (1),); \\ asm ("still not real assembly" \\ :::"a","b",); \\} , \\comptime { \\ asm volatile 
( \\ \\.globl aoeu; \\ \\.type aoeu, @function; \\ \\.set aoeu, derp; \\ ); \\ \\ asm ("not real assembly" \\ : [a] "x" (x) \\ ); \\ asm ("not real assembly" \\ : [a] "x" (-> i32) \\ : [a] "x" (1) \\ ); \\ asm ("still not real assembly" \\ : \\ : \\ : "a", "b" \\ ); \\} \\ ); } test "zig fmt: nested struct literal with one item" { try testCanonical( \\const a = foo{ \\ .item = bar{ .a = b }, \\}; \\ ); } test "zig fmt: switch cases trailing comma" { try testTransform( \\fn switch_cases(x: i32) void { \\ switch (x) { \\ 1,2,3 => {}, \\ 4,5, => {}, \\ 6... 8, => {}, \\ else => {}, \\ } \\} , \\fn switch_cases(x: i32) void { \\ switch (x) { \\ 1, 2, 3 => {}, \\ 4, \\ 5, \\ => {}, \\ 6...8 => {}, \\ else => {}, \\ } \\} \\ ); } test "zig fmt: slice align" { try testCanonical( \\const A = struct { \\ items: []align(A) T, \\}; \\ ); } test "zig fmt: add trailing comma to array literal" { try testTransform( \\comptime { \\ return []u16{'m', 's', 'y', 's', '-' // hi \\ }; \\ return []u16{'m', 's', 'y', 's', \\ '-'}; \\ return []u16{'m', 's', 'y', 's', '-'}; \\} , \\comptime { \\ return []u16{ \\ 'm', 's', 'y', 's', '-', // hi \\ }; \\ return []u16{ \\ 'm', 's', 'y', 's', \\ '-', \\ }; \\ return []u16{ 'm', 's', 'y', 's', '-' }; \\} \\ ); } test "zig fmt: first thing in file is line comment" { try testCanonical( \\// Introspection and determination of system libraries needed by zig. \\ \\// Introspection and determination of system libraries needed by zig. 
\\ \\const std = @import("std"); \\ ); } test "zig fmt: line comment after doc comment" { try testCanonical( \\/// doc comment \\// line comment \\fn foo() void {} \\ ); } test "zig fmt: float literal with exponent" { try testCanonical( \\test "bit field alignment" { \\ assert(@TypeOf(&blah.b) == *align(1:3:6) const u3); \\} \\ ); } test "zig fmt: float literal with exponent" { try testCanonical( \\test "aoeu" { \\ switch (state) { \\ TermState.Start => switch (c) { \\ '\x1b' => state = TermState.Escape, \\ else => try out.writeByte(c), \\ }, \\ } \\} \\ ); } test "zig fmt: float literal with exponent" { try testCanonical( \\pub const f64_true_min = 4.94065645841246544177e-324; \\const threshold = 0x1.a827999fcef32p+1022; \\ ); } test "zig fmt: if-else end of comptime" { try testCanonical( \\comptime { \\ if (a) { \\ b(); \\ } else { \\ b(); \\ } \\} \\ ); } test "zig fmt: nested blocks" { try testCanonical( \\comptime { \\ { \\ { \\ { \\ a(); \\ } \\ } \\ } \\} \\ ); } test "zig fmt: block with same line comment after end brace" { try testCanonical( \\comptime { \\ { \\ b(); \\ } // comment \\} \\ ); } test "zig fmt: statements with comment between" { try testCanonical( \\comptime { \\ a = b; \\ // comment \\ a = b; \\} \\ ); } test "zig fmt: statements with empty line between" { try testCanonical( \\comptime { \\ a = b; \\ \\ a = b; \\} \\ ); } test "zig fmt: ptr deref operator and unwrap optional operator" { try testCanonical( \\const a = b.*; \\const a = b.?; \\ ); } test "zig fmt: comment after if before another if" { try testCanonical( \\test "aoeu" { \\ // comment \\ if (x) { \\ bar(); \\ } \\} \\ \\test "aoeu" { \\ if (x) { \\ foo(); \\ } \\ // comment \\ if (x) { \\ bar(); \\ } \\} \\ ); } test "zig fmt: line comment between if block and else keyword" { try testCanonical( \\test "aoeu" { \\ // cexp(finite|nan +- i inf|nan) = nan + i nan \\ if ((hx & 0x7fffffff) != 0x7f800000) { \\ return Complex(f32).new(y - y, y - y); \\ } \\ // cexp(-inf +- i inf|nan) = 
0 + i0 \\ else if (hx & 0x80000000 != 0) { \\ return Complex(f32).new(0, 0); \\ } \\ // cexp(+inf +- i inf|nan) = inf + i nan \\ // another comment \\ else { \\ return Complex(f32).new(x, y - y); \\ } \\} \\ ); } test "zig fmt: same line comments in expression" { try testCanonical( \\test "aoeu" { \\ const x = ( // a \\ 0 // b \\ ); // c \\} \\ ); } test "zig fmt: add comma on last switch prong" { try testTransform( \\test "aoeu" { \\switch (self.init_arg_expr) { \\ InitArg.Type => |t| { }, \\ InitArg.None, \\ InitArg.Enum => { } \\} \\ switch (self.init_arg_expr) { \\ InitArg.Type => |t| { }, \\ InitArg.None, \\ InitArg.Enum => { }//line comment \\ } \\} , \\test "aoeu" { \\ switch (self.init_arg_expr) { \\ InitArg.Type => |t| {}, \\ InitArg.None, InitArg.Enum => {}, \\ } \\ switch (self.init_arg_expr) { \\ InitArg.Type => |t| {}, \\ InitArg.None, InitArg.Enum => {}, //line comment \\ } \\} \\ ); } test "zig fmt: same-line comment after a statement" { try testCanonical( \\test "" { \\ a = b; \\ debug.assert(H.digest_size <= H.block_size); // HMAC makes this assumption \\ a = b; \\} \\ ); } test "zig fmt: same-line comment after var decl in struct" { try testCanonical( \\pub const vfs_cap_data = extern struct { \\ const Data = struct {}; // when on disk. 
\\}; \\ ); } test "zig fmt: same-line comment after field decl" { try testCanonical( \\pub const dirent = extern struct { \\ d_name: u8, \\ d_name: u8, // comment 1 \\ d_name: u8, \\ d_name: u8, // comment 2 \\ d_name: u8, \\}; \\ ); } test "zig fmt: same-line comment after switch prong" { try testCanonical( \\test "" { \\ switch (err) { \\ error.PathAlreadyExists => {}, // comment 2 \\ else => return err, // comment 1 \\ } \\} \\ ); } test "zig fmt: same-line comment after non-block if expression" { try testCanonical( \\comptime { \\ if (sr > n_uword_bits - 1) // d > r \\ return 0; \\} \\ ); } test "zig fmt: same-line comment on comptime expression" { try testCanonical( \\test "" { \\ comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer to absInt \\} \\ ); } test "zig fmt: switch with empty body" { try testCanonical( \\test "" { \\ foo() catch |err| switch (err) {}; \\} \\ ); } test "zig fmt: line comments in struct initializer" { try testCanonical( \\fn foo() void { \\ return Self{ \\ .a = b, \\ \\ // Initialize these two fields to buffer_size so that \\ // in `readFn` we treat the state as being able to read \\ .start_index = buffer_size, \\ .end_index = buffer_size, \\ \\ // middle \\ \\ .a = b, \\ \\ // end \\ }; \\} \\ ); } test "zig fmt: first line comment in struct initializer" { try testCanonical( \\pub async fn acquire(self: *Self) HeldLock { \\ return HeldLock{ \\ // guaranteed allocation elision \\ .held = self.lock.acquire(), \\ .value = &self.private_data, \\ }; \\} \\ ); } test "zig fmt: doc comments before struct field" { try testCanonical( \\pub const Allocator = struct { \\ /// Allocate byte_count bytes and return them in a slice, with the \\ /// slice's pointer aligned at least to alignment bytes. 
\\ allocFn: fn () void, \\}; \\ ); } test "zig fmt: error set declaration" { try testCanonical( \\const E = error{ \\ A, \\ B, \\ \\ C, \\}; \\ \\const Error = error{ \\ /// no more memory \\ OutOfMemory, \\}; \\ \\const Error = error{ \\ /// no more memory \\ OutOfMemory, \\ \\ /// another \\ Another, \\ \\ // end \\}; \\ \\const Error = error{OutOfMemory}; \\const Error = error{}; \\ ); } test "zig fmt: union(enum(u32)) with assigned enum values" { try testCanonical( \\const MultipleChoice = union(enum(u32)) { \\ A = 20, \\ B = 40, \\ C = 60, \\ D = 1000, \\}; \\ ); } test "zig fmt: resume from suspend block" { try testCanonical( \\fn foo() void { \\ suspend { \\ resume @frame(); \\ } \\} \\ ); } test "zig fmt: comments before error set decl" { try testCanonical( \\const UnexpectedError = error{ \\ /// The Operating System returned an undocumented error code. \\ Unexpected, \\ // another \\ Another, \\ \\ // in between \\ \\ // at end \\}; \\ ); } test "zig fmt: comments before switch prong" { try testCanonical( \\test "" { \\ switch (err) { \\ error.PathAlreadyExists => continue, \\ \\ // comment 1 \\ \\ // comment 2 \\ else => return err, \\ // at end \\ } \\} \\ ); } test "zig fmt: comments before var decl in struct" { try testCanonical( \\pub const vfs_cap_data = extern struct { \\ // All of these are mandated as little endian \\ // when on disk. \\ const Data = struct { \\ permitted: u32, \\ inheritable: u32, \\ }; \\ \\ // in between \\ \\ /// All of these are mandated as little endian \\ /// when on disk. \\ const Data = struct { \\ permitted: u32, \\ inheritable: u32, \\ }; \\ \\ // at end \\}; \\ ); } test "zig fmt: array literal with 1 item on 1 line" { try testCanonical( \\var s = []const u64{0} ** 25; \\ ); } test "zig fmt: comments before global variables" { try testCanonical( \\/// Foo copies keys and values before they go into the map, and \\/// frees them when they get removed. 
\\pub const Foo = struct {}; \\ ); } test "zig fmt: comments in statements" { try testCanonical( \\test "std" { \\ // statement comment \\ _ = @import("foo/bar.zig"); \\ \\ // middle \\ // middle2 \\ \\ // end \\} \\ ); } test "zig fmt: comments before test decl" { try testCanonical( \\/// top level doc comment \\test "hi" {} \\ \\// top level normal comment \\test "hi" {} \\ \\// middle \\ \\// end \\ ); } test "zig fmt: preserve spacing" { try testCanonical( \\const std = @import("std"); \\ \\pub fn main() !void { \\ var stdout_file = std.io.getStdOut; \\ var stdout_file = std.io.getStdOut; \\ \\ var stdout_file = std.io.getStdOut; \\ var stdout_file = std.io.getStdOut; \\} \\ ); } test "zig fmt: return types" { try testCanonical( \\pub fn main() !void {} \\pub fn main() var {} \\pub fn main() i32 {} \\ ); } test "zig fmt: imports" { try testCanonical( \\const std = @import("std"); \\const std = @import(); \\ ); } test "zig fmt: global declarations" { try testCanonical( \\const a = b; \\pub const a = b; \\var a = b; \\pub var a = b; \\const a: i32 = b; \\pub const a: i32 = b; \\var a: i32 = b; \\pub var a: i32 = b; \\extern const a: i32 = b; \\pub extern const a: i32 = b; \\extern var a: i32 = b; \\pub extern var a: i32 = b; \\extern "a" const a: i32 = b; \\pub extern "a" const a: i32 = b; \\extern "a" var a: i32 = b; \\pub extern "a" var a: i32 = b; \\ ); } test "zig fmt: extern declaration" { try testCanonical( \\extern var foo: c_int; \\ ); } test "zig fmt: alignment" { try testCanonical( \\var foo: c_int align(1); \\ ); } test "zig fmt: C main" { try testCanonical( \\fn main(argc: c_int, argv: **u8) c_int { \\ const a = b; \\} \\ ); } test "zig fmt: return" { try testCanonical( \\fn foo(argc: c_int, argv: **u8) c_int { \\ return 0; \\} \\ \\fn bar() void { \\ return; \\} \\ ); } test "zig fmt: pointer attributes" { try testCanonical( \\extern fn f1(s: *align(*u8) u8) c_int; \\extern fn f2(s: **align(1) *const *volatile u8) c_int; \\extern fn f3(s: *align(1) 
const *align(1) volatile *const volatile u8) c_int; \\extern fn f4(s: *align(1) const volatile u8) c_int; \\extern fn f5(s: [*:0]align(1) const volatile u8) c_int; \\ ); } test "zig fmt: slice attributes" { try testCanonical( \\extern fn f1(s: *align(*u8) u8) c_int; \\extern fn f2(s: **align(1) *const *volatile u8) c_int; \\extern fn f3(s: *align(1) const *align(1) volatile *const volatile u8) c_int; \\extern fn f4(s: *align(1) const volatile u8) c_int; \\extern fn f5(s: [*:0]align(1) const volatile u8) c_int; \\ ); } test "zig fmt: test declaration" { try testCanonical( \\test "test name" { \\ const a = 1; \\ var b = 1; \\} \\ ); } test "zig fmt: infix operators" { try testCanonical( \\test "infix operators" { \\ var i = undefined; \\ i = 2; \\ i *= 2; \\ i |= 2; \\ i ^= 2; \\ i <<= 2; \\ i >>= 2; \\ i &= 2; \\ i *= 2; \\ i *%= 2; \\ i -= 2; \\ i -%= 2; \\ i += 2; \\ i +%= 2; \\ i /= 2; \\ i %= 2; \\ _ = i == i; \\ _ = i != i; \\ _ = i != i; \\ _ = i.i; \\ _ = i || i; \\ _ = i!i; \\ _ = i ** i; \\ _ = i ++ i; \\ _ = i orelse i; \\ _ = i % i; \\ _ = i / i; \\ _ = i *% i; \\ _ = i * i; \\ _ = i -% i; \\ _ = i - i; \\ _ = i +% i; \\ _ = i + i; \\ _ = i << i; \\ _ = i >> i; \\ _ = i & i; \\ _ = i ^ i; \\ _ = i | i; \\ _ = i >= i; \\ _ = i <= i; \\ _ = i > i; \\ _ = i < i; \\ _ = i and i; \\ _ = i or i; \\} \\ ); } test "zig fmt: precedence" { try testCanonical( \\test "precedence" { \\ a!b(); \\ (a!b)(); \\ !a!b; \\ !(a!b); \\ !a{}; \\ !(a{}); \\ a + b{}; \\ (a + b){}; \\ a << b + c; \\ (a << b) + c; \\ a & b << c; \\ (a & b) << c; \\ a ^ b & c; \\ (a ^ b) & c; \\ a | b ^ c; \\ (a | b) ^ c; \\ a == b | c; \\ (a == b) | c; \\ a and b == c; \\ (a and b) == c; \\ a or b and c; \\ (a or b) and c; \\ (a or b) and c; \\} \\ ); } test "zig fmt: prefix operators" { try testCanonical( \\test "prefix operators" { \\ try return --%~!&0; \\} \\ ); } test "zig fmt: call expression" { try testCanonical( \\test "test calls" { \\ a(); \\ a(1); \\ a(1, 2); \\ a(1, 2) + a(1, 2); \\} \\ 
); } test "zig fmt: var type" { try testCanonical( \\fn print(args: var) var {} \\ ); } test "zig fmt: functions" { try testCanonical( \\extern fn puts(s: *const u8) c_int; \\extern "c" fn puts(s: *const u8) c_int; \\export fn puts(s: *const u8) c_int; \\inline fn puts(s: *const u8) c_int; \\noinline fn puts(s: *const u8) c_int; \\pub extern fn puts(s: *const u8) c_int; \\pub extern "c" fn puts(s: *const u8) c_int; \\pub export fn puts(s: *const u8) c_int; \\pub inline fn puts(s: *const u8) c_int; \\pub noinline fn puts(s: *const u8) c_int; \\pub extern fn puts(s: *const u8) align(2 + 2) c_int; \\pub extern "c" fn puts(s: *const u8) align(2 + 2) c_int; \\pub export fn puts(s: *const u8) align(2 + 2) c_int; \\pub inline fn puts(s: *const u8) align(2 + 2) c_int; \\pub noinline fn puts(s: *const u8) align(2 + 2) c_int; \\ ); } test "zig fmt: multiline string" { try testCanonical( \\test "" { \\ const s1 = \\ \\one \\ \\two) \\ \\three \\ ; \\ const s3 = // hi \\ \\one \\ \\two) \\ \\three \\ ; \\} \\ ); } test "zig fmt: values" { try testCanonical( \\test "values" { \\ 1; \\ 1.0; \\ "string"; \\ 'c'; \\ true; \\ false; \\ null; \\ undefined; \\ anyerror; \\ this; \\ unreachable; \\} \\ ); } test "zig fmt: indexing" { try testCanonical( \\test "test index" { \\ a[0]; \\ a[0 + 5]; \\ a[0..]; \\ a[0..5]; \\ a[a[0]]; \\ a[a[0..]]; \\ a[a[0..5]]; \\ a[a[0]..]; \\ a[a[0..5]..]; \\ a[a[0]..a[0]]; \\ a[a[0..5]..a[0]]; \\ a[a[0..5]..a[0..5]]; \\} \\ ); } test "zig fmt: struct declaration" { try testCanonical( \\const S = struct { \\ const Self = @This(); \\ f1: u8, \\ f3: u8, \\ \\ fn method(self: *Self) Self { \\ return self.*; \\ } \\ \\ f2: u8, \\}; \\ \\const Ps = packed struct { \\ a: u8, \\ b: u8, \\ \\ c: u8, \\}; \\ \\const Es = extern struct { \\ a: u8, \\ b: u8, \\ \\ c: u8, \\}; \\ ); } test "zig fmt: enum declaration" { try testCanonical( \\const E = enum { \\ Ok, \\ SomethingElse = 0, \\}; \\ \\const E2 = enum(u8) { \\ Ok, \\ SomethingElse = 255, \\ 
SomethingThird, \\}; \\ \\const Ee = extern enum { \\ Ok, \\ SomethingElse, \\ SomethingThird, \\}; \\ \\const Ep = packed enum { \\ Ok, \\ SomethingElse, \\ SomethingThird, \\}; \\ ); } test "zig fmt: union declaration" { try testCanonical( \\const U = union { \\ Int: u8, \\ Float: f32, \\ None, \\ Bool: bool, \\}; \\ \\const Ue = union(enum) { \\ Int: u8, \\ Float: f32, \\ None, \\ Bool: bool, \\}; \\ \\const E = enum { \\ Int, \\ Float, \\ None, \\ Bool, \\}; \\ \\const Ue2 = union(E) { \\ Int: u8, \\ Float: f32, \\ None, \\ Bool: bool, \\}; \\ \\const Eu = extern union { \\ Int: u8, \\ Float: f32, \\ None, \\ Bool: bool, \\}; \\ ); } test "zig fmt: arrays" { try testCanonical( \\test "test array" { \\ const a: [2]u8 = [2]u8{ \\ 1, \\ 2, \\ }; \\ const a: [2]u8 = []u8{ \\ 1, \\ 2, \\ }; \\ const a: [0]u8 = []u8{}; \\ const x: [4:0]u8 = undefined; \\} \\ ); } test "zig fmt: container initializers" { try testCanonical( \\const a0 = []u8{}; \\const a1 = []u8{1}; \\const a2 = []u8{ \\ 1, \\ 2, \\ 3, \\ 4, \\}; \\const s0 = S{}; \\const s1 = S{ .a = 1 }; \\const s2 = S{ \\ .a = 1, \\ .b = 2, \\}; \\ ); } test "zig fmt: catch" { try testCanonical( \\test "catch" { \\ const a: anyerror!u8 = 0; \\ _ = a catch return; \\ _ = a catch |err| return; \\} \\ ); } test "zig fmt: blocks" { try testCanonical( \\test "blocks" { \\ { \\ const a = 0; \\ const b = 0; \\ } \\ \\ blk: { \\ const a = 0; \\ const b = 0; \\ } \\ \\ const r = blk: { \\ const a = 0; \\ const b = 0; \\ }; \\} \\ ); } test "zig fmt: switch" { try testCanonical( \\test "switch" { \\ switch (0) { \\ 0 => {}, \\ 1 => unreachable, \\ 2, 3 => {}, \\ 4...7 => {}, \\ 1 + 4 * 3 + 22 => {}, \\ else => { \\ const a = 1; \\ const b = a; \\ }, \\ } \\ \\ const res = switch (0) { \\ 0 => 0, \\ 1 => 2, \\ 1 => a = 4, \\ else => 4, \\ }; \\ \\ const Union = union(enum) { \\ Int: i64, \\ Float: f64, \\ }; \\ \\ switch (u) { \\ Union.Int => |int| {}, \\ Union.Float => |*float| unreachable, \\ } \\} \\ ); } test "zig fmt: 
while" { try testCanonical( \\test "while" { \\ while (10 < 1) unreachable; \\ \\ while (10 < 1) unreachable else unreachable; \\ \\ while (10 < 1) { \\ unreachable; \\ } \\ \\ while (10 < 1) \\ unreachable; \\ \\ var i: usize = 0; \\ while (i < 10) : (i += 1) { \\ continue; \\ } \\ \\ i = 0; \\ while (i < 10) : (i += 1) \\ continue; \\ \\ i = 0; \\ var j: usize = 0; \\ while (i < 10) : ({ \\ i += 1; \\ j += 1; \\ }) { \\ continue; \\ } \\ \\ var a: ?u8 = 2; \\ while (a) |v| : (a = null) { \\ continue; \\ } \\ \\ while (a) |v| : (a = null) \\ unreachable; \\ \\ label: while (10 < 0) { \\ unreachable; \\ } \\ \\ const res = while (0 < 10) { \\ break 7; \\ } else { \\ unreachable; \\ }; \\ \\ const res = while (0 < 10) \\ break 7 \\ else \\ unreachable; \\ \\ var a: anyerror!u8 = 0; \\ while (a) |v| { \\ a = error.Err; \\ } else |err| { \\ i = 1; \\ } \\ \\ comptime var k: usize = 0; \\ inline while (i < 10) : (i += 1) \\ j += 2; \\} \\ ); } test "zig fmt: for" { try testCanonical( \\test "for" { \\ for (a) |v| { \\ continue; \\ } \\ \\ for (a) |v| continue; \\ \\ for (a) |v| continue else return; \\ \\ for (a) |v| { \\ continue; \\ } else return; \\ \\ for (a) |v| continue else { \\ return; \\ } \\ \\ for (a) |v| \\ continue \\ else \\ return; \\ \\ for (a) |v| \\ continue; \\ \\ for (a) |*v| \\ continue; \\ \\ for (a) |v, i| { \\ continue; \\ } \\ \\ for (a) |v, i| \\ continue; \\ \\ for (a) |b| switch (b) { \\ c => {}, \\ d => {}, \\ }; \\ \\ for (a) |b| \\ switch (b) { \\ c => {}, \\ d => {}, \\ }; \\ \\ const res = for (a) |v, i| { \\ break v; \\ } else { \\ unreachable; \\ }; \\ \\ var num: usize = 0; \\ inline for (a) |v, i| { \\ num += v; \\ num += i; \\ } \\} \\ ); try testTransform( \\test "fix for" { \\ for (a) |x| \\ f(x) else continue; \\} \\ , \\test "fix for" { \\ for (a) |x| \\ f(x) \\ else continue; \\} \\ ); } test "zig fmt: if" { try testCanonical( \\test "if" { \\ if (10 < 0) { \\ unreachable; \\ } \\ \\ if (10 < 0) unreachable; \\ \\ if (10 < 0) 
{ \\ unreachable; \\ } else { \\ const a = 20; \\ } \\ \\ if (10 < 0) { \\ unreachable; \\ } else if (5 < 0) { \\ unreachable; \\ } else { \\ const a = 20; \\ } \\ \\ const is_world_broken = if (10 < 0) true else false; \\ const some_number = 1 + if (10 < 0) 2 else 3; \\ \\ const a: ?u8 = 10; \\ const b: ?u8 = null; \\ if (a) |v| { \\ const some = v; \\ } else if (b) |*v| { \\ unreachable; \\ } else { \\ const some = 10; \\ } \\ \\ const non_null_a = if (a) |v| v else 0; \\ \\ const a_err: anyerror!u8 = 0; \\ if (a_err) |v| { \\ const p = v; \\ } else |err| { \\ unreachable; \\ } \\} \\ ); } test "zig fmt: defer" { try testCanonical( \\test "defer" { \\ var i: usize = 0; \\ defer i = 1; \\ defer { \\ i += 2; \\ i *= i; \\ } \\ \\ errdefer i += 3; \\ errdefer { \\ i += 2; \\ i /= i; \\ } \\} \\ ); } test "zig fmt: comptime" { try testCanonical( \\fn a() u8 { \\ return 5; \\} \\ \\fn b(comptime i: u8) u8 { \\ return i; \\} \\ \\const av = comptime a(); \\const av2 = comptime blk: { \\ var res = a(); \\ res *= b(2); \\ break :blk res; \\}; \\ \\comptime { \\ _ = a(); \\} \\ \\test "comptime" { \\ const av3 = comptime a(); \\ const av4 = comptime blk: { \\ var res = a(); \\ res *= a(); \\ break :blk res; \\ }; \\ \\ comptime var i = 0; \\ comptime { \\ i = a(); \\ i += b(i); \\ } \\} \\ ); } test "zig fmt: fn type" { try testCanonical( \\fn a(i: u8) u8 { \\ return i + 1; \\} \\ \\const a: fn (u8) u8 = undefined; \\const b: extern fn (u8) u8 = undefined; \\const c: nakedcc fn (u8) u8 = undefined; \\const ap: fn (u8) u8 = a; \\ ); } test "zig fmt: inline asm" { try testCanonical( \\pub fn syscall1(number: usize, arg1: usize) usize { \\ return asm volatile ("syscall" \\ : [ret] "={rax}" (-> usize) \\ : [number] "{rax}" (number), \\ [arg1] "{rdi}" (arg1) \\ : "rcx", "r11" \\ ); \\} \\ ); } test "zig fmt: async functions" { try testCanonical( \\async fn simpleAsyncFn() void { \\ const a = async a.b(); \\ x += 1; \\ suspend; \\ x += 1; \\ suspend; \\ const p: anyframe->void 
= async simpleAsyncFn() catch unreachable; \\ await p; \\} \\ \\test "suspend, resume, await" { \\ const p: anyframe = async testAsyncSeq(); \\ resume p; \\ await p; \\} \\ ); } test "zig fmt: noasync" { try testCanonical( \\const a = noasync foo(); \\ ); } test "zig fmt: Block after if" { try testCanonical( \\test "Block after if" { \\ if (true) { \\ const a = 0; \\ } \\ \\ { \\ const a = 0; \\ } \\} \\ ); } test "zig fmt: use" { try testCanonical( \\usingnamespace @import("std"); \\pub usingnamespace @import("std"); \\ ); } test "zig fmt: string identifier" { try testCanonical( \\const @"a b" = @"c d".@"e f"; \\fn @"g h"() void {} \\ ); } test "zig fmt: error return" { try testCanonical( \\fn err() anyerror { \\ call(); \\ return error.InvalidArgs; \\} \\ ); } test "zig fmt: comptime block in container" { try testCanonical( \\pub fn container() type { \\ return struct { \\ comptime { \\ if (false) { \\ unreachable; \\ } \\ } \\ }; \\} \\ ); } test "zig fmt: inline asm parameter alignment" { try testCanonical( \\pub fn main() void { \\ asm volatile ( \\ \\ foo \\ \\ bar \\ ); \\ asm volatile ( \\ \\ foo \\ \\ bar \\ : [_] "" (-> usize), \\ [_] "" (-> usize) \\ ); \\ asm volatile ( \\ \\ foo \\ \\ bar \\ : \\ : [_] "" (0), \\ [_] "" (0) \\ ); \\ asm volatile ( \\ \\ foo \\ \\ bar \\ : \\ : \\ : "", "" \\ ); \\ asm volatile ( \\ \\ foo \\ \\ bar \\ : [_] "" (-> usize), \\ [_] "" (-> usize) \\ : [_] "" (0), \\ [_] "" (0) \\ : "", "" \\ ); \\} \\ ); } test "zig fmt: multiline string in array" { try testCanonical( \\const Foo = [][]const u8{ \\ \\aaa \\, \\ \\bbb \\}; \\ \\fn bar() void { \\ const Foo = [][]const u8{ \\ \\aaa \\ , \\ \\bbb \\ }; \\ const Bar = [][]const u8{ // comment here \\ \\aaa \\ \\ \\ , // and another comment can go here \\ \\bbb \\ }; \\} \\ ); } test "zig fmt: if type expr" { try testCanonical( \\const mycond = true; \\pub fn foo() if (mycond) i32 else void { \\ if (mycond) { \\ return 42; \\ } \\} \\ ); } test "zig fmt: file ends with struct 
field" { try testTransform( \\a: bool , \\a: bool, \\ ); } test "zig fmt: comment after empty comment" { try testTransform( \\const x = true; // \\// \\// \\//a \\ , \\const x = true; \\//a \\ ); } test "zig fmt: line comment in array" { try testTransform( \\test "a" { \\ var arr = [_]u32{ \\ 0 \\ // 1, \\ // 2, \\ }; \\} \\ , \\test "a" { \\ var arr = [_]u32{ \\ 0, // 1, \\ // 2, \\ }; \\} \\ ); try testCanonical( \\test "a" { \\ var arr = [_]u32{ \\ 0, \\ // 1, \\ // 2, \\ }; \\} \\ ); } test "zig fmt: comment after params" { try testTransform( \\fn a( \\ b: u32 \\ // c: u32, \\ // d: u32, \\) void {} \\ , \\fn a( \\ b: u32, // c: u32, \\ // d: u32, \\) void {} \\ ); try testCanonical( \\fn a( \\ b: u32, \\ // c: u32, \\ // d: u32, \\) void {} \\ ); } test "zig fmt: comment in array initializer/access" { try testCanonical( \\test "a" { \\ var a = x{ //aa \\ //bb \\ }; \\ var a = []x{ //aa \\ //bb \\ }; \\ var b = [ //aa \\ _ \\ ]x{ //aa \\ //bb \\ 9, \\ }; \\ var c = b[ //aa \\ 0 \\ ]; \\ var d = [_ \\ //aa \\ ]x{ //aa \\ //bb \\ 9, \\ }; \\ var e = d[0 \\ //aa \\ ]; \\} \\ ); } test "zig fmt: comments at several places in struct init" { try testTransform( \\var bar = Bar{ \\ .x = 10, // test \\ .y = "test" \\ // test \\}; \\ , \\var bar = Bar{ \\ .x = 10, // test \\ .y = "test", // test \\}; \\ ); try testCanonical( \\var bar = Bar{ // test \\ .x = 10, // test \\ .y = "test", \\ // test \\}; \\ ); } test "zig fmt: top level doc comments" { try testCanonical( \\//! tld 1 \\//! tld 2 \\//! tld 3 \\ \\// comment \\ \\/// A doc \\const A = struct { \\ //! A tld 1 \\ //! A tld 2 \\ //! A tld 3 \\}; \\ \\/// B doc \\const B = struct { \\ //! B tld 1 \\ //! B tld 2 \\ //! B tld 3 \\ \\ /// b doc \\ b: u32, \\}; \\ \\/// C doc \\const C = struct { \\ //! C tld 1 \\ //! C tld 2 \\ //! C tld 3 \\ \\ /// c1 doc \\ c1: u32, \\ \\ //! C tld 4 \\ //! C tld 5 \\ //! C tld 6 \\ \\ /// c2 doc \\ c2: u32, \\}; \\ ); try testCanonical( \\//! Top-level documentation. 
\\
\\/// This is A
\\pub const A = usize;
\\
    );
    try testCanonical(
        \\//! Nothing here
        \\
    );
}

const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;
const io = std.io;
const maxInt = std.math.maxInt;

// Scratch memory backing every FixedBufferAllocator created by the helpers below.
var fixed_buffer_mem: [100 * 1024]u8 = undefined;

/// Parses `source` as Zig and renders it back out through `std.zig.render`.
/// On success returns the rendered text (caller frees via `allocator`) and
/// stores in `anything_changed.*` whether the renderer's output differed from
/// its input. Parse errors are printed to stderr in compiler style, with a
/// `~~~` underline pointing at the offending token, and become
/// `error.ParseError`.
fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
    const stderr = &io.getStdErr().outStream().stream;

    const tree = try std.zig.parse(allocator, source);
    defer tree.deinit();

    // Report every parse error in "(memory buffer):line:col: error: ..." form.
    var error_it = tree.errors.iterator(0);
    while (error_it.next()) |parse_error| {
        const token = tree.tokens.at(parse_error.loc());
        const loc = tree.tokenLocation(0, parse_error.loc());
        try stderr.print("(memory buffer):{}:{}: error: ", .{ loc.line + 1, loc.column + 1 });
        try tree.renderError(parse_error, stderr);
        try stderr.print("\n{}\n", .{source[loc.line_start..loc.line_end]});
        {
            // Pad with spaces up to the error column...
            var i: usize = 0;
            while (i < loc.column) : (i += 1) {
                try stderr.write(" ");
            }
        }
        {
            // ...then underline the full width of the offending token.
            const caret_count = token.end - token.start;
            var i: usize = 0;
            while (i < caret_count) : (i += 1) {
                try stderr.write("~");
            }
        }
        try stderr.write("\n");
    }
    if (tree.errors.len != 0) {
        return error.ParseError;
    }

    var buffer = try std.Buffer.initSize(allocator, 0);
    errdefer buffer.deinit();

    var buffer_out_stream = io.BufferOutStream.init(&buffer);
    anything_changed.* = try std.zig.render(allocator, &buffer_out_stream.stream, tree);
    return buffer.toOwnedSlice();
}

/// Asserts that formatting `source` yields exactly `expected_source`.
/// Phase 1 runs with effectively unlimited memory and checks the output;
/// phase 2 replays the parse+render once per allocation index with a
/// FailingAllocator that fails at that index, verifying that OOM on any
/// allocation is propagated without leaking and without nondeterminism.
fn testTransform(source: []const u8, expected_source: []const u8) !void {
    const needed_alloc_count = x: {
        // Try it once with unlimited memory, make sure it works
        var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
        var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
        var anything_changed: bool = undefined;
        const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
        if (!mem.eql(u8, result_source, expected_source)) {
            warn("\n====== expected this output: =========\n", .{});
            warn("{}", .{expected_source});
            warn("\n======== instead found this: =========\n", .{});
            warn("{}", .{result_source});
            warn("\n======================================\n", .{});
            return error.TestFailed;
        }
        // The renderer must report "changed" exactly when the caller passed a
        // transform pair; testCanonical passes the same slice twice, so a
        // pointer comparison distinguishes the two cases.
        const changes_expected = source.ptr != expected_source.ptr;
        if (anything_changed != changes_expected) {
            warn("std.zig.render returned {} instead of {}\n", .{ anything_changed, changes_expected });
            return error.TestFailed;
        }
        std.testing.expect(anything_changed == changes_expected);
        failing_allocator.allocator.free(result_source);
        // Number of allocations the successful run performed; phase 2
        // iterates over every possible failure point below this count.
        break :x failing_allocator.index;
    };

    var fail_index: usize = 0;
    while (fail_index < needed_alloc_count) : (fail_index += 1) {
        var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
        var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
        var anything_changed: bool = undefined;
        // Succeeding with fewer allocations than phase 1 means the allocation
        // pattern is input-independent in a way it should not be.
        if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
            return error.NondeterministicMemoryUsage;
        } else |err| switch (err) {
            error.OutOfMemory => {
                // Everything allocated before the injected failure must have
                // been freed on the error path.
                if (failing_allocator.allocated_bytes != failing_allocator.freed_bytes) {
                    warn(
                        "\nfail_index: {}/{}\nallocated bytes: {}\nfreed bytes: {}\nallocations: {}\ndeallocations: {}\n",
                        .{
                            fail_index,
                            needed_alloc_count,
                            failing_allocator.allocated_bytes,
                            failing_allocator.freed_bytes,
                            failing_allocator.allocations,
                            failing_allocator.deallocations,
                        },
                    );
                    return error.MemoryLeakDetected;
                }
            },
            error.ParseError => @panic("test failed"),
            else => @panic("test failed"),
        }
    }
}

/// Asserts that `source` is already canonically formatted: rendering it must
/// reproduce it byte-for-byte with no reported change.
fn testCanonical(source: []const u8) !void {
    return testTransform(source, source);
}
lib/std/zig/parser_test.zig
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;

/// Returns true if mode `m` describes a character special file
/// (file-type bits equal S_IFCHR).
pub fn S_ISCHR(m: u32) bool {
    return m & S_IFMT == S_IFCHR;
}

// See:
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/include/unistd.h
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/sys/sys/types.h
// TODO: mode_t should probably be changed to a u16, audit pid_t/off_t as well

// Primitive POSIX types as DragonFly's C headers define them.
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const off_t = c_long;
pub const mode_t = c_uint;
pub const uid_t = u32;
pub const gid_t = u32;

// Error numbers (errno), from DragonFly's <sys/errno.h>.
// The first two are aliases for other errno values.
pub const ENOTSUP = EOPNOTSUPP;
pub const EWOULDBLOCK = EAGAIN;
pub const EPERM = 1;
pub const ENOENT = 2;
pub const ESRCH = 3;
pub const EINTR = 4;
pub const EIO = 5;
pub const ENXIO = 6;
pub const E2BIG = 7;
pub const ENOEXEC = 8;
pub const EBADF = 9;
pub const ECHILD = 10;
pub const EDEADLK = 11;
pub const ENOMEM = 12;
pub const EACCES = 13;
pub const EFAULT = 14;
pub const ENOTBLK = 15;
pub const EBUSY = 16;
pub const EEXIST = 17;
pub const EXDEV = 18;
pub const ENODEV = 19;
pub const ENOTDIR = 20;
pub const EISDIR = 21;
pub const EINVAL = 22;
pub const ENFILE = 23;
pub const EMFILE = 24;
pub const ENOTTY = 25;
pub const ETXTBSY = 26;
pub const EFBIG = 27;
pub const ENOSPC = 28;
pub const ESPIPE = 29;
pub const EROFS = 30;
pub const EMLINK = 31;
pub const EPIPE = 32;
pub const EDOM = 33;
pub const ERANGE = 34;
pub const EAGAIN = 35;
pub const EINPROGRESS = 36;
pub const EALREADY = 37;
pub const ENOTSOCK = 38;
pub const EDESTADDRREQ = 39;
pub const EMSGSIZE = 40;
pub const EPROTOTYPE = 41;
pub const ENOPROTOOPT = 42;
pub const EPROTONOSUPPORT = 43;
pub const ESOCKTNOSUPPORT = 44;
pub const EOPNOTSUPP = 45;
pub const EPFNOSUPPORT = 46;
pub const EAFNOSUPPORT = 47;
pub const EADDRINUSE = 48;
pub const EADDRNOTAVAIL = 49;
pub const ENETDOWN = 50;
pub const ENETUNREACH = 51;
pub const ENETRESET = 52;
pub const ECONNABORTED = 53;
pub const ECONNRESET = 54;
pub const ENOBUFS = 55;
pub const EISCONN = 56;
pub const ENOTCONN = 57;
pub const ESHUTDOWN = 58;
// Error numbers, continued.
pub const ETOOMANYREFS = 59;
pub const ETIMEDOUT = 60;
pub const ECONNREFUSED = 61;
pub const ELOOP = 62;
pub const ENAMETOOLONG = 63;
pub const EHOSTDOWN = 64;
pub const EHOSTUNREACH = 65;
pub const ENOTEMPTY = 66;
pub const EPROCLIM = 67;
pub const EUSERS = 68;
pub const EDQUOT = 69;
pub const ESTALE = 70;
pub const EREMOTE = 71;
pub const EBADRPC = 72;
pub const ERPCMISMATCH = 73;
pub const EPROGUNAVAIL = 74;
pub const EPROGMISMATCH = 75;
pub const EPROCUNAVAIL = 76;
pub const ENOLCK = 77;
pub const ENOSYS = 78;
pub const EFTYPE = 79;
pub const EAUTH = 80;
pub const ENEEDAUTH = 81;
pub const EIDRM = 82;
pub const ENOMSG = 83;
pub const EOVERFLOW = 84;
pub const ECANCELED = 85;
pub const EILSEQ = 86;
pub const ENOATTR = 87;
pub const EDOOFUS = 88;
pub const EBADMSG = 89;
pub const EMULTIHOP = 90;
pub const ENOLINK = 91;
pub const EPROTO = 92;
pub const ENOMEDIUM = 93;
pub const ELAST = 99;
pub const EASYNC = 99;

// Standard file descriptors.
pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;

// mmap() protection flags.
pub const PROT_NONE = 0;
pub const PROT_READ = 1;
pub const PROT_WRITE = 2;
pub const PROT_EXEC = 4;

// mmap() flags. MAP_FAILED is the all-ones pointer sentinel returned on error.
pub const MAP_FILE = 0;
pub const MAP_FAILED = @intToPtr(*c_void, maxInt(usize));
pub const MAP_ANONYMOUS = MAP_ANON;
pub const MAP_COPY = MAP_PRIVATE;
pub const MAP_SHARED = 1;
pub const MAP_PRIVATE = 2;
pub const MAP_FIXED = 16;
pub const MAP_RENAME = 32;
pub const MAP_NORESERVE = 64;
pub const MAP_INHERIT = 128;
pub const MAP_NOEXTEND = 256;
pub const MAP_HASSEMAPHORE = 512;
pub const MAP_STACK = 1024;
pub const MAP_NOSYNC = 2048;
pub const MAP_ANON = 4096;
pub const MAP_VPAGETABLE = 8192;
pub const MAP_TRYFIXED = 65536;
pub const MAP_NOCORE = 131072;
pub const MAP_SIZEALIGN = 262144;

pub const PATH_MAX = 1024;

pub const ino_t = c_ulong;

/// Layout-compatible view of DragonFly's C `struct stat`.
pub const libc_stat = extern struct {
    ino: ino_t,
    nlink: c_uint,
    dev: c_uint,
    mode: c_ushort,
    padding1: u16,
    uid: uid_t,
    gid: gid_t,
    rdev: c_uint,
    atim: timespec,
    mtim: timespec,
    ctim: timespec,
    size: c_ulong,
    blocks: i64,
    blksize: u32,
    flags: u32,
    gen: u32,
    lspare: i32,
    qspare1: i64,
    qspare2: i64,

    /// Last access time.
    pub fn atime(self: @This()) timespec {
        return self.atim;
    }

    /// Last modification time.
    pub fn mtime(self: @This()) timespec {
        return self.mtim;
    }

    /// Last status-change time.
    pub fn ctime(self: @This()) timespec {
        return self.ctim;
    }
};

pub const timespec = extern struct {
    tv_sec: c_long,
    tv_nsec: c_long,
};

// sysctl() top-level identifiers.
pub const CTL_UNSPEC = 0;
pub const CTL_KERN = 1;
pub const CTL_VM = 2;
pub const CTL_VFS = 3;
pub const CTL_NET = 4;
pub const CTL_DEBUG = 5;
pub const CTL_HW = 6;
pub const CTL_MACHDEP = 7;
pub const CTL_USER = 8;
pub const CTL_LWKT = 10;
pub const CTL_MAXID = 11;
pub const CTL_MAXNAME = 12;

// CTL_KERN second-level identifiers, interleaved (by value) with the
// KERN_PROC_* selectors, which form their own independent numbering.
pub const KERN_PROC_ALL = 0;
pub const KERN_OSTYPE = 1;
pub const KERN_PROC_PID = 1;
pub const KERN_OSRELEASE = 2;
pub const KERN_PROC_PGRP = 2;
pub const KERN_OSREV = 3;
pub const KERN_PROC_SESSION = 3;
pub const KERN_VERSION = 4;
pub const KERN_PROC_TTY = 4;
pub const KERN_MAXVNODES = 5;
pub const KERN_PROC_UID = 5;
pub const KERN_MAXPROC = 6;
pub const KERN_PROC_RUID = 6;
pub const KERN_MAXFILES = 7;
pub const KERN_PROC_ARGS = 7;
pub const KERN_ARGMAX = 8;
pub const KERN_PROC_CWD = 8;
pub const KERN_PROC_PATHNAME = 9;
pub const KERN_SECURELVL = 9;
pub const KERN_PROC_SIGTRAMP = 10;
pub const KERN_HOSTNAME = 10;
pub const KERN_HOSTID = 11;
pub const KERN_CLOCKRATE = 12;
pub const KERN_VNODE = 13;
pub const KERN_PROC = 14;
pub const KERN_FILE = 15;
pub const KERN_PROC_FLAGMASK = 16;
pub const KERN_PROF = 16;
pub const KERN_PROC_FLAG_LWP = 16;
pub const KERN_POSIX1 = 17;
pub const KERN_NGROUPS = 18;
pub const KERN_JOB_CONTROL = 19;
pub const KERN_SAVED_IDS = 20;
pub const KERN_BOOTTIME = 21;
pub const KERN_NISDOMAINNAME = 22;
pub const KERN_UPDATEINTERVAL = 23;
pub const KERN_OSRELDATE = 24;
pub const KERN_NTP_PLL = 25;
pub const KERN_BOOTFILE = 26;
pub const KERN_MAXFILESPERPROC = 27;
pub const KERN_MAXPROCPERUID = 28;
pub const KERN_DUMPDEV = 29;
pub const KERN_IPC = 30;
pub const KERN_DUMMY = 31;
pub const KERN_PS_STRINGS = 32;
pub const KERN_USRSTACK = 33;
// CTL_KERN identifiers, continued.
pub const KERN_LOGSIGEXIT = 34;
pub const KERN_IOV_MAX = 35;
pub const KERN_MAXPOSIXLOCKSPERUID = 36;
pub const KERN_MAXID = 37;

pub const HOST_NAME_MAX = 255;

// access function
pub const F_OK = 0; // test for existence of file
pub const X_OK = 1; // test for execute or search permission
pub const W_OK = 2; // test for write permission
pub const R_OK = 4; // test for read permission

// open() flags. O_NDELAY is the historical alias for O_NONBLOCK.
pub const O_RDONLY = 0;
pub const O_NDELAY = O_NONBLOCK;
pub const O_WRONLY = 1;
pub const O_RDWR = 2;
pub const O_ACCMODE = 3;
pub const O_NONBLOCK = 4;
pub const O_APPEND = 8;
pub const O_SHLOCK = 16;
pub const O_EXLOCK = 32;
pub const O_ASYNC = 64;
pub const O_FSYNC = 128;
pub const O_SYNC = 128;
pub const O_NOFOLLOW = 256;
pub const O_CREAT = 512;
pub const O_TRUNC = 1024;
pub const O_EXCL = 2048;
pub const O_NOCTTY = 32768;
pub const O_DIRECT = 65536;
pub const O_CLOEXEC = 131072;
pub const O_FBLOCKING = 262144;
pub const O_FNONBLOCKING = 524288;
pub const O_FAPPEND = 1048576;
pub const O_FOFFSET = 2097152;
pub const O_FSYNCWRITE = 4194304;
pub const O_FASYNCWRITE = 8388608;
pub const O_DIRECTORY = 134217728;

// lseek() whence values.
pub const SEEK_SET = 0;
pub const SEEK_CUR = 1;
pub const SEEK_END = 2;
pub const SEEK_DATA = 3;
pub const SEEK_HOLE = 4;

// lockf() operations.
pub const F_ULOCK = 0;
pub const F_LOCK = 1;
pub const F_TLOCK = 2;
pub const F_TEST = 3;

pub const FD_CLOEXEC = 1;

// *at() syscall values. AT_FDCWD is DragonFly's magic "current directory"
// descriptor (0xfffafdcd as a signed value).
pub const AT_FDCWD = -328243;
pub const AT_SYMLINK_NOFOLLOW = 1;
pub const AT_REMOVEDIR = 2;
pub const AT_EACCESS = 4;
pub const AT_SYMLINK_FOLLOW = 8;

/// Extracts the exit code (bits 8..15) from a wait() status value.
pub fn WEXITSTATUS(s: u32) u32 {
    return (s & 0xff00) >> 8;
}

/// Extracts the terminating signal number (low 7 bits) from a wait() status.
pub fn WTERMSIG(s: u32) u32 {
    return s & 0x7f;
}

/// Extracts the stop signal number; stored in the same bits as the exit code.
pub fn WSTOPSIG(s: u32) u32 {
    return WEXITSTATUS(s);
}

/// True if the process exited normally (termination-signal field is zero).
pub fn WIFEXITED(s: u32) bool {
    return WTERMSIG(s) == 0;
}

/// True if the process is currently stopped.
/// NOTE(review): bit trick carried over from the C headers — confirm against
/// DragonFly's <sys/wait.h> before changing.
pub fn WIFSTOPPED(s: u32) bool {
    return @intCast(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
}

/// True if the process was terminated by a signal (status in 1..0xff,
/// computed branch-free via wrapping subtraction).
pub fn WIFSIGNALED(s: u32) bool {
    return (s & 0xffff) -% 1 < 0xff;
}

/// Directory entry, matching DragonFly's C `struct dirent`.
pub const dirent = extern struct {
    d_fileno: c_ulong,
    d_namlen: u16,
    d_type: u8,
    d_unused1: u8,
    d_unused2: u32,
    d_name: [256]u8,

    /// Actual on-disk record length: offset of d_name plus the name and its
    /// NUL terminator, rounded up to a multiple of 8.
    pub fn reclen(self: dirent) u16 {
        return (@byteOffsetOf(dirent, "d_name") + self.d_namlen + 1 + 7) & ~@as(u16, 7);
    }
};

// dirent d_type values.
pub const DT_UNKNOWN = 0;
pub const DT_FIFO = 1;
pub const DT_CHR = 2;
pub const DT_DIR = 4;
pub const DT_BLK = 6;
pub const DT_REG = 8;
pub const DT_LNK = 10;
pub const DT_SOCK = 12;
pub const DT_WHT = 14;
pub const DT_DBF = 15;

// clock_gettime() clock identifiers.
pub const CLOCK_REALTIME = 0;
pub const CLOCK_VIRTUAL = 1;
pub const CLOCK_PROF = 2;
pub const CLOCK_MONOTONIC = 4;
pub const CLOCK_UPTIME = 5;
pub const CLOCK_UPTIME_PRECISE = 7;
pub const CLOCK_UPTIME_FAST = 8;
pub const CLOCK_REALTIME_PRECISE = 9;
pub const CLOCK_REALTIME_FAST = 10;
pub const CLOCK_MONOTONIC_PRECISE = 11;
pub const CLOCK_MONOTONIC_FAST = 12;
pub const CLOCK_SECOND = 13;
pub const CLOCK_THREAD_CPUTIME_ID = 14;
pub const CLOCK_PROCESS_CPUTIME_ID = 15;

/// Generic socket address, matching DragonFly's C `struct sockaddr`.
pub const sockaddr = extern struct {
    sa_len: u8,
    sa_family: u8,
    sa_data: [14]u8,
};

/// kqueue event, matching DragonFly's C `struct kevent`.
pub const Kevent = extern struct {
    ident: usize,
    filter: c_short,
    flags: c_ushort,
    fflags: c_uint,
    data: isize,
    udata: usize,
};

// kqueue filter types.
pub const EVFILT_FS = -10;
pub const EVFILT_USER = -9;
pub const EVFILT_EXCEPT = -8;
pub const EVFILT_TIMER = -7;
pub const EVFILT_SIGNAL = -6;
pub const EVFILT_PROC = -5;
pub const EVFILT_VNODE = -4;
pub const EVFILT_AIO = -3;
pub const EVFILT_WRITE = -2;
pub const EVFILT_READ = -1;
pub const EVFILT_SYSCOUNT = 10;
pub const EVFILT_MARKER = 15;

// kqueue event flags.
pub const EV_ADD = 1;
pub const EV_DELETE = 2;
pub const EV_ENABLE = 4;
pub const EV_DISABLE = 8;
pub const EV_ONESHOT = 16;
pub const EV_CLEAR = 32;
pub const EV_RECEIPT = 64;
pub const EV_DISPATCH = 128;
pub const EV_NODATA = 4096;
pub const EV_FLAG1 = 8192;
pub const EV_ERROR = 16384;
pub const EV_EOF = 32768;
pub const EV_SYSFLAGS = 61440;

// kqueue filter-specific flags (fflags); values are reused across filters.
pub const NOTE_FFNOP = 0;
pub const NOTE_TRACK = 1;
pub const NOTE_DELETE = 1;
pub const NOTE_LOWAT = 1;
pub const NOTE_TRACKERR = 2;
pub const NOTE_OOB = 2;
pub const NOTE_WRITE = 2;
pub const NOTE_EXTEND = 4;
pub const NOTE_CHILD = 4;
// kqueue filter-specific flags, continued.
pub const NOTE_ATTRIB = 8;
pub const NOTE_LINK = 16;
pub const NOTE_RENAME = 32;
pub const NOTE_REVOKE = 64;
pub const NOTE_PDATAMASK = 1048575;
pub const NOTE_FFLAGSMASK = 16777215;
pub const NOTE_TRIGGER = 16777216;
pub const NOTE_EXEC = 536870912;
pub const NOTE_FFAND = 1073741824;
pub const NOTE_FORK = 1073741824;
pub const NOTE_EXIT = 2147483648;
pub const NOTE_FFOR = 2147483648;
pub const NOTE_FFCTRLMASK = 3221225472;
pub const NOTE_FFCOPY = 3221225472;
pub const NOTE_PCTRLMASK = 4026531840;

/// Alternate signal stack descriptor (sigaltstack).
pub const stack_t = extern struct {
    ss_sp: [*]u8,
    ss_size: isize,
    ss_flags: i32,
};

// File permission bits; the first three are historical aliases.
pub const S_IREAD = S_IRUSR;
pub const S_IEXEC = S_IXUSR;
pub const S_IWRITE = S_IWUSR;
pub const S_IXOTH = 1;
pub const S_IWOTH = 2;
pub const S_IROTH = 4;
pub const S_IRWXO = 7;
pub const S_IXGRP = 8;
pub const S_IWGRP = 16;
pub const S_IRGRP = 32;
pub const S_IRWXG = 56;
pub const S_IXUSR = 64;
pub const S_IWUSR = 128;
pub const S_IRUSR = 256;
pub const S_IRWXU = 448;
pub const S_ISTXT = 512;
pub const S_BLKSIZE = 512;
pub const S_ISVTX = 512;
pub const S_ISGID = 1024;
pub const S_ISUID = 2048;

// File type bits, selected by the S_IFMT mask.
pub const S_IFIFO = 4096;
pub const S_IFCHR = 8192;
pub const S_IFDIR = 16384;
pub const S_IFBLK = 24576;
pub const S_IFREG = 32768;
pub const S_IFDB = 36864;
pub const S_IFLNK = 40960;
pub const S_IFSOCK = 49152;
pub const S_IFWHT = 57344;
pub const S_IFMT = 61440;

// Special signal handler sentinel values.
pub const SIG_ERR = @intToPtr(fn (i32) callconv(.C) void, maxInt(usize));
pub const SIG_DFL = @intToPtr(fn (i32) callconv(.C) void, 0);
pub const SIG_IGN = @intToPtr(fn (i32) callconv(.C) void, 1);
pub const BADSIG = SIG_ERR;

// sigprocmask() operations.
pub const SIG_BLOCK = 1;
pub const SIG_UNBLOCK = 2;
pub const SIG_SETMASK = 3;

// Signal numbers.
pub const SIGIOT = SIGABRT;
pub const SIGHUP = 1;
pub const SIGINT = 2;
pub const SIGQUIT = 3;
pub const SIGILL = 4;
pub const SIGTRAP = 5;
pub const SIGABRT = 6;
pub const SIGEMT = 7;
pub const SIGFPE = 8;
pub const SIGKILL = 9;
pub const SIGBUS = 10;
pub const SIGSEGV = 11;
pub const SIGSYS = 12;
pub const SIGPIPE = 13;
pub const SIGALRM = 14;
pub const SIGTERM = 15;
pub const SIGURG = 16;
pub const SIGSTOP = 17;
pub const SIGTSTP = 18;
pub const SIGCONT = 19;
pub const SIGCHLD = 20;
pub const SIGTTIN = 21;
pub const SIGTTOU = 22;
pub const SIGIO = 23;
pub const SIGXCPU = 24;
pub const SIGXFSZ = 25;
pub const SIGVTALRM = 26;
pub const SIGPROF = 27;
pub const SIGWINCH = 28;
pub const SIGINFO = 29;
pub const SIGUSR1 = 30;
pub const SIGUSR2 = 31;
pub const SIGTHR = 32;
pub const SIGCKPT = 33;
pub const SIGCKPTEXIT = 34;

/// Signal information, matching DragonFly's C `siginfo_t`.
pub const siginfo_t = extern struct {
    si_signo: c_int,
    si_errno: c_int,
    si_code: c_int,
    si_pid: c_int,
    si_uid: uid_t,
    si_status: c_int,
    si_addr: ?*c_void,
    // NOTE(review): `union_sigval` is not declared anywhere in this file;
    // confirm where it is expected to come from.
    si_value: union_sigval,
    si_band: c_long,
    __spare__: [7]c_int,
};

pub const sigset_t = extern struct {
    __bits: [4]c_uint,
};

pub const sig_atomic_t = c_int;

/// sigaction() descriptor, matching DragonFly's C `struct sigaction`.
pub const Sigaction = extern struct {
    __sigaction_u: extern union {
        __sa_handler: ?fn (c_int) callconv(.C) void,
        __sa_sigaction: ?fn (c_int, [*c]siginfo_t, ?*c_void) callconv(.C) void,
    },
    sa_flags: c_int,
    sa_mask: sigset_t,
};

pub const sig_t = [*c]fn (c_int) callconv(.C) void;

pub const sigvec = extern struct {
    // NOTE(review): `__sighandler_t` is not declared anywhere in this file;
    // confirm where it is expected to come from.
    sv_handler: [*c]__sighandler_t,
    sv_mask: c_int,
    sv_flags: c_int,
};

// Socket types and flags.
pub const SOCK_STREAM = 1;
pub const SOCK_DGRAM = 2;
pub const SOCK_RAW = 3;
pub const SOCK_RDM = 4;
pub const SOCK_SEQPACKET = 5;
pub const SOCK_MAXADDRLEN = 255;
pub const SOCK_CLOEXEC = 268435456;
pub const SOCK_NONBLOCK = 536870912;

// Protocol families: aliases for the corresponding address families.
pub const PF_INET6 = AF_INET6;
pub const PF_IMPLINK = AF_IMPLINK;
pub const PF_ROUTE = AF_ROUTE;
pub const PF_ISO = AF_ISO;
pub const PF_PIP = pseudo_AF_PIP;
pub const PF_CHAOS = AF_CHAOS;
pub const PF_DATAKIT = AF_DATAKIT;
pub const PF_INET = AF_INET;
pub const PF_APPLETALK = AF_APPLETALK;
pub const PF_SIP = AF_SIP;
pub const PF_OSI = AF_ISO;
pub const PF_CNT = AF_CNT;
pub const PF_LINK = AF_LINK;
pub const PF_HYLINK = AF_HYLINK;
pub const PF_MAX = AF_MAX;
pub const PF_KEY = pseudo_AF_KEY;
pub const PF_PUP = AF_PUP;
pub const PF_COIP = AF_COIP;
pub const PF_SNA = AF_SNA;
pub const PF_LOCAL = AF_LOCAL;
pub const PF_NETBIOS = AF_NETBIOS;
pub const PF_NATM = AF_NATM;
pub const PF_BLUETOOTH = AF_BLUETOOTH;
pub const PF_UNSPEC = AF_UNSPEC;
pub const PF_NETGRAPH = AF_NETGRAPH;
pub const PF_ECMA = AF_ECMA;
pub const PF_IPX = AF_IPX;
pub const PF_DLI = AF_DLI;
pub const PF_ATM = AF_ATM;
pub const PF_CCITT = AF_CCITT;
pub const PF_ISDN = AF_ISDN;
pub const PF_RTIP = pseudo_AF_RTIP;
pub const PF_LAT = AF_LAT;
pub const PF_UNIX = PF_LOCAL;
pub const PF_XTP = pseudo_AF_XTP;
pub const PF_DECnet = AF_DECnet;

// Address families, from DragonFly's <sys/socket.h>.
// Fix: AF_DECnet, AF_INET6 and the pseudo_AF_* values below were referenced
// by the PF_* aliases above but never defined in this file; their values are
// taken from DragonFly's <sys/socket.h> and inserted in numeric order.
pub const AF_UNSPEC = 0;
pub const AF_OSI = AF_ISO;
pub const AF_UNIX = AF_LOCAL;
pub const AF_LOCAL = 1;
pub const AF_INET = 2;
pub const AF_IMPLINK = 3;
pub const AF_PUP = 4;
pub const AF_CHAOS = 5;
pub const AF_NETBIOS = 6;
pub const AF_ISO = 7;
pub const AF_ECMA = 8;
pub const AF_DATAKIT = 9;
pub const AF_CCITT = 10;
pub const AF_SNA = 11;
pub const AF_DECnet = 12;
pub const AF_DLI = 13;
pub const AF_LAT = 14;
pub const AF_HYLINK = 15;
pub const AF_APPLETALK = 16;
pub const AF_ROUTE = 17;
pub const AF_LINK = 18;
pub const pseudo_AF_XTP = 19;
pub const AF_COIP = 20;
pub const AF_CNT = 21;
pub const pseudo_AF_RTIP = 22;
pub const AF_IPX = 23;
pub const AF_SIP = 24;
pub const pseudo_AF_PIP = 25;
pub const AF_ISDN = 26;
pub const pseudo_AF_KEY = 27;
pub const AF_INET6 = 28;
pub const AF_NATM = 29;
pub const AF_ATM = 30;
pub const AF_NETGRAPH = 32;
pub const AF_BLUETOOTH = 33;
pub const AF_MPLS = 34;
pub const AF_MAX = 36;

pub const sa_family_t = u8;
pub const socklen_t = c_uint;

/// Storage large and aligned enough for any socket address.
pub const sockaddr_storage = extern struct {
    ss_len: u8,
    ss_family: sa_family_t,
    __ss_pad1: [6]u8,
    __ss_align: i64,
    __ss_pad2: [112]u8,
};

/// Callback argument for dl_iterate_phdr().
pub const dl_phdr_info = extern struct {
    dlpi_addr: usize,
    dlpi_name: ?[*:0]const u8,
    dlpi_phdr: [*]std.elf.Phdr,
    dlpi_phnum: u16,
};

/// Scatter/gather message header for sendmsg()/recvmsg().
/// NOTE(review): `iovec` is not declared in this file; confirm where it is
/// expected to come from.
pub const msghdr = extern struct {
    msg_name: ?*c_void,
    msg_namelen: socklen_t,
    msg_iov: [*c]iovec,
    msg_iovlen: c_int,
    msg_control: ?*c_void,
    msg_controllen: socklen_t,
    msg_flags: c_int,
};

/// Control message header carried in msghdr.msg_control.
pub const cmsghdr = extern struct {
    cmsg_len: socklen_t,
    cmsg_level: c_int,
    cmsg_type: c_int,
};

/// Process credentials passed over a unix socket.
pub const cmsgcred = extern struct {
    cmcred_pid: pid_t,
    cmcred_uid: uid_t,
    cmcred_euid: uid_t,
    cmcred_gid: gid_t,
    cmcred_ngroups: c_short,
    cmcred_groups: [16]gid_t,
};

/// Header/trailer descriptor for sendfile().
pub const sf_hdtr = extern struct {
    headers: [*c]iovec,
    hdr_cnt: c_int,
    trailers: [*c]iovec,
    trl_cnt: c_int,
};

// msync() flags.
pub const MS_SYNC = 0;
pub const MS_ASYNC = 1;
pub const MS_INVALIDATE = 2;

// posix_madvise() advice values.
pub const POSIX_MADV_SEQUENTIAL = 2;
pub const POSIX_MADV_RANDOM = 1;
pub const POSIX_MADV_DONTNEED = 4;
pub const POSIX_MADV_NORMAL = 0;
pub const POSIX_MADV_WILLNEED = 3;

// madvise() advice values.
pub const MADV_SEQUENTIAL = 2;
pub const MADV_CONTROL_END = MADV_SETMAP;
pub const MADV_DONTNEED = 4;
pub const MADV_RANDOM = 1;
pub const MADV_WILLNEED = 3;
pub const MADV_NORMAL = 0;
pub const MADV_CONTROL_START = MADV_INVAL;
pub const MADV_FREE = 5;
pub const MADV_NOSYNC = 6;
pub const MADV_AUTOSYNC = 7;
pub const MADV_NOCORE = 8;
pub const MADV_CORE = 9;
pub const MADV_INVAL = 10;
pub const MADV_SETMAP = 11;

// fcntl() commands, interleaved (by value) with the F_*LCK lock types used
// in Flock.l_type.
pub const F_DUPFD = 0;
pub const F_GETFD = 1;
pub const F_RDLCK = 1;
pub const F_SETFD = 2;
pub const F_UNLCK = 2;
pub const F_WRLCK = 3;
pub const F_GETFL = 3;
pub const F_SETFL = 4;
pub const F_GETOWN = 5;
pub const F_SETOWN = 6;
pub const F_GETLK = 7;
pub const F_SETLK = 8;
pub const F_SETLKW = 9;
pub const F_DUP2FD = 10;
pub const F_DUPFD_CLOEXEC = 17;
pub const F_DUP2FD_CLOEXEC = 18;

// flock() operations.
pub const LOCK_SH = 1;
pub const LOCK_EX = 2;
pub const LOCK_UN = 8;
pub const LOCK_NB = 4;

/// Advisory file lock descriptor for fcntl(F_GETLK/F_SETLK/F_SETLKW).
pub const Flock = extern struct {
    l_start: off_t,
    l_len: off_t,
    l_pid: pid_t,
    l_type: c_short,
    l_whence: c_short,
};

/// getrlimit()/setrlimit() resource identifiers.
pub const rlimit_resource = extern enum(c_int) {
    CPU = 0,
    FSIZE = 1,
    DATA = 2,
    STACK = 3,
    CORE = 4,
    RSS = 5,
    MEMLOCK = 6,
    NPROC = 7,
    NOFILE = 8,
    SBSIZE = 9,
    AS = 10,
    VMEM = 10,
    POSIXLOCKS = 11,
    _,
};

pub const rlim_t = i64;

/// No limit
pub const RLIM_INFINITY: rlim_t = (1 << 63) - 1;

pub const RLIM_SAVED_MAX = RLIM_INFINITY;
pub const RLIM_SAVED_CUR = RLIM_INFINITY;

pub const rlimit = extern struct {
    /// Soft limit
    cur: rlim_t,
    /// Hard limit
    max: rlim_t,
};
lib/std/os/bits/dragonfly.zig
const std = @import("std"); const CrossTarget = std.zig.CrossTarget; const TestContext = @import("../../src/test.zig").TestContext; const linux_x64 = std.zig.CrossTarget{ .cpu_arch = .x86_64, .os_tag = .linux, }; const macos_x64 = CrossTarget{ .cpu_arch = .x86_64, .os_tag = .macos, }; const all_targets: []const CrossTarget = &[_]CrossTarget{ linux_x64, macos_x64, }; pub fn addCases(ctx: *TestContext) !void { try addLinuxTestCases(ctx); try addMacOsTestCases(ctx); // Common tests for (all_targets) |target| { { var case = ctx.exe("adding numbers at runtime and comptime", target); case.addCompareOutput( \\pub fn main() void { \\ add(3, 4); \\} \\ \\fn add(a: u32, b: u32) void { \\ if (a + b != 7) unreachable; \\} , "", ); // comptime function call case.addCompareOutput( \\pub fn main() void { \\ if (x - 7 != 0) unreachable; \\} \\ \\fn add(a: u32, b: u32) u32 { \\ return a + b; \\} \\ \\const x = add(3, 4); , "", ); // Inline function call case.addCompareOutput( \\pub fn main() void { \\ var x: usize = 3; \\ const y = add(1, 2, x); \\ if (y - 6 != 0) unreachable; \\} \\ \\fn add(a: usize, b: usize, c: usize) callconv(.Inline) usize { \\ return a + b + c; \\} , "", ); } { var case = ctx.exe("subtracting numbers at runtime", target); case.addCompareOutput( \\pub fn main() void { \\ sub(7, 4); \\} \\ \\fn sub(a: u32, b: u32) void { \\ if (a - b != 3) unreachable; \\} , "", ); } { var case = ctx.exe("unused vars", target); case.addError( \\pub fn main() void { \\ const x = 1; \\} , &.{":2:11: error: unused local constant"}); } { var case = ctx.exe("multiplying numbers at runtime and comptime", target); case.addCompareOutput( \\pub fn main() void { \\ mul(3, 4); \\} \\ \\fn mul(a: u32, b: u32) void { \\ if (a * b != 12) unreachable; \\} , "", ); // comptime function call case.addCompareOutput( \\pub fn main() void { \\ if (x - 12 != 0) unreachable; \\} \\ \\fn mul(a: u32, b: u32) u32 { \\ return a * b; \\} \\ \\const x = mul(3, 4); , "", ); // Inline function call 
case.addCompareOutput( \\pub fn main() void { \\ var x: usize = 5; \\ const y = mul(2, 3, x); \\ if (y - 30 != 0) unreachable; \\} \\ \\fn mul(a: usize, b: usize, c: usize) callconv(.Inline) usize { \\ return a * b * c; \\} , "", ); } { var case = ctx.exe("assert function", target); case.addCompareOutput( \\pub fn main() void { \\ add(3, 4); \\} \\ \\fn add(a: u32, b: u32) void { \\ assert(a + b == 7); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Tests copying a register. For the `c = a + b`, it has to // preserve both a and b, because they are both used later. case.addCompareOutput( \\pub fn main() void { \\ add(3, 4); \\} \\ \\fn add(a: u32, b: u32) void { \\ const c = a + b; // 7 \\ const d = a + c; // 10 \\ const e = d + b; // 14 \\ assert(e == 14); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // More stress on the liveness detection. case.addCompareOutput( \\pub fn main() void { \\ add(3, 4); \\} \\ \\fn add(a: u32, b: u32) void { \\ const c = a + b; // 7 \\ const d = a + c; // 10 \\ const e = d + b; // 14 \\ const f = d + e; // 24 \\ const g = e + f; // 38 \\ const h = f + g; // 62 \\ const i = g + h; // 100 \\ assert(i == 100); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Requires a second move. The register allocator should figure out to re-use rax. case.addCompareOutput( \\pub fn main() void { \\ add(3, 4); \\} \\ \\fn add(a: u32, b: u32) void { \\ const c = a + b; // 7 \\ const d = a + c; // 10 \\ const e = d + b; // 14 \\ const f = d + e; // 24 \\ const g = e + f; // 38 \\ const h = f + g; // 62 \\ const i = g + h; // 100 \\ const j = i + d; // 110 \\ assert(j == 110); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Now we test integer return values. 
case.addCompareOutput( \\pub fn main() void { \\ assert(add(3, 4) == 7); \\ assert(add(20, 10) == 30); \\} \\ \\fn add(a: u32, b: u32) u32 { \\ return a + b; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Local mutable variables. case.addCompareOutput( \\pub fn main() void { \\ assert(add(3, 4) == 7); \\ assert(add(20, 10) == 30); \\} \\ \\fn add(a: u32, b: u32) u32 { \\ var x: u32 = undefined; \\ x = 0; \\ x += a; \\ x += b; \\ return x; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Optionals case.addCompareOutput( \\pub fn main() void { \\ const a: u32 = 2; \\ const b: ?u32 = a; \\ const c = b.?; \\ if (c != 2) unreachable; \\} , "", ); switch (target.getOsTag()) { .linux => { // While loops case.addCompareOutput( \\pub fn main() void { \\ var i: u32 = 0; \\ while (i < 4) : (i += 1) print(); \\ assert(i == 4); \\} \\ \\fn print() void { \\ asm volatile ("syscall" \\ : \\ : [number] "{rax}" (1), \\ [arg1] "{rdi}" (1), \\ [arg2] "{rsi}" (@ptrToInt("hello\n")), \\ [arg3] "{rdx}" (6) \\ : "rcx", "r11", "memory" \\ ); \\ return; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "hello\nhello\nhello\nhello\n", ); // inline while requires the condition to be comptime known. 
case.addError( \\pub fn main() void { \\ var i: u32 = 0; \\ inline while (i < 4) : (i += 1) print(); \\ assert(i == 4); \\} \\ \\fn print() void { \\ asm volatile ("syscall" \\ : \\ : [number] "{rax}" (1), \\ [arg1] "{rdi}" (1), \\ [arg2] "{rsi}" (@ptrToInt("hello\n")), \\ [arg3] "{rdx}" (6) \\ : "rcx", "r11", "memory" \\ ); \\ return; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , &[_][]const u8{":3:21: error: unable to resolve comptime value"}); }, .macos => { // While loops case.addCompareOutput( \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn main() void { \\ var i: u32 = 0; \\ while (i < 4) : (i += 1) print(); \\ assert(i == 4); \\} \\ \\fn print() void { \\ _ = write(1, @ptrToInt("hello\n"), 6); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "hello\nhello\nhello\nhello\n", ); // inline while requires the condition to be comptime known. case.addError( \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn main() void { \\ var i: u32 = 0; \\ inline while (i < 4) : (i += 1) print(); \\ assert(i == 4); \\} \\ \\fn print() void { \\ _ = write(1, @ptrToInt("hello\n"), 6); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , &[_][]const u8{":5:21: error: unable to resolve comptime value"}); }, else => unreachable, } // Labeled blocks (no conditional branch) case.addCompareOutput( \\pub fn main() void { \\ assert(add(3, 4) == 20); \\} \\ \\fn add(a: u32, b: u32) u32 { \\ const x: u32 = blk: { \\ const c = a + b; // 7 \\ const d = a + c; // 10 \\ const e = d + b; // 14 \\ break :blk e; \\ }; \\ const y = x + a; // 17 \\ const z = y + a; // 20 \\ return z; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // This catches a possible bug in the logic for re-using dying operands. 
case.addCompareOutput( \\pub fn main() void { \\ assert(add(3, 4) == 116); \\} \\ \\fn add(a: u32, b: u32) u32 { \\ const x: u32 = blk: { \\ const c = a + b; // 7 \\ const d = a + c; // 10 \\ const e = d + b; // 14 \\ const f = d + e; // 24 \\ const g = e + f; // 38 \\ const h = f + g; // 62 \\ const i = g + h; // 100 \\ const j = i + d; // 110 \\ break :blk j; \\ }; \\ const y = x + a; // 113 \\ const z = y + a; // 116 \\ return z; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Spilling registers to the stack. case.addCompareOutput( \\pub fn main() void { \\ assert(add(3, 4) == 1221); \\ assert(mul(3, 4) == 21609); \\} \\ \\fn add(a: u32, b: u32) u32 { \\ const x: u32 = blk: { \\ const c = a + b; // 7 \\ const d = a + c; // 10 \\ const e = d + b; // 14 \\ const f = d + e; // 24 \\ const g = e + f; // 38 \\ const h = f + g; // 62 \\ const i = g + h; // 100 \\ const j = i + d; // 110 \\ const k = i + j; // 210 \\ const l = j + k; // 320 \\ const m = l + c; // 327 \\ const n = m + d; // 337 \\ const o = n + e; // 351 \\ const p = o + f; // 375 \\ const q = p + g; // 413 \\ const r = q + h; // 475 \\ const s = r + i; // 575 \\ const t = s + j; // 685 \\ const u = t + k; // 895 \\ const v = u + l; // 1215 \\ break :blk v; \\ }; \\ const y = x + a; // 1218 \\ const z = y + a; // 1221 \\ return z; \\} \\ \\fn mul(a: u32, b: u32) u32 { \\ const x: u32 = blk: { \\ const c = a * a * a * a; // 81 \\ const d = a * a * a * b; // 108 \\ const e = a * a * b * a; // 108 \\ const f = a * a * b * b; // 144 \\ const g = a * b * a * a; // 108 \\ const h = a * b * a * b; // 144 \\ const i = a * b * b * a; // 144 \\ const j = a * b * b * b; // 192 \\ const k = b * a * a * a; // 108 \\ const l = b * a * a * b; // 144 \\ const m = b * a * b * a; // 144 \\ const n = b * a * b * b; // 192 \\ const o = b * b * a * a; // 144 \\ const p = b * b * a * b; // 192 \\ const q = b * b * b * a; // 192 \\ const r = b * b * b * b; // 256 \\ const 
s = c + d + e + f + g + h + i + j + k + l + m + n + o + p + q + r; // 2401 \\ break :blk s; \\ }; \\ const y = x * a; // 7203 \\ const z = y * a; // 21609 \\ return z; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Reusing the registers of dead operands playing nicely with conditional branching. case.addCompareOutput( \\pub fn main() void { \\ assert(add(3, 4) == 791); \\ assert(add(4, 3) == 79); \\} \\ \\fn add(a: u32, b: u32) u32 { \\ const x: u32 = if (a < b) blk: { \\ const c = a + b; // 7 \\ const d = a + c; // 10 \\ const e = d + b; // 14 \\ const f = d + e; // 24 \\ const g = e + f; // 38 \\ const h = f + g; // 62 \\ const i = g + h; // 100 \\ const j = i + d; // 110 \\ const k = i + j; // 210 \\ const l = k + c; // 217 \\ const m = l + d; // 227 \\ const n = m + e; // 241 \\ const o = n + f; // 265 \\ const p = o + g; // 303 \\ const q = p + h; // 365 \\ const r = q + i; // 465 \\ const s = r + j; // 575 \\ const t = s + k; // 785 \\ break :blk t; \\ } else blk: { \\ const t = b + b + a; // 10 \\ const c = a + t; // 14 \\ const d = c + t; // 24 \\ const e = d + t; // 34 \\ const f = e + t; // 44 \\ const g = f + t; // 54 \\ const h = c + g; // 68 \\ break :blk h + b; // 71 \\ }; \\ const y = x + a; // 788, 75 \\ const z = y + a; // 791, 79 \\ return z; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Character literals and multiline strings. case.addCompareOutput( \\pub fn main() void { \\ const ignore = \\ \\ cool thx \\ \\ \\ ; \\ _ = ignore; \\ add('ぁ', '\x03'); \\} \\ \\fn add(a: u32, b: u32) void { \\ assert(a + b == 12356); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Global const. 
case.addCompareOutput( \\pub fn main() void { \\ add(aa, bb); \\} \\ \\const aa = 'ぁ'; \\const bb = '\x03'; \\ \\fn add(a: u32, b: u32) void { \\ assert(a + b == 12356); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Array access. case.addCompareOutput( \\pub fn main() void { \\ assert("hello"[0] == 'h'); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // Array access to a global array. case.addCompareOutput( \\const hello = "hello".*; \\pub fn main() void { \\ assert(hello[1] == 'e'); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); // 64bit set stack case.addCompareOutput( \\pub fn main() void { \\ var i: u64 = 0xFFEEDDCCBBAA9988; \\ assert(i == 0xFFEEDDCCBBAA9988); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); switch (target.getOsTag()) { .linux => { // Basic for loop case.addCompareOutput( \\pub fn main() void { \\ for ("hello") |_| print(); \\} \\ \\fn print() void { \\ asm volatile ("syscall" \\ : \\ : [number] "{rax}" (1), \\ [arg1] "{rdi}" (1), \\ [arg2] "{rsi}" (@ptrToInt("hello\n")), \\ [arg3] "{rdx}" (6) \\ : "rcx", "r11", "memory" \\ ); \\ return; \\} , "hello\nhello\nhello\nhello\nhello\n", ); }, .macos => { // Basic for loop case.addCompareOutput( \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn main() void { \\ for ("hello") |_| print(); \\} \\ \\fn print() void { \\ _ = write(1, @ptrToInt("hello\n"), 6); \\} , "hello\nhello\nhello\nhello\nhello\n", ); }, else => unreachable, } } { var case = ctx.exe("@TypeOf", target); case.addCompareOutput( \\pub fn main() void { \\ var x: usize = 0; \\ _ = x; \\ const z = @TypeOf(x, @as(u128, 5)); \\ assert(z == u128); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ const z = @TypeOf(true); \\ 
assert(z == bool); \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); case.addError( \\pub fn main() void { \\ _ = @TypeOf(true, 1); \\} , &[_][]const u8{ ":2:9: error: incompatible types: 'bool' and 'comptime_int'", ":2:17: note: type 'bool' here", ":2:23: note: type 'comptime_int' here", }); } { var case = ctx.exe("basic import", target); case.addCompareOutput( \\pub fn main() void { \\ @import("print.zig").print(); \\} , "Hello, World!\n", ); switch (target.getOsTag()) { .linux => try case.files.append(.{ .src = \\pub fn print() void { \\ asm volatile ("syscall" \\ : \\ : [number] "{rax}" (@as(usize, 1)), \\ [arg1] "{rdi}" (@as(usize, 1)), \\ [arg2] "{rsi}" (@ptrToInt("Hello, World!\n")), \\ [arg3] "{rdx}" (@as(usize, 14)) \\ : "rcx", "r11", "memory" \\ ); \\ return; \\} , .path = "print.zig", }), .macos => try case.files.append(.{ .src = \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn print() void { \\ _ = write(1, @ptrToInt("Hello, World!\n"), 14); \\} , .path = "print.zig", }), else => unreachable, } } { var case = ctx.exe("redundant comptime", target); case.addError( \\pub fn main() void { \\ var a: comptime u32 = 0; \\} , &.{":2:12: error: redundant comptime keyword in already comptime scope"}, ); case.addError( \\pub fn main() void { \\ comptime { \\ var a: u32 = comptime 0; \\ } \\} , &.{":3:22: error: redundant comptime keyword in already comptime scope"}, ); } { var case = ctx.exe("try in comptime in struct in test", target); case.addError( \\test "@unionInit on union w/ tag but no fields" { \\ const S = struct { \\ comptime { \\ try expect(false); \\ } \\ }; \\ _ = S; \\} , &.{":4:13: error: 'try' outside function scope"}, ); } { var case = ctx.exe("import private", target); case.addError( \\pub fn main() void { \\ @import("print.zig").print(); \\} , &.{ ":2:25: error: 'print' is not marked 'pub'", "print.zig:2:1: note: declared here", }, ); switch (target.getOsTag()) { .linux => try 
case.files.append(.{ .src = \\// dummy comment to make print be on line 2 \\fn print() void { \\ asm volatile ("syscall" \\ : \\ : [number] "{rax}" (@as(usize, 1)), \\ [arg1] "{rdi}" (@as(usize, 1)), \\ [arg2] "{rsi}" (@ptrToInt("Hello, World!\n")), \\ [arg3] "{rdx}" (@as(usize, 14)) \\ : "rcx", "r11", "memory" \\ ); \\ return; \\} , .path = "print.zig", }), .macos => try case.files.append(.{ .src = \\extern "c" fn write(usize, usize, usize) usize; \\fn print() void { \\ _ = write(1, @ptrToInt("Hello, World!\n"), 14); \\} , .path = "print.zig", }), else => unreachable, } } ctx.compileError("function redeclaration", target, \\// dummy comment \\fn entry() void {} \\fn entry() void {} \\ \\fn foo() void { \\ var foo = 1234; \\} , &[_][]const u8{ ":3:1: error: redeclaration of 'entry'", ":2:1: note: other declaration here", ":6:9: error: local shadows declaration of 'foo'", ":5:1: note: declared here", }); ctx.compileError("returns in try", target, \\pub fn main() !void { \\ try a(); \\ try b(); \\} \\ \\pub fn a() !void { \\ defer try b(); \\} \\pub fn b() !void { \\ defer return a(); \\} , &[_][]const u8{ ":7:8: error: 'try' not allowed inside defer expression", ":10:8: error: cannot return from defer expression", }); ctx.compileError("ambiguous references", target, \\const T = struct { \\ const T = struct { \\ fn f() void { \\ _ = T; \\ } \\ }; \\}; , &.{ ":4:17: error: ambiguous reference", ":2:5: note: declared here", ":1:1: note: also declared here", }); ctx.compileError("inner func accessing outer var", target, \\pub fn f() void { \\ var bar: bool = true; \\ const S = struct { \\ fn baz() bool { \\ return bar; \\ } \\ }; \\ _ = S; \\} , &.{ ":5:20: error: mutable 'bar' not accessible from here", ":2:9: note: declared mutable here", ":3:15: note: crosses namespace boundary here", }); ctx.compileError("global variable redeclaration", target, \\// dummy comment \\var foo = false; \\var foo = true; , &[_][]const u8{ ":3:1: error: redeclaration of 'foo'", ":2:1: 
note: other declaration here", }); ctx.compileError("compileError", target, \\export fn foo() void { \\ @compileError("this is an error"); \\} , &[_][]const u8{":2:3: error: this is an error"}); { var case = ctx.exe("intToPtr", target); case.addError( \\pub fn main() void { \\ _ = @intToPtr(*u8, 0); \\} , &[_][]const u8{ ":2:24: error: pointer type '*u8' does not allow address zero", }); case.addError( \\pub fn main() void { \\ _ = @intToPtr(*u32, 2); \\} , &[_][]const u8{ ":2:25: error: pointer type '*u32' requires aligned address", }); } { var case = ctx.obj("variable shadowing", target); case.addError( \\pub fn main() void { \\ var i: u32 = 10; \\ var i: u32 = 10; \\} , &[_][]const u8{ ":3:9: error: redeclaration of local variable 'i'", ":2:9: note: previous declaration here", }); case.addError( \\var testing: i64 = 10; \\pub fn main() void { \\ var testing: i64 = 20; \\} , &[_][]const u8{ ":3:9: error: local shadows declaration of 'testing'", ":1:1: note: declared here", }); case.addError( \\fn a() type { \\ return struct { \\ pub fn b() void { \\ const c = 6; \\ const c = 69; \\ } \\ }; \\} , &[_][]const u8{ ":5:19: error: redeclaration of local constant 'c'", ":4:19: note: previous declaration here", }); case.addError( \\pub fn main() void { \\ var i = 0; \\ for ("n") |_, i| { \\ } \\} , &[_][]const u8{ ":3:19: error: redeclaration of local variable 'i'", ":2:9: note: previous declaration here", }); case.addError( \\pub fn main() void { \\ var i = 0; \\ for ("n") |i| { \\ } \\} , &[_][]const u8{ ":3:16: error: redeclaration of local variable 'i'", ":2:9: note: previous declaration here", }); case.addError( \\pub fn main() void { \\ var i = 0; \\ while ("n") |i| { \\ } \\} , &[_][]const u8{ ":3:18: error: redeclaration of local variable 'i'", ":2:9: note: previous declaration here", }); case.addError( \\pub fn main() void { \\ var i = 0; \\ while ("n") |bruh| { \\ _ = bruh; \\ } else |i| { \\ \\ } \\} , &[_][]const u8{ ":5:13: error: redeclaration of local 
variable 'i'", ":2:9: note: previous declaration here", }); case.addError( \\pub fn main() void { \\ var i = 0; \\ if (true) |i| {} \\} , &[_][]const u8{ ":3:16: error: redeclaration of local variable 'i'", ":2:9: note: previous declaration here", }); case.addError( \\pub fn main() void { \\ var i = 0; \\ if (true) |i| {} else |e| {} \\} , &[_][]const u8{ ":3:16: error: redeclaration of local variable 'i'", ":2:9: note: previous declaration here", }); case.addError( \\pub fn main() void { \\ var i = 0; \\ if (true) |_| {} else |i| {} \\} , &[_][]const u8{ ":3:28: error: redeclaration of local variable 'i'", ":2:9: note: previous declaration here", }); } { // TODO make the test harness support checking the compile log output too var case = ctx.obj("@compileLog", target); // The other compile error prevents emission of a "found compile log" statement. case.addError( \\export fn _start() noreturn { \\ const b = true; \\ var f: u32 = 1; \\ @compileLog(b, 20, f, x); \\ @compileLog(1000); \\ var bruh: usize = true; \\ _ = bruh; \\ unreachable; \\} \\export fn other() void { \\ @compileLog(1234); \\} \\fn x() void {} , &[_][]const u8{ ":6:23: error: expected usize, found bool", }); // Now only compile log statements remain. One per Decl. 
case.addError( \\export fn _start() noreturn { \\ const b = true; \\ var f: u32 = 1; \\ @compileLog(b, 20, f, x); \\ @compileLog(1000); \\ unreachable; \\} \\export fn other() void { \\ @compileLog(1234); \\} \\fn x() void {} , &[_][]const u8{ ":9:5: error: found compile log statement", ":4:5: note: also here", }); } { var case = ctx.obj("extern variable has no type", target); case.addError( \\comptime { \\ const x = foo + foo; \\ _ = x; \\} \\extern var foo: i32; , &[_][]const u8{":2:15: error: unable to resolve comptime value"}); case.addError( \\export fn entry() void { \\ _ = foo; \\} \\extern var foo; , &[_][]const u8{":4:8: error: unable to infer variable type"}); } { var case = ctx.exe("break/continue", target); // Break out of loop case.addCompareOutput( \\pub fn main() void { \\ while (true) { \\ break; \\ } \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ foo: while (true) { \\ break :foo; \\ } \\} , "", ); // Continue in loop case.addCompareOutput( \\pub fn main() void { \\ var i: u64 = 0; \\ while (true) : (i+=1) { \\ if (i == 4) return; \\ continue; \\ } \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ var i: u64 = 0; \\ foo: while (true) : (i+=1) { \\ if (i == 4) return; \\ continue :foo; \\ } \\} , "", ); } { var case = ctx.exe("unused labels", target); case.addError( \\comptime { \\ foo: {} \\} , &[_][]const u8{":2:5: error: unused block label"}); case.addError( \\comptime { \\ foo: while (true) {} \\} , &[_][]const u8{":2:5: error: unused while loop label"}); case.addError( \\comptime { \\ foo: for ("foo") |_| {} \\} , &[_][]const u8{":2:5: error: unused for loop label"}); case.addError( \\comptime { \\ blk: {blk: {}} \\} , &[_][]const u8{ ":2:11: error: redefinition of label 'blk'", ":2:5: note: previous definition here", }); } { var case = ctx.exe("bad inferred variable type", target); case.addError( \\pub fn main() void { \\ var x = null; \\ _ = x; \\} , &[_][]const u8{ ":2:9: error: variable of type '@Type(.Null)' must 
be const or comptime", }); } { var case = ctx.exe("compile error in inline fn call fixed", target); case.addError( \\pub fn main() void { \\ var x: usize = 3; \\ const y = add(10, 2, x); \\ if (y - 6 != 0) unreachable; \\} \\ \\fn add(a: usize, b: usize, c: usize) callconv(.Inline) usize { \\ if (a == 10) @compileError("bad"); \\ return a + b + c; \\} , &[_][]const u8{ ":8:18: error: bad", ":3:18: note: called from here", }); case.addCompareOutput( \\pub fn main() void { \\ var x: usize = 3; \\ const y = add(1, 2, x); \\ if (y - 6 != 0) unreachable; \\} \\ \\fn add(a: usize, b: usize, c: usize) callconv(.Inline) usize { \\ if (a == 10) @compileError("bad"); \\ return a + b + c; \\} , "", ); } { var case = ctx.exe("recursive inline function", target); case.addCompareOutput( \\pub fn main() void { \\ const y = fibonacci(7); \\ if (y - 21 != 0) unreachable; \\} \\ \\fn fibonacci(n: usize) callconv(.Inline) usize { \\ if (n <= 2) return n; \\ return fibonacci(n - 2) + fibonacci(n - 1); \\} , "", ); // This additionally tests that the compile error reports the correct source location. // Without storing source locations relative to the owner decl, the compile error // here would be off by 2 bytes (from the "7" -> "999"). 
case.addError( \\pub fn main() void { \\ const y = fibonacci(999); \\ if (y - 21 != 0) unreachable; \\} \\ \\fn fibonacci(n: usize) callconv(.Inline) usize { \\ if (n <= 2) return n; \\ return fibonacci(n - 2) + fibonacci(n - 1); \\} , &[_][]const u8{":8:21: error: evaluation exceeded 1000 backwards branches"}); } { var case = ctx.exe("orelse at comptime", target); case.addCompareOutput( \\pub fn main() void { \\ const i: ?u64 = 0; \\ const result = i orelse 5; \\ assert(result == 0); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ const i: ?u64 = null; \\ const result = i orelse 5; \\ assert(result == 5); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , "", ); } { var case = ctx.exe("passing u0 to function", target); case.addCompareOutput( \\pub fn main() void { \\ doNothing(0); \\} \\fn doNothing(arg: u0) void { \\ _ = arg; \\} , "", ); } { var case = ctx.exe("catch at comptime", target); case.addCompareOutput( \\pub fn main() void { \\ const i: anyerror!u64 = 0; \\ const caught = i catch 5; \\ assert(caught == 0); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ const i: anyerror!u64 = error.B; \\ const caught = i catch 5; \\ assert(caught == 5); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ const a: anyerror!comptime_int = 42; \\ const b: *const comptime_int = &(a catch unreachable); \\ assert(b.* == 42); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; // assertion failure \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ const a: anyerror!u32 = error.B; \\ _ = &(a catch |err| assert(err == error.B)); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ const a: anyerror!u32 = error.Bar; \\ a catch |err| assert(err == error.Bar); \\} \\fn assert(b: bool) void { 
\\ if (!b) unreachable; \\} , ""); } { var case = ctx.exe("runtime bitwise and", target); case.addCompareOutput( \\pub fn main() void { \\ var i: u32 = 10; \\ var j: u32 = 11; \\ assert(i & 1 == 0); \\ assert(j & 1 == 1); \\ var m1: u32 = 0b1111; \\ var m2: u32 = 0b0000; \\ assert(m1 & 0b1010 == 0b1010); \\ assert(m2 & 0b1010 == 0b0000); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , "", ); } { var case = ctx.exe("runtime bitwise or", target); case.addCompareOutput( \\pub fn main() void { \\ var i: u32 = 10; \\ var j: u32 = 11; \\ assert(i | 1 == 11); \\ assert(j | 1 == 11); \\ var m1: u32 = 0b1111; \\ var m2: u32 = 0b0000; \\ assert(m1 | 0b1010 == 0b1111); \\ assert(m2 | 0b1010 == 0b1010); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , "", ); } { var case = ctx.exe("merge error sets", target); case.addCompareOutput( \\pub fn main() void { \\ const E = error{ A, B, D } || error { A, B, C }; \\ E.A catch {}; \\ E.B catch {}; \\ E.C catch {}; \\ E.D catch {}; \\ const E2 = error { X, Y } || @TypeOf(error.Z); \\ E2.X catch {}; \\ E2.Y catch {}; \\ E2.Z catch {}; \\ assert(anyerror || error { Z } == anyerror); \\} \\fn assert(b: bool) void { \\ if (!b) unreachable; \\} , "", ); case.addError( \\pub fn main() void { \\ const z = true || false; \\ _ = z; \\} , &.{ ":2:15: error: expected error set type, found 'bool'", ":2:20: note: '||' merges error sets; 'or' performs boolean OR", }); } { var case = ctx.exe("comptime var", target); case.addError( \\pub fn main() void { \\ var a: u32 = 0; \\ comptime var b: u32 = 0; \\ if (a == 0) b = 3; \\} , &.{ ":4:21: error: store to comptime variable depends on runtime condition", ":4:11: note: runtime condition here", }); case.addError( \\pub fn main() void { \\ var a: u32 = 0; \\ comptime var b: u32 = 0; \\ switch (a) { \\ 0 => {}, \\ else => b = 3, \\ } \\} , &.{ ":6:21: error: store to comptime variable depends on runtime condition", ":4:13: note: runtime condition here", }); switch 
(target.getOsTag()) { .linux => case.addCompareOutput( \\pub fn main() void { \\ comptime var len: u32 = 5; \\ print(len); \\ len += 9; \\ print(len); \\} \\ \\fn print(len: usize) void { \\ asm volatile ("syscall" \\ : \\ : [number] "{rax}" (1), \\ [arg1] "{rdi}" (1), \\ [arg2] "{rsi}" (@ptrToInt("Hello, World!\n")), \\ [arg3] "{rdx}" (len) \\ : "rcx", "r11", "memory" \\ ); \\ return; \\} , "HelloHello, World!\n"), .macos => case.addCompareOutput( \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn main() void { \\ comptime var len: u32 = 5; \\ print(len); \\ len += 9; \\ print(len); \\} \\ \\fn print(len: usize) void { \\ _ = write(1, @ptrToInt("Hello, World!\n"), len); \\} , "HelloHello, World!\n"), else => unreachable, } case.addError( \\comptime { \\ var x: i32 = 1; \\ x += 1; \\ if (x != 1) unreachable; \\} \\pub fn main() void {} , &.{":4:17: error: unable to resolve comptime value"}); case.addError( \\pub fn main() void { \\ comptime var i: u64 = 0; \\ while (i < 5) : (i += 1) {} \\} , &.{ ":3:24: error: cannot store to comptime variable in non-inline loop", ":3:5: note: non-inline loop here", }); case.addCompareOutput( \\pub fn main() void { \\ var a: u32 = 0; \\ if (a == 0) { \\ comptime var b: u32 = 0; \\ b = 1; \\ } \\} \\comptime { \\ var x: i32 = 1; \\ x += 1; \\ if (x != 2) unreachable; \\} , ""); switch (target.getOsTag()) { .linux => case.addCompareOutput( \\pub fn main() void { \\ comptime var i: u64 = 2; \\ inline while (i < 6) : (i+=1) { \\ print(i); \\ } \\} \\fn print(len: usize) void { \\ asm volatile ("syscall" \\ : \\ : [number] "{rax}" (1), \\ [arg1] "{rdi}" (1), \\ [arg2] "{rsi}" (@ptrToInt("Hello")), \\ [arg3] "{rdx}" (len) \\ : "rcx", "r11", "memory" \\ ); \\ return; \\} , "HeHelHellHello"), .macos => case.addCompareOutput( \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn main() void { \\ comptime var i: u64 = 2; \\ inline while (i < 6) : (i+=1) { \\ print(i); \\ } \\} \\fn print(len: usize) void { \\ _ = write(1, 
@ptrToInt("Hello"), len); \\} , "HeHelHellHello"), else => unreachable, } } { var case = ctx.exe("double ampersand", target); case.addError( \\pub const a = if (true && false) 1 else 2; , &[_][]const u8{":1:24: error: ambiguous use of '&&'; use 'and' for logical AND, or change whitespace to ' & &' for bitwise AND"}); case.addError( \\pub fn main() void { \\ const a = true; \\ const b = false; \\ _ = a & &b; \\} , &[_][]const u8{ ":4:11: error: incompatible types: 'bool' and '*const bool'", ":4:9: note: type 'bool' here", ":4:13: note: type '*const bool' here", }); case.addCompareOutput( \\pub fn main() void { \\ const b: u8 = 1; \\ _ = &&b; \\} , ""); } { var case = ctx.exe("setting an address space on a local variable", target); case.addError( \\export fn entry() i32 { \\ var foo: i32 addrspace(".general") = 1234; \\ return foo; \\} , &[_][]const u8{ ":2:28: error: cannot set address space of local variable 'foo'", }); } { var case = ctx.exe("saving vars of different ABI size to stack", target); case.addCompareOutput( \\pub fn main() void { \\ assert(callMe(2) == 24); \\} \\ \\fn callMe(a: u8) u8 { \\ var b: u8 = a + 10; \\ const c = 2 * b; \\ return c; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ assert(callMe(2) == 24); \\} \\ \\fn callMe(a: u16) u16 { \\ var b: u16 = a + 10; \\ const c = 2 * b; \\ return c; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); case.addCompareOutput( \\pub fn main() void { \\ assert(callMe(2) == 24); \\} \\ \\fn callMe(a: u32) u32 { \\ var b: u32 = a + 10; \\ const c = 2 * b; \\ return c; \\} \\ \\pub fn assert(ok: bool) void { \\ if (!ok) unreachable; // assertion failure \\} , "", ); } { var case = ctx.exe("issue 7187: miscompilation with bool return type", target); case.addCompareOutput( \\pub fn main() void { \\ var x: usize = 1; \\ var y: bool = getFalse(); \\ _ = y; \\ \\ 
assert(x == 1); \\} \\ \\fn getFalse() bool { \\ return false; \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); } { var case = ctx.exe("load-store via pointer deref", target); case.addCompareOutput( \\pub fn main() void { \\ var x: u32 = undefined; \\ set(&x); \\ assert(x == 123); \\} \\ \\fn set(x: *u32) void { \\ x.* = 123; \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ var x: u16 = undefined; \\ set(&x); \\ assert(x == 123); \\} \\ \\fn set(x: *u16) void { \\ x.* = 123; \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ var x: u8 = undefined; \\ set(&x); \\ assert(x == 123); \\} \\ \\fn set(x: *u8) void { \\ x.* = 123; \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); } { var case = ctx.exe("optional payload", target); case.addCompareOutput( \\pub fn main() void { \\ var x: u32 = undefined; \\ const maybe_x = byPtr(&x); \\ assert(maybe_x != null); \\ maybe_x.?.* = 123; \\ assert(x == 123); \\} \\ \\fn byPtr(x: *u32) ?*u32 { \\ return x; \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ var x: u32 = undefined; \\ const maybe_x = byPtr(&x); \\ assert(maybe_x == null); \\} \\ \\fn byPtr(x: *u32) ?*u32 { \\ _ = x; \\ return null; \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ var x: u8 = undefined; \\ const maybe_x = byPtr(&x); \\ assert(maybe_x != null); \\ maybe_x.?.* = 255; \\ assert(x == 255); \\} \\ \\fn byPtr(x: *u8) ?*u8 { \\ return x; \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ var x: i8 = undefined; \\ const maybe_x = byPtr(&x); \\ assert(maybe_x != null); \\ maybe_x.?.* = -1; \\ assert(x == -1); \\} \\ \\fn byPtr(x: *i8) ?*i8 { \\ return x; \\} \\ \\fn 
assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); } { var case = ctx.exe("unwrap error union - simple errors", target); case.addCompareOutput( \\pub fn main() void { \\ maybeErr() catch unreachable; \\} \\ \\fn maybeErr() !void { \\ return; \\} , ""); case.addCompareOutput( \\pub fn main() void { \\ maybeErr() catch return; \\ unreachable; \\} \\ \\fn maybeErr() !void { \\ return error.NoWay; \\} , ""); } { var case = ctx.exe("access slice element by index - slice_elem_val", target); case.addCompareOutput( \\var array = [_]usize{ 0, 42, 123, 34 }; \\var slice: []const usize = &array; \\ \\pub fn main() void { \\ assert(slice[0] == 0); \\ assert(slice[1] == 42); \\ assert(slice[2] == 123); \\ assert(slice[3] == 34); \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); } { var case = ctx.exe("lower unnamed constants - structs", target); case.addCompareOutput( \\const Foo = struct { \\ a: u8, \\ b: u32, \\ \\ fn first(self: *Foo) u8 { \\ return self.a; \\ } \\ \\ fn second(self: *Foo) u32 { \\ return self.b; \\ } \\}; \\ \\pub fn main() void { \\ var foo = Foo{ .a = 1, .b = 5 }; \\ assert(foo.first() == 1); \\ assert(foo.second() == 5); \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); case.addCompareOutput( \\const Foo = struct { \\ a: u8, \\ b: u32, \\ \\ fn first(self: *Foo) u8 { \\ return self.a; \\ } \\ \\ fn second(self: *Foo) u32 { \\ return self.b; \\ } \\}; \\ \\pub fn main() void { \\ var foo = Foo{ .a = 1, .b = 5 }; \\ assert(foo.first() == 1); \\ assert(foo.second() == 5); \\ \\ foo.a = 10; \\ foo.b = 255; \\ \\ assert(foo.first() == 10); \\ assert(foo.second() == 255); \\ \\ var foo2 = Foo{ .a = 15, .b = 255 }; \\ assert(foo2.first() == 15); \\ assert(foo2.second() == 255); \\} \\ \\fn assert(ok: bool) void { \\ if (!ok) unreachable; \\} , ""); case.addCompareOutput( \\const Foo = struct { \\ a: u8, \\ b: u32, \\ \\ fn first(self: *Foo) u8 { \\ return self.a; \\ } \\ \\ fn second(self: *Foo) u32 { \\ 
return self.b;
            \\    }
            \\};
            \\
            \\pub fn main() void {
            \\    var foo2 = Foo{ .a = 15, .b = 255 };
            \\    assert(foo2.first() == 15);
            \\    assert(foo2.second() == 255);
            \\}
            \\
            \\fn assert(ok: bool) void {
            \\    if (!ok) unreachable;
            \\}
        , "");
        }
    }
}

/// Registers Linux-specific x86_64 backend test cases on `ctx`.
/// Each `{ ... }` scope builds one executable test case; successive
/// `addCompareOutput` calls on the same case exercise incremental updates
/// (the compiler recompiles the changed source and output is re-checked).
/// NOTE(review): the embedded programs and expected-output strings are test
/// data — every byte is significant and must not be reformatted.
fn addLinuxTestCases(ctx: *TestContext) !void {
    // Linux tests
    {
        var case = ctx.exe("hello world with updates", linux_x64);
        // An empty root source file has no `main`; expect the resolution error.
        case.addError("", &[_][]const u8{
            ":108:9: error: struct 'tmp.tmp' has no member named 'main'",
        });

        // Incorrect return type
        case.addError(
            \\pub export fn _start() noreturn {
            \\}
        , &[_][]const u8{":2:1: error: expected noreturn, found void"});

        // Regular old hello world
        case.addCompareOutput(
            \\pub export fn _start() noreturn {
            \\    print();
            \\
            \\    exit();
            \\}
            \\
            \\fn print() void {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (1),
            \\          [arg1] "{rdi}" (1),
            \\          [arg2] "{rsi}" (@ptrToInt("Hello, World!\n")),
            \\          [arg3] "{rdx}" (14)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    return;
            \\}
            \\
            \\fn exit() noreturn {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (231),
            \\          [arg1] "{rdi}" (0)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    unreachable;
            \\}
        ,
            "Hello, World!\n",
        );

        // Convert to pub fn main
        case.addCompareOutput(
            \\pub fn main() void {
            \\    print();
            \\}
            \\
            \\fn print() void {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (1),
            \\          [arg1] "{rdi}" (1),
            \\          [arg2] "{rsi}" (@ptrToInt("Hello, World!\n")),
            \\          [arg3] "{rdx}" (14)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    return;
            \\}
        ,
            "Hello, World!\n",
        );

        // Now change the message only
        case.addCompareOutput(
            \\pub fn main() void {
            \\    print();
            \\}
            \\
            \\fn print() void {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (1),
            \\          [arg1] "{rdi}" (1),
            \\          [arg2] "{rsi}" (@ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n")),
            \\          [arg3] "{rdx}" (104)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    return;
            \\}
        ,
            "What is up? This is a longer message that will force the data to be relocated in virtual address space.\n",
        );

        // Now we print it twice.
        case.addCompareOutput(
            \\pub fn main() void {
            \\    print();
            \\    print();
            \\}
            \\
            \\fn print() void {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (1),
            \\          [arg1] "{rdi}" (1),
            \\          [arg2] "{rsi}" (@ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n")),
            \\          [arg3] "{rdx}" (104)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    return;
            \\}
        ,
            \\What is up? This is a longer message that will force the data to be relocated in virtual address space.
            \\What is up? This is a longer message that will force the data to be relocated in virtual address space.
            \\
        );
    }

    {
        var case = ctx.exe("adding numbers at comptime", linux_x64);
        case.addCompareOutput(
            \\pub export fn _start() noreturn {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (1),
            \\          [arg1] "{rdi}" (1),
            \\          [arg2] "{rsi}" (@ptrToInt("Hello, World!\n")),
            \\          [arg3] "{rdx}" (10 + 4)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (@as(usize, 230) + @as(usize, 1)),
            \\          [arg1] "{rdi}" (0)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    unreachable;
            \\}
        ,
            "Hello, World!\n",
        );
    }

    {
        var case = ctx.exe("only 1 function and it gets updated", linux_x64);
        case.addCompareOutput(
            \\pub export fn _start() noreturn {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (60), // exit
            \\          [arg1] "{rdi}" (0)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    unreachable;
            \\}
        ,
            "",
        );
        case.addCompareOutput(
            \\pub export fn _start() noreturn {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (231), // exit_group
            \\          [arg1] "{rdi}" (0)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    unreachable;
            \\}
        ,
            "",
        );
    }

    {
        var case = ctx.exe("inline assembly", linux_x64);
        case.addError(
            \\pub fn main() void {
            \\    const number = 1234;
            \\    const x = asm volatile ("syscall"
            \\        : [o] "{rax}" (-> number)
            \\        : [number] "{rax}" (231),
            \\          [arg1] "{rdi}" (60)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    _ = x;
            \\}
        , &[_][]const u8{":4:27: error: expected type, found comptime_int"});
        case.addError(
            \\const S = struct {
            \\    comptime {
            \\        asm volatile (
            \\            \\zig_moment:
            \\            \\syscall
            \\        );
            \\    }
            \\};
            \\pub fn main() void {
            \\    _ = S;
            \\}
        , &.{":3:13: error: volatile is meaningless on global assembly"});
        case.addError(
            \\pub fn main() void {
            \\    var bruh: u32 = 1;
            \\    asm (""
            \\        :
            \\        : [bruh] "{rax}" (4)
            \\        : "memory"
            \\    );
            \\}
        , &.{":3:5: error: assembly expression with no output must be marked volatile"});
        case.addError(
            \\pub fn main() void {}
            \\comptime {
            \\    asm (""
            \\        :
            \\        : [bruh] "{rax}" (4)
            \\        : "memory"
            \\    );
            \\}
        , &.{":3:5: error: global assembly cannot have inputs, outputs, or clobbers"});
    }

    {
        var case = ctx.exe("issue 10138: callee preserved regs working", linux_x64);
        case.addCompareOutput(
            \\pub fn main() void {
            \\    const fd = open();
            \\    _ = write(fd, "a", 1);
            \\    _ = close(fd);
            \\}
            \\
            \\fn open() usize {
            \\    return 42;
            \\}
            \\
            \\fn write(fd: usize, a: [*]const u8, len: usize) usize {
            \\    return syscall4(.WRITE, fd, @ptrToInt(a), len);
            \\}
            \\
            \\fn syscall4(n: enum { WRITE }, a: usize, b: usize, c: usize) usize {
            \\    _ = n;
            \\    _ = a;
            \\    _ = b;
            \\    _ = c;
            \\    return 23;
            \\}
            \\
            \\fn close(fd: usize) usize {
            \\    if (fd != 42)
            \\        unreachable;
            \\    return 0;
            \\}
        , "");
    }
}

/// Registers macOS-specific x86_64 backend test cases on `ctx`.
/// Same structure as `addLinuxTestCases`, but the embedded programs call
/// libc (`extern "c" fn write`/`exit`) instead of issuing raw syscalls.
fn addMacOsTestCases(ctx: *TestContext) !void {
    // macOS tests
    {
        var case = ctx.exe("darwin hello world with updates", macos_x64);
        // An empty root source file has no `main`; expect the resolution error.
        case.addError("", &[_][]const u8{
            ":108:9: error: struct 'tmp.tmp' has no member named 'main'",
        });

        // Incorrect return type
        case.addError(
            \\pub export fn main() noreturn {
            \\}
        , &[_][]const u8{
            ":2:1: error: expected noreturn, found void",
        });

        // Regular old hello world
        case.addCompareOutput(
            \\extern "c" fn write(usize, usize, usize) usize;
            \\extern "c" fn exit(usize) noreturn;
            \\
            \\pub export fn main() noreturn {
            \\    print();
            \\
            \\    exit(0);
            \\}
            \\
            \\fn print() void {
            \\    const msg = @ptrToInt("Hello, World!\n");
            \\    const len = 14;
            \\    _ = write(1, msg, len);
            \\}
        ,
            "Hello, World!\n",
        );

        // Now using start.zig without an explicit extern exit fn
        case.addCompareOutput(
            \\extern "c" fn write(usize, usize, usize) usize;
            \\
            \\pub fn main() void {
            \\    print();
            \\}
            \\
            \\fn print() void {
            \\    const msg = @ptrToInt("Hello, World!\n");
            \\    const len = 14;
            \\    _ = write(1, msg, len);
            \\}
        ,
            "Hello, World!\n",
        );

        // Print it 4 times and force growth and realloc.
        case.addCompareOutput(
            \\extern "c" fn write(usize, usize, usize) usize;
            \\
            \\pub fn main() void {
            \\    print();
            \\    print();
            \\    print();
            \\    print();
            \\}
            \\
            \\fn print() void {
            \\    const msg = @ptrToInt("Hello, World!\n");
            \\    const len = 14;
            \\    _ = write(1, msg, len);
            \\}
        ,
            \\Hello, World!
            \\Hello, World!
            \\Hello, World!
            \\Hello, World!
            \\
        );

        // Print it once, and change the message.
        case.addCompareOutput(
            \\extern "c" fn write(usize, usize, usize) usize;
            \\
            \\pub fn main() void {
            \\    print();
            \\}
            \\
            \\fn print() void {
            \\    const msg = @ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n");
            \\    const len = 104;
            \\    _ = write(1, msg, len);
            \\}
        ,
            "What is up? This is a longer message that will force the data to be relocated in virtual address space.\n",
        );

        // Now we print it twice.
        case.addCompareOutput(
            \\extern "c" fn write(usize, usize, usize) usize;
            \\
            \\pub fn main() void {
            \\    print();
            \\    print();
            \\}
            \\
            \\fn print() void {
            \\    const msg = @ptrToInt("What is up? This is a longer message that will force the data to be relocated in virtual address space.\n");
            \\    const len = 104;
            \\    _ = write(1, msg, len);
            \\}
        ,
            \\What is up? This is a longer message that will force the data to be relocated in virtual address space.
            \\What is up? This is a longer message that will force the data to be relocated in virtual address space.
            \\
        );
    }
}
// ===== end of file: test/stage2/x86_64.zig =====
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.codegen);
const math = std.math;
const native_endian = builtin.cpu.arch.endian();

const llvm = @import("llvm/bindings.zig");
const link = @import("../link.zig");
const Compilation = @import("../Compilation.zig");
const build_options = @import("build_options");
const Module = @import("../Module.zig");
const TypedValue = @import("../TypedValue.zig");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const target_util = @import("../target.zig");

const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;

const LazySrcLoc = Module.LazySrcLoc;

// Error set shared by the codegen entry points in this file.
const Error = error{ OutOfMemory, CodegenFail };

/// Renders `target` as an LLVM target triple string of the form
/// "arch-unknown-os-abi" (vendor is always "unknown"). Architectures that
/// the LLVM backend cannot handle produce a descriptive error instead.
/// Caller owns the returned null-terminated string.
pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
    const llvm_arch = switch (target.cpu.arch) {
        .arm => "arm",
        .armeb => "armeb",
        .aarch64 => "aarch64",
        .aarch64_be => "aarch64_be",
        .aarch64_32 => "aarch64_32",
        .arc => "arc",
        .avr => "avr",
        .bpfel => "bpfel",
        .bpfeb => "bpfeb",
        .csky => "csky",
        .hexagon => "hexagon",
        .m68k => "m68k",
        .mips => "mips",
        .mipsel => "mipsel",
        .mips64 => "mips64",
        .mips64el => "mips64el",
        .msp430 => "msp430",
        .powerpc => "powerpc",
        .powerpcle => "powerpcle",
        .powerpc64 => "powerpc64",
        .powerpc64le => "powerpc64le",
        .r600 => "r600",
        .amdgcn => "amdgcn",
        .riscv32 => "riscv32",
        .riscv64 => "riscv64",
        .sparc => "sparc",
        .sparcv9 => "sparcv9",
        .sparcel => "sparcel",
        .s390x => "s390x",
        .tce => "tce",
        .tcele => "tcele",
        .thumb => "thumb",
        .thumbeb => "thumbeb",
        .i386 => "i386",
        .x86_64 => "x86_64",
        .xcore => "xcore",
        .nvptx => "nvptx",
        .nvptx64 => "nvptx64",
        .le32 => "le32",
        .le64 => "le64",
        .amdil => "amdil",
        .amdil64 => "amdil64",
        .hsail => "hsail",
        .hsail64 => "hsail64",
        .spir => "spir",
        .spir64 => "spir64",
        .kalimba => "kalimba",
        .shave => "shave",
        .lanai => "lanai",
        .wasm32 => "wasm32",
        .wasm64 => "wasm64",
        .renderscript32 => "renderscript32",
        .renderscript64 => "renderscript64",
        .ve => "ve",
        .spu_2 => return error.@"LLVM backend does not support SPU Mark II",
        .spirv32 => return error.@"LLVM backend does not support SPIR-V",
        .spirv64 => return error.@"LLVM backend does not support SPIR-V",
    };

    const llvm_os = switch (target.os.tag) {
        .freestanding => "unknown",
        .ananas => "ananas",
        .cloudabi => "cloudabi",
        .dragonfly => "dragonfly",
        .freebsd => "freebsd",
        .fuchsia => "fuchsia",
        .ios => "ios",
        .kfreebsd => "kfreebsd",
        .linux => "linux",
        .lv2 => "lv2",
        .macos => "macosx",
        .netbsd => "netbsd",
        .openbsd => "openbsd",
        .solaris => "solaris",
        .windows => "windows",
        .zos => "zos",
        .haiku => "haiku",
        .minix => "minix",
        .rtems => "rtems",
        .nacl => "nacl",
        .aix => "aix",
        .cuda => "cuda",
        .nvcl => "nvcl",
        .amdhsa => "amdhsa",
        .ps4 => "ps4",
        .elfiamcu => "elfiamcu",
        .tvos => "tvos",
        .watchos => "watchos",
        .mesa3d => "mesa3d",
        .contiki => "contiki",
        .amdpal => "amdpal",
        .hermit => "hermit",
        .hurd => "hurd",
        .wasi => "wasi",
        .emscripten => "emscripten",
        // UEFI uses the Windows triple so LLVM selects PE/COFF emission.
        .uefi => "windows",
        .opencl,
        .glsl450,
        .vulkan,
        .plan9,
        .other,
        => "unknown",
    };

    const llvm_abi = switch (target.abi) {
        .none => "unknown",
        .gnu => "gnu",
        .gnuabin32 => "gnuabin32",
        .gnuabi64 => "gnuabi64",
        .gnueabi => "gnueabi",
        .gnueabihf => "gnueabihf",
        .gnux32 => "gnux32",
        .gnuilp32 => "gnuilp32",
        .code16 => "code16",
        .eabi => "eabi",
        .eabihf => "eabihf",
        .android => "android",
        .musl => "musl",
        .musleabi => "musleabi",
        .musleabihf => "musleabihf",
        .muslx32 => "muslx32",
        .msvc => "msvc",
        .itanium => "itanium",
        .cygnus => "cygnus",
        .coreclr => "coreclr",
        .simulator => "simulator",
        .macabi => "macabi",
    };

    return std.fmt.allocPrintZ(allocator, "{s}-unknown-{s}-{s}", .{ llvm_arch, llvm_os, llvm_abi });
}

/// Owns the LLVM module, context, and target machine for one compilation,
/// plus the Decl→LLVM-value and Zig-type→LLVM-type caches. Created via
/// `create`/`init`; released via `destroy`/`deinit`.
pub const Object = struct {
    llvm_module: *const llvm.Module,
    context: *const llvm.Context,
    target_machine: *const llvm.TargetMachine,
    /// Ideally we would use `llvm_module.getNamedFunction` to go from *Decl to LLVM function,
    /// but that has some downsides:
    /// * we have to compute the fully qualified name every time we want to do the lookup
    /// * for externally linked functions, the name is not fully qualified, but when
    ///   a Decl goes from exported to not exported and vice-versa, we would use the wrong
    ///   version of the name and incorrectly get function not found in the llvm module.
    /// * it works for functions not all globals.
    /// Therefore, this table keeps track of the mapping.
    decl_map: std.AutoHashMapUnmanaged(*const Module.Decl, *const llvm.Value),
    /// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of
    /// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
    /// TODO we need to remove entries from this map in response to incremental compilation
    /// but I think the frontend won't tell us about types that get deleted because
    /// hasRuntimeBits() is false for types.
    type_map: TypeMap,
    /// The backing memory for `type_map`. Periodically garbage collected after flush().
    /// The code for doing the periodical GC is not yet implemented.
    type_map_arena: std.heap.ArenaAllocator,
    /// The LLVM global table which holds the names corresponding to Zig errors. Note that the values
    /// are not added until flushModule, when all errors in the compilation are known.
    error_name_table: ?*const llvm.Value,

    pub const TypeMap = std.HashMapUnmanaged(
        Type,
        *const llvm.Type,
        Type.HashContext64,
        std.hash_map.default_max_load_percentage,
    );

    /// Heap-allocates an `Object` and initializes it; caller frees with `destroy`.
    pub fn create(gpa: Allocator, options: link.Options) !*Object {
        const obj = try gpa.create(Object);
        errdefer gpa.destroy(obj);
        obj.* = try Object.init(gpa, options);
        return obj;
    }

    /// Creates the LLVM context, module, and target machine from the link
    /// options. On any failure the partially-built LLVM objects are disposed
    /// via the `errdefer` chain. Release with `deinit`.
    pub fn init(gpa: Allocator, options: link.Options) !Object {
        const context = llvm.Context.create();
        errdefer context.dispose();

        initializeLLVMTarget(options.target.cpu.arch);

        const root_nameZ = try gpa.dupeZ(u8, options.root_name);
        defer gpa.free(root_nameZ);
        const llvm_module = llvm.Module.createWithName(root_nameZ.ptr, context);
        errdefer llvm_module.dispose();

        const llvm_target_triple = try targetTriple(gpa, options.target);
        defer gpa.free(llvm_target_triple);

        var error_message: [*:0]const u8 = undefined;
        var target: *const llvm.Target = undefined;
        if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message).toBool()) {
            defer llvm.disposeMessage(error_message);
            log.err("LLVM failed to parse '{s}': {s}", .{ llvm_target_triple, error_message });
            return error.InvalidLlvmTriple;
        }

        const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug)
            .None
        else
            .Aggressive;

        const reloc_mode: llvm.RelocMode = if (options.pic)
            .PIC
        else if (options.link_mode == .Dynamic)
            llvm.RelocMode.DynamicNoPIC
        else
            .Static;

        const code_model: llvm.CodeModel = switch (options.machine_code_model) {
            .default => .Default,
            .tiny => .Tiny,
            .small => .Small,
            .kernel => .Kernel,
            .medium => .Medium,
            .large => .Large,
        };

        // TODO handle float ABI better- it should depend on the ABI portion of std.Target
        const float_abi: llvm.ABIType = .Default;

        const target_machine = llvm.TargetMachine.create(
            target,
            llvm_target_triple.ptr,
            if (options.target.cpu.model.llvm_name) |s| s.ptr else null,
            options.llvm_cpu_features,
            opt_level,
            reloc_mode,
            code_model,
            options.function_sections,
            float_abi,
            if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null,
        );
        errdefer target_machine.dispose();

        const target_data = target_machine.createTargetDataLayout();
        defer target_data.dispose();
        llvm_module.setModuleDataLayout(target_data);

        return Object{
            .llvm_module = llvm_module,
            .context = context,
            .target_machine = target_machine,
            .decl_map = .{},
            .type_map = .{},
            .type_map_arena = std.heap.ArenaAllocator.init(gpa),
            .error_name_table = null,
        };
    }

    /// Disposes all LLVM objects and frees the caches. Deallocation cannot fail.
    pub fn deinit(self: *Object, gpa: Allocator) void {
        self.target_machine.dispose();
        self.llvm_module.dispose();
        self.context.dispose();
        self.decl_map.deinit(gpa);
        self.type_map.deinit(gpa);
        self.type_map_arena.deinit();
        self.* = undefined;
    }

    /// Counterpart of `create`: deinitializes and frees the heap allocation.
    pub fn destroy(self: *Object, gpa: Allocator) void {
        self.deinit(gpa);
        gpa.destroy(self);
    }

    /// Resolves an optional emit location to a null-terminated path in `arena`,
    /// falling back to `cache_directory` when the location has no directory.
    /// Returns null when there is nothing to emit.
    fn locPath(
        arena: Allocator,
        opt_loc: ?Compilation.EmitLoc,
        cache_directory: Compilation.Directory,
    ) !?[*:0]u8 {
        const loc = opt_loc orelse return null;
        const directory = loc.directory orelse cache_directory;
        const slice = try directory.joinZ(arena, &[_][]const u8{loc.basename});
        return slice.ptr;
    }

    /// Populates the error-name table global (an array of `[:0]const u8`
    /// slices, one per error in the compilation) and points the previously
    /// created placeholder global at it. No-op when nothing referenced it.
    fn genErrorNameTable(self: *Object, comp: *Compilation) !void {
        // If self.error_name_table is null, there was no instruction that actually referenced the error table.
        const error_name_table_ptr_global = self.error_name_table orelse return;

        const mod = comp.bin_file.options.module.?;
        const target = mod.getTarget();

        const llvm_ptr_ty = self.context.intType(8).pointerType(0); // TODO: Address space
        const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());

        const type_fields = [_]*const llvm.Type{
            llvm_ptr_ty,
            llvm_usize_ty,
        };
        const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False);
        const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
        const slice_alignment = slice_ty.abiAlignment(target);

        const error_name_list = mod.error_name_list.items;
        const llvm_errors = try comp.gpa.alloc(*const llvm.Value, error_name_list.len);
        defer comp.gpa.free(llvm_errors);

        // Index 0 is the "no error" value and never has a name.
        llvm_errors[0] = llvm_slice_ty.getUndef();
        for (llvm_errors[1..]) |*llvm_error, i| {
            const name = error_name_list[1..][i];
            const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
            const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
            str_global.setInitializer(str_init);
            str_global.setLinkage(.Private);
            str_global.setGlobalConstant(.True);
            str_global.setUnnamedAddr(.True);
            str_global.setAlignment(1);

            const slice_fields = [_]*const llvm.Value{
                str_global.constBitCast(llvm_ptr_ty),
                llvm_usize_ty.constInt(name.len, .False),
            };
            llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len);
        }

        const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @intCast(c_uint, error_name_list.len));

        const error_name_table_global = self.llvm_module.addGlobal(error_name_table_init.typeOf(), "");
        error_name_table_global.setInitializer(error_name_table_init);
        error_name_table_global.setLinkage(.Private);
        error_name_table_global.setGlobalConstant(.True);
        error_name_table_global.setUnnamedAddr(.True);
        error_name_table_global.setAlignment(slice_alignment); // TODO: Dont hardcode

        const error_name_table_ptr = error_name_table_global.constBitCast(llvm_slice_ty.pointerType(0)); // TODO: Address space
        error_name_table_ptr_global.setInitializer(error_name_table_ptr);
    }

    /// Finalizes the LLVM module: emits the error-name table, optionally dumps
    /// and verifies the IR, then asks the target machine to emit the requested
    /// asm/object/IR/bitcode artifacts.
    pub fn flushModule(self: *Object, comp: *Compilation) !void {
        try self.genErrorNameTable(comp);

        if (comp.verbose_llvm_ir) {
            self.llvm_module.dump();
        }

        if (std.debug.runtime_safety) {
            var error_message: [*:0]const u8 = undefined;
            // verifyModule always allocs the error_message even if there is no error
            defer llvm.disposeMessage(error_message);

            if (self.llvm_module.verify(.ReturnStatus, &error_message).toBool()) {
                std.debug.print("\n{s}\n", .{error_message});
                @panic("LLVM module verification failed");
            }
        }

        var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
        defer arena_allocator.deinit();
        const arena = arena_allocator.allocator();

        const mod = comp.bin_file.options.module.?;
        const cache_dir = mod.zig_cache_artifact_directory;

        var emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit) |emit|
            try emit.basenamePath(arena, try arena.dupeZ(u8, comp.bin_file.intermediary_basename.?))
        else
            null;

        const emit_asm_path = try locPath(arena, comp.emit_asm, cache_dir);
        const emit_llvm_ir_path = try locPath(arena, comp.emit_llvm_ir, cache_dir);
        const emit_llvm_bc_path = try locPath(arena, comp.emit_llvm_bc, cache_dir);

        const emit_asm_msg = emit_asm_path orelse "(none)";
        const emit_bin_msg = emit_bin_path orelse "(none)";
        const emit_llvm_ir_msg = emit_llvm_ir_path orelse "(none)";
        const emit_llvm_bc_msg = emit_llvm_bc_path orelse "(none)";
        log.debug("emit LLVM object asm={s} bin={s} ir={s} bc={s}", .{
            emit_asm_msg, emit_bin_msg, emit_llvm_ir_msg, emit_llvm_bc_msg,
        });

        var error_message: [*:0]const u8 = undefined;
        if (self.target_machine.emitToFile(
            self.llvm_module,
            &error_message,
            comp.bin_file.options.optimize_mode == .Debug,
            comp.bin_file.options.optimize_mode == .ReleaseSmall,
            comp.time_report,
            comp.bin_file.options.tsan,
            comp.bin_file.options.lto,
            emit_asm_path,
            emit_bin_path,
            emit_llvm_ir_path,
            emit_llvm_bc_path,
        )) {
            defer llvm.disposeMessage(error_message);
            log.err("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{
                emit_asm_msg,
                emit_bin_msg,
                emit_llvm_ir_msg,
                emit_llvm_bc_msg,
                error_message,
            });
            return error.FailedToEmit;
        }
    }

    /// Regenerates the LLVM IR for one function: resolves (or creates) its
    /// LLVM function, refreshes attributes, deletes all existing basic blocks,
    /// and lowers the AIR body from scratch. Codegen failures are recorded in
    /// `module.failed_decls` rather than propagated.
    pub fn updateFunc(
        self: *Object,
        module: *Module,
        func: *Module.Fn,
        air: Air,
        liveness: Liveness,
    ) !void {
        const decl = func.owner_decl;

        var dg: DeclGen = .{
            .context = self.context,
            .object = self,
            .module = module,
            .decl = decl,
            .err_msg = null,
            .gpa = module.gpa,
        };

        const llvm_func = try dg.resolveLlvmFunction(decl);

        if (module.align_stack_fns.get(func)) |align_info| {
            dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
            dg.addFnAttr(llvm_func, "noinline");
        } else {
            DeclGen.removeFnAttr(llvm_func, "alignstack");
            if (!func.is_noinline) DeclGen.removeFnAttr(llvm_func, "noinline");
        }

        if (func.is_cold) {
            dg.addFnAttr(llvm_func, "cold");
        } else {
            DeclGen.removeFnAttr(llvm_func, "cold");
        }

        // This gets the LLVM values from the function and stores them in `dg.args`.
        const fn_info = decl.ty.fnInfo();
        const ret_ty_by_ref = isByRef(fn_info.return_type);
        // When returning by reference, param 0 is the sret pointer.
        const ret_ptr = if (ret_ty_by_ref) llvm_func.getParam(0) else null;

        var args = std.ArrayList(*const llvm.Value).init(dg.gpa);
        defer args.deinit();

        const param_offset: c_uint = @boolToInt(ret_ptr != null);
        for (fn_info.param_types) |param_ty| {
            if (!param_ty.hasRuntimeBits()) continue;

            const llvm_arg_i = @intCast(c_uint, args.items.len) + param_offset;
            try args.append(llvm_func.getParam(llvm_arg_i));
        }

        // Remove all the basic blocks of a function in order to start over, generating
        // LLVM IR from an empty function body.
        while (llvm_func.getFirstBasicBlock()) |bb| {
            bb.deleteBasicBlock();
        }

        const builder = dg.context.createBuilder();

        const entry_block = dg.context.appendBasicBlock(llvm_func, "Entry");
        builder.positionBuilderAtEnd(entry_block);

        var fg: FuncGen = .{
            .gpa = dg.gpa,
            .air = air,
            .liveness = liveness,
            .context = dg.context,
            .dg = &dg,
            .builder = builder,
            .ret_ptr = ret_ptr,
            .args = args.toOwnedSlice(),
            .arg_index = 0,
            .func_inst_table = .{},
            .llvm_func = llvm_func,
            .blocks = .{},
            .single_threaded = module.comp.bin_file.options.single_threaded,
        };
        defer fg.deinit();

        fg.genBody(air.getMainBody()) catch |err| switch (err) {
            error.CodegenFail => {
                decl.analysis = .codegen_failure;
                try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
                dg.err_msg = null;
                return;
            },
            else => |e| return e,
        };

        const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
        try self.updateDeclExports(module, decl, decl_exports);
    }

    /// Regenerates the LLVM representation of a non-function Decl, recording
    /// codegen failures in `module.failed_decls` instead of propagating them.
    pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void {
        var dg: DeclGen = .{
            .context = self.context,
            .object = self,
            .module = module,
            .decl = decl,
            .err_msg = null,
            .gpa = module.gpa,
        };
        dg.genDecl() catch |err| switch (err) {
            error.CodegenFail => {
                decl.analysis = .codegen_failure;
                try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
                dg.err_msg = null;
                return;
            },
            else => |e| return e,
        };
        const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
        try self.updateDeclExports(module, decl, decl_exports);
    }

    /// Applies export/extern status to a Decl's LLVM global: name, linkage,
    /// unnamed-addr, and aliases for any additional exports.
    pub fn updateDeclExports(
        self: *Object,
        module: *const Module,
        decl: *const Module.Decl,
        exports: []const *Module.Export,
    ) !void {
        // If the module does not already have the function, we ignore this function call
        // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
        const llvm_global = self.decl_map.get(decl) orelse return;
        const is_extern = decl.isExtern();
        if (is_extern) {
            llvm_global.setValueName(decl.name);
            llvm_global.setUnnamedAddr(.False);
            llvm_global.setLinkage(.External);
        } else if (exports.len != 0) {
            const exp_name = exports[0].options.name;
            llvm_global.setValueName2(exp_name.ptr, exp_name.len);
            llvm_global.setUnnamedAddr(.False);
            switch (exports[0].options.linkage) {
                .Internal => unreachable,
                .Strong => llvm_global.setLinkage(.External),
                .Weak => llvm_global.setLinkage(.WeakODR),
                .LinkOnce => llvm_global.setLinkage(.LinkOnceODR),
            }
            // If a Decl is exported more than one time (which is rare),
            // we add aliases for all but the first export.
            // TODO LLVM C API does not support deleting aliases. We need to
            // patch it to support this or figure out how to wrap the C++ API ourselves.
            // Until then we iterate over existing aliases and make them point
            // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
            for (exports[1..]) |exp| {
                const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
                defer module.gpa.free(exp_name_z);

                if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
                    alias.setAliasee(llvm_global);
                } else {
                    _ = self.llvm_module.addAlias(
                        llvm_global.typeOf(),
                        llvm_global,
                        exp_name_z,
                    );
                }
            }
        } else {
            // Not exported and not extern: give it the fully qualified name
            // with internal linkage.
            const fqn = try decl.getFullyQualifiedName(module.gpa);
            defer module.gpa.free(fqn);
            llvm_global.setValueName2(fqn.ptr, fqn.len);
            llvm_global.setLinkage(.Internal);
            llvm_global.setUnnamedAddr(.True);
        }
    }

    /// Deletes the LLVM global backing `decl`, if one was ever created.
    /// NOTE(review): the `decl_map` entry itself appears to be left in place
    /// here — removal presumably happens elsewhere; confirm against callers.
    pub fn freeDecl(self: *Object, decl: *Module.Decl) void {
        const llvm_value = self.decl_map.get(decl) orelse return;
        llvm_value.deleteGlobal();
    }
};

pub const DeclGen = struct {
    context: *const llvm.Context,
    object: *Object,
    module: *Module,
    decl: *Module.Decl,
    gpa: Allocator,
    err_msg: ?*Module.ErrorMsg,

    /// Records a "TODO (LLVM)" error message on this DeclGen and returns
    /// `error.CodegenFail`; must only be called once per failed decl.
    fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
        @setCold(true);
        assert(self.err_msg == null);
        const src_loc = @as(LazySrcLoc, .{
.node_offset = 0 }).toSrcLoc(self.decl); self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } fn llvmModule(self: *DeclGen) *const llvm.Module { return self.object.llvm_module; } fn genDecl(dg: *DeclGen) !void { const decl = dg.decl; assert(decl.has_tv); log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val }); if (decl.val.castTag(.function)) |func_payload| { _ = func_payload; @panic("TODO llvm backend genDecl function pointer"); } else if (decl.val.castTag(.extern_fn)) |extern_fn| { _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); } else { const target = dg.module.getTarget(); const global = try dg.resolveGlobalDecl(decl); global.setAlignment(decl.getAlignment(target)); assert(decl.has_tv); const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { const variable = payload.data; break :init_val variable.init; } else init_val: { global.setGlobalConstant(.True); break :init_val decl.val; }; if (init_val.tag() != .unreachable_value) { const llvm_init = try dg.genTypedValue(.{ .ty = decl.ty, .val = init_val }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); } else { // LLVM does not allow us to change the type of globals. So we must // create a new global with the correct type, copy all its attributes, // and then update all references to point to the new global, // delete the original, and rename the new one to the old one's name. // This is necessary because LLVM does not support const bitcasting // a struct with padding bytes, which is needed to lower a const union value // to LLVM, when a field other than the most-aligned is active. Instead, // we must lower to an unnamed struct, and pointer cast at usage sites // of the global. 
Such an unnamed struct is the cause of the global type // mismatch, because we don't have the LLVM type until the *value* is created, // whereas the global needs to be created based on the type alone, because // lowering the value may reference the global as a pointer. const new_global = dg.object.llvm_module.addGlobalInAddressSpace( llvm_init.typeOf(), "", dg.llvmAddressSpace(decl.@"addrspace"), ); new_global.setLinkage(global.getLinkage()); new_global.setUnnamedAddr(global.getUnnamedAddress()); new_global.setAlignment(global.getAlignment()); new_global.setInitializer(llvm_init); // replaceAllUsesWith requires the type to be unchanged. So we bitcast // the new global to the old type and use that as the thing to replace // old uses. const new_global_ptr = new_global.constBitCast(global.typeOf()); global.replaceAllUsesWith(new_global_ptr); dg.object.decl_map.putAssumeCapacity(decl, new_global); new_global.takeName(global); global.deleteGlobal(); } } } } /// If the llvm function does not exist, create it. /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
fn resolveLlvmFunction(dg: *DeclGen, decl: *Module.Decl) !*const llvm.Value {
    // Memoized per-decl: the same Decl always maps to the same llvm.Value.
    const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl);
    if (gop.found_existing) return gop.value_ptr.*;

    assert(decl.has_tv);
    const zig_fn_type = decl.ty;
    const fn_info = zig_fn_type.fnInfo();
    const target = dg.module.getTarget();
    // Whether the return value is passed through a hidden first pointer parameter.
    const sret = firstParamSRet(fn_info, target);

    const fn_type = try dg.llvmType(zig_fn_type);

    const fqn = try decl.getFullyQualifiedName(dg.gpa);
    defer dg.gpa.free(fqn);

    const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
    const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace);
    gop.value_ptr.* = llvm_fn;

    const is_extern = decl.val.tag() == .extern_fn;
    if (!is_extern) {
        llvm_fn.setLinkage(.Internal);
        llvm_fn.setUnnamedAddr(.True);
    }

    if (sret) {
        dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
        dg.addArgAttr(llvm_fn, 0, "noalias");
        const raw_llvm_ret_ty = try dg.llvmType(fn_info.return_type);
        llvm_fn.addSretAttr(0, raw_llvm_ret_ty);
    }

    // Set parameter attributes. LLVM parameter index 0 is the sret pointer when present.
    var llvm_param_i: c_uint = @boolToInt(sret);
    for (fn_info.param_types) |param_ty| {
        // Zero-bit parameters are not lowered at all, so they consume no LLVM slot.
        if (!param_ty.hasRuntimeBits()) continue;

        if (isByRef(param_ty)) {
            dg.addArgAttr(llvm_fn, llvm_param_i, "nonnull");
            // TODO readonly, noalias, align
        }
        llvm_param_i += 1;
    }

    // TODO: more attributes. see codegen.cpp `make_fn_llvm_value`.
    if (fn_info.cc == .Naked) {
        dg.addFnAttr(llvm_fn, "naked");
    } else {
        llvm_fn.setFunctionCallConv(toLlvmCallConv(fn_info.cc, target));
    }

    if (fn_info.alignment != 0) {
        llvm_fn.setAlignment(fn_info.alignment);
    }

    // Function attributes that are independent of analysis results of the function body.
    dg.addCommonFnAttributes(llvm_fn);

    if (fn_info.return_type.isNoReturn()) {
        dg.addFnAttr(llvm_fn, "noreturn");
    }

    return llvm_fn;
}

/// Applies the function attributes that depend only on compilation options
/// (red zone, frame pointer, unwind tables, sanitizers, size optimization),
/// never on the function body itself.
fn addCommonFnAttributes(dg: *DeclGen, llvm_fn: *const llvm.Value) void {
    if (!dg.module.comp.bin_file.options.red_zone) {
        dg.addFnAttr(llvm_fn, "noredzone");
    }
    if (dg.module.comp.bin_file.options.omit_frame_pointer) {
        dg.addFnAttrString(llvm_fn, "frame-pointer", "none");
    } else {
        dg.addFnAttrString(llvm_fn, "frame-pointer", "all");
    }
    dg.addFnAttr(llvm_fn, "nounwind");
    if (dg.module.comp.unwind_tables) {
        dg.addFnAttr(llvm_fn, "uwtable");
    }
    if (dg.module.comp.bin_file.options.skip_linker_dependencies) {
        // The intent here is for compiler-rt and libc functions to not generate
        // infinite recursion. For example, if we are compiling the memcpy function,
        // and llvm detects that the body is equivalent to memcpy, it may replace the
        // body of memcpy with a call to memcpy, which would then cause a stack
        // overflow instead of performing memcpy.
        dg.addFnAttr(llvm_fn, "nobuiltin");
    }
    if (dg.module.comp.bin_file.options.optimize_mode == .ReleaseSmall) {
        dg.addFnAttr(llvm_fn, "minsize");
        dg.addFnAttr(llvm_fn, "optsize");
    }
    if (dg.module.comp.bin_file.options.tsan) {
        dg.addFnAttr(llvm_fn, "sanitize_thread");
    }
    // TODO add target-cpu and target-features fn attributes
}

/// If the LLVM global for this decl does not exist yet, create it (memoized in decl_map).
fn resolveGlobalDecl(dg: *DeclGen, decl: *Module.Decl) Error!*const llvm.Value {
    const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl);
    if (gop.found_existing) return gop.value_ptr.*;
    // Keep the map consistent if a later step below fails.
    errdefer assert(dg.object.decl_map.remove(decl));

    const fqn = try decl.getFullyQualifiedName(dg.gpa);
    defer dg.gpa.free(fqn);

    const llvm_type = try dg.llvmType(decl.ty);
    const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
    const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(llvm_type, fqn, llvm_addrspace);
    gop.value_ptr.* = llvm_global;

    // NOTE(review): extern globals appear to be represented by an
    // `unreachable_value` decl.val here (matching genDecl's "no initializer"
    // convention) — confirm against how Sema encodes extern variables.
    const is_extern = decl.val.tag() == .unreachable_value;
    if (!is_extern) {
        llvm_global.setLinkage(.Internal);
        llvm_global.setUnnamedAddr(.True);
    }

    return llvm_global;
}

/// Maps a Zig address space to the target's LLVM address space number.
/// Only x86 segment address spaces and NVPTX spaces are non-default;
/// any other (arch, address_space) combination is unreachable by construction.
fn llvmAddressSpace(self: DeclGen, address_space: std.builtin.AddressSpace) c_uint {
    const target = self.module.getTarget();
    return switch (target.cpu.arch) {
        .i386, .x86_64 => switch (address_space) {
            .generic => llvm.address_space.default,
            .gs => llvm.address_space.x86.gs,
            .fs => llvm.address_space.x86.fs,
            .ss => llvm.address_space.x86.ss,
            else => unreachable,
        },
        .nvptx, .nvptx64 => switch (address_space) {
            .generic => llvm.address_space.default,
            .global => llvm.address_space.nvptx.global,
            .constant => llvm.address_space.nvptx.constant,
            .param => llvm.address_space.nvptx.param,
            .shared => llvm.address_space.nvptx.shared,
            .local => llvm.address_space.nvptx.local,
            else => unreachable,
        },
        else => switch (address_space) {
            .generic => llvm.address_space.default,
            else => unreachable,
        },
    };
}

/// Returns true when `val` was lowered as an unnamed (anonymous) struct that does
/// not match the canonical LLVM type for `ty` (see genDecl's type-mismatch handling).
fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool {
    // Once `llvmType` succeeds, successive calls to it with the same Zig type
    // are guaranteed to succeed. So if a call to `llvmType` fails here it means
    // it is the first time lowering the type, which means the value can't possibly
    // have that type.
    const llvm_ty = dg.llvmType(ty) catch return true;
    return val.typeOf() != llvm_ty;
}

/// Lowers a Zig Type to its canonical LLVM type. Aggregate results (opaque,
/// struct, union) are memoized in `object.type_map`; the memo entry is written
/// before recursing into field types so self-referential types terminate.
fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type {
    const gpa = dg.gpa;
    const target = dg.module.getTarget();
    switch (t.zigTypeTag()) {
        .Void, .NoReturn => return dg.context.voidType(),
        .Int => {
            const info = t.intInfo(target);
            assert(info.bits != 0);
            return dg.context.intType(info.bits);
        },
        .Enum => {
            // An enum lowers as its integer tag type.
            var buffer: Type.Payload.Bits = undefined;
            const int_ty = t.intTagType(&buffer);
            const bit_count = int_ty.intInfo(target).bits;
            assert(bit_count != 0);
            return dg.context.intType(bit_count);
        },
        .Float => switch (t.floatBits(target)) {
            16 => return dg.context.halfType(),
            32 => return dg.context.floatType(),
            64 => return dg.context.doubleType(),
            // f80 only exists natively on some targets; elsewhere it is carried as i80.
            80 => return if (backendSupportsF80(target)) dg.context.x86FP80Type() else dg.context.intType(80),
            128 => return dg.context.fp128Type(),
            else => unreachable,
        },
        .Bool => return dg.context.intType(1),
        .Pointer => {
            if (t.isSlice()) {
                // Slices lower as { ptr, usize len }.
                var buf: Type.SlicePtrFieldTypeBuffer = undefined;
                const ptr_type = t.slicePtrFieldType(&buf);

                const fields: [2]*const llvm.Type = .{
                    try dg.llvmType(ptr_type),
                    try dg.llvmType(Type.usize),
                };
                return dg.context.structType(&fields, fields.len, .False);
            }
            const ptr_info = t.ptrInfo().data;
            const llvm_addrspace = dg.llvmAddressSpace(ptr_info.@"addrspace");
            if (ptr_info.host_size != 0) {
                // Packed-struct host pointers point at the whole host integer.
                return dg.context.intType(ptr_info.host_size * 8).pointerType(llvm_addrspace);
            }
            const elem_ty = ptr_info.pointee_type;
            // Zero-bit element types cannot be lowered; fall back to an i8 pointee.
            const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
                .Opaque, .Fn => true,
                .Array => elem_ty.childType().hasRuntimeBits(),
                else => elem_ty.hasRuntimeBits(),
            };
            const llvm_elem_ty = if (lower_elem_ty)
                try dg.llvmType(elem_ty)
            else
                dg.context.intType(8);
            return llvm_elem_ty.pointerType(llvm_addrspace);
        },
        .Opaque => switch (t.tag()) {
            .@"opaque" => {
                const gop = try dg.object.type_map.getOrPut(gpa, t);
                if (gop.found_existing) return gop.value_ptr.*;

                // The Type memory is ephemeral; since we want to store a longer-lived
                // reference, we need to copy it here.
                gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());

                const opaque_obj = t.castTag(.@"opaque").?.data;
                const name = try opaque_obj.getFullyQualifiedName(gpa);
                defer gpa.free(name);

                const llvm_struct_ty = dg.context.structCreateNamed(name);
                gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
                return llvm_struct_ty;
            },
            .anyopaque => return dg.context.intType(8),
            else => unreachable,
        },
        .Array => {
            const elem_ty = t.childType();
            assert(elem_ty.onePossibleValue() == null);
            const elem_llvm_ty = try dg.llvmType(elem_ty);
            // The LLVM array includes a slot for the sentinel, when present.
            const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null);
            return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
        },
        .Vector => {
            const elem_type = try dg.llvmType(t.childType());
            return elem_type.vectorType(t.vectorLen());
        },
        .Optional => {
            var buf: Type.Payload.ElemType = undefined;
            const child_type = t.optionalChild(&buf);
            // Zero-bit payload: the optional is just the i1 non-null flag.
            if (!child_type.hasRuntimeBits()) {
                return dg.context.intType(1);
            }
            const payload_llvm_ty = try dg.llvmType(child_type);
            if (t.isPtrLikeOptional()) {
                // Null is represented by the pointer value itself.
                return payload_llvm_ty;
            } else if (!child_type.hasRuntimeBits()) {
                // NOTE(review): dead branch — this exact condition already returned
                // above; this arm can never be taken. Candidate for removal.
                return dg.context.intType(1);
            }

            const fields: [2]*const llvm.Type = .{
                payload_llvm_ty, dg.context.intType(1),
            };
            return dg.context.structType(&fields, fields.len, .False);
        },
        .ErrorUnion => {
            const error_type = t.errorUnionSet();
            const payload_type = t.errorUnionPayload();
            const llvm_error_type = try dg.llvmType(error_type);
            // Zero-bit payload: the error union is just the error value.
            if (!payload_type.hasRuntimeBits()) {
                return llvm_error_type;
            }
            const llvm_payload_type = try dg.llvmType(payload_type);

            const fields: [2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type };
            return dg.context.structType(&fields, fields.len, .False);
        },
        .ErrorSet => {
            // All error sets share the global u16 error-value representation.
            return dg.context.intType(16);
        },
        .Struct => {
            const gop = try dg.object.type_map.getOrPut(gpa, t);
            if (gop.found_existing) return gop.value_ptr.*;

            // The Type memory is ephemeral; since we want to store a longer-lived
            // reference, we need to copy it here.
            gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());

            if (t.castTag(.tuple)) |tuple| {
                const llvm_struct_ty = dg.context.structCreateNamed("");
                gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls

                const types = tuple.data.types;
                const values = tuple.data.values;
                var llvm_field_types = try std.ArrayListUnmanaged(*const llvm.Type).initCapacity(gpa, types.len);
                defer llvm_field_types.deinit(gpa);

                for (types) |field_ty, i| {
                    const field_val = values[i];
                    // A resolved value marks a comptime field: no runtime representation.
                    if (field_val.tag() != .unreachable_value) continue;

                    llvm_field_types.appendAssumeCapacity(try dg.llvmType(field_ty));
                }

                llvm_struct_ty.structSetBody(
                    llvm_field_types.items.ptr,
                    @intCast(c_uint, llvm_field_types.items.len),
                    .False,
                );

                return llvm_struct_ty;
            }

            const struct_obj = t.castTag(.@"struct").?.data;

            if (struct_obj.layout == .Packed) {
                // Packed structs lower as one backing integer.
                var buf: Type.Payload.Bits = undefined;
                const int_ty = struct_obj.packedIntegerType(target, &buf);
                const int_llvm_ty = try dg.llvmType(int_ty);
                gop.value_ptr.* = int_llvm_ty;
                return int_llvm_ty;
            }

            const name = try struct_obj.getFullyQualifiedName(gpa);
            defer gpa.free(name);

            const llvm_struct_ty = dg.context.structCreateNamed(name);
            gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls

            assert(struct_obj.haveFieldTypes());

            var llvm_field_types = try std.ArrayListUnmanaged(*const llvm.Type).initCapacity(gpa, struct_obj.fields.count());
            defer llvm_field_types.deinit(gpa);

            for (struct_obj.fields.values()) |field| {
                if (!field.ty.hasRuntimeBits()) continue;

                llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
            }

            llvm_struct_ty.structSetBody(
                llvm_field_types.items.ptr,
                @intCast(c_uint, llvm_field_types.items.len),
                .False,
            );

            return llvm_struct_ty;
        },
        .Union => {
            const gop = try dg.object.type_map.getOrPut(gpa, t);
            if (gop.found_existing) return gop.value_ptr.*;

            // The Type memory is ephemeral; since we want to store a longer-lived
            // reference, we need to copy it here.
            gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());

            const union_obj = t.cast(Type.Payload.Union).?.data;

            if (t.unionTagType()) |enum_tag_ty| {
                const layout = union_obj.getLayout(target, true);

                if (layout.payload_size == 0) {
                    // No runtime payload: the union lowers as just the tag.
                    const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);
                    gop.value_ptr.* = enum_tag_llvm_ty;
                    return enum_tag_llvm_ty;
                }

                const name = try union_obj.getFullyQualifiedName(gpa);
                defer gpa.free(name);

                const llvm_union_ty = dg.context.structCreateNamed(name);
                gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls

                // The payload slot uses the most-aligned field's type, padded with
                // bytes up to the full payload size when that field is smaller.
                const aligned_field = union_obj.fields.values()[layout.most_aligned_field];
                const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty);

                const llvm_payload_ty = t: {
                    if (layout.most_aligned_field_size == layout.payload_size) {
                        break :t llvm_aligned_field_ty;
                    }
                    const padding_len = @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size);
                    const fields: [2]*const llvm.Type = .{
                        llvm_aligned_field_ty,
                        dg.context.intType(8).arrayType(padding_len),
                    };
                    break :t dg.context.structType(&fields, fields.len, .False);
                };

                if (layout.tag_size == 0) {
                    var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty};
                    llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
                    return llvm_union_ty;
                }
                const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);

                // Put the tag before or after the payload depending on which one's
                // alignment is greater.
                var llvm_fields: [2]*const llvm.Type = undefined;
                if (layout.tag_align >= layout.payload_align) {
                    llvm_fields[0] = enum_tag_llvm_ty;
                    llvm_fields[1] = llvm_payload_ty;
                } else {
                    llvm_fields[0] = llvm_payload_ty;
                    llvm_fields[1] = enum_tag_llvm_ty;
                }
                llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
                return llvm_union_ty;
            }

            // Untagged union
            const layout = union_obj.getLayout(target, false);

            const name = try union_obj.getFullyQualifiedName(gpa);
            defer gpa.free(name);

            const llvm_union_ty = dg.context.structCreateNamed(name);
            gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls

            const big_field = union_obj.fields.values()[layout.biggest_field];
            const llvm_big_field_ty = try dg.llvmType(big_field.ty);

            var llvm_fields: [1]*const llvm.Type = .{llvm_big_field_ty};
            llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
            return llvm_union_ty;
        },
        .Fn => {
            const fn_info = t.fnInfo();
            const sret = firstParamSRet(fn_info, target);
            const return_type = fn_info.return_type;
            const llvm_sret_ty = if (return_type.hasRuntimeBits())
                try dg.llvmType(return_type)
            else
                dg.context.voidType();
            // With sret the function returns void and writes through param 0 instead.
            const llvm_ret_ty = if (sret) dg.context.voidType() else llvm_sret_ty;

            var llvm_params = std.ArrayList(*const llvm.Type).init(dg.gpa);
            defer llvm_params.deinit();

            if (sret) {
                try llvm_params.append(llvm_sret_ty.pointerType(0));
            }

            for (fn_info.param_types) |param_ty| {
                if (!param_ty.hasRuntimeBits()) continue;

                const raw_llvm_ty = try dg.llvmType(param_ty);
                // By-ref parameters are passed as a pointer to the value.
                const actual_llvm_ty = if (!isByRef(param_ty)) raw_llvm_ty else raw_llvm_ty.pointerType(0);
                try llvm_params.append(actual_llvm_ty);
            }

            return llvm.functionType(
                llvm_ret_ty,
                llvm_params.items.ptr,
                @intCast(c_uint, llvm_params.items.len),
                llvm.Bool.fromBool(fn_info.is_var_args),
            );
        },
        .ComptimeInt => unreachable,
        .ComptimeFloat => unreachable,
        .Type => unreachable,
        .Undefined => unreachable,
        .Null => unreachable,
        .EnumLiteral => unreachable,
        .BoundFn => @panic("TODO remove BoundFn from the language"),
        .Frame => @panic("TODO implement llvmType for Frame types"),
        .AnyFrame => @panic("TODO implement llvmType for AnyFrame types"),
    }
}

/// Lowers a comptime-known (ty, val) pair into an LLVM constant.
/// May return an *unnamed* struct constant whose LLVM type differs from
/// `llvmType(tv.ty)` (see isUnnamedType / genDecl's type-mismatch handling).
fn genTypedValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value {
    if (tv.val.isUndef()) {
        const llvm_type = try dg.llvmType(tv.ty);
        return llvm_type.getUndef();
    }

    switch (tv.ty.zigTypeTag()) {
        .Bool => {
            const llvm_type = try dg.llvmType(tv.ty);
            return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
        },
        // TODO this duplicates code with Pointer but they should share the handling
        // of the tv.val.tag() and then Int should do extra constPtrToInt on top
        .Int => switch (tv.val.tag()) {
            .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
            .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
            else => {
                var bigint_space: Value.BigIntSpace = undefined;
                const bigint = tv.val.toBigInt(&bigint_space);
                const target = dg.module.getTarget();
                const int_info = tv.ty.intInfo(target);
                assert(int_info.bits != 0);
                const llvm_type = dg.context.intType(int_info.bits);

                // Lower the magnitude first; negate afterwards when the big int
                // is negative.
                const unsigned_val = v: {
                    if (bigint.limbs.len == 1) {
                        break :v llvm_type.constInt(bigint.limbs[0], .False);
                    }
                    if (@sizeOf(usize) == @sizeOf(u64)) {
                        break :v llvm_type.constIntOfArbitraryPrecision(
                            @intCast(c_uint, bigint.limbs.len),
                            bigint.limbs.ptr,
                        );
                    }
                    @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
                };
                if (!bigint.positive) {
                    return llvm.constNeg(unsigned_val);
                }
                return unsigned_val;
            },
        },
        .Enum => {
            // Lower the enum's integer tag value.
            var int_buffer: Value.Payload.U64 = undefined;
            const int_val = tv.enumToInt(&int_buffer);

            var bigint_space: Value.BigIntSpace = undefined;
            const bigint = int_val.toBigInt(&bigint_space);

            const target = dg.module.getTarget();
            const int_info = tv.ty.intInfo(target);
            const llvm_type = dg.context.intType(int_info.bits);

            const unsigned_val = v: {
                if (bigint.limbs.len == 1) {
                    break :v llvm_type.constInt(bigint.limbs[0], .False);
                }
                if (@sizeOf(usize) == @sizeOf(u64)) {
                    break :v llvm_type.constIntOfArbitraryPrecision(
                        @intCast(c_uint, bigint.limbs.len),
                        bigint.limbs.ptr,
                    );
                }
                @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
            };
            if (!bigint.positive) {
                return llvm.constNeg(unsigned_val);
            }
            return unsigned_val;
        },
        .Float => {
            const llvm_ty = try dg.llvmType(tv.ty);
            const target = dg.module.getTarget();
            switch (tv.ty.floatBits(target)) {
                16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)),
                80 => {
                    // Build the 80-bit representation as an i80 (exp:16 | fraction:64),
                    // then bitcast to x86_fp80 only on targets that support it.
                    const float = tv.val.toFloat(f80);
                    const repr = std.math.break_f80(float);
                    const llvm_i80 = dg.context.intType(80);
                    var x = llvm_i80.constInt(repr.exp, .False);
                    x = x.constShl(llvm_i80.constInt(64, .False));
                    x = x.constOr(llvm_i80.constInt(repr.fraction, .False));
                    if (backendSupportsF80(target)) {
                        return x.constBitCast(llvm_ty);
                    } else {
                        return x;
                    }
                },
                128 => {
                    var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128));
                    // LLVM seems to require that the lower half of the f128 be placed first
                    // in the buffer.
                    if (native_endian == .Big) {
                        std.mem.swap(u64, &buf[0], &buf[1]);
                    }
                    const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
                    return int.constBitCast(llvm_ty);
                },
                else => unreachable,
            }
        },
        .Pointer => switch (tv.val.tag()) {
            .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
            .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
            .variable => {
                const decl = tv.val.castTag(.variable).?.data.owner_decl;
                decl.markAlive();
                const val = try dg.resolveGlobalDecl(decl);
                const llvm_var_type = try dg.llvmType(tv.ty);
                const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
                const llvm_type = llvm_var_type.pointerType(llvm_addrspace);
                return val.constBitCast(llvm_type);
            },
            .slice => {
                // { ptr, len } aggregate, mirroring llvmType's slice lowering.
                const slice = tv.val.castTag(.slice).?.data;
                var buf: Type.SlicePtrFieldTypeBuffer = undefined;
                const fields: [2]*const llvm.Value = .{
                    try dg.genTypedValue(.{
                        .ty = tv.ty.slicePtrFieldType(&buf),
                        .val = slice.ptr,
                    }),
                    try dg.genTypedValue(.{
                        .ty = Type.usize,
                        .val = slice.len,
                    }),
                };
                return dg.context.constStruct(&fields, fields.len, .False);
            },
            .int_u64, .one, .int_big_positive => {
                // Integer-valued pointer: lower the address then inttoptr it.
                const llvm_usize = try dg.llvmType(Type.usize);
                const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False);
                return llvm_int.constIntToPtr(try dg.llvmType(tv.ty));
            },
            .field_ptr, .opt_payload_ptr, .eu_payload_ptr => {
                const parent = try dg.lowerParentPtr(tv.val);
                return parent.llvm_ptr.constBitCast(try dg.llvmType(tv.ty));
            },
            .elem_ptr => {
                const elem_ptr = tv.val.castTag(.elem_ptr).?.data;
                const parent = try dg.lowerParentPtr(elem_ptr.array_ptr);
                const llvm_usize = try dg.llvmType(Type.usize);
                // GEP into an array needs a leading 0 index; a plain pointer does not.
                if (parent.llvm_ptr.typeOf().getElementType().getTypeKind() == .Array) {
                    const indices: [2]*const llvm.Value = .{
                        llvm_usize.constInt(0, .False),
                        llvm_usize.constInt(elem_ptr.index, .False),
                    };
                    return parent.llvm_ptr.constInBoundsGEP(&indices, indices.len);
                } else {
                    const indices: [1]*const llvm.Value = .{
                        llvm_usize.constInt(elem_ptr.index, .False),
                    };
                    return parent.llvm_ptr.constInBoundsGEP(&indices, indices.len);
                }
            },
            .null_value, .zero => {
                const llvm_type = try dg.llvmType(tv.ty);
                return llvm_type.constNull();
            },
            else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
        },
        .Array => switch (tv.val.tag()) {
            .bytes => {
                const bytes = tv.val.castTag(.bytes).?.data;
                return dg.context.constString(
                    bytes.ptr,
                    @intCast(c_uint, bytes.len),
                    .True, // don't null terminate. bytes has the sentinel, if any.
                );
            },
            .array => {
                const elem_vals = tv.val.castTag(.array).?.data;
                const elem_ty = tv.ty.elemType();
                const gpa = dg.gpa;
                const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len);
                defer gpa.free(llvm_elems);
                var need_unnamed = false;
                for (elem_vals) |elem_val, i| {
                    llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
                    need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
                }
                // If any element lowered to an unnamed type, the whole array must be
                // an unnamed struct too.
                if (need_unnamed) {
                    return dg.context.constStruct(
                        llvm_elems.ptr,
                        @intCast(c_uint, llvm_elems.len),
                        .True,
                    );
                } else {
                    const llvm_elem_ty = try dg.llvmType(elem_ty);
                    return llvm_elem_ty.constArray(
                        llvm_elems.ptr,
                        @intCast(c_uint, llvm_elems.len),
                    );
                }
            },
            .repeated => {
                const val = tv.val.castTag(.repeated).?.data;
                const elem_ty = tv.ty.elemType();
                const sentinel = tv.ty.sentinel();
                const len = @intCast(usize, tv.ty.arrayLen());
                const len_including_sent = len + @boolToInt(sentinel != null);
                const gpa = dg.gpa;
                const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent);
                defer gpa.free(llvm_elems);

                var need_unnamed = false;
                if (len != 0) {
                    for (llvm_elems[0..len]) |*elem| {
                        elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
                    }
                    need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
                }

                if (sentinel) |sent| {
                    llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent });
                    need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
                }

                if (need_unnamed) {
                    return dg.context.constStruct(
                        llvm_elems.ptr,
                        @intCast(c_uint, llvm_elems.len),
                        .True,
                    );
                } else {
                    const llvm_elem_ty = try dg.llvmType(elem_ty);
                    return llvm_elem_ty.constArray(
                        llvm_elems.ptr,
                        @intCast(c_uint, llvm_elems.len),
                    );
                }
            },
            .empty_array_sentinel => {
                // Zero-length array with a sentinel: the sentinel is the only element.
                const elem_ty = tv.ty.elemType();
                const sent_val = tv.ty.sentinel().?;
                const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val });
                const llvm_elems: [1]*const llvm.Value = .{sentinel};
                const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
                if (need_unnamed) {
                    return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True);
                } else {
                    const llvm_elem_ty = try dg.llvmType(elem_ty);
                    return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
                }
            },
            else => unreachable,
        },
        .Optional => {
            var buf: Type.Payload.ElemType = undefined;
            const payload_ty = tv.ty.optionalChild(&buf);
            const llvm_i1 = dg.context.intType(1);
            const is_pl = !tv.val.isNull();
            const non_null_bit = if (is_pl) llvm_i1.constAllOnes() else llvm_i1.constNull();
            // Mirrors llvmType: zero-bit payload means only the i1 flag exists.
            if (!payload_ty.hasRuntimeBits()) {
                return non_null_bit;
            }
            if (tv.ty.isPtrLikeOptional()) {
                if (tv.val.castTag(.opt_payload)) |payload| {
                    return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data });
                } else if (is_pl) {
                    return dg.genTypedValue(.{ .ty = payload_ty, .val = tv.val });
                } else {
                    const llvm_ty = try dg.llvmType(tv.ty);
                    return llvm_ty.constNull();
                }
            }
            assert(payload_ty.zigTypeTag() != .Fn);
            const fields: [2]*const llvm.Value = .{
                try dg.genTypedValue(.{
                    .ty = payload_ty,
                    .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef),
                }),
                non_null_bit,
            };
            return dg.context.constStruct(&fields, fields.len, .False);
        },
        .Fn => {
            const fn_decl = switch (tv.val.tag()) {
                .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl,
                .function => tv.val.castTag(.function).?.data.owner_decl,
                else => unreachable,
            };
            fn_decl.markAlive();
            return dg.resolveLlvmFunction(fn_decl);
        },
        .ErrorSet => {
            const llvm_ty = try dg.llvmType(tv.ty);
            switch (tv.val.tag()) {
                .@"error" => {
                    const err_name = tv.val.castTag(.@"error").?.data.name;
                    const kv = try dg.module.getErrorValue(err_name);
                    return llvm_ty.constInt(kv.value, .False);
                },
                else => {
                    // In this case we are rendering an error union which has a 0 bits payload.
                    return llvm_ty.constNull();
                },
            }
        },
        .ErrorUnion => {
            const error_type = tv.ty.errorUnionSet();
            const payload_type = tv.ty.errorUnionPayload();
            const is_pl = tv.val.errorUnionIsPayload();

            if (!payload_type.hasRuntimeBits()) {
                // We use the error type directly as the type.
const err_val = if (!is_pl) tv.val else Value.initTag(.zero); return dg.genTypedValue(.{ .ty = error_type, .val = err_val }); } const fields: [2]*const llvm.Value = .{ try dg.genTypedValue(.{ .ty = error_type, .val = if (is_pl) Value.initTag(.zero) else tv.val, }), try dg.genTypedValue(.{ .ty = payload_type, .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), }), }; return dg.context.constStruct(&fields, fields.len, .False); }, .Struct => { const llvm_struct_ty = try dg.llvmType(tv.ty); const field_vals = tv.val.castTag(.@"struct").?.data; const gpa = dg.gpa; const struct_obj = tv.ty.castTag(.@"struct").?.data; if (struct_obj.layout == .Packed) { const target = dg.module.getTarget(); var int_ty_buf: Type.Payload.Bits = undefined; const int_ty = struct_obj.packedIntegerType(target, &int_ty_buf); const int_llvm_ty = try dg.llvmType(int_ty); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *const llvm.Value = int_llvm_ty.constNull(); var running_bits: u16 = 0; for (field_vals) |field_val, i| { const field = fields[i]; if (!field.ty.hasRuntimeBits()) continue; const non_int_val = try dg.genTypedValue(.{ .ty = field.ty, .val = field_val, }); const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); const small_int_ty = dg.context.intType(ty_bit_size); const small_int_val = non_int_val.constBitCast(small_int_ty); const shift_rhs = int_llvm_ty.constInt(running_bits, .False); const extended_int_val = small_int_val.constZExt(int_llvm_ty); const shifted = extended_int_val.constShl(shift_rhs); running_int = running_int.constOr(shifted); running_bits += ty_bit_size; } return running_int; } const llvm_field_count = llvm_struct_ty.countStructElementTypes(); var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count); defer llvm_fields.deinit(gpa); var need_unnamed = false; for (field_vals) |field_val, i| { const field_ty = 
// NOTE(review): this span begins mid-way through DeclGen.genTypedValue; the
// `const field_ty =` prefix of the first statement lies before this chunk.
tv.ty.structFieldType(i);
// Zero-bit fields have no LLVM representation; skip them.
if (!field_ty.hasRuntimeBits()) continue;

const field_llvm_val = try dg.genTypedValue(.{
    .ty = field_ty,
    .val = field_val,
});

need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val);

llvm_fields.appendAssumeCapacity(field_llvm_val);
}
if (need_unnamed) {
    // An anonymous (unnamed) LLVM struct: callers must cast at use sites.
    return dg.context.constStruct(
        llvm_fields.items.ptr,
        @intCast(c_uint, llvm_fields.items.len),
        .False,
    );
} else {
    return llvm_struct_ty.constNamedStruct(
        llvm_fields.items.ptr,
        @intCast(c_uint, llvm_fields.items.len),
    );
}
},
.Union => {
    const llvm_union_ty = try dg.llvmType(tv.ty);
    const tag_and_val = tv.val.castTag(.@"union").?.data;
    const target = dg.module.getTarget();
    const layout = tv.ty.unionGetLayout(target);

    // With a zero-sized payload, the union constant is just its tag value.
    if (layout.payload_size == 0) {
        return genTypedValue(dg, .{
            .ty = tv.ty.unionTagType().?,
            .val = tag_and_val.tag,
        });
    }
    const union_obj = tv.ty.cast(Type.Payload.Union).?.data;
    const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag).?;
    assert(union_obj.haveFieldTypes());
    const field_ty = union_obj.fields.values()[field_index].ty;
    // Build the payload constant, padding it out to the union's full
    // payload size with undef bytes when the active field is smaller.
    const payload = p: {
        if (!field_ty.hasRuntimeBits()) {
            const padding_len = @intCast(c_uint, layout.payload_size);
            break :p dg.context.intType(8).arrayType(padding_len).getUndef();
        }
        const field = try genTypedValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
        const field_size = field_ty.abiSize(target);
        if (field_size == layout.payload_size) {
            break :p field;
        }
        const padding_len = @intCast(c_uint, layout.payload_size - field_size);
        const fields: [2]*const llvm.Value = .{
            field, dg.context.intType(8).arrayType(padding_len).getUndef(),
        };
        break :p dg.context.constStruct(&fields, fields.len, .False);
    };

    // In this case we must make an unnamed struct because LLVM does
    // not support bitcasting our payload struct to the true union payload type.
    // Instead we use an unnamed struct and every reference to the global
    // must pointer cast to the expected type before accessing the union.
    const need_unnamed = layout.most_aligned_field != field_index;

    if (layout.tag_size == 0) {
        // Untagged (or zero-sized-tag) union: the constant is payload only.
        const fields: [1]*const llvm.Value = .{payload};
        if (need_unnamed) {
            return dg.context.constStruct(&fields, fields.len, .False);
        } else {
            return llvm_union_ty.constNamedStruct(&fields, fields.len);
        }
    }
    const llvm_tag_value = try genTypedValue(dg, .{
        .ty = tv.ty.unionTagType().?,
        .val = tag_and_val.tag,
    });
    // Field order in the LLVM struct follows alignment: the more-aligned
    // of {tag, payload} comes first (mirrors unionGetLayout's ordering).
    var fields: [2]*const llvm.Value = undefined;
    if (layout.tag_align >= layout.payload_align) {
        fields = .{ llvm_tag_value, payload };
    } else {
        fields = .{ payload, llvm_tag_value };
    }
    if (need_unnamed) {
        return dg.context.constStruct(&fields, fields.len, .False);
    } else {
        return llvm_union_ty.constNamedStruct(&fields, fields.len);
    }
},
.Vector => switch (tv.val.tag()) {
    .bytes => {
        // Note, sentinel is not stored even if the type has a sentinel.
        const bytes = tv.val.castTag(.bytes).?.data;
        const vector_len = @intCast(usize, tv.ty.arrayLen());
        assert(vector_len == bytes.len or vector_len + 1 == bytes.len);

        const elem_ty = tv.ty.elemType();
        const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
        defer dg.gpa.free(llvm_elems);
        for (llvm_elems) |*elem, i| {
            // Wrap each raw byte in a stack-allocated int Value payload so
            // it can be lowered through the normal genTypedValue path.
            var byte_payload: Value.Payload.U64 = .{
                .base = .{ .tag = .int_u64 },
                .data = bytes[i],
            };

            elem.* = try dg.genTypedValue(.{
                .ty = elem_ty,
                .val = Value.initPayload(&byte_payload.base),
            });
        }
        return llvm.constVector(
            llvm_elems.ptr,
            @intCast(c_uint, llvm_elems.len),
        );
    },
    .array => {
        // Note, sentinel is not stored even if the type has a sentinel.
        // The value includes the sentinel in those cases.
        const elem_vals = tv.val.castTag(.array).?.data;
        const vector_len = @intCast(usize, tv.ty.arrayLen());
        assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
        const elem_ty = tv.ty.elemType();
        const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
        defer dg.gpa.free(llvm_elems);
        for (llvm_elems) |*elem, i| {
            elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
        }
        return llvm.constVector(
            llvm_elems.ptr,
            @intCast(c_uint, llvm_elems.len),
        );
    },
    .repeated => {
        // Note, sentinel is not stored even if the type has a sentinel.
        // Every lane gets the same lowered element value.
        const val = tv.val.castTag(.repeated).?.data;
        const elem_ty = tv.ty.elemType();
        const len = @intCast(usize, tv.ty.arrayLen());
        const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len);
        defer dg.gpa.free(llvm_elems);
        for (llvm_elems) |*elem| {
            elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
        }
        return llvm.constVector(
            llvm_elems.ptr,
            @intCast(c_uint, llvm_elems.len),
        );
    },
    else => unreachable,
},

// These type tags never reach runtime code generation.
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
.Type => unreachable,
.EnumLiteral => unreachable,
.Void => unreachable,
.NoReturn => unreachable,
.Undefined => unreachable,
.Null => unreachable,
.BoundFn => unreachable,
.Opaque => unreachable,

.Frame,
.AnyFrame,
=> return dg.todo("implement const of type '{}'", .{tv.ty}),
}
}

/// Result of lowering a parent pointer: the LLVM constant pointer plus the
/// Zig type of the pointee it refers to.
const ParentPtr = struct {
    ty: Type,
    llvm_ptr: *const llvm.Value,
};

/// Lowers a pointer-to-decl value, marking the decl alive so it is emitted.
fn lowerParentPtrDecl(dg: *DeclGen, ptr_val: Value, decl: *Module.Decl) Error!ParentPtr {
    decl.markAlive();
    // Fabricate a single-item mutable pointer type to the decl on the stack.
    var ptr_ty_payload: Type.Payload.ElemType = .{
        .base = .{ .tag = .single_mut_pointer },
        .data = decl.ty,
    };
    const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
    const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl);
    return ParentPtr{
        .llvm_ptr = llvm_ptr,
        .ty = decl.ty,
    };
}

/// Recursively lowers the base of a field/element/payload pointer chain into
/// an LLVM constant pointer, tracking the pointee's Zig type at each step.
fn lowerParentPtr(dg: *DeclGen, ptr_val: Value) Error!ParentPtr {
    switch (ptr_val.tag()) {
        .decl_ref_mut => {
            const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
            return
dg.lowerParentPtrDecl(ptr_val, decl);
},
.decl_ref => {
    const decl = ptr_val.castTag(.decl_ref).?.data;
    return dg.lowerParentPtrDecl(ptr_val, decl);
},
.variable => {
    const decl = ptr_val.castTag(.variable).?.data.owner_decl;
    return dg.lowerParentPtrDecl(ptr_val, decl);
},
.field_ptr => {
    const field_ptr = ptr_val.castTag(.field_ptr).?.data;
    // Lower the container pointer first, then GEP into the field.
    const parent = try dg.lowerParentPtr(field_ptr.container_ptr);
    const field_index = @intCast(u32, field_ptr.field_index);
    const llvm_u32 = dg.context.intType(32);
    const target = dg.module.getTarget();
    switch (parent.ty.zigTypeTag()) {
        .Union => {
            const fields = parent.ty.unionFields();
            const layout = parent.ty.unionGetLayout(target);
            const field_ty = fields.values()[field_index].ty;
            if (layout.payload_size == 0) {
                // In this case a pointer to the union and a pointer to any
                // (void) payload is the same.
                return ParentPtr{
                    .llvm_ptr = parent.llvm_ptr,
                    .ty = field_ty,
                };
            }
            if (layout.tag_size == 0) {
                // No tag stored: payload is the first (only) LLVM field.
                const indices: [2]*const llvm.Value = .{
                    llvm_u32.constInt(0, .False),
                    llvm_u32.constInt(0, .False),
                };
                return ParentPtr{
                    .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len),
                    .ty = field_ty,
                };
            }
            // Payload index depends on whether the tag or payload was laid
            // out first (matches the ordering used when building the constant).
            const llvm_pl_index = @boolToInt(layout.tag_align >= layout.payload_align);
            const indices: [2]*const llvm.Value = .{
                llvm_u32.constInt(0, .False),
                llvm_u32.constInt(llvm_pl_index, .False),
            };
            return ParentPtr{
                .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len),
                .ty = field_ty,
            };
        },
        .Struct => {
            var ty_buf: Type.Payload.Pointer = undefined;
            // Map the Zig field index to the LLVM struct field index
            // (they differ when zero-bit fields are omitted).
            const llvm_field_index = llvmFieldIndex(parent.ty, field_index, target, &ty_buf).?;
            const indices: [2]*const llvm.Value = .{
                llvm_u32.constInt(0, .False),
                llvm_u32.constInt(llvm_field_index, .False),
            };
            return ParentPtr{
                .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len),
                .ty = parent.ty.structFieldType(field_index),
            };
        },
        else => unreachable,
    }
},
.elem_ptr => {
    const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
    const parent = try dg.lowerParentPtr(elem_ptr.array_ptr);
    const llvm_usize = try dg.llvmType(Type.usize);
    const indices: [2]*const llvm.Value = .{
        llvm_usize.constInt(0, .False),
        llvm_usize.constInt(elem_ptr.index, .False),
    };
    return ParentPtr{
        .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len),
        .ty = parent.ty.childType(),
    };
},
.opt_payload_ptr => {
    const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
    const parent = try dg.lowerParentPtr(opt_payload_ptr);
    var buf: Type.Payload.ElemType = undefined;
    const payload_ty = parent.ty.optionalChild(&buf);
    if (!payload_ty.hasRuntimeBits() or parent.ty.isPtrLikeOptional()) {
        // In this case, we represent pointer to optional the same as pointer
        // to the payload.
        return ParentPtr{
            .llvm_ptr = parent.llvm_ptr,
            .ty = payload_ty,
        };
    }
    // Non-pointer-like optional: payload lives at LLVM field 0.
    const llvm_u32 = dg.context.intType(32);
    const indices: [2]*const llvm.Value = .{
        llvm_u32.constInt(0, .False),
        llvm_u32.constInt(0, .False),
    };
    return ParentPtr{
        .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len),
        .ty = payload_ty,
    };
},
.eu_payload_ptr => {
    const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
    const parent = try dg.lowerParentPtr(eu_payload_ptr);
    const payload_ty = parent.ty.errorUnionPayload();
    if (!payload_ty.hasRuntimeBits()) {
        // In this case, we represent pointer to error union the same as pointer
        // to the payload.
        return ParentPtr{
            .llvm_ptr = parent.llvm_ptr,
            .ty = payload_ty,
        };
    }
    // Error union with a runtime payload: payload is LLVM field 1
    // (field 0 holds the error value in this representation).
    const llvm_u32 = dg.context.intType(32);
    const indices: [2]*const llvm.Value = .{
        llvm_u32.constInt(0, .False),
        llvm_u32.constInt(1, .False),
    };
    return ParentPtr{
        .llvm_ptr = parent.llvm_ptr.constInBoundsGEP(&indices, indices.len),
        .ty = payload_ty,
    };
},
else => unreachable,
}
}

/// Lowers a reference to a decl into an LLVM constant. Slices become a
/// { ptr, len } struct; zero-bit non-function decls become a synthesized
/// non-null pointer; int-typed destinations get a ptrtoint, otherwise bitcast.
fn lowerDeclRefValue(
    self: *DeclGen,
    tv: TypedValue,
    decl: *Module.Decl,
) Error!*const llvm.Value {
    if (tv.ty.isSlice()) {
        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
        const ptr_ty = tv.ty.slicePtrFieldType(&buf);
        var slice_len: Value.Payload.U64 = .{
            .base = .{ .tag = .int_u64 },
            .data = tv.val.sliceLen(),
        };
        const fields: [2]*const llvm.Value = .{
            try self.genTypedValue(.{
                .ty = ptr_ty,
                .val = tv.val,
            }),
            try self.genTypedValue(.{
                .ty = Type.usize,
                .val = Value.initPayload(&slice_len.base),
            }),
        };
        return self.context.constStruct(&fields, fields.len, .False);
    }

    // Pointers to zero-bit (non-function) decls still must be non-null.
    const is_fn_body = decl.ty.zigTypeTag() == .Fn;
    if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
        return self.lowerPtrToVoid(tv.ty);
    }

    decl.markAlive();

    const llvm_val = if (is_fn_body)
        try self.resolveLlvmFunction(decl)
    else
        try self.resolveGlobalDecl(decl);

    const llvm_type = try self.llvmType(tv.ty);
    if (tv.ty.zigTypeTag() == .Int) {
        return llvm_val.constPtrToInt(llvm_type);
    } else {
        return llvm_val.constBitCast(llvm_type);
    }
}

/// Synthesizes a well-defined, non-null pointer constant for a pointer to a
/// zero-bit type. The address is never dereferenced; see the comment below.
fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*const llvm.Value {
    const target = dg.module.getTarget();
    const alignment = ptr_ty.ptrAlignment(target);
    // Even though we are pointing at something which has zero bits (e.g. `void`),
    // Pointers are defined to have bits. So we must return something here.
    // The value cannot be undefined, because we use the `nonnull` annotation
    // for non-optional pointers. We also need to respect the alignment, even though
    // the address will never be dereferenced.
const llvm_usize = try dg.llvmType(Type.usize);
const llvm_ptr_ty = try dg.llvmType(ptr_ty);
if (alignment != 0) {
    // A known alignment itself is a valid non-null, properly-aligned address.
    return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty);
}
// Note that these 0xaa values are appropriate even in release-optimized builds
// because we need a well-defined value that is not null, and LLVM does not
// have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR
// instruction is followed by a `wrap_optional`, it will return this value
// verbatim, and the result should test as non-null.
const int = switch (target.cpu.arch.ptrBitWidth()) {
    32 => llvm_usize.constInt(0xaaaaaaaa, .False),
    64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False),
    else => unreachable,
};
return int.constIntToPtr(llvm_ptr_ty);
}

/// Adds an enum attribute (with value 0) at the given attribute index.
fn addAttr(dg: DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
    return dg.addAttrInt(val, index, name, 0);
}

/// Adds an attribute to a function parameter. LLVM attribute index 0 is the
/// return value, so parameter N maps to attribute index N + 1.
fn addArgAttr(dg: DeclGen, fn_val: *const llvm.Value, param_index: u32, attr_name: []const u8) void {
    return dg.addAttr(fn_val, param_index + 1, attr_name);
}

/// Removes a named enum attribute at the given attribute index.
fn removeAttr(val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
    const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
    // A kind id of 0 would mean the attribute name is unknown to LLVM.
    assert(kind_id != 0);
    val.removeEnumAttributeAtIndex(index, kind_id);
}

/// Adds an enum attribute carrying an integer value at the given index.
fn addAttrInt(
    dg: DeclGen,
    val: *const llvm.Value,
    index: llvm.AttributeIndex,
    name: []const u8,
    int: u64,
) void {
    const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
    assert(kind_id != 0);
    const llvm_attr = dg.context.createEnumAttribute(kind_id, int);
    val.addAttributeAtIndex(index, llvm_attr);
}

/// Adds a string (key=value) attribute at the given attribute index.
fn addAttrString(
    dg: *DeclGen,
    val: *const llvm.Value,
    index: llvm.AttributeIndex,
    name: []const u8,
    value: []const u8,
) void {
    const llvm_attr = dg.context.createStringAttribute(
        name.ptr,
        @intCast(c_uint, name.len),
        value.ptr,
        @intCast(c_uint, value.len),
    );
    val.addAttributeAtIndex(index, llvm_attr);
}

/// Adds a function-level attribute (maxInt index = LLVM's function slot).
fn addFnAttr(dg: DeclGen, val: *const llvm.Value, name: []const u8) void {
    dg.addAttr(val, std.math.maxInt(llvm.AttributeIndex), name);
}

/// Adds a function-level string attribute.
fn addFnAttrString(dg: *DeclGen, val: *const llvm.Value, name: []const u8, value: []const u8) void {
    dg.addAttrString(val, std.math.maxInt(llvm.AttributeIndex), name, value);
}

/// Removes a function-level attribute.
fn removeFnAttr(fn_val: *const llvm.Value, name: []const u8) void {
    removeAttr(fn_val, std.math.maxInt(llvm.AttributeIndex), name);
}

/// Adds a function-level enum attribute carrying an integer value.
fn addFnAttrInt(dg: DeclGen, fn_val: *const llvm.Value, name: []const u8, int: u64) void {
    return dg.addAttrInt(fn_val, std.math.maxInt(llvm.AttributeIndex), name, int);
}

/// If the operand type of an atomic operation is not byte sized we need to
/// widen it before using it and then truncate the result.
/// RMW exchange of floating-point values is bitcasted to same-sized integer
/// types to work around a LLVM deficiency when targeting ARM/AArch64.
/// Returns null when the type is already usable for atomics as-is.
fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*const llvm.Type {
    const target = dg.module.getTarget();
    var buffer: Type.Payload.Bits = undefined;
    const int_ty = switch (ty.zigTypeTag()) {
        .Int => ty,
        .Enum => ty.intTagType(&buffer),
        .Float => {
            if (!is_rmw_xchg) return null;
            return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
        },
        .Bool => return dg.context.intType(8),
        else => return null,
    };
    const bit_count = int_ty.intInfo(target).bits;
    // Widen to the ABI size when the bit count is not a byte-aligned power of two.
    if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
        return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8));
    } else {
        return null;
    }
}
};

/// Per-function code generation state: walks a function's AIR and emits
/// LLVM IR through `builder`.
pub const FuncGen = struct {
    gpa: Allocator,
    dg: *DeclGen,
    air: Air,
    liveness: Liveness,
    context: *const llvm.Context,
    builder: *const llvm.Builder,

    /// This stores the LLVM values used in a function, such that they can be referred to
    /// in other instructions. This table is cleared before every function is generated.
    func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, *const llvm.Value),

    /// If the return type isByRef, this is the result pointer. Otherwise null.
ret_ptr: ?*const llvm.Value,
/// These fields are used to refer to the LLVM value of the function parameters
/// in an Arg instruction.
/// This list may be shorter than the list according to the zig type system;
/// it omits 0-bit types.
args: []*const llvm.Value,
arg_index: usize,

llvm_func: *const llvm.Value,

/// This data structure is used to implement breaking to blocks.
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
    parent_bb: *const llvm.BasicBlock,
    break_bbs: *BreakBasicBlocks,
    break_vals: *BreakValues,
}),

single_threaded: bool,

const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);

/// Releases the builder and all per-function tables.
fn deinit(self: *FuncGen) void {
    self.builder.dispose();
    self.func_inst_table.deinit(self.gpa);
    self.gpa.free(self.args);
    self.blocks.deinit(self.gpa);
}

/// Reports an unimplemented-lowering error; forwards to DeclGen.todo.
fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) Error {
    @setCold(true);
    return self.dg.todo(format, args);
}

fn llvmModule(self: *FuncGen) *const llvm.Module {
    return self.dg.object.llvm_module;
}

/// Resolves an AIR ref to its LLVM value, caching results in
/// `func_inst_table`. By-ref types get a private global constant and the
/// cached value is a pointer to it.
fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value {
    const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
    if (gop.found_existing) return gop.value_ptr.*;

    const val = self.air.value(inst).?;
    const ty = self.air.typeOf(inst);
    const llvm_val = try self.dg.genTypedValue(.{ .ty = ty, .val = val });
    if (!isByRef(ty)) {
        gop.value_ptr.* = llvm_val;
        return llvm_val;
    }

    // We have an LLVM value but we need to create a global constant and
    // set the value as its initializer, and then return a pointer to the global.
    const target = self.dg.module.getTarget();
    const global = self.dg.object.llvm_module.addGlobal(llvm_val.typeOf(), "");
    global.setInitializer(llvm_val);
    global.setLinkage(.Private);
    global.setGlobalConstant(.True);
    global.setUnnamedAddr(.True);
    global.setAlignment(ty.abiAlignment(target));
    // Because of LLVM limitations for lowering certain types such as unions,
    // the type of global constants might not match the type it is supposed to
    // be, and so we must bitcast the pointer at the usage sites.
    const wanted_llvm_ty = try self.dg.llvmType(ty);
    const wanted_llvm_ptr_ty = wanted_llvm_ty.pointerType(0);
    const casted_ptr = global.constBitCast(wanted_llvm_ptr_ty);
    gop.value_ptr.* = casted_ptr;
    return casted_ptr;
}

/// Walks a body of AIR instructions, dispatching each to its `air*` lowering
/// routine and recording any produced LLVM value in `func_inst_table`.
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
    const air_tags = self.air.instructions.items(.tag);
    for (body) |inst| {
        const opt_value: ?*const llvm.Value = switch (air_tags[inst]) {
            // zig fmt: off
            .add       => try self.airAdd(inst),
            .addwrap   => try self.airAddWrap(inst),
            .add_sat   => try self.airAddSat(inst),
            .sub       => try self.airSub(inst),
            .subwrap   => try self.airSubWrap(inst),
            .sub_sat   => try self.airSubSat(inst),
            .mul       => try self.airMul(inst),
            .mulwrap   => try self.airMulWrap(inst),
            .mul_sat   => try self.airMulSat(inst),
            .div_float => try self.airDivFloat(inst),
            .div_trunc => try self.airDivTrunc(inst),
            .div_floor => try self.airDivFloor(inst),
            .div_exact => try self.airDivExact(inst),
            .rem       => try self.airRem(inst),
            .mod       => try self.airMod(inst),
            .ptr_add   => try self.airPtrAdd(inst),
            .ptr_sub   => try self.airPtrSub(inst),
            .shl       => try self.airShl(inst),
            .shl_sat   => try self.airShlSat(inst),
            .shl_exact => try self.airShlExact(inst),
            .min       => try self.airMin(inst),
            .max       => try self.airMax(inst),
            .slice     => try self.airSlice(inst),

            .add_with_overflow => try self.airOverflow(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"),
            .sub_with_overflow => try self.airOverflow(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"),
.mul_with_overflow => try self.airOverflow(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"),
.shl_with_overflow => try self.airShlWithOverflow(inst),

.bit_and, .bool_and => try self.airAnd(inst),
.bit_or, .bool_or   => try self.airOr(inst),
.xor                => try self.airXor(inst),
.shr                => try self.airShr(inst, false),
.shr_exact          => try self.airShr(inst, true),

.sqrt         => try self.airUnaryOp(inst, "llvm.sqrt"),
.sin          => try self.airUnaryOp(inst, "llvm.sin"),
.cos          => try self.airUnaryOp(inst, "llvm.cos"),
.exp          => try self.airUnaryOp(inst, "llvm.exp"),
.exp2         => try self.airUnaryOp(inst, "llvm.exp2"),
.log          => try self.airUnaryOp(inst, "llvm.log"),
.log2         => try self.airUnaryOp(inst, "llvm.log2"),
.log10        => try self.airUnaryOp(inst, "llvm.log10"),
.fabs         => try self.airUnaryOp(inst, "llvm.fabs"),
.floor        => try self.airUnaryOp(inst, "llvm.floor"),
.ceil         => try self.airUnaryOp(inst, "llvm.ceil"),
.round        => try self.airUnaryOp(inst, "llvm.round"),
.trunc_float  => try self.airUnaryOp(inst, "llvm.trunc"),

.cmp_eq  => try self.airCmp(inst, .eq),
.cmp_gt  => try self.airCmp(inst, .gt),
.cmp_gte => try self.airCmp(inst, .gte),
.cmp_lt  => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
.cmp_neq => try self.airCmp(inst, .neq),

.is_non_null     => try self.airIsNonNull(inst, false, false, .NE),
.is_non_null_ptr => try self.airIsNonNull(inst, true , false, .NE),
.is_null         => try self.airIsNonNull(inst, false, true , .EQ),
.is_null_ptr     => try self.airIsNonNull(inst, true , true , .EQ),

.is_non_err     => try self.airIsErr(inst, .EQ, false),
.is_non_err_ptr => try self.airIsErr(inst, .EQ, true),
.is_err         => try self.airIsErr(inst, .NE, false),
.is_err_ptr     => try self.airIsErr(inst, .NE, true),

.alloc          => try self.airAlloc(inst),
.ret_ptr        => try self.airRetPtr(inst),
.arg            => try self.airArg(inst),
.bitcast        => try self.airBitCast(inst),
.bool_to_int    => try self.airBoolToInt(inst),
.block          => try self.airBlock(inst),
.br             => try self.airBr(inst),
.switch_br      => try self.airSwitchBr(inst),
.breakpoint     => try self.airBreakpoint(inst),
.ret_addr       => try self.airRetAddr(inst),
.call           => try self.airCall(inst),
.cond_br        => try self.airCondBr(inst),
.intcast        => try self.airIntCast(inst),
.trunc          => try self.airTrunc(inst),
.fptrunc        => try self.airFptrunc(inst),
.fpext          => try self.airFpext(inst),
.ptrtoint       => try self.airPtrToInt(inst),
.load           => try self.airLoad(inst),
.loop           => try self.airLoop(inst),
.not            => try self.airNot(inst),
.ret            => try self.airRet(inst),
.ret_load       => try self.airRetLoad(inst),
.store          => try self.airStore(inst),
.assembly       => try self.airAssembly(inst),
.slice_ptr      => try self.airSliceField(inst, 0),
.slice_len      => try self.airSliceField(inst, 1),

.ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0),
.ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1),

.array_to_slice => try self.airArrayToSlice(inst),
.float_to_int   => try self.airFloatToInt(inst),
.int_to_float   => try self.airIntToFloat(inst),
.cmpxchg_weak   => try self.airCmpxchg(inst, true),
.cmpxchg_strong => try self.airCmpxchg(inst, false),
.fence          => try self.airFence(inst),
.atomic_rmw     => try self.airAtomicRmw(inst),
.atomic_load    => try self.airAtomicLoad(inst),
.memset         => try self.airMemset(inst),
.memcpy         => try self.airMemcpy(inst),
.set_union_tag  => try self.airSetUnionTag(inst),
.get_union_tag  => try self.airGetUnionTag(inst),
.clz            => try self.airClzCtz(inst, "llvm.ctlz"),
.ctz            => try self.airClzCtz(inst, "llvm.cttz"),
.popcount       => try self.airBitOp(inst, "llvm.ctpop"),
.byte_swap      => try self.airByteSwap(inst, "llvm.bswap"),
.bit_reverse    => try self.airBitOp(inst, "llvm.bitreverse"),
.tag_name       => try self.airTagName(inst),
.error_name     => try self.airErrorName(inst),
.splat          => try self.airSplat(inst),
.vector_init    => try self.airVectorInit(inst),
.prefetch       => try self.airPrefetch(inst),

.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
.atomic_store_release   => try self.airAtomicStore(inst, .Release),
.atomic_store_seq_cst   => try self.airAtomicStore(inst, .SequentiallyConsistent),

.struct_field_ptr => try self.airStructFieldPtr(inst),
.struct_field_val => try self.airStructFieldVal(inst),

.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
.struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2),
.struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3),

.field_parent_ptr => try self.airFieldParentPtr(inst),

.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
.slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val   => try self.airPtrElemVal(inst),
.ptr_elem_ptr   => try self.airPtrElemPtr(inst),

.optional_payload     => try self.airOptionalPayload(inst),
.optional_payload_ptr => try self.airOptionalPayloadPtr(inst),

.optional_payload_ptr_set => try self.airOptionalPayloadPtrSet(inst),

.unwrap_errunion_payload     => try self.airErrUnionPayload(inst, false),
.unwrap_errunion_payload_ptr => try self.airErrUnionPayload(inst, true),
.unwrap_errunion_err         => try self.airErrUnionErr(inst, false),
.unwrap_errunion_err_ptr     => try self.airErrUnionErr(inst, true),
.errunion_payload_ptr_set    => try self.airErrUnionPayloadPtrSet(inst),

.wrap_optional         => try self.airWrapOptional(inst),
.wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
.wrap_errunion_err     => try self.airWrapErrUnionErr(inst),

// Constants are resolved lazily via resolveInst, never walked here.
.constant => unreachable,
.const_ty => unreachable,

.unreach  => self.airUnreach(inst),
.dbg_stmt => blk: {
    // TODO: implement debug info
    break :blk null;
},
// zig fmt: on
};
if (opt_value) |val| {
    const ref = Air.indexToRef(inst);
    try self.func_inst_table.putNoClobber(self.gpa, ref, val);
}
}
}

/// Lowers an AIR call: builds the argument list (sret pointer first when the
/// return is passed indirectly; zero-bit args skipped), emits the call, and
/// handles noreturn / unused-result / sret result cases.
fn airCall(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra =
self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.air.typeOf(pl_op.operand);
// The callee may be a function or a pointer to one.
const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
    .Fn => callee_ty,
    .Pointer => callee_ty.childType(),
    else => unreachable,
};
const fn_info = zig_fn_ty.fnInfo();
const return_type = fn_info.return_type;
const llvm_fn = try self.resolveInst(pl_op.operand);
const target = self.dg.module.getTarget();
const sret = firstParamSRet(fn_info, target);

var llvm_args = std.ArrayList(*const llvm.Value).init(self.gpa);
defer llvm_args.deinit();

// When the return value is passed indirectly (sret), allocate the result
// slot up front and pass its pointer as the first argument.
const ret_ptr = if (!sret) null else blk: {
    const llvm_ret_ty = try self.dg.llvmType(return_type);
    const ret_ptr = self.buildAlloca(llvm_ret_ty);
    ret_ptr.setAlignment(return_type.abiAlignment(target));
    try llvm_args.append(ret_ptr);
    break :blk ret_ptr;
};

if (fn_info.is_var_args) {
    for (args) |arg| {
        try llvm_args.append(try self.resolveInst(arg));
    }
} else {
    for (args) |arg, i| {
        const param_ty = fn_info.param_types[i];
        // Zero-bit parameters are omitted from the LLVM signature.
        if (!param_ty.hasRuntimeBits()) continue;

        try llvm_args.append(try self.resolveInst(arg));
    }
}

const call = self.builder.buildCall(
    llvm_fn,
    llvm_args.items.ptr,
    @intCast(c_uint, llvm_args.items.len),
    toLlvmCallConv(zig_fn_ty.fnCallingConvention(), target),
    .Auto,
    "",
);

if (return_type.isNoReturn()) {
    _ = self.builder.buildUnreachable();
    return null;
} else if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBits()) {
    return null;
} else if (sret) {
    const llvm_ret_ty = try self.dg.llvmType(return_type);
    call.setCallSret(llvm_ret_ty);
    return ret_ptr;
} else {
    return call;
}
}

/// Lowers an AIR `ret`: stores into the sret pointer when present, otherwise
/// returns the operand directly (void for zero-bit return types).
fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const ret_ty = self.air.typeOf(un_op);
    if (self.ret_ptr) |ret_ptr| {
        const operand = try self.resolveInst(un_op);
        // Fabricate a single-item pointer type to the return type so the
        // generic store path can be reused.
        var ptr_ty_payload: Type.Payload.ElemType = .{
            .base = .{ .tag = .single_mut_pointer },
            .data = ret_ty,
        };
        const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
        self.store(ret_ptr, ptr_ty, operand, .NotAtomic);
        _ = self.builder.buildRetVoid();
        return null;
    }
    if (!ret_ty.hasRuntimeBits()) {
        _ = self.builder.buildRetVoid();
        return null;
    }
    const operand = try self.resolveInst(un_op);
    _ = self.builder.buildRet(operand);
    return null;
}

/// Lowers an AIR `ret_load`: loads the result through the given pointer and
/// returns it; returns void when the value is zero-bit or passed by-ref.
fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const ptr_ty = self.air.typeOf(un_op);
    const ret_ty = ptr_ty.childType();
    if (!ret_ty.hasRuntimeBits() or isByRef(ret_ty)) {
        _ = self.builder.buildRetVoid();
        return null;
    }
    const ptr = try self.resolveInst(un_op);
    const loaded = self.builder.buildLoad(ptr, "");
    _ = self.builder.buildRet(loaded);
    return null;
}

/// Lowers an AIR comparison instruction; delegates to `cmp`.
fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const operand_ty = self.air.typeOf(bin_op.lhs);

    return self.cmp(lhs, rhs, operand_ty, op);
}

/// Emits a comparison of two values of `operand_ty`. Ints/enums/bools/
/// pointers/error sets use icmp, floats use fcmp, and non-pointer-like
/// optionals get an explicit null-check + payload-compare control-flow graph.
fn cmp(
    self: *FuncGen,
    lhs: *const llvm.Value,
    rhs: *const llvm.Value,
    operand_ty: Type,
    op: math.CompareOperator,
) *const llvm.Value {
    var int_buffer: Type.Payload.Bits = undefined;
    var opt_buffer: Type.Payload.ElemType = undefined;

    const int_ty = switch (operand_ty.zigTypeTag()) {
        .Enum => operand_ty.intTagType(&int_buffer),
        .Int, .Bool, .Pointer, .ErrorSet => operand_ty,
        .Optional => blk: {
            const payload_ty = operand_ty.optionalChild(&opt_buffer);
            if (!payload_ty.hasRuntimeBits() or operand_ty.isPtrLikeOptional()) {
                // Pointer-like or zero-bit-payload optionals compare as ints.
                break :blk operand_ty;
            }
            // We need to emit instructions to check for equality/inequality
            // of optionals that are not pointers.
const is_by_ref = isByRef(operand_ty);
const lhs_non_null = self.optIsNonNull(lhs, is_by_ref);
const rhs_non_null = self.optIsNonNull(rhs, is_by_ref);
// Pack the two non-null bits into an i2: bit1 = lhs, bit0 = rhs.
// 0b00 = both null, 0b11 = both non-null, 0b01/0b10 = mixed.
const llvm_i2 = self.context.intType(2);
const lhs_non_null_i2 = self.builder.buildZExt(lhs_non_null, llvm_i2, "");
const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2, "");
const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, llvm_i2.constInt(1, .False), "");
const lhs_rhs_ored = self.builder.buildOr(lhs_shifted, rhs_non_null_i2, "");
const both_null_block = self.context.appendBasicBlock(self.llvm_func, "BothNull");
const mixed_block = self.context.appendBasicBlock(self.llvm_func, "Mixed");
const both_pl_block = self.context.appendBasicBlock(self.llvm_func, "BothNonNull");
const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
// Mixed (one null, one not) is the switch's default destination.
const llvm_switch = self.builder.buildSwitch(lhs_rhs_ored, mixed_block, 2);
const llvm_i2_00 = llvm_i2.constInt(0b00, .False);
const llvm_i2_11 = llvm_i2.constInt(0b11, .False);
llvm_switch.addCase(llvm_i2_00, both_null_block);
llvm_switch.addCase(llvm_i2_11, both_pl_block);

self.builder.positionBuilderAtEnd(both_null_block);
_ = self.builder.buildBr(end_block);

self.builder.positionBuilderAtEnd(mixed_block);
_ = self.builder.buildBr(end_block);

self.builder.positionBuilderAtEnd(both_pl_block);
// Both non-null: recursively compare the payloads.
const lhs_payload = self.optPayloadHandle(lhs, is_by_ref);
const rhs_payload = self.optPayloadHandle(rhs, is_by_ref);
const payload_cmp = self.cmp(lhs_payload, rhs_payload, payload_ty, op);
_ = self.builder.buildBr(end_block);
// The recursive cmp may have emitted more blocks; record the block
// actually ending with the branch for correct phi wiring.
const both_pl_block_end = self.builder.getInsertBlock();

self.builder.positionBuilderAtEnd(end_block);
const incoming_blocks: [3]*const llvm.BasicBlock = .{
    both_null_block,
    mixed_block,
    both_pl_block_end,
};
const llvm_i1 = self.context.intType(1);
const llvm_i1_0 = llvm_i1.constInt(0, .False);
const llvm_i1_1 = llvm_i1.constInt(1, .False);
// both-null: eq => true; mixed: eq => false; both-payload: payload result.
// Only .eq and .neq are valid for optionals, hence the unreachables.
const incoming_values: [3]*const llvm.Value = .{
    switch (op) {
        .eq => llvm_i1_1,
        .neq => llvm_i1_0,
        else => unreachable,
    },
    switch (op) {
        .eq => llvm_i1_0,
        .neq => llvm_i1_1,
        else => unreachable,
    },
    payload_cmp,
};

const phi_node = self.builder.buildPhi(llvm_i1, "");
comptime assert(incoming_values.len == incoming_blocks.len);
phi_node.addIncoming(
    &incoming_values,
    &incoming_blocks,
    incoming_values.len,
);
return phi_node;
},
.Float => {
    // Ordered predicates except .neq, which uses unordered-or-unequal.
    const operation: llvm.RealPredicate = switch (op) {
        .eq => .OEQ,
        .neq => .UNE,
        .lt => .OLT,
        .lte => .OLE,
        .gt => .OGT,
        .gte => .OGE,
    };
    return self.builder.buildFCmp(operation, lhs, rhs, "");
},
else => unreachable,
};
const is_signed = int_ty.isSignedInt();
// Pick signed vs unsigned integer predicates based on the operand type.
const operation: llvm.IntPredicate = switch (op) {
    .eq => .EQ,
    .neq => .NE,
    .lt => if (is_signed) llvm.IntPredicate.SLT else .ULT,
    .lte => if (is_signed) llvm.IntPredicate.SLE else .ULE,
    .gt => if (is_signed) llvm.IntPredicate.SGT else .UGT,
    .gte => if (is_signed) llvm.IntPredicate.SGE else .UGE,
};
return self.builder.buildICmp(operation, lhs, rhs, "");
}

/// Lowers an AIR `block`: generates the body, then appends the merge basic
/// block and (if the block yields a runtime value) a phi over all `br` edges.
fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Block, ty_pl.payload);
    const body = self.air.extra[extra.end..][0..extra.data.body_len];
    const inst_ty = self.air.typeOfIndex(inst);
    // Created detached; appended to the function only after the body, so it
    // ends up after all blocks the body generates.
    const parent_bb = self.context.createBasicBlock("Block");

    if (inst_ty.isNoReturn()) {
        try self.genBody(body);
        return null;
    }

    var break_bbs: BreakBasicBlocks = .{};
    defer break_bbs.deinit(self.gpa);
    var break_vals: BreakValues = .{};
    defer break_vals.deinit(self.gpa);

    // Register this block so nested `br` instructions can find the merge
    // block and append their incoming values.
    try self.blocks.putNoClobber(self.gpa, inst, .{
        .parent_bb = parent_bb,
        .break_bbs = &break_bbs,
        .break_vals = &break_vals,
    });
    defer assert(self.blocks.remove(inst));

    try self.genBody(body);

    self.llvm_func.appendExistingBasicBlock(parent_bb);
    self.builder.positionBuilderAtEnd(parent_bb);

    // If the block does not return a value, we dont have to create a phi node.
// Tail of airBlock: build the phi that merges every break value.
const is_body = inst_ty.zigTypeTag() == .Fn;
if (!is_body and !inst_ty.hasRuntimeBits()) return null;

const raw_llvm_ty = try self.dg.llvmType(inst_ty);

const llvm_ty = ty: {
    // If the zig tag type is a function, this represents an actual function body; not
    // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
    // of function pointers, however the phi makes it a runtime value and therefore
    // the LLVM type has to be wrapped in a pointer.
    if (is_body or isByRef(inst_ty)) {
        break :ty raw_llvm_ty.pointerType(0);
    }
    break :ty raw_llvm_ty;
};

// One incoming (value, block) pair per `br` targeting this block, recorded
// by airBr while genBody ran.
const phi_node = self.builder.buildPhi(llvm_ty, "");
phi_node.addIncoming(
    break_vals.items.ptr,
    break_bbs.items.ptr,
    @intCast(c_uint, break_vals.items.len),
);
return phi_node;
}

/// Lowers an AIR `br`: records the break value and the current basic block
/// into the target block's phi lists (if the value has runtime bits), then
/// emits an unconditional branch to the target's merge block.
fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const branch = self.air.instructions.items(.data)[inst].br;
    // The target block must have been registered by airBlock.
    const block = self.blocks.get(branch.block_inst).?;

    // If the break doesn't break a value, then we don't have to add
    // the values to the lists.
    const operand_ty = self.air.typeOf(branch.operand);
    if (operand_ty.hasRuntimeBits() or operand_ty.zigTypeTag() == .Fn) {
        const val = try self.resolveInst(branch.operand);

        // For the phi node, we need the basic blocks and the values of the
        // break instructions.
        // The current block (not the one the `br` started in) is the phi's
        // incoming block, since resolveInst may have emitted more blocks.
        try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
        try block.break_vals.append(self.gpa, val);
    }
    _ = self.builder.buildBr(block.parent_bb);
    return null;
}

/// Lowers an AIR `cond_br`: evaluates the condition, emits a conditional
/// branch into freshly appended Then/Else blocks, and generates both bodies.
/// AIR guarantees both bodies are noreturn, so nothing merges afterwards.
fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const cond = try self.resolveInst(pl_op.operand);
    const extra = self.air.extraData(Air.CondBr, pl_op.payload);
    // then_body is stored first in `extra`; else_body follows immediately.
    const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
    const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];

    const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
    const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
    _ = self.builder.buildCondBr(cond, then_block, else_block);

    self.builder.positionBuilderAtEnd(then_block);
    try self.genBody(then_body);

    self.builder.positionBuilderAtEnd(else_block);
    try self.genBody(else_body);

    // No need to reset the insert cursor since this instruction is noreturn.
    return null;
}

/// Lowers an AIR `switch_br`: builds an LLVM switch over the condition with
/// one case block per AIR case (multiple items may share a block), and an
/// else block that either runs the else body or is unreachable.
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const cond = try self.resolveInst(pl_op.operand);
    const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
    const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
    const llvm_switch = self.builder.buildSwitch(cond, else_block, switch_br.data.cases_len);

    // Walk the variable-length case payloads in `air.extra` sequentially.
    var extra_index: usize = switch_br.end;
    var case_i: u32 = 0;

    while (case_i < switch_br.data.cases_len) : (case_i += 1) {
        const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
        const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
        const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
        extra_index = case.end + case.data.items_len + case_body.len;

        const case_block = self.context.appendBasicBlock(self.llvm_func, "Case");

        // Every item of the case maps to the same target block.
        for (items) |item| {
            const llvm_item = try self.resolveInst(item);
            llvm_switch.addCase(llvm_item, case_block);
        }

        self.builder.positionBuilderAtEnd(case_block);
        try self.genBody(case_body);
    }

    self.builder.positionBuilderAtEnd(else_block);
    const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
    if (else_body.len != 0) {
        try self.genBody(else_body);
    } else {
        // No else body means the switch is exhaustive; reaching here is UB.
        _ = self.builder.buildUnreachable();
    }

    // No need to reset the insert cursor since this instruction is noreturn.
    return null;
}

/// Lowers an AIR `loop`: an unconditional back-edge into a single Loop block
/// containing the body. A trailing back-branch is added only when the body
/// does not already end in a noreturn instruction.
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const loop = self.air.extraData(Air.Block, ty_pl.payload);
    const body = self.air.extra[loop.end..][0..loop.data.body_len];
    const loop_block = self.context.appendBasicBlock(self.llvm_func, "Loop");
    _ = self.builder.buildBr(loop_block);

    self.builder.positionBuilderAtEnd(loop_block);
    try self.genBody(body);

    // TODO instead of this logic, change AIR to have the property that
    // every block is guaranteed to end with a noreturn instruction.
    // Then we can simply rely on the fact that a repeat or break instruction
    // would have been emitted already. Also the main loop in genBody can
    // be while(true) instead of for(body), which will eliminate 1 branch on
    // a hot path.
    if (body.len == 0 or !self.air.typeOfIndex(body[body.len - 1]).isNoReturn()) {
        _ = self.builder.buildBr(loop_block);
    }
    return null;
}

/// Lowers `array_to_slice`: builds a slice aggregate { ptr, len } from a
/// pointer-to-array operand. The length is a compile-time constant; for
/// zero-bit arrays only the length field is populated.
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand_ty = self.air.typeOf(ty_op.operand);
    const array_ty = operand_ty.childType();
    const llvm_usize = try self.dg.llvmType(Type.usize);
    const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
    const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
    if (!array_ty.hasRuntimeBits()) {
        // Zero-bit element: slice field 1 (len) is set; the ptr is undef.
        return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, "");
    }
    const operand = try self.resolveInst(ty_op.operand);
    // GEP [0, 0]: decay the pointer-to-array into a pointer to element 0.
    const indices: [2]*const llvm.Value = .{
        llvm_usize.constNull(), llvm_usize.constNull(),
    };
    const ptr = self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
    const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, "");
    return self.builder.buildInsertValue(partial, len, 1, "");
}

/// Lowers `int_to_float`: signed ints use sitofp, unsigned use uitofp.
fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if
    (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const dest_ty = self.air.typeOfIndex(inst);
    const dest_llvm_ty = try self.dg.llvmType(dest_ty);

    if (dest_ty.isSignedInt()) {
        return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
    } else {
        return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
    }
}

/// Lowers `float_to_int`: signed targets use fptosi, unsigned use fptoui.
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const dest_ty = self.air.typeOfIndex(inst);
    const dest_llvm_ty = try self.dg.llvmType(dest_ty);

    // TODO set fast math flag
    if (dest_ty.isSignedInt()) {
        return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
    } else {
        return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
    }
}

/// Extracts a field (ptr at index 0 or len at index 1) from a by-value slice.
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    return self.builder.buildExtractValue(operand, index, "");
}

/// Computes a pointer to a field of a slice, given a pointer to the slice.
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const slice_ptr = try self.resolveInst(ty_op.operand);
    return self.builder.buildStructGEP(slice_ptr, index, "");
}

/// Lowers `slice_elem_val`: indexes the slice's ptr field and loads.
/// Volatile slices are lowered even if the result is unused.
fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const slice_ty = self.air.typeOf(bin_op.lhs);
    if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;

    const slice = try self.resolveInst(bin_op.lhs);
    const index = try self.resolveInst(bin_op.rhs);
    const ptr = self.sliceElemPtr(slice, index);
    return
    self.load(ptr, slice_ty);
}

/// Lowers `slice_elem_ptr`: pointer to slice[index], no load.
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;

    const slice = try self.resolveInst(bin_op.lhs);
    const index = try self.resolveInst(bin_op.rhs);

    return self.sliceElemPtr(slice, index);
}

/// Lowers `array_elem_val`. By-ref arrays are indexed with a GEP (returning
/// either the element pointer for by-ref elements or a loaded value);
/// by-value operands are treated as vectors and use extractelement.
fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const array_ty = self.air.typeOf(bin_op.lhs);
    const array_llvm_val = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    if (isByRef(array_ty)) {
        // GEP [0, rhs]: first index dereferences the array pointer.
        const indices: [2]*const llvm.Value = .{ self.context.intType(32).constNull(), rhs };
        const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_val, &indices, indices.len, "");
        const elem_ty = array_ty.childType();
        if (isByRef(elem_ty)) {
            return elem_ptr;
        } else {
            return self.builder.buildLoad(elem_ptr, "");
        }
    }

    // This branch can be reached for vectors, which are always by-value.
    return self.builder.buildExtractElement(array_llvm_val, rhs, "");
}

/// Lowers `ptr_elem_val`: GEP + load through a (possibly single-item-array)
/// pointer. Volatile pointers are lowered even if the result is unused.
fn airPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;

    const base_ptr = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const ptr = if (ptr_ty.isSinglePointer()) ptr: {
        // If this is a single-item pointer to an array, we need another index in the GEP.
        const indices: [2]*const llvm.Value = .{ self.context.intType(32).constNull(), rhs };
        break :ptr self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    } else ptr: {
        // Many-item pointer: a single GEP index suffices.
        const indices: [1]*const llvm.Value = .{rhs};
        break :ptr self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    };
    return self.load(ptr, ptr_ty);
}

/// Lowers `ptr_elem_ptr`: computes &ptr[index] with a GEP; returns null for
/// zero-bit element types (no runtime representation to address).
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    const elem_ty = ptr_ty.childType();
    if (!elem_ty.hasRuntimeBits()) return null;

    const base_ptr = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    if (ptr_ty.isSinglePointer()) {
        // If this is a single-item pointer to an array, we need another index in the GEP.
        const indices: [2]*const llvm.Value = .{ self.context.intType(32).constNull(), rhs };
        return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    } else {
        const indices: [1]*const llvm.Value = .{rhs};
        return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    }
}

/// Lowers `struct_field_ptr`: delegates to fieldPtr with the field index
/// taken from the instruction payload.
fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
    const struct_ptr = try self.resolveInst(struct_field.struct_operand);
    const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand);
    return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, struct_field.field_index);
}

/// Lowers the `struct_field_ptr_index_N` instructions: same as
/// airStructFieldPtr but the field index is baked into the opcode.
fn airStructFieldPtrIndex(
    self: *FuncGen,
    inst: Air.Inst.Index,
    field_index: u32,
) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const struct_ptr = try self.resolveInst(ty_op.operand);
    const
    struct_ptr_ty = self.air.typeOf(ty_op.operand);
    return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
}

/// Lowers `struct_field_val`: reads one field out of a struct or union value.
/// By-value packed structs are extracted with shift+trunc from the backing
/// integer; by-value ordinary structs use extractvalue; by-ref aggregates
/// fall through to the GEP-based path below.
fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
    const struct_ty = self.air.typeOf(struct_field.struct_operand);
    const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
    const field_index = struct_field.field_index;
    const field_ty = struct_ty.structFieldType(field_index);
    if (!field_ty.hasRuntimeBits()) {
        return null;
    }
    const target = self.dg.module.getTarget();

    if (!isByRef(struct_ty)) {
        assert(!isByRef(field_ty));
        switch (struct_ty.zigTypeTag()) {
            .Struct => switch (struct_ty.containerLayout()) {
                .Packed => {
                    // Packed structs are represented as one big integer:
                    // shift the field down to bit 0, then truncate.
                    const struct_obj = struct_ty.castTag(.@"struct").?.data;
                    const bit_offset = struct_obj.packedFieldBitOffset(target, field_index);
                    const containing_int = struct_llvm_val;
                    const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
                    const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
                    const elem_llvm_ty = try self.dg.llvmType(field_ty);
                    if (field_ty.zigTypeTag() == .Float) {
                        // Floats cannot be truncated from an integer directly:
                        // truncate to a same-sized int, then bitcast.
                        const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
                        const same_size_int = self.context.intType(elem_bits);
                        const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                        return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
                    }
                    return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
                },
                else => {
                    // Map the Zig field index to the LLVM struct index (they
                    // can differ, e.g. due to zero-bit fields).
                    var ptr_ty_buf: Type.Payload.Pointer = undefined;
                    const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
                    return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
                },
            },
            .Union => {
                return self.todo("airStructFieldVal byval union", .{});
            },
            else => unreachable,
        }
    }

    // By-ref aggregate: compute a field pointer, then load (or return the
    // pointer itself for by-ref field types).
    switch (struct_ty.zigTypeTag()) {
        .Struct => {
            assert(struct_ty.containerLayout() != .Packed);
            var ptr_ty_buf: Type.Payload.Pointer = undefined;
            const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
            const field_ptr = self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
            const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
            return self.load(field_ptr, field_ptr_ty);
        },
        .Union => {
            const llvm_field_ty = try self.dg.llvmType(field_ty);
            const layout = struct_ty.unionGetLayout(target);
            // The payload is LLVM field 1 when the tag is stored first
            // (tag_align >= payload_align), otherwise field 0.
            const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
            const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, "");
            // The payload slot is untyped storage; cast to this field's type.
            const field_ptr = self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), "");
            if (isByRef(field_ty)) {
                return field_ptr;
            } else {
                return self.builder.buildLoad(field_ptr, "");
            }
        },
        else => unreachable,
    }
}

/// Lowers `field_parent_ptr` (@fieldParentPtr): recovers the parent struct
/// pointer by subtracting the field's byte offset from the field pointer via
/// ptrtoint/inttoptr; a zero offset is just a bitcast.
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;

    const field_ptr = try self.resolveInst(extra.field_ptr);

    const target = self.dg.module.getTarget();
    const struct_ty = self.air.getRefType(ty_pl.ty).childType();
    const field_offset = struct_ty.structFieldOffset(extra.field_index, target);

    const res_ty = try self.dg.llvmType(self.air.getRefType(ty_pl.ty));
    if (field_offset == 0) {
        return self.builder.buildBitCast(field_ptr, res_ty, "");
    }
    const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());

    const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, "");
    // NUW: the field pointer is always >= the parent pointer, so the
    // subtraction cannot wrap.
    const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), "");
    return self.builder.buildIntToPtr(base_ptr_int, res_ty, "");
}

/// Lowers `not`: bitwise/boolean complement via LLVM's not.
fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op =
    self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);

    return self.builder.buildNot(operand, "");
}

/// Lowers `unreach`: emits LLVM unreachable; produces no value.
fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value {
    _ = inst;
    _ = self.builder.buildUnreachable();
    return null;
}

/// Lowers an AIR `asm` instruction into an LLVM inline-asm call: decodes
/// outputs/inputs/clobbers from `air.extra`, builds the LLVM constraint
/// string, and emits the call. Currently supports at most one (returned)
/// output.
fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    // Eventually, the Zig compiler needs to be reworked to have inline assembly go
    // through the same parsing code regardless of backend, and have LLVM-flavored
    // inline assembly be *output* from that assembler.
    // We don't have such an assembler implemented yet though. For now, this
    // implementation feeds the inline assembly code directly to LLVM, same
    // as stage1.
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Asm, ty_pl.payload);
    // flags: bit 31 = volatile, low 31 bits = clobber count.
    const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
    const clobbers_len = @truncate(u31, extra.data.flags);
    var extra_i: usize = extra.end;

    if (!is_volatile and self.liveness.isUnused(inst)) return null;

    const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
    extra_i += outputs.len;
    const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
    extra_i += inputs.len;

    if (outputs.len > 1) {
        return self.todo("implement llvm codegen for asm with more than 1 output", .{});
    }

    var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
    defer llvm_constraints.deinit(self.gpa);

    // Arena for the short-lived parameter type/value arrays.
    var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
    defer arena_allocator.deinit();
    const arena = arena_allocator.allocator();

    const llvm_params_len = inputs.len;
    const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len);
    const llvm_param_values = try arena.alloc(*const llvm.Value, llvm_params_len);
    var llvm_param_i: usize = 0;
    // total_i counts all constraint entries so commas are placed correctly.
    var total_i: usize = 0;

    for (outputs) |output| {
        if (output != .none) {
            return self.todo("implement inline asm with non-returned output", .{});
        }
        const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
        // This equation accounts for the fact that even if we have exactly 4 bytes
        // for the string, we still use the next u32 for the null terminator.
        extra_i += constraint.len / 4 + 1;

        try llvm_constraints.ensureUnusedCapacity(self.gpa, constraint.len + 1);
        if (total_i != 0) {
            llvm_constraints.appendAssumeCapacity(',');
        }
        // Output constraint: '=' prefix, dropping the leading Zig sigil.
        llvm_constraints.appendAssumeCapacity('=');
        llvm_constraints.appendSliceAssumeCapacity(constraint[1..]);

        total_i += 1;
    }

    for (inputs) |input| {
        const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
        // This equation accounts for the fact that even if we have exactly 4 bytes
        // for the string, we still use the next u32 for the null terminator.
        extra_i += constraint.len / 4 + 1;

        const arg_llvm_value = try self.resolveInst(input);

        llvm_param_values[llvm_param_i] = arg_llvm_value;
        llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();

        try llvm_constraints.ensureUnusedCapacity(self.gpa, constraint.len + 1);
        if (total_i != 0) {
            llvm_constraints.appendAssumeCapacity(',');
        }
        llvm_constraints.appendSliceAssumeCapacity(constraint);

        llvm_param_i += 1;
        total_i += 1;
    }

    {
        var clobber_i: u32 = 0;
        while (clobber_i < clobbers_len) : (clobber_i += 1) {
            const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
            // This equation accounts for the fact that even if we have exactly 4 bytes
            // for the string, we still use the next u32 for the null terminator.
            extra_i += clobber.len / 4 + 1;

            // "~{name}" is LLVM's clobber constraint syntax.
            try llvm_constraints.ensureUnusedCapacity(self.gpa, clobber.len + 4);
            if (total_i != 0) {
                llvm_constraints.appendAssumeCapacity(',');
            }
            llvm_constraints.appendSliceAssumeCapacity("~{");
            llvm_constraints.appendSliceAssumeCapacity(clobber);
            llvm_constraints.appendSliceAssumeCapacity("}");

            total_i += 1;
        }
    }
    const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];

    const ret_ty = self.air.typeOfIndex(inst);
    const ret_llvm_ty = try self.dg.llvmType(ret_ty);
    const llvm_fn_ty = llvm.functionType(
        ret_llvm_ty,
        llvm_param_types.ptr,
        @intCast(c_uint, llvm_param_types.len),
        .False,
    );
    const asm_fn = llvm.getInlineAsm(
        llvm_fn_ty,
        asm_source.ptr,
        asm_source.len,
        llvm_constraints.items.ptr,
        llvm_constraints.items.len,
        llvm.Bool.fromBool(is_volatile),
        .False, // not align-stack
        .ATT, // assembly dialect
        .False, // not can-throw
    );
    return self.builder.buildCall(
        asm_fn,
        llvm_param_values.ptr,
        @intCast(c_uint, llvm_param_values.len),
        .C,
        .Auto,
        "",
    );
}

/// Lowers is_null / is_non_null (and their _ptr variants): compares the
/// optional's representation against null. `pred`/`invert` select which
/// polarity the caller wants; `operand_is_ptr` means the operand is a
/// pointer to the optional rather than the optional itself.
fn airIsNonNull(
    self: *FuncGen,
    inst: Air.Inst.Index,
    operand_is_ptr: bool,
    invert: bool,
    pred: llvm.IntPredicate,
) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const operand_ty = self.air.typeOf(un_op);
    const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
    if (optional_ty.isPtrLikeOptional()) {
        // Pointer-like optionals encode null as the null pointer itself.
        const optional_llvm_ty = try self.dg.llvmType(optional_ty);
        const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand;
        return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), "");
    }

    var buf: Type.Payload.ElemType = undefined;
    const payload_ty = optional_ty.optionalChild(&buf);
    if (!payload_ty.hasRuntimeBits()) {
        // Zero-bit payload: the whole optional IS the non-null bit.
        if (invert) {
            return self.builder.buildNot(operand, "");
        } else {
            return operand;
        }
    }

    const is_by_ref = operand_is_ptr or isByRef(optional_ty);
    const non_null_bit = self.optIsNonNull(operand, is_by_ref);
    if (invert) {
        return self.builder.buildNot(non_null_bit, "");
    } else {
        return non_null_bit;
    }
}

/// Lowers is_err / is_non_err (and their _ptr variants): compares the error
/// union's error field against the zero ("no error") value using `op`.
fn airIsErr(
    self: *FuncGen,
    inst: Air.Inst.Index,
    op: llvm.IntPredicate,
    operand_is_ptr: bool,
) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const err_union_ty = self.air.typeOf(un_op);
    const payload_ty = err_union_ty.errorUnionPayload();
    const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror));
    // Error value 0 means "no error".
    const zero = err_set_ty.constNull();

    if (!payload_ty.hasRuntimeBits()) {
        // Zero-bit payload: the error union IS the error value.
        const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand;
        return self.builder.buildICmp(op, loaded, zero, "");
    }

    if (operand_is_ptr or isByRef(err_union_ty)) {
        // Error code is struct field 0 of the error union representation.
        const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
        const loaded = self.builder.buildLoad(err_field_ptr, "");
        return self.builder.buildICmp(op, loaded, zero, "");
    }

    const loaded = self.builder.buildExtractValue(operand, 0, "");
    return self.builder.buildICmp(op, loaded, zero, "");
}

/// Lowers `optional_payload_ptr`: given a pointer to an optional, returns a
/// pointer to its payload without checking the non-null bit.
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const optional_ty = self.air.typeOf(ty_op.operand).childType();
    var buf: Type.Payload.ElemType = undefined;
    const payload_ty = optional_ty.optionalChild(&buf);
    if (!payload_ty.hasRuntimeBits()) {
        // We have a pointer to a zero-bit value and we need to return
        // a pointer to a zero-bit value.
        return operand;
    }
    if (optional_ty.isPtrLikeOptional()) {
        // The payload and the optional are the same value.
        return operand;
    }
    const index_type = self.context.intType(32);
    const indices: [2]*const llvm.Value = .{
        index_type.constNull(), // dereference the pointer
        index_type.constNull(), // first field is the payload
    };
    return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
}

/// Lowers `optional_payload_ptr_set`: marks the optional pointed to by the
/// operand as non-null, then returns a pointer to its payload (or null if
/// the result is unused).
fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const optional_ty = self.air.typeOf(ty_op.operand).childType();
    var buf: Type.Payload.ElemType = undefined;
    const payload_ty = optional_ty.optionalChild(&buf);
    const non_null_bit = self.context.intType(1).constAllOnes();
    if (!payload_ty.hasRuntimeBits()) {
        // We have a pointer to a i1. We need to set it to 1 and then return the same pointer.
        _ = self.builder.buildStore(non_null_bit, operand);
        return operand;
    }
    if (optional_ty.isPtrLikeOptional()) {
        // The payload and the optional are the same value.
        // Setting to non-null will be done when the payload is set.
        return operand;
    }
    const index_type = self.context.intType(32);
    {
        // First set the non-null bit.
        const indices: [2]*const llvm.Value = .{
            index_type.constNull(), // dereference the pointer
            index_type.constInt(1, .False), // second field is the non-null bit
        };
        const non_null_ptr = self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
        _ = self.builder.buildStore(non_null_bit, non_null_ptr);
    }
    // Then return the payload pointer (only if it's used).
    if (self.liveness.isUnused(inst)) return null;

    const indices: [2]*const llvm.Value = .{
        index_type.constNull(), // dereference the pointer
        index_type.constNull(), // first field is the payload
    };
    return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
}

/// Lowers `optional_payload`: extracts the payload from an optional value
/// without checking the non-null bit.
fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const optional_ty = self.air.typeOf(ty_op.operand);
    const payload_ty = self.air.typeOfIndex(inst);
    if (!payload_ty.hasRuntimeBits()) return null;

    if (optional_ty.isPtrLikeOptional()) {
        // Payload value is the same as the optional value.
        return operand;
    }

    return self.optPayloadHandle(operand, isByRef(payload_ty));
}

/// Lowers unwrap_errunion_payload (and the _ptr variant): reads field 1
/// (the payload) of the error union, as a pointer or value as appropriate.
fn airErrUnionPayload(
    self: *FuncGen,
    inst: Air.Inst.Index,
    operand_is_ptr: bool,
) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const result_ty = self.air.getRefType(ty_op.ty);
    const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;

    if (!payload_ty.hasRuntimeBits()) return null;

    if (operand_is_ptr or isByRef(payload_ty)) {
        return self.builder.buildStructGEP(operand, 1, "");
    }

    return self.builder.buildExtractValue(operand, 1, "");
}

/// Lowers unwrap_errunion_err (and the _ptr variant): reads field 0 (the
/// error code) of the error union. With a zero-bit payload the error union
/// is just the error value itself.
fn airErrUnionErr(
    self: *FuncGen,
    inst: Air.Inst.Index,
    operand_is_ptr: bool,
) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const operand_ty = self.air.typeOf(ty_op.operand);
    const err_set_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
    const payload_ty = err_set_ty.errorUnionPayload();
    if (!payload_ty.hasRuntimeBits()) {
        if (!operand_is_ptr) return operand;
        return self.builder.buildLoad(operand, "");
    }

    if
    (operand_is_ptr or isByRef(err_set_ty)) {
        // Error code is struct field 0 of the error union representation.
        const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
        return self.builder.buildLoad(err_field_ptr, "");
    }

    return self.builder.buildExtractValue(operand, 0, "");
}

/// Lowers `errunion_payload_ptr_set`: stores the zero ("no error") error
/// code into the error union pointed to by the operand, then returns a
/// pointer to its payload (or null if the result is unused).
fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const error_set_ty = self.air.typeOf(ty_op.operand).childType();

    const error_ty = error_set_ty.errorUnionSet();
    const payload_ty = error_set_ty.errorUnionPayload();
    // Error value 0 means "no error".
    const non_error_val = try self.dg.genTypedValue(.{ .ty = error_ty, .val = Value.zero });

    if (!payload_ty.hasRuntimeBits()) {
        // Zero-bit payload: the pointee is just the error value. Store the
        // zero error code and return the same pointer.
        _ = self.builder.buildStore(non_error_val, operand);
        return operand;
    }

    const index_type = self.context.intType(32);
    {
        // First set the non-error value.
        const indices: [2]*const llvm.Value = .{
            index_type.constNull(), // dereference the pointer
            index_type.constNull(), // first field is the error code
        };
        const non_null_ptr = self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
        _ = self.builder.buildStore(non_error_val, non_null_ptr);
    }
    // Then return the payload pointer (only if it is used).
    if (self.liveness.isUnused(inst)) return null;

    const indices: [2]*const llvm.Value = .{
        index_type.constNull(), // dereference the pointer
        index_type.constInt(1, .False), // second field is the payload
    };
    return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
}

/// Lowers `wrap_optional`: builds an optional from a payload. Zero-bit
/// payloads reduce to the constant non-null bit; pointer-like optionals are
/// the payload itself; by-ref optionals are built in an alloca, otherwise
/// an aggregate { payload, non_null } is assembled with insertvalue.
fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const payload_ty = self.air.typeOf(ty_op.operand);
    const non_null_bit = self.context.intType(1).constAllOnes();
    if (!payload_ty.hasRuntimeBits()) return non_null_bit;
    const operand = try self.resolveInst(ty_op.operand);
    const optional_ty = self.air.typeOfIndex(inst);
    if (optional_ty.isPtrLikeOptional()) return operand;
    const llvm_optional_ty = try self.dg.llvmType(optional_ty);
    if (isByRef(optional_ty)) {
        const optional_ptr = self.buildAlloca(llvm_optional_ty);
        // Field 0 is the payload; field 1 is the non-null bit.
        const payload_ptr = self.builder.buildStructGEP(optional_ptr, 0, "");
        var ptr_ty_payload: Type.Payload.ElemType = .{
            .base = .{ .tag = .single_mut_pointer },
            .data = payload_ty,
        };
        const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
        self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
        const non_null_ptr = self.builder.buildStructGEP(optional_ptr, 1, "");
        _ = self.builder.buildStore(non_null_bit, non_null_ptr);
        return optional_ptr;
    }
    const partial = self.builder.buildInsertValue(llvm_optional_ty.getUndef(), operand, 0, "");
    return self.builder.buildInsertValue(partial, non_null_bit, 1, "");
}

/// Lowers `wrap_errunion_payload`: builds an error union holding a payload
/// and the zero ("no error") error code.
fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const payload_ty = self.air.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    if (!payload_ty.hasRuntimeBits()) {
        // Zero-bit payload: the error union is just the error value.
        return operand;
    }
    const inst_ty = self.air.typeOfIndex(inst);
    const ok_err_code =
    // i16 matches the backend's anyerror integer representation here;
    // constNull is the zero "no error" code.
    self.context.intType(16).constNull();
    const err_un_llvm_ty = try self.dg.llvmType(inst_ty);
    if (isByRef(inst_ty)) {
        const result_ptr = self.buildAlloca(err_un_llvm_ty);
        // Field 0 is the error code; field 1 is the payload.
        const err_ptr = self.builder.buildStructGEP(result_ptr, 0, "");
        _ = self.builder.buildStore(ok_err_code, err_ptr);
        const payload_ptr = self.builder.buildStructGEP(result_ptr, 1, "");
        var ptr_ty_payload: Type.Payload.ElemType = .{
            .base = .{ .tag = .single_mut_pointer },
            .data = payload_ty,
        };
        const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
        self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
        return result_ptr;
    }

    const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, 0, "");
    return self.builder.buildInsertValue(partial, operand, 1, "");
}

/// Lowers `wrap_errunion_err`: builds an error union holding an error code;
/// the payload is left undefined (see TODOs below).
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const err_un_ty = self.air.typeOfIndex(inst);
    const payload_ty = err_un_ty.errorUnionPayload();
    const operand = try self.resolveInst(ty_op.operand);
    if (!payload_ty.hasRuntimeBits()) {
        // Zero-bit payload: the error union is just the error value.
        return operand;
    }
    const err_un_llvm_ty = try self.dg.llvmType(err_un_ty);
    if (isByRef(err_un_ty)) {
        const result_ptr = self.buildAlloca(err_un_llvm_ty);
        const err_ptr = self.builder.buildStructGEP(result_ptr, 0, "");
        _ = self.builder.buildStore(operand, err_ptr);
        const payload_ptr = self.builder.buildStructGEP(result_ptr, 1, "");
        var ptr_ty_payload: Type.Payload.ElemType = .{
            .base = .{ .tag = .single_mut_pointer },
            .data = payload_ty,
        };
        const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
        // TODO store undef to payload_ptr
        _ = payload_ptr;
        _ = payload_ptr_ty;
        return result_ptr;
    }

    const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, 0, "");
    // TODO set payload bytes to undef
    return partial;
}

/// Lowers `min`: minnum for floats, smin/umin intrinsics for integers.
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return
    null;

    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    // scalarType so vectors of ints/floats pick the right intrinsic too.
    const scalar_ty = self.air.typeOfIndex(inst).scalarType();

    if (scalar_ty.isAnyFloat()) return self.builder.buildMinNum(lhs, rhs, "");
    if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, "");
    return self.builder.buildUMin(lhs, rhs, "");
}

/// Lowers `max`: maxnum for floats, smax/umax intrinsics for integers.
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const scalar_ty = self.air.typeOfIndex(inst).scalarType();

    if (scalar_ty.isAnyFloat()) return self.builder.buildMaxNum(lhs, rhs, "");
    if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, "");
    return self.builder.buildUMax(lhs, rhs, "");
}

/// Lowers `slice`: assembles a slice aggregate { ptr, len } from a pointer
/// and a length operand via insertvalue.
fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
    const ptr = try self.resolveInst(bin_op.lhs);
    const len = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    const llvm_slice_ty = try self.dg.llvmType(inst_ty);

    const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, "");
    return self.builder.buildInsertValue(partial, len, 1, "");
}

/// Lowers `add`: fadd for floats; nsw/nuw add for signed/unsigned integers
/// so that overflow (illegal behavior in Zig) is poison in LLVM.
fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);

    if (inst_ty.isAnyFloat()) return self.builder.buildFAdd(lhs, rhs, "");
    if (inst_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, "");
    return
self.builder.buildNUWAdd(lhs, rhs, "");
}

/// Lowers AIR `addwrap` (`+%`): a plain `add` with two's-complement wrapping,
/// so no overflow flags are attached.
fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    return self.builder.buildAdd(lhs, rhs, "");
}

/// Lowers AIR `add_sat` (`+|`): `sadd.sat`/`uadd.sat` for ints;
/// floats are not implemented yet.
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isAnyFloat()) return self.todo("saturating float add", .{});
    if (inst_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, "");
    return self.builder.buildUAddSat(lhs, rhs, "");
}

/// Lowers AIR `sub`: `fsub` for floats; integer subs carry `nsw`/`nuw` flags.
fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isAnyFloat()) return self.builder.buildFSub(lhs, rhs, "");
    if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, "");
    return self.builder.buildNUWSub(lhs, rhs, "");
}

/// Lowers AIR `subwrap` (`-%`): plain wrapping `sub`, no overflow flags.
fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    return self.builder.buildSub(lhs, rhs, "");
}

/// Lowers AIR `sub_sat` (`-|`): `ssub.sat`/`usub.sat` for ints;
/// floats are not implemented yet.
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
    if (inst_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, "");
    return self.builder.buildUSubSat(lhs, rhs, "");
}

/// Lowers AIR `mul`: `fmul` for floats; integer muls carry `nsw`/`nuw` flags.
fn airMul(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isAnyFloat()) return self.builder.buildFMul(lhs, rhs, "");
    if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, "");
    return self.builder.buildNUWMul(lhs, rhs, "");
}

/// Lowers AIR `mulwrap` (`*%`): plain wrapping `mul`, no overflow flags.
fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    return self.builder.buildMul(lhs, rhs, "");
}

/// Lowers AIR `mul_sat` (`*|`) via the fixed-point saturating multiply
/// builders; floats are not implemented yet.
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
    if (inst_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, "");
    return self.builder.buildUMulFixSat(lhs, rhs, "");
}

/// Lowers AIR `div_float`: always a plain `fdiv`.
fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    return self.builder.buildFDiv(lhs, rhs, "");
}

/// Lowers AIR `div_trunc`: for floats, `fdiv` then truncate toward zero;
/// for ints, `sdiv`/`udiv` (which already truncate toward zero).
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isRuntimeFloat()) {
        const result = self.builder.buildFDiv(lhs, rhs, "");
        return self.callTrunc(result, inst_ty);
    }
    if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
    return self.builder.buildUDiv(lhs, rhs, "");
}

/// Lowers AIR `div_floor`: for floats, `fdiv` then floor; for signed ints,
/// adjusts the truncated quotient downward when there is a nonzero remainder
/// and the operand signs differ; for unsigned ints, plain `udiv`.
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isRuntimeFloat()) {
        const result = self.builder.buildFDiv(lhs, rhs, "");
        return try self.callFloor(result, inst_ty);
    }
    if (inst_ty.isSignedInt()) {
        // const d = @divTrunc(a, b);
        // const r = @rem(a, b);
        // return if (r == 0) d else d - ((a < 0) ^ (b < 0));
        const result_llvm_ty = try self.dg.llvmType(inst_ty);
        const zero = result_llvm_ty.constNull();
        const div_trunc = self.builder.buildSDiv(lhs, rhs, "");
        const rem = self.builder.buildSRem(lhs, rhs, "");
        const rem_eq_0 = self.builder.buildICmp(.EQ, rem, zero, "");
        const a_lt_0 = self.builder.buildICmp(.SLT, lhs, zero, "");
        const b_lt_0 = self.builder.buildICmp(.SLT, rhs, zero, "");
        const a_b_xor = self.builder.buildXor(a_lt_0, b_lt_0, "");
        // Widen the i1 sign-mismatch bit so it can be subtracted from the quotient.
        const a_b_xor_ext = self.builder.buildZExt(a_b_xor, div_trunc.typeOf(), "");
        const d_sub_xor = self.builder.buildSub(div_trunc, a_b_xor_ext, "");
        return self.builder.buildSelect(rem_eq_0, div_trunc, d_sub_xor, "");
    }
    return self.builder.buildUDiv(lhs, rhs, "");
}

/// Lowers AIR `div_exact`: uses LLVM's `exact` division flags for ints
/// (UB if the division has a remainder); plain `fdiv` for floats.
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try
self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
return self.builder.buildExactUDiv(lhs, rhs, "");
}

/// Lowers AIR `rem` (@rem — result has the sign of the dividend):
/// maps directly to `frem`/`srem`/`urem`.
fn airRem(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    if (inst_ty.isRuntimeFloat()) return self.builder.buildFRem(lhs, rhs, "");
    if (inst_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
    return self.builder.buildURem(lhs, rhs, "");
}

/// Lowers AIR `mod` (@mod — result has the sign of the divisor).
/// Computed as `rem(rem(a, b) + b, b)`, selected only when the dividend is
/// negative; for unsigned ints this degenerates to `urem`.
fn airMod(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const inst_ty = self.air.typeOfIndex(inst);
    const inst_llvm_ty = try self.dg.llvmType(inst_ty);
    if (inst_ty.isRuntimeFloat()) {
        const a = self.builder.buildFRem(lhs, rhs, "");
        const b = self.builder.buildFAdd(a, rhs, "");
        const c = self.builder.buildFRem(b, rhs, "");
        const zero = inst_llvm_ty.constNull();
        const ltz = self.builder.buildFCmp(.OLT, lhs, zero, "");
        return self.builder.buildSelect(ltz, c, a, "");
    }
    if (inst_ty.isSignedInt()) {
        const a = self.builder.buildSRem(lhs, rhs, "");
        const b = self.builder.buildNSWAdd(a, rhs, "");
        const c = self.builder.buildSRem(b, rhs, "");
        const zero = inst_llvm_ty.constNull();
        const ltz = self.builder.buildICmp(.SLT, lhs, zero, "");
        return self.builder.buildSelect(ltz, c, a, "");
    }
    return self.builder.buildURem(lhs, rhs, "");
}

/// Lowers AIR `ptr_add` as an inbounds GEP. Single-item pointers to arrays
/// need a leading zero index to step into the array before indexing.
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const base_ptr = try self.resolveInst(bin_op.lhs);
    const offset = try self.resolveInst(bin_op.rhs);
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    if (ptr_ty.ptrSize() == .One) {
        // It's a pointer to an array, so according to LLVM we need an extra GEP index.
        const indices: [2]*const llvm.Value = .{
            self.context.intType(32).constNull(), offset,
        };
        return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    } else {
        const indices: [1]*const llvm.Value = .{offset};
        return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    }
}

/// Lowers AIR `ptr_sub`: same as `ptr_add` but with the offset negated.
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const base_ptr = try self.resolveInst(bin_op.lhs);
    const offset = try self.resolveInst(bin_op.rhs);
    const negative_offset = self.builder.buildNeg(offset, "");
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    if (ptr_ty.ptrSize() == .One) {
        // It's a pointer to an array, so according to LLVM we need an extra GEP index.
        const indices: [2]*const llvm.Value = .{
            self.context.intType(32).constNull(), negative_offset,
        };
        return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    } else {
        const indices: [1]*const llvm.Value = .{negative_offset};
        return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
    }
}

/// Shared lowering for `add_with_overflow` / `sub_with_overflow` /
/// `mul_with_overflow`: calls the matching `llvm.*.with.overflow` intrinsic,
/// stores the wrapped result through `ptr`, and returns the overflow bit.
fn airOverflow(
    self: *FuncGen,
    inst: Air.Inst.Index,
    signed_intrinsic: []const u8,
    unsigned_intrinsic: []const u8,
) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const ptr = try self.resolveInst(pl_op.operand);
    const lhs = try self.resolveInst(extra.lhs);
    const rhs = try self.resolveInst(extra.rhs);
    const ptr_ty = self.air.typeOf(pl_op.operand);
    const lhs_ty = self.air.typeOf(extra.lhs);
    const intrinsic_name = if (lhs_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic;
    const llvm_lhs_ty = try self.dg.llvmType(lhs_ty);
    const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
    // NOTE(review): this intrinsic call uses the .Fast calling convention while
    // airBreakpoint calls its intrinsic with .C — confirm which is intended.
    const result_struct = self.builder.buildCall(llvm_fn, &[_]*const llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
    // The intrinsic returns { result, i1 overflow }.
    const result = self.builder.buildExtractValue(result_struct, 0, "");
    const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
    self.store(ptr, ptr_ty, result, .NotAtomic);
    return overflow_bit;
}

/// Lowers AIR `shl_with_overflow`: shifts, then shifts back and compares with
/// the original lhs — a mismatch means bits were lost, i.e. overflow.
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const ptr = try self.resolveInst(pl_op.operand);
    const lhs = try self.resolveInst(extra.lhs);
    const rhs = try self.resolveInst(extra.rhs);
    const ptr_ty = self.air.typeOf(pl_op.operand);
    const lhs_ty = self.air.typeOf(extra.lhs);
    const rhs_ty = self.air.typeOf(extra.rhs);
    const tg = self.dg.module.getTarget();
    // LLVM requires both shift operands to have the same width.
    const casted_rhs = if (rhs_ty.bitSize(tg)
< lhs_ty.bitSize(tg))
    self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "")
else
    rhs;
const result = self.builder.buildShl(lhs, casted_rhs, "");
// Shift back with the sign-appropriate right shift; any lost bits show up
// as a difference from the original lhs.
const reconstructed = if (lhs_ty.isSignedInt())
    self.builder.buildAShr(result, casted_rhs, "")
else
    self.builder.buildLShr(result, casted_rhs, "");
const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
self.store(ptr, ptr_ty, result, .NotAtomic);
return overflow_bit;
}

/// Lowers AIR `bit_and`.
fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    return self.builder.buildAnd(lhs, rhs, "");
}

/// Lowers AIR `bit_or`.
fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    return self.builder.buildOr(lhs, rhs, "");
}

/// Lowers AIR `xor`.
fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    return self.builder.buildXor(lhs, rhs, "");
}

/// Lowers AIR `shl_exact` (@shlExact): `shl` with `nsw`/`nuw` so shifting
/// out significant bits is undefined behavior. The rhs is zero-extended to
/// the lhs width because LLVM requires matching shift operand types.
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const lhs_type = self.air.typeOf(bin_op.lhs);
    const tg = self.dg.module.getTarget();
    const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg))
        self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "")
    else
        rhs;
    if (lhs_type.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, "");
    return self.builder.buildNUWShl(lhs, casted_rhs, "");
}

/// Lowers AIR `shl`: plain `shl`, rhs widened to the lhs width if needed.
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const lhs_type = self.air.typeOf(bin_op.lhs);
    const tg = self.dg.module.getTarget();
    const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg))
        self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "")
    else
        rhs;
    return self.builder.buildShl(lhs, casted_rhs, "");
}

/// Lowers AIR `shl_sat` (`<<|`): `sshl.sat`/`ushl.sat`, rhs widened if needed.
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const lhs_type = self.air.typeOf(bin_op.lhs);
    const tg = self.dg.module.getTarget();
    const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg))
        self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "")
    else
        rhs;
    if (lhs_type.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, "");
    return self.builder.buildUShlSat(lhs, casted_rhs, "");
}

/// Lowers AIR `shr` / `shr_exact`: arithmetic shift for signed, logical for
/// unsigned, with the `exact` variants when `is_exact` is set.
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const lhs_type = self.air.typeOf(bin_op.lhs);
    const tg = self.dg.module.getTarget();
    const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg))
        self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "")
    else
        rhs;
    const is_signed_int = self.air.typeOfIndex(inst).isSignedInt();
    if (is_exact) {
        if (is_signed_int) {
            return self.builder.buildAShrExact(lhs, casted_rhs, "");
        } else {
            return self.builder.buildLShrExact(lhs, casted_rhs, "");
        }
    } else {
        if (is_signed_int) {
            return self.builder.buildAShr(lhs, casted_rhs, "");
        } else {
            return self.builder.buildLShr(lhs, casted_rhs, "");
        }
    }
}

/// Lowers AIR `intcast`: widening uses sext/zext based on the operand's
/// signedness, narrowing truncates, and equal widths pass through unchanged.
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const target = self.dg.module.getTarget();
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const dest_ty = self.air.typeOfIndex(inst);
    const dest_info = dest_ty.intInfo(target);
    const dest_llvm_ty = try self.dg.llvmType(dest_ty);
    const operand = try self.resolveInst(ty_op.operand);
    const operand_ty = self.air.typeOf(ty_op.operand);
    const operand_info = operand_ty.intInfo(target);
    if (operand_info.bits < dest_info.bits) {
        switch (operand_info.signedness) {
            .signed => return self.builder.buildSExt(operand, dest_llvm_ty, ""),
            .unsigned => return self.builder.buildZExt(operand, dest_llvm_ty, ""),
        }
    } else if (operand_info.bits > dest_info.bits) {
        return self.builder.buildTrunc(operand, dest_llvm_ty, "");
    } else {
        return operand;
    }
}

/// Lowers AIR `trunc` (@truncate): integer truncation to the result type.
fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
    return self.builder.buildTrunc(operand, dest_llvm_ty, "");
}

/// Lowers AIR `fptrunc`: float narrowing cast.
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
    return self.builder.buildFPTrunc(operand, dest_llvm_ty, "");
}

/// Lowers AIR `fpext`: float widening cast.
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
return self.builder.buildFPExt(operand, dest_llvm_ty, "");
}

/// Lowers AIR `ptrtoint`.
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
    return self.builder.buildPtrToInt(operand, dest_llvm_ty, "");
}

/// Lowers AIR `bitcast`. Dispatches on whether the operand/result are
/// by-ref (represented as pointers) and on special vector<->array cases;
/// falls back to a plain LLVM bitcast for by-value scalars.
fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand = try self.resolveInst(ty_op.operand);
    const operand_ty = self.air.typeOf(ty_op.operand);
    const inst_ty = self.air.typeOfIndex(inst);
    const operand_is_ref = isByRef(operand_ty);
    const result_is_ref = isByRef(inst_ty);
    const llvm_dest_ty = try self.dg.llvmType(inst_ty);
    if (operand_is_ref and result_is_ref) {
        // They are both pointers; just do a bitcast on the pointers :)
        return self.builder.buildBitCast(operand, llvm_dest_ty.pointerType(0), "");
    }
    if (operand_ty.zigTypeTag() == .Int and inst_ty.zigTypeTag() == .Pointer) {
        return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
    }
    if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
        // Vector -> array: spill the vector into a by-ref array result.
        const target = self.dg.module.getTarget();
        const elem_ty = operand_ty.childType();
        if (!result_is_ref) {
            return self.dg.todo("implement bitcast vector to non-ref array", .{});
        }
        const array_ptr = self.buildAlloca(llvm_dest_ty);
        // Safe to reinterpret memory only when elements have no ABI padding.
        const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
        if (bitcast_ok) {
            const llvm_vector_ty = try self.dg.llvmType(operand_ty);
            const casted_ptr = self.builder.buildBitCast(array_ptr, llvm_vector_ty.pointerType(0), "");
            _ = self.builder.buildStore(operand, casted_ptr);
        } else {
            // If the ABI size of the element type is not evenly divisible by size in bits;
            // a simple bitcast will not work, and we fall back to extractelement.
            const llvm_usize = try self.dg.llvmType(Type.usize);
            const llvm_u32 = self.context.intType(32);
            const zero = llvm_usize.constNull();
            const vector_len = operand_ty.arrayLen();
            var i: u64 = 0;
            while (i < vector_len) : (i += 1) {
                const index_usize = llvm_usize.constInt(i, .False);
                const index_u32 = llvm_u32.constInt(i, .False);
                const indexes: [2]*const llvm.Value = .{ zero, index_usize };
                const elem_ptr = self.builder.buildInBoundsGEP(array_ptr, &indexes, indexes.len, "");
                const elem = self.builder.buildExtractElement(operand, index_u32, "");
                _ = self.builder.buildStore(elem, elem_ptr);
            }
        }
        return array_ptr;
    } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
        // Array (by-ref) -> vector (by-value).
        const target = self.dg.module.getTarget();
        const elem_ty = operand_ty.childType();
        const llvm_vector_ty = try self.dg.llvmType(inst_ty);
        if (!operand_is_ref) {
            return self.dg.todo("implement bitcast non-ref array to vector", .{});
        }
        const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
        if (bitcast_ok) {
            const llvm_vector_ptr_ty = llvm_vector_ty.pointerType(0);
            const casted_ptr = self.builder.buildBitCast(operand, llvm_vector_ptr_ty, "");
            const vector = self.builder.buildLoad(casted_ptr, "");
            // The array is aligned to the element's alignment, while the vector might have a completely
            // different alignment. This means we need to enforce the alignment of this load.
            vector.setAlignment(elem_ty.abiAlignment(target));
            return vector;
        } else {
            // If the ABI size of the element type is not evenly divisible by size in bits;
            // a simple bitcast will not work, and we fall back to extractelement.
            const llvm_usize = try self.dg.llvmType(Type.usize);
            const llvm_u32 = self.context.intType(32);
            const zero = llvm_usize.constNull();
            const vector_len = operand_ty.arrayLen();
            var vector = llvm_vector_ty.getUndef();
            var i: u64 = 0;
            while (i < vector_len) : (i += 1) {
                const index_usize = llvm_usize.constInt(i, .False);
                const index_u32 = llvm_u32.constInt(i, .False);
                const indexes: [2]*const llvm.Value = .{ zero, index_usize };
                const elem_ptr = self.builder.buildInBoundsGEP(operand, &indexes, indexes.len, "");
                const elem = self.builder.buildLoad(elem_ptr, "");
                vector = self.builder.buildInsertElement(vector, elem, index_u32, "");
            }
            return vector;
        }
    }
    if (operand_is_ref) {
        // Bitcast the operand pointer, then load.
        const casted_ptr = self.builder.buildBitCast(operand, llvm_dest_ty.pointerType(0), "");
        return self.builder.buildLoad(casted_ptr, "");
    }
    if (result_is_ref) {
        // Bitcast the result pointer, then store.
        const result_ptr = self.buildAlloca(llvm_dest_ty);
        const operand_llvm_ty = try self.dg.llvmType(operand_ty);
        const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), "");
        _ = self.builder.buildStore(operand, casted_ptr);
        return result_ptr;
    }
    return self.builder.buildBitCast(operand, llvm_dest_ty, "");
}

/// Lowers AIR `bool_to_int`: a no-op at the LLVM level (bool is already i1).
fn airBoolToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    return operand;
}

/// Lowers AIR `arg`: consumes the next LLVM function parameter. The extra
/// alloca+store in the by-value branch exists for future debug info; the
/// returned value is still the raw argument.
fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const arg_val = self.args[self.arg_index];
    self.arg_index += 1;
    const inst_ty = self.air.typeOfIndex(inst);
    if (isByRef(inst_ty)) {
        // TODO declare debug variable
        return arg_val;
    } else {
        const ptr_val = self.buildAlloca(try self.dg.llvmType(inst_ty));
        _ = self.builder.buildStore(arg_val, ptr_val);
        // TODO declare debug variable
        return arg_val;
    }
}

/// Lowers AIR `alloc`: stack-allocates the pointee type with the pointer
/// type's alignment; zero-bit pointees lower to a "void pointer" constant.
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
if (!pointee_type.isFnOrHasRuntimeBits()) return self.dg.lowerPtrToVoid(ptr_ty);
const pointee_llvm_ty = try self.dg.llvmType(pointee_type);
const alloca_inst = self.buildAlloca(pointee_llvm_ty);
const target = self.dg.module.getTarget();
const alignment = ptr_ty.ptrAlignment(target);
alloca_inst.setAlignment(alignment);
return alloca_inst;
}

/// Lowers AIR `ret_ptr`: returns the sret pointer when the calling
/// convention provides one, otherwise allocates a result slot.
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    const ptr_ty = self.air.typeOfIndex(inst);
    const ret_ty = ptr_ty.childType();
    if (!ret_ty.isFnOrHasRuntimeBits()) return null;
    if (self.ret_ptr) |ret_ptr| return ret_ptr;
    const ret_llvm_ty = try self.dg.llvmType(ret_ty);
    const target = self.dg.module.getTarget();
    const alloca_inst = self.buildAlloca(ret_llvm_ty);
    alloca_inst.setAlignment(ptr_ty.ptrAlignment(target));
    return alloca_inst;
}

/// Use this instead of builder.buildAlloca, because this function makes sure to
/// put the alloca instruction at the top of the function!
/// It temporarily repositions the builder at the entry block (before the
/// first instruction, if any) and restores the previous insert point after.
fn buildAlloca(self: *FuncGen, llvm_ty: *const llvm.Type) *const llvm.Value {
    const prev_block = self.builder.getInsertBlock();
    const entry_block = self.llvm_func.getFirstBasicBlock().?;
    if (entry_block.getFirstInstruction()) |first_inst| {
        self.builder.positionBuilder(entry_block, first_inst);
    } else {
        self.builder.positionBuilderAtEnd(entry_block);
    }
    const alloca = self.builder.buildAlloca(llvm_ty, "");
    self.builder.positionBuilderAtEnd(prev_block);
    return alloca;
}

/// Lowers AIR `store`. Storing an `undef` value becomes a memset of the
/// 0xaa safety pattern over the destination instead of a normal store.
fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const dest_ptr = try self.resolveInst(bin_op.lhs);
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    // TODO Sema should emit a different instruction when the store should
    // possibly do the safety 0xaa bytes for undefined.
    const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
    if (val_is_undef) {
        const elem_ty = ptr_ty.childType();
        const target = self.dg.module.getTarget();
        const elem_size = elem_ty.abiSize(target);
        const u8_llvm_ty = self.context.intType(8);
        const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
        const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
        const fill_char = u8_llvm_ty.constInt(0xaa, .False);
        const dest_ptr_align = ptr_ty.ptrAlignment(target);
        const usize_llvm_ty = try self.dg.llvmType(Type.usize);
        const len = usize_llvm_ty.constInt(elem_size, .False);
        _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
        if (self.dg.module.comp.bin_file.options.valgrind) {
            // TODO generate valgrind client request to mark byte range as undefined
            // see gen_valgrind_undef() in codegen.cpp
        }
    } else {
        const src_operand = try self.resolveInst(bin_op.rhs);
        self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic);
    }
    return null;
}

/// Lowers AIR `load`. Volatile loads are emitted even when the result is
/// unused; non-volatile unused loads are skipped.
fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const ptr_ty = self.air.typeOf(ty_op.operand);
    if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
    const ptr = try self.resolveInst(ty_op.operand);
    return self.load(ptr, ptr_ty);
}

/// Lowers AIR `breakpoint` via the `llvm.debugtrap` intrinsic.
fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    _ = inst;
    const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{});
    _ = self.builder.buildCall(llvm_fn, undefined, 0, .C, .Auto, "");
    return null;
}

/// Lowers AIR `ret_addr`: `llvm.returnaddress(0)` cast to usize.
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    _ = inst;
    const i32_zero = self.context.intType(32).constNull();
    const usize_llvm_ty = try self.dg.llvmType(Type.usize);
    const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{});
    // NOTE(review): called with .Fast CC while airBreakpoint uses .C for its
    // intrinsic call — confirm which convention is intended for intrinsics.
    const ptr_val = self.builder.buildCall(llvm_fn, &[_]*const llvm.Value{i32_zero}, 1, .Fast, .Auto, "");
    return self.builder.buildPtrToInt(ptr_val, usize_llvm_ty, "");
}

/// Lowers AIR `fence` with the requested atomic ordering.
fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const atomic_order = self.air.instructions.items(.data)[inst].fence;
    const llvm_memory_order = toLlvmAtomicOrdering(atomic_order);
    const single_threaded = llvm.Bool.fromBool(self.single_threaded);
    _ = self.builder.buildFence(llvm_memory_order, single_threaded, "");
    return null;
}

/// Lowers AIR `cmpxchg` (weak or strong). Operands narrower than the target's
/// atomic ABI width are widened (sext/zext by signedness) first and the result
/// payload truncated back. Returns the optional result: null on success,
/// the old value on failure.
fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*const llvm.Value {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
    var ptr = try self.resolveInst(extra.ptr);
    var expected_value = try self.resolveInst(extra.expected_value);
    var new_value = try self.resolveInst(extra.new_value);
    const operand_ty = self.air.typeOf(extra.ptr).elemType();
    const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
    if (opt_abi_ty) |abi_ty| {
        // operand needs widening and truncating
        ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
        if (operand_ty.isSignedInt()) {
            expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
            new_value = self.builder.buildSExt(new_value, abi_ty, "");
        } else {
            expected_value = self.builder.buildZExt(expected_value, abi_ty, "");
            new_value = self.builder.buildZExt(new_value, abi_ty, "");
        }
    }
    const result = self.builder.buildAtomicCmpXchg(
        ptr,
        expected_value,
        new_value,
        toLlvmAtomicOrdering(extra.successOrder()),
        toLlvmAtomicOrdering(extra.failureOrder()),
        llvm.Bool.fromBool(self.single_threaded),
    );
    result.setWeak(llvm.Bool.fromBool(is_weak));
    const optional_ty = self.air.typeOfIndex(inst);
    // cmpxchg yields { old value, i1 success }.
    var payload = self.builder.buildExtractValue(result, 0, "");
    if (opt_abi_ty != null) {
        payload = self.builder.buildTrunc(payload, try self.dg.llvmType(operand_ty), "");
    }
    const success_bit = self.builder.buildExtractValue(result, 1, "");
    if (optional_ty.isPtrLikeOptional()) {
        // Pointer-like optional: success maps to a null pointer result.
        return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
    }
    const
optional_llvm_ty = try self.dg.llvmType(optional_ty); const non_null_bit = self.builder.buildNot(success_bit, ""); const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, ""); return self.builder.buildInsertValue(partial, non_null_bit, 1, ""); } fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try self.resolveInst(pl_op.operand); const ptr_ty = self.air.typeOf(pl_op.operand); const operand_ty = ptr_ty.elemType(); const operand = try self.resolveInst(extra.operand); const is_signed_int = operand_ty.isSignedInt(); const is_float = operand_ty.isRuntimeFloat(); const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); const single_threaded = llvm.Bool.fromBool(self.single_threaded); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, op == .Xchg); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating or bitcasting. const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); const casted_operand = if (is_float) self.builder.buildBitCast(operand, abi_ty, "") else if (is_signed_int) self.builder.buildSExt(operand, abi_ty, "") else self.builder.buildZExt(operand, abi_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( op, casted_ptr, casted_operand, ordering, single_threaded, ); const operand_llvm_ty = try self.dg.llvmType(operand_ty); if (is_float) { return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); } else { return self.builder.buildTrunc(uncasted_result, operand_llvm_ty, ""); } } if (operand.typeOf().getTypeKind() != .Pointer) { return self.builder.buildAtomicRmw(op, ptr, operand, ordering, single_threaded); } // It's a pointer but we need to treat it as an int. 
const usize_llvm_ty = try self.dg.llvmType(Type.usize); const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), ""); const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( op, casted_ptr, casted_operand, ordering, single_threaded, ); const operand_llvm_ty = try self.dg.llvmType(operand_ty); return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); } fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.air.typeOf(atomic_load.ptr); if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); const operand_ty = ptr_ty.elemType(); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); const load_inst = (try self.load(casted_ptr, ptr_ty)).?; load_inst.setOrdering(ordering); return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), ""); } const load_inst = (try self.load(ptr, ptr_ty)).?; load_inst.setOrdering(ordering); return load_inst; } fn airAtomicStore( self: *FuncGen, inst: Air.Inst.Index, ordering: llvm.AtomicOrdering, ) !?*const llvm.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); if (!operand_ty.isFnOrHasRuntimeBits()) return null; var ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); if (operand_ty.isSignedInt()) { 
            element = self.builder.buildSExt(element, abi_ty, "");
        } else {
            element = self.builder.buildZExt(element, abi_ty, "");
        }
    }
    self.store(ptr, ptr_ty, element, ordering);
    return null;
}

/// Lowers AIR `memset` to the llvm.memset intrinsic. If the fill value is
/// comptime-known undef, the region is filled with 0xaa instead (matching
/// Zig's debug "undefined" byte pattern).
fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const dest_ptr = try self.resolveInst(pl_op.operand);
    const ptr_ty = self.air.typeOf(pl_op.operand);
    const value = try self.resolveInst(extra.lhs);
    const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndefDeep() else false;
    const len = try self.resolveInst(extra.rhs);
    const u8_llvm_ty = self.context.intType(8);
    const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
    const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
    const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value;
    const target = self.dg.module.getTarget();
    const dest_ptr_align = ptr_ty.ptrAlignment(target);
    _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
    if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) {
        // TODO generate valgrind client request to mark byte range as undefined
        // see gen_valgrind_undef() in codegen.cpp
    }
    return null;
}

/// Lowers AIR `memcpy` to the llvm.memcpy intrinsic; the copy is marked
/// volatile if either the source or destination pointer is volatile.
fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const dest_ptr = try self.resolveInst(pl_op.operand);
    const dest_ptr_ty = self.air.typeOf(pl_op.operand);
    const src_ptr = try self.resolveInst(extra.lhs);
    const src_ptr_ty = self.air.typeOf(extra.lhs);
    const len = try self.resolveInst(extra.rhs);
    const llvm_ptr_u8 = self.context.intType(8).pointerType(0);
    const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, llvm_ptr_u8, "");
    const src_ptr_u8 = self.builder.buildBitCast(src_ptr, llvm_ptr_u8, "");
    const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
    const target = self.dg.module.getTarget();
    _ = self.builder.buildMemCpy(
        dest_ptr_u8,
        dest_ptr_ty.ptrAlignment(target),
        src_ptr_u8,
        src_ptr_ty.ptrAlignment(target),
        len,
        is_volatile,
    );
    return null;
}

/// Lowers AIR `set_union_tag`: stores the new tag through the union pointer.
/// A zero-sized tag emits nothing; a zero-sized payload stores the tag
/// directly; otherwise a struct GEP locates the tag field, whose index depends
/// on the tag/payload alignment ordering chosen by the union layout.
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const un_ty = self.air.typeOf(bin_op.lhs).childType();
    const target = self.dg.module.getTarget();
    const layout = un_ty.unionGetLayout(target);
    if (layout.tag_size == 0) return null;
    const union_ptr = try self.resolveInst(bin_op.lhs);
    const new_tag = try self.resolveInst(bin_op.rhs);
    if (layout.payload_size == 0) {
        _ = self.builder.buildStore(new_tag, union_ptr);
        return null;
    }
    const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
    const tag_field_ptr = self.builder.buildStructGEP(union_ptr, tag_index, "");
    _ = self.builder.buildStore(new_tag, tag_field_ptr);
    return null;
}

/// Lowers AIR `get_union_tag`: reads the tag of a tagged union, handling both
/// by-reference (load through a GEP) and by-value (extractvalue)
/// representations, and the degenerate zero-payload case.
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const un_ty = self.air.typeOf(ty_op.operand);
    const target = self.dg.module.getTarget();
    const layout = un_ty.unionGetLayout(target);
    if (layout.tag_size == 0) return null;
    const union_handle = try self.resolveInst(ty_op.operand);
    if (isByRef(un_ty)) {
        if (layout.payload_size == 0) {
            return self.builder.buildLoad(union_handle, "");
        }
        const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
        const tag_field_ptr = self.builder.buildStructGEP(union_handle, tag_index, "");
        return self.builder.buildLoad(tag_field_ptr, "");
    } else {
        if (layout.payload_size == 0) {
            return union_handle;
        }
        const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
        return self.builder.buildExtractValue(union_handle, tag_index, "");
    }
}

/// Lowers a unary AIR instruction to a call of the named LLVM intrinsic,
/// overloaded on the operand's LLVM type.
fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name:
[]const u8) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const operand_ty = self.air.typeOf(un_op);
    const operand_llvm_ty = try self.dg.llvmType(operand_ty);
    const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
    const params = [_]*const llvm.Value{operand};
    return self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
}

/// Lowers `@clz`/`@ctz` via the named LLVM intrinsic (llvm.ctlz/llvm.cttz).
/// The second intrinsic argument (is_zero_undef) is i1 false, so a zero input
/// is defined. The intrinsic returns the operand's width; the value is then
/// truncated or zero-extended to the AIR result type's width.
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand_ty = self.air.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    const llvm_i1 = self.context.intType(1);
    const operand_llvm_ty = try self.dg.llvmType(operand_ty);
    const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
    const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() };
    const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
    const result_ty = self.air.typeOfIndex(inst);
    const result_llvm_ty = try self.dg.llvmType(result_ty);

    const target = self.dg.module.getTarget();
    const bits = operand_ty.intInfo(target).bits;
    const result_bits = result_ty.intInfo(target).bits;
    if (bits > result_bits) {
        return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
    } else if (bits < result_bits) {
        return self.builder.buildZExt(wrong_size_result, result_llvm_ty, "");
    } else {
        return wrong_size_result;
    }
}

/// Lowers a single-operand bit-manipulation instruction (e.g. popcount) to the
/// named LLVM intrinsic, then resizes the result to the AIR result type's
/// width via trunc/zext as needed.
fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand_ty = self.air.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    const params = [_]*const llvm.Value{operand};
    const operand_llvm_ty = try self.dg.llvmType(operand_ty);
    const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
    const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
    const result_ty = self.air.typeOfIndex(inst);
    const result_llvm_ty = try self.dg.llvmType(result_ty);

    const target = self.dg.module.getTarget();
    const bits = operand_ty.intInfo(target).bits;
    const result_bits = result_ty.intInfo(target).bits;
    if (bits > result_bits) {
        return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
    } else if (bits < result_bits) {
        return self.builder.buildZExt(wrong_size_result, result_llvm_ty, "");
    } else {
        return wrong_size_result;
    }
}

/// Lowers `@byteSwap` via llvm.bswap. The intrinsic requires a bit width that
/// is a multiple of 16, so odd-byte-count integers (bits % 16 == 8, asserted
/// to be byte-multiples) are zero-extended by one byte and pre-shifted left 8
/// so the final trunc yields the correct swapped value.
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const target = self.dg.module.getTarget();
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand_ty = self.air.typeOf(ty_op.operand);
    var bits = operand_ty.intInfo(target).bits;
    assert(bits % 8 == 0);

    var operand = try self.resolveInst(ty_op.operand);
    var operand_llvm_ty = try self.dg.llvmType(operand_ty);

    if (bits % 16 == 8) {
        // If not an even byte-multiple, we need zero-extend + shift-left 1 byte
        // The truncated result at the end will be the correct bswap
        operand_llvm_ty = self.context.intType(bits + 8);
        const extended = self.builder.buildZExt(operand, operand_llvm_ty, "");
        operand = self.builder.buildShl(extended, operand_llvm_ty.constInt(8, .False), "");
        bits = bits + 8;
    }

    const params = [_]*const llvm.Value{operand};
    const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
    const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
    const result_ty = self.air.typeOfIndex(inst);
    const result_llvm_ty = try self.dg.llvmType(result_ty);

    const result_bits = result_ty.intInfo(target).bits;
    if (bits > result_bits) {
        return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
    } else
if (bits < result_bits) { return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); } else { return wrong_size_result; } } fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.air.typeOf(un_op); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{ try enum_ty.getOwnerDecl().getFullyQualifiedName(arena), }); const llvm_fn = try self.getEnumTagNameFunction(enum_ty, llvm_fn_name); const params = [_]*const llvm.Value{operand}; return self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, ""); } fn getEnumTagNameFunction( self: *FuncGen, enum_ty: Type, llvm_fn_name: [:0]const u8, ) !*const llvm.Value { // TODO: detect when the type changes and re-emit this function. 
    // Return the cached helper if it was already emitted for this module.
    if (self.dg.object.llvm_module.getNamedFunction(llvm_fn_name)) |llvm_fn| {
        return llvm_fn;
    }

    const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
    const llvm_ret_ty = try self.dg.llvmType(slice_ty);
    const usize_llvm_ty = try self.dg.llvmType(Type.usize);
    const target = self.dg.module.getTarget();
    const slice_alignment = slice_ty.abiAlignment(target);

    var int_tag_type_buffer: Type.Payload.Bits = undefined;
    const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
    const param_types = [_]*const llvm.Type{try self.dg.llvmType(int_tag_ty)};

    const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
    const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
    fn_val.setLinkage(.Internal);
    fn_val.setFunctionCallConv(.Fast);
    self.dg.addCommonFnAttributes(fn_val);

    // The builder is shared with the function currently being lowered; save
    // its position (and debug location, unless stripped) and restore on exit.
    const prev_block = self.builder.getInsertBlock();
    const prev_debug_location = self.builder.getCurrentDebugLocation2();
    defer {
        self.builder.positionBuilderAtEnd(prev_block);
        if (!self.dg.module.comp.bin_file.options.strip) {
            self.builder.setCurrentDebugLocation2(prev_debug_location);
        }
    }

    const entry_block = self.dg.context.appendBasicBlock(fn_val, "Entry");
    self.builder.positionBuilderAtEnd(entry_block);
    self.builder.clearCurrentDebugLocation();

    const fields = enum_ty.enumFields();
    const bad_value_block = self.dg.context.appendBasicBlock(fn_val, "BadValue");
    const tag_int_value = fn_val.getParam(0);
    const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count()));

    // GEP indices [0, 0]: decay a pointer-to-array global into a pointer to
    // its first element.
    const array_ptr_indices = [_]*const llvm.Value{
        usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
    };

    // One switch case per enum field: a private constant string global plus a
    // constant { ptr, len } slice global, loaded and returned in its own block.
    for (fields.keys()) |name, field_index| {
        const str_init = self.dg.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
        const str_global = self.dg.object.llvm_module.addGlobal(str_init.typeOf(), "");
        str_global.setInitializer(str_init);
        str_global.setLinkage(.Private);
        str_global.setGlobalConstant(.True);
        str_global.setUnnamedAddr(.True);
        str_global.setAlignment(1);

        const slice_fields = [_]*const llvm.Value{
            str_global.constInBoundsGEP(&array_ptr_indices, array_ptr_indices.len),
            usize_llvm_ty.constInt(name.len, .False),
        };
        const slice_init = llvm_ret_ty.constNamedStruct(&slice_fields, slice_fields.len);
        const slice_global = self.dg.object.llvm_module.addGlobal(slice_init.typeOf(), "");
        slice_global.setInitializer(slice_init);
        slice_global.setLinkage(.Private);
        slice_global.setGlobalConstant(.True);
        slice_global.setUnnamedAddr(.True);
        slice_global.setAlignment(slice_alignment);

        const return_block = self.dg.context.appendBasicBlock(fn_val, "Name");
        // Materialize the LLVM constant for this field's integer tag value.
        const this_tag_int_value = int: {
            var tag_val_payload: Value.Payload.U32 = .{
                .base = .{ .tag = .enum_field_index },
                .data = @intCast(u32, field_index),
            };
            break :int try self.dg.genTypedValue(.{
                .ty = enum_ty,
                .val = Value.initPayload(&tag_val_payload.base),
            });
        };
        switch_instr.addCase(this_tag_int_value, return_block);

        self.builder.positionBuilderAtEnd(return_block);
        const loaded = self.builder.buildLoad(slice_global, "");
        loaded.setAlignment(slice_alignment);
        _ = self.builder.buildRet(loaded);
    }

    self.builder.positionBuilderAtEnd(bad_value_block);
    _ = self.builder.buildUnreachable();
    return fn_val;
}

/// Lowers `@errorName`: indexes the lazily-created global error-name table
/// (see getErrorNameTable) by the error value and loads the name slice.
fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const error_name_table_ptr = try self.getErrorNameTable();
    const error_name_table = self.builder.buildLoad(error_name_table_ptr, "");
    const indices = [_]*const llvm.Value{operand};
    const error_name_ptr = self.builder.buildInBoundsGEP(error_name_table, &indices, indices.len, "");
    return self.builder.buildLoad(error_name_ptr, "");
}

/// Lowers AIR `splat`: broadcasts a scalar to a vector by inserting it into a
/// one-element vector and shufflevector-ing with an all-zero mask.
fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const scalar = try
self.resolveInst(ty_op.operand);
    const scalar_ty = self.air.typeOf(ty_op.operand);
    const vector_ty = self.air.typeOfIndex(inst);
    const len = vector_ty.vectorLen();
    const scalar_llvm_ty = try self.dg.llvmType(scalar_ty);
    const op_llvm_ty = scalar_llvm_ty.vectorType(1);
    const u32_llvm_ty = self.context.intType(32);
    const mask_llvm_ty = u32_llvm_ty.vectorType(len);
    const undef_vector = op_llvm_ty.getUndef();
    const u32_zero = u32_llvm_ty.constNull();
    const op_vector = self.builder.buildInsertElement(undef_vector, scalar, u32_zero, "");
    // Zero mask replicates element 0 of op_vector into every result lane.
    return self.builder.buildShuffleVector(op_vector, undef_vector, mask_llvm_ty.constNull(), "");
}

/// Lowers an aggregate initialization instruction. Vectors are built with a
/// chain of insertelement; tuples are built either in an alloca (by-ref) or
/// via insertvalue (by-value), skipping comptime-known fields; arrays are
/// always by-ref and filled element-by-element through GEPs.
fn airVectorInit(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const result_ty = self.air.typeOfIndex(inst);
    const len = @intCast(usize, result_ty.arrayLen());
    const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
    const llvm_result_ty = try self.dg.llvmType(result_ty);

    switch (result_ty.zigTypeTag()) {
        .Vector => {
            const llvm_u32 = self.context.intType(32);

            var vector = llvm_result_ty.getUndef();
            for (elements) |elem, i| {
                const index_u32 = llvm_u32.constInt(i, .False);
                const llvm_elem = try self.resolveInst(elem);
                vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
            }
            return vector;
        },
        .Struct => {
            const tuple = result_ty.castTag(.tuple).?.data;

            if (isByRef(result_ty)) {
                const llvm_u32 = self.context.intType(32);
                const alloca_inst = self.buildAlloca(llvm_result_ty);
                const target = self.dg.module.getTarget();
                alloca_inst.setAlignment(result_ty.abiAlignment(target));

                var indices: [2]*const llvm.Value = .{ llvm_u32.constNull(), undefined };
                // llvm_i counts only runtime fields; comptime-known fields
                // (value tag != .unreachable_value) occupy no LLVM field slot.
                var llvm_i: u32 = 0;
                for (elements) |elem, i| {
                    if (tuple.values[i].tag() != .unreachable_value) continue;
                    const field_ty = tuple.types[i];
                    const llvm_elem = try self.resolveInst(elem);
                    indices[1] = llvm_u32.constInt(llvm_i, .False);
                    llvm_i += 1;
                    const field_ptr = self.builder.buildInBoundsGEP(alloca_inst, &indices, indices.len, "");
                    const store_inst = self.builder.buildStore(llvm_elem, field_ptr);
                    store_inst.setAlignment(field_ty.abiAlignment(target));
                }
                return alloca_inst;
            } else {
                var result = llvm_result_ty.getUndef();
                var llvm_i: u32 = 0;
                for (elements) |elem, i| {
                    if (tuple.values[i].tag() != .unreachable_value) continue;
                    const llvm_elem = try self.resolveInst(elem);
                    result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
                    llvm_i += 1;
                }
                return result;
            }
        },
        .Array => {
            assert(isByRef(result_ty));

            const llvm_usize = try self.dg.llvmType(Type.usize);
            const target = self.dg.module.getTarget();
            const alloca_inst = self.buildAlloca(llvm_result_ty);
            alloca_inst.setAlignment(result_ty.abiAlignment(target));

            const elem_ty = result_ty.childType();
            for (elements) |elem, i| {
                const indices: [2]*const llvm.Value = .{
                    llvm_usize.constNull(),
                    llvm_usize.constInt(@intCast(c_uint, i), .False),
                };
                const elem_ptr = self.builder.buildInBoundsGEP(alloca_inst, &indices, indices.len, "");
                const llvm_elem = try self.resolveInst(elem);
                const store_inst = self.builder.buildStore(llvm_elem, elem_ptr);
                store_inst.setAlignment(elem_ty.abiAlignment(target));
            }
            return alloca_inst;
        },
        else => unreachable,
    }
}

/// Lowers `@prefetch` to the llvm.prefetch.p0i8 intrinsic, declaring it on
/// first use. The comptime asserts pin the std.builtin enum encodings to the
/// integer values the intrinsic expects.
fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    const prefetch = self.air.instructions.items(.data)[inst].prefetch;

    comptime assert(@enumToInt(std.builtin.PrefetchOptions.Rw.read) == 0);
    comptime assert(@enumToInt(std.builtin.PrefetchOptions.Rw.write) == 1);

    // TODO these two asserts should be able to be comptime because the type is a u2
    assert(prefetch.locality >= 0);
    assert(prefetch.locality <= 3);

    comptime assert(@enumToInt(std.builtin.PrefetchOptions.Cache.instruction) == 0);
    comptime assert(@enumToInt(std.builtin.PrefetchOptions.Cache.data) == 1);

    // LLVM fails during codegen of instruction cache prefetchs for these architectures.
    // This is an LLVM bug as the prefetch intrinsic should be a noop if not supported
    // by the target.
    // To work around this, don't emit llvm.prefetch in this case.
    // See https://bugs.llvm.org/show_bug.cgi?id=21037
    const target = self.dg.module.getTarget();
    switch (prefetch.cache) {
        .instruction => switch (target.cpu.arch) {
            .x86_64, .i386 => return null,
            .arm, .armeb, .thumb, .thumbeb => {
                switch (prefetch.rw) {
                    .write => return null,
                    else => {},
                }
            },
            else => {},
        },
        .data => {},
    }

    const llvm_u8 = self.context.intType(8);
    const llvm_ptr_u8 = llvm_u8.pointerType(0);
    const llvm_u32 = self.context.intType(32);

    const llvm_fn_name = "llvm.prefetch.p0i8";
    const fn_val = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
        // declare void @llvm.prefetch(i8*, i32, i32, i32)
        const llvm_void = self.context.voidType();
        const param_types = [_]*const llvm.Type{
            llvm_ptr_u8, llvm_u32, llvm_u32, llvm_u32,
        };
        const fn_type = llvm.functionType(llvm_void, &param_types, param_types.len, .False);
        break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
    };

    const ptr = try self.resolveInst(prefetch.ptr);
    const ptr_u8 = self.builder.buildBitCast(ptr, llvm_ptr_u8, "");

    const params = [_]*const llvm.Value{
        ptr_u8,
        llvm_u32.constInt(@enumToInt(prefetch.rw), .False),
        llvm_u32.constInt(prefetch.locality, .False),
        llvm_u32.constInt(@enumToInt(prefetch.cache), .False),
    };
    _ = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
    return null;
}

/// Returns (creating and caching on first use) the "__zig_err_name_table"
/// global: a private pointer-to-slice global whose real initializer is filled
/// in elsewhere (it is undef here).
fn getErrorNameTable(self: *FuncGen) !*const llvm.Value {
    if (self.dg.object.error_name_table) |table| {
        return table;
    }

    const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
    const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget());
    const llvm_slice_ty = try self.dg.llvmType(slice_ty);
    const llvm_slice_ptr_ty = llvm_slice_ty.pointerType(0); // TODO: Address space

    const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
    error_name_table_global.setInitializer(llvm_slice_ptr_ty.getUndef());
    error_name_table_global.setLinkage(.Private);
    error_name_table_global.setGlobalConstant(.True);
    error_name_table_global.setUnnamedAddr(.True);
    error_name_table_global.setAlignment(slice_alignment);

    self.dg.object.error_name_table = error_name_table_global;
    return error_name_table_global;
}

/// Assumes the optional is not pointer-like and payload has bits.
/// Reads the non-null bit: field 1 of the optional representation, via GEP+load
/// when the optional is held by reference, extractvalue otherwise.
fn optIsNonNull(self: *FuncGen, opt_handle: *const llvm.Value, is_by_ref: bool) *const llvm.Value {
    if (is_by_ref) {
        const index_type = self.context.intType(32);
        const indices: [2]*const llvm.Value = .{
            index_type.constNull(), index_type.constInt(1, .False),
        };
        const field_ptr = self.builder.buildInBoundsGEP(opt_handle, &indices, indices.len, "");
        return self.builder.buildLoad(field_ptr, "");
    }
    return self.builder.buildExtractValue(opt_handle, 1, "");
}

/// Assumes the optional is not pointer-like and payload has bits.
/// Produces the payload (field 0): a pointer to it when by-ref, the value
/// itself otherwise.
fn optPayloadHandle(self: *FuncGen, opt_handle: *const llvm.Value, is_by_ref: bool) *const llvm.Value {
    if (is_by_ref) {
        // We have a pointer and we need to return a pointer to the first field.
        const index_type = self.context.intType(32);
        const indices: [2]*const llvm.Value = .{
            index_type.constNull(), // dereference the pointer
            index_type.constNull(), // first field is the payload
        };
        return self.builder.buildInBoundsGEP(opt_handle, &indices, indices.len, "");
    }
    return self.builder.buildExtractValue(opt_handle, 0, "");
}

/// Emits a call to llvm.floor.* for the given float type.
fn callFloor(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
    return self.callFloatUnary(arg, ty, "floor");
}

/// Emits a call to llvm.ceil.* for the given float type.
fn callCeil(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
    return self.callFloatUnary(arg, ty, "ceil");
}

/// Emits a call to llvm.trunc.* for the given float type.
fn callTrunc(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
    return self.callFloatUnary(arg, ty, "trunc");
}

/// Emits a call to the unary float intrinsic "llvm.<name>.f<bits>", declaring
/// it in the module on first use. The name is formatted into a fixed stack
/// buffer; `catch unreachable` is sound because the result always fits.
fn callFloatUnary(self: *FuncGen, arg: *const llvm.Value, ty: Type, name: []const u8) !*const llvm.Value {
    const target = self.dg.module.getTarget();

    var fn_name_buf: [100]u8 = undefined;
    const llvm_fn_name = std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.f{d}", .{
        name, ty.floatBits(target),
    }) catch unreachable;

    const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
        const operand_llvm_ty = try self.dg.llvmType(ty);
        const param_types = [_]*const llvm.Type{operand_llvm_ty};
        const fn_type = llvm.functionType(operand_llvm_ty, &param_types, param_types.len, .False);
        break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
    };

    const args: [1]*const llvm.Value = .{arg};
    return self.builder.buildCall(llvm_fn, &args, args.len, .C, .Auto, "");
}

/// Computes a pointer to field `field_index` of the struct or union pointed to
/// by `struct_ptr`. Packed structs are handled with a plain bitcast (the Zig
/// pointer type carries the bit-level access info); regular structs use a
/// struct GEP, falling back to an end-of-struct pointer for zero-sized
/// trailing fields; unions delegate to unionFieldPtr.
fn fieldPtr(
    self: *FuncGen,
    inst: Air.Inst.Index,
    struct_ptr: *const llvm.Value,
    struct_ptr_ty: Type,
    field_index: u32,
) !?*const llvm.Value {
    const struct_ty = struct_ptr_ty.childType();
    switch (struct_ty.zigTypeTag()) {
        .Struct => switch (struct_ty.containerLayout()) {
            .Packed => {
                // From LLVM's perspective, a pointer to a packed struct and a pointer
                // to a field of a packed struct are the same. The difference is in the
                // Zig pointer type which provides information for how to mask and shift
                // out the relevant bits when accessing the pointee.
                // Here we perform a bitcast because we want to use the host_size
                // as the llvm pointer element type.
                const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
                // TODO this can be removed if we change host_size to be bits instead
                // of bytes.
                return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
            },
            else => {
                const target = self.dg.module.getTarget();
                var ty_buf: Type.Payload.Pointer = undefined;
                if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
                    return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
                } else {
                    // If we found no index then this means this is a zero sized field at the
                    // end of the struct. Treat our struct pointer as an array of two and get
                    // the index to the element at index `1` to get a pointer to the end of
                    // the struct.
                    const llvm_usize = try self.dg.llvmType(Type.usize);
                    const llvm_index = llvm_usize.constInt(1, .False);
                    const indices: [1]*const llvm.Value = .{llvm_index};
                    return self.builder.buildInBoundsGEP(struct_ptr, &indices, indices.len, "");
                }
            },
        },
        .Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty, field_index),
        else => unreachable,
    }
}

/// Computes a pointer to a union's payload field: GEPs to the payload slot
/// (whose index depends on tag/payload alignment ordering) and bitcasts to the
/// concrete field pointer type. Returns null for zero-bit fields.
fn unionFieldPtr(
    self: *FuncGen,
    inst: Air.Inst.Index,
    union_ptr: *const llvm.Value,
    union_ty: Type,
    field_index: c_uint,
) !?*const llvm.Value {
    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
    const field = &union_obj.fields.values()[field_index];
    const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
    if (!field.ty.hasRuntimeBits()) {
        return null;
    }
    const target = self.dg.module.getTarget();
    const layout = union_ty.unionGetLayout(target);
    const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
    const union_field_ptr = self.builder.buildStructGEP(union_ptr, payload_index, "");
    return
self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
}

/// Computes a pointer to element `index` of a slice: extracts the slice's
/// base pointer (field 0) and GEPs by the index.
fn sliceElemPtr(
    self: *FuncGen,
    slice: *const llvm.Value,
    index: *const llvm.Value,
) *const llvm.Value {
    const base_ptr = self.builder.buildExtractValue(slice, 0, "");
    const indices: [1]*const llvm.Value = .{index};
    return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
}

/// Looks up the named LLVM intrinsic (asserting it exists) and returns its
/// declaration specialized for the given overload types.
fn getIntrinsic(self: *FuncGen, name: []const u8, types: []*const llvm.Type) *const llvm.Value {
    const id = llvm.lookupIntrinsicID(name.ptr, name.len);
    assert(id != 0);
    return self.llvmModule().getIntrinsicDeclaration(id, types.ptr, types.len);
}

/// Loads a value through a Zig pointer type, honoring alignment and
/// volatility. Zero-bit pointees load nothing (null). By-ref pointees return
/// the pointer itself. A nonzero host_size indicates a bit-packed field: the
/// whole host integer is loaded, shifted right by bit_offset, and the element
/// is truncated out (spilled to an alloca for by-ref results, bitcast for
/// floats).
fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) !?*const llvm.Value {
    const info = ptr_ty.ptrInfo().data;
    if (!info.pointee_type.hasRuntimeBits()) return null;

    const target = self.dg.module.getTarget();
    const ptr_alignment = ptr_ty.ptrAlignment(target);
    const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr());
    if (info.host_size == 0) {
        if (isByRef(info.pointee_type)) return ptr;
        const llvm_inst = self.builder.buildLoad(ptr, "");
        llvm_inst.setAlignment(ptr_alignment);
        llvm_inst.setVolatile(ptr_volatile);
        return llvm_inst;
    }

    // Bit-packed access: load the containing host integer.
    const int_ptr_ty = self.context.intType(info.host_size * 8).pointerType(0);
    const int_ptr = self.builder.buildBitCast(ptr, int_ptr_ty, "");
    const containing_int = self.builder.buildLoad(int_ptr, "");
    containing_int.setAlignment(ptr_alignment);
    containing_int.setVolatile(ptr_volatile);

    const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
    const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
    const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
    const elem_llvm_ty = try self.dg.llvmType(info.pointee_type);

    if (isByRef(info.pointee_type)) {
        const result_align = info.pointee_type.abiAlignment(target);
        const result_ptr = self.buildAlloca(elem_llvm_ty);
        result_ptr.setAlignment(result_align);

        const same_size_int = self.context.intType(elem_bits);
        const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
        const bitcasted_ptr = self.builder.buildBitCast(result_ptr, same_size_int.pointerType(0), "");
        const store_inst = self.builder.buildStore(truncated_int, bitcasted_ptr);
        store_inst.setAlignment(result_align);
        return result_ptr;
    }

    if (info.pointee_type.zigTypeTag() == .Float) {
        const same_size_int = self.context.intType(elem_bits);
        const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
        return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
    }

    return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
}

/// Stores `elem` through a Zig pointer type. Zero-bit elements store nothing.
/// A nonzero host_size indicates a bit-packed field: the host integer is
/// read-modify-written with the element masked into place (asserted
/// non-atomic). Otherwise, by-value elements use a plain store (with the given
/// ordering) and by-ref elements are copied with memcpy.
fn store(
    self: *FuncGen,
    ptr: *const llvm.Value,
    ptr_ty: Type,
    elem: *const llvm.Value,
    ordering: llvm.AtomicOrdering,
) void {
    const info = ptr_ty.ptrInfo().data;
    const elem_ty = info.pointee_type;
    if (!elem_ty.isFnOrHasRuntimeBits()) {
        return;
    }
    const target = self.dg.module.getTarget();
    const ptr_alignment = ptr_ty.ptrAlignment(target);
    const ptr_volatile = llvm.Bool.fromBool(info.@"volatile");
    if (info.host_size != 0) {
        const int_ptr_ty = self.context.intType(info.host_size * 8).pointerType(0);
        const int_ptr = self.builder.buildBitCast(ptr, int_ptr_ty, "");
        const containing_int = self.builder.buildLoad(int_ptr, "");
        assert(ordering == .NotAtomic);
        containing_int.setAlignment(ptr_alignment);
        containing_int.setVolatile(ptr_volatile);
        const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
        const containing_int_ty = containing_int.typeOf();
        const shift_amt = containing_int_ty.constInt(info.bit_offset, .False);
        // Convert to equally-sized integer type in order to perform the bit
        // operations on the value to store
        const value_bits_type = self.context.intType(elem_bits);
        const value_bits = self.builder.buildBitCast(elem, value_bits_type, "");

        // Build a mask that clears the element's bit range in the host int.
        var mask_val = value_bits_type.constAllOnes();
        mask_val = mask_val.constZExt(containing_int_ty);
        mask_val = mask_val.constShl(shift_amt);
        mask_val = mask_val.constNot();

        const anded_containing_int =
self.builder.buildAnd(containing_int, mask_val, ""); const extended_value = self.builder.buildZExt(value_bits, containing_int_ty, ""); const shifted_value = self.builder.buildShl(extended_value, shift_amt, ""); const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, ""); const store_inst = self.builder.buildStore(ored_value, int_ptr); assert(ordering == .NotAtomic); store_inst.setAlignment(ptr_alignment); store_inst.setVolatile(ptr_volatile); return; } if (!isByRef(elem_ty)) { const store_inst = self.builder.buildStore(elem, ptr); store_inst.setOrdering(ordering); store_inst.setAlignment(ptr_alignment); store_inst.setVolatile(ptr_volatile); return; } assert(ordering == .NotAtomic); const llvm_ptr_u8 = self.context.intType(8).pointerType(0); const size_bytes = elem_ty.abiSize(target); _ = self.builder.buildMemCpy( self.builder.buildBitCast(ptr, llvm_ptr_u8, ""), ptr_ty.ptrAlignment(target), self.builder.buildBitCast(elem, llvm_ptr_u8, ""), elem_ty.abiAlignment(target), self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False), info.@"volatile", ); } }; fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void { switch (arch) { .aarch64, .aarch64_be, .aarch64_32 => { llvm.LLVMInitializeAArch64Target(); llvm.LLVMInitializeAArch64TargetInfo(); llvm.LLVMInitializeAArch64TargetMC(); llvm.LLVMInitializeAArch64AsmPrinter(); llvm.LLVMInitializeAArch64AsmParser(); }, .amdgcn => { llvm.LLVMInitializeAMDGPUTarget(); llvm.LLVMInitializeAMDGPUTargetInfo(); llvm.LLVMInitializeAMDGPUTargetMC(); llvm.LLVMInitializeAMDGPUAsmPrinter(); llvm.LLVMInitializeAMDGPUAsmParser(); }, .thumb, .thumbeb, .arm, .armeb => { llvm.LLVMInitializeARMTarget(); llvm.LLVMInitializeARMTargetInfo(); llvm.LLVMInitializeARMTargetMC(); llvm.LLVMInitializeARMAsmPrinter(); llvm.LLVMInitializeARMAsmParser(); }, .avr => { llvm.LLVMInitializeAVRTarget(); llvm.LLVMInitializeAVRTargetInfo(); llvm.LLVMInitializeAVRTargetMC(); llvm.LLVMInitializeAVRAsmPrinter(); 
llvm.LLVMInitializeAVRAsmParser(); }, .bpfel, .bpfeb => { llvm.LLVMInitializeBPFTarget(); llvm.LLVMInitializeBPFTargetInfo(); llvm.LLVMInitializeBPFTargetMC(); llvm.LLVMInitializeBPFAsmPrinter(); llvm.LLVMInitializeBPFAsmParser(); }, .hexagon => { llvm.LLVMInitializeHexagonTarget(); llvm.LLVMInitializeHexagonTargetInfo(); llvm.LLVMInitializeHexagonTargetMC(); llvm.LLVMInitializeHexagonAsmPrinter(); llvm.LLVMInitializeHexagonAsmParser(); }, .lanai => { llvm.LLVMInitializeLanaiTarget(); llvm.LLVMInitializeLanaiTargetInfo(); llvm.LLVMInitializeLanaiTargetMC(); llvm.LLVMInitializeLanaiAsmPrinter(); llvm.LLVMInitializeLanaiAsmParser(); }, .mips, .mipsel, .mips64, .mips64el => { llvm.LLVMInitializeMipsTarget(); llvm.LLVMInitializeMipsTargetInfo(); llvm.LLVMInitializeMipsTargetMC(); llvm.LLVMInitializeMipsAsmPrinter(); llvm.LLVMInitializeMipsAsmParser(); }, .msp430 => { llvm.LLVMInitializeMSP430Target(); llvm.LLVMInitializeMSP430TargetInfo(); llvm.LLVMInitializeMSP430TargetMC(); llvm.LLVMInitializeMSP430AsmPrinter(); llvm.LLVMInitializeMSP430AsmParser(); }, .nvptx, .nvptx64 => { llvm.LLVMInitializeNVPTXTarget(); llvm.LLVMInitializeNVPTXTargetInfo(); llvm.LLVMInitializeNVPTXTargetMC(); llvm.LLVMInitializeNVPTXAsmPrinter(); // There is no LLVMInitializeNVPTXAsmParser function available. 
        },
        // Each case registers the target, its codegen (MC), and its printer/parser.
        // Where an AsmParser/AsmPrinter call is missing below, LLVM simply does not
        // export that function for the backend — the omission is intentional.
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => {
            llvm.LLVMInitializePowerPCTarget();
            llvm.LLVMInitializePowerPCTargetInfo();
            llvm.LLVMInitializePowerPCTargetMC();
            llvm.LLVMInitializePowerPCAsmPrinter();
            llvm.LLVMInitializePowerPCAsmParser();
        },
        .riscv32, .riscv64 => {
            llvm.LLVMInitializeRISCVTarget();
            llvm.LLVMInitializeRISCVTargetInfo();
            llvm.LLVMInitializeRISCVTargetMC();
            llvm.LLVMInitializeRISCVAsmPrinter();
            llvm.LLVMInitializeRISCVAsmParser();
        },
        .sparc, .sparcv9, .sparcel => {
            llvm.LLVMInitializeSparcTarget();
            llvm.LLVMInitializeSparcTargetInfo();
            llvm.LLVMInitializeSparcTargetMC();
            llvm.LLVMInitializeSparcAsmPrinter();
            llvm.LLVMInitializeSparcAsmParser();
        },
        .s390x => {
            llvm.LLVMInitializeSystemZTarget();
            llvm.LLVMInitializeSystemZTargetInfo();
            llvm.LLVMInitializeSystemZTargetMC();
            llvm.LLVMInitializeSystemZAsmPrinter();
            llvm.LLVMInitializeSystemZAsmParser();
        },
        .wasm32, .wasm64 => {
            llvm.LLVMInitializeWebAssemblyTarget();
            llvm.LLVMInitializeWebAssemblyTargetInfo();
            llvm.LLVMInitializeWebAssemblyTargetMC();
            llvm.LLVMInitializeWebAssemblyAsmPrinter();
            llvm.LLVMInitializeWebAssemblyAsmParser();
        },
        .i386, .x86_64 => {
            llvm.LLVMInitializeX86Target();
            llvm.LLVMInitializeX86TargetInfo();
            llvm.LLVMInitializeX86TargetMC();
            llvm.LLVMInitializeX86AsmPrinter();
            llvm.LLVMInitializeX86AsmParser();
        },
        .xcore => {
            llvm.LLVMInitializeXCoreTarget();
            llvm.LLVMInitializeXCoreTargetInfo();
            llvm.LLVMInitializeXCoreTargetMC();
            llvm.LLVMInitializeXCoreAsmPrinter();
            // There is no LLVMInitializeXCoreAsmParser function.
        },
        // Experimental backends: only registered when this LLVM build has them.
        .m68k => {
            if (build_options.llvm_has_m68k) {
                llvm.LLVMInitializeM68kTarget();
                llvm.LLVMInitializeM68kTargetInfo();
                llvm.LLVMInitializeM68kTargetMC();
                llvm.LLVMInitializeM68kAsmPrinter();
                llvm.LLVMInitializeM68kAsmParser();
            }
        },
        .csky => {
            if (build_options.llvm_has_csky) {
                llvm.LLVMInitializeCSKYTarget();
                llvm.LLVMInitializeCSKYTargetInfo();
                llvm.LLVMInitializeCSKYTargetMC();
                // There is no LLVMInitializeCSKYAsmPrinter function.
llvm.LLVMInitializeCSKYAsmParser(); } }, .ve => { if (build_options.llvm_has_ve) { llvm.LLVMInitializeVETarget(); llvm.LLVMInitializeVETargetInfo(); llvm.LLVMInitializeVETargetMC(); llvm.LLVMInitializeVEAsmPrinter(); llvm.LLVMInitializeVEAsmParser(); } }, .arc => { if (build_options.llvm_has_arc) { llvm.LLVMInitializeARCTarget(); llvm.LLVMInitializeARCTargetInfo(); llvm.LLVMInitializeARCTargetMC(); llvm.LLVMInitializeARCAsmPrinter(); // There is no LLVMInitializeARCAsmParser function. } }, // LLVM backends that have no initialization functions. .tce, .tcele, .r600, .le32, .le64, .amdil, .amdil64, .hsail, .hsail64, .shave, .spir, .spir64, .kalimba, .renderscript32, .renderscript64, => {}, .spu_2 => unreachable, // LLVM does not support this backend .spirv32 => unreachable, // LLVM does not support this backend .spirv64 => unreachable, // LLVM does not support this backend } } fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrdering { return switch (atomic_order) { .Unordered => .Unordered, .Monotonic => .Monotonic, .Acquire => .Acquire, .Release => .Release, .AcqRel => .AcquireRelease, .SeqCst => .SequentiallyConsistent, }; } fn toLlvmAtomicRmwBinOp( op: std.builtin.AtomicRmwOp, is_signed: bool, is_float: bool, ) llvm.AtomicRMWBinOp { return switch (op) { .Xchg => .Xchg, .Add => if (is_float) llvm.AtomicRMWBinOp.FAdd else return .Add, .Sub => if (is_float) llvm.AtomicRMWBinOp.FSub else return .Sub, .And => .And, .Nand => .Nand, .Or => .Or, .Xor => .Xor, .Max => if (is_signed) llvm.AtomicRMWBinOp.Max else return .UMax, .Min => if (is_signed) llvm.AtomicRMWBinOp.Min else return .UMin, }; } fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.CallConv { return switch (cc) { .Unspecified, .Inline, .Async => .Fast, .C, .Naked => .C, .Stdcall => .X86_StdCall, .Fastcall => .X86_FastCall, .Vectorcall => return switch (target.cpu.arch) { .i386, .x86_64 => .X86_VectorCall, .aarch64, .aarch64_be, .aarch64_32 => 
.AArch64_VectorCall, else => unreachable, }, .Thiscall => .X86_ThisCall, .APCS => .ARM_APCS, .AAPCS => .ARM_AAPCS, .AAPCSVFP => .ARM_AAPCS_VFP, .Interrupt => return switch (target.cpu.arch) { .i386, .x86_64 => .X86_INTR, .avr => .AVR_INTR, .msp430 => .MSP430_INTR, else => unreachable, }, .Signal => .AVR_SIGNAL, .SysV => .X86_64_SysV, .PtxKernel => return switch (target.cpu.arch) { .nvptx, .nvptx64 => .PTX_Kernel, else => unreachable, }, }; } /// Take into account 0 bit fields. Returns null if an llvm field could not be found. This only /// happens if you want the field index of a zero sized field at the end of the struct. fn llvmFieldIndex( ty: Type, field_index: u32, target: std.Target, ptr_pl_buf: *Type.Payload.Pointer, ) ?c_uint { if (ty.castTag(.tuple)) |payload| { const values = payload.data.values; var llvm_field_index: c_uint = 0; for (values) |val, i| { if (val.tag() != .unreachable_value) { continue; } if (field_index > i) { llvm_field_index += 1; continue; } const field_ty = payload.data.types[i]; ptr_pl_buf.* = .{ .data = .{ .pointee_type = field_ty, .@"align" = field_ty.abiAlignment(target), .@"addrspace" = .generic, }, }; return llvm_field_index; } return null; } const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout != .Packed); var llvm_field_index: c_uint = 0; for (struct_obj.fields.values()) |field, i| { if (!field.ty.hasRuntimeBits()) continue; if (field_index > i) { llvm_field_index += 1; continue; } ptr_pl_buf.* = .{ .data = .{ .pointee_type = field.ty, .@"align" = field.normalAlignment(target), .@"addrspace" = .generic, }, }; return llvm_field_index; } else { // We did not find an llvm field that corresponds to this zig field. 
return null; } } fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool { switch (fn_info.cc) { .Unspecified, .Inline => return isByRef(fn_info.return_type), .C => {}, else => return false, } switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { .windows => return @import("../arch/x86_64/abi.zig").classifyWindows(fn_info.return_type, target) == .memory, else => return @import("../arch/x86_64/abi.zig").classifySystemV(fn_info.return_type, target)[0] == .memory, }, else => return false, // TODO investigate C ABI for other architectures } } fn isByRef(ty: Type) bool { // For tuples (and TODO structs), if there are more than this many non-void // fields, then we make it byref, otherwise byval. const max_fields_byval = 2; switch (ty.zigTypeTag()) { .Type, .ComptimeInt, .ComptimeFloat, .EnumLiteral, .Undefined, .Null, .BoundFn, .Opaque, => unreachable, .NoReturn, .Void, .Bool, .Int, .Float, .Pointer, .ErrorSet, .Fn, .Enum, .Vector, .AnyFrame, => return false, .Array, .Frame => return ty.hasRuntimeBits(), .Struct => { // Packed structs are represented to LLVM as integers. if (ty.containerLayout() == .Packed) return false; if (!ty.hasRuntimeBits()) return false; if (ty.castTag(.tuple)) |tuple| { var count: usize = 0; for (tuple.data.values) |field_val, i| { if (field_val.tag() != .unreachable_value) continue; count += 1; if (count > max_fields_byval) { return true; } const field_ty = tuple.data.types[i]; if (isByRef(field_ty)) { return true; } } return false; } return true; }, .Union => return ty.hasRuntimeBits(), .ErrorUnion => return isByRef(ty.errorUnionPayload()), .Optional => { var buf: Type.Payload.ElemType = undefined; return isByRef(ty.optionalChild(&buf)); }, } } /// This function returns true if we expect LLVM to lower x86_fp80 correctly /// and false if we expect LLVM to crash if it counters an x86_fp80 type. 
fn backendSupportsF80(target: std.Target) bool {
    // The x86 family is the only one with a native 80-bit extended float,
    // so x86_fp80 lowering is only trusted there.
    const arch = target.cpu.arch;
    return arch == .x86_64 or arch == .i386;
}
src/codegen/llvm.zig
const std = @import("std");
const mem = std.mem;

const c = @import("./c.zig");
const glfw = @import("./main.zig");

const VideoMode = glfw.VideoMode;

/// Gamma ramp.
const GammaRamp = glfw.GammaRamp;

/// Opaque monitor object.
///
/// Wraps a raw `*c.GLFWmonitor`. Declared `packed` so the struct is exactly
/// pointer-sized, which lets `all()` reinterpret GLFW's handle array as a
/// slice of `Monitor` without copying.
pub const Monitor = packed struct {
    const Self = @This();

    handle: *c.GLFWmonitor,

    /// This function returns a slice of `Monitor` for all currently
    /// connected monitors. The primary monitor is always first in the
    /// returned slice.
    ///
    /// The slice is owned by GLFW; do not free it.
    pub fn all() ![]Monitor {
        var count: i32 = 0;
        var handles = c.glfwGetMonitors(&count);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized => return err,
            else => unreachable,
        };
        // Reinterpreting is safe because Monitor is a packed single-pointer struct.
        return @ptrCast([*]Self, handles)[0..@intCast(usize, count)];
    }

    /// This function returns the primary monitor. This is usually the
    /// monitor where elements like the task bar or global menu bar
    /// are located. Returns `null` when no monitor is connected.
    pub fn primary() !?Monitor {
        var maybeHandle = c.glfwGetPrimaryMonitor();
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized => return err,
            else => unreachable,
        };
        if (maybeHandle) |handle| {
            return Monitor{
                .handle = handle,
            };
        } else {
            return null;
        }
    }

    pub const Position = struct {
        x: i32,
        y: i32,
    };

    /// This function returns the position, in screen coordinates, of
    /// the upper-left corner of the specified monitor.
    pub fn getPosition(self: Self) !Position {
        var pos: Position = undefined;
        c.glfwGetMonitorPos(self.handle, &pos.x, &pos.y);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
        return pos;
    }

    pub const Workarea = struct {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    };

    /// This function returns the position, in screen coordinates, of
    /// the upper-left corner of the work area of the specified monitor
    /// along with the work area size in screen coordinates. The work
    /// area is defined as the area of the monitor not occluded by the
    /// operating system task bar where present. If no task bar exists
    /// then the work area is the monitor resolution in screen
    /// coordinates.
    pub fn getWorkarea(self: Self) !Workarea {
        var workarea: Workarea = undefined;
        c.glfwGetMonitorWorkarea(self.handle, &workarea.x, &workarea.y, &workarea.width, &workarea.height);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
        return workarea;
    }

    pub const PhysicalSize = struct {
        width: i32,
        height: i32,
    };

    /// This function returns the size, in millimetres, of the display
    /// area of the specified monitor.
    ///
    /// Some systems do not provide accurate monitor size information,
    /// either because the monitor EDID data is incorrect or because the
    /// driver does not report it accurately.
    pub fn getPhysicalSize(self: Self) !PhysicalSize {
        var physicalSize: PhysicalSize = undefined;
        c.glfwGetMonitorPhysicalSize(self.handle, &physicalSize.width, &physicalSize.height);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized => return err,
            else => unreachable,
        };
        return physicalSize;
    }

    pub const ContentScale = struct {
        x: f32,
        y: f32,
    };

    /// This function retrieves the content scale for the specified
    /// monitor. The content scale is the ratio between the current DPI
    /// and the platform's default DPI. This is especially important for
    /// text and any UI elements. If the pixel dimensions of your UI
    /// scaled by this look appropriate on your machine then it should
    /// appear at a reasonable size on other machines regardless of
    /// their DPI and scaling settings. This relies on the system DPI
    /// and scaling settings being somewhat correct.
    ///
    /// The content scale may depend on both the monitor resolution and
    /// pixel density and on user settings. It may be very different
    /// from the raw DPI calculated from the physical size and current
    /// resolution.
    pub fn getContentScale(self: Self) !ContentScale {
        var contentScale: ContentScale = undefined;
        c.glfwGetMonitorContentScale(self.handle, &contentScale.x, &contentScale.y);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
        return contentScale;
    }

    /// This function returns a human-readable name, encoded as UTF-8,
    /// of the specified monitor. The name typically reflects the make
    /// and model of the monitor and is not guaranteed to be unique
    /// among the connected monitors.
    ///
    /// The returned string is owned by GLFW; do not free it.
    pub fn getName(self: Self) ![:0]const u8 {
        var name: [*c]const u8 = c.glfwGetMonitorName(self.handle);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized => return err,
            else => unreachable,
        };
        return mem.spanZ(@as([*:0]const u8, name));
    }

    /// This function sets the user-defined pointer of the specified
    /// monitor. The current value is retained until the monitor is
    /// disconnected. The initial value is `null`.
    ///
    /// This function may be called from the monitor callback, even for
    /// a monitor that is being disconnected.
    pub fn setUserPointer(self: *Self, comptime T: type, pointer: ?*T) !void {
        c.glfwSetMonitorUserPointer(self.handle, @ptrCast(?*c_void, pointer));
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized => return err,
            else => unreachable,
        };
    }

    /// This function returns the current value of the user-defined
    /// pointer of the specified monitor. The initial value is `null`.
    ///
    /// This function may be called from the monitor callback, even for
    /// a monitor that is being disconnected.
    pub fn getUserPointer(self: Self, comptime T: type) !?*T {
        var userPointer = c.glfwGetMonitorUserPointer(self.handle);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized => return err,
            else => unreachable,
        };
        return @ptrCast(?*T, userPointer);
    }

    /// This function returns an array of all video modes supported by the
    /// specified monitor. The returned array is sorted in ascending order,
    /// first by color bit depth (the sum of all channel depths) and then by
    /// resolution area (the product of width and height).
    ///
    /// The returned slice is owned by GLFW; do not free it.
    pub fn getVideoModes(self: Self) ![]*const VideoMode {
        var count: i32 = 0;
        var handles = c.glfwGetVideoModes(self.handle, &count);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
        // Fix: `count` is i32 but slice bounds must be usize; cast exactly
        // as `all()` does. `count` is non-negative on success per GLFW.
        return handles[0..@intCast(usize, count)];
    }

    /// This function returns the current video mode of the specified monitor.
    /// If you have created a full screen window for that monitor, the return
    /// value will depend on whether that window is iconified.
    pub fn getVideoMode(self: Self) !*const VideoMode {
        var videoMode = c.glfwGetVideoMode(self.handle);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
        return videoMode;
    }

    /// This function generates an appropriately sized gamma ramp from the
    /// specified exponent and then calls `setGammaRamp` with it. The value must
    /// be a finite number greater than zero.
    ///
    /// The software controlled gamma ramp is applied in addition to the
    /// hardware gamma correction, which today is usually an approximation of
    /// sRGB gamma. This means that setting a perfectly linear ramp, or gamma
    /// 1.0, will produce the default (usually sRGB-like) behavior.
    ///
    /// For gamma correct rendering with OpenGL or OpenGL ES, see the
    /// `SRGBCapable` hint.
    pub fn setGamma(self: *Self, gamma: f32) !void {
        c.glfwSetGamma(self.handle, gamma);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.InvalidValue, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
    }

    /// This function returns the current gamma ramp of the specified monitor.
    ///
    /// The returned channel slices alias GLFW-owned memory; do not free them.
    pub fn getGammaRamp(self: Self) !GammaRamp {
        var gammaRamp = c.glfwGetGammaRamp(self.handle);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
        return GammaRamp{
            .red = gammaRamp.*.red[0..gammaRamp.*.size],
            .green = gammaRamp.*.green[0..gammaRamp.*.size],
            .blue = gammaRamp.*.blue[0..gammaRamp.*.size],
            .size = gammaRamp.*.size,
        };
    }

    /// This function sets the current gamma ramp for the specified monitor. The
    /// original gamma ramp for that monitor is saved by GLFW the first time
    /// this function is called and is restored by glfwTerminate.
    ///
    /// The software controlled gamma ramp is applied in addition to the
    /// hardware gamma correction, which today is usually an approximation of
    /// sRGB gamma. This means that setting a perfectly linear ramp, or gamma
    /// 1.0, will produce the default (usually sRGB-like) behavior.
    ///
    /// For gamma correct rendering with OpenGL or OpenGL ES, see the
    /// `SRGBCapable` hint.
    ///
    /// Returns `glfw.Error.InvalidValue` when the three channel slices do
    /// not all have length `gammaRamp.size`.
    pub fn setGammaRamp(self: *Self, gammaRamp: GammaRamp) !void {
        if (gammaRamp.red.len != gammaRamp.size or
            gammaRamp.green.len != gammaRamp.size or
            gammaRamp.blue.len != gammaRamp.size)
        {
            return glfw.Error.InvalidValue;
        }
        var raw = c.GLFWgammaramp{
            .red = gammaRamp.red.ptr,
            .green = gammaRamp.green.ptr,
            .blue = gammaRamp.blue.ptr,
            .size = @intCast(c_uint, gammaRamp.size),
        };
        c.glfwSetGammaRamp(self.handle, &raw);
        glfw.getError() catch |err| switch (err) {
            glfw.Error.NotInitialized, glfw.Error.PlatformError => return err,
            else => unreachable,
        };
    }
};
src/monitor.zig
const std = @import("std.zig"); const builtin = std.builtin; const os = std.os; const fs = std.fs; const BufMap = std.BufMap; const mem = std.mem; const math = std.math; const Allocator = mem.Allocator; const assert = std.debug.assert; const testing = std.testing; pub const abort = os.abort; pub const exit = os.exit; pub const changeCurDir = os.chdir; pub const changeCurDirC = os.chdirC; /// The result is a slice of `out_buffer`, from index `0`. pub fn getCwd(out_buffer: *[fs.MAX_PATH_BYTES]u8) ![]u8 { return os.getcwd(out_buffer); } /// Caller must free the returned memory. pub fn getCwdAlloc(allocator: *Allocator) ![]u8 { var buf: [fs.MAX_PATH_BYTES]u8 = undefined; return mem.dupe(allocator, u8, try os.getcwd(&buf)); } test "getCwdAlloc" { const cwd = try getCwdAlloc(testing.allocator); testing.allocator.free(cwd); } /// Caller owns resulting `BufMap`. pub fn getEnvMap(allocator: *Allocator) !BufMap { var result = BufMap.init(allocator); errdefer result.deinit(); if (builtin.os.tag == .windows) { const ptr = os.windows.peb().ProcessParameters.Environment; var i: usize = 0; while (ptr[i] != 0) { const key_start = i; while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {} const key_w = ptr[key_start..i]; const key = try std.unicode.utf16leToUtf8Alloc(allocator, key_w); errdefer allocator.free(key); if (ptr[i] == '=') i += 1; const value_start = i; while (ptr[i] != 0) : (i += 1) {} const value_w = ptr[value_start..i]; const value = try std.unicode.utf16leToUtf8Alloc(allocator, value_w); errdefer allocator.free(value); i += 1; // skip over null byte try result.setMove(key, value); } return result; } else if (builtin.os.tag == .wasi) { var environ_count: usize = undefined; var environ_buf_size: usize = undefined; const environ_sizes_get_ret = os.wasi.environ_sizes_get(&environ_count, &environ_buf_size); if (environ_sizes_get_ret != os.wasi.ESUCCESS) { return os.unexpectedErrno(environ_sizes_get_ret); } // TODO: Verify that the documentation is incorrect // 
https://github.com/WebAssembly/WASI/issues/27 var environ = try allocator.alloc(?[*:0]u8, environ_count + 1); defer allocator.free(environ); var environ_buf = try allocator.alloc(u8, environ_buf_size); defer allocator.free(environ_buf); const environ_get_ret = os.wasi.environ_get(environ.ptr, environ_buf.ptr); if (environ_get_ret != os.wasi.ESUCCESS) { return os.unexpectedErrno(environ_get_ret); } for (environ) |env| { if (env) |ptr| { const pair = mem.spanZ(ptr); var parts = mem.split(pair, "="); const key = parts.next().?; const value = parts.next().?; try result.set(key, value); } } return result; } else if (builtin.link_libc) { var ptr = std.c.environ; while (ptr.*) |line| : (ptr += 1) { var line_i: usize = 0; while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {} const key = line[0..line_i]; var end_i: usize = line_i; while (line[end_i] != 0) : (end_i += 1) {} const value = line[line_i + 1 .. end_i]; try result.set(key, value); } return result; } else { for (os.environ) |line| { var line_i: usize = 0; while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {} const key = line[0..line_i]; var end_i: usize = line_i; while (line[end_i] != 0) : (end_i += 1) {} const value = line[line_i + 1 .. end_i]; try result.set(key, value); } return result; } } test "os.getEnvMap" { var env = try getEnvMap(std.testing.allocator); defer env.deinit(); } pub const GetEnvVarOwnedError = error{ OutOfMemory, EnvironmentVariableNotFound, /// See https://github.com/ziglang/zig/issues/1774 InvalidUtf8, }; /// Caller must free returned memory. 
pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 { if (builtin.os.tag == .windows) { const result_w = blk: { const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key); defer allocator.free(key_w); break :blk std.os.getenvW(key_w) orelse return error.EnvironmentVariableNotFound; }; return std.unicode.utf16leToUtf8Alloc(allocator, result_w) catch |err| switch (err) { error.DanglingSurrogateHalf => return error.InvalidUtf8, error.ExpectedSecondSurrogateHalf => return error.InvalidUtf8, error.UnexpectedSecondSurrogateHalf => return error.InvalidUtf8, else => |e| return e, }; } else { const result = os.getenv(key) orelse return error.EnvironmentVariableNotFound; return mem.dupe(allocator, u8, result); } } test "os.getEnvVarOwned" { var ga = std.testing.allocator; testing.expectError(error.EnvironmentVariableNotFound, getEnvVarOwned(ga, "BADENV")); } pub const ArgIteratorPosix = struct { index: usize, count: usize, pub fn init() ArgIteratorPosix { return ArgIteratorPosix{ .index = 0, .count = os.argv.len, }; } pub fn next(self: *ArgIteratorPosix) ?[]const u8 { if (self.index == self.count) return null; const s = os.argv[self.index]; self.index += 1; return mem.spanZ(s); } pub fn skip(self: *ArgIteratorPosix) bool { if (self.index == self.count) return false; self.index += 1; return true; } }; pub const ArgIteratorWindows = struct { index: usize, cmd_line: [*]const u8, in_quote: bool, quote_count: usize, seen_quote_count: usize, pub const NextError = error{OutOfMemory}; pub fn init() ArgIteratorWindows { return initWithCmdLine(os.windows.kernel32.GetCommandLineA()); } pub fn initWithCmdLine(cmd_line: [*]const u8) ArgIteratorWindows { return ArgIteratorWindows{ .index = 0, .cmd_line = cmd_line, .in_quote = false, .quote_count = countQuotes(cmd_line), .seen_quote_count = 0, }; } /// You must free the returned memory when done. 
pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![]u8) { // march forward over whitespace while (true) : (self.index += 1) { const byte = self.cmd_line[self.index]; switch (byte) { 0 => return null, ' ', '\t' => continue, else => break, } } return self.internalNext(allocator); } pub fn skip(self: *ArgIteratorWindows) bool { // march forward over whitespace while (true) : (self.index += 1) { const byte = self.cmd_line[self.index]; switch (byte) { 0 => return false, ' ', '\t' => continue, else => break, } } var backslash_count: usize = 0; while (true) : (self.index += 1) { const byte = self.cmd_line[self.index]; switch (byte) { 0 => return true, '"' => { const quote_is_real = backslash_count % 2 == 0; if (quote_is_real) { self.seen_quote_count += 1; } }, '\\' => { backslash_count += 1; }, ' ', '\t' => { if (self.seen_quote_count % 2 == 0 or self.seen_quote_count == self.quote_count) { return true; } backslash_count = 0; }, else => { backslash_count = 0; continue; }, } } } fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![]u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); var backslash_count: usize = 0; while (true) : (self.index += 1) { const byte = self.cmd_line[self.index]; switch (byte) { 0 => return buf.toOwnedSlice(), '"' => { const quote_is_real = backslash_count % 2 == 0; try self.emitBackslashes(&buf, backslash_count / 2); backslash_count = 0; if (quote_is_real) { self.seen_quote_count += 1; if (self.seen_quote_count == self.quote_count and self.seen_quote_count % 2 == 1) { try buf.append('"'); } } else { try buf.append('"'); } }, '\\' => { backslash_count += 1; }, ' ', '\t' => { try self.emitBackslashes(&buf, backslash_count); backslash_count = 0; if (self.seen_quote_count % 2 == 1 and self.seen_quote_count != self.quote_count) { try buf.append(byte); } else { return buf.toOwnedSlice(); } }, else => { try self.emitBackslashes(&buf, backslash_count); backslash_count = 0; try 
buf.append(byte); }, } } } fn emitBackslashes(self: *ArgIteratorWindows, buf: *std.ArrayList(u8), emit_count: usize) !void { var i: usize = 0; while (i < emit_count) : (i += 1) { try buf.append('\\'); } } fn countQuotes(cmd_line: [*]const u8) usize { var result: usize = 0; var backslash_count: usize = 0; var index: usize = 0; while (true) : (index += 1) { const byte = cmd_line[index]; switch (byte) { 0 => return result, '\\' => backslash_count += 1, '"' => { result += 1 - (backslash_count % 2); backslash_count = 0; }, else => { backslash_count = 0; }, } } } }; pub const ArgIterator = struct { const InnerType = if (builtin.os.tag == .windows) ArgIteratorWindows else ArgIteratorPosix; inner: InnerType, pub fn init() ArgIterator { if (builtin.os.tag == .wasi) { // TODO: Figure out a compatible interface accomodating WASI @compileError("ArgIterator is not yet supported in WASI. Use argsAlloc and argsFree instead."); } return ArgIterator{ .inner = InnerType.init() }; } pub const NextError = ArgIteratorWindows.NextError; /// You must free the returned memory when done. pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![]u8) { if (builtin.os.tag == .windows) { return self.inner.next(allocator); } else { return mem.dupe(allocator, u8, self.inner.next() orelse return null); } } /// If you only are targeting posix you can call this and not need an allocator. pub fn nextPosix(self: *ArgIterator) ?[]const u8 { return self.inner.next(); } /// Parse past 1 argument without capturing it. /// Returns `true` if skipped an arg, `false` if we are at the end. pub fn skip(self: *ArgIterator) bool { return self.inner.skip(); } }; pub fn args() ArgIterator { return ArgIterator.init(); } /// Caller must call argsFree on result. 
pub fn argsAlloc(allocator: *mem.Allocator) ![][]u8 { if (builtin.os.tag == .wasi) { var count: usize = undefined; var buf_size: usize = undefined; const args_sizes_get_ret = os.wasi.args_sizes_get(&count, &buf_size); if (args_sizes_get_ret != os.wasi.ESUCCESS) { return os.unexpectedErrno(args_sizes_get_ret); } var argv = try allocator.alloc([*:0]u8, count); defer allocator.free(argv); var argv_buf = try allocator.alloc(u8, buf_size); const args_get_ret = os.wasi.args_get(argv.ptr, argv_buf.ptr); if (args_get_ret != os.wasi.ESUCCESS) { return os.unexpectedErrno(args_get_ret); } var result_slice = try allocator.alloc([]u8, count); var i: usize = 0; while (i < count) : (i += 1) { result_slice[i] = mem.spanZ(argv[i]); } return result_slice; } // TODO refactor to only make 1 allocation. var it = args(); var contents = std.ArrayList(u8).init(allocator); defer contents.deinit(); var slice_list = std.ArrayList(usize).init(allocator); defer slice_list.deinit(); while (it.next(allocator)) |arg_or_err| { const arg = try arg_or_err; defer allocator.free(arg); try contents.appendSlice(arg); try slice_list.append(arg.len); } const contents_slice = contents.span(); const slice_sizes = slice_list.span(); const slice_list_bytes = try math.mul(usize, @sizeOf([]u8), slice_sizes.len); const total_bytes = try math.add(usize, slice_list_bytes, contents_slice.len); const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes); errdefer allocator.free(buf); const result_slice_list = mem.bytesAsSlice([]u8, buf[0..slice_list_bytes]); const result_contents = buf[slice_list_bytes..]; mem.copy(u8, result_contents, contents_slice); var contents_index: usize = 0; for (slice_sizes) |len, i| { const new_index = contents_index + len; result_slice_list[i] = result_contents[contents_index..new_index]; contents_index = new_index; } return result_slice_list; } pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void { if (builtin.os.tag == .wasi) { const last_item = 
args_alloc[args_alloc.len - 1]; const last_byte_addr = @ptrToInt(last_item.ptr) + last_item.len + 1; // null terminated const first_item_ptr = args_alloc[0].ptr; const len = last_byte_addr - @ptrToInt(first_item_ptr); allocator.free(first_item_ptr[0..len]); return allocator.free(args_alloc); } var total_bytes: usize = 0; for (args_alloc) |arg| { total_bytes += @sizeOf([]u8) + arg.len; } const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes]; const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf); return allocator.free(aligned_allocated_buf); } test "windows arg parsing" { testWindowsCmdLine("a b\tc d", &[_][]const u8{ "a", "b", "c", "d" }); testWindowsCmdLine("\"abc\" d e", &[_][]const u8{ "abc", "d", "e" }); testWindowsCmdLine("a\\\\\\b d\"e f\"g h", &[_][]const u8{ "a\\\\\\b", "de fg", "h" }); testWindowsCmdLine("a\\\\\\\"b c d", &[_][]const u8{ "a\\\"b", "c", "d" }); testWindowsCmdLine("a\\\\\\\\\"b c\" d e", &[_][]const u8{ "a\\\\b c", "d", "e" }); testWindowsCmdLine("a b\tc \"d f", &[_][]const u8{ "a", "b", "c", "\"d", "f" }); testWindowsCmdLine("\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\"", &[_][]const u8{ ".\\..\\zig-cache\\build", "bin\\zig.exe", ".\\..", ".\\..\\zig-cache", "--help", }); } fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []const u8) void { var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line); for (expected_args) |expected_arg| { const arg = it.next(std.testing.allocator).? catch unreachable; defer std.testing.allocator.free(arg); testing.expectEqualSlices(u8, expected_arg, arg); } testing.expect(it.next(std.testing.allocator) == null); } pub const UserInfo = struct { uid: u32, gid: u32, }; /// POSIX function which gets a uid from username. 
pub fn getUserInfo(name: []const u8) !UserInfo {
    return switch (builtin.os.tag) {
        .linux, .macosx, .watchos, .tvos, .ios, .freebsd, .netbsd => posixGetUserInfo(name),
        else => @compileError("Unsupported OS"),
    };
}

/// TODO this reads /etc/passwd. But sometimes the user/id mapping is in something else
/// like NIS, AD, etc. See `man nss` or look at an strace for `id myuser`.
pub fn posixGetUserInfo(name: []const u8) !UserInfo {
    // NOTE(review): `io` is not among this file's visible imports — confirm it
    // resolves in the full file.
    var in_stream = try io.InStream.open("/etc/passwd", null);
    defer in_stream.close();

    // States of the line-oriented passwd parser. Each passwd line is
    // `name:password:uid:gid:...`; we match `name`, skip the password field,
    // then accumulate the numeric uid and gid.
    const State = enum {
        Start,
        WaitForNextLine,
        SkipPassword,
        ReadUserId,
        ReadGroupId,
    };

    var buf: [std.mem.page_size]u8 = undefined;
    var name_index: usize = 0;
    var state = State.Start;
    var uid: u32 = 0;
    var gid: u32 = 0;

    while (true) {
        const amt_read = try in_stream.read(buf[0..]);
        for (buf[0..amt_read]) |byte| {
            switch (state) {
                .Start => switch (byte) {
                    ':' => {
                        // Only a full-length match of `name` counts.
                        state = if (name_index == name.len) State.SkipPassword else State.WaitForNextLine;
                    },
                    '\n' => return error.CorruptPasswordFile,
                    else => {
                        if (name_index == name.len or name[name_index] != byte) {
                            state = .WaitForNextLine;
                        }
                        name_index += 1;
                    },
                },
                .WaitForNextLine => switch (byte) {
                    '\n' => {
                        name_index = 0;
                        state = .Start;
                    },
                    else => continue,
                },
                .SkipPassword => switch (byte) {
                    '\n' => return error.CorruptPasswordFile,
                    ':' => {
                        state = .ReadUserId;
                    },
                    else => continue,
                },
                .ReadUserId => switch (byte) {
                    ':' => {
                        state = .ReadGroupId;
                    },
                    '\n' => return error.CorruptPasswordFile,
                    else => {
                        const digit = switch (byte) {
                            '0'...'9' => byte - '0',
                            else => return error.CorruptPasswordFile,
                        };
                        // Fix: the overflow builtins take a result *pointer*;
                        // `*uid` was a dereference, `&uid` is the address.
                        if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
                        if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
                    },
                },
                .ReadGroupId => switch (byte) {
                    '\n', ':' => {
                        return UserInfo{
                            .uid = uid,
                            .gid = gid,
                        };
                    },
                    else => {
                        const digit = switch (byte) {
                            '0'...'9' => byte - '0',
                            else => return error.CorruptPasswordFile,
                        };
                        // Fix: same `*gid` -> `&gid` out-parameter correction.
                        if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
                        if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
                    },
                },
            }
        }
        // A short read means end of file: the user was never matched.
        if (amt_read < buf.len) return error.UserNotFound;
    }
}

pub fn getBaseAddress() usize {
    switch (builtin.os.tag) {
        .linux => {
            // AT_BASE is the interpreter base; when absent (static PIE),
            // derive the base from the program header address.
            const base = os.system.getauxval(std.elf.AT_BASE);
            if (base != 0) {
                return base;
            }
            const phdr = os.system.getauxval(std.elf.AT_PHDR);
            return phdr - @sizeOf(std.elf.Ehdr);
        },
        .macosx, .freebsd, .netbsd => {
            return @ptrToInt(&std.c._mh_execute_header);
        },
        .windows => return @ptrToInt(os.windows.kernel32.GetModuleHandleW(null)),
        else => @compileError("Unsupported OS"),
    }
}

/// Caller owns the result value and each inner slice.
/// TODO Remove the `Allocator` requirement from this API, which will remove the `Allocator`
/// requirement from `std.zig.system.NativeTargetInfo.detect`. Most likely this will require
/// introducing a new, lower-level function which takes a callback function, and then this
/// function which takes an allocator can exist on top of it.
pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 {
    // Statically linked executables load no shared libraries at all.
    switch (builtin.link_mode) {
        .Static => return &[_][:0]u8{},
        .Dynamic => {},
    }
    const List = std.ArrayList([:0]u8);
    switch (builtin.os.tag) {
        .linux,
        .freebsd,
        .netbsd,
        .dragonfly,
        => {
            var paths = List.init(allocator);
            // On error, free every duped path and the backing list storage.
            errdefer {
                const slice = paths.toOwnedSlice();
                for (slice) |item| {
                    allocator.free(item);
                }
                allocator.free(slice);
            }
            // Walk the loaded ELF objects; only absolute paths are collected
            // (the empty name for the main executable is skipped by the '/').
            try os.dl_iterate_phdr(&paths, error{OutOfMemory}, struct {
                fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void {
                    const name = info.dlpi_name orelse return;
                    if (name[0] == '/') {
                        const item = try mem.dupeZ(list.allocator, u8, mem.spanZ(name));
                        errdefer list.allocator.free(item);
                        try list.append(item);
                    }
                }
            }.callback);
            return paths.toOwnedSlice();
        },
        .macosx, .ios, .watchos, .tvos => {
            var paths = List.init(allocator);
            errdefer {
                const slice = paths.toOwnedSlice();
                for (slice) |item| {
                    allocator.free(item);
                }
                allocator.free(slice);
            }
            // Darwin: enumerate Mach-O images via dyld.
            const img_count = std.c._dyld_image_count();
            var i: u32 = 0;
            while (i < img_count) : (i += 1) {
                const name = std.c._dyld_get_image_name(i);
                const item = try mem.dupeZ(allocator, u8, mem.spanZ(name));
                errdefer allocator.free(item);
                try paths.append(item);
            }
            return paths.toOwnedSlice();
        },
        else => @compileError("getSelfExeSharedLibPaths unimplemented for this target"),
    }
}
lib/std/process.zig
const std = @import("std");
const interfaces = @import("interface");
usingnamespace @import("environment.zig");
usingnamespace @import("vm.zig");
usingnamespace @import("value.zig");

/// Non-owning interface to an abstract LoLa object.
/// It is associated with an object handle in the `ObjectPool` and provides
/// a way to get methods as well as destroy the object when it's garbage collected.
pub const Object = struct {
    // Required vtable: every object must expose method lookup and a destructor.
    const Interface = interfaces.Interface(struct {
        getMethod: fn (self: *interfaces.SelfType, name: []const u8) ?Function,
        destroyObject: fn (self: *interfaces.SelfType) void,
    }, interfaces.Storage.NonOwning);

    // Optional vtable used for (de-)serialization support; entries may be null.
    const Class = interfaces.Interface(struct {
        serializeObject: ?fn (self: *interfaces.SelfType, stream: OutputStream) anyerror!void,
        deserializeObject: ?fn (stream: InputStream) anyerror!*interfaces.SelfType,
    }, interfaces.Storage.NonOwning);

    const Self = @This();

    impl: Interface,

    /// Wraps `ptr` (a mutable pointer to a concrete object) in a non-owning
    /// interface. `catch unreachable`: Interface.init only fails for types that
    /// do not implement the interface, which is a programming error here.
    pub fn init(ptr: anytype) Self {
        return Self{
            .impl = Interface.init(ptr) catch unreachable,
        };
    }

    /// Looks up a method by name; returns null if the object has no such method.
    fn getMethod(self: *const Self, name: []const u8) ?Function {
        return self.impl.call("getMethod", .{name});
    }

    /// Invokes the object's destructor and poisons this wrapper so any
    /// further use is caught in safe build modes.
    fn destroyObject(self: *Self) void {
        self.impl.call("destroyObject", .{});
        self.* = undefined;
    }
};

/// A opaque handle to objects. These are used inside the virtual machine and environment and
/// will be passed around. They do not hold any memory references and require an object pool to
/// resolve to actual objects.
pub const ObjectHandle = enum(u64) {
    const Self = @This();
    _, // Just a non-exhaustive handle, no named members
};

/// Type-erased pull-style byte source used for object deserialization.
pub const InputStream = struct {
    const Self = @This();
    pub const ErasedSelf = @Type(.Opaque);

    self: *const ErasedSelf,
    read: fn (self: *const ErasedSelf, buf: []u8) anyerror!usize,

    /// Erases the concrete reader behind `reader_ptr` (a pointer to any type
    /// with a `read([]u8) anyerror!usize` method).
    fn from(reader_ptr: anytype) Self {
        const T = std.meta.Child(@TypeOf(reader_ptr));
        return Self{
            .self = @ptrCast(*const ErasedSelf, reader_ptr),
            .read = struct {
                fn read(self: *const ErasedSelf, buf: []u8) anyerror!usize {
                    // Un-erase and forward to the concrete reader.
                    return @ptrCast(*const T, @alignCast(@alignOf(T), self)).read(buf);
                }
            }.read,
        };
    }

    fn readSome(self: Self, buffer: []u8) anyerror!usize {
        return self.read(self.self, buffer);
    }

    /// Adapts this stream to the standard `std.io.Reader` API.
    fn reader(self: @This()) Reader {
        return Reader{
            .context = self,
        };
    }

    pub const Reader = std.io.Reader(Self, anyerror, readSome);
};

/// Type-erased push-style byte sink used for object serialization.
pub const OutputStream = struct {
    const Self = @This();
    pub const ErasedSelf = @Type(.Opaque);

    self: *const ErasedSelf,
    write: fn (self: *const ErasedSelf, buf: []const u8) anyerror!usize,

    /// Erases the concrete writer behind `writer_ptr`; mirror image of
    /// `InputStream.from`.
    fn from(writer_ptr: anytype) Self {
        const T = std.meta.Child(@TypeOf(writer_ptr));
        return Self{
            .self = @ptrCast(*const ErasedSelf, writer_ptr),
            .write = struct {
                fn write(self: *const ErasedSelf, buf: []const u8) anyerror!usize {
                    return @ptrCast(*const T, @alignCast(@alignOf(T), self)).write(buf);
                }
            }.write,
        };
    }

    fn writeSome(self: Self, buffer: []const u8) anyerror!usize {
        return self.write(self.self, buffer);
    }

    /// Adapts this stream to the standard `std.io.Writer` API.
    fn writer(self: @This()) Writer {
        return Writer{
            .context = self,
        };
    }

    pub const Writer = std.io.Writer(Self, anyerror, writeSome);
};

const ObjectGetError = error{InvalidObject};

/// Non-generic, type-erased facade over any `ObjectPool(...)` instantiation,
/// so the VM/environment can talk to a pool without knowing its class list.
pub const ObjectPoolInterface = struct {
    const ErasedSelf = @Type(.Opaque);

    self: *ErasedSelf,

    getMethodFn: fn (self: *ErasedSelf, handle: ObjectHandle, name: []const u8) ObjectGetError!?Function,
    destroyObjectFn: fn (self: *ErasedSelf, handle: ObjectHandle) void,
    isObjectValidFn: fn (self: *ErasedSelf, handle: ObjectHandle) bool,

    /// Forwards to the concrete pool's `getMethod`.
    pub fn getMethod(self: @This(), handle: ObjectHandle, name: []const u8) ObjectGetError!?Function {
        return self.getMethodFn(self.self, handle, name);
    }

    /// Forwards to the concrete pool's `destroyObject`.
    pub fn destroyObject(self: @This(), handle: ObjectHandle) void {
        return self.destroyObjectFn(self.self, handle);
    }

    /// Forwards to the concrete pool's `isObjectValid`.
    pub fn isObjectValid(self: @This(), handle: ObjectHandle) bool {
        return self.isObjectValidFn(self.self, handle);
    }

    /// Recovers the concrete pool pointer. Caller must pass the exact
    /// `ObjectPool(...)` type this interface was created from.
    pub fn castTo(self: *@This(), comptime PoolType: type) *PoolType {
        return @ptrCast(*PoolType, @alignCast(@alignOf(PoolType), self.self));
    }
};

/// An object pool is a structure that is used for garbage collecting objects.
/// Each object gets a unique number assigned when being put into the pool
/// via `createObject`. This handle can then be passed into a VM, used opaquely.
/// The VM can also request methods from objects via `getMethod` call.
/// To collect garbage, the following procedure should be done:
/// 1. Call `clearUsageCounters` to initiate garbage collection
/// 2. Call `walkEnvironment`, `walkVM` or `walkValue` to collect all live objects in different elements
/// 3. Call `collectGarbage` to delete all objects that have no reference counters set.
/// For each object to be deleted, `destroyObject` is invoked and the object is removed from the pool.
/// To retain objects by hand in areas not reachable by any of the `walk*` functions, it's possible to
/// call `retainObject` to increment the reference counter by 1 and `releaseObject` to reduce it by one.
/// Objects marked with this reference counter will not be deleted even when the object is not encountered
/// between `clearUsageCounters` and `collectGarbage`.
pub fn ObjectPool(comptime classes_list: anytype) type {
    // enforce type safety here: copy the anytype tuple into a typed array of `type`.
    comptime var classes: [classes_list.len]type = undefined;
    for (classes_list) |item, i| {
        classes[i] = item;
    }

    // Hash all class names into a signature so serialized pools can only be
    // loaded by a pool with the same class list (in the same order).
    comptime var hasher = std.hash.SipHash64(2, 4).init("ObjectPool Serialization Version 1");

    // The pool is serializable only if *every* class implements both
    // serializeObject and deserializeObject (and the class list is non-empty).
    comptime var all_classes_can_serialize = (classes.len > 0);

    inline for (classes) |class| {
        const can_serialize = @hasDecl(class, "serializeObject");
        if (can_serialize != @hasDecl(class, "deserializeObject")) {
            @compileError("Each class requires either both serializeObject and deserializeObject to be present or none.");
        }
        all_classes_can_serialize = all_classes_can_serialize and can_serialize;

        // this requires to use a typeHash structure instead of the type name
        hasher.update(@typeName(class));
    }

    // Smallest byte-aligned unsigned int that can index all classes.
    const TypeIndex = std.meta.Int(
        false,
        // We need 1 extra value, so 0xFFFF… is never a valid type index
        // this marks the end of objects in the stream
        std.mem.alignForward(std.math.log2_int_ceil(usize, classes.len + 1), 8),
    );

    // Per-class serialization vtable entry.
    const ClassInfo = struct {
        name: []const u8,
        serialize: fn (stream: OutputStream, obj: Object) anyerror!void,
        deserialize: fn (allocator: *std.mem.Allocator, stream: InputStream) anyerror!Object,
    };

    // Provide a huge-enough branch quota for the comptime loops below.
    @setEvalBranchQuota(1000 * (classes.len + 1));

    // Comptime lookup table mapping class index -> (de)serialization thunks.
    // Only built when every class supports serialization.
    const class_lut = comptime if (all_classes_can_serialize) blk: {
        var lut: [classes.len]ClassInfo = undefined;
        for (lut) |*info, i| {
            const Class = classes[i];
            const Interface = struct {
                fn serialize(stream: OutputStream, obj: Object) anyerror!void {
                    // Un-erase the stored object pointer back to the concrete class.
                    try Class.serializeObject(stream.writer(), @ptrCast(*Class, @alignCast(@alignOf(Class), obj.impl.storage.erased_ptr)));
                }
                fn deserialize(allocator: *std.mem.Allocator, stream: InputStream) anyerror!Object {
                    var ptr = try Class.deserializeObject(allocator, stream.reader());
                    return Object.init(ptr);
                }
            };
            info.* = ClassInfo{
                .name = @typeName(Class),
                .serialize = Interface.serialize,
                .deserialize = Interface.deserialize,
            };
        }
        break :blk lut;
    } else {};

    const pool_signature = hasher.finalInt();

    return struct {
        const Self = @This();

        // Bookkeeping wrapper stored per object in the hash map.
        const ManagedObject = struct {
            refcount: usize, // GC mark counter, reset by clearUsageCounters
            manualRefcount: usize, // manual pin count (retainObject/releaseObject)
            object: Object,
            class_id: TypeIndex,
        };

        /// Is `true` when all classes in the ObjectPool allow serialization
        pub const serializable: bool = all_classes_can_serialize;

        /// ever-increasing number which is used to allocate new object handles.
        objectCounter: u64,

        /// stores all alive objects. Removing elements from this
        /// requires to call `.object.destroyObject()`!
        objects: std.AutoHashMap(ObjectHandle, ManagedObject),

        /// Creates a new object pool, using `allocator` to handle hashmap allocations.
        pub fn init(allocator: *std.mem.Allocator) Self {
            return Self{
                .objectCounter = 0,
                .objects = std.AutoHashMap(ObjectHandle, ManagedObject).init(allocator),
            };
        }

        /// Destroys all objects in the pool, then releases all associated memory.
        /// Do not use the ObjectPool afterwards!
        pub fn deinit(self: *Self) void {
            var iter = self.objects.iterator();
            while (iter.next()) |obj| {
                obj.value.object.destroyObject();
            }
            self.objects.deinit();
            self.* = undefined;
        }

        // Serialization API

        /// Serializes the whole object pool into the `stream`.
        /// Stream layout: signature (u64), then per object: class index,
        /// handle (u64), class payload; terminated by maxInt(TypeIndex).
        pub fn serialize(self: Self, stream: anytype) !void {
            if (all_classes_can_serialize) {
                try stream.writeIntLittle(u64, pool_signature);
                var iter = self.objects.iterator();
                while (iter.next()) |entry| {
                    const obj = &entry.value;
                    var class = class_lut[obj.class_id];
                    try stream.writeIntLittle(TypeIndex, obj.class_id);
                    try stream.writeIntLittle(u64, @enumToInt(entry.key));
                    try class.serialize(OutputStream.from(&stream), obj.object);
                }
                // Sentinel value: marks the end of the object list.
                try stream.writeIntLittle(TypeIndex, std.math.maxInt(TypeIndex));
            } else {
                @compileError("This ObjectPool is not serializable!");
            }
        }

        /// Deserializes an object pool from `stream` and returns it.
        pub fn deserialize(allocator: *std.mem.Allocator, stream: anytype) !Self {
            if (all_classes_can_serialize) {
                var pool = init(allocator);
                errdefer pool.deinit();
                var signature = try stream.readIntLittle(u64);
                if (signature != pool_signature)
                    return error.InvalidStream;
                while (true) {
                    const type_index = try stream.readIntLittle(TypeIndex);
                    if (type_index == std.math.maxInt(TypeIndex))
                        break; // end of objects
                    if (type_index >= class_lut.len)
                        return error.InvalidStream;
                    const object_id = try stream.readIntLittle(u64);
                    // Keep the counter past every restored id so future handles are unique.
                    pool.objectCounter = std.math.max(object_id + 1, pool.objectCounter);
                    const gop = try pool.objects.getOrPut(@intToEnum(ObjectHandle, object_id));
                    if (gop.found_existing)
                        return error.InvalidStream;
                    const object = try class_lut[type_index].deserialize(allocator, InputStream.from(&stream));
                    gop.entry.value = ManagedObject{
                        .object = object,
                        .refcount = 0,
                        .manualRefcount = 0,
                        .class_id = type_index,
                    };
                }
                return pool;
            } else {
                @compileError("This ObjectPool is not serializable!");
            }
        }

        // Public API

        /// Inserts a new object into the pool and returns a handle to it.
        /// `object_ptr` must be a mutable pointer to the object itself.
        pub fn createObject(self: *Self, object_ptr: anytype) !ObjectHandle {
            const ObjectTypeInfo = @typeInfo(@TypeOf(object_ptr)).Pointer;
            if (ObjectTypeInfo.is_const)
                @compileError("Passing a const pointer to ObjectPool.createObject is not allowed!");

            // Calculate the index of the type:
            const type_index = inline for (classes) |class, index| {
                if (class == ObjectTypeInfo.child)
                    break index;
            } else @compileError("The type " ++ @typeName(ObjectTypeInfo.child) ++ " is not valid for this object pool. Add it to the class list in the type definition to allow creation.");

            var object = Object.init(object_ptr);
            self.objectCounter += 1;
            errdefer self.objectCounter -= 1;
            const handle = @intToEnum(ObjectHandle, self.objectCounter);
            try self.objects.putNoClobber(handle, ManagedObject{
                .object = object,
                .refcount = 0,
                .manualRefcount = 0,
                .class_id = type_index,
            });
            return handle;
        }

        /// Keeps the object from being garbage collected.
        /// To allow recollection, call `releaseObject`.
        pub fn retainObject(self: *Self, object: ObjectHandle) ObjectGetError!void {
            if (self.objects.getEntry(object)) |obj| {
                obj.value.manualRefcount += 1;
            } else {
                return error.InvalidObject;
            }
        }

        /// Removes a restrain from `retainObject` to re-allow garbage collection.
        pub fn releaseObject(self: *Self, object: ObjectHandle) ObjectGetError!void {
            if (self.objects.getEntry(object)) |obj| {
                obj.value.manualRefcount -= 1;
            } else {
                return error.InvalidObject;
            }
        }

        /// Destroys an object by external means. This will also invoke the object destructor.
        pub fn destroyObject(self: *Self, object: ObjectHandle) void {
            if (self.objects.remove(object)) |obj| {
                var copy = obj.value.object;
                copy.destroyObject();
            }
        }

        /// Returns if an object handle is still valid.
        pub fn isObjectValid(self: Self, object: ObjectHandle) bool {
            return if (self.objects.get(object)) |obj| true else false;
        }

        /// Gets the method of an object or `null` if the method does not exist.
        /// The returned `Function` is non-owned.
        pub fn getMethod(self: Self, object: ObjectHandle, name: []const u8) ObjectGetError!?Function {
            if (self.objects.get(object)) |obj| {
                return obj.object.getMethod(name);
            } else {
                return error.InvalidObject;
            }
        }

        // Garbage Collector API

        /// Sets all usage counters to zero.
        pub fn clearUsageCounters(self: *Self) void {
            var iter = self.objects.iterator();
            while (iter.next()) |obj| {
                obj.value.refcount = 0;
            }
        }

        /// Marks an object handle as used
        pub fn markUsed(self: *Self, object: ObjectHandle) ObjectGetError!void {
            if (self.objects.getEntry(object)) |obj| {
                obj.value.refcount += 1;
            } else {
                return error.InvalidObject;
            }
        }

        /// Walks through the value marks all referenced objects as used.
        pub fn walkValue(self: *Self, value: Value) ObjectGetError!void {
            switch (value) {
                .object => |oid| try self.markUsed(oid),
                .array => |arr| for (arr.contents) |val| {
                    try self.walkValue(val);
                },
                else => {},
            }
        }

        /// Walks through all values stored in an environment and marks all referenced objects as used.
        pub fn walkEnvironment(self: *Self, env: Environment) ObjectGetError!void {
            for (env.scriptGlobals) |glob| {
                try self.walkValue(glob);
            }
        }

        /// Walks through all values stored in a virtual machine and marks all referenced objects as used.
        pub fn walkVM(self: *Self, vm: VM) ObjectGetError!void {
            for (vm.stack.items) |val| {
                try self.walkValue(val);
            }
            for (vm.calls.items) |call| {
                for (call.locals) |local| {
                    try self.walkValue(local);
                }
            }
        }

        /// Removes and destroys all objects that are not marked as used.
        pub fn collectGarbage(self: *Self) void {
            // NOTE(review): this removes entries from the hash map while
            // iterating over it, which depends on the std.AutoHashMap of this
            // Zig version tolerating removal during iteration — verify.
            var iter = self.objects.iterator();
            while (iter.next()) |obj| {
                if (obj.value.refcount == 0 and obj.value.manualRefcount == 0) {
                    if (self.objects.remove(obj.key)) |kv| {
                        var temp_obj = kv.value.object;
                        temp_obj.destroyObject();
                    } else {
                        unreachable;
                    }

                    // Hack: Remove modification safety check,
                    // we want to mutate the HashMap!
                    // iter.initial_modification_count = iter.hm.modification_count;
                }
            }
        }

        // Interface API:

        /// Returns the non-generic interface for this object pool.
        /// Pass this to `Environment` or other LoLa components.
        pub fn interface(self: *Self) ObjectPoolInterface {
            const Impl = struct {
                const ErasedSelf = ObjectPoolInterface.ErasedSelf;

                fn cast(erased_self: *ErasedSelf) *Self {
                    return @ptrCast(*Self, @alignCast(@alignOf(Self), erased_self));
                }
                fn getMethod(erased_self: *ErasedSelf, handle: ObjectHandle, name: []const u8) ObjectGetError!?Function {
                    return cast(erased_self).getMethod(handle, name);
                }
                fn destroyObject(erased_self: *ErasedSelf, handle: ObjectHandle) void {
                    return cast(erased_self).destroyObject(handle);
                }
                fn isObjectValid(erased_self: *ErasedSelf, handle: ObjectHandle) bool {
                    return cast(erased_self).isObjectValid(handle);
                }
            };
            return ObjectPoolInterface{
                .self = @ptrCast(*ObjectPoolInterface.ErasedSelf, self),
                .destroyObjectFn = Impl.destroyObject,
                .getMethodFn = Impl.getMethod,
                .isObjectValidFn = Impl.isObjectValid,
            };
        }
    };
}

// Minimal class used by the tests below; records which callbacks were invoked.
const TestObject = struct {
    const Self = @This();

    got_method_query: bool = false,
    got_destroy_call: bool = false,
    was_serialized: bool = false,
    was_deserialized: bool = false,

    pub fn getMethod(self: *Self, name: []const u8) ?Function {
        self.got_method_query = true;
        return null;
    }

    pub fn destroyObject(self: *Self) void {
        self.got_destroy_call = true;
    }

    pub fn serializeObject(writer: OutputStream.Writer, object: *Self) !void {
        try writer.writeAll("test object");
        object.was_serialized = true;
    }

    // Deserialization hands out a pointer to this shared global instance.
    var deserialize_instance = Self{};

    pub fn deserializeObject(allocator: *std.mem.Allocator, reader: InputStream.Reader) !*Self {
        var buf: [11]u8 = undefined;
        try reader.readNoEof(&buf);
        std.testing.expectEqualStrings("test object", &buf);
        deserialize_instance.was_deserialized = true;
        return &deserialize_instance;
    }
};

const TestPool = ObjectPool([_]type{TestObject});

// Compile-time checks of the `serializable` flag for various class lists.
comptime {
    if (std.builtin.is_test) {
        {
            if (ObjectPool([_]type{}).serializable != false)
                @compileError("Empty ObjectPool is required to be unserializable!");
        }
        {
            if (TestPool.serializable != true)
                @compileError("TestPool is required to be serializable!");
        }
        {
            const Unserializable = struct {
                const Self = @This();
                pub fn getMethod(self: *Self, name: []const u8) ?Function {
                    unreachable;
                }
                pub fn destroyObject(self: *Self) void {
                    unreachable;
                }
            };

            if (ObjectPool([_]type{ TestObject, Unserializable }).serializable != false)
                @compileError("Unserializable detection doesn't work!");
        }
    }
}

test "Object" {
    var test_obj = TestObject{};
    var object = Object.init(&test_obj);

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(false, test_obj.got_method_query);

    _ = object.getMethod("irrelevant");

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(true, test_obj.got_method_query);

    object.destroyObject();

    std.testing.expectEqual(true, test_obj.got_destroy_call);
    std.testing.expectEqual(true, test_obj.got_method_query);
}

test "ObjectPool basic object create/destroy cycle" {
    var pool = TestPool.init(std.testing.allocator);
    defer pool.deinit();

    var test_obj = TestObject{};

    const handle = try pool.createObject(&test_obj);

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(false, test_obj.got_method_query);
    std.testing.expectEqual(true, pool.isObjectValid(handle));

    _ = try pool.getMethod(handle, "irrelevant");

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(true, test_obj.got_method_query);

    pool.destroyObject(handle);

    std.testing.expectEqual(true, test_obj.got_destroy_call);
    std.testing.expectEqual(true, test_obj.got_method_query);
    std.testing.expectEqual(false, pool.isObjectValid(handle));
}

test "ObjectPool automatic cleanup" {
    var pool = TestPool.init(std.testing.allocator);
    errdefer pool.deinit();

    var test_obj = TestObject{};

    const handle = try pool.createObject(&test_obj);

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(false, test_obj.got_method_query);
    std.testing.expectEqual(true, pool.isObjectValid(handle));

    pool.deinit();

    std.testing.expectEqual(true, test_obj.got_destroy_call);
    std.testing.expectEqual(false, test_obj.got_method_query);
}

test "ObjectPool garbage collection" {
    var pool = TestPool.init(std.testing.allocator);
    defer pool.deinit();

    var test_obj = TestObject{};

    const handle = try pool.createObject(&test_obj);

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(true, pool.isObjectValid(handle));

    // Prevent the object from being collected because it is marked as used
    pool.clearUsageCounters();
    try pool.markUsed(handle);
    pool.collectGarbage();

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(true, pool.isObjectValid(handle));

    // Prevent the object from being collected because it is marked as referenced
    try pool.retainObject(handle);
    pool.clearUsageCounters();
    pool.collectGarbage();
    try pool.releaseObject(handle);

    std.testing.expectEqual(false, test_obj.got_destroy_call);
    std.testing.expectEqual(true, pool.isObjectValid(handle));

    // Destroy the object by not marking it referenced at last
    pool.clearUsageCounters();
    pool.collectGarbage();

    std.testing.expectEqual(true, test_obj.got_destroy_call);
    std.testing.expectEqual(false, pool.isObjectValid(handle));
}

// TODO: Write tests for walkEnvironment and walkVM

test "ObjectPool serialization" {
    var backing_buffer: [1024]u8 = undefined;

    const serialized_id = blk: {
        var pool = TestPool.init(std.testing.allocator);
        defer pool.deinit();

        var test_obj = TestObject{};
        const id = try pool.createObject(&test_obj);

        std.testing.expectEqual(false, test_obj.was_serialized);

        var fbs = std.io.fixedBufferStream(&backing_buffer);
        try pool.serialize(fbs.writer());

        std.testing.expectEqual(true, test_obj.was_serialized);

        break :blk id;
    };
    {
        var fbs = std.io.fixedBufferStream(&backing_buffer);

        std.testing.expectEqual(false, TestObject.deserialize_instance.was_deserialized);

        var pool = try TestPool.deserialize(std.testing.allocator, fbs.reader());
        defer pool.deinit();

        std.testing.expectEqual(true, TestObject.deserialize_instance.was_deserialized);
        std.testing.expect(pool.isObjectValid(serialized_id));
    }
}
src/library/runtime/objects.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const Target = std.Target; const log = std.log.scoped(.codegen); const assert = std.debug.assert; const Module = @import("../Module.zig"); const Decl = Module.Decl; const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); const spec = @import("spirv/spec.zig"); const Opcode = spec.Opcode; const Word = spec.Word; const IdRef = spec.IdRef; const IdResult = spec.IdResult; const IdResultType = spec.IdResultType; const SpvModule = @import("spirv/Module.zig"); const SpvSection = @import("spirv/Section.zig"); const SpvType = @import("spirv/type.zig").Type; const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef); const IncomingBlock = struct { src_label_id: IdRef, break_value_id: IdRef, }; pub const BlockMap = std.AutoHashMapUnmanaged(Air.Inst.Index, struct { label_id: IdRef, incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock), }); /// This structure is used to compile a declaration, and contains all relevant meta-information to deal with that. pub const DeclGen = struct { /// The Zig module that we are generating decls for. module: *Module, /// The SPIR-V module code should be put in. spv: *SpvModule, /// The decl we are currently generating code for. decl: *Decl, /// The intermediate code of the declaration we are currently generating. Note: If /// the declaration is not a function, this value will be undefined! air: Air, /// The liveness analysis of the intermediate code for the declaration we are currently generating. /// Note: If the declaration is not a function, this value will be undefined! liveness: Liveness, /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. args: std.ArrayListUnmanaged(IdRef) = .{}, /// A counter to keep track of how many `arg` instructions we've seen yet. 
next_arg_index: u32, /// A map keeping track of which instruction generated which result-id. inst_results: InstMap = .{}, /// We need to keep track of result ids for block labels, as well as the 'incoming' /// blocks for a block. blocks: BlockMap = .{}, /// The label of the SPIR-V block we are currently generating. current_block_label_id: IdRef, /// The actual instructions for this function. We need to declare all locals in /// the first block, and because we don't know which locals there are going to be, /// we're just going to generate everything after the locals-section in this array. /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the /// initial OpLabel. These will be generated into spv.sections.functions directly. code: SpvSection = .{}, /// If `gen` returned `Error.CodegenFail`, this contains an explanatory message. /// Memory is owned by `module.gpa`. error_msg: ?*Module.ErrorMsg, /// Possible errors the `gen` function may return. const Error = error{ CodegenFail, OutOfMemory }; /// This structure is used to return information about a type typically used for /// arithmetic operations. These types may either be integers, floats, or a vector /// of these. Most scalar operations also work on vectors, so we can easily represent /// those as arithmetic types. If the type is a scalar, 'inner type' refers to the /// scalar type. Otherwise, if its a vector, it refers to the vector's element type. const ArithmeticTypeInfo = struct { /// A classification of the inner type. const Class = enum { /// A boolean. bool, /// A regular, **native**, integer. /// This is only returned when the backend supports this int as a native type (when /// the relevant capability is enabled). integer, /// A regular float. These are all required to be natively supported. Floating points /// for which the relevant capability is not enabled are not emulated. float, /// An integer of a 'strange' size (which' bit size is not the same as its backing /// type. 
**Note**: this may **also** include power-of-2 integers for which the /// relevant capability is not enabled), but still within the limits of the largest /// natively supported integer type. strange_integer, /// An integer with more bits than the largest natively supported integer type. composite_integer, }; /// The number of bits in the inner type. /// This is the actual number of bits of the type, not the size of the backing integer. bits: u16, /// Whether the type is a vector. is_vector: bool, /// Whether the inner type is signed. Only relevant for integers. signedness: std.builtin.Signedness, /// A classification of the inner type. These scenarios /// will all have to be handled slightly different. class: Class, }; /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, /// only set when `gen` is called. pub fn init(module: *Module, spv: *SpvModule) DeclGen { return .{ .module = module, .spv = spv, .decl = undefined, .air = undefined, .liveness = undefined, .next_arg_index = undefined, .current_block_label_id = undefined, .error_msg = undefined, }; } /// Generate the code for `decl`. If a reportable error occurred during code generation, /// a message is returned by this function. Callee owns the memory. If this function /// returns such a reportable error, it is valid to be called again for a different decl. pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg { // Reset internal resources, we don't want to re-allocate these. self.decl = decl; self.air = air; self.liveness = liveness; self.args.items.len = 0; self.next_arg_index = 0; self.inst_results.clearRetainingCapacity(); self.blocks.clearRetainingCapacity(); self.current_block_label_id = undefined; self.code.reset(); self.error_msg = null; self.genDecl() catch |err| switch (err) { error.CodegenFail => return self.error_msg, else => |others| return others, }; return null; } /// Free resources owned by the DeclGen. 
/// Free resources owned by the DeclGen (argument list, result map, block map,
/// and the code section); memory comes from `self.spv.gpa`.
pub fn deinit(self: *DeclGen) void {
    self.args.deinit(self.spv.gpa);
    self.inst_results.deinit(self.spv.gpa);
    self.blocks.deinit(self.spv.gpa);
    self.code.deinit(self.spv.gpa);
}

/// Return the target which we are currently compiling for.
fn getTarget(self: *DeclGen) std.Target {
    return self.module.getTarget();
}

/// Record a codegen error message for the current decl and return CodegenFail.
/// Must only be called once per `gen` invocation (asserts error_msg is unset).
fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
    @setCold(true);
    const src: LazySrcLoc = .{ .node_offset = 0 };
    const src_loc = src.toSrcLoc(self.decl);
    assert(self.error_msg == null);
    self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
    return error.CodegenFail;
}

/// Like `fail`, but prefixes the message with "TODO (SPIR-V): " for
/// not-yet-implemented features.
fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
    @setCold(true);
    const src: LazySrcLoc = .{ .node_offset = 0 };
    const src_loc = src.toSrcLoc(self.decl);
    assert(self.error_msg == null);
    self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "TODO (SPIR-V): " ++ format, args);
    return error.CodegenFail;
}

/// Fetch the result-id for a previously generated instruction or constant.
/// Comptime-known values are materialized as constants on the fly.
fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
    if (self.air.value(inst)) |val| {
        return self.genConstant(self.air.typeOf(inst), val);
    }
    const index = Air.refToIndex(inst).?;
    return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
}

/// Start a new SPIR-V block, Emits the label of the new block, and stores which
/// block we are currently generating.
/// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to
/// keep track of the previous block.
fn beginSpvBlock(self: *DeclGen, label_id: IdResult) !void {
    try self.code.emit(self.spv.gpa, .OpLabel, .{ .id_result = label_id });
    self.current_block_label_id = label_id.toRef();
}

/// SPIR-V requires enabling specific integer sizes through capabilities, and so if they are not enabled, we need
/// to emulate them in other instructions/types.
/// This function returns, given an integer bit width (signed or unsigned, sign
/// included), the width of the underlying type which represents it, given the enabled features for the current target.
/// If the result is `null`, the largest type the target platform supports natively is not able to perform computations using
/// that size. In this case, multiple elements of the largest type should be used.
/// The backing type will be chosen as the smallest supported integer larger or equal to it in number of bits.
/// The result is valid to be used with OpTypeInt.
/// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits).
/// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers).
/// TODO: Should the result of this function be cached?
fn backingIntBits(self: *DeclGen, bits: u16) ?u16 {
    const target = self.getTarget();

    // The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
    assert(bits != 0);

    // 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
    // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
    const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
        .{ .bits = 8, .feature = .Int8 },
        .{ .bits = 16, .feature = .Int16 },
        .{ .bits = 32, .feature = null },
        .{ .bits = 64, .feature = .Int64 },
    };

    // Pick the smallest candidate that both fits `bits` and is enabled.
    for (ints) |int| {
        const has_feature = if (int.feature) |feature|
            Target.spirv.featureSetHas(target.cpu.features, feature)
        else
            true;

        if (bits <= int.bits and has_feature) {
            return int.bits;
        }
    }

    return null;
}

/// Return the amount of bits in the largest supported integer type. This is either 32 (always supported), or 64 (if
/// the Int64 capability is enabled).
/// Note: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits).
/// In theory that could also be used, but since the spec says that it only guarantees support up to 32-bit ints there
/// is no way of knowing whether those are actually supported.
/// TODO: Maybe this should be cached?
fn largestSupportedIntBits(self: *DeclGen) u16 {
    const target = self.getTarget();
    return if (Target.spirv.featureSetHas(target.cpu.features, .Int64))
        64
    else
        32;
}

/// Checks whether the type is "composite int", an integer consisting of multiple native integers. These are represented by
/// arrays of largestSupportedIntBits().
/// Asserts `ty` is an integer.
fn isCompositeInt(self: *DeclGen, ty: Type) bool {
    // Fix: `backingIntBits` expects the integer's bit count (u16), not the Type
    // itself. Extract the bit width first, mirroring `arithmeticTypeInfo` below.
    return self.backingIntBits(ty.intInfo(self.getTarget()).bits) == null;
}

/// Classifies `ty` for arithmetic codegen: bool, float, native integer,
/// "strange" (non-native-width) integer, or composite (multi-word) integer.
fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo {
    const target = self.getTarget();
    return switch (ty.zigTypeTag()) {
        .Bool => ArithmeticTypeInfo{
            .bits = 1, // Doesn't matter for this class.
            .is_vector = false,
            .signedness = .unsigned, // Technically, but doesn't matter for this class.
            .class = .bool,
        },
        .Float => ArithmeticTypeInfo{
            .bits = ty.floatBits(target),
            .is_vector = false,
            .signedness = .signed, // Technically, but doesn't matter for this class.
            .class = .float,
        },
        .Int => blk: {
            const int_info = ty.intInfo(target);
            // TODO: Maybe it's useful to also return this value.
            const maybe_backing_bits = self.backingIntBits(int_info.bits);
            break :blk ArithmeticTypeInfo{
                .bits = int_info.bits,
                .is_vector = false,
                .signedness = int_info.signedness,
                .class = if (maybe_backing_bits) |backing_bits|
                    if (backing_bits == int_info.bits)
                        ArithmeticTypeInfo.Class.integer
                    else
                        ArithmeticTypeInfo.Class.strange_integer
                else
                    .composite_integer,
            };
        },
        // As of yet, there is no vector support in the self-hosted compiler.
        .Vector => self.todo("implement arithmeticTypeInfo for Vector", .{}),
        // TODO: For which types is this the case?
        else => self.todo("implement arithmeticTypeInfo for {}", .{ty.fmtDebug()}),
    };
}

/// Generate a constant representing `val`.
/// TODO: Deduplication?
fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!IdRef {
    const target = self.getTarget();
    const section = &self.spv.sections.types_globals_constants;
    const result_id = self.spv.allocId();
    const result_type_id = try self.resolveTypeId(ty);

    // Undefined values of any type lower to a single OpUndef.
    if (val.isUndef()) {
        try section.emit(self.spv.gpa, .OpUndef, .{ .id_result_type = result_type_id, .id_result = result_id });
        return result_id.toRef();
    }

    switch (ty.zigTypeTag()) {
        .Int => {
            const int_info = ty.intInfo(target);
            const backing_bits = self.backingIntBits(int_info.bits) orelse {
                // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
                return self.todo("implement composite int constants for {}", .{ty.fmtDebug()});
            };

            // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
            // SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this
            // might need to be updated.
            assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));

            // Note, value is required to be sign-extended, so we don't need to mask off the upper bits.
            // See https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html#Literal
            // Fixed: this was `var` but is never mutated — const by default.
            // NOTE(review): toSignedInt takes no `target` here while toUnsignedInt does;
            // this mirrors the Value API at this commit — confirm against current Value.
            const int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(target);

            const value: spec.LiteralContextDependentNumber = switch (backing_bits) {
                1...32 => .{ .uint32 = @truncate(u32, int_bits) },
                33...64 => .{ .uint64 = int_bits },
                else => unreachable,
            };

            try section.emit(self.spv.gpa, .OpConstant, .{
                .id_result_type = result_type_id,
                .id_result = result_id,
                .value = value,
            });
        },
        .Bool => {
            const operands = .{ .id_result_type = result_type_id, .id_result = result_id };
            if (val.toBool()) {
                try section.emit(self.spv.gpa, .OpConstantTrue, operands);
            } else {
                try section.emit(self.spv.gpa, .OpConstantFalse, operands);
            }
        },
        .Float => {
            // At this point we are guaranteed that the target floating point type is supported, otherwise the function
            // would have exited at resolveTypeId(ty).
            const value: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
                // Prevent upcasting to f32 by bitcasting and writing as a uint32.
                16 => .{ .uint32 = @bitCast(u16, val.toFloat(f16)) },
                32 => .{ .float32 = val.toFloat(f32) },
                64 => .{ .float64 = val.toFloat(f64) },
                128 => unreachable, // Filtered out in the call to resolveTypeId.
                // TODO: Insert case for long double when the layout for that is determined?
                else => unreachable,
            };

            try section.emit(self.spv.gpa, .OpConstant, .{
                .id_result_type = result_type_id,
                .id_result = result_id,
                .value = value,
            });
        },
        .Void => unreachable,
        else => return self.todo("constant generation of type {}", .{ty.fmtDebug()}),
    }

    return result_id.toRef();
}

/// Turn a Zig type into a SPIR-V Type, and return its type result-id.
fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType {
    return self.spv.typeResultId(try self.resolveType(ty));
}

/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
fn resolveType(self: *DeclGen, ty: Type) Error!SpvType.Ref {
    const target = self.getTarget();
    return switch (ty.zigTypeTag()) {
        .Void => try self.spv.resolveType(SpvType.initTag(.void)),
        .Bool => blk: {
            // TODO: SPIR-V booleans are opaque. For local variables this is fine, but for structs
            // members we want to use integer types instead.
            break :blk try self.spv.resolveType(SpvType.initTag(.bool));
        },
        .Int => blk: {
            const int_info = ty.intInfo(target);
            const backing_bits = self.backingIntBits(int_info.bits) orelse {
                // TODO: Integers too big for any native type are represented as "composite integers":
                // An array of largestSupportedIntBits.
                return self.todo("Implement composite int type {}", .{ty.fmtDebug()});
            };
            // Payloads live in the SPIR-V module arena; resolveType deduplicates by value.
            const payload = try self.spv.arena.create(SpvType.Payload.Int);
            payload.* = .{
                .width = backing_bits,
                .signedness = int_info.signedness,
            };
            break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
        },
        .Float => blk: {
            // We can (and want) not really emulate floating points with other floating point types like with the integer types,
            // so if the float is not supported, just return an error.
            const bits = ty.floatBits(target);
            const supported = switch (bits) {
                16 => Target.spirv.featureSetHas(target.cpu.features, .Float16),
                // 32-bit floats are always supported (see spec, 2.16.1, Data rules).
                32 => true,
                64 => Target.spirv.featureSetHas(target.cpu.features, .Float64),
                else => false,
            };
            if (!supported) {
                return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
            }
            const payload = try self.spv.arena.create(SpvType.Payload.Float);
            payload.* = .{
                .width = bits,
            };
            break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
        },
        .Fn => blk: {
            // We only support zig-calling-convention functions, no varargs.
            if (ty.fnCallingConvention() != .Unspecified)
                return self.fail("Unsupported calling convention for SPIR-V", .{});
            if (ty.fnIsVarArgs())
                return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
            // Resolve each parameter type recursively, then the return type.
            const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
            for (param_types) |*param, i| {
                param.* = try self.resolveType(ty.fnParamType(i));
            }
            const return_type = try self.resolveType(ty.fnReturnType());
            const payload = try self.spv.arena.create(SpvType.Payload.Function);
            payload.* = .{ .return_type = return_type, .parameters = param_types };
            break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
        },
        .Pointer => {
            // This type can now be properly implemented, but we still need to implement the storage classes as proper address spaces.
            return self.todo("Implement type Pointer properly", .{});
        },
        .Vector => {
            // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
            // which work on them), so simply use those.
            // Note: SPIR-V vectors only support bools, ints and floats, so pointer vectors need to be supported another way.
            // "composite integers" (larger than the largest supported native type) can probably be represented by an array of vectors.
            // TODO: The SPIR-V spec mentions that vector sizes may be quite restricted! look into which we can use, and whether OpTypeVector
            // is adequate at all for this.
            // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems.
            return self.todo("Implement type Vector", .{});
        },
        .Null,
        .Undefined,
        .EnumLiteral,
        .ComptimeFloat,
        .ComptimeInt,
        .Type,
        => unreachable, // Must be comptime.
        .BoundFn => unreachable, // this type will be deleted from the language.
        else => |tag| return self.todo("Implement zig type '{}'", .{tag}),
    };
}

/// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that.
/// TODO: The result of this needs to be cached.
fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !IdResultType {
    assert(ty.zigTypeTag() == .Pointer);

    const result_id = self.spv.allocId();

    // TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types
    // if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
    // These also relates to the pointer's address space.
    const child_id = try self.resolveTypeId(ty.elemType());

    try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
        .id_result = result_id,
        .storage_class = storage_class,
        .type = child_id.toRef(),
    });

    return result_id.toResultType();
}

/// Generates the SPIR-V code for `self.decl`. Currently only function declarations
/// are lowered; anything else is silently skipped (see the TODO in the else branch).
fn genDecl(self: *DeclGen) !void {
    const decl = self.decl;
    const result_id = decl.fn_link.spirv.id;

    if (decl.val.castTag(.function)) |_| {
        assert(decl.ty.zigTypeTag() == .Fn);
        const prototype_id = try self.resolveTypeId(decl.ty);
        try self.spv.sections.functions.emit(self.spv.gpa, .OpFunction, .{
            .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
            .id_result = result_id,
            .function_control = .{}, // TODO: We can set inline here if the type requires it.
            .function_type = prototype_id.toRef(),
        });

        const params = decl.ty.fnParamLen();
        var i: usize = 0;

        // One OpFunctionParameter per Zig parameter; the resulting ids are stashed
        // in self.args so that `arg` AIR instructions can look them up by index.
        try self.args.ensureUnusedCapacity(self.spv.gpa, params);
        while (i < params) : (i += 1) {
            const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i));
            const arg_result_id = self.spv.allocId();
            try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionParameter, .{
                .id_result_type = param_type_id,
                .id_result = arg_result_id,
            });
            self.args.appendAssumeCapacity(arg_result_id.toRef());
        }

        // TODO: This could probably be done in a better way...
        const root_block_id = self.spv.allocId();

        // We need to generate the label directly in the functions section here because we're going to write the local variables after
        // here. Since we're not generating in self.code, we're just going to bypass self.beginSpvBlock here.
        try self.spv.sections.functions.emit(self.spv.gpa, .OpLabel, .{
            .id_result = root_block_id,
        });
        self.current_block_label_id = root_block_id.toRef();

        const main_body = self.air.getMainBody();
        try self.genBody(main_body);

        // Append the actual code into the functions section.
        try self.spv.sections.functions.append(self.spv.gpa, self.code);
        try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionEnd, {});
    } else {
        // TODO
        // return self.todo("generate decl type {}", .{decl.ty.zigTypeTag()});
    }
}

/// Lowers every instruction of an AIR body in order.
fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void {
    for (body) |inst| {
        try self.genInst(inst);
    }
}

/// Dispatches a single AIR instruction to its lowering routine. Instructions that
/// produce a value have their result id recorded in inst_results; control-flow
/// instructions return early and record nothing.
fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
    const air_tags = self.air.instructions.items(.tag);
    const result_id = switch (air_tags[inst]) {
        // zig fmt: off
        .add, .addwrap => try self.airArithOp(inst, .OpFAdd, .OpIAdd, .OpIAdd),
        .sub, .subwrap => try self.airArithOp(inst, .OpFSub, .OpISub, .OpISub),
        .mul, .mulwrap => try self.airArithOp(inst, .OpFMul, .OpIMul, .OpIMul),

        .bit_and  => try self.airBinOpSimple(inst, .OpBitwiseAnd),
        .bit_or   => try self.airBinOpSimple(inst, .OpBitwiseOr),
        .xor      => try self.airBinOpSimple(inst, .OpBitwiseXor),
        .bool_and => try self.airBinOpSimple(inst, .OpLogicalAnd),
        .bool_or  => try self.airBinOpSimple(inst, .OpLogicalOr),

        .not => try self.airNot(inst),

        .cmp_eq  => try self.airCmp(inst, .OpFOrdEqual, .OpLogicalEqual, .OpIEqual),
        .cmp_neq => try self.airCmp(inst, .OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual),
        .cmp_gt  => try self.airCmp(inst, .OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan),
        .cmp_gte => try self.airCmp(inst, .OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual),
        .cmp_lt  => try self.airCmp(inst, .OpFOrdLessThan, .OpSLessThan, .OpULessThan),
        .cmp_lte => try self.airCmp(inst, .OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual),

        .arg   => self.airArg(),
        .alloc => try self.airAlloc(inst),
        .block => (try self.airBlock(inst)) orelse return,
        .load  => try self.airLoad(inst),

        .br         => return self.airBr(inst),
        .breakpoint => return,
        .cond_br    => return self.airCondBr(inst),
        .constant   => unreachable,
        .dbg_stmt   => return self.airDbgStmt(inst),
        .loop       => return self.airLoop(inst),
        .ret        => return self.airRet(inst),
        .store      => return self.airStore(inst),
        .unreach    => return self.airUnreach(),
        // zig fmt: on

        else => |tag| return self.todo("implement AIR tag {s}", .{
            @tagName(tag),
        }),
    };

    try self.inst_results.putNoClobber(self.spv.gpa, inst, result_id);
}

/// Lowers a binary AIR instruction whose SPIR-V opcode does not depend on signedness.
fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, comptime opcode: Opcode) !IdRef {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs_id = try self.resolve(bin_op.lhs);
    const rhs_id = try self.resolve(bin_op.rhs);
    const result_id = self.spv.allocId();
    const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
    try self.code.emit(self.spv.gpa, opcode, .{
        .id_result_type = result_type_id,
        .id_result = result_id,
        .operand_1 = lhs_id,
        .operand_2 = rhs_id,
    });
    return result_id.toRef();
}

/// Lowers an arithmetic AIR instruction, selecting the float (`fop`), signed-int
/// (`sop`) or unsigned-int (`uop`) SPIR-V opcode based on the operand class.
fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) !IdRef {
    // LHS and RHS are guaranteed to have the same type, and AIR guarantees
    // the result to be the same as the LHS and RHS, which matches SPIR-V.
    const ty = self.air.typeOfIndex(inst);
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs_id = try self.resolve(bin_op.lhs);
    const rhs_id = try self.resolve(bin_op.rhs);

    const result_id = self.spv.allocId();
    const result_type_id = try self.resolveTypeId(ty);

    const target = self.getTarget();
    assert(self.air.typeOf(bin_op.lhs).eql(ty, target));
    assert(self.air.typeOf(bin_op.rhs).eql(ty, target));

    // Binary operations are generally applicable to both scalar and vector operations
    // in SPIR-V, but int and float versions of operations require different opcodes.
    const info = try self.arithmeticTypeInfo(ty);

    const opcode_index: usize = switch (info.class) {
        .composite_integer => {
            return self.todo("binary operations for composite integers", .{});
        },
        .strange_integer => {
            return self.todo("binary operations for strange integers", .{});
        },
        .integer => switch (info.signedness) {
            .signed => @as(usize, 1),
            .unsigned => @as(usize, 2),
        },
        .float => 0,
        else => unreachable,
    };

    const operands = .{
        .id_result_type = result_type_id,
        .id_result = result_id,
        .operand_1 = lhs_id,
        .operand_2 = rhs_id,
    };

    switch (opcode_index) {
        0 => try self.code.emit(self.spv.gpa, fop, operands),
        1 => try self.code.emit(self.spv.gpa, sop, operands),
        2 => try self.code.emit(self.spv.gpa, uop, operands),
        else => unreachable,
    }
    // TODO: Trap on overflow? Probably going to be annoying.
    // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.

    return result_id.toRef();
}

/// Lowers a comparison AIR instruction; result type is always bool. Opcode choice
/// mirrors airArithOp, with bools mapped onto the "signed" (`sop`) slot.
fn airCmp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) !IdRef {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const lhs_id = try self.resolve(bin_op.lhs);
    const rhs_id = try self.resolve(bin_op.rhs);
    const result_id = self.spv.allocId();
    const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
    const op_ty = self.air.typeOf(bin_op.lhs);
    assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.getTarget()));

    // Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
    // but int and float versions of operations require different opcodes.
    const info = try self.arithmeticTypeInfo(op_ty);

    const opcode_index: usize = switch (info.class) {
        .composite_integer => {
            return self.todo("binary operations for composite integers", .{});
        },
        .strange_integer => {
            return self.todo("comparison for strange integers", .{});
        },
        .float => 0,
        .bool => 1,
        .integer => switch (info.signedness) {
            .signed => @as(usize, 1),
            .unsigned => @as(usize, 2),
        },
    };

    const operands = .{
        .id_result_type = result_type_id,
        .id_result = result_id,
        .operand_1 = lhs_id,
        .operand_2 = rhs_id,
    };

    switch (opcode_index) {
        0 => try self.code.emit(self.spv.gpa, fop, operands),
        1 => try self.code.emit(self.spv.gpa, sop, operands),
        2 => try self.code.emit(self.spv.gpa, uop, operands),
        else => unreachable,
    }

    return result_id.toRef();
}

/// Lowers boolean negation via OpLogicalNot.
fn airNot(self: *DeclGen, inst: Air.Inst.Index) !IdRef {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand_id = try self.resolve(ty_op.operand);
    const result_id = self.spv.allocId();
    const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
    try self.code.emit(self.spv.gpa, .OpLogicalNot, .{
        .id_result_type = result_type_id,
        .id_result = result_id,
        .operand = operand_id,
    });
    return result_id.toRef();
}

/// Lowers a stack allocation as an OpVariable in the Function storage class.
fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !IdRef {
    const ty = self.air.typeOfIndex(inst);
    const storage_class = spec.StorageClass.Function;
    const result_type_id = try self.genPointerType(ty, storage_class);
    const result_id = self.spv.allocId();

    // Rather than generating into code here, we're just going to generate directly into the functions section so that
    // variable declarations appear in the first block of the function.
    try self.spv.sections.functions.emit(self.spv.gpa, .OpVariable, .{
        .id_result_type = result_type_id,
        .id_result = result_id,
        .storage_class = storage_class,
    });

    return result_id.toRef();
}

/// Returns the OpFunctionParameter id for the next `arg` instruction, in order.
fn airArg(self: *DeclGen) IdRef {
    defer self.next_arg_index += 1;
    return self.args.items[self.next_arg_index];
}

fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
    // In AIR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and
    // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up
    // the current block by first generating the code of the block, then a label, and then generate the rest of the current
    // ir.Block in a different SPIR-V block.

    const label_id = self.spv.allocId();

    // 4 chosen as arbitrary initial capacity.
    var incoming_blocks = try std.ArrayListUnmanaged(IncomingBlock).initCapacity(self.spv.gpa, 4);

    try self.blocks.putNoClobber(self.spv.gpa, inst, .{
        .label_id = label_id.toRef(),
        .incoming_blocks = &incoming_blocks,
    });
    defer {
        assert(self.blocks.remove(inst));
        incoming_blocks.deinit(self.spv.gpa);
    }

    const ty = self.air.typeOfIndex(inst);
    const inst_datas = self.air.instructions.items(.data);
    const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
    const body = self.air.extra[extra.end..][0..extra.data.body_len];

    try self.genBody(body);
    try self.beginSpvBlock(label_id);

    // If this block didn't produce a value, simply return here.
    if (!ty.hasRuntimeBits())
        return null;

    // Combine the result from the blocks using the Phi instruction.

    const result_id = self.spv.allocId();

    // TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
    // are not allowed to be created from a phi node, and throw an error for those. For now, resolveTypeId already throws
    // an error for pointers.
    const result_type_id = try self.resolveTypeId(ty);

    try self.code.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
    // Fixed: the word count reserved above includes the result type and result id,
    // but they were never written (the result_type_id was discarded), producing a
    // malformed OpPhi. Per the SPIR-V spec, OpPhi operands are:
    // result type, result, then (variable, parent) pairs.
    self.code.writeOperand(spec.IdResultType, result_type_id);
    self.code.writeOperand(spec.IdRef, result_id.toRef());

    for (incoming_blocks.items) |incoming| {
        self.code.writeOperand(spec.PairIdRefIdRef, .{ incoming.break_value_id, incoming.src_label_id });
    }

    return result_id.toRef();
}

/// Lowers a break out of an AIR block: record the incoming (value, source-block)
/// pair for the target block's phi, then branch to its merge label.
fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
    const br = self.air.instructions.items(.data)[inst].br;
    const block = self.blocks.get(br.block_inst).?;
    const operand_ty = self.air.typeOf(br.operand);

    if (operand_ty.hasRuntimeBits()) {
        const operand_id = try self.resolve(br.operand);
        // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
        try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
    }

    try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = block.label_id });
}

fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
    const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len];
    const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len];
    const condition_id = try self.resolve(pl_op.operand);

    // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
    const then_label_id = self.spv.allocId();
    const else_label_id = self.spv.allocId();

    // TODO: We can generate OpSelectionMerge here if we know the target block that both of these will resolve to,
    // but i don't know if those will always resolve to the same block.

    try self.code.emit(self.spv.gpa, .OpBranchConditional, .{
        .condition = condition_id,
        .true_label = then_label_id.toRef(),
        .false_label = else_label_id.toRef(),
    });

    try self.beginSpvBlock(then_label_id);
    try self.genBody(then_body);
    try self.beginSpvBlock(else_label_id);
    try self.genBody(else_body);
}

/// Emits an OpLine debug annotation for the current source location.
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
    const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
    const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
    try self.code.emit(self.spv.gpa, .OpLine, .{
        .file = src_fname_id,
        .line = dbg_stmt.line,
        .column = dbg_stmt.column,
    });
}

fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !IdRef {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const operand_id = try self.resolve(ty_op.operand);
    const ty = self.air.typeOfIndex(inst);

    const result_type_id = try self.resolveTypeId(ty);
    const result_id = self.spv.allocId();

    // NOTE(review): `ty` is the loaded *result* type, yet isVolatilePtr is queried on
    // it rather than on the operand's pointer type — confirm this is intended.
    const access = spec.MemoryAccess.Extended{
        .Volatile = ty.isVolatilePtr(),
    };

    try self.code.emit(self.spv.gpa, .OpLoad, .{
        .id_result_type = result_type_id,
        .id_result = result_id,
        .pointer = operand_id,
        .memory_access = access,
    });

    return result_id.toRef();
}

fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const loop = self.air.extraData(Air.Block, ty_pl.payload);
    const body = self.air.extra[loop.end..][0..loop.data.body_len];
    const loop_label_id = self.spv.allocId();

    // Jump to the loop entry point
    try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() });

    // TODO: Look into OpLoopMerge.
    try self.beginSpvBlock(loop_label_id);
    try self.genBody(body);

    // AIR loops have no exit condition of their own; jump back unconditionally.
    try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() });
}

fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
    const operand = self.air.instructions.items(.data)[inst].un_op;
    const operand_ty = self.air.typeOf(operand);
    if (operand_ty.hasRuntimeBits()) {
        const operand_id = try self.resolve(operand);
        try self.code.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
    } else {
        try self.code.emit(self.spv.gpa, .OpReturn, {});
    }
}

fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const dst_ptr_id = try self.resolve(bin_op.lhs);
    const src_val_id = try self.resolve(bin_op.rhs);
    const lhs_ty = self.air.typeOf(bin_op.lhs);

    const access = spec.MemoryAccess.Extended{
        .Volatile = lhs_ty.isVolatilePtr(),
    };

    try self.code.emit(self.spv.gpa, .OpStore, .{
        .pointer = dst_ptr_id,
        .object = src_val_id,
        .memory_access = access,
    });
}

fn airUnreach(self: *DeclGen) !void {
    try self.code.emit(self.spv.gpa, .OpUnreachable, {});
}
};
src/codegen/spirv.zig
const std = @import("std");
const fs = std.fs;
const io = std.io;
const info = std.log.info;
const print = std.debug.print;
const fmt = std.fmt;

const utils = @import("utils.zig");

var gpa = std.heap.GeneralPurposeAllocator(.{}){};

/// Builds a bitmask of `line.len` bits with a 1 in every position where `line`
/// contains the character `allow` (the first character maps to the highest bit).
fn parseMask(line: []const u8, allow: u8) usize {
    var res: usize = 0;
    const one: usize = 1;
    // i could parse this with std.fmt but that would mean allocations for replace
    // Fixed: the index capture `i` was unused; iterate over values only.
    for (line) |tok| {
        res <<= 1;
        if (tok != allow) {
            continue;
        }
        res |= one;
    }
    return res;
}

/// AoC 2020 day 14 machine state: the three current bitmasks plus the sparse
/// memory map. `p2_mode` selects between part-1 (value masking) and part-2
/// (address decoding) semantics.
const Memory = struct {
    allo: *std.mem.Allocator,
    mem: std.AutoHashMap(usize, usize),
    ones_mask: usize,  // bits forced to 1
    zeros_mask: usize, // bits forced to 0 (part 1 only)
    float_mask: usize, // floating 'X' bits (part 2 only)
    p2_mode: bool,

    pub fn init(allo: *std.mem.Allocator, p2_mode: bool) Memory {
        return Memory{
            .allo = allo,
            .mem = std.AutoHashMap(usize, usize).init(allo),
            .ones_mask = 0x00,
            .zeros_mask = 0x00,
            .float_mask = 0x00,
            .p2_mode = p2_mode,
        };
    }

    pub fn deinit(self: *Memory) void {
        self.mem.deinit();
    }

    /// Dispatches an input line to the mask or mem handler.
    pub fn doLine(self: *Memory, line: []const u8) void {
        const is_mask = std.mem.indexOf(u8, line, "mask");
        // Fixed: capture `_val` was unused; test for presence directly.
        if (is_mask != null) {
            self.doMaskLine(line);
        } else {
            self.doMemLine(line);
        }
    }

    /// Parses a "mask = ..." line into the three bitmasks.
    fn doMaskLine(self: *Memory, line: []const u8) void {
        var tokens = std.mem.tokenize(line, "mask =");
        const mask = tokens.next() orelse unreachable;
        self.zeros_mask = parseMask(mask, '0');
        self.ones_mask = parseMask(mask, '1');
        self.float_mask = parseMask(mask, 'X');
    }

    /// Parses a "mem[addr] = val" line and applies the part-appropriate write.
    fn doMemLine(self: *Memory, line: []const u8) void {
        var tokens = std.mem.tokenize(line, "mem[] =");
        const addr = fmt.parseInt(usize, tokens.next() orelse unreachable, 10) catch unreachable;
        const val = fmt.parseInt(usize, tokens.next() orelse unreachable, 10) catch unreachable;
        if (!self.p2_mode) {
            self.doP1Mem(addr, val);
        } else {
            self.doP2Mem(addr, val);
        }
    }

    /// Part 1: the mask rewrites the value; address is used as-is.
    fn doP1Mem(self: *Memory, addr: usize, val: usize) void {
        const ones_masked = val | self.ones_mask;
        const zeros_masked = ones_masked & ~self.zeros_mask;
        _ = self.mem.put(addr, zeros_masked) catch unreachable;
    }

    /// Part 2: the mask rewrites the address; every combination of the floating
    /// bits produces one write of `val`.
    fn doP2Mem(self: *Memory, addr: usize, val: usize) void {
        const float_bits_cnt = @intCast(u6, @popCount(usize, self.float_mask));
        const one: usize = 1;
        const upper: usize = one << float_bits_cnt;
        // Enumerate all 2^n assignments of the n floating bits; `i`'s low bits
        // supply the value of each floating position in turn.
        var i: usize = 0;
        while (i < upper) : (i += 1) {
            var addr_tweaked: usize = addr;
            addr_tweaked |= self.ones_mask;
            var mod_mask = self.float_mask;
            var long: usize = 0;  // bit position within the 36-bit address
            var short: usize = 0; // index among the floating bits only
            while (true) : (long += 1) {
                const shifted_mask = mod_mask >> @intCast(u6, long);
                if (shifted_mask == 0) {
                    // all mask bits set
                    break;
                }
                if (shifted_mask & 0x01 == 1) {
                    const bit = i >> @intCast(u6, short) & 0x01;
                    const bit_mask = one << @intCast(u6, long);
                    if (bit == 0) {
                        addr_tweaked &= ~(one << @intCast(u6, long));
                    } else {
                        addr_tweaked |= bit_mask;
                    }
                    short += 1;
                }
            }
            self.mem.put(addr_tweaked, val) catch unreachable;
        }
    }

    /// Sums all stored memory values (the puzzle answer).
    fn sumAll(self: *Memory) usize {
        var sum: usize = 0;
        var iter = self.mem.iterator();
        while (iter.next()) |kv| {
            sum += kv.value;
        }
        return sum;
    }
};

pub fn main() !void {
    const begin = @divTrunc(std.time.nanoTimestamp(), 1000);
    // setup
    // defer _ = gpa.deinit();
    var allo = &gpa.allocator;
    var lines: std.mem.TokenIterator = try utils.readInputLines(allo, "./input1");
    defer allo.free(lines.buffer);

    // Both parts consume the same input stream in a single pass.
    var p1_mem = Memory.init(allo, false);
    defer p1_mem.deinit();
    var p2_mem = Memory.init(allo, true);
    defer p2_mem.deinit();

    while (lines.next()) |line| {
        p1_mem.doLine(line);
        p2_mem.doLine(line);
    }

    // 14722016054794
    print("p1: {}\n", .{p1_mem.sumAll()});
    print("p2: {}\n", .{p2_mem.sumAll()});

    const delta = @divTrunc(std.time.nanoTimestamp(), 1000) - begin;
    print("all done in {} microseconds\n", .{delta});
}
day_14/src/main.zig
const e = @import("erl_nif.zig"); const std = @import("std"); const builtin = @import("builtin"); const BeamMutex = @import("beam_mutex.zig").BeamMutex; /////////////////////////////////////////////////////////////////////////////// // BEAM allocator definitions /////////////////////////////////////////////////////////////////////////////// const Allocator = std.mem.Allocator; // basic allocator /// !value /// provides a default BEAM allocator. This is an implementation of the Zig /// allocator interface. Use `beam.allocator.alloc` everywhere to safely /// allocate slices efficiently, and use `beam.allocator.free` to release that /// memory. For single item allocation, use `beam.allocator.create` and /// `beam.allocator.destroy` to release the memory. /// /// Note this does not make the allocated memory *garbage collected* by the /// BEAM. /// /// All memory will be tracked by the beam. All allocations happen with 8-byte /// alignment, as described in `erl_nif.h`. This is sufficient to create /// correctly aligned `beam.terms`, and for most purposes. /// For data that require greater alignment, use `beam.large_allocator`. /// /// ### Example /// /// The following code will return ten bytes of new memory. /// /// ```zig /// const beam = @import("beam.zig"); /// /// fn give_me_ten_bytes() ![]u8 { /// return beam.allocator.alloc(u8, 10); /// } /// ``` /// /// currently does not release memory that is resized. For this behaviour, use /// use `beam.general_purpose_allocator`. /// /// not threadsafe. 
for a threadsafe allocator, use `beam.general_purpose_allocator`
pub const allocator = &raw_beam_allocator;

// erl_nif.h only guarantees 8-byte alignment from enif_alloc.
pub const MAX_ALIGN = 8;

var raw_beam_allocator = Allocator{
    .allocFn = raw_beam_alloc,
    .resizeFn = raw_beam_resize,
};

// Allocator.allocFn backed directly by enif_alloc. Requests with alignment
// greater than MAX_ALIGN cannot be honored and are reported as OutOfMemory
// (use `beam.large_allocator` for those instead).
fn raw_beam_alloc(
    _self: *Allocator,
    len: usize,
    ptr_align: u29,
    _len_align: u29,
    _ret_addr: usize,
) Allocator.Error![]u8 {
    if (ptr_align > MAX_ALIGN) {
        return error.OutOfMemory;
    }
    const ptr = e.enif_alloc(len) orelse return error.OutOfMemory;
    return @ptrCast([*]u8, ptr)[0..len];
}

// Allocator.resizeFn backed by enif_free. Shrinking to zero frees the buffer;
// any other shrink just reports the new length without returning memory to the
// BEAM (see the "does not release memory that is resized" note above); growing
// in place is never possible.
fn raw_beam_resize(
    _self: *Allocator,
    buf: []u8,
    _old_align: u29,
    new_len: usize,
    _len_align: u29,
    _ret_addr: usize,
) Allocator.Error!usize {
    if (new_len == 0) {
        e.enif_free(buf.ptr);
        return 0;
    }
    if (new_len <= buf.len) {
        return new_len;
    }
    return error.OutOfMemory;
}

/// !value
/// provides a BEAM allocator that can perform allocations with greater
/// alignment than the machine word. Note that this comes at the cost
/// of some memory to store important metadata.
///
/// currently does not release memory that is resized. For this behaviour
/// use `beam.general_purpose_allocator`.
///
/// not threadsafe.
for a threadsafe allocator, use `beam.general_purpose_allocator`
pub const large_allocator = &large_beam_allocator;

var large_beam_allocator = Allocator {
    .allocFn = large_beam_alloc,
    .resizeFn = large_beam_resize,
};

// Allocator.allocFn: over-allocates via the raw beam allocator and hands out an
// address realigned to `alignment`, honoring len_align when requested.
fn large_beam_alloc(
    allocator_: *Allocator,
    len: usize,
    alignment: u29,
    len_align: u29,
    return_address: usize
) error{OutOfMemory}![]u8 {
    var ptr = try alignedAlloc(len, alignment, len_align, return_address);
    if (len_align == 0) {
        return ptr[0..len];
    }
    return ptr[0..std.mem.alignBackwardAnyAlign(len, len_align)];
}

// Allocator.resizeFn: frees on shrink-to-zero, accepts in-place shrinks
// (without releasing memory — see the doc comment above), refuses growth.
fn large_beam_resize(
    allocator_: *Allocator,
    buf: []u8,
    buf_align: u29,
    new_len: usize,
    len_align: u29,
    return_address: usize,
) Allocator.Error!usize {
    if (new_len > buf.len) { return error.OutOfMemory; }
    if (new_len == 0) { return alignedFree(buf, buf_align); }
    if (len_align == 0) { return new_len; }
    return std.mem.alignBackwardAnyAlign(new_len, len_align);
}

// Allocates safeLen() bytes from the 8-byte-aligned beam allocator, realigns the
// address up to `alignment`, and stashes the original (unaligned) address in the
// usize slot immediately before the returned pointer so alignedFree can recover it.
fn alignedAlloc(len: usize, alignment: u29, len_align: u29, return_address: usize) ![*]u8 {
    var safe_len = safeLen(len, alignment);
    var alloc_slice: []u8 = try allocator.allocAdvanced(
        u8, MAX_ALIGN, safe_len, std.mem.Allocator.Exact.exact);

    const unaligned_addr = @ptrToInt(alloc_slice.ptr);
    const aligned_addr = reAlign(unaligned_addr, alignment);

    getPtrPtr(aligned_addr).* = unaligned_addr;
    return aligned_addr;
}

// Recovers the original allocation address stored by alignedAlloc and frees the
// whole over-allocated region. Always returns 0 (the resize contract for "freed").
fn alignedFree(buf: []u8, alignment: u29) usize {
    var ptr = getPtrPtr(buf.ptr).*;
    allocator.free(@intToPtr([*]u8, ptr)[0..safeLen(buf.len, alignment)]);
    return 0;
}

// Rounds `unaligned_addr` up so there is room for the stashed usize before it
// and the result is `alignment`-aligned.
fn reAlign(unaligned_addr: usize, alignment: u29) [*]u8 {
    return @intToPtr(
        [*]u8,
        std.mem.alignForward(
            unaligned_addr + @sizeOf(usize),
            alignment));
}

// Worst-case backing size: requested len plus realignment slack plus the
// metadata slot (accounting for the MAX_ALIGN base alignment).
fn safeLen(len: usize, alignment: u29) usize {
    return len + alignment - @sizeOf(usize) + MAX_ALIGN;
}

// Address of the metadata slot directly preceding an aligned pointer.
fn getPtrPtr(aligned_ptr: [*]u8) *usize {
    return @intToPtr(*usize, @ptrToInt(aligned_ptr) - @sizeOf(usize));
}

/// !value
/// wraps the zig GeneralPurposeAllocator into the standard BEAM allocator.
// Thread-safe GPA layered on top of the large (alignment-capable) BEAM allocator.
var general_purpose_allocator_instance = std.heap.GeneralPurposeAllocator(
    .{.thread_safe = true}) {
    .backing_allocator = large_allocator,
};

pub var general_purpose_allocator = &general_purpose_allocator_instance.allocator;

///////////////////////////////////////////////////////////////////////////////
// syntactic sugar: important elixir terms
///////////////////////////////////////////////////////////////////////////////

/// errors for nif translation
pub const Error = error {
  /// Translates to Elixir `FunctionClauseError`.
  ///
  /// This is the default mechanism for reporting that a Zigler nif function has
  /// been incorrectly passed a value from the Elixir BEAM runtime. This is very
  /// important, as Zig is statically typed.
  ///
  /// support for users to be able to throw this value in their own Zig functions
  /// is forthcoming.
  FunctionClauseError,
};

/// errors for launching nif errors
pub const ThreadError = error {
  /// Occurs when there's a problem launching a threaded nif.
  LaunchError
};

/// errors for testing
pub const AssertionError = error {
  /// Translates to `ExUnit.AssertionError`. Mostly used in Zig unit tests.
  ///
  /// All test clauses in the directories of your Zig-enabled modules are
  /// converted to Zig functions with the inferred type `!void`. The
  /// `beam.assert/1` function can throw this error as its error type.
  ///
  /// Zigler converts assert statements in test blocks to `try beam.assert(...);`
  AssertionError
};

/// syntactic sugar for the BEAM environment. Note that the `env` type
/// encapsulates the pointer, since you will almost always be passing this
/// pointer to an opaque struct around without accessing it.
pub const env = ?*e.ErlNifEnv;

/// syntactic sugar for the BEAM term struct (`e.ErlNifTerm`)
pub const term = e.ErlNifTerm;

///////////////////////////////////////////////////////////////////////////////
// syntactic sugar: gets
///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////
// generics

/// A helper for marshalling values from the BEAM runtime into Zig. Use this
/// function if you need support for Zig generics.
///
/// Used internally to typcheck values coming into Zig slice.
///
/// supported types:
/// - `c_int`
/// - `c_long`
/// - `isize`
/// - `usize`
/// - `u8`
/// - `u16`
/// - `u32`
/// - `u64`
/// - `i32`
/// - `i64`
/// - `f16`
/// - `f32`
/// - `f64`
pub fn get(comptime T: type, environment: env, value: term) !T {
    switch (T) {
        c_int  => return get_c_int(environment, value),
        c_long => return get_c_long(environment, value),
        isize  => return get_isize(environment, value),
        usize  => return get_usize(environment, value),
        u8     => return get_u8(environment, value),
        u16    => return get_u16(environment, value),
        u32    => return get_u32(environment, value),
        u64    => return get_u64(environment, value),
        i32    => return get_i32(environment, value),
        i64    => return get_i64(environment, value),
        f16    => return get_f16(environment, value),
        f32    => return get_f32(environment, value),
        f64    => return get_f64(environment, value),
        // dispatch is comptime; an unsupported T is a programmer error.
        else => unreachable
    }
}

///////////////////////////////////////////////////////////////////////////////
// ints

/// Takes a BEAM int term and returns a `c_int` value. Should only be used for
/// C interop with Zig functions.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0`
pub fn get_c_int(environment: env, src_term: term) !c_int {
    var result: c_int = undefined;
    // enif_get_int returns nonzero on success, 0 on type mismatch.
    if (0 != e.enif_get_int(environment, src_term, &result)) {
        return result;
    } else {
        return Error.FunctionClauseError;
    }
}

/// Takes a BEAM int term and returns a `c_uint` value.
Should only be used for
/// C interop with Zig functions.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0`
pub fn get_c_uint(environment: env, src_term: term) !c_uint {
    var result: c_uint = undefined;
    // enif_get_uint reports failure with 0; bail out immediately in that case.
    if (0 == e.enif_get_uint(environment, src_term, &result)) return Error.FunctionClauseError;
    return result;
}

/// Takes a BEAM int term and returns a `c_long` value. Should only be used
/// for C interop with Zig functions.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0`
pub fn get_c_long(environment: env, src_term: term) !c_long {
    var result: c_long = undefined;
    if (0 == e.enif_get_long(environment, src_term, &result)) return Error.FunctionClauseError;
    return result;
}

/// Takes a BEAM int term and returns a `c_ulong` value. Should only be used
/// for C interop with Zig functions.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0`
pub fn get_c_ulong(environment: env, src_term: term) !c_ulong {
    var result: c_ulong = undefined;
    if (0 == e.enif_get_ulong(environment, src_term, &result)) return Error.FunctionClauseError;
    return result;
}

/// Takes a BEAM int term and returns a `isize` value. Should only be used
/// for C interop.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0`
pub fn get_isize(environment: env, src_term: term) !isize {
    var result: i64 = undefined;
    // NOTE(review): casting &i64 to *c_long assumes c_long is 64 bits wide; on
    // LLP64 targets (e.g. Windows) c_long is 32 bits — confirm supported platforms.
    if (0 == e.enif_get_long(environment, src_term, @ptrCast(*c_long, &result))) return Error.FunctionClauseError;
    return @intCast(isize, result);
}

/// Takes a BEAM int term and returns a `usize` value. Zig idiomatically uses
/// `usize` for its size values, so typically you should be using this function.
/// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0` pub fn get_usize(environment: env, src_term: term) !usize { var result: i64 = undefined; if (0 != e.enif_get_long(environment, src_term, @ptrCast(*c_long, &result))) { return @intCast(usize, result); } else { return Error.FunctionClauseError; } } /// Takes a BEAM int term and returns a `u8` value. /// /// Note that this conversion function checks to make sure it's in range /// (`0..255`). /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0` pub fn get_u8(environment: env, src_term: term) !u8 { var result: c_int = undefined; if (0 != e.enif_get_int(environment, src_term, &result)) { if ((result >= 0) and (result <= 0xFF)) { return @intCast(u8, result); } else { return Error.FunctionClauseError; } } else { return Error.FunctionClauseError; } } /// Takes a BEAM int term and returns a `u16` value. /// /// Note that this conversion function checks to make sure it's in range /// (`0..65535`). /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0` pub fn get_u16(environment: env, src_term: term) !u16 { var result: c_int = undefined; if (0 != e.enif_get_int(environment, src_term, &result)) { if ((result >= 0) and (result <= 0xFFFF)) { return @intCast(u16, result); } else { return Error.FunctionClauseError; } } else { return Error.FunctionClauseError; } } /// Takes a BEAM int term and returns a `u32` value. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0` pub fn get_u32(environment: env, src_term: term) !u32 { var result: c_uint = undefined; if (0 != e.enif_get_uint(environment, src_term, &result)) { return @intCast(u32, result); } else { return Error.FunctionClauseError; } } /// Takes a BEAM int term and returns a `u64` value. 
/// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0` pub fn get_u64(environment: env, src_term: term) !u64 { var result: c_ulong = undefined; if (0 != e.enif_get_ulong(environment, src_term, &result)) { return @intCast(u64, result); } else { return Error.FunctionClauseError; } } /// Takes a BEAM int term and returns an `i32` value. /// /// Note that this conversion function does not currently do range checking. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0` pub fn get_i32(environment: env, src_term: term) !i32 { var result: c_int = undefined; if (0 != e.enif_get_int(environment, src_term, &result)) { return @intCast(i32, result); } else { return Error.FunctionClauseError; } } /// Takes a BEAM int term and returns an `i64` value. /// /// Note that this conversion function does not currently do range checking. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:integer/0` pub fn get_i64(environment: env, src_term: term) !i64 { var result: i64 = undefined; if (0 != e.enif_get_long(environment, src_term, @ptrCast(*c_long, &result))) { return result; } else { return Error.FunctionClauseError; } } /////////////////////////////////////////////////////////////////////////////// // floats /// Takes a BEAM float term and returns an `f16` value. /// /// Note that this conversion function does not currently do range checking. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:float/0` pub fn get_f16(environment: env, src_term: term) !f16 { var result: f64 = undefined; if (0 != e.enif_get_double(environment, src_term, &result)) { return @floatCast(f16, result); } else { return Error.FunctionClauseError; } } /// Takes a BEAM float term and returns an `f32` value. /// /// Note that this conversion function does not currently do range checking. 
/// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:float/0` pub fn get_f32(environment: env, src_term: term) !f32 { var result: f64 = undefined; if (0 != e.enif_get_double(environment, src_term, &result)) { return @floatCast(f32, result); } else { return Error.FunctionClauseError; } } /// Takes a BEAM float term and returns an `f64` value. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:float/0` pub fn get_f64(environment: env, src_term: term) !f64 { var result: f64 = undefined; if (0 != e.enif_get_double(environment, src_term, &result)) { return result; } else { return Error.FunctionClauseError; } } /////////////////////////////////////////////////////////////////////////////// // atoms /// note that Zig has no equivalent of a BEAM atom, so we will just declare /// it as a term. You can retrieve the string value of the BEAM atom using /// `get_atom_slice/2` pub const atom = term; const __latin1 = e.ErlNifCharEncoding.ERL_NIF_LATIN1; /// Takes a BEAM atom term and retrieves it as a slice `[]u8` value. /// it's the caller's responsibility to make sure that the value is freed. /// /// Uses the standard `beam.allocator` allocator. If you require a custom /// allocator, use `get_atom_slice_alloc/3` /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:atom/0` pub fn get_atom_slice(environment: env, src_term: atom) ![]u8 { return get_atom_slice_alloc(allocator, environment, src_term); } /// Takes a BEAM atom term and retrieves it as a slice `[]u8` value, with /// any allocator. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:atom/0` pub fn get_atom_slice_alloc(a: *Allocator, environment: env, src_term: atom) ![]u8 { var len: c_uint = undefined; var result: []u8 = undefined; if (0 != e.enif_get_atom_length(environment, src_term, @ptrCast([*c]c_uint, &len), __latin1)) { result = try a.alloc(u8, len + 1); // pull the value from the beam. 
if (0 != e.enif_get_atom(environment, src_term, @ptrCast([*c]u8, &result[0]), len + 1, __latin1)) { // trim the slice, it's the caller's responsibility to free it. return result[0..len]; } else { unreachable; } } else { return Error.FunctionClauseError; } } /////////////////////////////////////////////////////////////////////////////// // binaries /// shorthand for `e.ErlNifBinary`. pub const binary = e.ErlNifBinary; /// Takes an BEAM `t:Kernel.binary/0` term and retrieves a pointer to the /// binary data as a Zig c-string (`[*c]u8`). No memory is allocated for /// this operation. /// /// Should only be used for c interop functions. /// /// *Note*: this function could have unexpected results if your BEAM binary /// contains any zero byte values. Always use `get_char_slice/2` when /// C-interop is not necessary. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:Kernel.binary/0` pub fn get_c_string(environment: env, src_term: term) ![*c]u8 { var bin: binary = undefined; if (0 != e.enif_inspect_binary(environment, src_term, &bin)) { return bin.data; } else { return Error.FunctionClauseError;} } /// Takes an BEAM `t:Kernel.binary/0` term and retrieves it as a Zig character slice /// (`[]u8`) No memory is allocated for this operation. /// /// Raises `beam.Error.FunctionClauseError` if the term is not `t:Kernel.binary/0` pub fn get_char_slice(environment: env, src_term: term) ![]u8 { var bin: binary = undefined; var result: []u8 = undefined; if (0 != e.enif_inspect_binary(environment, src_term, &bin)) { return bin.data[0..bin.size]; } else { return Error.FunctionClauseError; } } /// Takes an BEAM `t:Kernel.binary/0` term and returns the corresponding /// `binary` struct. 
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:Kernel.binary/0`
pub fn get_binary(environment: env, src_term: term) !binary {
    var bin: binary = undefined;
    if (0 != e.enif_inspect_binary(environment, src_term, &bin)) {
        return bin;
    } else {
        return Error.FunctionClauseError;
    }
}

///////////////////////////////////////////////////////////////////////////////
// pids

/// shorthand for `e.ErlNifPid`.
pub const pid = e.ErlNifPid;

/// Takes an BEAM `t:Kernel.pid/0` term and returns the corresponding `pid`
/// struct.
///
/// Note that this is a fairly opaque struct and you're on your
/// own as to what you can do with this (for now), except as a argument
/// for the `e.enif_send` function.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:Kernel.pid/0`
pub fn get_pid(environment: env, src_term: term) !pid {
    var result: pid = undefined;
    if (0 != e.enif_get_local_pid(environment, src_term, &result)) {
        return result;
    } else {
        return Error.FunctionClauseError;
    }
}

/// shortcut for `e.enif_self`, marshalling into zig error style.
///
/// returns the pid value if it's env is a process-bound environment, otherwise
/// returns `beam.Error.FunctionClauseError`.
pub fn self(environment: env) !pid {
    var p: pid = undefined;
    if (e.enif_self(environment, @ptrCast([*c] pid, &p))) |self_val| {
        return self_val.*;
    } else {
        return Error.FunctionClauseError;
    }
}

/// shortcut for `e.enif_send`, sending `msg` to `to_pid` from a
/// process-bound environment.
///
/// returns true if the send is successful, false otherwise.
///
/// NOTE this function assumes a valid BEAM environment. If you have spawned
/// an OS thread without a BEAM environment, you must use `send_advanced/4`
pub fn send(c_env: env, to_pid: pid, msg: term) bool {
    return (e.enif_send(c_env, &to_pid, null, msg) == 1);
}

/// shortcut for `e.enif_send`, with an explicit message environment.
///
/// returns true if the send is successful, false otherwise.
///
/// if you are sending from a thread that does not have a BEAM environment, you
/// should put `null` in both environment variables.
pub fn send_advanced(c_env: env, to_pid: pid, m_env: env, msg: term) bool {
    return (e.enif_send(c_env, &to_pid, m_env, msg) == 1);
}

///////////////////////////////////////////////////////////////////////////////
// tuples

/// Takes an Beam `t:tuple/0` term and returns it as a slice of `term` structs.
/// Does *not* allocate memory for this operation.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:tuple/0`
pub fn get_tuple(environment: env, src_term: term) ![]term {
    var length: c_int = undefined;
    var term_list: [*c]term = undefined;
    // NB: fixed from the original, which (1) declared the two locals without
    // `= undefined` (invalid Zig), (2) called `enif_get_tuple` without the
    // `e.` namespace, (3) passed the *type* `env` rather than the
    // `environment` parameter, and (4) sliced to `length - 1`, silently
    // dropping the last tuple element.
    if (0 != e.enif_get_tuple(environment, src_term, &length, &term_list)) {
        return term_list[0..@intCast(usize, length)];
    } else {
        return Error.FunctionClauseError;
    }
}

///////////////////////////////////////////////////////////////////////////////
// lists

/// Takes a BEAM `t:list/0` term and returns its length.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:list/0`
pub fn get_list_length(environment: env, list: term) !usize {
    var result: c_uint = undefined;
    if (0 != e.enif_get_list_length(environment, list, &result)) {
        return @intCast(usize, result);
    } else {
        return Error.FunctionClauseError;
    }
}

/// Iterates over a BEAM `t:list/0`.
///
/// In this function, the `list` value will be modified to the `tl` of the
/// BEAM list, and the return value will be the BEAM term.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:list/0`
pub fn get_head_and_iter(environment: env, list: *term) !term {
    var head: term = undefined;
    if (0 != e.enif_get_list_cell(environment, list.*, &head, list)) {
        return head;
    } else {
        return Error.FunctionClauseError;
    }
}

/// A generic function which lets you convert a BEAM `t:list/0` of
/// homogeous type into a Zig slice.
///
/// The resulting slice will be allocated using the beam allocator, with
/// ownership passed to the caller. If you need to use a different allocator,
/// use `get_slice_of_alloc/4`
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:list/0`.
/// Also raises `beam.Error.FunctionClauseError` if any of the terms is
/// incompatible with the internal type
///
/// supported internal types:
/// - `c_int`
/// - `c_long`
/// - `isize`
/// - `usize`
/// - `u8`
/// - `i32`
/// - `i64`
/// - `f16`
/// - `f32`
/// - `f64`
pub fn get_slice_of(comptime T: type, environment: env, list: term) ![]T {
    return get_slice_of_alloc(T, allocator, environment, list);
}

/// Converts an BEAM `t:list/0` of homogenous type into a Zig slice, but
/// using any allocator you wish.
///
/// ownership is passed to the caller.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:list/0`.
/// Also raises `beam.Error.FunctionClauseError` if any of the terms is
/// incompatible with the internal type.
///
/// supported internal types:
/// - `c_int`
/// - `c_long`
/// - `isize`
/// - `usize`
/// - `u8`
/// - `i32`
/// - `i64`
/// - `f16`
/// - `f32`
/// - `f64`
pub fn get_slice_of_alloc(comptime T: type, a: *Allocator, environment: env, list: term) ![]T {
    const size = try get_list_length(environment, list);

    var idx: usize = 0;
    var head: term = undefined;

    // allocate memory for the Zig list.
    var result = try a.alloc(T, size);
    // free the buffer if any later conversion step fails.  NB: fixed from the
    // original, which registered this errdefer *after* the while loop, so an
    // error raised by `get_head_and_iter` or `get` inside the loop leaked
    // `result`.
    errdefer a.free(result);

    var movable_list = list;

    while (idx < size) {
        head = try get_head_and_iter(environment, &movable_list);
        result[idx] = try get(T, environment, head);
        idx += 1;
    }

    return result;
}

///////////////////////////////////////////////////////////////////////////////
// booleans

/// private helper string comparison function
fn str_cmp(comptime ref: []const u8, str: []const u8) bool {
    if (str.len != ref.len) {
        return false;
    }
    for (str) |item, idx| {
        if (item != ref[idx]) {
            return false;
        }
    }
    return true;
}

const true_slice = "true"[0..];
const false_slice = "false"[0..];

/// Converts an BEAM `t:boolean/0` into a Zig `bool`.
///
/// Raises `beam.Error.FunctionClauseError` if the term is not `t:boolean/0`.
/// May potentially raise an out of memory error, as it must make an allocation /// to perform its conversion. pub fn get_bool(environment: env, val: term) !bool { var str: []u8 = undefined; str = try get_atom_slice(environment, val); defer allocator.free(str); if (str_cmp(true_slice, str)) { return true; } else if (str_cmp(false_slice, str)) { return false; } else { return Error.FunctionClauseError; } } /////////////////////////////////////////////////////////////////////////////// // syntactic sugar: makes /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// // generic /// A helper for marshalling values from Zig back into the runtime. Use this /// function if you need support for Zig generics. /// /// supported types: /// - `c_int` /// - `c_long` /// - `isize` /// - `usize` /// - `u8` /// - `i32` /// - `i64` /// - `f16` /// - `f32` /// - `f64` pub fn make(comptime T: type, environment: env, val: T) term { switch (T) { u8 => return make_u8(environment, val), u16 => return make_u16(environment, val), u32 => return make_u32(environment, val), u64 => return make_u64(environment, val), c_int => return make_c_int(environment, val), c_uint => return make_c_uint(environment, val), c_long => return make_c_long(environment, val), c_ulong => return make_c_ulong(environment, val), isize => return make_isize(environment, val), usize => return make_usize(environment, val), i32 => return make_i32(environment, val), i64 => return make_i64(environment, val), f16 => return make_f16(environment, val), f32 => return make_f32(environment, val), f64 => return make_f64(environment, val), else => unreachable } } /// converts a char (`u8`) value into a BEAM `t:integer/0`. pub fn make_u8(environment: env, chr: u8) term { return e.enif_make_uint(environment, @intCast(c_uint, chr)); } /// converts a unsigned (`u16`) value into a BEAM `t:integer/0`. 
pub fn make_u16(environment: env, val: u16) term {
    return e.enif_make_uint(environment, @intCast(c_uint, val));
}

/// converts a unsigned (`u32`) value into a BEAM `t:integer/0`.
pub fn make_u32(environment: env, val: u32) term {
    return e.enif_make_uint(environment, @intCast(c_uint, val));
}

/// converts a unsigned (`u64`) value into a BEAM `t:integer/0`.
pub fn make_u64(environment: env, val: u64) term {
    return e.enif_make_ulong(environment, @intCast(c_ulong, val));
}

/// converts a `c_int` value into a BEAM `t:integer/0`.
pub fn make_c_int(environment: env, val: c_int) term {
    return e.enif_make_int(environment, val);
}

/// converts a `c_uint` value into a BEAM `t:integer/0`.
pub fn make_c_uint(environment: env, val: c_uint) term {
    return e.enif_make_uint(environment, val);
}

/// converts a `c_long` value into a BEAM `t:integer/0`.
pub fn make_c_long(environment: env, val: c_long) term {
    return e.enif_make_long(environment, val);
}

/// converts a `c_ulong` value into a BEAM `t:integer/0`.
pub fn make_c_ulong(environment: env, val: c_ulong) term {
    return e.enif_make_ulong(environment, val);
}

/// converts an `isize` value into a BEAM `t:integer/0`.
pub fn make_isize(environment: env, val: isize) term {
    // NB: fixed from the original, which narrowed through `c_int` and would
    // trap (safe builds) for 64-bit isize values outside the c_int range.
    return e.enif_make_long(environment, @intCast(c_long, val));
}

/// converts a `usize` value into a BEAM `t:integer/0`.
pub fn make_usize(environment: env, val: usize) term {
    // NB: fixed from the original, which used the *signed* `enif_make_int`
    // with a `c_int` cast and would trap for any usize value above the c_int
    // maximum.
    return e.enif_make_ulong(environment, @intCast(c_ulong, val));
}

/// converts an `i32` value into a BEAM `t:integer/0`.
pub fn make_i32(environment: env, val: i32) term {
    return e.enif_make_int(environment, @intCast(c_int, val));
}

/// converts an `i64` value into a BEAM `t:integer/0`.
pub fn make_i64(environment: env, val: i64) term {
    return e.enif_make_long(environment, @intCast(c_long, val));
}

///////////////////////////////////////////////////////////////////////////////
// floats

/// converts an `f16` value into a BEAM `t:float/0`.
pub fn make_f16(environment: env, val: f16) term {
    return e.enif_make_double(environment, @floatCast(f64, val));
}

/// converts an `f32` value into a BEAM `t:float/0`.
pub fn make_f32(environment: env, val: f32) term {
    return e.enif_make_double(environment, @floatCast(f64, val));
}

/// converts an `f64` value into a BEAM `t:float/0`.
pub fn make_f64(environment: env, val: f64) term {
    return e.enif_make_double(environment, val);
}

///////////////////////////////////////////////////////////////////////////////
// atoms

/// converts a Zig char slice (`[]u8`) into a BEAM `t:atom/0`.
pub fn make_atom(environment: env, atom_str: []const u8) term {
    return e.enif_make_atom_len(environment, @ptrCast([*c]const u8, &atom_str[0]), atom_str.len);
}

///////////////////////////////////////////////////////////////////////////////
// binaries

/// converts a Zig char slice (`[]u8`) into a BEAM `t:binary/0`.
///
/// no memory allocation inside of Zig is performed and the BEAM environment
/// is responsible for the resulting binary. You are responsible for managing
/// the allocation of the slice.
pub fn make_slice(environment: env, val: []const u8) term {
    var result: e.ErlNifTerm = undefined;
    var bin: [*]u8 = @ptrCast([*]u8, e.enif_make_new_binary(environment, val.len, &result));
    for (val) |_chr, i| {
        bin[i] = val[i];
    }
    return result;
}

/// converts an c string (`[*c]u8`) into a BEAM `t:binary/0`. Mostly used for
/// c interop.
///
/// no memory allocation inside of Zig is performed and the BEAM environment
/// is responsible for the resulting binary. You are responsible for managing
/// the allocation of the slice.
pub fn make_c_string(environment: env, val: [*c] const u8) term {
    var len: usize = 0;
    // first get the length of the c string by scanning for the null
    // terminator.  NB: fixed from the original, which iterated over the
    // uninitialized `result` term instead of the string (not compilable) and
    // included the null terminator in the produced binary.
    while (val[len] != 0) : (len += 1) {}
    // punt to the slicing function; the terminator is excluded.
    return make_slice(environment, val[0..len]);
}

///////////////////////////////////////////////////////////////////////////////
// tuples

/// converts a slice of `term`s into a BEAM `t:tuple/0`.
pub fn make_tuple(environment: env, val: []term) term {
    return e.enif_make_tuple_from_array(
        environment,
        @ptrCast([*c]term, val.ptr),
        @intCast(c_uint, val.len));
}

///////////////////////////////////////////////////////////////////////////////
// lists

/// converts a slice of `term`s into a BEAM `t:list/0`.
pub fn make_term_list(environment: env, val: []term) term {
    return e.enif_make_list_from_array(
        environment,
        @ptrCast([*c]term, val.ptr),
        @intCast(c_uint, val.len));
}

/// converts a Zig char slice (`[]u8`) into a BEAM `t:charlist/0`.
pub fn make_charlist(environment: env, val: [] const u8) term {
    // NB: fixed from the original, which passed the slice itself where
    // `enif_make_string_len` expects a C pointer (`[*c]const u8`).
    return e.enif_make_string_len(environment, val.ptr, val.len, __latin1);
}

/// converts a c string (`[*c]u8`) into a BEAM `t:charlist/0`.
pub fn make_cstring_charlist(environment: env, val: [*c] const u8) term {
    return e.enif_make_string(environment, val, __latin1);
}

///////////////////////////////////////////////////////////////////////////////
// list-generic

/// A helper to make BEAM lists out of slices of `term`. Use this function if
/// you need a generic listbuilding function.
///
/// uses the BEAM allocator internally. If you would like to use a custom
/// allocator, (for example an arena allocator, if you have very long lists),
/// use `make_list_alloc/4`
///
/// supported internal types:
/// - `c_int`
/// - `c_long`
/// - `isize`
/// - `usize`
/// - `u8`
/// - `i32`
/// - `i64`
/// - `f16`
/// - `f32`
/// - `f64`
pub fn make_list(comptime T: type, environment: env, val: []T) !term {
    return make_list_alloc(T, allocator, environment, val);
}

/// A helper to make a BEAM `t:Kernel.list` out of `term`s, with any allocator.
/// Use this function if you need a generic listbuilding function.
/// /// supported internal types: /// - `c_int` /// - `c_long` /// - `isize` /// - `usize` /// - `u8` /// - `i32` /// - `i64` /// - `f16` /// - `f32` /// - `f64` pub fn make_list_alloc(comptime T: type, a: *Allocator, environment: env, val: []T) !term { var term_slice: []term = try a.alloc(term, val.len); defer a.free(term_slice); for (val) | item, idx | { term_slice[idx] = make(T, environment, item); } return e.enif_make_list_from_array(environment, @ptrCast([*c]term, &term_slice[0]), @intCast(c_uint, val.len)); } /// converts a c_int slice (`[]c_int`) into a BEAM list of `integer/0`. pub fn make_c_int_list(environment: env, val: []c_int) !term { return try make_list(c_int, environment, val); } /// converts a c_long slice (`[]c_long`) into a BEAM list of `integer/0`. pub fn make_c_long_list(environment: env, val: []c_long) !term { return try make_list(c_long, environment, val); } /// converts an i32 slice (`[]i32`) into a BEAM list of `integer/0`. pub fn make_i32_list(environment: env, val: []i32) !term { return try make_list(i32, environment, val); } /// converts an i64 slice (`[]i64`) into a BEAM list of `integer/0`. pub fn make_i64_list(environment: env, val: []i64) !term { return try make_list(i64, environment, val); } /// converts an f16 slice (`[]f16`) into a BEAM list of `t:float/0`. pub fn make_f16_list(environment: env, val: []f16) !term { return try make_list(f16, environment, val); } /// converts an f32 slice (`[]f32`) into a BEAM list of `t:float/0`. pub fn make_f32_list(environment: env, val: []f32) !term { return try make_list(f32, environment, val); } /// converts an f64 slice (`[]f64`) into a BEAM list of `t:float/0`. pub fn make_f64_list(environment: env, val: []f64) !term { return try make_list(f64, environment, val); } /////////////////////////////////////////////////////////////////////////////// // special atoms /// converts a `bool` value into a `t:Kernel.boolean/0` value. 
pub fn make_bool(environment: env, val: bool) term { return if (val) e.enif_make_atom(environment, "true") else e.enif_make_atom(environment, "false"); } /// creates a beam `nil` value. pub fn make_nil(environment: env) term { return e.enif_make_atom(environment, "nil"); } /// creates a beam `ok` value. pub fn make_ok(environment: env) term { return e.enif_make_atom(environment, "ok"); } /// creates a beam `error` value. pub fn make_error(environment: env) term { return e.enif_make_atom(environment, "error"); } /////////////////////////////////////////////////////////////////////////////// // ok and error tuples /// A helper to make `{:ok, term}` terms from arbitrarily-typed values. /// /// supported types: /// - `c_int` /// - `c_long` /// - `isize` /// - `usize` /// - `u8` /// - `i32` /// - `i64` /// - `f16` /// - `f32` /// - `f64` /// /// Use `make_ok_term/2` to make ok tuples from generic terms. /// Use `make_ok_atom/2` to make ok tuples with atom terms from slices. pub fn make_ok_tuple(comptime T: type, environment: env, val: T) term { return make_ok_term(environment, make(T, environment, val)); } /// A helper to make `{:ok, binary}` terms from slices pub fn make_ok_binary(environment: env, val: [] const u8) term { return make_ok_term(environment, make_slice(environment, val)); } /// A helper to make `{:ok, atom}` terms from slices pub fn make_ok_atom(environment: env, val: [] const u8) term { return make_ok_term(environment, make_atom(environment, val)); } /// A helper to make `{:ok, term}` terms in general pub fn make_ok_term(environment: env, val: term) term { return e.enif_make_tuple(environment, 2, make_ok(environment), val); } /// A helper to make `{:error, term}` terms from arbitrarily-typed values. /// /// supported types: /// - `c_int` /// - `c_long` /// - `isize` /// - `usize` /// - `u8` /// - `i32` /// - `i64` /// - `f16` /// - `f32` /// - `f64` /// /// Use `make_error_term/2` to make error tuples from generic terms. 
/// Use `make_error_atom/2` to make atom errors from slices. pub fn make_error_tuple(comptime T: type, environment: env, val: T) term { return make_error_term(environment, make(T, environment, val)); } /// A helper to make `{:error, atom}` terms from slices pub fn make_error_atom(environment: env, val: [] const u8) term { return make_error_term(environment, make_atom(environment, val)); } /// A helper to make `{:error, binary}` terms from slices pub fn make_error_binary(environment: env, val: [] const u8) term { return make_error_term(environment, make_slice(environment, val)); } /// A helper to make `{:error, term}` terms in general pub fn make_error_term(environment: env, val: term) term { return e.enif_make_tuple(environment, 2, make_error(environment), val); } /////////////////////////////////////////////////////////////////////////////// // refs /// Encapsulates `e.enif_make_ref` and allows it to return a /// FunctionClauseError. pub fn make_ref(environment: env) term { return e.enif_make_ref(environment); } /////////////////////////////////////////////////////////////////////////////// // resources pub const resource_type = ?*e.ErlNifResourceType; pub const resource = struct { /// errors related to resource transactions pub const ResourceError = error { /// something has gone wrong while trying to fetch a resource. 
FetchError, }; pub fn create(comptime T : type, environment: env, res_typ: resource_type, val : T) !term { var ptr : ?*c_void = e.enif_alloc_resource(res_typ, @sizeOf(T)); var obj : *T = undefined; if (ptr == null) { return error.OutOfMemory; } else { obj = @ptrCast(*T, @alignCast(@alignOf(*T), ptr)); obj.* = val; } return e.enif_make_resource(environment, ptr); } pub fn update(comptime T : type, environment: env, res_typ: resource_type, res_trm: term, new_val: T) !void { var obj : ?*c_void = undefined; if (0 == e.enif_get_resource(environment, res_trm, res_typ, @ptrCast([*c]?*c_void, &obj))) { return resource.ResourceError.FetchError; } if (obj == null) { unreachable; } var val : *T = @ptrCast(*T, @alignCast(@alignOf(*T), obj)); val.* = new_val; } pub fn fetch(comptime T : type, environment: env, res_typ: resource_type, res_trm: term) !T { var obj : ?*c_void = undefined; if (0 == e.enif_get_resource(environment, res_trm, res_typ, @ptrCast([*c]?*c_void, &obj))) { return resource.ResourceError.FetchError; } // according to the erlang documentation: // the pointer received in *objp is guaranteed to be valid at least as long as the // resource handle term is valid. 
if (obj == null) { unreachable; } var val : *T = @ptrCast(*T, @alignCast(@alignOf(*T), obj)); return val.*; } pub fn keep(comptime T: type, environment: env, res_typ: resource_type, res_trm: term) !void { var obj : ?*c_void = undefined; if (0 == e.enif_get_resource(environment, res_trm, res_typ, @ptrCast([*c]?*c_void, &obj))) { return resource.ResourceError.FetchError; } if (obj == null) { unreachable; } e.enif_keep_resource(obj); } pub fn release(environment: env, res_typ: resource_type, res_trm: term) void { var obj : ?*c_void = undefined; if (0 != e.enif_get_resource(environment, res_trm, res_typ, &obj)) { e.enif_release_resource(obj); } else { unreachable; } } }; /////////////////////////////////////////////////////////////////////////////// // yielding NIFs /// transparently passes information into the yield statement. pub threadlocal var yield_info: *YieldInfo = undefined; pub fn Frame(function: anytype) type { return struct { yield_info: YieldInfo, zig_frame: *@Frame(function), }; } pub const YieldError = error { Cancelled, }; /// this function is going to be dropped inside the suspend statement. pub fn yield() !env { suspend { if (yield_info.cancelled) return YieldError.Cancelled; yield_info.yield_frame = @frame(); } return yield_info.environment; } pub const YieldInfo = struct { yield_frame: ?anyframe = null, cancelled: bool = false, response: term = undefined, environment: env, }; pub fn set_yield_response(what: term) void { yield_info.response = what; } /////////////////////////////////////////////////////////////////////////////// // errors, etc. pub fn raise(environment: env, exception: atom) term { return e.enif_raise_exception(environment, exception); } // create a global enomem string, then throw it. const enomem_slice = "enomem"; /// This function is used to communicate `:enomem` back to the BEAM as an /// exception. /// /// The BEAM is potentially OOM-safe, and Zig lets you leverage that. 
/// OOM errors from `beam.allocator` can be converted to a generic erlang term /// that represents an exception. Returning this from your NIF results in /// a BEAM throw event. pub fn raise_enomem(environment: env) term { return e.enif_raise_exception(environment, make_atom(environment, enomem_slice)); } const f_c_e_slice = "function_clause"; /// This function is used to communicate `:function_clause` back to the BEAM as an /// exception. /// /// By default Zigler will do argument input checking on value /// ingress from the dynamic BEAM runtime to the static Zig runtime. /// You can also use this function to communicate a similar error by returning the /// resulting term from your NIF. pub fn raise_function_clause_error(environment: env) term { return e.enif_raise_exception(environment, make_atom(environment, f_c_e_slice)); } const resource_error = "resource_error"; /// This function is used to communicate `:resource_error` back to the BEAM as an /// exception. pub fn raise_resource_error(environment: env) term { return e.enif_raise_exception(environment, make_atom(environment, resource_error)); } const assert_slice = "assertion_error"; /// This function is used to communicate `:assertion_error` back to the BEAM as an /// exception. /// /// Used when running Zigtests, when trapping `beam.AssertionError.AssertionError`. 
pub fn raise_assertion_error(environment: env) term { return e.enif_raise_exception(environment, make_atom(environment, assert_slice)); } pub fn make_error_return_trace(env_: env, error_trace: ?*builtin.StackTrace) term { if (error_trace) | trace | { var frame_index: usize = 0; var frames_left: usize = std.math.min(trace.index, trace.instruction_addresses.len); var list: term = e.enif_make_list(env_, 0); const debug_info = std.debug.getSelfDebugInfo() catch return make_nil(env_); while (frames_left != 0) : ({ frames_left -= 1; frame_index = (frame_index + 1) % trace.instruction_addresses.len; }) { const return_address = trace.instruction_addresses[frame_index]; var next_term: term = error_trace_info(env_, debug_info, return_address); list = e.enif_make_list_cell(env_, next_term, list); } return list; } else { return make_nil(env_); } } fn error_trace_info(env_: env, debug_info: *std.debug.DebugInfo, address: usize) term { const module = debug_info.getModuleForAddress(address) catch return make_error(env_); const symbol_info = module.getSymbolAtAddress(address) catch return make_error(env_); defer symbol_info.deinit(); var file = symbol_info.compile_unit_name; var fun = symbol_info.symbol_name; return make_tuple(env_, .{ e.enif_make_string_len(env, file, file.len, e.ErlNifCharEncoding.ERL_NIF_LATIN1), e.enif_make_string_len(env, fun, fun.len, e.ErlNifCharEncoding.ERL_NIF_LATIN1), e.enif_make_uint(env, symbol_info.line_info.line) }); } /////////////////////////////////////////////////////////////////////////////// // assertation for tests /// A function used to return assertion errors to a zigtest. /// /// Zig's std.assert() will panic the Zig runtime and therefore the entire /// BEAM VM, making it incompatible with Elixir's Unit tests. As the VM is /// required for certain functionality (like `e.enif_alloc`), a BEAM-compatible /// assert is necessary. /// /// When building zigtests, `assert(...)` calls get lexically converted to /// `try beam.assert(...)` calls. 
pub fn assert(ok: bool, file: []const u8, line: i64) !void { if (!ok) { error_file = file; error_line = line; return AssertionError.AssertionError; // assertion failure } } /// !value /// you can use this value to access the BEAM environment of your unit test. pub threadlocal var test_env: env = undefined; pub threadlocal var error_file: []const u8 = undefined; pub threadlocal var error_line: i64 = 0; // private function which fetches the threadlocal cache. pub fn test_error() term { var tuple_slice: []term = allocator.alloc(term, 3) catch unreachable; defer allocator.free(tuple_slice); tuple_slice[0] = make_atom(test_env, "error"); tuple_slice[1] = make_slice(test_env, error_file); tuple_slice[2] = make_i64(test_env, error_line); return make_tuple(test_env, tuple_slice); } /////////////////////////////////////////////////////////////////////////////// // NIF LOADING Boilerplate pub export fn blank_load( _env: env, _priv: [*c]?*c_void, _info: term) c_int { return 0; } pub export fn blank_upgrade( _env: env, _priv: [*c]?*c_void, _old_priv: [*c]?*c_void, _info: term) c_int { return 0; } pub export fn blank_unload(_env: env, priv: ?*c_void) void {}
// priv/beam/beam.zig (stray file-path marker; commented out so it does not break parsing)
// ---------------------------------------------------------------------------
// Windows Sync Services flag constants and data types.
// NOTE(review): this section appears to be auto-generated win32 bindings
// (zigwin32 style); prefer regenerating over hand-editing.
// ---------------------------------------------------------------------------
pub const SYNC_VERSION_FLAG_FROM_FEED = @as(u32, 1);
pub const SYNC_VERSION_FLAG_HAS_BY = @as(u32, 2);
pub const SYNC_SERIALIZE_REPLICA_KEY_MAP = @as(u32, 1);
pub const SYNC_FILTER_INFO_FLAG_ITEM_LIST = @as(u32, 1);
pub const SYNC_FILTER_INFO_FLAG_CHANGE_UNIT_LIST = @as(u32, 2);
pub const SYNC_FILTER_INFO_FLAG_CUSTOM = @as(u32, 4);
pub const SYNC_FILTER_INFO_COMBINED = @as(u32, 8);
pub const SYNC_CHANGE_FLAG_DELETED = @as(u32, 1);
pub const SYNC_CHANGE_FLAG_DOES_NOT_EXIST = @as(u32, 2);
pub const SYNC_CHANGE_FLAG_GHOST = @as(u32, 4);

//--------------------------------------------------------------------------------
// Section: Types (102)
//--------------------------------------------------------------------------------
pub const ID_PARAMETER_PAIR = extern struct {
    fIsVariable: BOOL,
    cbIdSize: u16,
};
pub const ID_PARAMETERS = extern struct {
    dwSize: u32,
    replicaId: ID_PARAMETER_PAIR,
    itemId: ID_PARAMETER_PAIR,
    changeUnitId: ID_PARAMETER_PAIR,
};
pub const SYNC_SESSION_STATISTICS = extern struct {
    dwChangesApplied: u32,
    dwChangesFailed: u32,
};
pub const SYNC_VERSION = extern struct {
    dwLastUpdatingReplicaKey: u32,
    ullTickCount: u64,
};
pub const SYNC_RANGE = extern struct {
    pbClosedLowerBound: ?*u8,
    pbClosedUpperBound: ?*u8,
};
pub const SYNC_TIME = extern struct {
    dwDate: u32,
    dwTime: u32,
};
pub const SYNC_FILTER_CHANGE = extern struct {
    fMoveIn: BOOL,
    moveVersion: SYNC_VERSION,
};
pub const SYNC_PROVIDER_ROLE = enum(i32) {
    SOURCE = 0,
    DESTINATION = 1,
};
pub const SPR_SOURCE = SYNC_PROVIDER_ROLE.SOURCE;
pub const SPR_DESTINATION = SYNC_PROVIDER_ROLE.DESTINATION;
pub const CONFLICT_RESOLUTION_POLICY = enum(i32) {
    NONE = 0,
    DESTINATION_PROVIDER_WINS = 1,
    SOURCE_PROVIDER_WINS = 2,
    LAST = 3,
};
pub const CRP_NONE = CONFLICT_RESOLUTION_POLICY.NONE;
pub const CRP_DESTINATION_PROVIDER_WINS = CONFLICT_RESOLUTION_POLICY.DESTINATION_PROVIDER_WINS;
pub const CRP_SOURCE_PROVIDER_WINS = CONFLICT_RESOLUTION_POLICY.SOURCE_PROVIDER_WINS;
pub const CRP_LAST = CONFLICT_RESOLUTION_POLICY.LAST;
pub const SYNC_PROGRESS_STAGE = enum(i32) {
    DETECTION = 0,
    ENUMERATION = 1,
    APPLICATION = 2,
};
pub const SPS_CHANGE_DETECTION = SYNC_PROGRESS_STAGE.DETECTION;
pub const SPS_CHANGE_ENUMERATION = SYNC_PROGRESS_STAGE.ENUMERATION;
pub const SPS_CHANGE_APPLICATION = SYNC_PROGRESS_STAGE.APPLICATION;
pub const SYNC_FULL_ENUMERATION_ACTION = enum(i32) {
    FULL_ENUMERATION = 0,
    PARTIAL_SYNC = 1,
    ABORT = 2,
};
pub const SFEA_FULL_ENUMERATION = SYNC_FULL_ENUMERATION_ACTION.FULL_ENUMERATION;
pub const SFEA_PARTIAL_SYNC = SYNC_FULL_ENUMERATION_ACTION.PARTIAL_SYNC;
pub const SFEA_ABORT = SYNC_FULL_ENUMERATION_ACTION.ABORT;
pub const SYNC_RESOLVE_ACTION = enum(i32) {
    DEFER = 0,
    ACCEPT_DESTINATION_PROVIDER = 1,
    ACCEPT_SOURCE_PROVIDER = 2,
    MERGE = 3,
    TRANSFER_AND_DEFER = 4,
    LAST = 5,
};
pub const SRA_DEFER = SYNC_RESOLVE_ACTION.DEFER;
pub const SRA_ACCEPT_DESTINATION_PROVIDER = SYNC_RESOLVE_ACTION.ACCEPT_DESTINATION_PROVIDER;
pub const SRA_ACCEPT_SOURCE_PROVIDER = SYNC_RESOLVE_ACTION.ACCEPT_SOURCE_PROVIDER;
pub const SRA_MERGE = SYNC_RESOLVE_ACTION.MERGE;
pub const SRA_TRANSFER_AND_DEFER = SYNC_RESOLVE_ACTION.TRANSFER_AND_DEFER;
pub const SRA_LAST = SYNC_RESOLVE_ACTION.LAST;
pub const SYNC_STATISTICS = enum(i32) {
    T = 0,
};
pub const SYNC_STATISTICS_RANGE_COUNT = SYNC_STATISTICS.T;
pub const SYNC_SERIALIZATION_VERSION = enum(i32) {
    @"1" = 1,
    @"2" = 4,
    @"3" = 5,
};
pub const SYNC_SERIALIZATION_VERSION_V1 = SYNC_SERIALIZATION_VERSION.@"1";
pub const SYNC_SERIALIZATION_VERSION_V2 = SYNC_SERIALIZATION_VERSION.@"2";
pub const SYNC_SERIALIZATION_VERSION_V3 = SYNC_SERIALIZATION_VERSION.@"3";
pub const FILTERING_TYPE = enum(i32) {
    ONLY = 0,
    AND_VERSIONS_FOR_MOVED_OUT_ITEMS = 1,
};
pub const FT_CURRENT_ITEMS_ONLY = FILTERING_TYPE.ONLY;
pub const FT_CURRENT_ITEMS_AND_VERSIONS_FOR_MOVED_OUT_ITEMS = FILTERING_TYPE.AND_VERSIONS_FOR_MOVED_OUT_ITEMS;
pub const SYNC_CONSTRAINT_RESOLVE_ACTION = enum(i32) {
    DEFER = 0,
    ACCEPT_DESTINATION_PROVIDER = 1,
    // (continuation of SYNC_CONSTRAINT_RESOLVE_ACTION, opened above)
    ACCEPT_SOURCE_PROVIDER = 2,
    TRANSFER_AND_DEFER = 3,
    MERGE = 4,
    RENAME_SOURCE = 5,
    RENAME_DESTINATION = 6,
};
pub const SCRA_DEFER = SYNC_CONSTRAINT_RESOLVE_ACTION.DEFER;
pub const SCRA_ACCEPT_DESTINATION_PROVIDER = SYNC_CONSTRAINT_RESOLVE_ACTION.ACCEPT_DESTINATION_PROVIDER;
pub const SCRA_ACCEPT_SOURCE_PROVIDER = SYNC_CONSTRAINT_RESOLVE_ACTION.ACCEPT_SOURCE_PROVIDER;
pub const SCRA_TRANSFER_AND_DEFER = SYNC_CONSTRAINT_RESOLVE_ACTION.TRANSFER_AND_DEFER;
pub const SCRA_MERGE = SYNC_CONSTRAINT_RESOLVE_ACTION.MERGE;
pub const SCRA_RENAME_SOURCE = SYNC_CONSTRAINT_RESOLVE_ACTION.RENAME_SOURCE;
pub const SCRA_RENAME_DESTINATION = SYNC_CONSTRAINT_RESOLVE_ACTION.RENAME_DESTINATION;
pub const CONSTRAINT_CONFLICT_REASON = enum(i32) {
    OTHER = 0,
    COLLISION = 1,
    NOPARENT = 2,
    IDENTITY = 3,
};
pub const CCR_OTHER = CONSTRAINT_CONFLICT_REASON.OTHER;
pub const CCR_COLLISION = CONSTRAINT_CONFLICT_REASON.COLLISION;
pub const CCR_NOPARENT = CONSTRAINT_CONFLICT_REASON.NOPARENT;
pub const CCR_IDENTITY = CONSTRAINT_CONFLICT_REASON.IDENTITY;
pub const KNOWLEDGE_COOKIE_COMPARISON_RESULT = enum(i32) {
    EQUAL = 0,
    CONTAINED = 1,
    CONTAINS = 2,
    NOT_COMPARABLE = 3,
};
pub const KCCR_COOKIE_KNOWLEDGE_EQUAL = KNOWLEDGE_COOKIE_COMPARISON_RESULT.EQUAL;
pub const KCCR_COOKIE_KNOWLEDGE_CONTAINED = KNOWLEDGE_COOKIE_COMPARISON_RESULT.CONTAINED;
pub const KCCR_COOKIE_KNOWLEDGE_CONTAINS = KNOWLEDGE_COOKIE_COMPARISON_RESULT.CONTAINS;
pub const KCCR_COOKIE_KNOWLEDGE_NOT_COMPARABLE = KNOWLEDGE_COOKIE_COMPARISON_RESULT.NOT_COMPARABLE;

// COM interface wrapper (generated): one replica-key/tick-count element of a clock vector.
// TODO: this type is limited to platform 'windows6.1'
const IID_IClockVectorElement_Value = @import("../zig.zig").Guid.initString("e71c4250-adf8-4a07-8fae-5669596909c1");
pub const IID_IClockVectorElement = &IID_IClockVectorElement_Value;
pub const IClockVectorElement = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetReplicaKey: fn(
            self: *const IClockVectorElement,
            pdwReplicaKey: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetTickCount: fn(
            self: *const IClockVectorElement,
            pullTickCount: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IClockVectorElement_GetReplicaKey(self: *const T, pdwReplicaKey: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IClockVectorElement.VTable, self.vtable).GetReplicaKey(@ptrCast(*const IClockVectorElement, self), pdwReplicaKey);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IClockVectorElement_GetTickCount(self: *const T, pullTickCount: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IClockVectorElement.VTable, self.vtable).GetTickCount(@ptrCast(*const IClockVectorElement, self), pullTickCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface wrapper (generated): clock-vector element extended with feed sync time/flags.
// TODO: this type is limited to platform 'windows6.1'
const IID_IFeedClockVectorElement_Value = @import("../zig.zig").Guid.initString("a40b46d2-e97b-4156-b6da-991f501b0f05");
pub const IID_IFeedClockVectorElement = &IID_IFeedClockVectorElement_Value;
pub const IFeedClockVectorElement = extern struct {
    pub const VTable = extern struct {
        base: IClockVectorElement.VTable,
        GetSyncTime: fn(
            self: *const IFeedClockVectorElement,
            pSyncTime: ?*SYNC_TIME,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFlags: fn(
            self: *const IFeedClockVectorElement,
            pbFlags: ?*u8,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IClockVectorElement.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFeedClockVectorElement_GetSyncTime(self: *const T, pSyncTime: ?*SYNC_TIME) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFeedClockVectorElement.VTable, self.vtable).GetSyncTime(@ptrCast(*const IFeedClockVectorElement, self), pSyncTime);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFeedClockVectorElement_GetFlags(self: *const T, pbFlags: ?*u8) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFeedClockVectorElement.VTable, self.vtable).GetFlags(@ptrCast(*const IFeedClockVectorElement, self), pbFlags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface wrapper (generated): a full clock vector (collection of elements).
// TODO: this type is limited to platform 'windows6.1'
const IID_IClockVector_Value = @import("../zig.zig").Guid.initString("14b2274a-8698-4cc6-9333-f89bd1d47bc4");
pub const IID_IClockVector = &IID_IClockVector_Value;
pub const IClockVector = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetClockVectorElements: fn(
            self: *const IClockVector,
            riid: ?*const Guid,
            ppiEnumClockVector: ?*?*anyopaque,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetClockVectorElementCount: fn(
            self: *const IClockVector,
            pdwCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IClockVector_GetClockVectorElements(self: *const T, riid: ?*const Guid, ppiEnumClockVector: ?*?*anyopaque) callconv(.Inline) HRESULT {
            return @ptrCast(*const IClockVector.VTable, self.vtable).GetClockVectorElements(@ptrCast(*const IClockVector, self), riid, ppiEnumClockVector);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IClockVector_GetClockVectorElementCount(self: *const T, pdwCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IClockVector.VTable, self.vtable).GetClockVectorElementCount(@ptrCast(*const IClockVector, self), pdwCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const
// (continuation: `const` introducing this declaration is at the end of the previous line)
IID_IFeedClockVector_Value = @import("../zig.zig").Guid.initString("8d1d98d1-9fb8-4ec9-a553-54dd924e0f67");
pub const IID_IFeedClockVector = &IID_IFeedClockVector_Value;
// COM interface wrapper (generated): clock vector specialized for feed sync.
pub const IFeedClockVector = extern struct {
    pub const VTable = extern struct {
        base: IClockVector.VTable,
        GetUpdateCount: fn(
            self: *const IFeedClockVector,
            pdwUpdateCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsNoConflictsSpecified: fn(
            self: *const IFeedClockVector,
            pfIsNoConflictsSpecified: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IClockVector.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFeedClockVector_GetUpdateCount(self: *const T, pdwUpdateCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFeedClockVector.VTable, self.vtable).GetUpdateCount(@ptrCast(*const IFeedClockVector, self), pdwUpdateCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFeedClockVector_IsNoConflictsSpecified(self: *const T, pfIsNoConflictsSpecified: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFeedClockVector.VTable, self.vtable).IsNoConflictsSpecified(@ptrCast(*const IFeedClockVector, self), pfIsNoConflictsSpecified);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM enumerator (generated): iterates IClockVectorElement items.
// TODO: this type is limited to platform 'windows6.1'
const IID_IEnumClockVector_Value = @import("../zig.zig").Guid.initString("525844db-2837-4799-9e80-81a66e02220c");
pub const IID_IEnumClockVector = &IID_IEnumClockVector_Value;
pub const IEnumClockVector = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumClockVector,
            cClockVectorElements: u32,
            ppiClockVectorElements: ?*?*IClockVectorElement,
            pcFetched: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn(
            self: *const IEnumClockVector,
            cSyncVersions: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const IEnumClockVector,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn(
            self: *const IEnumClockVector,
            ppiEnum: ?*?*IEnumClockVector,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumClockVector_Next(self: *const T, cClockVectorElements: u32, ppiClockVectorElements: ?*?*IClockVectorElement, pcFetched: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumClockVector.VTable, self.vtable).Next(@ptrCast(*const IEnumClockVector, self), cClockVectorElements, ppiClockVectorElements, pcFetched);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumClockVector_Skip(self: *const T, cSyncVersions: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumClockVector.VTable, self.vtable).Skip(@ptrCast(*const IEnumClockVector, self), cSyncVersions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumClockVector_Reset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumClockVector.VTable, self.vtable).Reset(@ptrCast(*const IEnumClockVector, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumClockVector_Clone(self: *const T, ppiEnum: ?*?*IEnumClockVector) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumClockVector.VTable, self.vtable).Clone(@ptrCast(*const IEnumClockVector, self), ppiEnum);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM enumerator (generated): iterates IFeedClockVectorElement items.
// TODO: this type is limited to platform 'windows6.1'
const IID_IEnumFeedClockVector_Value = @import("../zig.zig").Guid.initString("550f763d-146a-48f6-abeb-6c88c7f70514");
pub const IID_IEnumFeedClockVector = &IID_IEnumFeedClockVector_Value;
pub const IEnumFeedClockVector = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumFeedClockVector,
            cClockVectorElements: u32,
            ppiClockVectorElements: ?*?*IFeedClockVectorElement,
            pcFetched: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn(
            self: *const IEnumFeedClockVector,
            cSyncVersions: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const IEnumFeedClockVector,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn(
            self: *const IEnumFeedClockVector,
            ppiEnum: ?*?*IEnumFeedClockVector,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumFeedClockVector_Next(self: *const T, cClockVectorElements: u32, ppiClockVectorElements: ?*?*IFeedClockVectorElement, pcFetched: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumFeedClockVector.VTable, self.vtable).Next(@ptrCast(*const IEnumFeedClockVector, self), cClockVectorElements, ppiClockVectorElements, pcFetched);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumFeedClockVector_Skip(self: *const T, cSyncVersions: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumFeedClockVector.VTable, self.vtable).Skip(@ptrCast(*const IEnumFeedClockVector, self), cSyncVersions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumFeedClockVector_Reset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumFeedClockVector.VTable, self.vtable).Reset(@ptrCast(*const IEnumFeedClockVector, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumFeedClockVector_Clone(self: *const T, ppiEnum: ?*?*IEnumFeedClockVector) callconv(.Inline) HRESULT {
            return @ptrCast(*const
            // (continuation of IEnumFeedClockVector_Clone, opened on the previous line)
            IEnumFeedClockVector.VTable, self.vtable).Clone(@ptrCast(*const IEnumFeedClockVector, self), ppiEnum);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface wrapper (generated): one column/range fragment of sync knowledge.
// TODO: this type is limited to platform 'windows6.1'
const IID_ICoreFragment_Value = @import("../zig.zig").Guid.initString("613b2ab5-b304-47d9-9c31-ce6c54401a15");
pub const IID_ICoreFragment = &IID_ICoreFragment_Value;
pub const ICoreFragment = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        NextColumn: fn(
            self: *const ICoreFragment,
            pChangeUnitId: ?*u8,
            pChangeUnitIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        NextRange: fn(
            self: *const ICoreFragment,
            pItemId: ?*u8,
            pItemIdSize: ?*u32,
            piClockVector: ?*?*IClockVector,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const ICoreFragment,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetColumnCount: fn(
            self: *const ICoreFragment,
            pColumnCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRangeCount: fn(
            self: *const ICoreFragment,
            pRangeCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICoreFragment_NextColumn(self: *const T, pChangeUnitId: ?*u8, pChangeUnitIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICoreFragment.VTable, self.vtable).NextColumn(@ptrCast(*const ICoreFragment, self), pChangeUnitId, pChangeUnitIdSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICoreFragment_NextRange(self: *const T, pItemId: ?*u8, pItemIdSize: ?*u32, piClockVector: ?*?*IClockVector) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICoreFragment.VTable, self.vtable).NextRange(@ptrCast(*const ICoreFragment, self), pItemId, pItemIdSize, piClockVector);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICoreFragment_Reset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICoreFragment.VTable, self.vtable).Reset(@ptrCast(*const ICoreFragment, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICoreFragment_GetColumnCount(self: *const T, pColumnCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICoreFragment.VTable, self.vtable).GetColumnCount(@ptrCast(*const ICoreFragment, self), pColumnCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICoreFragment_GetRangeCount(self: *const T, pRangeCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICoreFragment.VTable, self.vtable).GetRangeCount(@ptrCast(*const ICoreFragment, self), pRangeCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface wrapper (generated): batch accessor over ICoreFragment items.
// TODO: this type is limited to platform 'windows6.1'
const IID_ICoreFragmentInspector_Value = @import("../zig.zig").Guid.initString("f7fcc5fd-ae26-4679-ba16-96aac583c134");
pub const IID_ICoreFragmentInspector = &IID_ICoreFragmentInspector_Value;
pub const ICoreFragmentInspector = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        NextCoreFragments: fn(
            self: *const ICoreFragmentInspector,
            requestedCount: u32,
            ppiCoreFragments: ?*?*ICoreFragment,
            pFetchedCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const ICoreFragmentInspector,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICoreFragmentInspector_NextCoreFragments(self: *const T, requestedCount: u32, ppiCoreFragments: ?*?*ICoreFragment, pFetchedCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICoreFragmentInspector.VTable, self.vtable).NextCoreFragments(@ptrCast(*const ICoreFragmentInspector, self), requestedCount, ppiCoreFragments, pFetchedCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICoreFragmentInspector_Reset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICoreFragmentInspector.VTable, self.vtable).Reset(@ptrCast(*const ICoreFragmentInspector, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface wrapper (generated): a knowledge exception covering a closed ID range.
// TODO: this type is limited to platform 'windows6.1'
const IID_IRangeException_Value = @import("../zig.zig").Guid.initString("75ae8777-6848-49f7-956c-a3a92f5096e8");
pub const IID_IRangeException = &IID_IRangeException_Value;
pub const IRangeException = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetClosedRangeStart: fn(
            self: *const IRangeException,
            pbClosedRangeStart: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetClosedRangeEnd: fn(
            self: *const IRangeException,
            pbClosedRangeEnd: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetClockVector: fn(
            self: *const IRangeException,
            riid: ?*const Guid,
            ppUnk: ?*?*anyopaque,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRangeException_GetClosedRangeStart(self: *const T, pbClosedRangeStart: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRangeException.VTable, self.vtable).GetClosedRangeStart(@ptrCast(*const IRangeException, self), pbClosedRangeStart, pcbIdSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRangeException_GetClosedRangeEnd(self: *const T, pbClosedRangeEnd: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRangeException.VTable,
            // (continuation of IRangeException_GetClosedRangeEnd, opened on the previous line)
            self.vtable).GetClosedRangeEnd(@ptrCast(*const IRangeException, self), pbClosedRangeEnd, pcbIdSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRangeException_GetClockVector(self: *const T, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRangeException.VTable, self.vtable).GetClockVector(@ptrCast(*const IRangeException, self), riid, ppUnk);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM enumerator (generated): iterates IRangeException items.
// TODO: this type is limited to platform 'windows6.1'
const IID_IEnumRangeExceptions_Value = @import("../zig.zig").Guid.initString("0944439f-ddb1-4176-b703-046ff22a2386");
pub const IID_IEnumRangeExceptions = &IID_IEnumRangeExceptions_Value;
pub const IEnumRangeExceptions = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumRangeExceptions,
            cExceptions: u32,
            ppRangeException: ?*?*IRangeException,
            pcFetched: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn(
            self: *const IEnumRangeExceptions,
            cExceptions: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const IEnumRangeExceptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn(
            self: *const IEnumRangeExceptions,
            ppEnum: ?*?*IEnumRangeExceptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumRangeExceptions_Next(self: *const T, cExceptions: u32, ppRangeException: ?*?*IRangeException, pcFetched: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumRangeExceptions.VTable, self.vtable).Next(@ptrCast(*const IEnumRangeExceptions, self), cExceptions, ppRangeException, pcFetched);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumRangeExceptions_Skip(self: *const T, cExceptions: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumRangeExceptions.VTable, self.vtable).Skip(@ptrCast(*const IEnumRangeExceptions, self), cExceptions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumRangeExceptions_Reset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumRangeExceptions.VTable, self.vtable).Reset(@ptrCast(*const IEnumRangeExceptions, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumRangeExceptions_Clone(self: *const T, ppEnum: ?*?*IEnumRangeExceptions) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumRangeExceptions.VTable, self.vtable).Clone(@ptrCast(*const IEnumRangeExceptions, self), ppEnum);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface wrapper (generated): a knowledge exception for a single item.
// TODO: this type is limited to platform 'windows6.1'
const IID_ISingleItemException_Value = @import("../zig.zig").Guid.initString("892fb9b0-7c55-4a18-9316-fdf449569b64");
pub const IID_ISingleItemException = &IID_ISingleItemException_Value;
pub const ISingleItemException = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetItemId: fn(
            self: *const ISingleItemException,
            pbItemId: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetClockVector: fn(
            self: *const ISingleItemException,
            riid: ?*const Guid,
            ppUnk: ?*?*anyopaque,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISingleItemException_GetItemId(self: *const T, pbItemId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISingleItemException.VTable, self.vtable).GetItemId(@ptrCast(*const ISingleItemException, self), pbItemId, pcbIdSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISingleItemException_GetClockVector(self: *const T, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISingleItemException.VTable, self.vtable).GetClockVector(@ptrCast(*const ISingleItemException, self), riid, ppUnk);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM enumerator (generated): iterates ISingleItemException items.
// TODO: this type is limited to platform 'windows6.1'
const IID_IEnumSingleItemExceptions_Value = @import("../zig.zig").Guid.initString("e563381c-1b4d-4c66-9796-c86faccdcd40");
pub const IID_IEnumSingleItemExceptions = &IID_IEnumSingleItemExceptions_Value;
pub const IEnumSingleItemExceptions = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumSingleItemExceptions,
            cExceptions: u32,
            ppSingleItemException: ?*?*ISingleItemException,
            pcFetched: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn(
            self: *const IEnumSingleItemExceptions,
            cExceptions: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const IEnumSingleItemExceptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn(
            self: *const IEnumSingleItemExceptions,
            ppEnum: ?*?*IEnumSingleItemExceptions,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSingleItemExceptions_Next(self: *const T, cExceptions: u32, ppSingleItemException: ?*?*ISingleItemException, pcFetched: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumSingleItemExceptions.VTable, self.vtable).Next(@ptrCast(*const IEnumSingleItemExceptions, self), cExceptions, ppSingleItemException, pcFetched);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSingleItemExceptions_Skip(self: *const T, cExceptions: u32) callconv(.Inline) HRESULT {
            return
@ptrCast(*const IEnumSingleItemExceptions.VTable, self.vtable).Skip(@ptrCast(*const IEnumSingleItemExceptions, self), cExceptions); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSingleItemExceptions_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSingleItemExceptions.VTable, self.vtable).Reset(@ptrCast(*const IEnumSingleItemExceptions, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSingleItemExceptions_Clone(self: *const T, ppEnum: ?*?*IEnumSingleItemExceptions) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSingleItemExceptions.VTable, self.vtable).Clone(@ptrCast(*const IEnumSingleItemExceptions, self), ppEnum); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IChangeUnitException_Value = @import("../zig.zig").Guid.initString("0cd7ee7c-fec0-4021-99ee-f0e5348f2a5f"); pub const IID_IChangeUnitException = &IID_IChangeUnitException_Value; pub const IChangeUnitException = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetItemId: fn( self: *const IChangeUnitException, pbItemId: ?*u8, pcbIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetChangeUnitId: fn( self: *const IChangeUnitException, pbChangeUnitId: ?*u8, pcbIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetClockVector: fn( self: *const IChangeUnitException, riid: ?*const Guid, ppUnk: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeUnitException_GetItemId(self: *const T, pbItemId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeUnitException.VTable, 
self.vtable).GetItemId(@ptrCast(*const IChangeUnitException, self), pbItemId, pcbIdSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeUnitException_GetChangeUnitId(self: *const T, pbChangeUnitId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeUnitException.VTable, self.vtable).GetChangeUnitId(@ptrCast(*const IChangeUnitException, self), pbChangeUnitId, pcbIdSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeUnitException_GetClockVector(self: *const T, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeUnitException.VTable, self.vtable).GetClockVector(@ptrCast(*const IChangeUnitException, self), riid, ppUnk); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IEnumChangeUnitExceptions_Value = @import("../zig.zig").Guid.initString("3074e802-9319-4420-be21-1022e2e21da8"); pub const IID_IEnumChangeUnitExceptions = &IID_IEnumChangeUnitExceptions_Value; pub const IEnumChangeUnitExceptions = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Next: fn( self: *const IEnumChangeUnitExceptions, cExceptions: u32, ppChangeUnitException: ?*?*IChangeUnitException, pcFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Skip: fn( self: *const IEnumChangeUnitExceptions, cExceptions: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const IEnumChangeUnitExceptions, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clone: fn( self: *const IEnumChangeUnitExceptions, ppEnum: ?*?*IEnumChangeUnitExceptions, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IEnumChangeUnitExceptions_Next(self: *const T, cExceptions: u32, ppChangeUnitException: ?*?*IChangeUnitException, pcFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumChangeUnitExceptions.VTable, self.vtable).Next(@ptrCast(*const IEnumChangeUnitExceptions, self), cExceptions, ppChangeUnitException, pcFetched); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumChangeUnitExceptions_Skip(self: *const T, cExceptions: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumChangeUnitExceptions.VTable, self.vtable).Skip(@ptrCast(*const IEnumChangeUnitExceptions, self), cExceptions); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumChangeUnitExceptions_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumChangeUnitExceptions.VTable, self.vtable).Reset(@ptrCast(*const IEnumChangeUnitExceptions, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumChangeUnitExceptions_Clone(self: *const T, ppEnum: ?*?*IEnumChangeUnitExceptions) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumChangeUnitExceptions.VTable, self.vtable).Clone(@ptrCast(*const IEnumChangeUnitExceptions, self), ppEnum); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IReplicaKeyMap_Value = @import("../zig.zig").Guid.initString("2209f4fc-fd10-4ff0-84a8-f0a1982e440e"); pub const IID_IReplicaKeyMap = &IID_IReplicaKeyMap_Value; pub const IReplicaKeyMap = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, LookupReplicaKey: fn( self: *const IReplicaKeyMap, pbReplicaId: ?*const u8, pdwReplicaKey: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LookupReplicaId: fn( self: *const IReplicaKeyMap, dwReplicaKey: u32, pbReplicaId: ?*u8, pcbIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Serialize: fn( self: *const 
IReplicaKeyMap, pbReplicaKeyMap: ?*u8, pcbReplicaKeyMap: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReplicaKeyMap_LookupReplicaKey(self: *const T, pbReplicaId: ?*const u8, pdwReplicaKey: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IReplicaKeyMap.VTable, self.vtable).LookupReplicaKey(@ptrCast(*const IReplicaKeyMap, self), pbReplicaId, pdwReplicaKey); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReplicaKeyMap_LookupReplicaId(self: *const T, dwReplicaKey: u32, pbReplicaId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IReplicaKeyMap.VTable, self.vtable).LookupReplicaId(@ptrCast(*const IReplicaKeyMap, self), dwReplicaKey, pbReplicaId, pcbIdSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReplicaKeyMap_Serialize(self: *const T, pbReplicaKeyMap: ?*u8, pcbReplicaKeyMap: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IReplicaKeyMap.VTable, self.vtable).Serialize(@ptrCast(*const IReplicaKeyMap, self), pbReplicaKeyMap, pcbReplicaKeyMap); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IConstructReplicaKeyMap_Value = @import("../zig.zig").Guid.initString("ded10970-ec85-4115-b52c-4405845642a5"); pub const IID_IConstructReplicaKeyMap = &IID_IConstructReplicaKeyMap_Value; pub const IConstructReplicaKeyMap = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, FindOrAddReplica: fn( self: *const IConstructReplicaKeyMap, pbReplicaId: ?*const u8, pdwReplicaKey: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace 
IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstructReplicaKeyMap_FindOrAddReplica(self: *const T, pbReplicaId: ?*const u8, pdwReplicaKey: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IConstructReplicaKeyMap.VTable, self.vtable).FindOrAddReplica(@ptrCast(*const IConstructReplicaKeyMap, self), pbReplicaId, pdwReplicaKey); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncKnowledge_Value = @import("../zig.zig").Guid.initString("615bbb53-c945-4203-bf4b-2cb65919a0aa"); pub const IID_ISyncKnowledge = &IID_ISyncKnowledge_Value; pub const ISyncKnowledge = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetOwnerReplicaId: fn( self: *const ISyncKnowledge, pbReplicaId: ?*u8, pcbIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Serialize: fn( self: *const ISyncKnowledge, fSerializeReplicaKeyMap: BOOL, pbKnowledge: ?*u8, pcbKnowledge: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetLocalTickCount: fn( self: *const ISyncKnowledge, ullTickCount: u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ContainsChange: fn( self: *const ISyncKnowledge, pbVersionOwnerReplicaId: ?*const u8, pgidItemId: ?*const u8, pSyncVersion: ?*const SYNC_VERSION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ContainsChangeUnit: fn( self: *const ISyncKnowledge, pbVersionOwnerReplicaId: ?*const u8, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, pSyncVersion: ?*const SYNC_VERSION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetScopeVector: fn( self: *const ISyncKnowledge, riid: ?*const Guid, ppUnk: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetReplicaKeyMap: fn( self: *const ISyncKnowledge, ppReplicaKeyMap: ?*?*IReplicaKeyMap, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clone: fn( self: *const ISyncKnowledge, ppClonedKnowledge: 
?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ConvertVersion: fn( self: *const ISyncKnowledge, pKnowledgeIn: ?*ISyncKnowledge, pbCurrentOwnerId: ?*const u8, pVersionIn: ?*const SYNC_VERSION, pbNewOwnerId: ?*u8, pcbIdSize: ?*u32, pVersionOut: ?*SYNC_VERSION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MapRemoteToLocal: fn( self: *const ISyncKnowledge, pRemoteKnowledge: ?*ISyncKnowledge, ppMappedKnowledge: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Union: fn( self: *const ISyncKnowledge, pKnowledge: ?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ProjectOntoItem: fn( self: *const ISyncKnowledge, pbItemId: ?*const u8, ppKnowledgeOut: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ProjectOntoChangeUnit: fn( self: *const ISyncKnowledge, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, ppKnowledgeOut: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ProjectOntoRange: fn( self: *const ISyncKnowledge, psrngSyncRange: ?*const SYNC_RANGE, ppKnowledgeOut: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ExcludeItem: fn( self: *const ISyncKnowledge, pbItemId: ?*const u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ExcludeChangeUnit: fn( self: *const ISyncKnowledge, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ContainsKnowledge: fn( self: *const ISyncKnowledge, pKnowledge: ?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, FindMinTickCountForReplica: fn( self: *const ISyncKnowledge, pbReplicaId: ?*const u8, pullReplicaTickCount: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRangeExceptions: fn( self: *const ISyncKnowledge, riid: ?*const Guid, ppUnk: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSingleItemExceptions: fn( self: *const ISyncKnowledge, riid: ?*const Guid, ppUnk: 
?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetChangeUnitExceptions: fn( self: *const ISyncKnowledge, riid: ?*const Guid, ppUnk: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, FindClockVectorForItem: fn( self: *const ISyncKnowledge, pbItemId: ?*const u8, riid: ?*const Guid, ppUnk: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, FindClockVectorForChangeUnit: fn( self: *const ISyncKnowledge, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, riid: ?*const Guid, ppUnk: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVersion: fn( self: *const ISyncKnowledge, pdwVersion: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_GetOwnerReplicaId(self: *const T, pbReplicaId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).GetOwnerReplicaId(@ptrCast(*const ISyncKnowledge, self), pbReplicaId, pcbIdSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_Serialize(self: *const T, fSerializeReplicaKeyMap: BOOL, pbKnowledge: ?*u8, pcbKnowledge: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).Serialize(@ptrCast(*const ISyncKnowledge, self), fSerializeReplicaKeyMap, pbKnowledge, pcbKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_SetLocalTickCount(self: *const T, ullTickCount: u64) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).SetLocalTickCount(@ptrCast(*const ISyncKnowledge, self), ullTickCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
ISyncKnowledge_ContainsChange(self: *const T, pbVersionOwnerReplicaId: ?*const u8, pgidItemId: ?*const u8, pSyncVersion: ?*const SYNC_VERSION) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ContainsChange(@ptrCast(*const ISyncKnowledge, self), pbVersionOwnerReplicaId, pgidItemId, pSyncVersion); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ContainsChangeUnit(self: *const T, pbVersionOwnerReplicaId: ?*const u8, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, pSyncVersion: ?*const SYNC_VERSION) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ContainsChangeUnit(@ptrCast(*const ISyncKnowledge, self), pbVersionOwnerReplicaId, pbItemId, pbChangeUnitId, pSyncVersion); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_GetScopeVector(self: *const T, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).GetScopeVector(@ptrCast(*const ISyncKnowledge, self), riid, ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_GetReplicaKeyMap(self: *const T, ppReplicaKeyMap: ?*?*IReplicaKeyMap) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).GetReplicaKeyMap(@ptrCast(*const ISyncKnowledge, self), ppReplicaKeyMap); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_Clone(self: *const T, ppClonedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).Clone(@ptrCast(*const ISyncKnowledge, self), ppClonedKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ConvertVersion(self: *const T, pKnowledgeIn: ?*ISyncKnowledge, pbCurrentOwnerId: ?*const u8, pVersionIn: ?*const 
SYNC_VERSION, pbNewOwnerId: ?*u8, pcbIdSize: ?*u32, pVersionOut: ?*SYNC_VERSION) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ConvertVersion(@ptrCast(*const ISyncKnowledge, self), pKnowledgeIn, pbCurrentOwnerId, pVersionIn, pbNewOwnerId, pcbIdSize, pVersionOut); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_MapRemoteToLocal(self: *const T, pRemoteKnowledge: ?*ISyncKnowledge, ppMappedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).MapRemoteToLocal(@ptrCast(*const ISyncKnowledge, self), pRemoteKnowledge, ppMappedKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_Union(self: *const T, pKnowledge: ?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).Union(@ptrCast(*const ISyncKnowledge, self), pKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ProjectOntoItem(self: *const T, pbItemId: ?*const u8, ppKnowledgeOut: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ProjectOntoItem(@ptrCast(*const ISyncKnowledge, self), pbItemId, ppKnowledgeOut); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ProjectOntoChangeUnit(self: *const T, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, ppKnowledgeOut: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ProjectOntoChangeUnit(@ptrCast(*const ISyncKnowledge, self), pbItemId, pbChangeUnitId, ppKnowledgeOut); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ProjectOntoRange(self: *const T, psrngSyncRange: ?*const SYNC_RANGE, ppKnowledgeOut: ?*?*ISyncKnowledge) callconv(.Inline) 
HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ProjectOntoRange(@ptrCast(*const ISyncKnowledge, self), psrngSyncRange, ppKnowledgeOut); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ExcludeItem(self: *const T, pbItemId: ?*const u8) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ExcludeItem(@ptrCast(*const ISyncKnowledge, self), pbItemId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ExcludeChangeUnit(self: *const T, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ExcludeChangeUnit(@ptrCast(*const ISyncKnowledge, self), pbItemId, pbChangeUnitId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_ContainsKnowledge(self: *const T, pKnowledge: ?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).ContainsKnowledge(@ptrCast(*const ISyncKnowledge, self), pKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_FindMinTickCountForReplica(self: *const T, pbReplicaId: ?*const u8, pullReplicaTickCount: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).FindMinTickCountForReplica(@ptrCast(*const ISyncKnowledge, self), pbReplicaId, pullReplicaTickCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_GetRangeExceptions(self: *const T, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).GetRangeExceptions(@ptrCast(*const ISyncKnowledge, self), riid, ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_GetSingleItemExceptions(self: *const 
T, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).GetSingleItemExceptions(@ptrCast(*const ISyncKnowledge, self), riid, ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_GetChangeUnitExceptions(self: *const T, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).GetChangeUnitExceptions(@ptrCast(*const ISyncKnowledge, self), riid, ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_FindClockVectorForItem(self: *const T, pbItemId: ?*const u8, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).FindClockVectorForItem(@ptrCast(*const ISyncKnowledge, self), pbItemId, riid, ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_FindClockVectorForChangeUnit(self: *const T, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, riid: ?*const Guid, ppUnk: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).FindClockVectorForChangeUnit(@ptrCast(*const ISyncKnowledge, self), pbItemId, pbChangeUnitId, riid, ppUnk); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge_GetVersion(self: *const T, pdwVersion: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge.VTable, self.vtable).GetVersion(@ptrCast(*const ISyncKnowledge, self), pdwVersion); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IForgottenKnowledge_Value = @import("../zig.zig").Guid.initString("456e0f96-6036-452b-9f9d-bcc4b4a85db2"); pub const IID_IForgottenKnowledge = &IID_IForgottenKnowledge_Value; pub const IForgottenKnowledge = extern struct 
{ pub const VTable = extern struct { base: ISyncKnowledge.VTable, ForgetToVersion: fn( self: *const IForgottenKnowledge, pKnowledge: ?*ISyncKnowledge, pVersion: ?*const SYNC_VERSION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace ISyncKnowledge.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IForgottenKnowledge_ForgetToVersion(self: *const T, pKnowledge: ?*ISyncKnowledge, pVersion: ?*const SYNC_VERSION) callconv(.Inline) HRESULT { return @ptrCast(*const IForgottenKnowledge.VTable, self.vtable).ForgetToVersion(@ptrCast(*const IForgottenKnowledge, self), pKnowledge, pVersion); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncKnowledge2_Value = @import("../zig.zig").Guid.initString("ed0addc0-3b4b-46a1-9a45-45661d2114c8"); pub const IID_ISyncKnowledge2 = &IID_ISyncKnowledge2_Value; pub const ISyncKnowledge2 = extern struct { pub const VTable = extern struct { base: ISyncKnowledge.VTable, GetIdParameters: fn( self: *const ISyncKnowledge2, pIdParameters: ?*ID_PARAMETERS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ProjectOntoColumnSet: fn( self: *const ISyncKnowledge2, ppColumns: ?*const ?*u8, count: u32, ppiKnowledgeOut: ?*?*ISyncKnowledge2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SerializeWithOptions: fn( self: *const ISyncKnowledge2, targetFormatVersion: SYNC_SERIALIZATION_VERSION, dwFlags: u32, pbBuffer: ?*u8, pdwSerializedSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetLowestUncontainedId: fn( self: *const ISyncKnowledge2, piSyncKnowledge: ?*ISyncKnowledge2, pbItemId: ?*u8, pcbItemIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetInspector: fn( self: *const ISyncKnowledge2, riid: ?*const Guid, ppiInspector: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) 
HRESULT, GetMinimumSupportedVersion: fn( self: *const ISyncKnowledge2, pVersion: ?*SYNC_SERIALIZATION_VERSION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetStatistics: fn( self: *const ISyncKnowledge2, which: SYNC_STATISTICS, pValue: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ContainsKnowledgeForItem: fn( self: *const ISyncKnowledge2, pKnowledge: ?*ISyncKnowledge, pbItemId: ?*const u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ContainsKnowledgeForChangeUnit: fn( self: *const ISyncKnowledge2, pKnowledge: ?*ISyncKnowledge, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ProjectOntoKnowledgeWithPrerequisite: fn( self: *const ISyncKnowledge2, pPrerequisiteKnowledge: ?*ISyncKnowledge, pTemplateKnowledge: ?*ISyncKnowledge, ppProjectedKnowledge: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Complement: fn( self: *const ISyncKnowledge2, pSyncKnowledge: ?*ISyncKnowledge, ppComplementedKnowledge: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IntersectsWithKnowledge: fn( self: *const ISyncKnowledge2, pSyncKnowledge: ?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetKnowledgeCookie: fn( self: *const ISyncKnowledge2, ppKnowledgeCookie: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CompareToKnowledgeCookie: fn( self: *const ISyncKnowledge2, pKnowledgeCookie: ?*IUnknown, pResult: ?*KNOWLEDGE_COOKIE_COMPARISON_RESULT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace ISyncKnowledge.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_GetIdParameters(self: *const T, pIdParameters: ?*ID_PARAMETERS) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).GetIdParameters(@ptrCast(*const 
ISyncKnowledge2, self), pIdParameters); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_ProjectOntoColumnSet(self: *const T, ppColumns: ?*const ?*u8, count: u32, ppiKnowledgeOut: ?*?*ISyncKnowledge2) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).ProjectOntoColumnSet(@ptrCast(*const ISyncKnowledge2, self), ppColumns, count, ppiKnowledgeOut); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_SerializeWithOptions(self: *const T, targetFormatVersion: SYNC_SERIALIZATION_VERSION, dwFlags: u32, pbBuffer: ?*u8, pdwSerializedSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).SerializeWithOptions(@ptrCast(*const ISyncKnowledge2, self), targetFormatVersion, dwFlags, pbBuffer, pdwSerializedSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_GetLowestUncontainedId(self: *const T, piSyncKnowledge: ?*ISyncKnowledge2, pbItemId: ?*u8, pcbItemIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).GetLowestUncontainedId(@ptrCast(*const ISyncKnowledge2, self), piSyncKnowledge, pbItemId, pcbItemIdSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_GetInspector(self: *const T, riid: ?*const Guid, ppiInspector: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).GetInspector(@ptrCast(*const ISyncKnowledge2, self), riid, ppiInspector); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_GetMinimumSupportedVersion(self: *const T, pVersion: ?*SYNC_SERIALIZATION_VERSION) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).GetMinimumSupportedVersion(@ptrCast(*const ISyncKnowledge2, self), pVersion); } // 
NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_GetStatistics(self: *const T, which: SYNC_STATISTICS, pValue: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).GetStatistics(@ptrCast(*const ISyncKnowledge2, self), which, pValue); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_ContainsKnowledgeForItem(self: *const T, pKnowledge: ?*ISyncKnowledge, pbItemId: ?*const u8) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).ContainsKnowledgeForItem(@ptrCast(*const ISyncKnowledge2, self), pKnowledge, pbItemId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_ContainsKnowledgeForChangeUnit(self: *const T, pKnowledge: ?*ISyncKnowledge, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).ContainsKnowledgeForChangeUnit(@ptrCast(*const ISyncKnowledge2, self), pKnowledge, pbItemId, pbChangeUnitId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_ProjectOntoKnowledgeWithPrerequisite(self: *const T, pPrerequisiteKnowledge: ?*ISyncKnowledge, pTemplateKnowledge: ?*ISyncKnowledge, ppProjectedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).ProjectOntoKnowledgeWithPrerequisite(@ptrCast(*const ISyncKnowledge2, self), pPrerequisiteKnowledge, pTemplateKnowledge, ppProjectedKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_Complement(self: *const T, pSyncKnowledge: ?*ISyncKnowledge, ppComplementedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).Complement(@ptrCast(*const ISyncKnowledge2, self), pSyncKnowledge, 
ppComplementedKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_IntersectsWithKnowledge(self: *const T, pSyncKnowledge: ?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).IntersectsWithKnowledge(@ptrCast(*const ISyncKnowledge2, self), pSyncKnowledge); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_GetKnowledgeCookie(self: *const T, ppKnowledgeCookie: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).GetKnowledgeCookie(@ptrCast(*const ISyncKnowledge2, self), ppKnowledgeCookie); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncKnowledge2_CompareToKnowledgeCookie(self: *const T, pKnowledgeCookie: ?*IUnknown, pResult: ?*KNOWLEDGE_COOKIE_COMPARISON_RESULT) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncKnowledge2.VTable, self.vtable).CompareToKnowledgeCookie(@ptrCast(*const ISyncKnowledge2, self), pKnowledgeCookie, pResult); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IRecoverableErrorData_Value = @import("../zig.zig").Guid.initString("b37c4a0a-4b7d-4c2d-9711-3b00d119b1c8"); pub const IID_IRecoverableErrorData = &IID_IRecoverableErrorData_Value; pub const IRecoverableErrorData = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Initialize: fn( self: *const IRecoverableErrorData, pcszItemDisplayName: ?[*:0]const u16, pcszErrorDescription: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetItemDisplayName: fn( self: *const IRecoverableErrorData, pszItemDisplayName: ?PWSTR, pcchItemDisplayName: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetErrorDescription: fn( self: *const IRecoverableErrorData, pszErrorDescription: ?PWSTR, pcchErrorDescription: ?*u32, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableErrorData_Initialize(self: *const T, pcszItemDisplayName: ?[*:0]const u16, pcszErrorDescription: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableErrorData.VTable, self.vtable).Initialize(@ptrCast(*const IRecoverableErrorData, self), pcszItemDisplayName, pcszErrorDescription); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableErrorData_GetItemDisplayName(self: *const T, pszItemDisplayName: ?PWSTR, pcchItemDisplayName: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableErrorData.VTable, self.vtable).GetItemDisplayName(@ptrCast(*const IRecoverableErrorData, self), pszItemDisplayName, pcchItemDisplayName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableErrorData_GetErrorDescription(self: *const T, pszErrorDescription: ?PWSTR, pcchErrorDescription: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableErrorData.VTable, self.vtable).GetErrorDescription(@ptrCast(*const IRecoverableErrorData, self), pszErrorDescription, pcchErrorDescription); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IRecoverableError_Value = @import("../zig.zig").Guid.initString("0f5625e8-0a7b-45ee-9637-1ce13645909e"); pub const IID_IRecoverableError = &IID_IRecoverableError_Value; pub const IRecoverableError = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetStage: fn( self: *const IRecoverableError, pStage: ?*SYNC_PROGRESS_STAGE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetProvider: fn( self: *const IRecoverableError, pProviderRole: ?*SYNC_PROVIDER_ROLE, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, GetChangeWithRecoverableError: fn( self: *const IRecoverableError, ppChangeWithRecoverableError: ?*?*ISyncChange, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRecoverableErrorDataForChange: fn( self: *const IRecoverableError, phrError: ?*HRESULT, ppErrorData: ?*?*IRecoverableErrorData, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRecoverableErrorDataForChangeUnit: fn( self: *const IRecoverableError, pChangeUnit: ?*ISyncChangeUnit, phrError: ?*HRESULT, ppErrorData: ?*?*IRecoverableErrorData, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableError_GetStage(self: *const T, pStage: ?*SYNC_PROGRESS_STAGE) callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableError.VTable, self.vtable).GetStage(@ptrCast(*const IRecoverableError, self), pStage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableError_GetProvider(self: *const T, pProviderRole: ?*SYNC_PROVIDER_ROLE) callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableError.VTable, self.vtable).GetProvider(@ptrCast(*const IRecoverableError, self), pProviderRole); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableError_GetChangeWithRecoverableError(self: *const T, ppChangeWithRecoverableError: ?*?*ISyncChange) callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableError.VTable, self.vtable).GetChangeWithRecoverableError(@ptrCast(*const IRecoverableError, self), ppChangeWithRecoverableError); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableError_GetRecoverableErrorDataForChange(self: *const T, phrError: ?*HRESULT, ppErrorData: ?*?*IRecoverableErrorData) 
callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableError.VTable, self.vtable).GetRecoverableErrorDataForChange(@ptrCast(*const IRecoverableError, self), phrError, ppErrorData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRecoverableError_GetRecoverableErrorDataForChangeUnit(self: *const T, pChangeUnit: ?*ISyncChangeUnit, phrError: ?*HRESULT, ppErrorData: ?*?*IRecoverableErrorData) callconv(.Inline) HRESULT { return @ptrCast(*const IRecoverableError.VTable, self.vtable).GetRecoverableErrorDataForChangeUnit(@ptrCast(*const IRecoverableError, self), pChangeUnit, phrError, ppErrorData); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IChangeConflict_Value = @import("../zig.zig").Guid.initString("014ebf97-9f20-4f7a-bdd4-25979c77c002"); pub const IID_IChangeConflict = &IID_IChangeConflict_Value; pub const IChangeConflict = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetDestinationProviderConflictingChange: fn( self: *const IChangeConflict, ppConflictingChange: ?*?*ISyncChange, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSourceProviderConflictingChange: fn( self: *const IChangeConflict, ppConflictingChange: ?*?*ISyncChange, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDestinationProviderConflictingData: fn( self: *const IChangeConflict, ppConflictingData: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSourceProviderConflictingData: fn( self: *const IChangeConflict, ppConflictingData: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetResolveActionForChange: fn( self: *const IChangeConflict, pResolveAction: ?*SYNC_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetResolveActionForChange: fn( self: *const IChangeConflict, resolveAction: SYNC_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, 
GetResolveActionForChangeUnit: fn( self: *const IChangeConflict, pChangeUnit: ?*ISyncChangeUnit, pResolveAction: ?*SYNC_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetResolveActionForChangeUnit: fn( self: *const IChangeConflict, pChangeUnit: ?*ISyncChangeUnit, resolveAction: SYNC_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_GetDestinationProviderConflictingChange(self: *const T, ppConflictingChange: ?*?*ISyncChange) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).GetDestinationProviderConflictingChange(@ptrCast(*const IChangeConflict, self), ppConflictingChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_GetSourceProviderConflictingChange(self: *const T, ppConflictingChange: ?*?*ISyncChange) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).GetSourceProviderConflictingChange(@ptrCast(*const IChangeConflict, self), ppConflictingChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_GetDestinationProviderConflictingData(self: *const T, ppConflictingData: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).GetDestinationProviderConflictingData(@ptrCast(*const IChangeConflict, self), ppConflictingData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_GetSourceProviderConflictingData(self: *const T, ppConflictingData: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).GetSourceProviderConflictingData(@ptrCast(*const IChangeConflict, self), 
ppConflictingData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_GetResolveActionForChange(self: *const T, pResolveAction: ?*SYNC_RESOLVE_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).GetResolveActionForChange(@ptrCast(*const IChangeConflict, self), pResolveAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_SetResolveActionForChange(self: *const T, resolveAction: SYNC_RESOLVE_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).SetResolveActionForChange(@ptrCast(*const IChangeConflict, self), resolveAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_GetResolveActionForChangeUnit(self: *const T, pChangeUnit: ?*ISyncChangeUnit, pResolveAction: ?*SYNC_RESOLVE_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).GetResolveActionForChangeUnit(@ptrCast(*const IChangeConflict, self), pChangeUnit, pResolveAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IChangeConflict_SetResolveActionForChangeUnit(self: *const T, pChangeUnit: ?*ISyncChangeUnit, resolveAction: SYNC_RESOLVE_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const IChangeConflict.VTable, self.vtable).SetResolveActionForChangeUnit(@ptrCast(*const IChangeConflict, self), pChangeUnit, resolveAction); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IConstraintConflict_Value = @import("../zig.zig").Guid.initString("00d2302e-1cf8-4835-b85f-b7ca4f799e0a"); pub const IID_IConstraintConflict = &IID_IConstraintConflict_Value; pub const IConstraintConflict = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetDestinationProviderConflictingChange: fn( self: *const IConstraintConflict, ppConflictingChange: ?*?*ISyncChange, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, GetSourceProviderConflictingChange: fn( self: *const IConstraintConflict, ppConflictingChange: ?*?*ISyncChange, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDestinationProviderOriginalChange: fn( self: *const IConstraintConflict, ppOriginalChange: ?*?*ISyncChange, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDestinationProviderConflictingData: fn( self: *const IConstraintConflict, ppConflictingData: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSourceProviderConflictingData: fn( self: *const IConstraintConflict, ppConflictingData: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDestinationProviderOriginalData: fn( self: *const IConstraintConflict, ppOriginalData: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetConstraintResolveActionForChange: fn( self: *const IConstraintConflict, pConstraintResolveAction: ?*SYNC_CONSTRAINT_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetConstraintResolveActionForChange: fn( self: *const IConstraintConflict, constraintResolveAction: SYNC_CONSTRAINT_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetConstraintResolveActionForChangeUnit: fn( self: *const IConstraintConflict, pChangeUnit: ?*ISyncChangeUnit, pConstraintResolveAction: ?*SYNC_CONSTRAINT_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetConstraintResolveActionForChangeUnit: fn( self: *const IConstraintConflict, pChangeUnit: ?*ISyncChangeUnit, constraintResolveAction: SYNC_CONSTRAINT_RESOLVE_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetConstraintConflictReason: fn( self: *const IConstraintConflict, pConstraintConflictReason: ?*CONSTRAINT_CONFLICT_REASON, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsTemporary: fn( self: *const IConstraintConflict, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub 
fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetDestinationProviderConflictingChange(self: *const T, ppConflictingChange: ?*?*ISyncChange) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetDestinationProviderConflictingChange(@ptrCast(*const IConstraintConflict, self), ppConflictingChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetSourceProviderConflictingChange(self: *const T, ppConflictingChange: ?*?*ISyncChange) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetSourceProviderConflictingChange(@ptrCast(*const IConstraintConflict, self), ppConflictingChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetDestinationProviderOriginalChange(self: *const T, ppOriginalChange: ?*?*ISyncChange) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetDestinationProviderOriginalChange(@ptrCast(*const IConstraintConflict, self), ppOriginalChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetDestinationProviderConflictingData(self: *const T, ppConflictingData: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetDestinationProviderConflictingData(@ptrCast(*const IConstraintConflict, self), ppConflictingData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetSourceProviderConflictingData(self: *const T, ppConflictingData: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetSourceProviderConflictingData(@ptrCast(*const 
IConstraintConflict, self), ppConflictingData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetDestinationProviderOriginalData(self: *const T, ppOriginalData: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetDestinationProviderOriginalData(@ptrCast(*const IConstraintConflict, self), ppOriginalData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetConstraintResolveActionForChange(self: *const T, pConstraintResolveAction: ?*SYNC_CONSTRAINT_RESOLVE_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetConstraintResolveActionForChange(@ptrCast(*const IConstraintConflict, self), pConstraintResolveAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_SetConstraintResolveActionForChange(self: *const T, constraintResolveAction: SYNC_CONSTRAINT_RESOLVE_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).SetConstraintResolveActionForChange(@ptrCast(*const IConstraintConflict, self), constraintResolveAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetConstraintResolveActionForChangeUnit(self: *const T, pChangeUnit: ?*ISyncChangeUnit, pConstraintResolveAction: ?*SYNC_CONSTRAINT_RESOLVE_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetConstraintResolveActionForChangeUnit(@ptrCast(*const IConstraintConflict, self), pChangeUnit, pConstraintResolveAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_SetConstraintResolveActionForChangeUnit(self: *const T, pChangeUnit: ?*ISyncChangeUnit, constraintResolveAction: SYNC_CONSTRAINT_RESOLVE_ACTION) callconv(.Inline) HRESULT 
{ return @ptrCast(*const IConstraintConflict.VTable, self.vtable).SetConstraintResolveActionForChangeUnit(@ptrCast(*const IConstraintConflict, self), pChangeUnit, constraintResolveAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_GetConstraintConflictReason(self: *const T, pConstraintConflictReason: ?*CONSTRAINT_CONFLICT_REASON) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).GetConstraintConflictReason(@ptrCast(*const IConstraintConflict, self), pConstraintConflictReason); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IConstraintConflict_IsTemporary(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IConstraintConflict.VTable, self.vtable).IsTemporary(@ptrCast(*const IConstraintConflict, self)); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncCallback_Value = @import("../zig.zig").Guid.initString("0599797f-5ed9-485c-ae36-0c5d1bf2e7a5"); pub const IID_ISyncCallback = &IID_ISyncCallback_Value; pub const ISyncCallback = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnProgress: fn( self: *const ISyncCallback, provider: SYNC_PROVIDER_ROLE, syncStage: SYNC_PROGRESS_STAGE, dwCompletedWork: u32, dwTotalWork: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnChange: fn( self: *const ISyncCallback, pSyncChange: ?*ISyncChange, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnConflict: fn( self: *const ISyncCallback, pConflict: ?*IChangeConflict, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnFullEnumerationNeeded: fn( self: *const ISyncCallback, pFullEnumerationAction: ?*SYNC_FULL_ENUMERATION_ACTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnRecoverableError: fn( self: *const ISyncCallback, pRecoverableError: ?*IRecoverableError, ) callconv(@import("std").os.windows.WINAPI) 
HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncCallback_OnProgress(self: *const T, provider: SYNC_PROVIDER_ROLE, syncStage: SYNC_PROGRESS_STAGE, dwCompletedWork: u32, dwTotalWork: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncCallback.VTable, self.vtable).OnProgress(@ptrCast(*const ISyncCallback, self), provider, syncStage, dwCompletedWork, dwTotalWork); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncCallback_OnChange(self: *const T, pSyncChange: ?*ISyncChange) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncCallback.VTable, self.vtable).OnChange(@ptrCast(*const ISyncCallback, self), pSyncChange); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncCallback_OnConflict(self: *const T, pConflict: ?*IChangeConflict) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncCallback.VTable, self.vtable).OnConflict(@ptrCast(*const ISyncCallback, self), pConflict); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncCallback_OnFullEnumerationNeeded(self: *const T, pFullEnumerationAction: ?*SYNC_FULL_ENUMERATION_ACTION) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncCallback.VTable, self.vtable).OnFullEnumerationNeeded(@ptrCast(*const ISyncCallback, self), pFullEnumerationAction); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncCallback_OnRecoverableError(self: *const T, pRecoverableError: ?*IRecoverableError) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncCallback.VTable, self.vtable).OnRecoverableError(@ptrCast(*const ISyncCallback, self), pRecoverableError); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncCallback2_Value 
= @import("../zig.zig").Guid.initString("47ce84af-7442-4ead-8630-12015e030ad7"); pub const IID_ISyncCallback2 = &IID_ISyncCallback2_Value; pub const ISyncCallback2 = extern struct { pub const VTable = extern struct { base: ISyncCallback.VTable, OnChangeApplied: fn( self: *const ISyncCallback2, dwChangesApplied: u32, dwChangesFailed: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnChangeFailed: fn( self: *const ISyncCallback2, dwChangesApplied: u32, dwChangesFailed: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace ISyncCallback.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncCallback2_OnChangeApplied(self: *const T, dwChangesApplied: u32, dwChangesFailed: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncCallback2.VTable, self.vtable).OnChangeApplied(@ptrCast(*const ISyncCallback2, self), dwChangesApplied, dwChangesFailed); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncCallback2_OnChangeFailed(self: *const T, dwChangesApplied: u32, dwChangesFailed: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncCallback2.VTable, self.vtable).OnChangeFailed(@ptrCast(*const ISyncCallback2, self), dwChangesApplied, dwChangesFailed); } };} pub usingnamespace MethodMixin(@This()); }; const IID_ISyncConstraintCallback_Value = @import("../zig.zig").Guid.initString("8af3843e-75b3-438c-bb51-6f020d70d3cb"); pub const IID_ISyncConstraintCallback = &IID_ISyncConstraintCallback_Value; pub const ISyncConstraintCallback = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnConstraintConflict: fn( self: *const ISyncConstraintCallback, pConflict: ?*IConstraintConflict, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace 
IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncConstraintCallback_OnConstraintConflict(self: *const T, pConflict: ?*IConstraintConflict) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncConstraintCallback.VTable, self.vtable).OnConstraintConflict(@ptrCast(*const ISyncConstraintCallback, self), pConflict); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncProvider_Value = @import("../zig.zig").Guid.initString("8f657056-2bce-4a17-8c68-c7bb7898b56f"); pub const IID_ISyncProvider = &IID_ISyncProvider_Value; pub const ISyncProvider = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetIdParameters: fn( self: *const ISyncProvider, pIdParameters: ?*ID_PARAMETERS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProvider_GetIdParameters(self: *const T, pIdParameters: ?*ID_PARAMETERS) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProvider.VTable, self.vtable).GetIdParameters(@ptrCast(*const ISyncProvider, self), pIdParameters); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncSessionState_Value = @import("../zig.zig").Guid.initString("b8a940fe-9f01-483b-9434-c37d361225d9"); pub const IID_ISyncSessionState = &IID_ISyncSessionState_Value; pub const ISyncSessionState = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, IsCanceled: fn( self: *const ISyncSessionState, pfIsCanceled: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetInfoForChangeApplication: fn( self: *const ISyncSessionState, pbChangeApplierInfo: ?*u8, pcbChangeApplierInfo: ?*u32, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, LoadInfoFromChangeApplication: fn( self: *const ISyncSessionState, pbChangeApplierInfo: ?*const u8, cbChangeApplierInfo: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetForgottenKnowledgeRecoveryRangeStart: fn( self: *const ISyncSessionState, pbRangeStart: ?*u8, pcbRangeStart: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetForgottenKnowledgeRecoveryRangeEnd: fn( self: *const ISyncSessionState, pbRangeEnd: ?*u8, pcbRangeEnd: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetForgottenKnowledgeRecoveryRange: fn( self: *const ISyncSessionState, pRange: ?*const SYNC_RANGE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnProgress: fn( self: *const ISyncSessionState, provider: SYNC_PROVIDER_ROLE, syncStage: SYNC_PROGRESS_STAGE, dwCompletedWork: u32, dwTotalWork: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncSessionState_IsCanceled(self: *const T, pfIsCanceled: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncSessionState.VTable, self.vtable).IsCanceled(@ptrCast(*const ISyncSessionState, self), pfIsCanceled); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncSessionState_GetInfoForChangeApplication(self: *const T, pbChangeApplierInfo: ?*u8, pcbChangeApplierInfo: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncSessionState.VTable, self.vtable).GetInfoForChangeApplication(@ptrCast(*const ISyncSessionState, self), pbChangeApplierInfo, pcbChangeApplierInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncSessionState_LoadInfoFromChangeApplication(self: *const T, pbChangeApplierInfo: ?*const u8, cbChangeApplierInfo: u32) 
callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncSessionState.VTable, self.vtable).LoadInfoFromChangeApplication(@ptrCast(*const ISyncSessionState, self), pbChangeApplierInfo, cbChangeApplierInfo);
        }
        // Each wrapper below casts the mixin receiver T back to the concrete
        // interface and dispatches through the corresponding vtable slot.
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncSessionState_GetForgottenKnowledgeRecoveryRangeStart(self: *const T, pbRangeStart: ?*u8, pcbRangeStart: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncSessionState.VTable, self.vtable).GetForgottenKnowledgeRecoveryRangeStart(@ptrCast(*const ISyncSessionState, self), pbRangeStart, pcbRangeStart);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncSessionState_GetForgottenKnowledgeRecoveryRangeEnd(self: *const T, pbRangeEnd: ?*u8, pcbRangeEnd: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncSessionState.VTable, self.vtable).GetForgottenKnowledgeRecoveryRangeEnd(@ptrCast(*const ISyncSessionState, self), pbRangeEnd, pcbRangeEnd);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncSessionState_SetForgottenKnowledgeRecoveryRange(self: *const T, pRange: ?*const SYNC_RANGE) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncSessionState.VTable, self.vtable).SetForgottenKnowledgeRecoveryRange(@ptrCast(*const ISyncSessionState, self), pRange);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncSessionState_OnProgress(self: *const T, provider: SYNC_PROVIDER_ROLE, syncStage: SYNC_PROGRESS_STAGE, dwCompletedWork: u32, dwTotalWork: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncSessionState.VTable, self.vtable).OnProgress(@ptrCast(*const ISyncSessionState, self), provider, syncStage, dwCompletedWork, dwTotalWork);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncSessionExtendedErrorInfo_Value =
@import("../zig.zig").Guid.initString("326c6810-790a-409b-b741-6999388761eb");
pub const IID_ISyncSessionExtendedErrorInfo = &IID_ISyncSessionExtendedErrorInfo_Value;
/// COM interface exposing the sync provider that produced a session error.
pub const ISyncSessionExtendedErrorInfo = extern struct {
    pub const VTable = extern struct {
        // Base vtable embedded as the first field: extern struct guarantees
        // field order, keeping this layout binary-compatible with IUnknown's.
        base: IUnknown.VTable,
        GetSyncProviderWithError: fn(
            self: *const ISyncSessionExtendedErrorInfo,
            ppProviderWithError: ?*?*ISyncProvider,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin re-exported by derived interfaces so the wrappers below are
    /// callable on any T carrying a compatible vtable pointer.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncSessionExtendedErrorInfo_GetSyncProviderWithError(self: *const T, ppProviderWithError: ?*?*ISyncProvider) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncSessionExtendedErrorInfo.VTable, self.vtable).GetSyncProviderWithError(@ptrCast(*const ISyncSessionExtendedErrorInfo, self), ppProviderWithError);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncSessionState2_Value = @import("../zig.zig").Guid.initString("9e37cfa3-9e38-4c61-9ca3-ffe810b45ca2");
pub const IID_ISyncSessionState2 = &IID_ISyncSessionState2_Value;
/// Extends ISyncSessionState with provider-error bookkeeping.
pub const ISyncSessionState2 = extern struct {
    pub const VTable = extern struct {
        // Derives from ISyncSessionState: its vtable is embedded first.
        base: ISyncSessionState.VTable,
        SetProviderWithError: fn(
            self: *const ISyncSessionState2,
            fSelf: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSessionErrorStatus: fn(
            self: *const ISyncSessionState2,
            phrSessionError: ?*HRESULT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        // Inherit all ISyncSessionState (and transitively IUnknown) wrappers.
        pub usingnamespace ISyncSessionState.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncSessionState2_SetProviderWithError(self: *const T, fSelf: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const
ISyncSessionState2.VTable, self.vtable).SetProviderWithError(@ptrCast(*const ISyncSessionState2, self), fSelf);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncSessionState2_GetSessionErrorStatus(self: *const T, phrSessionError: ?*HRESULT) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncSessionState2.VTable, self.vtable).GetSessionErrorStatus(@ptrCast(*const ISyncSessionState2, self), phrSessionError);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncFilterInfo_Value = @import("../zig.zig").Guid.initString("794eaaf8-3f2e-47e6-9728-17e6fcf94cb7");
pub const IID_ISyncFilterInfo = &IID_ISyncFilterInfo_Value;
/// Base COM interface for sync filter information; serializes the filter
/// into a caller-provided buffer (two-call size/fill pattern is typical for
/// this API family — confirm against the Sync Framework docs).
pub const ISyncFilterInfo = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Serialize: fn(
            self: *const ISyncFilterInfo,
            pbBuffer: ?*u8,
            pcbBuffer: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFilterInfo_Serialize(self: *const T, pbBuffer: ?*u8, pcbBuffer: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncFilterInfo.VTable, self.vtable).Serialize(@ptrCast(*const ISyncFilterInfo, self), pbBuffer, pcbBuffer);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncFilterInfo2_Value = @import("../zig.zig").Guid.initString("19b394ba-e3d0-468c-934d-321968b2ab34");
pub const IID_ISyncFilterInfo2 = &IID_ISyncFilterInfo2_Value;
/// Extends ISyncFilterInfo with a flags accessor.
pub const ISyncFilterInfo2 = extern struct {
    pub const VTable = extern struct {
        base: ISyncFilterInfo.VTable,
        GetFlags: fn(
            self: *const ISyncFilterInfo2,
            pdwFlags: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
return struct {
        // Inherit ISyncFilterInfo wrappers (Serialize) plus IUnknown's.
        pub usingnamespace ISyncFilterInfo.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFilterInfo2_GetFlags(self: *const T, pdwFlags: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncFilterInfo2.VTable, self.vtable).GetFlags(@ptrCast(*const ISyncFilterInfo2, self), pdwFlags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_IChangeUnitListFilterInfo_Value = @import("../zig.zig").Guid.initString("f2837671-0bdf-43fa-b502-232375fb50c2");
pub const IID_IChangeUnitListFilterInfo = &IID_IChangeUnitListFilterInfo_Value;
/// Filter info specialized to an explicit list of change-unit IDs.
/// Derives from ISyncFilterInfo (base vtable embedded first).
pub const IChangeUnitListFilterInfo = extern struct {
    pub const VTable = extern struct {
        base: ISyncFilterInfo.VTable,
        Initialize: fn(
            self: *const IChangeUnitListFilterInfo,
            ppbChangeUnitIds: ?*const ?*u8,
            dwChangeUnitCount: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetChangeUnitIdCount: fn(
            self: *const IChangeUnitListFilterInfo,
            pdwChangeUnitIdCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetChangeUnitId: fn(
            self: *const IChangeUnitListFilterInfo,
            dwChangeUnitIdIndex: u32,
            pbChangeUnitId: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncFilterInfo.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IChangeUnitListFilterInfo_Initialize(self: *const T, ppbChangeUnitIds: ?*const ?*u8, dwChangeUnitCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IChangeUnitListFilterInfo.VTable, self.vtable).Initialize(@ptrCast(*const IChangeUnitListFilterInfo, self), ppbChangeUnitIds, dwChangeUnitCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IChangeUnitListFilterInfo_GetChangeUnitIdCount(self: *const T, pdwChangeUnitIdCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IChangeUnitListFilterInfo.VTable, self.vtable).GetChangeUnitIdCount(@ptrCast(*const IChangeUnitListFilterInfo, self), pdwChangeUnitIdCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IChangeUnitListFilterInfo_GetChangeUnitId(self: *const T, dwChangeUnitIdIndex: u32, pbChangeUnitId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IChangeUnitListFilterInfo.VTable, self.vtable).GetChangeUnitId(@ptrCast(*const IChangeUnitListFilterInfo, self), dwChangeUnitIdIndex, pbChangeUnitId, pcbIdSize);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ISyncFilter_Value = @import("../zig.zig").Guid.initString("087a3f15-0fcb-44c1-9639-53c14e2b5506");
pub const IID_ISyncFilter = &IID_ISyncFilter_Value;
/// A sync filter: supports identity comparison against another filter and
/// serialization into a caller-provided buffer.
pub const ISyncFilter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        IsIdentical: fn(
            self: *const ISyncFilter,
            pSyncFilter: ?*ISyncFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Serialize: fn(
            self: *const ISyncFilter,
            pbSyncFilter: ?*u8,
            pcbSyncFilter: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFilter_IsIdentical(self: *const T, pSyncFilter: ?*ISyncFilter) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncFilter.VTable, self.vtable).IsIdentical(@ptrCast(*const ISyncFilter, self), pSyncFilter);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFilter_Serialize(self: *const T, pbSyncFilter: ?*u8, pcbSyncFilter: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncFilter.VTable, self.vtable).Serialize(@ptrCast(*const ISyncFilter, self), pbSyncFilter, pcbSyncFilter);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const
IID_ISyncFilterDeserializer_Value = @import("../zig.zig").Guid.initString("b45b7a72-e5c7-46be-9c82-77b8b15dab8a");
pub const IID_ISyncFilterDeserializer = &IID_ISyncFilterDeserializer_Value;
// Reconstructs an ISyncFilter from its serialized byte form (extends IUnknown).
pub const ISyncFilterDeserializer = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        DeserializeSyncFilter: fn( self: *const ISyncFilterDeserializer, pbSyncFilter: ?*const u8, dwCbSyncFilter: u32, ppISyncFilter: ?*?*ISyncFilter, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFilterDeserializer_DeserializeSyncFilter(self: *const T, pbSyncFilter: ?*const u8, dwCbSyncFilter: u32, ppISyncFilter: ?*?*ISyncFilter) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncFilterDeserializer.VTable, self.vtable).DeserializeSyncFilter(@ptrCast(*const ISyncFilterDeserializer, self), pbSyncFilter, dwCbSyncFilter, ppISyncFilter); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ICustomFilterInfo_Value = @import("../zig.zig").Guid.initString("1d335dff-6f88-4e4d-91a8-a3f351cfd473");
pub const IID_ICustomFilterInfo = &IID_ICustomFilterInfo_Value;
// Filter info that wraps a provider-defined ISyncFilter (extends ISyncFilterInfo).
pub const ICustomFilterInfo = extern struct {
    pub const VTable = extern struct {
        base: ISyncFilterInfo.VTable,
        GetSyncFilter: fn( self: *const ICustomFilterInfo, pISyncFilter: ?*?*ISyncFilter, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncFilterInfo.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICustomFilterInfo_GetSyncFilter(self: *const T, pISyncFilter: ?*?*ISyncFilter) callconv(.Inline) HRESULT { return @ptrCast(*const ICustomFilterInfo.VTable, self.vtable).GetSyncFilter(@ptrCast(*const ICustomFilterInfo, self), pISyncFilter); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// How child filters of a combined filter are merged; only intersection is defined.
pub const FILTER_COMBINATION_TYPE = enum(i32) {
    N = 0,
};
pub const FCT_INTERSECTION = FILTER_COMBINATION_TYPE.N;

const IID_ICombinedFilterInfo_Value = @import("../zig.zig").Guid.initString("11f9de71-2818-4779-b2ac-42d450565f45");
pub const IID_ICombinedFilterInfo = &IID_ICombinedFilterInfo_Value;
// Filter info composed of several child filters plus a combination type
// (extends ISyncFilterInfo).
pub const ICombinedFilterInfo = extern struct {
    pub const VTable = extern struct {
        base: ISyncFilterInfo.VTable,
        GetFilterCount: fn( self: *const ICombinedFilterInfo, pdwFilterCount: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilterInfo: fn( self: *const ICombinedFilterInfo, dwFilterIndex: u32, ppIFilterInfo: ?*?*ISyncFilterInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilterCombinationType: fn( self: *const ICombinedFilterInfo, pFilterCombinationType: ?*FILTER_COMBINATION_TYPE, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncFilterInfo.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICombinedFilterInfo_GetFilterCount(self: *const T, pdwFilterCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ICombinedFilterInfo.VTable, self.vtable).GetFilterCount(@ptrCast(*const ICombinedFilterInfo, self), pdwFilterCount); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICombinedFilterInfo_GetFilterInfo(self: *const T, dwFilterIndex: u32, ppIFilterInfo: ?*?*ISyncFilterInfo) callconv(.Inline) HRESULT { return @ptrCast(*const ICombinedFilterInfo.VTable, self.vtable).GetFilterInfo(@ptrCast(*const ICombinedFilterInfo, self), dwFilterIndex, ppIFilterInfo); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICombinedFilterInfo_GetFilterCombinationType(self: *const T, pFilterCombinationType: ?*FILTER_COMBINATION_TYPE) callconv(.Inline) HRESULT
{ return @ptrCast(*const ICombinedFilterInfo.VTable, self.vtable).GetFilterCombinationType(@ptrCast(*const ICombinedFilterInfo, self), pFilterCombinationType); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_IEnumSyncChanges_Value = @import("../zig.zig").Guid.initString("5f86be4a-5e78-4e32-ac1c-c24fd223ef85");
pub const IID_IEnumSyncChanges = &IID_IEnumSyncChanges_Value;
// COM-style enumerator over ISyncChange items: Next/Skip/Reset/Clone
// (extends IUnknown).
pub const IEnumSyncChanges = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn( self: *const IEnumSyncChanges, cChanges: u32, ppChange: ?*?*ISyncChange, pcFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn( self: *const IEnumSyncChanges, cChanges: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn( self: *const IEnumSyncChanges, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn( self: *const IEnumSyncChanges, ppEnum: ?*?*IEnumSyncChanges, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChanges_Next(self: *const T, cChanges: u32, ppChange: ?*?*ISyncChange, pcFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncChanges.VTable, self.vtable).Next(@ptrCast(*const IEnumSyncChanges, self), cChanges, ppChange, pcFetched); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChanges_Skip(self: *const T, cChanges: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncChanges.VTable, self.vtable).Skip(@ptrCast(*const IEnumSyncChanges, self), cChanges); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChanges_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncChanges.VTable, self.vtable).Reset(@ptrCast(*const IEnumSyncChanges, self)); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChanges_Clone(self: *const T, ppEnum: ?*?*IEnumSyncChanges) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncChanges.VTable, self.vtable).Clone(@ptrCast(*const IEnumSyncChanges, self), ppEnum); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChangeBuilder_Value = @import("../zig.zig").Guid.initString("56f14771-8677-484f-a170-e386e418a676");
pub const IID_ISyncChangeBuilder = &IID_ISyncChangeBuilder_Value;
// Builder for attaching per-change-unit version metadata to a change
// (extends IUnknown).
pub const ISyncChangeBuilder = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddChangeUnitMetadata: fn( self: *const ISyncChangeBuilder, pbChangeUnitId: ?*const u8, pChangeUnitVersion: ?*const SYNC_VERSION, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBuilder_AddChangeUnitMetadata(self: *const T, pbChangeUnitId: ?*const u8, pChangeUnitVersion: ?*const SYNC_VERSION) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBuilder.VTable, self.vtable).AddChangeUnitMetadata(@ptrCast(*const ISyncChangeBuilder, self), pbChangeUnitId, pChangeUnitVersion); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IFilterTrackingSyncChangeBuilder_Value = @import("../zig.zig").Guid.initString("295024a0-70da-4c58-883c-ce2afb308d0b");
pub const IID_IFilterTrackingSyncChangeBuilder = &IID_IFilterTrackingSyncChangeBuilder_Value;
// Builder for attaching filter-change metadata to a change (extends IUnknown).
pub const IFilterTrackingSyncChangeBuilder = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddFilterChange: fn( self: *const IFilterTrackingSyncChangeBuilder, dwFilterKey: u32, pFilterChange: ?*const SYNC_FILTER_CHANGE, )
callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetAllChangeUnitsPresentFlag: fn( self: *const IFilterTrackingSyncChangeBuilder, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFilterTrackingSyncChangeBuilder_AddFilterChange(self: *const T, dwFilterKey: u32, pFilterChange: ?*const SYNC_FILTER_CHANGE) callconv(.Inline) HRESULT { return @ptrCast(*const IFilterTrackingSyncChangeBuilder.VTable, self.vtable).AddFilterChange(@ptrCast(*const IFilterTrackingSyncChangeBuilder, self), dwFilterKey, pFilterChange); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFilterTrackingSyncChangeBuilder_SetAllChangeUnitsPresentFlag(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IFilterTrackingSyncChangeBuilder.VTable, self.vtable).SetAllChangeUnitsPresentFlag(@ptrCast(*const IFilterTrackingSyncChangeBuilder, self)); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChangeBatchBase_Value = @import("../zig.zig").Guid.initString("52f6e694-6a71-4494-a184-a8311bf5d227");
pub const IID_ISyncChangeBatchBase = &IID_ISyncChangeBatchBase_Value;
// Base interface for a batch of sync changes: change enumeration, last-batch
// flag, work estimates, ordered grouping, knowledge accessors and
// serialization (extends IUnknown).
pub const ISyncChangeBatchBase = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetChangeEnumerator: fn( self: *const ISyncChangeBatchBase, ppEnum: ?*?*IEnumSyncChanges, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetIsLastBatch: fn( self: *const ISyncChangeBatchBase, pfLastBatch: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetWorkEstimateForBatch: fn( self: *const ISyncChangeBatchBase, pdwWorkForBatch: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRemainingWorkEstimateForSession: fn( self: *const ISyncChangeBatchBase, pdwRemainingWorkForSession: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        BeginOrderedGroup: fn( self: *const ISyncChangeBatchBase, pbLowerBound: ?*const u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EndOrderedGroup: fn( self: *const ISyncChangeBatchBase, pbUpperBound: ?*const u8, pMadeWithKnowledge: ?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddItemMetadataToGroup: fn( self: *const ISyncChangeBatchBase, pbOwnerReplicaId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwFlags: u32, dwWorkForChange: u32, ppChangeBuilder: ?*?*ISyncChangeBuilder, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedKnowledge: fn( self: *const ISyncChangeBatchBase, ppLearnedKnowledge: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPrerequisiteKnowledge: fn( self: *const ISyncChangeBatchBase, ppPrerequisteKnowledge: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSourceForgottenKnowledge: fn( self: *const ISyncChangeBatchBase, ppSourceForgottenKnowledge: ?*?*IForgottenKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetLastBatch: fn( self: *const ISyncChangeBatchBase, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetWorkEstimateForBatch: fn( self: *const ISyncChangeBatchBase, dwWorkForBatch: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetRemainingWorkEstimateForSession: fn( self: *const ISyncChangeBatchBase, dwRemainingWorkForSession: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Serialize: fn( self: *const ISyncChangeBatchBase, pbChangeBatch: ?*u8, pcbChangeBatch: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_GetChangeEnumerator(self: *const T, ppEnum: ?*?*IEnumSyncChanges) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).GetChangeEnumerator(@ptrCast(*const ISyncChangeBatchBase, self), ppEnum); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_GetIsLastBatch(self: *const T, pfLastBatch: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).GetIsLastBatch(@ptrCast(*const ISyncChangeBatchBase, self), pfLastBatch); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_GetWorkEstimateForBatch(self: *const T, pdwWorkForBatch: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).GetWorkEstimateForBatch(@ptrCast(*const ISyncChangeBatchBase, self), pdwWorkForBatch); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_GetRemainingWorkEstimateForSession(self: *const T, pdwRemainingWorkForSession: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).GetRemainingWorkEstimateForSession(@ptrCast(*const ISyncChangeBatchBase, self), pdwRemainingWorkForSession); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_BeginOrderedGroup(self: *const T, pbLowerBound: ?*const u8) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).BeginOrderedGroup(@ptrCast(*const ISyncChangeBatchBase, self), pbLowerBound); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_EndOrderedGroup(self: *const T, pbUpperBound: ?*const u8, pMadeWithKnowledge: ?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).EndOrderedGroup(@ptrCast(*const ISyncChangeBatchBase, self), pbUpperBound, pMadeWithKnowledge); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_AddItemMetadataToGroup(self: *const T, pbOwnerReplicaId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwFlags: u32, dwWorkForChange: u32, ppChangeBuilder: ?*?*ISyncChangeBuilder) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).AddItemMetadataToGroup(@ptrCast(*const ISyncChangeBatchBase, self), pbOwnerReplicaId, pbItemId, pChangeVersion, pCreationVersion, dwFlags, dwWorkForChange, ppChangeBuilder); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_GetLearnedKnowledge(self: *const T, ppLearnedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).GetLearnedKnowledge(@ptrCast(*const ISyncChangeBatchBase, self), ppLearnedKnowledge); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_GetPrerequisiteKnowledge(self: *const T, ppPrerequisteKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).GetPrerequisiteKnowledge(@ptrCast(*const ISyncChangeBatchBase, self), ppPrerequisteKnowledge); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_GetSourceForgottenKnowledge(self: *const T, ppSourceForgottenKnowledge: ?*?*IForgottenKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).GetSourceForgottenKnowledge(@ptrCast(*const ISyncChangeBatchBase, self), ppSourceForgottenKnowledge); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_SetLastBatch(self: *const T) callconv(.Inline) HRESULT { return
@ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).SetLastBatch(@ptrCast(*const ISyncChangeBatchBase, self)); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_SetWorkEstimateForBatch(self: *const T, dwWorkForBatch: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).SetWorkEstimateForBatch(@ptrCast(*const ISyncChangeBatchBase, self), dwWorkForBatch); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_SetRemainingWorkEstimateForSession(self: *const T, dwRemainingWorkForSession: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).SetRemainingWorkEstimateForSession(@ptrCast(*const ISyncChangeBatchBase, self), dwRemainingWorkForSession); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase_Serialize(self: *const T, pbChangeBatch: ?*u8, pcbChangeBatch: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase.VTable, self.vtable).Serialize(@ptrCast(*const ISyncChangeBatchBase, self), pbChangeBatch, pcbChangeBatch); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChangeBatch_Value = @import("../zig.zig").Guid.initString("70c64dee-380f-4c2e-8f70-31c55bd5f9b3");
pub const IID_ISyncChangeBatch = &IID_ISyncChangeBatch_Value;
// Regular change batch: adds unordered grouping and logged conflicts on top
// of ISyncChangeBatchBase.
pub const ISyncChangeBatch = extern struct {
    pub const VTable = extern struct {
        base: ISyncChangeBatchBase.VTable,
        BeginUnorderedGroup: fn( self: *const ISyncChangeBatch, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EndUnorderedGroup: fn( self: *const ISyncChangeBatch, pMadeWithKnowledge: ?*ISyncKnowledge, fAllChangesForKnowledge: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddLoggedConflict: fn( self: *const ISyncChangeBatch, pbOwnerReplicaId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwFlags: u32, dwWorkForChange: u32, pConflictKnowledge: ?*ISyncKnowledge, ppChangeBuilder: ?*?*ISyncChangeBuilder, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncChangeBatchBase.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatch_BeginUnorderedGroup(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatch.VTable, self.vtable).BeginUnorderedGroup(@ptrCast(*const ISyncChangeBatch, self)); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatch_EndUnorderedGroup(self: *const T, pMadeWithKnowledge: ?*ISyncKnowledge, fAllChangesForKnowledge: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatch.VTable, self.vtable).EndUnorderedGroup(@ptrCast(*const ISyncChangeBatch, self), pMadeWithKnowledge, fAllChangesForKnowledge); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatch_AddLoggedConflict(self: *const T, pbOwnerReplicaId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwFlags: u32, dwWorkForChange: u32, pConflictKnowledge: ?*ISyncKnowledge, ppChangeBuilder: ?*?*ISyncChangeBuilder) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatch.VTable, self.vtable).AddLoggedConflict(@ptrCast(*const ISyncChangeBatch, self), pbOwnerReplicaId, pbItemId, pChangeVersion, pCreationVersion, dwFlags, dwWorkForChange, pConflictKnowledge, ppChangeBuilder); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncFullEnumerationChangeBatch_Value = @import("../zig.zig").Guid.initString("ef64197d-4f44-4ea2-b355-4524713e3bed");
pub const
IID_ISyncFullEnumerationChangeBatch = &IID_ISyncFullEnumerationChangeBatch_Value;
// Change batch used during full-enumeration (recovery) synchronization
// (extends ISyncChangeBatchBase).
pub const ISyncFullEnumerationChangeBatch = extern struct {
    pub const VTable = extern struct {
        base: ISyncChangeBatchBase.VTable,
        GetLearnedKnowledgeAfterRecoveryComplete: fn( self: *const ISyncFullEnumerationChangeBatch, ppLearnedKnowledgeAfterRecoveryComplete: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetClosedLowerBoundItemId: fn( self: *const ISyncFullEnumerationChangeBatch, pbClosedLowerBoundItemId: ?*u8, pcbIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetClosedUpperBoundItemId: fn( self: *const ISyncFullEnumerationChangeBatch, pbClosedUpperBoundItemId: ?*u8, pcbIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncChangeBatchBase.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFullEnumerationChangeBatch_GetLearnedKnowledgeAfterRecoveryComplete(self: *const T, ppLearnedKnowledgeAfterRecoveryComplete: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncFullEnumerationChangeBatch.VTable, self.vtable).GetLearnedKnowledgeAfterRecoveryComplete(@ptrCast(*const ISyncFullEnumerationChangeBatch, self), ppLearnedKnowledgeAfterRecoveryComplete); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFullEnumerationChangeBatch_GetClosedLowerBoundItemId(self: *const T, pbClosedLowerBoundItemId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncFullEnumerationChangeBatch.VTable, self.vtable).GetClosedLowerBoundItemId(@ptrCast(*const ISyncFullEnumerationChangeBatch, self), pbClosedLowerBoundItemId, pcbIdSize); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFullEnumerationChangeBatch_GetClosedUpperBoundItemId(self: *const T, pbClosedUpperBoundItemId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncFullEnumerationChangeBatch.VTable, self.vtable).GetClosedUpperBoundItemId(@ptrCast(*const ISyncFullEnumerationChangeBatch, self), pbClosedUpperBoundItemId, pcbIdSize); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChangeBatchWithPrerequisite_Value = @import("../zig.zig").Guid.initString("097f13be-5b92-4048-b3f2-7b42a2515e07");
pub const IID_ISyncChangeBatchWithPrerequisite = &IID_ISyncChangeBatchWithPrerequisite_Value;
// Change batch carrying prerequisite knowledge (extends ISyncChangeBatchBase).
pub const ISyncChangeBatchWithPrerequisite = extern struct {
    pub const VTable = extern struct {
        base: ISyncChangeBatchBase.VTable,
        SetPrerequisiteKnowledge: fn( self: *const ISyncChangeBatchWithPrerequisite, pPrerequisiteKnowledge: ?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedKnowledgeWithPrerequisite: fn( self: *const ISyncChangeBatchWithPrerequisite, pDestinationKnowledge: ?*ISyncKnowledge, ppLearnedWithPrerequisiteKnowledge: ?*?*ISyncKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedForgottenKnowledge: fn( self: *const ISyncChangeBatchWithPrerequisite, ppLearnedForgottenKnowledge: ?*?*IForgottenKnowledge, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncChangeBatchBase.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithPrerequisite_SetPrerequisiteKnowledge(self: *const T, pPrerequisiteKnowledge: ?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchWithPrerequisite.VTable, self.vtable).SetPrerequisiteKnowledge(@ptrCast(*const ISyncChangeBatchWithPrerequisite, self), pPrerequisiteKnowledge); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn
ISyncChangeBatchWithPrerequisite_GetLearnedKnowledgeWithPrerequisite(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, ppLearnedWithPrerequisiteKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchWithPrerequisite.VTable, self.vtable).GetLearnedKnowledgeWithPrerequisite(@ptrCast(*const ISyncChangeBatchWithPrerequisite, self), pDestinationKnowledge, ppLearnedWithPrerequisiteKnowledge); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithPrerequisite_GetLearnedForgottenKnowledge(self: *const T, ppLearnedForgottenKnowledge: ?*?*IForgottenKnowledge) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchWithPrerequisite.VTable, self.vtable).GetLearnedForgottenKnowledge(@ptrCast(*const ISyncChangeBatchWithPrerequisite, self), ppLearnedForgottenKnowledge); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChangeBatchBase2_Value = @import("../zig.zig").Guid.initString("6fdb596a-d755-4584-bd0c-c0c23a548fbf");
pub const IID_ISyncChangeBatchBase2 = &IID_ISyncChangeBatchBase2_Value;
// Adds serialization with an explicit target format version and flags
// (extends ISyncChangeBatchBase).
pub const ISyncChangeBatchBase2 = extern struct {
    pub const VTable = extern struct {
        base: ISyncChangeBatchBase.VTable,
        SerializeWithOptions: fn( self: *const ISyncChangeBatchBase2, targetFormatVersion: SYNC_SERIALIZATION_VERSION, dwFlags: u32, pbBuffer: ?*u8, pdwSerializedSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncChangeBatchBase.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchBase2_SerializeWithOptions(self: *const T, targetFormatVersion: SYNC_SERIALIZATION_VERSION, dwFlags: u32, pbBuffer: ?*u8, pdwSerializedSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchBase2.VTable, self.vtable).SerializeWithOptions(@ptrCast(*const ISyncChangeBatchBase2, self), targetFormatVersion, dwFlags, pbBuffer, pdwSerializedSize); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChangeBatchAdvanced_Value = @import("../zig.zig").Guid.initString("0f1a4995-cbc8-421d-b550-5d0bebf3e9a5");
pub const IID_ISyncChangeBatchAdvanced = &IID_ISyncChangeBatchAdvanced_Value;
// Advanced batch queries: filter info, full-enumeration-to-regular batch
// conversion, upper-bound item id, batch-level knowledge flag (extends IUnknown).
pub const ISyncChangeBatchAdvanced = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFilterInfo: fn( self: *const ISyncChangeBatchAdvanced, ppFilterInfo: ?*?*ISyncFilterInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ConvertFullEnumerationChangeBatchToRegularChangeBatch: fn( self: *const ISyncChangeBatchAdvanced, ppChangeBatch: ?*?*ISyncChangeBatch, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetUpperBoundItemId: fn( self: *const ISyncChangeBatchAdvanced, pbItemId: ?*u8, pcbIdSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetBatchLevelKnowledgeShouldBeApplied: fn( self: *const ISyncChangeBatchAdvanced, pfBatchKnowledgeShouldBeApplied: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchAdvanced_GetFilterInfo(self: *const T, ppFilterInfo: ?*?*ISyncFilterInfo) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchAdvanced.VTable, self.vtable).GetFilterInfo(@ptrCast(*const ISyncChangeBatchAdvanced, self), ppFilterInfo); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchAdvanced_ConvertFullEnumerationChangeBatchToRegularChangeBatch(self: *const T, ppChangeBatch: ?*?*ISyncChangeBatch) callconv(.Inline) HRESULT { return @ptrCast(*const
ISyncChangeBatchAdvanced.VTable, self.vtable).ConvertFullEnumerationChangeBatchToRegularChangeBatch(@ptrCast(*const ISyncChangeBatchAdvanced, self), ppChangeBatch); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchAdvanced_GetUpperBoundItemId(self: *const T, pbItemId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchAdvanced.VTable, self.vtable).GetUpperBoundItemId(@ptrCast(*const ISyncChangeBatchAdvanced, self), pbItemId, pcbIdSize); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchAdvanced_GetBatchLevelKnowledgeShouldBeApplied(self: *const T, pfBatchKnowledgeShouldBeApplied: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatchAdvanced.VTable, self.vtable).GetBatchLevelKnowledgeShouldBeApplied(@ptrCast(*const ISyncChangeBatchAdvanced, self), pfBatchKnowledgeShouldBeApplied); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ISyncChangeBatch2_Value = @import("../zig.zig").Guid.initString("225f4a33-f5ee-4cc7-b039-67a262b4b2ac");
pub const IID_ISyncChangeBatch2 = &IID_ISyncChangeBatch2_Value;
// Adds merge-tombstone metadata and merge-tombstone logged conflicts
// (extends ISyncChangeBatch).
pub const ISyncChangeBatch2 = extern struct {
    pub const VTable = extern struct {
        base: ISyncChangeBatch.VTable,
        AddMergeTombstoneMetadataToGroup: fn( self: *const ISyncChangeBatch2, pbOwnerReplicaId: ?*const u8, pbWinnerItemId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwWorkForChange: u32, ppChangeBuilder: ?*?*ISyncChangeBuilder, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddMergeTombstoneLoggedConflict: fn( self: *const ISyncChangeBatch2, pbOwnerReplicaId: ?*const u8, pbWinnerItemId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwWorkForChange: u32, pConflictKnowledge: ?*ISyncKnowledge, ppChangeBuilder: ?*?*ISyncChangeBuilder, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncChangeBatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatch2_AddMergeTombstoneMetadataToGroup(self: *const T, pbOwnerReplicaId: ?*const u8, pbWinnerItemId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwWorkForChange: u32, ppChangeBuilder: ?*?*ISyncChangeBuilder) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatch2.VTable, self.vtable).AddMergeTombstoneMetadataToGroup(@ptrCast(*const ISyncChangeBatch2, self), pbOwnerReplicaId, pbWinnerItemId, pbItemId, pChangeVersion, pCreationVersion, dwWorkForChange, ppChangeBuilder); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatch2_AddMergeTombstoneLoggedConflict(self: *const T, pbOwnerReplicaId: ?*const u8, pbWinnerItemId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwWorkForChange: u32, pConflictKnowledge: ?*ISyncKnowledge, ppChangeBuilder: ?*?*ISyncChangeBuilder) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncChangeBatch2.VTable, self.vtable).AddMergeTombstoneLoggedConflict(@ptrCast(*const ISyncChangeBatch2, self), pbOwnerReplicaId, pbWinnerItemId, pbItemId, pChangeVersion, pCreationVersion, dwWorkForChange, pConflictKnowledge, ppChangeBuilder); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ISyncFullEnumerationChangeBatch2_Value = @import("../zig.zig").Guid.initString("e06449f4-a205-4b65-9724-01b22101eec1");
pub const IID_ISyncFullEnumerationChangeBatch2 = &IID_ISyncFullEnumerationChangeBatch2_Value;
// Full-enumeration batch with merge-tombstone support
// (extends ISyncFullEnumerationChangeBatch).
pub const ISyncFullEnumerationChangeBatch2 = extern struct {
    pub const VTable = extern struct {
        base: ISyncFullEnumerationChangeBatch.VTable,
        AddMergeTombstoneMetadataToGroup:
fn( self: *const ISyncFullEnumerationChangeBatch2, pbOwnerReplicaId: ?*const u8, pbWinnerItemId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwWorkForChange: u32, ppChangeBuilder: ?*?*ISyncChangeBuilder, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace ISyncFullEnumerationChangeBatch.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncFullEnumerationChangeBatch2_AddMergeTombstoneMetadataToGroup(self: *const T, pbOwnerReplicaId: ?*const u8, pbWinnerItemId: ?*const u8, pbItemId: ?*const u8, pChangeVersion: ?*const SYNC_VERSION, pCreationVersion: ?*const SYNC_VERSION, dwWorkForChange: u32, ppChangeBuilder: ?*?*ISyncChangeBuilder) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncFullEnumerationChangeBatch2.VTable, self.vtable).AddMergeTombstoneMetadataToGroup(@ptrCast(*const ISyncFullEnumerationChangeBatch2, self), pbOwnerReplicaId, pbWinnerItemId, pbItemId, pChangeVersion, pCreationVersion, dwWorkForChange, ppChangeBuilder); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IKnowledgeSyncProvider_Value = @import("../zig.zig").Guid.initString("43434a49-8da4-47f2-8172-ad7b8b024978"); pub const IID_IKnowledgeSyncProvider = &IID_IKnowledgeSyncProvider_Value; pub const IKnowledgeSyncProvider = extern struct { pub const VTable = extern struct { base: ISyncProvider.VTable, BeginSession: fn( self: *const IKnowledgeSyncProvider, role: SYNC_PROVIDER_ROLE, pSessionState: ?*ISyncSessionState, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSyncBatchParameters: fn( self: *const IKnowledgeSyncProvider, ppSyncKnowledge: ?*?*ISyncKnowledge, pdwRequestedBatchSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetChangeBatch: fn( self: *const 
IKnowledgeSyncProvider,
            dwBatchSize: u32,
            pSyncKnowledge: ?*ISyncKnowledge,
            ppSyncChangeBatch: ?*?*ISyncChangeBatch,
            ppUnkDataRetriever: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFullEnumerationChangeBatch: fn(
            self: *const IKnowledgeSyncProvider,
            dwBatchSize: u32,
            pbLowerEnumerationBound: ?*const u8,
            pSyncKnowledge: ?*ISyncKnowledge,
            ppSyncChangeBatch: ?*?*ISyncFullEnumerationChangeBatch,
            ppUnkDataRetriever: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProcessChangeBatch: fn(
            self: *const IKnowledgeSyncProvider,
            resolutionPolicy: CONFLICT_RESOLUTION_POLICY,
            pSourceChangeBatch: ?*ISyncChangeBatch,
            pUnkDataRetriever: ?*IUnknown,
            pCallback: ?*ISyncCallback,
            pSyncSessionStatistics: ?*SYNC_SESSION_STATISTICS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProcessFullEnumerationChangeBatch: fn(
            self: *const IKnowledgeSyncProvider,
            resolutionPolicy: CONFLICT_RESOLUTION_POLICY,
            pSourceChangeBatch: ?*ISyncFullEnumerationChangeBatch,
            pUnkDataRetriever: ?*IUnknown,
            pCallback: ?*ISyncCallback,
            pSyncSessionStatistics: ?*SYNC_SESSION_STATISTICS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EndSession: fn(
            self: *const IKnowledgeSyncProvider,
            pSessionState: ?*ISyncSessionState,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline wrappers dispatching each call through the COM vtable.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISyncProvider.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IKnowledgeSyncProvider_BeginSession(self: *const T, role: SYNC_PROVIDER_ROLE, pSessionState: ?*ISyncSessionState) callconv(.Inline) HRESULT {
            return @ptrCast(*const IKnowledgeSyncProvider.VTable, self.vtable).BeginSession(@ptrCast(*const IKnowledgeSyncProvider, self), role, pSessionState);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IKnowledgeSyncProvider_GetSyncBatchParameters(self: *const T, ppSyncKnowledge: ?*?*ISyncKnowledge, pdwRequestedBatchSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IKnowledgeSyncProvider.VTable, self.vtable).GetSyncBatchParameters(@ptrCast(*const IKnowledgeSyncProvider, self), ppSyncKnowledge, pdwRequestedBatchSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IKnowledgeSyncProvider_GetChangeBatch(self: *const T, dwBatchSize: u32, pSyncKnowledge: ?*ISyncKnowledge, ppSyncChangeBatch: ?*?*ISyncChangeBatch, ppUnkDataRetriever: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IKnowledgeSyncProvider.VTable, self.vtable).GetChangeBatch(@ptrCast(*const IKnowledgeSyncProvider, self), dwBatchSize, pSyncKnowledge, ppSyncChangeBatch, ppUnkDataRetriever);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IKnowledgeSyncProvider_GetFullEnumerationChangeBatch(self: *const T, dwBatchSize: u32, pbLowerEnumerationBound: ?*const u8, pSyncKnowledge: ?*ISyncKnowledge, ppSyncChangeBatch: ?*?*ISyncFullEnumerationChangeBatch, ppUnkDataRetriever: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IKnowledgeSyncProvider.VTable, self.vtable).GetFullEnumerationChangeBatch(@ptrCast(*const IKnowledgeSyncProvider, self), dwBatchSize, pbLowerEnumerationBound, pSyncKnowledge, ppSyncChangeBatch, ppUnkDataRetriever);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IKnowledgeSyncProvider_ProcessChangeBatch(self: *const T, resolutionPolicy: CONFLICT_RESOLUTION_POLICY, pSourceChangeBatch: ?*ISyncChangeBatch, pUnkDataRetriever: ?*IUnknown, pCallback: ?*ISyncCallback, pSyncSessionStatistics: ?*SYNC_SESSION_STATISTICS) callconv(.Inline) HRESULT {
            return @ptrCast(*const IKnowledgeSyncProvider.VTable, self.vtable).ProcessChangeBatch(@ptrCast(*const IKnowledgeSyncProvider, self), resolutionPolicy, pSourceChangeBatch, pUnkDataRetriever, pCallback, pSyncSessionStatistics);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IKnowledgeSyncProvider_ProcessFullEnumerationChangeBatch(self: *const T, resolutionPolicy: CONFLICT_RESOLUTION_POLICY, pSourceChangeBatch: ?*ISyncFullEnumerationChangeBatch, pUnkDataRetriever: ?*IUnknown, pCallback: ?*ISyncCallback, pSyncSessionStatistics: ?*SYNC_SESSION_STATISTICS) callconv(.Inline) HRESULT {
            return @ptrCast(*const IKnowledgeSyncProvider.VTable, self.vtable).ProcessFullEnumerationChangeBatch(@ptrCast(*const IKnowledgeSyncProvider, self), resolutionPolicy, pSourceChangeBatch, pUnkDataRetriever, pCallback, pSyncSessionStatistics);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IKnowledgeSyncProvider_EndSession(self: *const T, pSessionState: ?*ISyncSessionState) callconv(.Inline) HRESULT {
            return @ptrCast(*const IKnowledgeSyncProvider.VTable, self.vtable).EndSession(@ptrCast(*const IKnowledgeSyncProvider, self), pSessionState);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISyncChangeUnit: generated COM interface binding (extends IUnknown).
// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChangeUnit_Value = @import("../zig.zig").Guid.initString("60edd8ca-7341-4bb7-95ce-fab6394b51cb");
pub const IID_ISyncChangeUnit = &IID_ISyncChangeUnit_Value;
pub const ISyncChangeUnit = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetItemChange: fn(
            self: *const ISyncChangeUnit,
            ppSyncChange: ?*?*ISyncChange,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetChangeUnitId: fn(
            self: *const ISyncChangeUnit,
            pbChangeUnitId: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetChangeUnitVersion: fn(
            self: *const ISyncChangeUnit,
            pbCurrentReplicaId: ?*const u8,
            pVersion: ?*SYNC_VERSION,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeUnit_GetItemChange(self: *const T,
ppSyncChange: ?*?*ISyncChange) callconv(.Inline) HRESULT {
            // Dispatch through the COM vtable, reinterpreting `self` as the interface pointer.
            return @ptrCast(*const ISyncChangeUnit.VTable, self.vtable).GetItemChange(@ptrCast(*const ISyncChangeUnit, self), ppSyncChange);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeUnit_GetChangeUnitId(self: *const T, pbChangeUnitId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeUnit.VTable, self.vtable).GetChangeUnitId(@ptrCast(*const ISyncChangeUnit, self), pbChangeUnitId, pcbIdSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeUnit_GetChangeUnitVersion(self: *const T, pbCurrentReplicaId: ?*const u8, pVersion: ?*SYNC_VERSION) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeUnit.VTable, self.vtable).GetChangeUnitVersion(@ptrCast(*const ISyncChangeUnit, self), pbCurrentReplicaId, pVersion);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IEnumSyncChangeUnits: generated COM enumerator binding (Next/Skip/Reset/Clone).
// TODO: this type is limited to platform 'windows6.1'
const IID_IEnumSyncChangeUnits_Value = @import("../zig.zig").Guid.initString("346b35f1-8703-4c6d-ab1a-4dbca2cff97f");
pub const IID_IEnumSyncChangeUnits = &IID_IEnumSyncChangeUnits_Value;
pub const IEnumSyncChangeUnits = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumSyncChangeUnits,
            cChanges: u32,
            ppChangeUnit: ?*?*ISyncChangeUnit,
            pcFetched: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn(
            self: *const IEnumSyncChangeUnits,
            cChanges: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const IEnumSyncChangeUnits,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn(
            self: *const IEnumSyncChangeUnits,
            ppEnum: ?*?*IEnumSyncChangeUnits,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChangeUnits_Next(self: *const T, cChanges: u32, ppChangeUnit: ?*?*ISyncChangeUnit, pcFetched: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumSyncChangeUnits.VTable, self.vtable).Next(@ptrCast(*const IEnumSyncChangeUnits, self), cChanges, ppChangeUnit, pcFetched);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChangeUnits_Skip(self: *const T, cChanges: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumSyncChangeUnits.VTable, self.vtable).Skip(@ptrCast(*const IEnumSyncChangeUnits, self), cChanges);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChangeUnits_Reset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumSyncChangeUnits.VTable, self.vtable).Reset(@ptrCast(*const IEnumSyncChangeUnits, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSyncChangeUnits_Clone(self: *const T, ppEnum: ?*?*IEnumSyncChangeUnits) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumSyncChangeUnits.VTable, self.vtable).Clone(@ptrCast(*const IEnumSyncChangeUnits, self), ppEnum);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISyncChange: generated COM interface binding (extends IUnknown).
// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncChange_Value = @import("../zig.zig").Guid.initString("a1952beb-0f6b-4711-b136-01da85b968a6");
pub const IID_ISyncChange = &IID_ISyncChange_Value;
pub const ISyncChange = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetOwnerReplicaId: fn(
            self: *const ISyncChange,
            pbReplicaId: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRootItemId: fn(
            self: *const ISyncChange,
            pbRootItemId: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetChangeVersion: fn(
            self: *const ISyncChange,
            pbCurrentReplicaId: ?*const u8,
            pVersion: ?*SYNC_VERSION,
        )
callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCreationVersion: fn(
            self: *const ISyncChange,
            pbCurrentReplicaId: ?*const u8,
            pVersion: ?*SYNC_VERSION,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFlags: fn(
            self: *const ISyncChange,
            pdwFlags: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetWorkEstimate: fn(
            self: *const ISyncChange,
            pdwWork: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetChangeUnits: fn(
            self: *const ISyncChange,
            ppEnum: ?*?*IEnumSyncChangeUnits,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMadeWithKnowledge: fn(
            self: *const ISyncChange,
            ppMadeWithKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedKnowledge: fn(
            self: *const ISyncChange,
            ppLearnedKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetWorkEstimate: fn(
            self: *const ISyncChange,
            dwWork: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline wrappers dispatching each call through the COM vtable.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetOwnerReplicaId(self: *const T, pbReplicaId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetOwnerReplicaId(@ptrCast(*const ISyncChange, self), pbReplicaId, pcbIdSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetRootItemId(self: *const T, pbRootItemId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetRootItemId(@ptrCast(*const ISyncChange, self), pbRootItemId, pcbIdSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetChangeVersion(self: *const T, pbCurrentReplicaId: ?*const u8, pVersion: ?*SYNC_VERSION) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetChangeVersion(@ptrCast(*const ISyncChange, self), pbCurrentReplicaId, pVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetCreationVersion(self: *const T, pbCurrentReplicaId: ?*const u8, pVersion: ?*SYNC_VERSION) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetCreationVersion(@ptrCast(*const ISyncChange, self), pbCurrentReplicaId, pVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetFlags(self: *const T, pdwFlags: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetFlags(@ptrCast(*const ISyncChange, self), pdwFlags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetWorkEstimate(self: *const T, pdwWork: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetWorkEstimate(@ptrCast(*const ISyncChange, self), pdwWork);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetChangeUnits(self: *const T, ppEnum: ?*?*IEnumSyncChangeUnits) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetChangeUnits(@ptrCast(*const ISyncChange, self), ppEnum);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetMadeWithKnowledge(self: *const T, ppMadeWithKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable, self.vtable).GetMadeWithKnowledge(@ptrCast(*const ISyncChange, self), ppMadeWithKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChange_GetLearnedKnowledge(self: *const T, ppLearnedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChange.VTable,
?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            // Dispatch through the COM vtable, reinterpreting `self` as the interface pointer.
            return @ptrCast(*const ISyncChangeWithPrerequisite.VTable, self.vtable).GetLearnedKnowledgeWithPrerequisite(@ptrCast(*const ISyncChangeWithPrerequisite, self), pDestinationKnowledge, ppLearnedKnowledgeWithPrerequisite);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISyncFullEnumerationChange: generated COM interface binding (extends IUnknown).
// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncFullEnumerationChange_Value = @import("../zig.zig").Guid.initString("9785e0bd-bdff-40c4-98c5-b34b2f1991b3");
pub const IID_ISyncFullEnumerationChange = &IID_ISyncFullEnumerationChange_Value;
pub const ISyncFullEnumerationChange = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetLearnedKnowledgeAfterRecoveryComplete: fn(
            self: *const ISyncFullEnumerationChange,
            ppLearnedKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedForgottenKnowledge: fn(
            self: *const ISyncFullEnumerationChange,
            ppLearnedForgottenKnowledge: ?*?*IForgottenKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFullEnumerationChange_GetLearnedKnowledgeAfterRecoveryComplete(self: *const T, ppLearnedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncFullEnumerationChange.VTable, self.vtable).GetLearnedKnowledgeAfterRecoveryComplete(@ptrCast(*const ISyncFullEnumerationChange, self), ppLearnedKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncFullEnumerationChange_GetLearnedForgottenKnowledge(self: *const T, ppLearnedForgottenKnowledge: ?*?*IForgottenKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncFullEnumerationChange.VTable, self.vtable).GetLearnedForgottenKnowledge(@ptrCast(*const ISyncFullEnumerationChange, self), ppLearnedForgottenKnowledge);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISyncMergeTombstoneChange: generated COM interface binding (extends IUnknown).
const IID_ISyncMergeTombstoneChange_Value = @import("../zig.zig").Guid.initString("6ec62597-0903-484c-ad61-36d6e938f47b");
pub const IID_ISyncMergeTombstoneChange = &IID_ISyncMergeTombstoneChange_Value;
pub const ISyncMergeTombstoneChange = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetWinnerItemId: fn(
            self: *const ISyncMergeTombstoneChange,
            pbWinnerItemId: ?*u8,
            pcbIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncMergeTombstoneChange_GetWinnerItemId(self: *const T, pbWinnerItemId: ?*u8, pcbIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncMergeTombstoneChange.VTable, self.vtable).GetWinnerItemId(@ptrCast(*const ISyncMergeTombstoneChange, self), pbWinnerItemId, pcbIdSize);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IEnumItemIds: generated COM enumerator binding (single Next method).
const IID_IEnumItemIds_Value = @import("../zig.zig").Guid.initString("43aa3f61-4b2e-4b60-83df-b110d3e148f1");
pub const IID_IEnumItemIds = &IID_IEnumItemIds_Value;
pub const IEnumItemIds = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumItemIds,
            pbItemId: ?*u8,
            pcbItemIdSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumItemIds_Next(self: *const T, pbItemId: ?*u8, pcbItemIdSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEnumItemIds.VTable, self.vtable).Next(@ptrCast(*const IEnumItemIds, self), pbItemId, pcbItemIdSize);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IFilterKeyMap: generated COM interface binding (declaration continues below).
const
IID_IFilterKeyMap_Value = @import("../zig.zig").Guid.initString("ca169652-07c6-4708-a3da-6e4eba8d2297");
pub const IID_IFilterKeyMap = &IID_IFilterKeyMap_Value;
pub const IFilterKeyMap = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCount: fn(
            self: *const IFilterKeyMap,
            pdwCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddFilter: fn(
            self: *const IFilterKeyMap,
            pISyncFilter: ?*ISyncFilter,
            pdwFilterKey: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilter: fn(
            self: *const IFilterKeyMap,
            dwFilterKey: u32,
            ppISyncFilter: ?*?*ISyncFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Serialize: fn(
            self: *const IFilterKeyMap,
            pbFilterKeyMap: ?*u8,
            pcbFilterKeyMap: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline wrappers dispatching each call through the COM vtable.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFilterKeyMap_GetCount(self: *const T, pdwCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFilterKeyMap.VTable, self.vtable).GetCount(@ptrCast(*const IFilterKeyMap, self), pdwCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFilterKeyMap_AddFilter(self: *const T, pISyncFilter: ?*ISyncFilter, pdwFilterKey: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFilterKeyMap.VTable, self.vtable).AddFilter(@ptrCast(*const IFilterKeyMap, self), pISyncFilter, pdwFilterKey);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFilterKeyMap_GetFilter(self: *const T, dwFilterKey: u32, ppISyncFilter: ?*?*ISyncFilter) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFilterKeyMap.VTable, self.vtable).GetFilter(@ptrCast(*const IFilterKeyMap, self), dwFilterKey, ppISyncFilter);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IFilterKeyMap_Serialize(self: *const T, pbFilterKeyMap: ?*u8, pcbFilterKeyMap: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IFilterKeyMap.VTable, self.vtable).Serialize(@ptrCast(*const IFilterKeyMap, self), pbFilterKeyMap, pcbFilterKeyMap);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISyncChangeWithFilterKeyMap: generated COM interface binding (extends IUnknown).
const IID_ISyncChangeWithFilterKeyMap_Value = @import("../zig.zig").Guid.initString("bfe1ef00-e87d-42fd-a4e9-242d70414aef");
pub const IID_ISyncChangeWithFilterKeyMap = &IID_ISyncChangeWithFilterKeyMap_Value;
pub const ISyncChangeWithFilterKeyMap = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFilterCount: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            pdwFilterCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilterChange: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            dwFilterKey: u32,
            pFilterChange: ?*SYNC_FILTER_CHANGE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAllChangeUnitsPresentFlag: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            pfAllChangeUnitsPresent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilterForgottenKnowledge: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            dwFilterKey: u32,
            ppIFilterForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilteredReplicaLearnedKnowledge: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            ppLearnedKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedFilterForgottenKnowledge: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            dwFilterKey: u32,
            ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilteredReplicaLearnedForgottenKnowledge: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilteredReplicaLearnedForgottenKnowledgeAfterRecoveryComplete: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedFilterForgottenKnowledgeAfterRecoveryComplete: fn(
            self: *const ISyncChangeWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            dwFilterKey: u32,
            ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline wrappers dispatching each call through the COM vtable.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetFilterCount(self: *const T, pdwFilterCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetFilterCount(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), pdwFilterCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetFilterChange(self: *const T, dwFilterKey: u32, pFilterChange: ?*SYNC_FILTER_CHANGE) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetFilterChange(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), dwFilterKey, pFilterChange);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetAllChangeUnitsPresentFlag(self: *const T, pfAllChangeUnitsPresent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetAllChangeUnitsPresentFlag(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), pfAllChangeUnitsPresent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetFilterForgottenKnowledge(self: *const T, dwFilterKey: u32, ppIFilterForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetFilterForgottenKnowledge(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), dwFilterKey, ppIFilterForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetFilteredReplicaLearnedKnowledge(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, ppLearnedKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetFilteredReplicaLearnedKnowledge(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, ppLearnedKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetLearnedFilterForgottenKnowledge(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, dwFilterKey: u32, ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetLearnedFilterForgottenKnowledge(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, dwFilterKey, ppLearnedFilterForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetFilteredReplicaLearnedForgottenKnowledge(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetFilteredReplicaLearnedForgottenKnowledge(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, ppLearnedForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetFilteredReplicaLearnedForgottenKnowledgeAfterRecoveryComplete(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetFilteredReplicaLearnedForgottenKnowledgeAfterRecoveryComplete(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, ppLearnedForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeWithFilterKeyMap_GetLearnedFilterForgottenKnowledgeAfterRecoveryComplete(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, dwFilterKey: u32, ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeWithFilterKeyMap.VTable, self.vtable).GetLearnedFilterForgottenKnowledgeAfterRecoveryComplete(@ptrCast(*const ISyncChangeWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, dwFilterKey, ppLearnedFilterForgottenKnowledge);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISyncChangeBatchWithFilterKeyMap: generated COM interface binding (extends IUnknown).
const IID_ISyncChangeBatchWithFilterKeyMap_Value = @import("../zig.zig").Guid.initString("de247002-566d-459a-a6ed-a5aab3459fb7");
pub const IID_ISyncChangeBatchWithFilterKeyMap = &IID_ISyncChangeBatchWithFilterKeyMap_Value;
pub const ISyncChangeBatchWithFilterKeyMap = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFilterKeyMap: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            ppIFilterKeyMap: ?*?*IFilterKeyMap,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetFilterKeyMap: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            pIFilterKeyMap: ?*IFilterKeyMap,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetFilterForgottenKnowledge: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            dwFilterKey: u32,
            pFilterForgottenKnowledge: ?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilteredReplicaLearnedKnowledge: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedFilterForgottenKnowledge: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            dwFilterKey: u32,
            ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilteredReplicaLearnedForgottenKnowledge: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilteredReplicaLearnedForgottenKnowledgeAfterRecoveryComplete: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLearnedFilterForgottenKnowledgeAfterRecoveryComplete: fn(
            self: *const ISyncChangeBatchWithFilterKeyMap,
            pDestinationKnowledge: ?*ISyncKnowledge,
            pNewMoveins: ?*IEnumItemIds,
            dwFilterKey: u32,
            ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Inline wrappers dispatching each call through the COM vtable.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_GetFilterKeyMap(self: *const T, ppIFilterKeyMap: ?*?*IFilterKeyMap) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).GetFilterKeyMap(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), ppIFilterKeyMap);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_SetFilterKeyMap(self: *const T, pIFilterKeyMap: ?*IFilterKeyMap) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).SetFilterKeyMap(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), pIFilterKeyMap);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_SetFilterForgottenKnowledge(self: *const T, dwFilterKey: u32, pFilterForgottenKnowledge: ?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).SetFilterForgottenKnowledge(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), dwFilterKey, pFilterForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_GetFilteredReplicaLearnedKnowledge(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).GetFilteredReplicaLearnedKnowledge(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, ppLearnedForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_GetLearnedFilterForgottenKnowledge(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, dwFilterKey: u32, ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).GetLearnedFilterForgottenKnowledge(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, dwFilterKey, ppLearnedFilterForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_GetFilteredReplicaLearnedForgottenKnowledge(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).GetFilteredReplicaLearnedForgottenKnowledge(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, ppLearnedForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_GetFilteredReplicaLearnedForgottenKnowledgeAfterRecoveryComplete(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, ppLearnedForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).GetFilteredReplicaLearnedForgottenKnowledgeAfterRecoveryComplete(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, ppLearnedForgottenKnowledge);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISyncChangeBatchWithFilterKeyMap_GetLearnedFilterForgottenKnowledgeAfterRecoveryComplete(self: *const T, pDestinationKnowledge: ?*ISyncKnowledge, pNewMoveins: ?*IEnumItemIds, dwFilterKey: u32, ppLearnedFilterForgottenKnowledge: ?*?*ISyncKnowledge) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISyncChangeBatchWithFilterKeyMap.VTable, self.vtable).GetLearnedFilterForgottenKnowledgeAfterRecoveryComplete(@ptrCast(*const ISyncChangeBatchWithFilterKeyMap, self), pDestinationKnowledge, pNewMoveins, dwFilterKey,
ppLearnedFilterForgottenKnowledge);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_IDataRetrieverCallback_Value = @import("../zig.zig").Guid.initString("71b4863b-f969-4676-bbc3-3d9fdc3fb2c7");
pub const IID_IDataRetrieverCallback = &IID_IDataRetrieverCallback_Value;

// Callback interface invoked by a data retriever to signal that loading of
// change data finished (LoadChangeDataComplete) or failed (LoadChangeDataError).
pub const IDataRetrieverCallback = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        LoadChangeDataComplete: fn(
            self: *const IDataRetrieverCallback,
            pUnkData: ?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LoadChangeDataError: fn(
            self: *const IDataRetrieverCallback,
            hrError: HRESULT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDataRetrieverCallback_LoadChangeDataComplete(self: *const T, pUnkData: ?*IUnknown) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDataRetrieverCallback.VTable, self.vtable).LoadChangeDataComplete(@ptrCast(*const IDataRetrieverCallback, self), pUnkData);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDataRetrieverCallback_LoadChangeDataError(self: *const T, hrError: HRESULT) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDataRetrieverCallback.VTable, self.vtable).LoadChangeDataError(@ptrCast(*const IDataRetrieverCallback, self), hrError);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ILoadChangeContext_Value = @import("../zig.zig").Guid.initString("44a4aaca-ec39-46d5-b5c9-d633c0ee67e2");
pub const IID_ILoadChangeContext = &IID_ILoadChangeContext_Value;

// Context supplied while change data is loaded: exposes the sync change being
// processed and lets the retriever report recoverable errors on the change or
// on an individual change unit.
pub const ILoadChangeContext = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetSyncChange: fn(
            self: *const ILoadChangeContext,
            ppSyncChange: ?*?*ISyncChange,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetRecoverableErrorOnChange: fn(
            self: *const ILoadChangeContext,
            hrError: HRESULT,
            pErrorData: ?*IRecoverableErrorData,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetRecoverableErrorOnChangeUnit: fn(
            self: *const ILoadChangeContext,
            hrError: HRESULT,
            pChangeUnit: ?*ISyncChangeUnit,
            pErrorData: ?*IRecoverableErrorData,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ILoadChangeContext_GetSyncChange(self: *const T, ppSyncChange: ?*?*ISyncChange) callconv(.Inline) HRESULT {
                return @ptrCast(*const ILoadChangeContext.VTable, self.vtable).GetSyncChange(@ptrCast(*const ILoadChangeContext, self), ppSyncChange);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ILoadChangeContext_SetRecoverableErrorOnChange(self: *const T, hrError: HRESULT, pErrorData: ?*IRecoverableErrorData) callconv(.Inline) HRESULT {
                return @ptrCast(*const ILoadChangeContext.VTable, self.vtable).SetRecoverableErrorOnChange(@ptrCast(*const ILoadChangeContext, self), hrError, pErrorData);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ILoadChangeContext_SetRecoverableErrorOnChangeUnit(self: *const T, hrError: HRESULT, pChangeUnit: ?*ISyncChangeUnit, pErrorData: ?*IRecoverableErrorData) callconv(.Inline) HRESULT {
                return @ptrCast(*const ILoadChangeContext.VTable, self.vtable).SetRecoverableErrorOnChangeUnit(@ptrCast(*const ILoadChangeContext, self), hrError, pChangeUnit, pErrorData);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISynchronousDataRetriever_Value = @import("../zig.zig").Guid.initString("9b22f2a9-a4cd-4648-9d8e-3a510d4da04b");
pub const
IID_ISynchronousDataRetriever = &IID_ISynchronousDataRetriever_Value;

// Retriever that loads change data synchronously: LoadChangeData returns the
// data object directly through an out-parameter.
pub const ISynchronousDataRetriever = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetIdParameters: fn(
            self: *const ISynchronousDataRetriever,
            pIdParameters: ?*ID_PARAMETERS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LoadChangeData: fn(
            self: *const ISynchronousDataRetriever,
            pLoadChangeContext: ?*ILoadChangeContext,
            ppUnkData: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISynchronousDataRetriever_GetIdParameters(self: *const T, pIdParameters: ?*ID_PARAMETERS) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISynchronousDataRetriever.VTable, self.vtable).GetIdParameters(@ptrCast(*const ISynchronousDataRetriever, self), pIdParameters);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISynchronousDataRetriever_LoadChangeData(self: *const T, pLoadChangeContext: ?*ILoadChangeContext, ppUnkData: ?*?*IUnknown) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISynchronousDataRetriever.VTable, self.vtable).LoadChangeData(@ptrCast(*const ISynchronousDataRetriever, self), pLoadChangeContext, ppUnkData);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_IAsynchronousDataRetriever_Value = @import("../zig.zig").Guid.initString("9fc7e470-61ea-4a88-9be4-df56a27cfef2");
pub const IID_IAsynchronousDataRetriever = &IID_IAsynchronousDataRetriever_Value;

// Retriever that loads change data asynchronously; results are delivered to a
// registered IDataRetrieverCallback rather than returned from LoadChangeData.
pub const IAsynchronousDataRetriever = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetIdParameters: fn(
            self: *const IAsynchronousDataRetriever,
            pIdParameters: ?*ID_PARAMETERS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RegisterCallback: fn(
            self: *const IAsynchronousDataRetriever,
            pDataRetrieverCallback: ?*IDataRetrieverCallback,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RevokeCallback: fn(
            self: *const IAsynchronousDataRetriever,
            pDataRetrieverCallback: ?*IDataRetrieverCallback,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LoadChangeData: fn(
            self: *const IAsynchronousDataRetriever,
            pLoadChangeContext: ?*ILoadChangeContext,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IAsynchronousDataRetriever_GetIdParameters(self: *const T, pIdParameters: ?*ID_PARAMETERS) callconv(.Inline) HRESULT {
                return @ptrCast(*const IAsynchronousDataRetriever.VTable, self.vtable).GetIdParameters(@ptrCast(*const IAsynchronousDataRetriever, self), pIdParameters);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IAsynchronousDataRetriever_RegisterCallback(self: *const T, pDataRetrieverCallback: ?*IDataRetrieverCallback) callconv(.Inline) HRESULT {
                return @ptrCast(*const IAsynchronousDataRetriever.VTable, self.vtable).RegisterCallback(@ptrCast(*const IAsynchronousDataRetriever, self), pDataRetrieverCallback);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IAsynchronousDataRetriever_RevokeCallback(self: *const T, pDataRetrieverCallback: ?*IDataRetrieverCallback) callconv(.Inline) HRESULT {
                return @ptrCast(*const IAsynchronousDataRetriever.VTable, self.vtable).RevokeCallback(@ptrCast(*const IAsynchronousDataRetriever, self), pDataRetrieverCallback);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IAsynchronousDataRetriever_LoadChangeData(self: *const T, pLoadChangeContext: ?*ILoadChangeContext) callconv(.Inline) HRESULT {
                return @ptrCast(*const IAsynchronousDataRetriever.VTable, self.vtable).LoadChangeData(@ptrCast(*const IAsynchronousDataRetriever, self), pLoadChangeContext);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_IFilterRequestCallback_Value = @import("../zig.zig").Guid.initString("82df8873-6360-463a-a8a1-ede5e1a1594d");
pub const IID_IFilterRequestCallback = &IID_IFilterRequestCallback_Value;

// Callback through which a destination provider requests a filter from the
// source provider during filter negotiation.
pub const IFilterRequestCallback = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        RequestFilter: fn(
            self: *const IFilterRequestCallback,
            pFilter: ?*IUnknown,
            filteringType: FILTERING_TYPE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFilterRequestCallback_RequestFilter(self: *const T, pFilter: ?*IUnknown, filteringType: FILTERING_TYPE) callconv(.Inline) HRESULT {
                return @ptrCast(*const IFilterRequestCallback.VTable, self.vtable).RequestFilter(@ptrCast(*const IFilterRequestCallback, self), pFilter, filteringType);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_IRequestFilteredSync_Value = @import("../zig.zig").Guid.initString("2e020184-6d18-46a7-a32a-da4aeb06696c");
pub const IID_IRequestFilteredSync = &IID_IRequestFilteredSync_Value;
pub const IRequestFilteredSync = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        SpecifyFilter: fn(
            self: *const IRequestFilteredSync,
            pCallback: ?*IFilterRequestCallback,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IRequestFilteredSync_SpecifyFilter(self: *const T, pCallback:
?*IFilterRequestCallback) callconv(.Inline) HRESULT {
                return @ptrCast(*const IRequestFilteredSync.VTable, self.vtable).SpecifyFilter(@ptrCast(*const IRequestFilteredSync, self), pCallback);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISupportFilteredSync_Value = @import("../zig.zig").Guid.initString("3d128ded-d555-4e0d-bf4b-fb213a8a9302");
pub const IID_ISupportFilteredSync = &IID_ISupportFilteredSync_Value;

// Implemented by a source provider that can apply a negotiated filter to the
// changes it enumerates.
pub const ISupportFilteredSync = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddFilter: fn(
            self: *const ISupportFilteredSync,
            pFilter: ?*IUnknown,
            filteringType: FILTERING_TYPE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISupportFilteredSync_AddFilter(self: *const T, pFilter: ?*IUnknown, filteringType: FILTERING_TYPE) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISupportFilteredSync.VTable, self.vtable).AddFilter(@ptrCast(*const ISupportFilteredSync, self), pFilter, filteringType);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

const IID_IFilterTrackingRequestCallback_Value = @import("../zig.zig").Guid.initString("713ca7bb-c858-4674-b4b6-1122436587a9");
pub const IID_IFilterTrackingRequestCallback = &IID_IFilterTrackingRequestCallback_Value;

// Callback through which a filter-tracking provider requests that a specific
// sync filter be tracked.
pub const IFilterTrackingRequestCallback = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        RequestTrackedFilter: fn(
            self: *const IFilterTrackingRequestCallback,
            pFilter: ?*ISyncFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFilterTrackingRequestCallback_RequestTrackedFilter(self: *const T, pFilter: ?*ISyncFilter) callconv(.Inline) HRESULT {
                return @ptrCast(*const IFilterTrackingRequestCallback.VTable, self.vtable).RequestTrackedFilter(@ptrCast(*const IFilterTrackingRequestCallback, self), pFilter);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

const IID_IFilterTrackingProvider_Value = @import("../zig.zig").Guid.initString("743383c0-fc4e-45ba-ad81-d9d84c7a24f8");
pub const IID_IFilterTrackingProvider = &IID_IFilterTrackingProvider_Value;

// Implemented by a provider that tracks filters: it can enumerate the filters
// it tracks (SpecifyTrackedFilters) and accept new ones (AddTrackedFilter).
pub const IFilterTrackingProvider = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        SpecifyTrackedFilters: fn(
            self: *const IFilterTrackingProvider,
            pCallback: ?*IFilterTrackingRequestCallback,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddTrackedFilter: fn(
            self: *const IFilterTrackingProvider,
            pFilter: ?*ISyncFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFilterTrackingProvider_SpecifyTrackedFilters(self: *const T, pCallback: ?*IFilterTrackingRequestCallback) callconv(.Inline) HRESULT {
                return @ptrCast(*const IFilterTrackingProvider.VTable, self.vtable).SpecifyTrackedFilters(@ptrCast(*const IFilterTrackingProvider, self), pCallback);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IFilterTrackingProvider_AddTrackedFilter(self: *const T, pFilter: ?*ISyncFilter) callconv(.Inline) HRESULT {
                return @ptrCast(*const IFilterTrackingProvider.VTable, self.vtable).AddTrackedFilter(@ptrCast(*const IFilterTrackingProvider, self), pFilter);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISupportLastWriteTime_Value =
@import("../zig.zig").Guid.initString("eadf816f-d0bd-43ca-8f40-5acdc6c06f7a");
pub const IID_ISupportLastWriteTime = &IID_ISupportLastWriteTime_Value;

// Exposes last-write timestamps for items and change units, identified by raw
// id byte pointers (lengths are defined by the provider's ID_PARAMETERS).
pub const ISupportLastWriteTime = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetItemChangeTime: fn(
            self: *const ISupportLastWriteTime,
            pbItemId: ?*const u8,
            pullTimestamp: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetChangeUnitChangeTime: fn(
            self: *const ISupportLastWriteTime,
            pbItemId: ?*const u8,
            pbChangeUnitId: ?*const u8,
            pullTimestamp: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISupportLastWriteTime_GetItemChangeTime(self: *const T, pbItemId: ?*const u8, pullTimestamp: ?*u64) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISupportLastWriteTime.VTable, self.vtable).GetItemChangeTime(@ptrCast(*const ISupportLastWriteTime, self), pbItemId, pullTimestamp);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISupportLastWriteTime_GetChangeUnitChangeTime(self: *const T, pbItemId: ?*const u8, pbChangeUnitId: ?*const u8, pullTimestamp: ?*u64) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISupportLastWriteTime.VTable, self.vtable).GetChangeUnitChangeTime(@ptrCast(*const ISupportLastWriteTime, self), pbItemId, pbChangeUnitId, pullTimestamp);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.1'
const IID_IProviderConverter_Value = @import("../zig.zig").Guid.initString("809b7276-98cf-4957-93a5-0ebdd3dddffd");
pub const IID_IProviderConverter = &IID_IProviderConverter_Value;

// Initializes a converter that wraps/adapts an ISyncProvider instance.
pub const IProviderConverter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Initialize: fn(
            self: *const IProviderConverter,
            pISyncProvider: ?*ISyncProvider,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IProviderConverter_Initialize(self: *const T, pISyncProvider: ?*ISyncProvider) callconv(.Inline) HRESULT {
                return @ptrCast(*const IProviderConverter.VTable, self.vtable).Initialize(@ptrCast(*const IProviderConverter, self), pISyncProvider);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

const IID_ISyncDataConverter_Value = @import("../zig.zig").Guid.initString("435d4861-68d5-44aa-a0f9-72a0b00ef9cf");
pub const IID_ISyncDataConverter = &IID_ISyncDataConverter_Value;

// Converts change data and data retrievers between a provider's native format
// and the format expected by the other side of the session.
// NOTE(review): the generated parameter name `ppUnkDataout` (lowercase 'o') in
// ConvertDataToProviderFormat is kept as-is; it is part of the public surface.
pub const ISyncDataConverter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        ConvertDataRetrieverFromProviderFormat: fn(
            self: *const ISyncDataConverter,
            pUnkDataRetrieverIn: ?*IUnknown,
            pEnumSyncChanges: ?*IEnumSyncChanges,
            ppUnkDataOut: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ConvertDataRetrieverToProviderFormat: fn(
            self: *const ISyncDataConverter,
            pUnkDataRetrieverIn: ?*IUnknown,
            pEnumSyncChanges: ?*IEnumSyncChanges,
            ppUnkDataOut: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ConvertDataFromProviderFormat: fn(
            self: *const ISyncDataConverter,
            pDataContext: ?*ILoadChangeContext,
            pUnkDataIn: ?*IUnknown,
            ppUnkDataOut: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ConvertDataToProviderFormat: fn(
            self: *const ISyncDataConverter,
            pDataContext: ?*ILoadChangeContext,
            pUnkDataOut: ?*IUnknown,
            ppUnkDataout: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISyncDataConverter_ConvertDataRetrieverFromProviderFormat(self: *const T, pUnkDataRetrieverIn: ?*IUnknown, pEnumSyncChanges: ?*IEnumSyncChanges, ppUnkDataOut: ?*?*IUnknown) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISyncDataConverter.VTable, self.vtable).ConvertDataRetrieverFromProviderFormat(@ptrCast(*const ISyncDataConverter, self), pUnkDataRetrieverIn, pEnumSyncChanges, ppUnkDataOut);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISyncDataConverter_ConvertDataRetrieverToProviderFormat(self: *const T, pUnkDataRetrieverIn: ?*IUnknown, pEnumSyncChanges: ?*IEnumSyncChanges, ppUnkDataOut: ?*?*IUnknown) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISyncDataConverter.VTable, self.vtable).ConvertDataRetrieverToProviderFormat(@ptrCast(*const ISyncDataConverter, self), pUnkDataRetrieverIn, pEnumSyncChanges, ppUnkDataOut);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISyncDataConverter_ConvertDataFromProviderFormat(self: *const T, pDataContext: ?*ILoadChangeContext, pUnkDataIn: ?*IUnknown, ppUnkDataOut: ?*?*IUnknown) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISyncDataConverter.VTable, self.vtable).ConvertDataFromProviderFormat(@ptrCast(*const ISyncDataConverter, self), pDataContext, pUnkDataIn, ppUnkDataOut);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn ISyncDataConverter_ConvertDataToProviderFormat(self: *const T, pDataContext: ?*ILoadChangeContext, pUnkDataOut: ?*IUnknown, ppUnkDataout: ?*?*IUnknown) callconv(.Inline) HRESULT {
                return @ptrCast(*const ISyncDataConverter.VTable, self.vtable).ConvertDataToProviderFormat(@ptrCast(*const ISyncDataConverter, self), pDataContext, pUnkDataOut, ppUnkDataout);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

const CLSID_SyncProviderRegistration_Value = @import("../zig.zig").Guid.initString("f82b4ef1-93a9-4dde-8015-f7950a1a6e31");
pub const CLSID_SyncProviderRegistration =
&CLSID_SyncProviderRegistration_Value;

// Registration record describing a sync provider: its instance/class ids, the
// content type it handles, capability flags, and supported CPU architectures.
pub const SyncProviderConfiguration = extern struct {
    dwVersion: u32,
    guidInstanceId: Guid,
    clsidProvider: Guid,
    guidConfigUIInstanceId: Guid,
    guidContentType: Guid,
    dwCapabilities: u32,
    dwSupportedArchitecture: u32,
};

// Registration record for a provider configuration UI; fIsGlobal marks a UI
// registered machine-wide rather than per-user.
pub const SyncProviderConfigUIConfiguration = extern struct {
    dwVersion: u32,
    guidInstanceId: Guid,
    clsidConfigUI: Guid,
    guidContentType: Guid,
    dwCapabilities: u32,
    dwSupportedArchitecture: u32,
    fIsGlobal: BOOL,
};

// TODO: this type is limited to platform 'windows6.1'
const IID_ISyncProviderRegistration_Value = @import("../zig.zig").Guid.initString("cb45953b-7624-47bc-a472-eb8cac6b222e");
pub const IID_ISyncProviderRegistration = &IID_ISyncProviderRegistration_Value;
pub const ISyncProviderRegistration = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateSyncProviderConfigUIRegistrationInstance: fn(
            self: *const ISyncProviderRegistration,
            pConfigUIConfig: ?*const SyncProviderConfigUIConfiguration,
            ppConfigUIInfo: ?*?*ISyncProviderConfigUIInfo,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnregisterSyncProviderConfigUI: fn(
            self: *const ISyncProviderRegistration,
            pguidInstanceId: ?*const Guid,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerateSyncProviderConfigUIs: fn(
            self: *const ISyncProviderRegistration,
            pguidContentType: ?*const Guid,
            dwSupportedArchitecture: u32,
            ppEnumSyncProviderConfigUIInfos: ?*?*IEnumSyncProviderConfigUIInfos,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateSyncProviderRegistrationInstance: fn(
            self: *const ISyncProviderRegistration,
            pProviderConfiguration: ?*const SyncProviderConfiguration,
            ppProviderInfo: ?*?*ISyncProviderInfo,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnregisterSyncProvider: fn(
            self: *const ISyncProviderRegistration,
            pguidInstanceId: ?*const Guid,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSyncProviderConfigUIInfoforProvider: fn(
            self: *const ISyncProviderRegistration,
pguidProviderInstanceId: ?*const Guid, ppProviderConfigUIInfo: ?*?*ISyncProviderConfigUIInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT, EnumerateSyncProviders: fn( self: *const ISyncProviderRegistration, pguidContentType: ?*const Guid, dwStateFlagsToFilterMask: u32, dwStateFlagsToFilter: u32, refProviderClsId: ?*const Guid, dwSupportedArchitecture: u32, ppEnumSyncProviderInfos: ?*?*IEnumSyncProviderInfos, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSyncProviderInfo: fn( self: *const ISyncProviderRegistration, pguidInstanceId: ?*const Guid, ppProviderInfo: ?*?*ISyncProviderInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSyncProviderFromInstanceId: fn( self: *const ISyncProviderRegistration, pguidInstanceId: ?*const Guid, dwClsContext: u32, ppSyncProvider: ?*?*IRegisteredSyncProvider, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSyncProviderConfigUIInfo: fn( self: *const ISyncProviderRegistration, pguidInstanceId: ?*const Guid, ppConfigUIInfo: ?*?*ISyncProviderConfigUIInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSyncProviderConfigUIFromInstanceId: fn( self: *const ISyncProviderRegistration, pguidInstanceId: ?*const Guid, dwClsContext: u32, ppConfigUI: ?*?*ISyncProviderConfigUI, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSyncProviderState: fn( self: *const ISyncProviderRegistration, pguidInstanceId: ?*const Guid, pdwStateFlags: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSyncProviderState: fn( self: *const ISyncProviderRegistration, pguidInstanceId: ?*const Guid, dwStateFlagsMask: u32, dwStateFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, RegisterForEvent: fn( self: *const ISyncProviderRegistration, phEvent: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, RevokeEvent: fn( self: *const ISyncProviderRegistration, hEvent: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetChange: fn( self: *const 
ISyncProviderRegistration, hEvent: ?HANDLE, ppChange: ?*?*ISyncRegistrationChange, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_CreateSyncProviderConfigUIRegistrationInstance(self: *const T, pConfigUIConfig: ?*const SyncProviderConfigUIConfiguration, ppConfigUIInfo: ?*?*ISyncProviderConfigUIInfo) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).CreateSyncProviderConfigUIRegistrationInstance(@ptrCast(*const ISyncProviderRegistration, self), pConfigUIConfig, ppConfigUIInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_UnregisterSyncProviderConfigUI(self: *const T, pguidInstanceId: ?*const Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).UnregisterSyncProviderConfigUI(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_EnumerateSyncProviderConfigUIs(self: *const T, pguidContentType: ?*const Guid, dwSupportedArchitecture: u32, ppEnumSyncProviderConfigUIInfos: ?*?*IEnumSyncProviderConfigUIInfos) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).EnumerateSyncProviderConfigUIs(@ptrCast(*const ISyncProviderRegistration, self), pguidContentType, dwSupportedArchitecture, ppEnumSyncProviderConfigUIInfos); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_CreateSyncProviderRegistrationInstance(self: *const T, pProviderConfiguration: ?*const SyncProviderConfiguration, ppProviderInfo: ?*?*ISyncProviderInfo) callconv(.Inline) HRESULT 
{ return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).CreateSyncProviderRegistrationInstance(@ptrCast(*const ISyncProviderRegistration, self), pProviderConfiguration, ppProviderInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_UnregisterSyncProvider(self: *const T, pguidInstanceId: ?*const Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).UnregisterSyncProvider(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_GetSyncProviderConfigUIInfoforProvider(self: *const T, pguidProviderInstanceId: ?*const Guid, ppProviderConfigUIInfo: ?*?*ISyncProviderConfigUIInfo) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).GetSyncProviderConfigUIInfoforProvider(@ptrCast(*const ISyncProviderRegistration, self), pguidProviderInstanceId, ppProviderConfigUIInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_EnumerateSyncProviders(self: *const T, pguidContentType: ?*const Guid, dwStateFlagsToFilterMask: u32, dwStateFlagsToFilter: u32, refProviderClsId: ?*const Guid, dwSupportedArchitecture: u32, ppEnumSyncProviderInfos: ?*?*IEnumSyncProviderInfos) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).EnumerateSyncProviders(@ptrCast(*const ISyncProviderRegistration, self), pguidContentType, dwStateFlagsToFilterMask, dwStateFlagsToFilter, refProviderClsId, dwSupportedArchitecture, ppEnumSyncProviderInfos); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_GetSyncProviderInfo(self: *const T, pguidInstanceId: ?*const Guid, ppProviderInfo: ?*?*ISyncProviderInfo) callconv(.Inline) HRESULT { return 
@ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).GetSyncProviderInfo(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId, ppProviderInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_GetSyncProviderFromInstanceId(self: *const T, pguidInstanceId: ?*const Guid, dwClsContext: u32, ppSyncProvider: ?*?*IRegisteredSyncProvider) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).GetSyncProviderFromInstanceId(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId, dwClsContext, ppSyncProvider); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_GetSyncProviderConfigUIInfo(self: *const T, pguidInstanceId: ?*const Guid, ppConfigUIInfo: ?*?*ISyncProviderConfigUIInfo) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).GetSyncProviderConfigUIInfo(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId, ppConfigUIInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_GetSyncProviderConfigUIFromInstanceId(self: *const T, pguidInstanceId: ?*const Guid, dwClsContext: u32, ppConfigUI: ?*?*ISyncProviderConfigUI) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).GetSyncProviderConfigUIFromInstanceId(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId, dwClsContext, ppConfigUI); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_GetSyncProviderState(self: *const T, pguidInstanceId: ?*const Guid, pdwStateFlags: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).GetSyncProviderState(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId, pdwStateFlags); } // NOTE: method is 
namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_SetSyncProviderState(self: *const T, pguidInstanceId: ?*const Guid, dwStateFlagsMask: u32, dwStateFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).SetSyncProviderState(@ptrCast(*const ISyncProviderRegistration, self), pguidInstanceId, dwStateFlagsMask, dwStateFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_RegisterForEvent(self: *const T, phEvent: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).RegisterForEvent(@ptrCast(*const ISyncProviderRegistration, self), phEvent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_RevokeEvent(self: *const T, hEvent: ?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).RevokeEvent(@ptrCast(*const ISyncProviderRegistration, self), hEvent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderRegistration_GetChange(self: *const T, hEvent: ?HANDLE, ppChange: ?*?*ISyncRegistrationChange) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderRegistration.VTable, self.vtable).GetChange(@ptrCast(*const ISyncProviderRegistration, self), hEvent, ppChange); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IEnumSyncProviderConfigUIInfos_Value = @import("../zig.zig").Guid.initString("f6be2602-17c6-4658-a2d7-68ed3330f641"); pub const IID_IEnumSyncProviderConfigUIInfos = &IID_IEnumSyncProviderConfigUIInfos_Value; pub const IEnumSyncProviderConfigUIInfos = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Next: fn( self: *const IEnumSyncProviderConfigUIInfos, cFactories: u32, ppSyncProviderConfigUIInfo: 
[*]?*ISyncProviderConfigUIInfo, pcFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Skip: fn( self: *const IEnumSyncProviderConfigUIInfos, cFactories: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const IEnumSyncProviderConfigUIInfos, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clone: fn( self: *const IEnumSyncProviderConfigUIInfos, ppEnum: ?*?*IEnumSyncProviderConfigUIInfos, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderConfigUIInfos_Next(self: *const T, cFactories: u32, ppSyncProviderConfigUIInfo: [*]?*ISyncProviderConfigUIInfo, pcFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderConfigUIInfos.VTable, self.vtable).Next(@ptrCast(*const IEnumSyncProviderConfigUIInfos, self), cFactories, ppSyncProviderConfigUIInfo, pcFetched); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderConfigUIInfos_Skip(self: *const T, cFactories: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderConfigUIInfos.VTable, self.vtable).Skip(@ptrCast(*const IEnumSyncProviderConfigUIInfos, self), cFactories); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderConfigUIInfos_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderConfigUIInfos.VTable, self.vtable).Reset(@ptrCast(*const IEnumSyncProviderConfigUIInfos, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderConfigUIInfos_Clone(self: *const T, ppEnum: ?*?*IEnumSyncProviderConfigUIInfos) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderConfigUIInfos.VTable, 
self.vtable).Clone(@ptrCast(*const IEnumSyncProviderConfigUIInfos, self), ppEnum); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IEnumSyncProviderInfos_Value = @import("../zig.zig").Guid.initString("a04ba850-5eb1-460d-a973-393fcb608a11"); pub const IID_IEnumSyncProviderInfos = &IID_IEnumSyncProviderInfos_Value; pub const IEnumSyncProviderInfos = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Next: fn( self: *const IEnumSyncProviderInfos, cInstances: u32, ppSyncProviderInfo: [*]?*ISyncProviderInfo, pcFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Skip: fn( self: *const IEnumSyncProviderInfos, cInstances: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const IEnumSyncProviderInfos, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clone: fn( self: *const IEnumSyncProviderInfos, ppEnum: ?*?*IEnumSyncProviderInfos, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderInfos_Next(self: *const T, cInstances: u32, ppSyncProviderInfo: [*]?*ISyncProviderInfo, pcFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderInfos.VTable, self.vtable).Next(@ptrCast(*const IEnumSyncProviderInfos, self), cInstances, ppSyncProviderInfo, pcFetched); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderInfos_Skip(self: *const T, cInstances: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderInfos.VTable, self.vtable).Skip(@ptrCast(*const IEnumSyncProviderInfos, self), cInstances); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderInfos_Reset(self: *const T) 
callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderInfos.VTable, self.vtable).Reset(@ptrCast(*const IEnumSyncProviderInfos, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSyncProviderInfos_Clone(self: *const T, ppEnum: ?*?*IEnumSyncProviderInfos) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSyncProviderInfos.VTable, self.vtable).Clone(@ptrCast(*const IEnumSyncProviderInfos, self), ppEnum); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncProviderInfo_Value = @import("../zig.zig").Guid.initString("1ee135de-88a4-4504-b0d0-f7920d7e5ba6"); pub const IID_ISyncProviderInfo = &IID_ISyncProviderInfo_Value; pub const ISyncProviderInfo = extern struct { pub const VTable = extern struct { base: IPropertyStore.VTable, GetSyncProvider: fn( self: *const ISyncProviderInfo, dwClsContext: u32, ppSyncProvider: ?*?*IRegisteredSyncProvider, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IPropertyStore.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderInfo_GetSyncProvider(self: *const T, dwClsContext: u32, ppSyncProvider: ?*?*IRegisteredSyncProvider) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderInfo.VTable, self.vtable).GetSyncProvider(@ptrCast(*const ISyncProviderInfo, self), dwClsContext, ppSyncProvider); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncProviderConfigUIInfo_Value = @import("../zig.zig").Guid.initString("214141ae-33d7-4d8d-8e37-f227e880ce50"); pub const IID_ISyncProviderConfigUIInfo = &IID_ISyncProviderConfigUIInfo_Value; pub const ISyncProviderConfigUIInfo = extern struct { pub const VTable = extern struct { base: IPropertyStore.VTable, 
GetSyncProviderConfigUI: fn( self: *const ISyncProviderConfigUIInfo, dwClsContext: u32, ppSyncProviderConfigUI: ?*?*ISyncProviderConfigUI, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IPropertyStore.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderConfigUIInfo_GetSyncProviderConfigUI(self: *const T, dwClsContext: u32, ppSyncProviderConfigUI: ?*?*ISyncProviderConfigUI) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderConfigUIInfo.VTable, self.vtable).GetSyncProviderConfigUI(@ptrCast(*const ISyncProviderConfigUIInfo, self), dwClsContext, ppSyncProviderConfigUI); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncProviderConfigUI_Value = @import("../zig.zig").Guid.initString("7b0705f6-cbcd-4071-ab05-3bdc364d4a0c"); pub const IID_ISyncProviderConfigUI = &IID_ISyncProviderConfigUI_Value; pub const ISyncProviderConfigUI = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Init: fn( self: *const ISyncProviderConfigUI, pguidInstanceId: ?*const Guid, pguidContentType: ?*const Guid, pConfigurationProperties: ?*IPropertyStore, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRegisteredProperties: fn( self: *const ISyncProviderConfigUI, ppConfigUIProperties: ?*?*IPropertyStore, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateAndRegisterNewSyncProvider: fn( self: *const ISyncProviderConfigUI, hwndParent: ?HWND, pUnkContext: ?*IUnknown, ppProviderInfo: ?*?*ISyncProviderInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ModifySyncProvider: fn( self: *const ISyncProviderConfigUI, hwndParent: ?HWND, pUnkContext: ?*IUnknown, pProviderInfo: ?*ISyncProviderInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) 
type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderConfigUI_Init(self: *const T, pguidInstanceId: ?*const Guid, pguidContentType: ?*const Guid, pConfigurationProperties: ?*IPropertyStore) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderConfigUI.VTable, self.vtable).Init(@ptrCast(*const ISyncProviderConfigUI, self), pguidInstanceId, pguidContentType, pConfigurationProperties); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderConfigUI_GetRegisteredProperties(self: *const T, ppConfigUIProperties: ?*?*IPropertyStore) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderConfigUI.VTable, self.vtable).GetRegisteredProperties(@ptrCast(*const ISyncProviderConfigUI, self), ppConfigUIProperties); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderConfigUI_CreateAndRegisterNewSyncProvider(self: *const T, hwndParent: ?HWND, pUnkContext: ?*IUnknown, ppProviderInfo: ?*?*ISyncProviderInfo) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderConfigUI.VTable, self.vtable).CreateAndRegisterNewSyncProvider(@ptrCast(*const ISyncProviderConfigUI, self), hwndParent, pUnkContext, ppProviderInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncProviderConfigUI_ModifySyncProvider(self: *const T, hwndParent: ?HWND, pUnkContext: ?*IUnknown, pProviderInfo: ?*ISyncProviderInfo) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncProviderConfigUI.VTable, self.vtable).ModifySyncProvider(@ptrCast(*const ISyncProviderConfigUI, self), hwndParent, pUnkContext, pProviderInfo); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_IRegisteredSyncProvider_Value = @import("../zig.zig").Guid.initString("913bcf76-47c1-40b5-a896-5e8a9c414c14"); pub const 
IID_IRegisteredSyncProvider = &IID_IRegisteredSyncProvider_Value; pub const IRegisteredSyncProvider = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Init: fn( self: *const IRegisteredSyncProvider, pguidInstanceId: ?*const Guid, pguidContentType: ?*const Guid, pContextPropertyStore: ?*IPropertyStore, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetInstanceId: fn( self: *const IRegisteredSyncProvider, pguidInstanceId: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const IRegisteredSyncProvider, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRegisteredSyncProvider_Init(self: *const T, pguidInstanceId: ?*const Guid, pguidContentType: ?*const Guid, pContextPropertyStore: ?*IPropertyStore) callconv(.Inline) HRESULT { return @ptrCast(*const IRegisteredSyncProvider.VTable, self.vtable).Init(@ptrCast(*const IRegisteredSyncProvider, self), pguidInstanceId, pguidContentType, pContextPropertyStore); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRegisteredSyncProvider_GetInstanceId(self: *const T, pguidInstanceId: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IRegisteredSyncProvider.VTable, self.vtable).GetInstanceId(@ptrCast(*const IRegisteredSyncProvider, self), pguidInstanceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRegisteredSyncProvider_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IRegisteredSyncProvider.VTable, self.vtable).Reset(@ptrCast(*const IRegisteredSyncProvider, self)); } };} pub usingnamespace MethodMixin(@This()); }; pub const SYNC_REGISTRATION_EVENT = enum(i32) { PROVIDER_ADDED = 0, PROVIDER_REMOVED = 1, PROVIDER_UPDATED = 2, 
PROVIDER_STATE_CHANGED = 3, CONFIGUI_ADDED = 4, CONFIGUI_REMOVED = 5, CONFIGUI_UPDATED = 6, }; pub const SRE_PROVIDER_ADDED = SYNC_REGISTRATION_EVENT.PROVIDER_ADDED; pub const SRE_PROVIDER_REMOVED = SYNC_REGISTRATION_EVENT.PROVIDER_REMOVED; pub const SRE_PROVIDER_UPDATED = SYNC_REGISTRATION_EVENT.PROVIDER_UPDATED; pub const SRE_PROVIDER_STATE_CHANGED = SYNC_REGISTRATION_EVENT.PROVIDER_STATE_CHANGED; pub const SRE_CONFIGUI_ADDED = SYNC_REGISTRATION_EVENT.CONFIGUI_ADDED; pub const SRE_CONFIGUI_REMOVED = SYNC_REGISTRATION_EVENT.CONFIGUI_REMOVED; pub const SRE_CONFIGUI_UPDATED = SYNC_REGISTRATION_EVENT.CONFIGUI_UPDATED; // TODO: this type is limited to platform 'windows6.1' const IID_ISyncRegistrationChange_Value = @import("../zig.zig").Guid.initString("eea0d9ae-6b29-43b4-9e70-e3ae33bb2c3b"); pub const IID_ISyncRegistrationChange = &IID_ISyncRegistrationChange_Value; pub const ISyncRegistrationChange = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetEvent: fn( self: *const ISyncRegistrationChange, psreEvent: ?*SYNC_REGISTRATION_EVENT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetInstanceId: fn( self: *const ISyncRegistrationChange, pguidInstanceId: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncRegistrationChange_GetEvent(self: *const T, psreEvent: ?*SYNC_REGISTRATION_EVENT) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncRegistrationChange.VTable, self.vtable).GetEvent(@ptrCast(*const ISyncRegistrationChange, self), psreEvent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISyncRegistrationChange_GetInstanceId(self: *const T, pguidInstanceId: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISyncRegistrationChange.VTable, 
self.vtable).GetInstanceId(@ptrCast(*const ISyncRegistrationChange, self), pguidInstanceId); } };} pub usingnamespace MethodMixin(@This()); }; //-------------------------------------------------------------------------------- // Section: Functions (0) //-------------------------------------------------------------------------------- //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (8) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const BOOL = @import("../foundation.zig").BOOL; const HANDLE = @import("../foundation.zig").HANDLE; const HRESULT = @import("../foundation.zig").HRESULT; const HWND = @import("../foundation.zig").HWND; const IPropertyStore = @import("../ui/shell/properties_system.zig").IPropertyStore; const IUnknown = @import("../system/com.zig").IUnknown; const PWSTR = @import("../foundation.zig").PWSTR; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
win32/system/windows_sync.zig
const uefi = @import("std").os.uefi;
const Guid = uefi.Guid;
const TableHeader = uefi.tables.TableHeader;
const Time = uefi.Time;
const TimeCapabilities = uefi.TimeCapabilities;
const Status = uefi.Status;
const MemoryDescriptor = uefi.tables.MemoryDescriptor;

/// UEFI Runtime Services table (EFI_RUNTIME_SERVICES).
///
/// Runtime services are provided by the firmware before and after
/// exitBootServices has been called.
///
/// As the runtime_services table may grow with new UEFI versions, it is
/// important to check hdr.header_size before using newer entries.
///
/// Some functions may not be supported. Check the RuntimeServicesSupported
/// variable using getVariable. getVariable is one of the functions that may
/// not be supported.
///
/// Some functions may not be called while other functions are running.
pub const RuntimeServices = extern struct {
    // Standard EFI table header; hdr.header_size bounds the valid fields below.
    hdr: TableHeader,

    /// Returns the current time and date information, and the time-keeping
    /// capabilities of the hardware platform.
    getTime: fn (time: *uefi.Time, capabilities: ?*TimeCapabilities) callconv(.C) Status,

    /// Sets the current local time and date information.
    setTime: fn (time: *uefi.Time) callconv(.C) Status,

    /// Returns the current wakeup alarm clock setting.
    getWakeupTime: fn (enabled: *bool, pending: *bool, time: *uefi.Time) callconv(.C) Status,

    /// Sets the system wakeup alarm clock time.
    // NOTE(review): the UEFI spec declares SetWakeupTime's Enable parameter as
    // a BOOLEAN passed by value, not a pointer — confirm `*bool` here against
    // the spec revision this binding targets.
    setWakeupTime: fn (enable: *bool, time: ?*uefi.Time) callconv(.C) Status,

    /// Changes the runtime addressing mode of EFI firmware from physical to virtual.
    setVirtualAddressMap: fn (mmap_size: usize, descriptor_size: usize, descriptor_version: u32, virtual_map: [*]MemoryDescriptor) callconv(.C) Status,

    /// Determines the new virtual address that is to be used on subsequent memory accesses.
    convertPointer: fn (debug_disposition: usize, address: **anyopaque) callconv(.C) Status,

    /// Returns the value of a variable.
    getVariable: fn (var_name: [*:0]const u16, vendor_guid: *align(8) const Guid, attributes: ?*u32, data_size: *usize, data: ?*anyopaque) callconv(.C) Status,

    /// Enumerates the current variable names.
    getNextVariableName: fn (var_name_size: *usize, var_name: [*:0]u16, vendor_guid: *align(8) Guid) callconv(.C) Status,

    /// Sets the value of a variable.
    setVariable: fn (var_name: [*:0]const u16, vendor_guid: *align(8) const Guid, attributes: u32, data_size: usize, data: *anyopaque) callconv(.C) Status,

    /// Return the next high 32 bits of the platform's monotonic counter.
    getNextHighMonotonicCount: fn (high_count: *u32) callconv(.C) Status,

    /// Resets the entire platform. Never returns.
    resetSystem: fn (reset_type: ResetType, reset_status: Status, data_size: usize, reset_data: ?*const anyopaque) callconv(.C) noreturn,

    /// Passes capsules to the firmware with both virtual and physical mapping.
    /// Depending on the intended consumption, the firmware may process the
    /// capsule immediately. If the payload should persist across a system
    /// reset, the reset value returned from `queryCapsuleCapabilities` must be
    /// passed into resetSystem and will cause the capsule to be processed by
    /// the firmware as part of the reset process.
    // NOTE(review): the spec takes EFI_CAPSULE_HEADER **CapsuleHeaderArray,
    // i.e. an array of CapsuleCount pointers; `[*]*CapsuleHeader` may be the
    // intended Zig mapping rather than `**CapsuleHeader` — confirm.
    updateCapsule: fn (capsule_header_array: **CapsuleHeader, capsule_count: usize, scatter_gather_list: EfiPhysicalAddress) callconv(.C) Status,

    /// Returns if the capsule can be supported via `updateCapsule`.
    // NOTE(review): the spec declares the last parameter as an OUT pointer
    // (EFI_RESET_TYPE *ResetType); here it is passed by value — confirm
    // against the spec before relying on this entry.
    queryCapsuleCapabilities: fn (capsule_header_array: **CapsuleHeader, capsule_count: usize, maximum_capsule_size: *usize, resetType: ResetType) callconv(.C) Status,

    /// Returns information about the EFI variables.
    queryVariableInfo: fn (attributes: *u32, maximum_variable_storage_size: *u64, remaining_variable_storage_size: *u64, maximum_variable_size: *u64) callconv(.C) Status,

    // "RUNTSERV" as a little-endian u64, used to validate the table.
    pub const signature: u64 = 0x56524553544e5552;
};

// Physical address as defined by the UEFI spec (always 64-bit).
const EfiPhysicalAddress = u64;

/// EFI_CAPSULE_HEADER: header preceding a firmware update capsule image.
pub const CapsuleHeader = extern struct {
    capsuleGuid: Guid align(8),
    headerSize: u32,
    flags: u32,
    capsuleImageSize: u32,
};

/// EFI_CAPSULE_BLOCK_DESCRIPTOR: one entry of a capsule scatter-gather list.
pub const UefiCapsuleBlockDescriptor = extern struct {
    length: u64,
    // Untagged union per the C definition: interpretation (data block vs.
    // continuation pointer) is determined by `length` per the UEFI spec.
    address: union {
        dataBlock: EfiPhysicalAddress,
        continuationPointer: EfiPhysicalAddress,
    },
};

/// EFI_RESET_TYPE values accepted by resetSystem.
pub const ResetType = enum(u32) {
    ResetCold,
    ResetWarm,
    ResetShutdown,
    ResetPlatformSpecific,
};

/// EFI_GLOBAL_VARIABLE vendor GUID (8be4df61-93ca-11d2-aa0d-00e098032b8c)
/// used with getVariable/setVariable for architecturally-defined variables.
pub const global_variable align(8) = Guid{
    .time_low = 0x8be4df61,
    .time_mid = 0x93ca,
    .time_high_and_version = 0x11d2,
    .clock_seq_high_and_reserved = 0xaa,
    .clock_seq_low = 0x0d,
    .node = [_]u8{ 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c },
};
lib/std/os/uefi/tables/runtime_services.zig
//! Window creation and OpenGL context management built on GLFW + glad.

const std = @import("std");
const warn = std.debug.warn;
const assert = std.debug.assert;
const c = @import("c.zig").c;
const mem = std.mem;
const builtin = @import("builtin");
const wgi = @import("WindowGraphicsInput.zig");
const Files = @import("../Files.zig");
const image = @import("Image.zig");

// Cached GL implementation limits, queried in createWindow; each is clamped
// to the GL-guaranteed minimum as a fallback before the query succeeds.
var maxVertexAttribs: u32 = 16;
var maxTextureSize: u32 = 1024;
var maxTextureUnits: u32 = 16;
// Set by createWindow from its disableDepthBuffer_ parameter.
var disableDepthBuffer = false;
// The single application window; null until createWindow succeeds.
pub var window: ?*c.GLFWwindow = null;
// Requested GL version encoded as major*10+minor (33 = 3.3).
var gl_version: u32 = 33;

/// Returns true if the window was created without a depth buffer
/// (see createWindow's disableDepthBuffer_ parameter).
pub fn windowWasCreatedWithoutDepthBuffer() bool {
    return disableDepthBuffer;
}

/// OpenGL debug-output callback (ARB_debug_output). Exported so it has the
/// C ABI expected by glDebugMessageCallbackARB.
/// Logs every message except GL_DEBUG_TYPE_OTHER_ARB and asserts (debug
/// builds) that no actual GL error was reported.
export fn debug_callback(source: c_uint, type_: c_uint, id: c_uint, severity: c_uint, length: c_int, message: [*c]const u8, userParam: ?*const c_void) void {
    if (type_ != c.GL_DEBUG_TYPE_OTHER_ARB) {
        // gets rid of the 'Buffer detailed info' messages
        warn("OpenGL error: {}\n", .{message[0..mem.len(message)]});
        assert(type_ != c.GL_DEBUG_TYPE_ERROR_ARB);
    }
}

/// GLFW error callback: logs the error code and human-readable description.
fn glfw_error_callback(code: c_int, description: [*c]const u8) callconv(.C) void {
    warn("GLFW error: {} {}\n", .{ code, description[0..mem.len(description)] });
}

// If fullscreen is true then width and height are ignored.
/// Creates the application window and initializes the OpenGL 3.3 core context.
/// If `fullscreen` is true, `width` and `height` are ignored and the primary
/// monitor's current video mode is used.
/// `disableDepthBuffer_` is used to avoid unnecessarily allocating a depth
/// buffer if FXAA will be used. `msaa` is the sample count (clamped to 32);
/// it is an error to request MSAA with the depth buffer disabled.
/// Errors: ParameterError, GLFWError, ARBClipControlNotSupported.
pub fn createWindow(fullscreen: bool, width: u32, height: u32, title: [*]const u8, disableDepthBuffer_: bool, msaa: u32) !void {
    disableDepthBuffer = disableDepthBuffer_;
    // MSAA needs a depth buffer here; reject the contradictory combination.
    if (disableDepthBuffer and msaa > 0) {
        return error.ParameterError;
    }
    if (c.glfwInit() == c.GLFW_FALSE) {
        return error.GLFWError;
    }
    errdefer c.glfwTerminate();
    _ = c.glfwSetErrorCallback(glfw_error_callback);

    // 3.3 is needed for GL_INT_2_10_10_10_REV
    c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MAJOR, 3);
    c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MINOR, 3);
    c.glfwWindowHint(c.GLFW_OPENGL_PROFILE, c.GLFW_OPENGL_CORE_PROFILE);
    c.glfwWindowHint(c.GLFW_STENCIL_BITS, 0);
    if (disableDepthBuffer) {
        c.glfwWindowHint(c.GLFW_DEPTH_BITS, 0);
    } else {
        // NOTE(review): 32 depth bits is requested; many drivers only provide
        // 24 — GLFW treats hints as minimums/preferences, so this likely
        // falls back silently. Confirm intended depth precision.
        c.glfwWindowHint(c.GLFW_DEPTH_BITS, 32);
    }
    if (builtin.mode == builtin.Mode.Debug) {
        c.glfwWindowHint(c.GLFW_OPENGL_DEBUG_CONTEXT, 1);
    }
    // OpenGL will automatically do gamma correction when writing to the main frame buffer
    // c.glfwWindowHint(c.GLFW_SRGB_CAPABLE, 1);
    // Disable deprecated functionality
    c.glfwWindowHint(c.GLFW_OPENGL_FORWARD_COMPAT, 1);
    c.glfwWindowHint(c.GLFW_SAMPLES, if (msaa <= 32) @intCast(c_int, msaa) else 32);

    if (fullscreen) {
        // Borderless-style fullscreen: match the monitor's current mode.
        const monitor = c.glfwGetPrimaryMonitor();
        const mode = c.glfwGetVideoMode(monitor);
        c.glfwWindowHint(c.GLFW_RED_BITS, mode.*.redBits);
        c.glfwWindowHint(c.GLFW_GREEN_BITS, mode.*.greenBits);
        c.glfwWindowHint(c.GLFW_BLUE_BITS, mode.*.blueBits);
        c.glfwWindowHint(c.GLFW_REFRESH_RATE, mode.*.refreshRate);
        window = c.glfwCreateWindow(@intCast(c_int, mode.*.width), @intCast(c_int, mode.*.height), title, monitor, null);
    } else {
        window = c.glfwCreateWindow(@intCast(c_int, width), @intCast(c_int, height), title, null, null);
    }
    if (window == null) {
        return error.GLFWError;
    }
    errdefer c.glfwDestroyWindow(window);

    // Disable mouse acceleration (good for 3D games, bad for GUI)
    // TODO Add functions for enabling/disabling this at any time
    if (c.glfwRawMouseMotionSupported() == c.GLFW_TRUE) c.glfwSetInputMode(window, c.GLFW_RAW_MOUSE_MOTION, c.GLFW_TRUE);

    c.glfwMakeContextCurrent(window);
    // Load GL function pointers through GLFW's loader.
    const gladLoadRet = c.gladLoadGLLoader(@ptrCast(c.GLADloadproc, c.glfwGetProcAddress));
    if (gladLoadRet == 0) {
        warn("Possible error in gladLoadGLLoader\n", .{});
    }
    // Enable vsync.
    c.glfwSwapInterval(1);

    if (c.GLAD_GL_ARB_clip_control == 0) {
        warn("ARB_clip_control OpenGL extension is not supported\n", .{});
        return error.ARBClipControlNotSupported;
    }
    c.glEnable(c.GL_DEPTH_TEST);
    // Switch to optimal depth buffer configuration
    wgi.setDepthModeDirectX(false, false);
    c.glViewport(0, 0, @intCast(c_int, width), @intCast(c_int, height));
    c.glClearColor(0.1, 0.1, 0.1, 1.0);
    // OpenGL will automatically do gamma correction when writing to the main frame buffer
    // c.glEnable(c.GL_FRAMEBUFFER_SRGB);
    if (msaa == 0) {
        c.glDisable(c.GL_MULTISAMPLE);
    } else {
        c.glEnable(c.GL_MULTISAMPLE);
    }
    c.glEnable(c.GL_DEPTH_CLAMP);
    if (builtin.mode == builtin.Mode.Debug and c.GL_ARB_debug_output != 0) {
        c.glEnable(c.GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
        c.glDebugMessageCallbackARB(debug_callback, null);
    }

    // Query implementation limits, clamping to the GL-guaranteed minimums.
    c.glGetIntegerv(c.GL_MAX_TEXTURE_SIZE, @ptrCast([*c]c_int, &maxTextureSize));
    if (maxTextureSize < 1024) {
        maxTextureSize = 1024;
    }
    c.glGetIntegerv(c.GL_MAX_VERTEX_ATTRIBS, @ptrCast([*c]c_int, &maxVertexAttribs));
    if (maxVertexAttribs < 16) {
        maxVertexAttribs = 16;
    }
    c.glGetIntegerv(c.GL_MAX_TEXTURE_IMAGE_UNITS, @ptrCast([*c]c_int, &maxTextureUnits));
    if (maxTextureUnits < 16) {
        maxTextureUnits = 16;
    }
}

/// Moves the existing window onto the primary monitor at its current video
/// mode and re-enables vsync.
pub fn goFullScreen() void {
    const monitor = c.glfwGetPrimaryMonitor();
    const mode = c.glfwGetVideoMode(monitor);
    c.glfwSetWindowMonitor(window, monitor, 0, 0, @intCast(c_int, mode.*.width), @intCast(c_int, mode.*.height), mode.*.refreshRate);
    c.glfwSwapInterval(1);
}

/// Returns the window to windowed mode at the given size, re-enables vsync,
/// and moves the window to a fixed position (20, 30).
pub fn exitFullScreen(width: u32, height: u32) void {
    const monitor = c.glfwGetPrimaryMonitor();
    const mode = c.glfwGetVideoMode(monitor);
    c.glfwSetWindowMonitor(window, null, 0, 0, @intCast(c_int, width), @intCast(c_int, height), mode.*.refreshRate);
    c.glfwSwapInterval(1);
    c.glfwSetWindowPos(window, 20, 30);
}

/// Destroys the window and shuts down GLFW.
pub fn closeWindow() void {
    c.glfwDestroyWindow(window);
    c.glfwTerminate();
}

/// Returns true when the user has requested the window to close.
pub fn windowShouldClose() bool {
    return c.glfwWindowShouldClose(window) == c.GLFW_TRUE;
}

/// Presents the back buffer (blocks per the swap interval set above).
pub fn swapBuffers() void {
    return c.glfwSwapBuffers(window);
}

// Thread goes to sleep until there are input events
pub fn waitEvents() void {
    return c.glfwWaitEvents();
}

/// Processes pending input events without blocking.
pub fn pollEvents() void {
    return c.glfwPollEvents();
}

/// Writes the framebuffer size in pixels to w/h (negative values clamped to 0).
pub fn getSize(w: *u32, h: *u32) void {
    var w_: c_int = 0;
    var h_: c_int = 0;
    c.glfwGetFramebufferSize(window, &w_, &h_);
    if (w_ < 0) {
        w_ = 0;
    }
    if (h_ < 0) {
        h_ = 0;
    }
    w.* = @intCast(u32, w_);
    h.* = @intCast(u32, h_);
}

/// GL string identifiers accepted by getString.
pub const StringName = enum(u32) {
    Vendor = c.GL_VENDOR,
    Renderer = c.GL_RENDERER,
    Version = c.GL_VERSION,
    ShadingLanguageVersion = c.GL_SHADING_LANGUAGE_VERSION,
};

/// Returns the driver-owned GL string for the given name.
/// The returned slice is owned by the GL driver; do not free it.
pub fn getString(stringRequest: StringName) ![]const u8 {
    var s = c.glGetString(@enumToInt(stringRequest));
    // NOTE(review): `s == 0` compares a C pointer against 0, and
    // `mem.len(u8, s)` uses the old two-argument std.mem.len signature while
    // debug_callback above uses the one-argument form — one of the two call
    // sites is stale for whichever Zig version this targets. Confirm.
    if (s == 0) {
        return error.OpenGLError;
    }
    return s[0..mem.len(u8, s)];
}

pub const CullMode = enum {
    None,
    Clockwise, // cull anti-clockwise faces
    AntiClockwise, // cull clockwise faces
};

/// Sets face culling. Note the enum names the *winding kept as front*,
/// so Clockwise culls anti-clockwise faces and vice versa (see CullMode).
pub fn setCullMode(newMode: CullMode) void {
    if (newMode == CullMode.None) {
        c.glDisable(c.GL_CULL_FACE);
    } else {
        c.glEnable(c.GL_CULL_FACE);
        if (newMode == CullMode.Clockwise) {
            c.glFrontFace(c.GL_CW);
        } else if (newMode == CullMode.AntiClockwise) {
            c.glFrontFace(c.GL_CCW);
        }
    }
}

pub const BlendMode = enum(u32) {
    None,
    Max, // component-wise max of source and destination
    Standard, // conventional src-alpha blending
};

/// Configures the global GL blend state for the given mode.
pub fn setBlendMode(mode: BlendMode) void {
    switch (mode) {
        BlendMode.None => {
            c.glDisable(c.GL_BLEND);
        },
        BlendMode.Max => {
            c.glEnable(c.GL_BLEND);
            // Factors (1, 1) with GL_MAX: take max(src, dst) per component.
            c.glBlendFunc(1, 1);
            c.glBlendEquation(c.GL_MAX);
        },
        BlendMode.Standard => {
            c.glEnable(c.GL_BLEND);
            c.glBlendFunc(c.GL_SRC_ALPHA, c.GL_ONE_MINUS_SRC_ALPHA);
            c.glBlendEquation(c.GL_FUNC_ADD);
        },
    }
}

/// Sets the colour used by clear() for the colour buffer.
pub fn setClearColour(r: f32, g: f32, b: f32, a: f32) void {
    c.glClearColor(r, g, b, a);
}

/// Clears the selected buffers of the currently bound framebuffer.
pub fn clear(colourBuffer: bool, depthBuffer: bool) void {
    var parameter: u32 = 0;
    if (colourBuffer) {
        parameter |= c.GL_COLOR_BUFFER_BIT;
    }
    if (depthBuffer) {
        parameter |= c.GL_DEPTH_BUFFER_BIT;
    }
    c.glClear(parameter);
}

// Maximum size of each dimension for a 2D texture
pub fn maximumTextureSize() u32 {
    return maxTextureSize;
}

// Maximum number of vec/ivec/uvec/ vertex inputs
pub fn maximumNumVertexAttributes() u32 {
    return maxVertexAttribs;
}

// Maximum number of bound textures
// Note that multiple textures of different types cannot be bound to the same texture unit
pub fn maximumNumTextureImageUnits() u32 {
    return maxTextureUnits;
}

/// Enables or disables user resizing of the window.
pub fn setResizeable(resizable: bool) void {
    if (resizable) {
        c.glfwSetWindowAttrib(window, c.GLFW_RESIZABLE, c.GLFW_TRUE);
    } else {
        c.glfwSetWindowAttrib(window, c.GLFW_RESIZABLE, c.GLFW_FALSE);
    }
}

/// Returns true while the given GLFW key is held down.
pub fn isKeyDown(key: c_int) bool {
    return c.glfwGetKey(window, key) == c.GLFW_PRESS;
}

/// Sets the window icon from up to four RGBA images (one per size).
/// Slices whose length does not match the expected size*size are ignored.
/// Pixel data must remain valid for the duration of the call.
pub fn setIcon(icon_16x16: ?[]u32, icon_32x32: ?[]u32, icon_48x48: ?[]u32, icon_256x256: ?[]u32) void {
    var images: [4]c.GLFWimage = undefined;
    var i: u32 = 0;
    if (icon_16x16 != null and icon_16x16.?.len == 16 * 16) {
        images[i].width = 16;
        images[i].height = 16;
        images[i].pixels = @ptrCast([*c]u8, &icon_16x16.?[0]);
        i += 1;
    }
    if (icon_32x32 != null and icon_32x32.?.len == 32 * 32) {
        images[i].width = 32;
        images[i].height = 32;
        images[i].pixels = @ptrCast([*c]u8, &icon_32x32.?[0]);
        i += 1;
    }
    if (icon_48x48 != null and icon_48x48.?.len == 48 * 48) {
        images[i].width = 48;
        images[i].height = 48;
        images[i].pixels = @ptrCast([*c]u8, &icon_48x48.?[0]);
        i += 1;
    }
    if (icon_256x256 != null and icon_256x256.?.len == 256 * 256) {
        images[i].width = 256;
        images[i].height = 256;
        images[i].pixels = @ptrCast([*c]u8, &icon_256x256.?[0]);
        i += 1;
    }
    if (i != 0) {
        c.glfwSetWindowIcon(window, @intCast(c_int, i), &images[0]);
    }
}

/// Loads an image file, decodes it as RGBA, and sets it as the window icon.
/// Only exactly 16x16, 32x32, 48x48 or 256x256 images are accepted.
/// Errors: ImageDecodeError, IconWrongSize, plus file/decode errors.
pub fn loadIcon(file_path: []const u8, allocator: *std.mem.Allocator) !void {
    const image_file_data = try Files.loadFile(file_path, allocator);
    defer allocator.free(image_file_data);
    var ico_components: u32 = 4;
    var ico_width: u32 = 0;
    var ico_height: u32 = 0;
    const ico_data = try image.decodeImage(image_file_data, &ico_components, &ico_width, &ico_height, allocator);
    defer image.freeDecodedImage(ico_data);
    // Require a full RGBA decode of the expected total size.
    if (ico_components != 4 or ico_data.len != ico_width * ico_height * 4) {
        return error.ImageDecodeError;
    }
    if (ico_width == 16 and ico_height == 16) {
        setIcon(std.mem.bytesAsSlice(u32, std.mem.sliceAsBytes(ico_data)), null, null, null);
    } else if (ico_width == 32 and ico_height == 32) {
        setIcon(null, std.mem.bytesAsSlice(u32, std.mem.sliceAsBytes(ico_data)), null, null);
    } else if (ico_width == 48 and ico_height == 48) {
        setIcon(null, null, std.mem.bytesAsSlice(u32, std.mem.sliceAsBytes(ico_data)), null);
    } else if (ico_width == 256 and ico_height == 256) {
        setIcon(null, null, null, std.mem.bytesAsSlice(u32, std.mem.sliceAsBytes(ico_data)));
    } else {
        return error.IconWrongSize;
    }
}
src/WindowGraphicsInput/Window.zig
const std = @import("std");
const assert = std.debug.assert;
const warn = std.debug.warn;
const Allocator = std.mem.Allocator;
usingnamespace @import("../main/util.zig");

/// Signature of a system pass callback: receives the opaque system instance
/// and the per-pass parameter map.
pub const systemFunc = fn (self: *SystemSelf, params: VariantMap) void;

/// Describes one callback a system registers for a named pass.
pub const SystemFuncDef = struct {
    /// Name of the pass this function runs in (matched with std.mem.eql).
    pass: []const u8,
    phase: u8 = 0,
    run_before: [][]const u8 = [_][]const u8{},
    run_after: [][]const u8 = [_][]const u8{},
    params: [][]const u8 = [_][]const u8{},
    func: systemFunc,
};

/// Opaque handle to a concrete system instance; systemFunctionWrap casts it
/// back to the real type.
pub const SystemSelf = @OpaqueType();

/// A named system instance bundled with its registered pass callbacks.
pub const System = struct {
    name: []const u8,
    funcs: []const SystemFuncDef,
    self: *SystemSelf,

    pub fn init(name: []const u8, funcs: []const SystemFuncDef, self: var) System {
        return System{
            .name = name,
            .funcs = funcs,
            .self = self,
        };
    }
};

/// Adapt a typed system function `fn(*SystemT, ContextT)` to the untyped
/// `systemFunc` signature: the opaque pointer is cast back to *SystemT and
/// the context is built from the parameter map via fillContext.
pub fn systemFunctionWrap(comptime SystemT: type, comptime sysFunc: var, comptime ContextT: type) fn (self: *SystemSelf, params: VariantMap) void {
    return struct {
        fn func(self: *SystemSelf, params: VariantMap) void {
            var sys = @ptrCast(*SystemT, @alignCast(@alignOf(*SystemT), self));
            var context = fillContext(params, ContextT);
            sysFunc(sys, context);
        }
    }.func;
}

/// Owns the sorted list of systems and dispatches pass callbacks.
pub const SystemManager = struct {
    systems: std.ArrayList(System),
    allocator: *Allocator,

    pub fn init(allocator: *Allocator) SystemManager {
        return SystemManager{
            .allocator = allocator,
            .systems = std.ArrayList(System).init(allocator),
        };
    }

    /// Release the system list.
    /// (The previous implementation was an empty no-op and leaked it.)
    pub fn deinit(self: *SystemManager) void {
        self.systems.deinit();
    }

    /// Append `systems` and keep the full list sorted by name.
    /// Out-of-memory while registering is unrecoverable for this manager;
    /// the previous code silently discarded the error into an unused var.
    pub fn registerAllSystems(self: *SystemManager, systems: []const System) void {
        self.systems.appendSlice(systems) catch unreachable; // OOM: cannot continue without the systems
        std.sort.sort(System, self.systems.toSlice(), systemSorter);
    }

    /// Run every registered callback whose pass name equals `pass`, in sorted
    /// system order.
    pub fn runSystemFunc(self: *SystemManager, pass: []const u8, params: VariantMap) void {
        // TODO: expose each system in `params` so systems can look each other up:
        // params.putNoClobber(system.name, Variant{ .psystem = &system }) catch unreachable;
        for (self.systems.toSlice()) |system| {
            for (system.funcs) |func| {
                if (std.mem.eql(u8, func.pass, pass)) {
                    func.func(system.self, params);
                }
            }
        }
    }

    /// Lexicographic order over the full name. The previous version compared
    /// only name[0], producing an arbitrary order for names that share a
    /// first byte (and would have hit index-out-of-bounds on an empty name).
    fn systemSorter(s1: System, s2: System) bool {
        return std.mem.lessThan(u8, s1.name, s2.name);
    }
};
code/core/system.zig
const std = @import("std");
const builtin = std.builtin;

/// Portable fallback: count leading zeros of a 32-bit value.
/// Returns 32 for an input of 0 (n stays 32, x stays 0).
fn __clzsi2_generic(a: i32) callconv(.C) i32 {
    // compiler_rt runs with safety off except under the test suite.
    @setRuntimeSafety(builtin.is_test);

    var x = @bitCast(u32, a);
    var n: i32 = 32;

    // Count first bit set using binary search, from Hacker's Delight
    var y: u32 = 0;
    inline for ([_]i32{ 16, 8, 4, 2, 1 }) |shift| {
        y = x >> shift;
        if (y != 0) {
            n = n - shift;
            x = y;
        }
    }

    return n - @bitCast(i32, x);
}

/// Thumb-1 assembly variant (no conditional-execution instructions available).
fn __clzsi2_thumb1() callconv(.Naked) void {
    // Similar to the generic version with the last two rounds replaced by a LUT
    asm volatile (
        \\ movs r1, #32
        \\ lsrs r2, r0, #16
        \\ beq 1f
        \\ subs r1, #16
        \\ movs r0, r2
        \\ 1:
        \\ lsrs r2, r0, #8
        \\ beq 1f
        \\ subs r1, #8
        \\ movs r0, r2
        \\ 1:
        \\ lsrs r2, r0, #4
        \\ beq 1f
        \\ subs r1, #4
        \\ movs r0, r2
        \\ 1:
        \\ ldr r3, =LUT
        \\ ldrb r0, [r3, r0]
        \\ subs r0, r1, r0
        \\ bx lr
        \\ .p2align 2
        \\ // Number of bits set in the 0-15 range
        \\ LUT:
        \\ .byte 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4
    );
    // Naked function: control never falls through past the asm's `bx lr`.
    unreachable;
}

/// ARM (A32) assembly variant using conditional execution (movne/addeq).
fn __clzsi2_arm32() callconv(.Naked) void {
    asm volatile (
        \\ // Assumption: n != 0
        \\ // r0: n
        \\ // r1: count of leading zeros in n + 1
        \\ // r2: scratch register for shifted r0
        \\ mov r1, #1
        \\
        \\ // Basic block:
        \\ // if ((r0 >> SHIFT) == 0)
        \\ //     r1 += SHIFT;
        \\ // else
        \\ //     r0 >>= SHIFT;
        \\ // for descending powers of two as SHIFT.
        \\ lsrs r2, r0, #16
        \\ movne r0, r2
        \\ addeq r1, #16
        \\
        \\ lsrs r2, r0, #8
        \\ movne r0, r2
        \\ addeq r1, #8
        \\
        \\ lsrs r2, r0, #4
        \\ movne r0, r2
        \\ addeq r1, #4
        \\
        \\ lsrs r2, r0, #2
        \\ movne r0, r2
        \\ addeq r1, #2
        \\
        \\ // The basic block invariants at this point are (r0 >> 2) == 0 and
        \\ // r0 != 0. This means 1 <= r0 <= 3 and 0 <= (r0 >> 1) <= 1.
        \\ //
        \\ // r0 | (r0 >> 1) == 0 | (r0 >> 1) == 1 | -(r0 >> 1) | 1 - (r0 >> 1)f
        \\ // ---+----------------+----------------+------------+--------------
        \\ // 1 | 1 | 0 | 0 | 1
        \\ // 2 | 0 | 1 | -1 | 0
        \\ // 3 | 0 | 1 | -1 | 0
        \\ //
        \\ // The r1's initial value of 1 compensates for the 1 here.
        \\ sub r0, r1, r0, lsr #1
        \\ bx lr
    );
    // Naked function: control never falls through past the asm's `bx lr`.
    unreachable;
}

/// Select the implementation for the compile target: LUT-based Thumb-1 when
/// ARM mode is unavailable (.noarm) or on pure Thumb targets, the
/// conditional-execution A32 version on ARM, and the generic Zig fallback
/// everywhere else.
pub const __clzsi2 = switch (std.Target.current.cpu.arch) {
    .arm, .armeb => if (std.Target.arm.featureSetHas(std.Target.current.cpu.features, .noarm))
        __clzsi2_thumb1
    else
        __clzsi2_arm32,
    .thumb, .thumbeb => __clzsi2_thumb1,
    else => __clzsi2_generic,
};

test "test clzsi2" {
    _ = @import("clzsi2_test.zig");
}
lib/std/special/compiler_rt/clzsi2.zig
/// One x86 GDT descriptor, decoded into a tagged union of the three
/// descriptor families. The bit layout in pack/unpack follows the struct
/// field order; bits 43-44 carry the descriptor-type tag used by
/// Entry.unpack (0b10 = data, 0b11 = code, else system).
pub const Entry = union(enum) {
    /// Data-segment descriptor (tag bits 43-44 = 0b10).
    const DataS = struct {
        const Self = @This();
        const Granularity = enum(u1) {
            Byte = 0,
            Page = 1,
        };
        const Direction = enum(u1) {
            Up = 0,
            Down = 1,
        };
        base: u32,
        limit: u20,
        accessed: bool,
        writable: bool,
        direction: Self.Direction,
        dpl: u4,
        present: bool,
        available_to_system_programmers: bool,
        big: bool,
        granularity: Self.Granularity,

        /// Serialize into the 64-bit descriptor format. limit is split
        /// across bits 0-15 and 48-51; base across bits 16-39 and 56-63.
        fn pack(self: Self) u64 {
            return u64(self.limit & 0xFFFF) << 0 |
                u64(self.base & 0xFFFFFF) << 16 |
                u64(@boolToInt(self.accessed)) << 40 |
                u64(@boolToInt(self.writable)) << 41 |
                u64(@enumToInt(self.direction)) << 42 |
                0b10 << 43 | // descriptor-type tag: data
                u64(self.dpl) << 45 |
                u64(@boolToInt(self.present)) << 47 |
                u64(self.limit >> 16 & 0xF) << 48 |
                u64(@boolToInt(self.available_to_system_programmers)) << 52 |
                0b0 << 53 | // L bit: always 0 for data segments
                u64(@boolToInt(self.big)) << 54 |
                u64(@enumToInt(self.granularity)) << 55 |
                u64(self.base >> 24 & 0xFF) << 56;
        }

        /// Inverse of pack; reassembles the split limit/base fields.
        fn unpack(e: u64) Self {
            return Self{
                .limit = @intCast(u20, e >> 0 & 0xFFFF | (e >> 48 & 0xF) << 16),
                .base = @intCast(u32, e >> 16 & 0xFFFFFF | (e >> 56 & 0xFF) << 24),
                .accessed = e >> 40 & 1 != 0,
                .writable = e >> 41 & 1 != 0,
                .direction = @intToEnum(Self.Direction, @intCast(u1, e >> 42 & 1)),
                .dpl = @intCast(u2, e >> 45 & 0b11),
                .present = e >> 47 & 1 != 0,
                .available_to_system_programmers = e >> 52 & 1 != 0,
                .big = e >> 54 & 1 != 0,
                .granularity = @intToEnum(Self.Granularity, @intCast(u1, e >> 55 & 1))
            };
        }
    };

    /// Code-segment descriptor (tag bits 43-44 = 0b11).
    const CodeS = struct {
        const Self = @This();
        const Granularity = enum(u1) {
            Byte = 0,
            Page = 1,
        };
        /// Encodes the D (bit 54) and L (bit 53) flags together:
        /// bit0 = L (long mode), bit1 = D (default operand size).
        const Mode = enum(u2) {
            Real = 0b00,
            Protected = 0b10,
            Long = 0b01,
        };
        base: u32,
        limit: u20,
        accessed: bool,
        readable: bool,
        conforming: bool,
        dpl: u4,
        present: bool,
        available_to_system_programmers: bool,
        mode: Self.Mode,
        granularity: Self.Granularity,

        /// Serialize into the 64-bit descriptor format; mode occupies the
        /// two bits 53-54.
        fn pack(self: Self) u64 {
            return u64(self.limit & 0xFFFF) << 0 |
                u64(self.base & 0xFFFFFF) << 16 |
                u64(@boolToInt(self.accessed)) << 40 |
                u64(@boolToInt(self.readable)) << 41 |
                u64(@boolToInt(self.conforming)) << 42 |
                0b11 << 43 | // descriptor-type tag: code
                u64(self.dpl) << 45 |
                u64(@boolToInt(self.present)) << 47 |
                u64(self.limit >> 16 & 0xF) << 48 |
                u64(@boolToInt(self.available_to_system_programmers)) << 52 |
                u64(@enumToInt(self.mode)) << 53 |
                u64(@enumToInt(self.granularity)) << 55 |
                u64(self.base >> 24 & 0xFF) << 56;
        }

        /// Inverse of pack.
        fn unpack(e: u64) Self {
            return Self{
                .limit = @intCast(u20, e >> 0 & 0xFFFF | (e >> 48 & 0xF) << 16),
                .base = @intCast(u32, e >> 16 & 0xFFFFFF | (e >> 56 & 0xFF) << 24),
                .accessed = e >> 40 & 1 != 0,
                .readable = e >> 41 & 1 != 0,
                .conforming = e >> 42 & 1 != 0,
                .dpl = @intCast(u2, e >> 45 & 0b11),
                .present = e >> 47 & 1 != 0,
                .available_to_system_programmers = e >> 52 & 1 != 0,
                // BUGFIX: mode is 2 bits wide (pack writes it at bits 53-54),
                // but the old mask `& 1` dropped bit 54, so Protected (0b10)
                // decoded as Real (0b00). Mask both bits.
                .mode = @intToEnum(Self.Mode, @intCast(u2, e >> 53 & 0b11)),
                .granularity = @intToEnum(Self.Granularity, @intCast(u1, e >> 55 & 1))
            };
        }
    };

    /// System descriptor (LDT/TSS/gates; S bit at 44 = 0).
    const SystemS = struct {
        const Self = @This();
        const Type = enum(u4) {
            Ldt = 0b0010,
            TssAvailable = 0b1001,
            TssBusy = 0b1011,
            CallGate = 0b1100,
            InterruptGate = 0b1110,
            TrapGate = 0b1111,
        };
        const Granularity = enum(u1) {
            Byte = 0,
            Page = 1,
        };
        base: u32,
        limit: u20,
        @"type": Self.Type,
        dpl: u4,
        present: bool,
        granularity: Granularity,

        /// Serialize into the 64-bit descriptor format; the 4-bit type sits
        /// at bits 40-43 and the S bit (44) is forced to 0.
        fn pack(self: Self) u64 {
            return u64(self.limit & 0xFFFF) << 0 |
                u64(self.base & 0xFFFFFF) << 16 |
                u64(@enumToInt(self.type)) << 40 |
                0b0 << 44 | // S bit: system descriptor
                u64(self.dpl) << 45 |
                u64(@boolToInt(self.present)) << 47 |
                u64(self.limit >> 16 & 0xF) << 48 |
                0b000 << 52 |
                u64(@enumToInt(self.granularity)) << 55 |
                u64(self.base >> 24 & 0xFF) << 56;
        }

        /// Inverse of pack.
        fn unpack(e: u64) Self {
            return Self{
                .limit = @intCast(u20, e >> 0 & 0xFFFF | (e >> 48 & 0xF) << 16),
                .base = @intCast(u32, e >> 16 & 0xFFFFFF | (e >> 56 & 0xFF) << 24),
                .type = @intToEnum(Self.Type, @intCast(u4, e >> 40 & 0b1111)),
                .dpl = @intCast(u2, e >> 45 & 0b11),
                .present = e >> 47 & 1 != 0,
                .granularity = @intToEnum(Self.Granularity, @intCast(u1, e >> 55 & 1))
            };
        }
    };

    Data: Entry.DataS,
    Code: Entry.CodeS,
    System: Entry.SystemS,

    /// Serialize whichever variant is active.
    pub fn pack(self: Entry) u64 {
        return switch (self) {
            Entry.Code => |x| x.pack(),
            Entry.Data => |x| x.pack(),
            Entry.System => |x| x.pack(),
        };
    }

    /// Classify a raw descriptor by its type tag at bits 43-44 and decode it.
    pub fn unpack(e: u64) Entry {
        if (e >> 43 & 0b11 == 0b10) {
            return Entry{ .Data = Entry.DataS.unpack(e) };
        } else if (e >> 43 & 0b11 == 0b11) {
            return Entry{ .Code = Entry.CodeS.unpack(e) };
        } else {
            return Entry{ .System = Entry.SystemS.unpack(e) };
        }
    }

    /// Check the present bit (47) of a raw descriptor without decoding it.
    pub fn isPresent(e: u64) bool {
        return e >> 47 & 1 != 0;
    }
};

/// Minimal GDT: null descriptor, flat long-mode code segment, flat data
/// segment.
pub var our_gdt = []u64{
    0,
    (Entry{ .Code = Entry.CodeS{
        .base = 0,
        .limit = 0,
        .accessed = false,
        .readable = true,
        .conforming = false,
        .dpl = 0,
        .present = true,
        .available_to_system_programmers = false,
        .mode = Entry.CodeS.Mode.Long,
        .granularity = Entry.CodeS.Granularity.Byte,
    }}).pack(),
    (Entry{ .Data = Entry.DataS{
        .base = 0,
        .limit = 0,
        .accessed = false,
        .writable = true,
        .direction = Entry.DataS.Direction.Up,
        .dpl = 0,
        .present = true,
        .available_to_system_programmers = false,
        .big = false,
        .granularity = Entry.DataS.Granularity.Byte,
    }}).pack(),
};

/// In-memory layout handed to the SGDT/LGDT helper routines; the leading
/// padding aligns limit/base for the assembly side.
const Gdtr = extern struct {
    padding1: u32,
    padding2: u16,
    limit: u16,
    base: [*]u64,
};

/// Read the CPU's current GDT register and return the live descriptor table
/// as a slice (limit is the table size in bytes minus one).
pub fn storeGdt() []u64 {
    var gdtr: Gdtr = Gdtr{ .padding1 = 0, .padding2 = 0, .limit = 0, .base = &our_gdt };
    storeGdtInternal(&gdtr);
    return gdtr.base[0..((gdtr.limit + 1) / @sizeOf(u64))];
}

/// Load `gdt` into the CPU, then reload the code/data segment registers from
/// the descriptors at indices `cs` and `ds` (converted to byte selectors).
pub fn loadGdt(gdt: []u64, cs: usize, ds: usize) void {
    const gdtr = Gdtr{ .padding1 = 0, .padding2 = 0, .limit = @intCast(u16, gdt.len * @sizeOf(@typeOf(gdt[0])) - 1), .base = gdt.ptr };
    loadGdtInternal(&gdtr, @intCast(u16, cs * @sizeOf(@typeOf(gdt[0]))), @intCast(u16, ds * @sizeOf(@typeOf(gdt[0]))));
}

// Assembly routines provided elsewhere in the project.
const storeGdtInternal = gdt_storeGdtInternal;
const loadGdtInternal = gdt_loadGdtInternal;
extern fn gdt_storeGdtInternal(r: *Gdtr) void;
extern fn gdt_loadGdtInternal(r: *const Gdtr, cs: u16, ds: u16) void;
src/gdt.zig
const std = @import("std");
const backend = @import("backend.zig");
const c = @cImport({
    @cInclude("GLFW/glfw3.h");
});

// Last observed cursor position; used to turn absolute GLFW coordinates
// into relative deltas in cursorPositionCallback.
var previous_x: f64 = 0;
var previous_y: f64 = 0;

/// GLFW-based backend: owns a hidden 1x1 shared-context window and counts
/// the visible outputs created from it.
pub const GLFWBackend = struct {
    windowCount: i32,
    hidden: *c.GLFWwindow,

    const Self = @This();

    pub fn init(self: *Self) !void {}

    /// Create a visible window sharing the hidden window's GL context and
    /// install all input/resize callbacks on it.
    pub fn newOutput(self: *Self, width: i32, height: i32) !GLFWOutput {
        var window = c.glfwCreateWindow(width, height, "foxwhale", null, self.hidden) orelse return error.GLFWWindowCreationFailed;
        c.glfwMakeContextCurrent(window);
        _ = c.glfwSetKeyCallback(window, keyCallback);
        _ = c.glfwSetMouseButtonCallback(window, mouseButtonCallback);
        _ = c.glfwSetFramebufferSizeCallback(window, resizeCallback);
        _ = c.glfwSetCursorPosCallback(window, cursorPositionCallback);
        self.windowCount += 1;
        return GLFWOutput{
            .window = window,
            .backend = self,
        };
    }

    /// Tears down all of GLFW (including the hidden window and any
    /// remaining outputs).
    pub fn deinit(self: Self) void {
        c.glfwTerminate();
    }
};

/// Initialize GLFW, request a 3.3 context, and create the hidden window
/// whose context every output shares.
pub fn new() !GLFWBackend {
    if (c.glfwInit() != 1) {
        return error.GLFWInitFailed;
    }
    errdefer c.glfwTerminate();
    c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MAJOR, 3);
    c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MINOR, 3);
    c.glfwSwapInterval(1);
    var hidden = c.glfwCreateWindow(1, 1, "foxwhale", null, null) orelse return error.GLFWWindowCreationFailed;
    return GLFWBackend{
        .windowCount = 0,
        .hidden = hidden,
    };
}

/// Escape releases a captured cursor; a second Escape (cursor already free)
/// requests window close. All key events are forwarded to the registered
/// keyboard handler, if any.
fn keyCallback(window: ?*c.GLFWwindow, key: c_int, scancode: c_int, action: c_int, mods: c_int) callconv(.C) void {
    if (key == c.GLFW_KEY_ESCAPE and action == c.GLFW_PRESS) {
        if (c.glfwGetInputMode(window, c.GLFW_CURSOR) == c.GLFW_CURSOR_DISABLED) {
            c.glfwSetInputMode(window, c.GLFW_CURSOR, c.GLFW_CURSOR_NORMAL);
        } else {
            c.glfwSetWindowShouldClose(window, c.GLFW_TRUE);
        }
    }
    if (backend.BACKEND_FNS.keyboard) |keyboard| {
        var time = @truncate(u32, @intCast(u64, std.time.milliTimestamp()));
        // NOTE(review): `scancode - 8` presumably maps X11-style scancodes to
        // Linux evdev keycodes — confirm against the consumer of this event.
        keyboard(time, @intCast(u32, scancode - 8), @intCast(u32, action)) catch return;
    }
}

/// A press captures the cursor; the button is translated and forwarded to
/// the registered mouse-click handler, if any.
fn mouseButtonCallback(window: ?*c.GLFWwindow, button: c_int, action: c_int, mods: c_int) callconv(.C) void {
    if (action == c.GLFW_PRESS) {
        c.glfwSetInputMode(window, c.GLFW_CURSOR, c.GLFW_CURSOR_DISABLED);
    }
    // NOTE(review): 0x110 matches Linux input's BTN_LEFT; only the left
    // button (GLFW button 0) is translated, everything else becomes 0x0.
    var button_code: u32 = switch (button) {
        0 => 0x110,
        else => 0x0,
    };
    if (backend.BACKEND_FNS.mouseClick) |mouseClick| {
        var time = @truncate(u32, @intCast(u64, std.time.milliTimestamp()));
        mouseClick(time, @intCast(u32, button_code), @intCast(u32, action)) catch return;
    }
}

/// Keep the GL viewport in sync with the framebuffer size.
fn resizeCallback(window: ?*c.GLFWwindow, width: c_int, height: c_int) callconv(.C) void {
    c.glfwMakeContextCurrent(window);
    c.glViewport(0, 0, width, height);
}

/// Convert absolute cursor coordinates to deltas and forward them to the
/// registered mouse-move handler, if any.
fn cursorPositionCallback(window: ?*c.GLFWwindow, x: f64, y: f64) callconv(.C) void {
    var dx = x - previous_x;
    var dy = y - previous_y;
    previous_x = previous_x + dx;
    previous_y = previous_y + dy;
    if (backend.BACKEND_FNS.mouseMove) |mouseMove| {
        var time = @truncate(u32, @intCast(u64, std.time.milliTimestamp()));
        mouseMove(time, dx, dy) catch return;
    }
}

/// One visible window. begin/end bracket a frame: begin pumps events and
/// makes this window's context current; end switches back to the backend's
/// hidden context.
pub const GLFWOutput = struct {
    window: ?*c.GLFWwindow,
    backend: *GLFWBackend,

    const Self = @This();

    pub fn begin(self: Self) void {
        c.glfwPollEvents();
        c.glfwMakeContextCurrent(self.window);
    }

    pub fn end(self: Self) void {
        c.glfwMakeContextCurrent(self.backend.hidden);
    }

    pub fn swap(self: Self) void {
        c.glfwSwapBuffers(self.window);
    }

    pub fn shouldClose(self: Self) bool {
        return c.glfwWindowShouldClose(self.window) == 1;
    }

    /// Framebuffer width in pixels.
    pub fn getWidth(self: Self) i32 {
        var w: c_int = 0;
        var h: c_int = 0;
        c.glfwGetFramebufferSize(self.window, &w, &h);
        return w;
    }

    /// Framebuffer height in pixels.
    pub fn getHeight(self: Self) i32 {
        var w: c_int = 0;
        var h: c_int = 0;
        c.glfwGetFramebufferSize(self.window, &w, &h);
        return h;
    }

    /// Destroy the window; the field is nulled so a double deinit passes a
    /// null pointer to GLFW rather than a dangling one.
    pub fn deinit(self: *Self) void {
        c.glfwDestroyWindow(self.window);
        self.window = null;
    }
};
src/backend/glfw.zig
const std = @import("std");
const registry = @import("registry.zig");

/// generates versioned "handles" (https://floooh.github.io/2018/06/17/handles-vs-pointers.html)
/// you choose the type of the handle (aka its size) and how much of that goes to the index and the version.
/// the bitsize of version + id must equal the handle size.
pub fn Handles(comptime HandleType: type, comptime IndexType: type, comptime VersionType: type) type {
    std.debug.assert(@typeInfo(HandleType) == .Int and std.meta.Int(.unsigned, @bitSizeOf(HandleType)) == HandleType);
    std.debug.assert(@typeInfo(IndexType) == .Int and std.meta.Int(.unsigned, @bitSizeOf(IndexType)) == IndexType);
    std.debug.assert(@typeInfo(VersionType) == .Int and std.meta.Int(.unsigned, @bitSizeOf(VersionType)) == VersionType);

    if (@bitSizeOf(IndexType) + @bitSizeOf(VersionType) != @bitSizeOf(HandleType))
        @compileError("IndexType and VersionType must sum to HandleType's bit count");

    return struct {
        const Self = @This();

        // handles[0..append_cursor] are live slots; a destroyed slot stores
        // the index of the next free slot (an intrusive free list rooted at
        // last_destroyed).
        handles: []HandleType,
        append_cursor: IndexType = 0,
        last_destroyed: ?IndexType = null,
        allocator: *std.mem.Allocator,

        /// Sentinel index that terminates the free list.
        const invalid_id = std.math.maxInt(IndexType);

        /// Iterates only the alive handles in [0, append_cursor).
        pub const Iterator = struct {
            hm: Self,
            index: usize = 0,

            pub fn init(hm: Self) @This() {
                return .{ .hm = hm };
            }

            pub fn next(self: *@This()) ?HandleType {
                if (self.index == self.hm.append_cursor) return null;
                for (self.hm.handles[self.index..self.hm.append_cursor]) |h| {
                    self.index += 1;
                    if (self.hm.alive(h)) {
                        return h;
                    }
                }
                return null;
            }
        };

        pub fn init(allocator: *std.mem.Allocator) Self {
            return initWithCapacity(allocator, 32);
        }

        pub fn initWithCapacity(allocator: *std.mem.Allocator, capacity: usize) Self {
            return Self{
                .handles = allocator.alloc(HandleType, capacity) catch unreachable,
                .allocator = allocator,
            };
        }

        pub fn deinit(self: Self) void {
            self.allocator.free(self.handles);
        }

        /// Low bits of the handle: the slot index.
        pub fn extractId(_: Self, handle: HandleType) IndexType {
            return @truncate(IndexType, handle & registry.entity_traits.entity_mask);
        }

        /// High bits of the handle: the generation counter.
        pub fn extractVersion(_: Self, handle: HandleType) VersionType {
            return @truncate(VersionType, handle >> registry.entity_traits.entity_shift);
        }

        /// Combine index and version into a handle.
        fn forge(id: IndexType, version: VersionType) HandleType {
            return id | @as(HandleType, version) << registry.entity_traits.entity_shift;
        }

        /// Return a fresh handle, reusing the most recently destroyed slot
        /// (with its already-bumped version) when one is available.
        pub fn create(self: *Self) HandleType {
            if (self.last_destroyed == null) {
                // ensure capacity and grow if needed
                if (self.handles.len - 1 == self.append_cursor) {
                    self.handles = self.allocator.realloc(self.handles, self.handles.len * 2) catch unreachable;
                }

                const id = self.append_cursor;
                const handle = forge(self.append_cursor, 0);
                self.handles[id] = handle;

                self.append_cursor += 1;
                return handle;
            }

            // Pop the head of the free list; its slot stores the next free id.
            const version = self.extractVersion(self.handles[self.last_destroyed.?]);
            const destroyed_id = self.extractId(self.handles[self.last_destroyed.?]);

            const handle = forge(self.last_destroyed.?, version);
            self.handles[self.last_destroyed.?] = handle;

            self.last_destroyed = if (destroyed_id == invalid_id) null else destroyed_id;

            return handle;
        }

        /// Invalidate `handle`: bump its slot's version and push the slot
        /// onto the free list. Errors on stale or never-created handles.
        pub fn remove(self: *Self, handle: HandleType) !void {
            const id = self.extractId(handle);
            // BUGFIX: `>=` instead of `>`. With `>`, an id equal to
            // append_cursor slipped past the bound check and indexed a slot
            // that was never written (alive() correctly uses `<`).
            if (id >= self.append_cursor or self.handles[id] != handle)
                return error.RemovedInvalidHandle;

            const next_id = self.last_destroyed orelse invalid_id;
            if (next_id == id) return error.ExhaustedEntityRemoval;

            const version = self.extractVersion(handle);
            self.handles[id] = forge(next_id, version +% 1);

            self.last_destroyed = id;
        }

        /// True when `handle` is the current occupant of its slot.
        pub fn alive(self: Self, handle: HandleType) bool {
            const id = self.extractId(handle);
            return id < self.append_cursor and self.handles[id] == handle;
        }

        pub fn iterator(self: Self) Iterator {
            return Iterator.init(self);
        }
    };
}

test "handles" {
    var hm = Handles(u32, u20, u12).init(std.testing.allocator);
    defer hm.deinit();

    const e0 = hm.create();
    const e1 = hm.create();
    const e2 = hm.create();
    std.debug.assert(hm.alive(e0));
    std.debug.assert(hm.alive(e1));
    std.debug.assert(hm.alive(e2));

    hm.remove(e1) catch unreachable;
    std.debug.assert(!hm.alive(e1));

    try std.testing.expectError(error.RemovedInvalidHandle, hm.remove(e1));

    var e_tmp = hm.create();
    std.debug.assert(hm.alive(e_tmp));

    hm.remove(e_tmp) catch unreachable;
    std.debug.assert(!hm.alive(e_tmp));

    hm.remove(e0) catch unreachable;
    std.debug.assert(!hm.alive(e0));

    hm.remove(e2) catch unreachable;
    std.debug.assert(!hm.alive(e2));

    e_tmp = hm.create();
    std.debug.assert(hm.alive(e_tmp));

    e_tmp = hm.create();
    std.debug.assert(hm.alive(e_tmp));

    e_tmp = hm.create();
    std.debug.assert(hm.alive(e_tmp));
}
src/ecs/handles.zig
// Build-system glue for the "graphics" package: assembles its package
// dependency graph and links its native libraries.
const std = @import("std");
const Pkg = std.build.Pkg;
const platform = @import("../platform/lib.zig");
const graphics = @import("../graphics/lib.zig");
const GraphicsBackend = @import("../platform/backend.zig").GraphicsBackend;
const stdx = @import("../stdx/lib.zig");
const stb = @import("../lib/stb/lib.zig");
const freetype = @import("../lib/freetype2/lib.zig");
const gl = @import("../lib/gl/lib.zig");
const vk = @import("../lib/vk/lib.zig");
const sdl = @import("../lib/sdl/lib.zig");
const lyon = @import("../lib/clyon/lib.zig");
const tess2 = @import("../lib/tess2/lib.zig");
const cgltf = @import("../lib/cgltf/lib.zig");

pub const pkg = Pkg{
    .name = "graphics",
    .source = .{ .path = srcPath() ++ "/src/graphics.zig" },
};

/// Feature toggles; each link_* flag swaps a real package/library for a
/// dummy (or omits it) so the graphics package compiles without it.
pub const Options = struct {
    graphics_backend: GraphicsBackend,
    link_lyon: bool = false,
    link_tess2: bool = false,
    link_stbtt: bool = false,
    link_freetype2: bool = true,
    enable_tracy: bool = false,
    add_dep_pkgs: bool = true,
    /// Override with prebuilt libs.
    sdl_lib_path: ?[]const u8 = null,
};

/// Build the "graphics" Pkg with its full dependency list (including a
/// generated build_options package reflecting `opts`).
pub fn getPackage(b: *std.build.Builder, opts: Options) std.build.Pkg {
    var ret = pkg;
    // Optional deps fall back to stub packages so imports still resolve.
    var lyon_pkg: Pkg = undefined;
    if (opts.link_lyon) {
        lyon_pkg = lyon.pkg;
        // lyon_pkg.dependencies = &.{ stdx.pkg };
    } else {
        lyon_pkg = lyon.dummy_pkg;
        // lyon_pkg.dependencies = &.{ stdx.pkg };
    }
    var tess2_pkg: Pkg = undefined;
    if (opts.link_tess2) {
        tess2_pkg = tess2.pkg;
    } else {
        tess2_pkg = tess2.dummy_pkg;
    }
    const build_options = b.addOptions();
    build_options.addOption(GraphicsBackend, "GraphicsBackend", opts.graphics_backend);
    build_options.addOption(bool, "enable_tracy", opts.enable_tracy);
    build_options.addOption(bool, "has_lyon", opts.link_lyon);
    build_options.addOption(bool, "has_tess2", opts.link_tess2);
    const build_options_pkg = build_options.getPackage("build_options");
    const platform_opts: platform.Options = .{
        .graphics_backend = opts.graphics_backend,
        .add_dep_pkgs = opts.add_dep_pkgs,
    };
    const platform_pkg = platform.getPackage(b, platform_opts);
    // Duplicate into the builder's allocator so the slice outlives this frame.
    ret.dependencies = b.allocator.dupe(std.build.Pkg, &.{
        gl.pkg,
        vk.pkg,
        stdx.pkg,
        build_options_pkg,
        platform_pkg,
        freetype.pkg,
        lyon_pkg,
        tess2_pkg,
        sdl.pkg,
        stb.stbi_pkg,
        stb.stbtt_pkg,
        cgltf.pkg,
    }) catch @panic("error");
    return ret;
}

/// Attach the graphics package (and, optionally, its dependencies'
/// packages) to a compile step.
pub fn addPackage(step: *std.build.LibExeObjStep, opts: Options) void {
    const b = step.builder;
    var new_pkg = getPackage(b, opts);
    step.addPackage(new_pkg);
    if (opts.add_dep_pkgs) {
        stdx.addPackage(step, .{
            .enable_tracy = opts.enable_tracy,
        });
        const platform_opts: platform.Options = .{
            .graphics_backend = opts.graphics_backend,
            .add_dep_pkgs = opts.add_dep_pkgs,
        };
        platform.addPackage(step, platform_opts);
        gl.addPackage(step);
        stb.addStbttPackage(step);
    }
}

fn isWasm(target: std.zig.CrossTarget) bool {
    return target.getCpuArch() == .wasm32 or target.getCpuArch() == .wasm64;
}

/// Compile and link the native libraries the graphics package needs.
/// GL/Vulkan/SDL are skipped for wasm targets.
pub fn buildAndLink(step: *std.build.LibExeObjStep, opts: Options) void {
    if (!isWasm(step.target)) {
        gl.link(step);
        vk.link(step);
        sdl.buildAndLink(step, .{
            .lib_path = opts.sdl_lib_path,
        });
    }
    if (opts.link_stbtt) {
        stb.buildAndLinkStbtt(step);
    }
    stb.buildAndLinkStbi(step);
    if (opts.link_freetype2) {
        freetype.buildAndLink(step);
    }
    if (opts.link_lyon) {
        lyon.link(step);
    }
    if (opts.link_tess2) {
        tess2.buildAndLink(step);
    }
    cgltf.buildAndLink(step);
}

/// Directory of this build file, used to form absolute package paths.
fn srcPath() []const u8 {
    return std.fs.path.dirname(@src().file) orelse unreachable;
}
graphics/lib.zig
const std = @import("std"); const ascii = std.ascii; const mem = std.mem; const time = std.time; const Address = std.net.Address; const assert = std.debug.assert; const testing = std.testing; const Allocator = std.mem.Allocator; const AtomicFile = std.fs.AtomicFile; const Headers = @import("headers.zig").Headers; const Cookies = @import("cookies.zig").Cookies; const util = @import("util.zig"); const Bytes = util.Bytes; const IOStream = util.IOStream; const simd = @import("simd.zig"); const GET_ = @bitCast(u32, [4]u8{ 'G', 'E', 'T', ' ' }); const PUT_ = @bitCast(u32, [4]u8{ 'P', 'U', 'T', ' ' }); const POST = @bitCast(u32, [4]u8{ 'P', 'O', 'S', 'T' }); const HEAD = @bitCast(u32, [4]u8{ 'H', 'E', 'A', 'D' }); const PATC = @bitCast(u32, [4]u8{ 'P', 'A', 'T', 'C' }); const DELE = @bitCast(u32, [4]u8{ 'D', 'E', 'L', 'E' }); const OPTI = @bitCast(u32, [4]u8{ 'O', 'P', 'T', 'I' }); const ONS_ = @bitCast(u32, [4]u8{ 'O', 'N', 'S', '_' }); const HTTP = @bitCast(u32, [4]u8{ 'H', 'T', 'T', 'P' }); const V1p1 = @bitCast(u32, [4]u8{ '/', '1', '.', '1' }); const V1p0 = @bitCast(u32, [4]u8{ '/', '1', '.', '0' }); const V2p0 = @bitCast(u32, [4]u8{ '/', '2', '.', '0' }); const V3p0 = @bitCast(u32, [4]u8{ '/', '3', '.', '0' }); fn skipGraphFindSpaceOrQuestionMark(ch: u8) !bool { if (!ascii.isGraph(ch)) { return if (ch == ' ') true else error.BadRequest; } return ch == '?'; } fn skipGraphFindSpace(ch: u8) !bool { if (!ascii.isGraph(ch)) { return if (ch == ' ') true else error.BadRequest; } return if (ch == '?') error.BadRequest else false; } fn skipHostChar(ch: u8) bool { return !ascii.isAlNum(ch) and !(ch == '.' 
or ch == '-'); } pub const Request = struct { pub const Content = struct { pub const StorageType = enum { Buffer, TempFile, }; type: StorageType, data: union { buffer: []const u8, file: AtomicFile, }, }; pub const ParseOptions = struct { // If request line is longer than this throw an error max_request_line_size: usize = 2048, // If the whole request header is larger than this throw an error max_header_size: usize = 10 * 1024, // If the content length is larger than this throw an error max_content_length: usize = 1000 * 1024 * 1024, // Dump conents of the request buffer dump_buffer: bool = false, }; pub const Method = enum { Unknown, Get, Put, Post, Patch, Head, Delete, Options, }; pub const Version = enum { Unknown, Http1_0, Http1_1, Http2_0, Http3_0, }; pub const Scheme = enum { Unknown, Http, Https, }; // ------------------------------------------------------------------------ // Common request fields // ------------------------------------------------------------------------ method: Method = .Unknown, version: Version = .Unknown, scheme: Scheme = .Unknown, // Full request uri uri: []const u8 = "", // Host part of the uri host: []const u8 = "", // Path part of the uri path: []const u8 = "", // Query part of the uri // TODO: Parse this into a map query: []const u8 = "", // Slice from the start to the body head: []const u8 = "", // Content length pulled from the content-length header (if present) content_length: usize = 0, // Set once the read is complete and no more reads will be done on the // after which it's safe to defer processing to another thread read_finished: bool = false, // Url captures args: ?[]?[]const u8 = null, // All headers headers: Headers, // Cookies // this is not parsed by default, if you need cookies use readCookies cookies: Cookies, // Holds the whole request (for now) buffer: Bytes, // Stream used for reading stream: ?*IOStream = null, // Body of request will be one of these depending on the size content: ?Content = null, // Client address 
client: Address = undefined, // ------------------------------------------------------------------------ // Constructors // ------------------------------------------------------------------------ pub fn initCapacity(allocator: *Allocator, buffer_size: usize, max_headers: usize, max_cookies: usize) !Request { return Request{ .buffer = try Bytes.initCapacity(allocator, buffer_size), .headers = try Headers.initCapacity(allocator, max_headers), .cookies = try Cookies.initCapacity(allocator, max_cookies), }; } // ------------------------------------------------------------------------ // Testing // ------------------------------------------------------------------------ pub fn initTest(allocator: *Allocator, stream: *IOStream) !Request { //if (!builtin.is_test) @compileError("This is for testing only"); return Request{ .buffer = Bytes.fromOwnedSlice(allocator, stream.in_buffer), .headers = try Headers.initCapacity(allocator, 64), .cookies = try Cookies.initCapacity(allocator, 64), }; } // ------------------------------------------------------------------------ // Parsing // ------------------------------------------------------------------------ pub fn parse(self: *Request, stream: *IOStream, options: ParseOptions) !void { // Swap the buffer so no copying occurs while reading // Want to dump directly into the request buffer self.buffer.expandToCapacity(); stream.swapBuffer(self.buffer.items); if (stream.amountBuffered() == 0) { try stream.fillBuffer(); } var start = stream.readCount(); if (options.dump_buffer) { std.log.debug( \\ \\========== Buffer at {} ========== \\{s} \\============================== , .{ start, stream.readBuffered() }); } while (true) { self.parseNoSwap(stream, options) catch |err| switch (err) { error.EndOfBuffer => { const n = try stream.shiftAndFillBuffer(start); if (n == 0) return error.EndOfStream; start = 0; continue; }, else => return err, }; return; } } inline fn parseTest(self: *Request, stream: *IOStream) !void { return 
self.parseNoSwap(stream, .{}); } fn parseNoSwap(self: *Request, stream: *IOStream, options: ParseOptions) !void { const start = stream.readCount(); try self.parseRequestLine(stream, options.max_request_line_size); try self.headers.parse(&self.buffer, stream, options.max_header_size); try self.parseContentLength(options.max_content_length); const end = stream.readCount(); self.head = self.buffer.items[start..end]; } pub fn parseRequestLine(self: *Request, stream: *IOStream, max_size: usize) !void { if (stream.isEmpty()) return error.EndOfBuffer; // Skip leading newline if any var ch: u8 = stream.lastByte(); switch (ch) { '\r' => { stream.skipBytes(1); ch = try stream.readByteSafe(); if (ch != '\n') return error.BadRequest; }, '\n' => { stream.skipBytes(1); }, else => {}, } // Parse method try self.parseMethod(stream); // Parse path try self.parseUri(stream, max_size); // Read version try self.parseVersion(stream); // Read to end of the line ch = try stream.readByteSafe(); if (ch == '\r') { ch = try stream.readByteSafe(); } if (ch != '\n') return error.BadRequest; } // Parses first 8 bytes and checks the space pub fn parseMethod(self: *Request, stream: *IOStream) !void { const buf = stream.readBuffered(); if (buf.len < 8) return error.EndOfBuffer; stream.skipBytes(4); const method = @bitCast(u32, buf[0..4].*); self.method = switch (method) { GET_ => Method.Get, PUT_ => Method.Put, POST => if (stream.readByteUnsafe() == ' ') Method.Post else Method.Unknown, HEAD => if (stream.readByteUnsafe() == ' ') Method.Head else Method.Unknown, DELE => if (stream.readByteUnsafe() == 'T' and stream.readByteUnsafe() == 'E' and stream.readByteUnsafe() == ' ') Method.Delete else Method.Unknown, PATC => if (stream.readByteUnsafe() == 'H' and stream.readByteUnsafe() == ' ') Method.Patch else Method.Unknown, OPTI => blk: { stream.skipBytes(4); const r = if (@bitCast(u32, buf[4..8].*) != ONS_) Method.Options else Method.Unknown; break :blk r; }, else => Method.Unknown, // Unknown method 
// or doesn't have a space
        };
        if (self.method == .Unknown) return error.BadRequest;
    }

    // Parses HTTP/X.Y
    // Compares "HTTP" and "/X.Y" as two u32 loads; only versions 1.0,
    // 1.1, 2.0 and 3.0 are recognized, anything else is rejected.
    pub fn parseVersion(self: *Request, stream: *IOStream) !void {
        const buf = stream.readBuffered();
        if (buf.len < 8) return error.EndOfBuffer;
        if (@bitCast(u32, buf[0..4].*) != HTTP) return error.BadRequest;
        self.version = switch (@bitCast(u32, buf[4..8].*)) {
            V1p0 => .Http1_0,
            V1p1 => .Http1_1,
            V2p0 => .Http2_0,
            V3p0 => .Http3_0,
            else => .Unknown,
        };
        if (self.version == .Unknown) return error.UnsupportedHttpVersion;
        stream.skipBytes(8);
    }

    // Parse the url, this populates, the uri, host, scheme, and query
    // when available. The trailing space is consumed.
    // Supports origin form ("/path"), absolute form ("http[s]://host..."),
    // and the asterisk form ("*"); authority form is unsupported.
    pub fn parseUri(self: *Request, stream: *IOStream, max_size: usize) !void {
        //@setRuntimeSafety(false); // We already check it
        const buf = self.buffer.items;
        const index = stream.readCount();
        // Never scan further than what is buffered or than max_size
        const limit = std.math.min(max_size, stream.amountBuffered());
        const read_limit = limit + stream.readCount();

        // Bounds check, Must have "/ HTTP/x.x\n\n"
        if (stream.amountBuffered() < 12) return error.EndOfBuffer;

        // Parse host if any
        var path_start = index;
        var ch = stream.readByteUnsafe();
        switch (ch) {
            '/' => {},
            'h', 'H' => {
                // A complete URL, known as the absolute form
                inline for ("ttp") |expected| {
                    ch = ascii.toLower(stream.readByteUnsafe());
                    if (ch != expected) return error.BadRequest;
                }
                ch = stream.readByteUnsafe();
                if (ch == 's' or ch == 'S') {
                    self.scheme = .Https;
                    ch = stream.readByteUnsafe();
                } else {
                    self.scheme = .Http;
                }
                if (ch != ':') return error.BadRequest;
                inline for ("//") |expected| {
                    ch = stream.readByteUnsafe();
                    if (ch != expected) return error.BadRequest;
                }

                // Read host
                const host_start = stream.readCount();
                ch = stream.readByteUnsafe();
                ch = stream.readUntilExpr(skipHostChar, ch, read_limit);
                if (stream.readCount() >= read_limit) {
                    if (stream.isEmpty()) return error.EndOfBuffer;
                    return error.RequestUriTooLong; // Too Big
                }

                if (ch == ':') {
                    // Read port, can be at most 5 digits (65535) so we
                    // want to read at least 6 bytes to ensure we catch the /
                    inline for ("012345") |_| {
                        ch = try stream.readByteSafe();
                        if (!ascii.isDigit(ch)) break;
                    }
                }
                if (ch != '/') return error.BadRequest;
                path_start = stream.readCount() - 1;
                self.host = buf[host_start..path_start];
            },
            '*' => {
                // The asterisk form, a simple asterisk ('*') is used with
                // OPTIONS, representing the server as a whole.
                // NOTE(review): this slice spans up to the position after
                // the space was consumed, so the trailing space appears to
                // be included in `uri` — confirm intended.
                ch = stream.readByteUnsafe();
                if (ch != ' ') return error.BadRequest;
                self.uri = buf[index..stream.readCount()];
                return;
            },
            // TODO: Authority form is unsupported
            else => return error.BadRequest,
        }

        // Read path
        ch = try stream.readUntilExprValidate(error{BadRequest}, skipGraphFindSpaceOrQuestionMark, ch, read_limit);
        var end = stream.readCount() - 1;
        self.path = buf[path_start..end];

        // Read query
        if (ch == '?') {
            const q = stream.readCount();
            ch = try stream.readByteSafe();
            ch = try stream.readUntilExprValidate(error{BadRequest}, skipGraphFindSpace, ch, read_limit);
            end = stream.readCount() - 1;
            self.query = buf[q..end];
        }

        if (stream.readCount() >= read_limit) {
            if (stream.isEmpty()) return error.EndOfBuffer;
            return error.RequestUriTooLong; // Too Big
        }
        if (ch != ' ') return error.BadRequest;
        self.uri = buf[index..end];
    }

    /// Validate and cache the Content-Length header as an integer in
    /// `self.content_length`. Rejects requests carrying both
    /// Content-Length and Transfer-Encoding, and bodies over max_size.
    pub fn parseContentLength(self: *Request, max_size: usize) !void {
        const headers = &self.headers;
        // Read content length
        if (headers.getOptional("Content-Length")) |content_length| {
            if (headers.contains("Transfer-Encoding")) {
                // Response cannot contain both Content-Length and
                // Transfer-Encoding headers.
                // http://tools.ietf.org/html/rfc7230#section-3.3.3
                return error.BadRequest;
            }
            // Proxies sometimes cause Content-Length headers to get
            // duplicated. If all the values are identical then we can
            // use them but if they differ it's an error.
if (mem.indexOf(u8, content_length, ",")) |i| {
                // Header was duplicated (e.g. by a proxy): keep only the
                // first value.
                // BUGFIX: the original passed the *full* header value —
                // which still contained the comma — to parseInt, so a
                // duplicated Content-Length always failed with BadRequest,
                // contradicting the intent documented above. Parse only
                // the first value instead.
                try headers.put("Content-Length", content_length[0..i]);
                self.content_length = std.fmt.parseInt(u32, content_length[0..i], 10) catch return error.BadRequest;
            } else {
                self.content_length = std.fmt.parseInt(u32, content_length, 10) catch return error.BadRequest;
            }
            if (self.content_length > max_size) {
                return error.RequestEntityTooLarge;
            }
        } // Should already be 0
    }

    // Read the cookie header and return a pointer to the cookies if
    // they exist
    pub fn readCookies(self: *Request) !?*Cookies {
        if (self.cookies.parsed) return &self.cookies;
        if (self.headers.getOptional("Cookie")) |header| {
            try self.cookies.parse(header);
            return &self.cookies;
        }
        return null;
    }

    /// Read the request body, dispatching on Content-Length or a
    /// chunked Transfer-Encoding. Marks the request as finished even
    /// when reading fails.
    pub fn readBody(self: *Request, stream: *IOStream) !void {
        defer self.read_finished = true;
        if (self.content_length > 0) {
            try self.readFixedBody(stream);
        } else if (self.headers.eqlIgnoreCase("Transfer-Encoding", "chunked")) {
            try self.readChunkedBody(stream);
        }
    }

    /// Read a body whose size is known from Content-Length. A body that
    /// fits in the request buffer is kept in memory; a larger one is
    /// spooled to a temp file.
    pub fn readFixedBody(self: *Request, stream: *IOStream) !void {
        // End of the request
        const end_of_headers = stream.readCount();
        // Anything else is the body
        const end_of_body = end_of_headers + self.content_length;
        // Take whatever is still buffered from the initial read up to the
        // end of the body
        const amt = stream.consumeBuffered(end_of_body);
        const start = end_of_headers + amt;

        // Check if we can fit everything in the request buffer
        // if not, write the body to a temp file
        if (end_of_body > self.buffer.capacity) {
            std.log.warn("Write to temp file", .{});
            // TODO: Write the body to a file
            const tmp = std.fs.cwd(); //.openDir("/tmp/");
            var f = try tmp.atomicFile("zhp.tmp", .{});

            // Copy what was buffered
            var writer = f.file.writer();
            try writer.writeAll(self.buffer.items[end_of_headers..start]);

            // Switch the stream to unbuffered mode and read directly
            // into the request buffer
            stream.readUnbuffered(true);
            defer stream.readUnbuffered(false);
            var reader = stream.reader();
            var left: usize = end_of_body - start;
            while (left > 0) {
                var buf: [4096]u8 = undefined;
                const end = std.math.min(left, buf.len);
                const n =
try reader.read(buf[0..end]);
                if (n == 0) break;
                try writer.writeAll(buf[0..n]);
                left -= n;
            }
            self.content = Content{
                .type = .TempFile,
                .data = .{ .file = f },
            };
        } else {
            // We can fit it in memory
            const body = self.buffer.items[end_of_headers..end_of_body];
            // Check if the full body was already read into the buffer
            if (start < end_of_body) {
                // We need to read more
                // Switch the stream to unbuffered mode and read directly
                // into the request buffer
                stream.readUnbuffered(true);
                defer stream.readUnbuffered(false);
                const rest_of_body = self.buffer.items[start..end_of_body];
                try stream.reader().readNoEof(rest_of_body);
            }
            self.content = Content{
                .type = .Buffer,
                .data = .{ .buffer = body },
            };
        }
    }

    // Read a chunked (Transfer-Encoding: chunked) body.
    pub fn readChunkedBody(self: *Request, stream: *IOStream) !void {
        _ = self;
        _ = stream;
        return error.NotImplemented; // TODO: This
    }

    /// std.fmt formatting hook: renders a debug representation of the
    /// request (method, version, scheme, host, path, query, headers and
    /// at most 1024 bytes of an in-memory body).
    pub fn format(
        self: Request,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        try std.fmt.format(out_stream, "Request{{\n", .{});
        try std.fmt.format(out_stream, " .client=\"{s}\",\n", .{self.client});
        try std.fmt.format(out_stream, " .method={s},\n", .{self.method});
        try std.fmt.format(out_stream, " .version={s},\n", .{self.version});
        try std.fmt.format(out_stream, " .scheme={s},\n", .{self.scheme});
        try std.fmt.format(out_stream, " .host=\"{s}\",\n", .{self.host});
        try std.fmt.format(out_stream, " .path=\"{s}\",\n", .{self.path});
        try std.fmt.format(out_stream, " .query=\"{s}\",\n", .{self.query});
        try std.fmt.format(out_stream, " .headers={s},\n", .{self.headers});
        if (self.content) |content| {
            const n = std.math.min(self.content_length, 1024);
            switch (content.type) {
                .TempFile => {
                    // Temp-file bodies are not rendered.
                    // content.data.file
                    // try std.fmt.format(out_stream, " .body='{}',\n", .{
                    // content.data.file[0..n]});
                },
                .Buffer => {
                    try std.fmt.format(out_stream, " .body=\"{s}\",\n", .{content.data.buffer[0..n]});
                },
            }
        }
        try std.fmt.format(out_stream, "}}", .{});
    }

    //
// ------------------------------------------------------------------------
    // Cleanup
    // ------------------------------------------------------------------------

    // Reset the request to its initial state so it can be reused
    // without needing to reallocate. Useful when using an ObjectPool
    pub fn reset(self: *Request) void {
        self.method = .Unknown;
        self.version = .Unknown;
        self.scheme = .Unknown;
        self.uri = "";
        self.host = "";
        self.path = "";
        self.query = "";
        self.head = "";
        self.content_length = 0;
        self.read_finished = false;
        self.args = null;
        self.buffer.items.len = 0;
        self.headers.reset();
        self.cookies.reset();
        self.cleanup();
    }

    // Release the body content (closes/deletes any temp file).
    // Idempotent: `content` is nulled so a second call is a no-op.
    pub fn cleanup(self: *Request) void {
        if (self.content) |*content| {
            switch (content.type) {
                .TempFile => {
                    content.data.file.deinit();
                },
                .Buffer => {},
            }
            self.content = null;
        }
    }

    // Free all memory owned by the request.
    pub fn deinit(self: *Request) void {
        self.buffer.deinit();
        self.headers.deinit();
        self.cookies.deinit();
        self.cleanup();
    }
};

// Test fixture: a realistic GET request with many headers and cookies.
const TEST_GET_1 =
    "GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n" ++
    "Host: www.kittyhell.com\r\n" ++
    "User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n" ++
    "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" ++
    "Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n" ++
    "Accept-Encoding: gzip,deflate\r\n" ++
    "Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n" ++
    "Keep-Alive: 115\r\n" ++
    "Connection: keep-alive\r\n" ++
    "Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; " ++
    "__utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; " ++
    "__utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral\r\n" ++
    "\r\n";

// Test fixture: a GET with a query string (LF-only line endings).
const TEST_GET_2 =
    \\GET /pixel/of_doom.png?id=t3_25jzeq-t8_k2ii&hash=da31d967485cdbd459ce1e9a5dde279fef7fc381&r=1738649500 HTTP/1.1
    \\Host: pixel.redditmedia.com
    \\User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:15.0) Gecko/20100101 Firefox/15.0.1
    \\Accept: image/png,image/*;q=0.8,*/*;q=0.5
    \\Accept-Language: en-us,en;q=0.5
    \\Accept-Encoding: gzip, deflate
    \\Connection: keep-alive
    \\Referer: http://www.reddit.com/
    \\
    \\
;

// Test fixture: an absolute-form (proxy style) POST.
const TEST_POST_1 =
    \\POST https://bs.serving-sys.com/BurstingPipe/adServer.bs?cn=tf&c=19&mc=imp&pli=9994987&PluID=0&ord=1400862593644&rtu=-1 HTTP/1.1
    \\Host: bs.serving-sys.com
    \\User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:15.0) Gecko/20100101 Firefox/15.0.1
    \\Accept: image/png,image/*;q=0.8,*/*;q=0.5
    \\Accept-Language: en-us,en;q=0.5
    \\Accept-Encoding: gzip, deflate
    \\Connection: keep-alive
    \\Referer: http://static.adzerk.net/reddit/ads.html?sr=-reddit.com&bust2
    \\
    \\
;

// Parse `buf` as a request and check the parsed fields against the
// expectations carried in `request` (headers/buffer/client/cookies are
// ignored; scheme is only checked when the expectation sets it).
fn expectParseResult(buf: []const u8, request: Request) !void {
    var buffer: [1024 * 1024]u8 = undefined;
    // NOTE(review): takes the address of a temporary FixedBufferAllocator;
    // appears to rely on the temporary living for the statement — confirm.
    const allocator = &std.heap.FixedBufferAllocator.init(&buffer).allocator;
    var stream = try IOStream.initTest(allocator, buf);
    var r = try Request.initTest(allocator, &stream);
    try r.parseTest(&stream);
    try testing.expectEqual(request.method, r.method);
    try testing.expectEqual(request.version, r.version);
    if (request.scheme != .Unknown) {
        try testing.expectEqual(request.scheme, r.scheme);
    }
    try testing.expectEqualStrings(request.uri, r.uri);
    try testing.expectEqualStrings(request.path, r.path);
    try testing.expectEqualStrings(request.query, r.query);
    try testing.expectEqualStrings(request.host, r.host);
}

// Parse `buf` and assert that parsing fails with `err`.
fn expectParseError(err: anyerror, buf: []const u8) !void {
    var buffer: [1024 * 1024]u8 = undefined;
    const allocator = &std.heap.FixedBufferAllocator.init(&buffer).allocator;
    var stream = IOStream.initTest(allocator, buf) catch unreachable;
    var request = Request.initTest(allocator, &stream) catch unreachable;
    try testing.expectError(err, request.parseTest(&stream));
}

test "01-parse-request-get" {
    try expectParseResult(
        \\GET / HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    , .{
        .headers = undefined, // Dont care
        .buffer = undefined, // Dont care
        .client = undefined, // Dont care
.cookies = undefined, // Don't care
        .method = .Get,
        .version = .Http1_1,
        .uri = "/",
        .path = "/",
    });
}

test "01-parse-request-get-path" {
    try expectParseResult(TEST_GET_1, .{
        .headers = undefined, // Dont care
        .buffer = undefined, // Dont care
        .client = undefined, // Dont care
        .cookies = undefined, // Don't care
        .method = .Get,
        .version = .Http1_1,
        .uri = "/wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg",
        .path = "/wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg",
    });
}

test "01-parse-request-get-query" {
    try expectParseResult(TEST_GET_2, .{
        .headers = undefined, // Dont care
        .buffer = undefined, // Dont care
        .client = undefined, // Dont care
        .cookies = undefined, // Don't care
        .method = .Get,
        .version = .Http1_1,
        .uri = "/pixel/of_doom.png?id=t3_25jzeq-t8_k2ii&hash=da31d967485cdbd459ce1e9a5dde279fef7fc381&r=1738649500",
        .path = "/pixel/of_doom.png",
        .query = "id=t3_25jzeq-t8_k2ii&hash=da31d967485cdbd459ce1e9a5dde279fef7fc381&r=1738649500",
    });
}

test "01-parse-request-post-proxy" {
    try expectParseResult(TEST_POST_1, .{
        .headers = undefined, // Dont care
        .buffer = undefined, // Dont care
        .client = undefined, // Dont care
        .cookies = undefined, // Don't care
        .method = .Post,
        .version = .Http1_1,
        .uri = "https://bs.serving-sys.com/BurstingPipe/adServer.bs?cn=tf&c=19&mc=imp&pli=9994987&PluID=0&ord=1400862593644&rtu=-1",
        .host = "bs.serving-sys.com",
        .path = "/BurstingPipe/adServer.bs",
        .query = "cn=tf&c=19&mc=imp&pli=9994987&PluID=0&ord=1400862593644&rtu=-1",
    });
}

test "01-parse-request-delete" {
    try expectParseResult(
        \\DELETE /api/users/12/ HTTP/1.0
        \\Host: bs.serving-sys.com
        \\Connection: keep-alive
        \\
        \\
    , .{
        .headers = undefined, // Dont care
        .buffer = undefined, // Dont care
        .client = undefined, // Dont care
        .cookies = undefined, // Don't care
        .method = .Delete,
        .version = .Http1_0,
        .path = "/api/users/12/",
        .uri = "/api/users/12/",
    });
}

test "01-parse-request-proxy" {
    try expectParseResult(
        \\PUT https://127.0.0.1/upload/ HTTP/1.1
        \\Connection: keep-alive
        \\
        \\
    , .{
        .headers = undefined, // Dont care
        .buffer = undefined, // Dont care
        .client = undefined, // Dont care
        .cookies = undefined, // Don't care
        .method = .Put,
        .version = .Http1_1,
        .scheme = .Https,
        .host = "127.0.0.1",
        .uri = "https://127.0.0.1/upload/",
        .path = "/upload/",
    });
}

test "01-parse-request-port" {
    try expectParseResult(
        \\PATCH https://127.0.0.1:8080/upload/ HTTP/1.1
        \\Connection: keep-alive
        \\
        \\
    , .{
        .headers = undefined, // Dont care
        .buffer = undefined, // Dont care
        .client = undefined, // Dont care
        .cookies = undefined, // Don't care
        .method = .Patch,
        .version = .Http1_1,
        .scheme = .Https,
        .host = "127.0.0.1:8080",
        .uri = "https://127.0.0.1:8080/upload/",
        .path = "/upload/",
    });
}

test "01-invalid-method" {
    try expectParseError(error.BadRequest,
        \\GOT /this/path/is/nonsense HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-invalid-host-char" {
    try expectParseError(error.BadRequest,
        \\GET http://not;valid/ HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-invalid-host-scheme" {
    try expectParseError(error.BadRequest,
        \\GET htx://192.168.0.0/ HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-invalid-host-scheme-1" {
    try expectParseError(error.BadRequest,
        \\GET HTTP:/localhost/ HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-invalid-host-port" {
    try expectParseError(error.BadRequest,
        \\GET HTTP://localhost:aef/ HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-invalid-method-2" {
    try expectParseError(error.BadRequest,
        \\DEL TE /api/users/12/ HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-no-space" {
    try expectParseError(error.BadRequest,
        \\GET/this/path/is/nonsense HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-bad-url" {
    try expectParseError(error.BadRequest,
        \\GET 0000000000000000000000000 HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-bad-url-character" {
    // NUL byte in the path must be rejected
    try expectParseError(error.BadRequest, "GET /" ++ [_]u8{0} ++ "/ HTTP/1.1\r\n" ++ "Accept: */*\r\n" ++ "\r\n\r\n");
}

test "01-bad-url-character-2" {
    // Tab in the path must be rejected
    try expectParseError(error.BadRequest, "GET /\t HTTP/1.1\r\n" ++ "Accept: */*\r\n" ++ "\r\n\r\n");
}

test "01-bad-query" {
    try expectParseError(error.BadRequest,
        \\GET /this/is?query1?query2 HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-empty-request-line" {
    try expectParseError(error.BadRequest,
        \\
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-unsupported-version" {
    try expectParseError(error.UnsupportedHttpVersion,
        \\GET / HTTP/7.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-version-malformed" {
    try expectParseError(error.BadRequest,
        \\GET / HXX/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-url-malformed" {
    try expectParseError(error.BadRequest,
        \\GET /what?are? HTTP/1.1
        \\Host: localhost:8000
        \\
        \\
    );
}

test "01-empty-header" {
    try expectParseError(error.BadRequest,
        \\GET /api/something/ HTTP/1.0
        \\: localhost:8000
        \\
        \\
    );
}

test "01-invalid-header-name" {
    try expectParseError(error.BadRequest,
        \\GET /api/something/ HTTP/1.0
        \\Host?: localhost:8000
        \\
        \\
    );
}

test "01-header-too-long" {
    const opts = Request.ParseOptions{};
    // Header name one KiB past the configured limit
    const name = [_]u8{'x'} ** (opts.max_header_size + 1024);
    try expectParseError(error.RequestHeaderFieldsTooLarge, "GET /api/something/ HTTP/1.0\r\n" ++ name ++ ": foo\r\n" ++ "\r\n\r\n");
}

test "01-partial-request" {
    try expectParseError(error.EndOfBuffer, "GET /api/something/ HTTP/1.0\r\n" ++ "Host: localhost\r");
}

test "01-partial-request-line" {
    try expectParseError(error.EndOfBuffer, "GET /api/somethithing/long/path/slow");
}

test "02-parse-request-multiple" {
    // Three pipelined requests parsed back to back from one stream
    var buffer: [1024 * 1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;
    const REQUESTS = TEST_GET_1 ++ TEST_GET_2 ++ TEST_POST_1;
    var stream = try IOStream.initTest(allocator, REQUESTS);
    var request = try Request.initTest(allocator, &stream);
    try request.parseTest(&stream);
    try testing.expectEqualSlices(u8, request.path, "/wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg");
    try request.parseTest(&stream);
    try testing.expectEqualSlices(u8, request.path, "/pixel/of_doom.png");
    try request.parseTest(&stream);
    // I have no idea why but this seems to mess up the speed of the next test
    //try testing.expectEqualSlices(u8, request.path, "/BurstingPipe/adServer.bs");
}

test "03-bench-parse-request-line" {
    // Micro-benchmark: request-line parsing only, 1M iterations
    var buffer: [1024 * 1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;
    var stream = try IOStream.initTest(allocator, TEST_GET_1);
    var request = try Request.initTest(allocator, &stream);
    const requests: usize = 1000000;
    var n: usize = 0;
    var timer = try std.time.Timer.start();
    var i: usize = 0; // 1M
    while (i < requests) : (i += 1) {
        // 10000k req/s 750MB/s (100 ns/req)
        try request.parseRequestLine(&stream, 2048);
        n = stream.readCount();
        request.reset();
        fba.reset();
        request.buffer.items.len = stream.in_buffer.len;
        stream.reset();
    }
    const ns = timer.lap();
    const ms = ns / 1000000;
    const bytes = requests * n / time.ms_per_s;
    std.debug.warn("\n {}k req/s {}MB/s ({} ns/req)\n", .{ requests / ms, bytes / ms, ns / requests });

    //stream.load("POST CRAP");
    //request = try Request.init(allocator);
    //try testing.expectError(error.BadRequest,
    //    request.parseRequestLine(&stream, 0));

    // var line = try Request.StartLine.parse(a, "GET /foo HTTP/1.1");
    // try testing.expect(mem.eql(u8, line.method, "GET"));
    // try testing.expect(mem.eql(u8, line.path, "/foo"));
    // try testing.expect(mem.eql(u8, line.version, "HTTP/1.1"));

    // line = try RequestStartLine.parse("POST / HTTP/1.1");
    // try testing.expect(mem.eql(u8, line.method, "POST"));
    // try testing.expect(mem.eql(u8, line.path, "/"));
    // try testing.expect(mem.eql(u8, line.version, "HTTP/1.1"));
    //
    // try testing.expectError(error.BadRequest,
    //     RequestStartLine.parse(a, "POST CRAP"));
    // try testing.expectError(error.BadRequest,
    //     RequestStartLine.parse(a, "POST /theform/ HTTP/1.1 DROP ALL TABLES"));
    // try testing.expectError(error.UnsupportedHttpVersion,
    //     RequestStartLine.parse(a, "POST / HTTP/2.0"));
}

test "04-parse-request-headers" {
    var buffer: [1024 * 1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;
    var stream = try IOStream.initTest(allocator,
        \\GET / HTTP/1.1
        \\Host: server
        \\User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00
        \\Cookie: uid=012345678901234532323; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600
        \\Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
        \\Accept-Language: en-US,en;q=0.5
        \\Connection: keep-alive
        \\
        \\
    );
    var request = try Request.initTest(allocator, &stream);
    try request.parseTest(&stream);
    var h = &request.headers;
    try testing.expectEqual(@as(usize, 6), h.headers.items.len);
    try testing.expectEqualSlices(u8, "server", try h.get("Host"));
    try testing.expectEqualSlices(u8, "Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00", try h.get("User-Agent"));
    try testing.expectEqualSlices(u8, "uid=012345678901234532323; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600", try h.get("Cookie"));
    try testing.expectEqualSlices(u8, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", try h.get("Accept"));
    try testing.expectEqualSlices(u8, "en-US,en;q=0.5", try h.get("Accept-Language"));
    try testing.expectEqualSlices(u8, "keep-alive", try h.get("Connection"));
}

test "04-parse-request-cookies" {
    var buffer: [1024 * 1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;
    var stream = try IOStream.initTest(allocator, TEST_GET_1);
    var request = try Request.initTest(allocator, &stream);
    try request.parseTest(&stream);
    const h = &request.headers;
    try testing.expectEqual(@as(usize, 9), h.headers.items.len);
    try testing.expectEqualSlices(u8, "www.kittyhell.com", try h.get("Host"));
    try testing.expectEqualSlices(u8, "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9", try h.get("User-Agent"));
    try testing.expectEqualSlices(u8, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", try h.get("Accept"));
    try testing.expectEqualSlices(u8, "ja,en-us;q=0.7,en;q=0.3", try h.get("Accept-Language"));
    try testing.expectEqualSlices(u8, "gzip,deflate", try h.get("Accept-Encoding"));
    try testing.expectEqualSlices(u8, "Shift_JIS,utf-8;q=0.7,*;q=0.7", try h.get("Accept-Charset"));
    try testing.expectEqualSlices(u8, "115", try h.get("Keep-Alive"));
    try testing.expectEqualSlices(u8, "keep-alive", try h.get("Connection"));
    try testing.expectEqualSlices(u8, "wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; " ++
        "__utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; " ++
        "__utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral", try h.get("Cookie"));

    const cookies = (try request.readCookies()).?;
    try testing.expectEqualStrings("2", try cookies.get("wp_ozh_wsa_visits"));
    try testing.expectEqualStrings("xxxxxxxxxx", try cookies.get("wp_ozh_wsa_visit_lasttime"));
    try testing.expectEqualStrings("xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x", try cookies.get("__utma"));
    try testing.expectEqualStrings("xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral", try cookies.get("__utmz"));
}

test "05-bench-parse-request-headers" {
    // Micro-benchmark: full request parse (line + headers), 1M iterations
    var buffer: [1024 * 1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;
    var stream = try IOStream.initTest(allocator, TEST_GET_1);
    var request = try Request.initTest(allocator, &stream);
    const requests: usize = 1000000;
    var timer = try std.time.Timer.start();
    var i: usize = 0; // 1M
    while (i < requests) : (i += 1) {
        // HACK: For testing we "fake" filling the buffer...
        // since this test is only concerned with the parser speed
        request.buffer.items.len = TEST_GET_1.len;
        // 1031k req/s 725MB/s (969 ns/req)
        try request.parseTest(&stream);
        request.reset();
        fba.reset();
        stream.reset();
    }
    const n = TEST_GET_1.len;
    const ns = timer.lap();
    const ms = ns / 1000000;
    const bytes = requests * n / time.ms_per_s;
    std.debug.warn("\n {}k req/s {}MB/s ({} ns/req)\n", .{ requests / ms, bytes / ms, ns / requests });
}
src/request.zig
// Constants for the Windows Filter Manager user-mode API.
// NOTE(review): this file appears machine-generated (zigwin32-style
// bindings) — confirm with the generator before hand-editing.
pub const FILTER_NAME_MAX_CHARS = @as(u32, 255);
pub const VOLUME_NAME_MAX_CHARS = @as(u32, 1024);
pub const INSTANCE_NAME_MAX_CHARS = @as(u32, 255);
pub const FLTFL_AGGREGATE_INFO_IS_MINIFILTER = @as(u32, 1);
pub const FLTFL_AGGREGATE_INFO_IS_LEGACYFILTER = @as(u32, 2);
pub const FLTFL_ASI_IS_MINIFILTER = @as(u32, 1);
pub const FLTFL_ASI_IS_LEGACYFILTER = @as(u32, 2);
pub const FLTFL_VSI_DETACHED_VOLUME = @as(u32, 1);
pub const FLTFL_IASI_IS_MINIFILTER = @as(u32, 1);
pub const FLTFL_IASI_IS_LEGACYFILTER = @as(u32, 2);
pub const FLTFL_IASIM_DETACHED_VOLUME = @as(u32, 1);
pub const FLTFL_IASIL_DETACHED_VOLUME = @as(u32, 1);
pub const FLT_PORT_FLAG_SYNC_HANDLE = @as(u32, 1);
// WNNC network provider type IDs (each provider ID is a distinct
// multiple of 0x10000 in the high word).
pub const WNNC_NET_MSNET = @as(u32, 65536);
pub const WNNC_NET_SMB = @as(u32, 131072);
pub const WNNC_NET_NETWARE = @as(u32, 196608);
pub const WNNC_NET_VINES = @as(u32, 262144);
pub const WNNC_NET_10NET = @as(u32, 327680);
pub const WNNC_NET_LOCUS = @as(u32, 393216);
pub const WNNC_NET_SUN_PC_NFS = @as(u32, 458752);
pub const WNNC_NET_LANSTEP = @as(u32, 524288);
pub const WNNC_NET_9TILES = @as(u32, 589824);
pub const WNNC_NET_LANTASTIC = @as(u32, 655360);
pub const WNNC_NET_AS400 = @as(u32, 720896);
pub const WNNC_NET_FTP_NFS = @as(u32, 786432);
pub const WNNC_NET_PATHWORKS = @as(u32, 851968);
pub const WNNC_NET_LIFENET = @as(u32, 917504);
pub const WNNC_NET_POWERLAN = @as(u32, 983040);
pub const WNNC_NET_BWNFS = @as(u32, 1048576);
pub const WNNC_NET_COGENT = @as(u32, 1114112);
pub const WNNC_NET_FARALLON = @as(u32, 1179648);
pub const WNNC_NET_APPLETALK = @as(u32, 1245184);
pub const WNNC_NET_INTERGRAPH = @as(u32, 1310720);
pub const WNNC_NET_SYMFONET = @as(u32, 1376256);
pub const WNNC_NET_CLEARCASE = @as(u32, 1441792);
pub const WNNC_NET_FRONTIER = @as(u32, 1507328);
pub const WNNC_NET_BMC = @as(u32, 1572864);
pub const WNNC_NET_DCE = @as(u32, 1638400);
pub const WNNC_NET_AVID = @as(u32, 1703936);
pub const WNNC_NET_DOCUSPACE = @as(u32, 1769472);
pub const WNNC_NET_MANGOSOFT = @as(u32, 1835008);
pub const WNNC_NET_SERNET = @as(u32, 1900544);
pub const WNNC_NET_RIVERFRONT1 = @as(u32, 1966080);
pub const WNNC_NET_RIVERFRONT2 = @as(u32, 2031616);
pub const WNNC_NET_DECORB = @as(u32, 2097152);
pub const WNNC_NET_PROTSTOR = @as(u32, 2162688);
pub const WNNC_NET_FJ_REDIR = @as(u32, 2228224);
pub const WNNC_NET_DISTINCT = @as(u32, 2293760);
pub const WNNC_NET_TWINS = @as(u32, 2359296);
pub const WNNC_NET_RDR2SAMPLE = @as(u32, 2424832);
pub const WNNC_NET_CSC = @as(u32, 2490368);
pub const WNNC_NET_3IN1 = @as(u32, 2555904);
pub const WNNC_NET_EXTENDNET = @as(u32, 2686976);
pub const WNNC_NET_STAC = @as(u32, 2752512);
pub const WNNC_NET_FOXBAT = @as(u32, 2818048);
pub const WNNC_NET_YAHOO = @as(u32, 2883584);
pub const WNNC_NET_EXIFS = @as(u32, 2949120);
pub const WNNC_NET_DAV = @as(u32, 3014656);
pub const WNNC_NET_KNOWARE = @as(u32, 3080192);
pub const WNNC_NET_OBJECT_DIRE = @as(u32, 3145728);
pub const WNNC_NET_MASFAX = @as(u32, 3211264);
pub const WNNC_NET_HOB_NFS = @as(u32, 3276800);
pub const WNNC_NET_SHIVA = @as(u32, 3342336);
pub const WNNC_NET_IBMAL = @as(u32, 3407872);
pub const WNNC_NET_LOCK = @as(u32, 3473408);
pub const WNNC_NET_TERMSRV = @as(u32, 3538944);
pub const WNNC_NET_SRT = @as(u32, 3604480);
pub const WNNC_NET_QUINCY = @as(u32, 3670016);
pub const WNNC_NET_OPENAFS = @as(u32, 3735552);
pub const WNNC_NET_AVID1 = @as(u32, 3801088);
pub const WNNC_NET_DFS = @as(u32, 3866624);
pub const WNNC_NET_KWNP = @as(u32, 3932160);
pub const WNNC_NET_ZENWORKS = @as(u32, 3997696);
pub const WNNC_NET_DRIVEONWEB = @as(u32, 4063232);
pub const WNNC_NET_VMWARE = @as(u32, 4128768);
pub const WNNC_NET_RSFX = @as(u32, 4194304);
pub const WNNC_NET_MFILES = @as(u32, 4259840);
pub const WNNC_NET_MS_NFS = @as(u32, 4325376);
pub const WNNC_NET_GOOGLE = @as(u32, 4390912);
pub const WNNC_NET_NDFS = @as(u32, 4456448);
pub const WNNC_NET_DOCUSHARE = @as(u32, 4521984);
pub const WNNC_NET_AURISTOR_FS = @as(u32, 4587520);
pub const WNNC_NET_SECUREAGENT = @as(u32, 4653056);
pub const WNNC_NET_9P = @as(u32, 4718592);
pub const WNNC_CRED_MANAGER = @as(u32, 4294901760);
pub const WNNC_NET_LANMAN = @as(u32, 131072);

//--------------------------------------------------------------------------------
// Section: Types (21)
//--------------------------------------------------------------------------------
// TODO: this type has a FreeFunc 'FilterClose', what can Zig do with this information?
pub const HFILTER = *opaque{};

// TODO: this type has a FreeFunc 'FilterInstanceClose', what can Zig do with this information?
pub const HFILTER_INSTANCE = isize;

// TODO: this type has a FreeFunc 'FilterFindClose', what can Zig do with this information?
pub const FilterFindHandle = isize;

// TODO: this type has a FreeFunc 'FilterVolumeFindClose', what can Zig do with this information?
pub const FilterVolumeFindHandle = isize;

// TODO: this type has a FreeFunc 'FilterInstanceFindClose', what can Zig do with this information?
pub const FilterInstanceFindHandle = isize;

// TODO: this type has a FreeFunc 'FilterVolumeInstanceFindClose', what can Zig do with this information?
pub const FilterVolumeInstanceFindHandle = isize;

// File-system types reported by the Filter Manager for a volume.
pub const FLT_FILESYSTEM_TYPE = enum(i32) {
    UNKNOWN = 0,
    RAW = 1,
    NTFS = 2,
    FAT = 3,
    CDFS = 4,
    UDFS = 5,
    LANMAN = 6,
    WEBDAV = 7,
    RDPDR = 8,
    NFS = 9,
    MS_NETWARE = 10,
    NETWARE = 11,
    BSUDF = 12,
    MUP = 13,
    RSFX = 14,
    ROXIO_UDF1 = 15,
    ROXIO_UDF2 = 16,
    ROXIO_UDF3 = 17,
    TACIT = 18,
    FS_REC = 19,
    INCD = 20,
    INCD_FAT = 21,
    EXFAT = 22,
    PSFS = 23,
    GPFS = 24,
    NPFS = 25,
    MSFS = 26,
    CSVFS = 27,
    REFS = 28,
    OPENAFS = 29,
    CIMFS = 30,
};
// C-style aliases for the enum values above.
pub const FLT_FSTYPE_UNKNOWN = FLT_FILESYSTEM_TYPE.UNKNOWN;
pub const FLT_FSTYPE_RAW = FLT_FILESYSTEM_TYPE.RAW;
pub const FLT_FSTYPE_NTFS = FLT_FILESYSTEM_TYPE.NTFS;
pub const FLT_FSTYPE_FAT = FLT_FILESYSTEM_TYPE.FAT;
pub const FLT_FSTYPE_CDFS = FLT_FILESYSTEM_TYPE.CDFS;
pub const FLT_FSTYPE_UDFS = FLT_FILESYSTEM_TYPE.UDFS;
pub const FLT_FSTYPE_LANMAN = FLT_FILESYSTEM_TYPE.LANMAN;
pub const FLT_FSTYPE_WEBDAV = FLT_FILESYSTEM_TYPE.WEBDAV;
pub const FLT_FSTYPE_RDPDR = FLT_FILESYSTEM_TYPE.RDPDR;
pub const FLT_FSTYPE_NFS = FLT_FILESYSTEM_TYPE.NFS;
pub const FLT_FSTYPE_MS_NETWARE = FLT_FILESYSTEM_TYPE.MS_NETWARE;
pub const FLT_FSTYPE_NETWARE = FLT_FILESYSTEM_TYPE.NETWARE;
pub const FLT_FSTYPE_BSUDF = FLT_FILESYSTEM_TYPE.BSUDF;
pub const FLT_FSTYPE_MUP = FLT_FILESYSTEM_TYPE.MUP;
pub const FLT_FSTYPE_RSFX = FLT_FILESYSTEM_TYPE.RSFX;
pub const FLT_FSTYPE_ROXIO_UDF1 = FLT_FILESYSTEM_TYPE.ROXIO_UDF1;
pub const FLT_FSTYPE_ROXIO_UDF2 = FLT_FILESYSTEM_TYPE.ROXIO_UDF2;
pub const FLT_FSTYPE_ROXIO_UDF3 = FLT_FILESYSTEM_TYPE.ROXIO_UDF3;
pub const FLT_FSTYPE_TACIT = FLT_FILESYSTEM_TYPE.TACIT;
pub const FLT_FSTYPE_FS_REC = FLT_FILESYSTEM_TYPE.FS_REC;
pub const FLT_FSTYPE_INCD = FLT_FILESYSTEM_TYPE.INCD;
pub const FLT_FSTYPE_INCD_FAT = FLT_FILESYSTEM_TYPE.INCD_FAT;
pub const FLT_FSTYPE_EXFAT = FLT_FILESYSTEM_TYPE.EXFAT;
pub const FLT_FSTYPE_PSFS = FLT_FILESYSTEM_TYPE.PSFS;
pub const FLT_FSTYPE_GPFS = FLT_FILESYSTEM_TYPE.GPFS;
pub const FLT_FSTYPE_NPFS = FLT_FILESYSTEM_TYPE.NPFS;
pub const FLT_FSTYPE_MSFS = FLT_FILESYSTEM_TYPE.MSFS;
pub const FLT_FSTYPE_CSVFS = FLT_FILESYSTEM_TYPE.CSVFS;
pub const FLT_FSTYPE_REFS = FLT_FILESYSTEM_TYPE.REFS;
pub const FLT_FSTYPE_OPENAFS = FLT_FILESYSTEM_TYPE.OPENAFS;
pub const FLT_FSTYPE_CIMFS = FLT_FILESYSTEM_TYPE.CIMFS;

// Information classes for FilterGetInformation-style queries.
pub const FILTER_INFORMATION_CLASS = enum(i32) {
    FullInformation = 0,
    AggregateBasicInformation = 1,
    AggregateStandardInformation = 2,
};
pub const FilterFullInformation = FILTER_INFORMATION_CLASS.FullInformation;
pub const FilterAggregateBasicInformation = FILTER_INFORMATION_CLASS.AggregateBasicInformation;
pub const FilterAggregateStandardInformation = FILTER_INFORMATION_CLASS.AggregateStandardInformation;

// Variable-length record: FilterNameBuffer is a flexible array declared
// with one element, as in the C header.
pub const FILTER_FULL_INFORMATION = extern struct {
    NextEntryOffset: u32,
    FrameID: u32,
    NumberOfInstances: u32,
    FilterNameLength: u16,
    FilterNameBuffer: [1]u16,
};

pub const FILTER_AGGREGATE_BASIC_INFORMATION = extern struct {
    NextEntryOffset: u32,
    Flags: u32,
    Type: extern union {
        MiniFilter: extern struct {
            FrameID: u32,
            NumberOfInstances: u32,
            FilterNameLength: u16,
            FilterNameBufferOffset: u16,
            FilterAltitudeLength: u16,
            FilterAltitudeBufferOffset: u16,
        },
        LegacyFilter: extern struct {
            FilterNameLength: u16,
            FilterNameBufferOffset: u16,
        },
    },
};

pub const FILTER_AGGREGATE_STANDARD_INFORMATION = extern struct {
    NextEntryOffset: u32,
    Flags: u32,
    Type: extern union {
        MiniFilter: extern struct {
            Flags: u32,
            FrameID: u32,
            NumberOfInstances: u32,
            FilterNameLength: u16,
            FilterNameBufferOffset: u16,
            FilterAltitudeLength: u16,
            FilterAltitudeBufferOffset: u16,
        },
        LegacyFilter: extern struct {
            Flags: u32,
            FilterNameLength: u16,
            FilterNameBufferOffset: u16,
            FilterAltitudeLength: u16,
            FilterAltitudeBufferOffset: u16,
        },
    },
};

pub const FILTER_VOLUME_INFORMATION_CLASS = enum(i32) {
    BasicInformation = 0,
    StandardInformation = 1,
};
pub const FilterVolumeBasicInformation = FILTER_VOLUME_INFORMATION_CLASS.BasicInformation;
pub const FilterVolumeStandardInformation = FILTER_VOLUME_INFORMATION_CLASS.StandardInformation;

pub const FILTER_VOLUME_BASIC_INFORMATION = extern struct {
    FilterVolumeNameLength: u16,
    FilterVolumeName: [1]u16,
};

pub const FILTER_VOLUME_STANDARD_INFORMATION = extern struct {
    NextEntryOffset: u32,
    Flags: u32,
    FrameID: u32,
    FileSystemType: FLT_FILESYSTEM_TYPE,
    FilterVolumeNameLength: u16,
    FilterVolumeName: [1]u16,
};

pub const INSTANCE_INFORMATION_CLASS = enum(i32) {
    BasicInformation = 0,
    PartialInformation = 1,
    FullInformation = 2,
    AggregateStandardInformation = 3,
};
pub const InstanceBasicInformation = INSTANCE_INFORMATION_CLASS.BasicInformation;
pub const InstancePartialInformation = INSTANCE_INFORMATION_CLASS.PartialInformation;
pub const InstanceFullInformation = INSTANCE_INFORMATION_CLASS.FullInformation;
pub const InstanceAggregateStandardInformation = INSTANCE_INFORMATION_CLASS.AggregateStandardInformation;

pub const INSTANCE_BASIC_INFORMATION = extern struct {
    NextEntryOffset: u32,
    InstanceNameLength: u16,
    InstanceNameBufferOffset: u16,
};

pub const INSTANCE_PARTIAL_INFORMATION = extern struct {
    NextEntryOffset: u32,
    InstanceNameLength: u16,
    InstanceNameBufferOffset: u16,
    AltitudeLength: u16,
    AltitudeBufferOffset: u16,
};

pub const INSTANCE_FULL_INFORMATION = extern struct {
    NextEntryOffset: u32,
    InstanceNameLength: u16,
    InstanceNameBufferOffset: u16,
    AltitudeLength: u16,
    AltitudeBufferOffset: u16,
    VolumeNameLength: u16,
    VolumeNameBufferOffset: u16,
    FilterNameLength: u16,
    FilterNameBufferOffset: u16,
};

pub const INSTANCE_AGGREGATE_STANDARD_INFORMATION = extern struct {
    NextEntryOffset: u32,
    Flags: u32,
    Type: extern union {
        MiniFilter: extern struct {
            Flags: u32,
            FrameID: u32,
            VolumeFileSystemType: FLT_FILESYSTEM_TYPE,
            InstanceNameLength: u16,
            InstanceNameBufferOffset: u16,
            AltitudeLength: u16,
            AltitudeBufferOffset: u16,
            VolumeNameLength: u16,
            VolumeNameBufferOffset: u16,
            FilterNameLength: u16,
            FilterNameBufferOffset: u16,
            SupportedFeatures: u32,
        },
        LegacyFilter: extern struct {
            Flags: u32,
            AltitudeLength: u16,
            AltitudeBufferOffset: u16,
            VolumeNameLength: u16,
VolumeNameBufferOffset: u16, FilterNameLength: u16, FilterNameBufferOffset: u16, SupportedFeatures: u32, }, }, }; pub const FILTER_MESSAGE_HEADER = extern struct { ReplyLength: u32, MessageId: u64, }; pub const FILTER_REPLY_HEADER = extern struct { Status: NTSTATUS, MessageId: u64, }; //-------------------------------------------------------------------------------- // Section: Functions (28) //-------------------------------------------------------------------------------- pub extern "FLTLIB" fn FilterLoad( lpFilterName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterUnload( lpFilterName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterCreate( lpFilterName: ?[*:0]const u16, hFilter: ?*?HFILTER, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterClose( hFilter: ?HFILTER, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterInstanceCreate( lpFilterName: ?[*:0]const u16, lpVolumeName: ?[*:0]const u16, lpInstanceName: ?[*:0]const u16, hInstance: ?*HFILTER_INSTANCE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterInstanceClose( hInstance: HFILTER_INSTANCE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterAttach( lpFilterName: ?[*:0]const u16, lpVolumeName: ?[*:0]const u16, lpInstanceName: ?[*:0]const u16, dwCreatedInstanceNameLength: u32, // TODO: what to do with BytesParamIndex 3? lpCreatedInstanceName: ?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterAttachAtAltitude( lpFilterName: ?[*:0]const u16, lpVolumeName: ?[*:0]const u16, lpAltitude: ?[*:0]const u16, lpInstanceName: ?[*:0]const u16, dwCreatedInstanceNameLength: u32, // TODO: what to do with BytesParamIndex 4? 
lpCreatedInstanceName: ?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterDetach( lpFilterName: ?[*:0]const u16, lpVolumeName: ?[*:0]const u16, lpInstanceName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterFindFirst( dwInformationClass: FILTER_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 2? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, lpFilterFind: ?*FilterFindHandle, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterFindNext( hFilterFind: ?HANDLE, dwInformationClass: FILTER_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterFindClose( hFilterFind: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterVolumeFindFirst( dwInformationClass: FILTER_VOLUME_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 2? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, lpVolumeFind: ?*FilterVolumeFindHandle, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterVolumeFindNext( hVolumeFind: ?HANDLE, dwInformationClass: FILTER_VOLUME_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterVolumeFindClose( hVolumeFind: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterInstanceFindFirst( lpFilterName: ?[*:0]const u16, dwInformationClass: INSTANCE_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? 
lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, lpFilterInstanceFind: ?*FilterInstanceFindHandle, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterInstanceFindNext( hFilterInstanceFind: ?HANDLE, dwInformationClass: INSTANCE_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterInstanceFindClose( hFilterInstanceFind: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterVolumeInstanceFindFirst( lpVolumeName: ?[*:0]const u16, dwInformationClass: INSTANCE_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, lpVolumeInstanceFind: ?*FilterVolumeInstanceFindHandle, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterVolumeInstanceFindNext( hVolumeInstanceFind: ?HANDLE, dwInformationClass: INSTANCE_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterVolumeInstanceFindClose( hVolumeInstanceFind: ?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterGetInformation( hFilter: ?HFILTER, dwInformationClass: FILTER_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterInstanceGetInformation( hInstance: HFILTER_INSTANCE, dwInformationClass: INSTANCE_INFORMATION_CLASS, // TODO: what to do with BytesParamIndex 3? 
lpBuffer: ?*anyopaque, dwBufferSize: u32, lpBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterConnectCommunicationPort( lpPortName: ?[*:0]const u16, dwOptions: u32, // TODO: what to do with BytesParamIndex 3? lpContext: ?*const anyopaque, wSizeOfContext: u16, lpSecurityAttributes: ?*SECURITY_ATTRIBUTES, hPort: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterSendMessage( hPort: ?HANDLE, // TODO: what to do with BytesParamIndex 2? lpInBuffer: ?*anyopaque, dwInBufferSize: u32, // TODO: what to do with BytesParamIndex 4? lpOutBuffer: ?*anyopaque, dwOutBufferSize: u32, lpBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterGetMessage( hPort: ?HANDLE, // TODO: what to do with BytesParamIndex 2? lpMessageBuffer: ?*FILTER_MESSAGE_HEADER, dwMessageBufferSize: u32, lpOverlapped: ?*OVERLAPPED, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows5.0' pub extern "FLTLIB" fn FilterReplyMessage( hPort: ?HANDLE, // TODO: what to do with BytesParamIndex 2? 
lpReplyBuffer: ?*FILTER_REPLY_HEADER, dwReplyBufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "FLTLIB" fn FilterGetDosName( lpVolumeName: ?[*:0]const u16, lpDosName: [*:0]u16, dwDosNameBufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (6) //-------------------------------------------------------------------------------- const HANDLE = @import("../foundation.zig").HANDLE; const HRESULT = @import("../foundation.zig").HRESULT; const NTSTATUS = @import("../foundation.zig").NTSTATUS; const OVERLAPPED = @import("../system/io.zig").OVERLAPPED; const PWSTR = @import("../foundation.zig").PWSTR; const SECURITY_ATTRIBUTES = @import("../security.zig").SECURITY_ATTRIBUTES; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
win32/storage/installable_file_systems.zig
const std = @import("std");
usingnamespace @import("zalgebra");
usingnamespace @import("collision/aabb.zig");
const c = @import("c.zig");
const opengl = @import("opengl_renderer.zig");
const vertex = @import("vertex.zig");

/// Builds an axis-aligned box mesh centered on the origin: the 8 corners sit at
/// (+/-1, +/-1, +/-1) componentwise-multiplied by `half_size`. All UVs are
/// vec2.zero() — the box is meant to be drawn flat-colored, not textured.
fn genBoxMesh(half_size: vec3) opengl.Mesh {
    // Corner order: +x first (indices 0-3), then -x (4-7); within each half,
    // y descends then z descends. The index list below depends on this order.
    var vertices = [_]vertex.TexturedVertex {
        vertex.TexturedVertex.new(half_size.mul(vec3.new( 1, 1, 1)), vec2.zero()),
        vertex.TexturedVertex.new(half_size.mul(vec3.new( 1, 1, -1)), vec2.zero()),
        vertex.TexturedVertex.new(half_size.mul(vec3.new( 1, -1, 1)), vec2.zero()),
        vertex.TexturedVertex.new(half_size.mul(vec3.new( 1, -1, -1)), vec2.zero()),
        vertex.TexturedVertex.new(half_size.mul(vec3.new(-1, 1, 1)), vec2.zero()),
        vertex.TexturedVertex.new(half_size.mul(vec3.new(-1, 1, -1)), vec2.zero()),
        vertex.TexturedVertex.new(half_size.mul(vec3.new(-1, -1, 1)), vec2.zero()),
        vertex.TexturedVertex.new(half_size.mul(vec3.new(-1, -1, -1)), vec2.zero())
    };

    // 6 faces x 2 triangles x 3 indices = 36 entries.
    var indices = [_]u32 {
        0, 2, 3,
        0, 3, 1,
        5, 7, 6,
        5, 6, 4,
        0, 1, 5,
        0, 5, 4,
        6, 7, 3,
        6, 3, 2,
        4, 6, 2,
        4, 2, 0,
        1, 3, 7,
        1, 7, 5,
    };

    return opengl.Mesh.init(vertex.TexturedVertex, u32, &vertices, &indices);
}

/// Debug/visualization box: an AABB plus a matching GPU mesh. Its color flips
/// blue/red in update() depending on whether it overlaps another box.
pub const TestBox = struct {
    const Self = @This();

    aabb: Aabb,        // collision volume (position + extents)
    color: vec3,       // current draw color; rewritten every update()
    mesh: opengl.Mesh, // owned; released in deinit()

    pub fn init(pos: vec3, size: vec3, color: vec3) Self {
        // NOTE(review): scale(1) is a no-op, so `half_size` is actually equal to
        // `size`. The variable name suggests scale(0.5) (half extents) was intended —
        // confirm whether callers pass full or half extents before changing this.
        var half_size = size.scale(1);
        return Self {
            .aabb = Aabb.init(pos, half_size),
            .color = color,
            .mesh = genBoxMesh(half_size),
        };
    }

    /// Releases the GPU mesh. Must be called before the box goes out of scope.
    pub fn deinit(self: *Self) void {
        self.mesh.deinit();
    }

    /// Uploads the model matrix (translation only — no rotation/scale) and color
    /// to the currently bound shader's uniforms, then issues the draw call.
    pub fn render(self: *Self, matrix_uniform_index: c.GLint, color_uniform_index: c.GLint) void {
        c.glUniformMatrix4fv(matrix_uniform_index, 1, c.GL_FALSE, mat4.from_translate(self.aabb.position).get_data());
        c.glUniform3f(color_uniform_index, @floatCast(c.GLfloat, self.color.x), @floatCast(c.GLfloat, self.color.y), @floatCast(c.GLfloat, self.color.z));
        self.mesh.draw();
    }

    /// Collision test against `ground`: on overlap, pushes this box out by the
    /// penetration vector and turns blue; otherwise turns red.
    pub fn update(self: *Self, ground: *Self) void {
        if (self.aabb.testAabb(&ground.aabb)) {
            var offset = self.aabb.calcPenetration(&ground.aabb);
            //std.log.info("Offset: ({d}, {d}, {d})", .{offset.x, offset.y, offset.z});

            //Correct the collision kinda
            self.aabb.position = self.aabb.position.add(offset);
            self.color = vec3.new(0.0, 0.0, 1.0);
        } else {
            self.color = vec3.new(1.0, 0.0, 0.0);
        }
    }
};
src/test_box.zig
//! Machine Intermediate Representation: a flat, compact list of target
//! instructions (the tag mnemonics below — add, jalr, lui, ld/sd, ... —
//! correspond to RISC-V encodings such as I/R/U-type, plus a few
//! dbg_* pseudo-instructions) with a side array of extra u32 payloads.
const Mir = @This();

const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;

const bits = @import("bits.zig");
const Register = bits.Register;

// Struct-of-arrays storage: tags and data live in parallel arrays.
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,

pub const Inst = struct {
    tag: Tag,
    /// The meaning of this depends on `tag`.
    data: Data,

    pub const Tag = enum(u16) {
        add,
        addi,
        /// Pseudo-instruction: End of prologue
        dbg_prologue_end,
        /// Pseudo-instruction: Beginning of epilogue
        dbg_epilogue_begin,
        /// Pseudo-instruction: Update debug line
        dbg_line,
        ebreak,
        ecall,
        jalr,
        ld,
        lui,
        mv,
        nop,
        ret,
        sd,
        sub,
    };

    /// The position of an MIR instruction within the `Mir` instructions array.
    pub const Index = u32;

    /// All instructions have a 4-byte payload, which is contained within
    /// this union. `Tag` determines which union field is active, as well as
    /// how to interpret the data within.
    pub const Data = union {
        /// No additional data
        ///
        /// Used by e.g. ebreak
        nop: void,
        /// Another instruction.
        ///
        /// Used by e.g. b
        inst: Index,
        /// A 16-bit immediate value.
        ///
        /// Used by e.g. svc
        imm16: u16,
        /// Index into `extra`. Meaning of what can be found there is context-dependent.
        ///
        /// Used by e.g. load_memory
        payload: u32,
        /// A register
        ///
        /// Used by e.g. blr
        reg: Register,
        /// Two registers
        ///
        /// Used by e.g. mv
        rr: struct {
            rd: Register,
            rs: Register,
        },
        /// I-Type
        ///
        /// Used by e.g. jalr
        i_type: struct {
            rd: Register,
            rs1: Register,
            imm12: i12,
        },
        /// R-Type
        ///
        /// Used by e.g. add
        r_type: struct {
            rd: Register,
            rs1: Register,
            rs2: Register,
        },
        /// U-Type
        ///
        /// Used by e.g. lui
        u_type: struct {
            rd: Register,
            imm20: i20,
        },
        /// Debug info: line and column
        ///
        /// Used by e.g. dbg_line
        dbg_line_column: struct {
            line: u32,
            column: u32,
        },
    };

    // Make sure we don't accidentally make instructions bigger than expected.
    // Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
    // comptime {
    //     if (builtin.mode != .Debug) {
    //         assert(@sizeOf(Inst) == 8);
    //     }
    // }
};

/// Frees both backing arrays and poisons `mir` so use-after-deinit is caught
/// in safe builds.
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
    mir.instructions.deinit(gpa);
    gpa.free(mir.extra);
    mir.* = undefined;
}

/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
    const fields = std.meta.fields(T);
    var i: usize = index;
    var result: T = undefined;
    // Decode one u32 word of `extra` per field of T, in declaration order.
    inline for (fields) |field| {
        @field(result, field.name) = switch (field.field_type) {
            u32 => mir.extra[i],
            i32 => @bitCast(i32, mir.extra[i]),
            else => @compileError("bad field type"),
        };
        i += 1;
    }
    return .{
        .data = result,
        .end = i,
    };
}
src/arch/riscv64/Mir.zig
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;

test "integer division" {
    // Skipped on self-hosted backends that cannot run these cases yet.
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

    // Run the same assertions both at runtime and at comptime.
    try testDivision();
    comptime try testDivision();
}

// Exercises /, @divExact, @divFloor, @divTrunc, @mod and @rem across signed,
// unsigned, and (at comptime) arbitrary-precision integers. The generic
// wrappers below keep the operand types explicit per assertion.
fn testDivision() !void {
    try expect(div(u32, 13, 3) == 4);
    try expect(div(u64, 13, 3) == 4);
    try expect(div(u8, 13, 3) == 4);

    try expect(divExact(u32, 55, 11) == 5);
    try expect(divExact(i32, -55, 11) == -5);
    try expect(divExact(i64, -55, 11) == -5);
    try expect(divExact(i16, -55, 11) == -5);

    // @divFloor rounds toward negative infinity.
    try expect(divFloor(i8, 5, 3) == 1);
    try expect(divFloor(i16, -5, 3) == -2);
    try expect(divFloor(i64, -0x80000000, -2) == 0x40000000);
    try expect(divFloor(i32, 0, -0x80000000) == 0);
    try expect(divFloor(i64, -0x40000001, 0x40000000) == -2);
    try expect(divFloor(i32, -0x80000000, 1) == -0x80000000);
    try expect(divFloor(i32, 10, 12) == 0);
    try expect(divFloor(i32, -14, 12) == -2);
    try expect(divFloor(i32, -2, 12) == -1);

    // @divTrunc rounds toward zero.
    try expect(divTrunc(i32, 5, 3) == 1);
    try expect(divTrunc(i32, -5, 3) == -1);
    try expect(divTrunc(i32, 9, -10) == 0);
    try expect(divTrunc(i32, -9, 10) == 0);
    try expect(divTrunc(i32, 10, 12) == 0);
    try expect(divTrunc(i32, -14, 12) == -1);
    try expect(divTrunc(i32, -2, 12) == 0);

    // @mod's result has the sign of the divisor; @rem's has the sign of the dividend.
    try expect(mod(u32, 10, 12) == 10);
    try expect(mod(i32, 10, 12) == 10);
    try expect(mod(i64, -14, 12) == 10);
    try expect(mod(i16, -2, 12) == 10);
    try expect(mod(i8, -2, 12) == 10);

    try expect(rem(i32, 10, 12) == 10);
    try expect(rem(i32, -14, 12) == -2);
    try expect(rem(i32, -2, 12) == -2);

    // Big-integer (comptime_int) arithmetic — only evaluable at comptime.
    comptime {
        try expect(
            1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,
        );
        try expect(
            @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,
        );
        try expect(
            1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,
        );
        try expect(
            @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,
        );
        try expect(
            @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,
        );
        try expect(
            @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,
        );
        try expect(
            4126227191251978491697987544882340798050766755606969681711 % 10 == 1,
        );
    }
}

// Thin generic wrappers so each builtin is exercised through a function call
// (runtime codepath) rather than folded directly at the call site.
fn div(comptime T: type, a: T, b: T) T {
    return a / b;
}

fn divExact(comptime T: type, a: T, b: T) T {
    return @divExact(a, b);
}

fn divFloor(comptime T: type, a: T, b: T) T {
    return @divFloor(a, b);
}

fn divTrunc(comptime T: type, a: T, b: T) T {
    return @divTrunc(a, b);
}

fn mod(comptime T: type, a: T, b: T) T {
    return @mod(a, b);
}

fn rem(comptime T: type, a: T, b: T) T {
    return @rem(a, b);
}
test/behavior/int_div.zig
// Tool that reads `libc.so` builds for several target architectures and emits a
// single assembly stub listing (symbol directives guarded by PTR32/arch #ifdefs).
// NOTE(review): archIndex, archSetName, fatal and blacklisted_symbols are
// defined later in this file, past this excerpt.

// TODO: pick the best index to put them into instead of at the end
// - e.g. find a common previous symbol and put it after that one
// - they definitely need to go into the correct section
const std = @import("std");
const builtin = std.builtin;
const mem = std.mem;
const log = std.log;
const elf = std.elf;
const native_endian = @import("builtin").target.cpu.arch.endian();

// Fixed arch set, stored so that arches[archIndex(a)] == a.
const arches: [7]std.Target.Cpu.Arch = blk: {
    var result: [7]std.Target.Cpu.Arch = undefined;
    for (.{ .riscv64, .mips, .i386, .x86_64, .powerpc, .powerpc64, .aarch64 }) |arch| {
        result[archIndex(arch)] = arch;
    }
    break :blk result;
};

/// One dynamic symbol merged across all architectures: per-arch size/binding
/// plus the properties (section, type, visibility) required to agree everywhere.
const MultiSym = struct {
    size: [arches.len]u64,
    present: [arches.len]bool,
    binding: [arches.len]u4,
    section: u16,
    ty: u4,
    visib: elf.STV,

    fn allPresent(ms: MultiSym) bool {
        for (arches) |_, i| {
            if (!ms.present[i]) {
                return false;
            }
        }
        return true;
    }

    // True when the symbol exists on exactly the 32-bit arches (mips/i386/powerpc).
    fn is32Only(ms: MultiSym) bool {
        return ms.present[archIndex(.riscv64)] == false and
            ms.present[archIndex(.mips)] == true and
            ms.present[archIndex(.i386)] == true and
            ms.present[archIndex(.x86_64)] == false and
            ms.present[archIndex(.powerpc)] == true and
            ms.present[archIndex(.powerpc64)] == false and
            ms.present[archIndex(.aarch64)] == false;
    }

    // Returns the size if all present arches agree on it, else null.
    // Asserts at least one arch is present.
    fn commonSize(ms: MultiSym) ?u64 {
        var size: ?u64 = null;
        for (arches) |_, i| {
            if (!ms.present[i]) continue;
            if (size) |s| {
                if (ms.size[i] != s) {
                    return null;
                }
            } else {
                size = ms.size[i];
            }
        }
        return size.?;
    }

    // Returns the binding if all present arches agree on it, else null.
    // Asserts at least one arch is present.
    fn commonBinding(ms: MultiSym) ?u4 {
        var binding: ?u4 = null;
        for (arches) |_, i| {
            if (!ms.present[i]) continue;
            if (binding) |b| {
                if (ms.binding[i] != b) {
                    return null;
                }
            } else {
                binding = ms.binding[i];
            }
        }
        return binding.?;
    }

    // True when, on every present arch, the symbol's size equals that arch's
    // pointer size (emitted as PTR_SIZE_BYTES).
    fn isPtrSize(ms: MultiSym) bool {
        const map = .{
            .{ .riscv64, 8 },
            .{ .mips, 4 },
            .{ .i386, 4 },
            .{ .x86_64, 8 },
            .{ .powerpc, 4 },
            .{ .powerpc64, 8 },
            .{ .aarch64, 8 },
        };
        inline for (map) |item| {
            const arch = item[0];
            const size = item[1];
            const arch_index = archIndex(arch);
            if (ms.present[arch_index] and ms.size[arch_index] != size) {
                return false;
            }
        }
        return true;
    }

    // Same as isPtrSize but for twice the pointer size (PTR2_SIZE_BYTES).
    fn isPtr2Size(ms: MultiSym) bool {
        const map = .{
            .{ .riscv64, 16 },
            .{ .mips, 8 },
            .{ .i386, 8 },
            .{ .x86_64, 16 },
            .{ .powerpc, 8 },
            .{ .powerpc64, 16 },
            .{ .aarch64, 16 },
        };
        inline for (map) |item| {
            const arch = item[0];
            const size = item[1];
            const arch_index = archIndex(arch);
            if (ms.present[arch_index] and ms.size[arch_index] != size) {
                return false;
            }
        }
        return true;
    }

    // True when the symbol is weak (binding 2) on 64-bit arches and global
    // (binding 1) on 32-bit ones — emitted via the WEAK64 macro.
    fn isWeak64(ms: MultiSym) bool {
        const map = .{
            .{ .riscv64, 2 },
            .{ .mips, 1 },
            .{ .i386, 1 },
            .{ .x86_64, 2 },
            .{ .powerpc, 1 },
            .{ .powerpc64, 2 },
            .{ .aarch64, 2 },
        };
        inline for (map) |item| {
            const arch = item[0];
            const binding = item[1];
            const arch_index = archIndex(arch);
            if (ms.present[arch_index] and ms.binding[arch_index] != binding) {
                return false;
            }
        }
        return true;
    }
};

/// Shared state threaded through parseElf for one architecture's libc.so.
const Parse = struct {
    arena: mem.Allocator,
    sym_table: *std.StringArrayHashMap(MultiSym),
    sections: *std.StringArrayHashMap(void),
    blacklist: std.StringArrayHashMap(void),
    elf_bytes: []align(@alignOf(elf.Elf64_Ehdr)) u8,
    header: elf.Header,
    arch: std.Target.Cpu.Arch,
};

pub fn main() !void {
    var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();

    // argv[1]: directory containing per-arch subdirs with lib/libc.so inside.
    const args = try std.process.argsAlloc(arena);
    const build_all_path = args[1];

    var build_all_dir = try std.fs.cwd().openDir(build_all_path, .{});

    var sym_table = std.StringArrayHashMap(MultiSym).init(arena);
    var sections = std.StringArrayHashMap(void).init(arena);
    var blacklist = std.StringArrayHashMap(void).init(arena);

    try blacklist.ensureUnusedCapacity(blacklisted_symbols.len);
    for (blacklisted_symbols) |name| {
        blacklist.putAssumeCapacityNoClobber(name, {});
    }

    // Merge the dynamic symbols of every arch's libc.so into sym_table.
    for (arches) |arch| {
        const libc_so_path = try std.fmt.allocPrint(arena, "{s}/lib/libc.so", .{@tagName(arch)});

        // Read the ELF header.
        const elf_bytes = try build_all_dir.readFileAllocOptions(
            arena,
            libc_so_path,
            100 * 1024 * 1024,
            1 * 1024 * 1024,
            @alignOf(elf.Elf64_Ehdr),
            null,
        );
        const header = try elf.Header.parse(elf_bytes[0..@sizeOf(elf.Elf64_Ehdr)]);

        const parse: Parse = .{
            .arena = arena,
            .sym_table = &sym_table,
            .sections = &sections,
            .blacklist = blacklist,
            .elf_bytes = elf_bytes,
            .header = header,
            .arch = arch,
        };

        // Dispatch to a comptime-specialized parser for word size and endianness.
        switch (header.is_64) {
            true => switch (header.endian) {
                .Big => try parseElf(parse, true, .Big),
                .Little => try parseElf(parse, true, .Little),
            },
            false => switch (header.endian) {
                .Big => try parseElf(parse, false, .Big),
                .Little => try parseElf(parse, false, .Little),
            },
        }
    }

    const stdout = std.io.getStdOut().writer();
    try stdout.writeAll(
        \\#ifdef PTR64
        \\#define WEAK64 .weak
        \\#define PTR_SIZE_BYTES 8
        \\#define PTR2_SIZE_BYTES 16
        \\#else
        \\#define WEAK64 .globl
        \\#define PTR_SIZE_BYTES 4
        \\#define PTR2_SIZE_BYTES 8
        \\#endif
        \\
    );

    // Sort the symbols for deterministic output and cleaner vcs diffs.
    const SymTableSort = struct {
        sections: *const std.StringArrayHashMap(void),
        sym_table: *const std.StringArrayHashMap(MultiSym),

        /// Sort first by section name, then by symbol name
        pub fn lessThan(ctx: @This(), index_a: usize, index_b: usize) bool {
            const multi_sym_a = ctx.sym_table.values()[index_a];
            const multi_sym_b = ctx.sym_table.values()[index_b];

            const section_a = ctx.sections.keys()[multi_sym_a.section];
            const section_b = ctx.sections.keys()[multi_sym_b.section];
            switch (mem.order(u8, section_a, section_b)) {
                .lt => return true,
                .gt => return false,
                .eq => {},
            }

            const symbol_a = ctx.sym_table.keys()[index_a];
            const symbol_b = ctx.sym_table.keys()[index_b];
            switch (mem.order(u8, symbol_a, symbol_b)) {
                .lt => return true,
                .gt, .eq => return false,
            }
        }
    };
    sym_table.sort(SymTableSort{ .sym_table = &sym_table, .sections = &sections });

    // Emission loop. prev_pp_state tracks which preprocessor guard (#ifdef PTR32
    // or an arch-specific #if) is currently open, so guards are closed/reopened
    // only when the symbol's arch coverage changes.
    var prev_section: u16 = std.math.maxInt(u16);
    var prev_pp_state: enum { none, ptr32, special } = .none;
    for (sym_table.values()) |multi_sym, sym_index| {
        const name = sym_table.keys()[sym_index];

        // Emit the section directive whenever the section changes.
        if (multi_sym.section != prev_section) {
            prev_section = multi_sym.section;
            const sh_name = sections.keys()[multi_sym.section];
            try stdout.print("{s}\n", .{sh_name});
        }

        if (multi_sym.allPresent()) {
            // Unconditional symbol: close any open guard.
            switch (prev_pp_state) {
                .none => {},
                .ptr32, .special => {
                    try stdout.writeAll("#endif\n");
                    prev_pp_state = .none;
                },
            }
        } else if (multi_sym.is32Only()) {
            // 32-bit-only symbol: make sure a PTR32 guard is open.
            switch (prev_pp_state) {
                .none => {
                    try stdout.writeAll("#ifdef PTR32\n");
                    prev_pp_state = .ptr32;
                },
                .special => {
                    try stdout.writeAll("#endif\n#ifdef PTR32\n");
                    prev_pp_state = .ptr32;
                },
                .ptr32 => {},
            }
        } else {
            // Irregular arch set: open a fresh #if !defined(...) guard listing
            // the arches where the symbol is absent.
            switch (prev_pp_state) {
                .none => {},
                .special, .ptr32 => {
                    try stdout.writeAll("#endif\n");
                },
            }
            prev_pp_state = .special;

            var first = true;
            try stdout.writeAll("#if ");

            for (arches) |arch, i| {
                if (multi_sym.present[i]) continue;

                if (!first) try stdout.writeAll(" && ");
                first = false;

                try stdout.print("!defined(ARCH_{s})", .{@tagName(arch)});
            }

            try stdout.writeAll("\n");
        }

        // Binding directive: .globl / .weak when all arches agree, the WEAK64
        // macro for the weak-on-64-bit pattern; otherwise just log it.
        if (multi_sym.commonBinding()) |binding| {
            switch (binding) {
                elf.STB_GLOBAL => {
                    try stdout.print(".globl {s}\n", .{name});
                },
                elf.STB_WEAK => {
                    try stdout.print(".weak {s}\n", .{name});
                },
                else => unreachable,
            }
        } else if (multi_sym.isWeak64()) {
            try stdout.print("WEAK64 {s}\n", .{name});
        } else {
            for (arches) |arch, i| {
                log.info("symbol '{s}' binding on {s}: {d}", .{
                    name, @tagName(arch), multi_sym.binding[i],
                });
            }
        }

        switch (multi_sym.ty) {
            elf.STT_NOTYPE => {},
            elf.STT_FUNC => {
                try stdout.print(".type {s}, %function;\n", .{name});
                // omitting the size is OK for functions
            },
            elf.STT_OBJECT => {
                try stdout.print(".type {s}, %object;\n", .{name});
                if (multi_sym.commonSize()) |size| {
                    try stdout.print(".size {s}, {d}\n", .{ name, size });
                } else if (multi_sym.isPtrSize()) {
                    try stdout.print(".size {s}, PTR_SIZE_BYTES\n", .{name});
                } else if (multi_sym.isPtr2Size()) {
                    try stdout.print(".size {s}, PTR2_SIZE_BYTES\n", .{name});
                } else {
                    // No consistent size: log per-arch sizes and omit the directive.
                    for (arches) |arch, i| {
                        log.info("symbol '{s}' size on {s}: {d}", .{
                            name, @tagName(arch), multi_sym.size[i],
                        });
                    }
                    //try stdout.print(".size {s}, {d}\n", .{ name, size });
                }
            },
            else => unreachable,
        }

        switch (multi_sym.visib) {
            .DEFAULT => {},
            .PROTECTED => try stdout.print(".protected {s}\n", .{name}),
            .INTERNAL, .HIDDEN => unreachable,
        }

        try stdout.print("{s}:\n", .{name});
    }

    // Close whatever guard is still open at end of output.
    switch (prev_pp_state) {
        .none => {},
        .ptr32, .special => try stdout.writeAll("#endif\n"),
    }
}

fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) !void {
    const arena = parse.arena;
    const elf_bytes = parse.elf_bytes;
    const header = parse.header;
    const Sym = if (is_64) elf.Elf64_Sym else elf.Elf32_Sym;
    const S = struct {
        fn endianSwap(x: anytype) @TypeOf(x) {
            if (endian != native_endian) {
                return @byteSwap(@TypeOf(x), x);
            } else {
                return x;
            }
        }
        fn symbolAddrLessThan(_: void, lhs: Sym, rhs: Sym) bool {
            return endianSwap(lhs.st_value) < endianSwap(rhs.st_value);
        }
    };
    // A little helper to do endian swapping.
const s = S.endianSwap; // Obtain list of sections. const Shdr = if (is_64) elf.Elf64_Shdr else elf.Elf32_Shdr; const shdrs = mem.bytesAsSlice(Shdr, elf_bytes[header.shoff..])[0..header.shnum]; // Obtain the section header string table. const shstrtab_offset = s(shdrs[header.shstrndx].sh_offset); log.debug("shstrtab is at offset {d}", .{shstrtab_offset}); const shstrtab = elf_bytes[shstrtab_offset..]; // Maps this ELF file's section header index to the multi arch section ArrayHashMap index. const section_index_map = try arena.alloc(u16, shdrs.len); // Find the offset of the dynamic symbol table. var dynsym_index: u16 = 0; for (shdrs) |shdr, i| { const sh_name = try arena.dupe(u8, mem.sliceTo(shstrtab[s(shdr.sh_name)..], 0)); log.debug("found section: {s}", .{sh_name}); if (mem.eql(u8, sh_name, ".dynsym")) { dynsym_index = @intCast(u16, i); } const gop = try parse.sections.getOrPut(sh_name); section_index_map[i] = @intCast(u16, gop.index); } if (dynsym_index == 0) @panic("did not find the .dynsym section"); log.debug("found .dynsym section at index {d}", .{dynsym_index}); // Read the dynamic symbols into a list. const dyn_syms_off = s(shdrs[dynsym_index].sh_offset); const dyn_syms_size = s(shdrs[dynsym_index].sh_size); const dyn_syms = mem.bytesAsSlice(Sym, elf_bytes[dyn_syms_off..][0..dyn_syms_size]); const dynstr_offset = s(shdrs[s(shdrs[dynsym_index].sh_link)].sh_offset); const dynstr = elf_bytes[dynstr_offset..]; // Sort the list by address, ascending. 
std.sort.sort(Sym, dyn_syms, {}, S.symbolAddrLessThan); for (dyn_syms) |sym| { const this_section = s(sym.st_shndx); const name = try arena.dupe(u8, mem.sliceTo(dynstr[s(sym.st_name)..], 0)); const ty = @truncate(u4, sym.st_info); const binding = @truncate(u4, sym.st_info >> 4); const visib = @intToEnum(elf.STV, @truncate(u2, sym.st_other)); const size = s(sym.st_size); if (parse.blacklist.contains(name)) continue; if (size == 0) { log.warn("{s}: symbol '{s}' has size 0", .{ @tagName(parse.arch), name }); } switch (binding) { elf.STB_GLOBAL, elf.STB_WEAK => {}, else => { log.debug("{s}: skipping '{s}' due to it having binding '{d}'", .{ @tagName(parse.arch), name, binding, }); continue; }, } switch (ty) { elf.STT_NOTYPE, elf.STT_FUNC, elf.STT_OBJECT => {}, else => { log.debug("{s}: skipping '{s}' due to it having type '{d}'", .{ @tagName(parse.arch), name, ty, }); continue; }, } switch (visib) { .DEFAULT, .PROTECTED => {}, .INTERNAL, .HIDDEN => { log.debug("{s}: skipping '{s}' due to it having visibility '{s}'", .{ @tagName(parse.arch), name, @tagName(visib), }); continue; }, } const gop = try parse.sym_table.getOrPut(name); if (gop.found_existing) { if (gop.value_ptr.section != section_index_map[this_section]) { const sh_name = mem.sliceTo(shstrtab[s(shdrs[this_section].sh_name)..], 0); fatal("symbol '{s}' in arch {s} is in section {s} but in arch {s} is in section {s}", .{ name, @tagName(parse.arch), sh_name, archSetName(gop.value_ptr.present), parse.sections.keys()[gop.value_ptr.section], }); } if (gop.value_ptr.ty != ty) blk: { if (ty == elf.STT_NOTYPE) { log.warn("symbol '{s}' in arch {s} has type {d} but in arch {s} has type {d}. going with the one that is not STT_NOTYPE", .{ name, @tagName(parse.arch), ty, archSetName(gop.value_ptr.present), gop.value_ptr.ty, }); break :blk; } if (gop.value_ptr.ty == elf.STT_NOTYPE) { log.warn("symbol '{s}' in arch {s} has type {d} but in arch {s} has type {d}. 
going with the one that is not STT_NOTYPE", .{ name, @tagName(parse.arch), ty, archSetName(gop.value_ptr.present), gop.value_ptr.ty, }); gop.value_ptr.ty = ty; break :blk; } fatal("symbol '{s}' in arch {s} has type {d} but in arch {s} has type {d}", .{ name, @tagName(parse.arch), ty, archSetName(gop.value_ptr.present), gop.value_ptr.ty, }); } if (gop.value_ptr.visib != visib) { fatal("symbol '{s}' in arch {s} has visib {s} but in arch {s} has visib {s}", .{ name, @tagName(parse.arch), @tagName(visib), archSetName(gop.value_ptr.present), @tagName(gop.value_ptr.visib), }); } } else { gop.value_ptr.* = .{ .present = [1]bool{false} ** arches.len, .section = section_index_map[this_section], .ty = ty, .binding = [1]u4{0} ** arches.len, .visib = visib, .size = [1]u64{0} ** arches.len, }; } gop.value_ptr.present[archIndex(parse.arch)] = true; gop.value_ptr.size[archIndex(parse.arch)] = size; gop.value_ptr.binding[archIndex(parse.arch)] = binding; } } fn archIndex(arch: std.Target.Cpu.Arch) u8 { return switch (arch) { // zig fmt: off .riscv64 => 0, .mips => 1, .i386 => 2, .x86_64 => 3, .powerpc => 4, .powerpc64 => 5, .aarch64 => 6, else => unreachable, // zig fmt: on }; } fn archSetName(arch_set: [arches.len]bool) []const u8 { for (arches) |arch, i| { if (arch_set[i]) { return @tagName(arch); } } return "(none)"; } fn fatal(comptime format: []const u8, args: anytype) noreturn { log.err(format, args); std.process.exit(1); } const blacklisted_symbols = [_][]const u8{ "__absvdi2", "__absvsi2", "__absvti2", "__adddf3", "__addkf3", "__addodi4", "__addosi4", "__addoti4", "__addsf3", "__addtf3", "__addxf3", "__ashldi3", "__ashlti3", "__ashrdi3", "__ashrti3", "__atomic_compare_exchange", "__atomic_compare_exchange_1", "__atomic_compare_exchange_2", "__atomic_compare_exchange_4", "__atomic_compare_exchange_8", "__atomic_exchange", "__atomic_exchange_1", "__atomic_exchange_2", "__atomic_exchange_4", "__atomic_exchange_8", "__atomic_fetch_add_1", "__atomic_fetch_add_2", 
// Continuation of the blacklist begun above; parseElf skips any exported
// symbol whose name appears in this array.
"__atomic_fetch_add_4", "__atomic_fetch_add_8",
"__atomic_fetch_and_1", "__atomic_fetch_and_2", "__atomic_fetch_and_4", "__atomic_fetch_and_8",
"__atomic_fetch_nand_1", "__atomic_fetch_nand_2", "__atomic_fetch_nand_4", "__atomic_fetch_nand_8",
"__atomic_fetch_or_1", "__atomic_fetch_or_2", "__atomic_fetch_or_4", "__atomic_fetch_or_8",
"__atomic_fetch_sub_1", "__atomic_fetch_sub_2", "__atomic_fetch_sub_4", "__atomic_fetch_sub_8",
"__atomic_fetch_xor_1", "__atomic_fetch_xor_2", "__atomic_fetch_xor_4", "__atomic_fetch_xor_8",
"__atomic_load", "__atomic_load_1", "__atomic_load_2", "__atomic_load_4", "__atomic_load_8",
"__atomic_store", "__atomic_store_1", "__atomic_store_2", "__atomic_store_4", "__atomic_store_8",
"__bswapdi2", "__bswapsi2", "__bswapti2",
"__ceilh", "__ceilx", "__clear_cache",
"__clzdi2", "__clzsi2", "__clzti2",
"__cmpdf2", "__cmpdi2", "__cmpsf2", "__cmpsi2", "__cmptf2", "__cmpti2",
"__cosh", "__cosx",
"__ctzdi2", "__ctzsi2", "__ctzti2",
"__divdf3", "__divdi3", "__divkf3", "__divmoddi4", "__divmodsi4",
"__divsf3", "__divsi3", "__divtf3", "__divti3", "__divxf3",
"__dlstart",
"__eqdf2", "__eqkf2", "__eqsf2", "__eqtf2", "__eqxf2",
"__exp2h", "__exp2x", "__exph", "__expx",
"__extenddfkf2", "__extenddftf2", "__extenddfxf2",
"__extendhfsf2", "__extendhftf2", "__extendhfxf2",
"__extendsfdf2", "__extendsfkf2", "__extendsftf2", "__extendsfxf2",
"__extendxftf2",
"__fabsh", "__fabsx",
"__ffsdi2", "__ffssi2", "__ffsti2",
"__fixdfdi", "__fixdfsi", "__fixdfti",
"__fixkfdi", "__fixkfsi",
"__fixsfdi", "__fixsfsi", "__fixsfti",
"__fixtfdi", "__fixtfsi", "__fixtfti",
"__fixunsdfdi", "__fixunsdfsi", "__fixunsdfti",
"__fixunskfdi", "__fixunskfsi",
"__fixunssfdi", "__fixunssfsi", "__fixunssfti",
"__fixunstfdi", "__fixunstfsi", "__fixunstfti",
"__fixunsxfdi", "__fixunsxfsi", "__fixunsxfti",
"__fixxfdi", "__fixxfsi", "__fixxfti",
"__floatdidf", "__floatdikf", "__floatdisf", "__floatditf", "__floatdixf",
"__floatsidf", "__floatsikf", "__floatsisf", "__floatsitf", "__floatsixf",
"__floattidf", "__floattisf", "__floattitf", "__floattixf",
"__floatundidf", "__floatundikf", "__floatundisf", "__floatunditf", "__floatundixf",
"__floatunsidf", "__floatunsikf", "__floatunsisf", "__floatunsitf", "__floatunsixf",
"__floatuntidf", "__floatuntikf", "__floatuntisf", "__floatuntitf", "__floatuntixf",
"__floorh", "__floorx",
"__fmah", "__fmax", "__fmaxh", "__fmaxx", "__fminh", "__fminx",
"__fmodh", "__fmodx",
"__gedf2", "__gekf2", "__gesf2", "__getf2", "__gexf2",
"__gnu_f2h_ieee", "__gnu_h2f_ieee",
"__gtdf2", "__gtkf2", "__gtsf2", "__gttf2", "__gtxf2",
"__ledf2", "__lekf2", "__lesf2", "__letf2", "__lexf2",
"__log10h", "__log10x", "__log2h", "__log2x", "__logh", "__logx",
"__lshrdi3", "__lshrti3",
"__ltdf2", "__ltkf2", "__ltsf2", "__lttf2", "__ltxf2",
"__moddi3", "__modsi3", "__modti3",
"__muldc3", "__muldf3", "__muldi3", "__mulkf3",
"__mulodi4", "__mulosi4", "__muloti4",
"__mulsc3", "__mulsf3", "__mulsi3",
"__multc3", "__multf3", "__multi3",
"__mulxc3", "__mulxf3",
"__nedf2",
"__negdf2", "__negdi2", "__negsf2", "__negsi2", "__negti2",
"__negvdi2", "__negvsi2", "__negvti2",
"__nekf2", "__nesf2", "__netf2", "__nexf2",
"__paritydi2", "__paritysi2", "__parityti2",
"__popcountdi2", "__popcountsi2", "__popcountti2",
"__roundh", "__roundx",
"__sincosh", "__sincosx", "__sinh", "__sinx",
"__sqrth", "__sqrtx",
"__subdf3", "__subkf3",
"__subodi4", "__subosi4", "__suboti4",
"__subsf3", "__subtf3", "__subxf3",
"__tanh", "__tanx",
"__truncdfhf2", "__truncdfsf2", "__trunch",
"__trunckfdf2", "__trunckfsf2", "__truncsfhf2",
"__trunctfdf2", "__trunctfhf2", "__trunctfsf2", "__trunctfxf2",
"__truncx", "__truncxfdf2", "__truncxfhf2", "__truncxfsf2",
"__ucmpdi2", "__ucmpsi2", "__ucmpti2",
"__udivdi3", "__udivmoddi4", "__udivmodsi4", "__udivmodti4", "__udivsi3", "__udivti3",
"__umoddi3", "__umodsi3", "__umodti3",
"__unorddf2", "__unordkf2", "__unordsf2", "__unordtf2",
"__zig_probe_stack",
"ceilf128", "cosf128", "exp2f128", "expf128", "fabsf128", "floorf128",
"fmaf128", "fmaq", "fmaxf128", "fminf128", "fmodf128",
"log10f128", "log2f128", "logf128",
"roundf128", "sincosf128", "sinf128", "sqrtf128", "truncf128",
};
// tools/gen_stubs.zig