const std = @import("std");
const fs = std.fs;
const assert = std.debug.assert;
pub fn main() !void {
const gpa = std.heap.page_allocator;
var arena_state = std.heap.ArenaAllocator.init(gpa);
defer arena_state.deinit();
const arena = arena_state.allocator();
const args = try std.process.argsAlloc(arena);
const records_csv_path = args[1];
const zig_git_path = args[2];
const commits_file = args[3];
const commits_file_text = try fs.cwd().readFileAlloc(arena, commits_file, 2 * 1024 * 1024);
const backfill_zig_build_dir = try fs.path.join(arena, &[_][]const u8{
zig_git_path, "build-backfill",
});
const new_zig_exe = try fs.path.join(arena, &[_][]const u8{
backfill_zig_build_dir, "new", "bin", "zig",
});
const old_zig_exe = try fs.path.join(arena, &[_][]const u8{
backfill_zig_build_dir, "zig",
});
var commits = std.mem.tokenize(u8, commits_file_text, " \r\n\t");
while (commits.next()) |commit| {
std.debug.print("Checking out {s} to backfill...\n", .{commit});
try exec(gpa, &[_][]const u8{ "git", "checkout", commit }, .{
.cwd = zig_git_path,
});
// Touch CMakeLists.txt to force the new `zig version` to be picked up.
try exec(gpa, &[_][]const u8{ "touch", "CMakeLists.txt" }, .{
.cwd = zig_git_path,
});
std.debug.print("Building old zig to {s}...\n", .{old_zig_exe});
try exec(gpa, &[_][]const u8{"ninja"}, .{
.cwd = backfill_zig_build_dir,
});
const zig_version_raw = try execCapture(arena, &[_][]const u8{ old_zig_exe, "version" }, .{});
const zig_version = std.mem.trim(u8, zig_version_raw, " \r\n\t");
const timestamp_untrimmed = try execCapture(gpa, &[_][]const u8{
"git", "log",
"-n1", commit,
"--pretty=format:%at",
}, .{
.cwd = zig_git_path,
});
const timestamp = std.mem.trim(u8, timestamp_untrimmed, " \r\n\t");
std.debug.print("Collecting measurements for zig version {s} timestamp {s}...\n", .{
zig_version, timestamp,
});
try exec(gpa, &[_][]const u8{
new_zig_exe, "run", "collect-measurements.zig", "--",
records_csv_path, old_zig_exe, commit, timestamp,
}, .{});
}
}
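// Illustrative invocation (a sketch; the exact paths and the pre-configured
// CMake/ninja build under <zig-git>/build-backfill are assumptions inferred
// from the constants above, not documented requirements):
//
//   zig run backfill.zig -- records.csv ~/src/zig commits.txt
//
// where commits.txt contains whitespace-separated commit hashes to measure.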
fn exec(
gpa: std.mem.Allocator,
argv: []const []const u8,
options: struct { cwd: ?[]const u8 = null },
) !void {
const child = try std.ChildProcess.init(argv, gpa);
defer child.deinit();
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
child.cwd = options.cwd;
const term = try child.spawnAndWait();
switch (term) {
.Exited => |code| {
if (code != 0) {
return error.ChildProcessBadExitCode;
}
},
else => {
return error.ChildProcessCrashed;
},
}
}
fn execCapture(
gpa: std.mem.Allocator,
argv: []const []const u8,
options: struct { cwd: ?[]const u8 = null },
) ![]u8 {
//std.debug.print("exec argv[0]={} cwd={}\n", .{argv[0], options.cwd});
const child = try std.ChildProcess.init(argv, gpa);
defer child.deinit();
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Inherit;
child.cwd = options.cwd;
//std.debug.print("cwd={}\n", .{child.cwd});
//for (argv) |arg| {
// std.debug.print("{} ", .{arg});
//}
//std.debug.print("\n", .{});
try child.spawn();
const stdout_in = child.stdout.?.reader();
const stdout = try stdout_in.readAllAlloc(gpa, 9999);
errdefer gpa.free(stdout);
const term = try child.wait();
switch (term) {
.Exited => |code| {
if (code != 0) {
return error.ChildProcessBadExitCode;
}
},
else => {
return error.ChildProcessCrashed;
},
}
return stdout;
} | backfill.zig |
const std = @import("std");
const sample_utils = @import("sample_utils.zig");
const c = @import("c.zig").c;
const glfw = @import("glfw");
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var allocator = gpa.allocator();
const setup = try sample_utils.setup(allocator);
const queue = c.wgpuDeviceGetQueue(setup.device);
const framebuffer_size = try setup.window.getFramebufferSize();
const window_data = try allocator.create(WindowData);
window_data.* = .{
.surface = null,
.swap_chain = null,
.swap_chain_format = undefined,
.current_desc = undefined,
.target_desc = undefined,
};
setup.window.setUserPointer(window_data);
// If targeting OpenGL, we can't use the newer WGPUSurface API. Instead, we need to use the
// older Dawn-specific API. https://bugs.chromium.org/p/dawn/issues/detail?id=269&q=surface&can=2
const use_legacy_api = setup.backend_type == c.WGPUBackendType_OpenGL or setup.backend_type == c.WGPUBackendType_OpenGLES;
var descriptor: c.WGPUSwapChainDescriptor = undefined;
if (!use_legacy_api) {
window_data.swap_chain_format = c.WGPUTextureFormat_BGRA8Unorm;
descriptor = c.WGPUSwapChainDescriptor{
.nextInChain = null,
.label = "basic swap chain",
.usage = c.WGPUTextureUsage_RenderAttachment,
.format = window_data.swap_chain_format,
.width = framebuffer_size.width,
.height = framebuffer_size.height,
.presentMode = c.WGPUPresentMode_Fifo,
.implementation = 0,
};
window_data.surface = sample_utils.createSurfaceForWindow(
setup.instance,
setup.window,
comptime sample_utils.detectGLFWOptions(),
);
} else {
const binding = c.machUtilsCreateBinding(setup.backend_type, @ptrCast(*c.GLFWwindow, setup.window.handle), setup.device);
if (binding == null) {
@panic("failed to create Dawn backend binding");
}
descriptor = std.mem.zeroes(c.WGPUSwapChainDescriptor);
descriptor.implementation = c.machUtilsBackendBinding_getSwapChainImplementation(binding);
window_data.swap_chain = c.wgpuDeviceCreateSwapChain(setup.device, null, &descriptor);
window_data.swap_chain_format = c.machUtilsBackendBinding_getPreferredSwapChainTextureFormat(binding);
c.wgpuSwapChainConfigure(
window_data.swap_chain.?,
window_data.swap_chain_format,
c.WGPUTextureUsage_RenderAttachment,
framebuffer_size.width,
framebuffer_size.height,
);
}
window_data.current_desc = descriptor;
window_data.target_desc = descriptor;
const vs =
\\ @stage(vertex) fn main(
\\ @builtin(vertex_index) VertexIndex : u32
\\ ) -> @builtin(position) vec4<f32> {
\\ var pos = array<vec2<f32>, 3>(
\\ vec2<f32>( 0.0, 0.5),
\\ vec2<f32>(-0.5, -0.5),
\\ vec2<f32>( 0.5, -0.5)
\\ );
\\ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
\\ }
;
var vs_wgsl_descriptor = try allocator.create(c.WGPUShaderModuleWGSLDescriptor);
vs_wgsl_descriptor.chain.next = null;
vs_wgsl_descriptor.chain.sType = c.WGPUSType_ShaderModuleWGSLDescriptor;
vs_wgsl_descriptor.source = vs;
const vs_shader_descriptor = c.WGPUShaderModuleDescriptor{
.nextInChain = @ptrCast(*const c.WGPUChainedStruct, vs_wgsl_descriptor),
.label = "my vertex shader",
};
const vs_module = c.wgpuDeviceCreateShaderModule(setup.device, &vs_shader_descriptor);
const fs =
\\ @stage(fragment) fn main() -> @location(0) vec4<f32> {
\\ return vec4<f32>(1.0, 0.0, 0.0, 1.0);
\\ }
;
var fs_wgsl_descriptor = try allocator.create(c.WGPUShaderModuleWGSLDescriptor);
fs_wgsl_descriptor.chain.next = null;
fs_wgsl_descriptor.chain.sType = c.WGPUSType_ShaderModuleWGSLDescriptor;
fs_wgsl_descriptor.source = fs;
const fs_shader_descriptor = c.WGPUShaderModuleDescriptor{
.nextInChain = @ptrCast(*const c.WGPUChainedStruct, fs_wgsl_descriptor),
.label = "my fragment shader",
};
const fs_module = c.wgpuDeviceCreateShaderModule(setup.device, &fs_shader_descriptor);
// Fragment state
var blend = std.mem.zeroes(c.WGPUBlendState);
blend.color.operation = c.WGPUBlendOperation_Add;
blend.color.srcFactor = c.WGPUBlendFactor_One;
blend.color.dstFactor = c.WGPUBlendFactor_One;
blend.alpha.operation = c.WGPUBlendOperation_Add;
blend.alpha.srcFactor = c.WGPUBlendFactor_One;
blend.alpha.dstFactor = c.WGPUBlendFactor_One;
var color_target = std.mem.zeroes(c.WGPUColorTargetState);
color_target.format = window_data.swap_chain_format;
color_target.blend = &blend;
color_target.writeMask = c.WGPUColorWriteMask_All;
var fragment = std.mem.zeroes(c.WGPUFragmentState);
fragment.module = fs_module;
fragment.entryPoint = "main";
fragment.targetCount = 1;
fragment.targets = &color_target;
var pipeline_descriptor = std.mem.zeroes(c.WGPURenderPipelineDescriptor);
pipeline_descriptor.fragment = &fragment;
// Other state
pipeline_descriptor.layout = null;
pipeline_descriptor.depthStencil = null;
pipeline_descriptor.vertex.module = vs_module;
pipeline_descriptor.vertex.entryPoint = "main";
pipeline_descriptor.vertex.bufferCount = 0;
pipeline_descriptor.vertex.buffers = null;
pipeline_descriptor.multisample.count = 1;
pipeline_descriptor.multisample.mask = 0xFFFFFFFF;
pipeline_descriptor.multisample.alphaToCoverageEnabled = false;
pipeline_descriptor.primitive.frontFace = c.WGPUFrontFace_CCW;
pipeline_descriptor.primitive.cullMode = c.WGPUCullMode_None;
pipeline_descriptor.primitive.topology = c.WGPUPrimitiveTopology_TriangleList;
pipeline_descriptor.primitive.stripIndexFormat = c.WGPUIndexFormat_Undefined;
const pipeline = c.wgpuDeviceCreateRenderPipeline(setup.device, &pipeline_descriptor);
c.wgpuShaderModuleRelease(vs_module);
c.wgpuShaderModuleRelease(fs_module);
// Record the new framebuffer width/height so the swap chain can be reconfigured in frame(),
// otherwise e.g. the Vulkan device would be lost after a resize.
setup.window.setFramebufferSizeCallback((struct {
fn callback(window: glfw.Window, width: u32, height: u32) void {
const pl = window.getUserPointer(WindowData);
pl.?.target_desc.width = width;
pl.?.target_desc.height = height;
}
}).callback);
while (!setup.window.shouldClose()) {
try frame(.{
.window = setup.window,
.device = setup.device,
.pipeline = pipeline,
.queue = queue,
});
std.time.sleep(16 * std.time.ns_per_ms);
}
}
const WindowData = struct {
surface: ?c.WGPUSurface,
swap_chain: ?c.WGPUSwapChain,
swap_chain_format: c.WGPUTextureFormat,
current_desc: c.WGPUSwapChainDescriptor,
target_desc: c.WGPUSwapChainDescriptor,
};
const FrameParams = struct {
window: glfw.Window,
device: c.WGPUDevice,
pipeline: c.WGPURenderPipeline,
queue: c.WGPUQueue,
};
fn isDescriptorEqual(a: c.WGPUSwapChainDescriptor, b: c.WGPUSwapChainDescriptor) bool {
return a.usage == b.usage and a.format == b.format and a.width == b.width and a.height == b.height and a.presentMode == b.presentMode;
}
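// frame() below treats `current_desc` as the configuration the swap chain was
// last (re)created with and `target_desc` as the most recently requested one;
// the field-by-field comparison above is what triggers a lazy recreation.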
fn frame(params: FrameParams) !void {
try glfw.pollEvents();
const pl = params.window.getUserPointer(WindowData).?;
if (pl.swap_chain == null or !isDescriptorEqual(pl.current_desc, pl.target_desc)) {
const use_legacy_api = pl.surface == null;
if (!use_legacy_api) {
pl.swap_chain = c.wgpuDeviceCreateSwapChain(params.device, pl.surface.?, &pl.target_desc);
} else {
c.wgpuSwapChainConfigure(
pl.swap_chain.?,
pl.swap_chain_format,
c.WGPUTextureUsage_RenderAttachment,
@intCast(u32, pl.target_desc.width),
@intCast(u32, pl.target_desc.height),
);
}
pl.current_desc = pl.target_desc;
}
const back_buffer_view = c.wgpuSwapChainGetCurrentTextureView(pl.swap_chain.?);
var render_pass_info = std.mem.zeroes(c.WGPURenderPassDescriptor);
var color_attachment = std.mem.zeroes(c.WGPURenderPassColorAttachment);
color_attachment.view = back_buffer_view;
color_attachment.resolveTarget = null;
color_attachment.clearValue = c.WGPUColor{ .r = 0.0, .g = 0.0, .b = 0.0, .a = 0.0 };
color_attachment.loadOp = c.WGPULoadOp_Clear;
color_attachment.storeOp = c.WGPUStoreOp_Store;
render_pass_info.colorAttachmentCount = 1;
render_pass_info.colorAttachments = &color_attachment;
render_pass_info.depthStencilAttachment = null;
const encoder = c.wgpuDeviceCreateCommandEncoder(params.device, null);
const pass = c.wgpuCommandEncoderBeginRenderPass(encoder, &render_pass_info);
c.wgpuRenderPassEncoderSetPipeline(pass, params.pipeline);
c.wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
c.wgpuRenderPassEncoderEnd(pass);
c.wgpuRenderPassEncoderRelease(pass);
const commands = c.wgpuCommandEncoderFinish(encoder, null);
c.wgpuCommandEncoderRelease(encoder);
c.wgpuQueueSubmit(params.queue, 1, &commands);
c.wgpuCommandBufferRelease(commands);
c.wgpuSwapChainPresent(pl.swap_chain.?);
c.wgpuTextureViewRelease(back_buffer_view);
} | gpu-dawn/src/dawn/hello_triangle.zig |
const std = @import("std");
const stdx = @import("stdx.zig");
const ds = stdx.ds;
const builtin = @import("builtin");
const log = stdx.log.scoped(.wasm);
/// A global buffer for wasm that can be used for:
/// Writing to js: a growing buffer lets wasm share the same abstractions as desktop code without needing an allocator at the call site, e.g. logging.
/// Reading from js: if js needs to return dynamic data, it has to write into memory that wasm knows about.
pub var js_buffer: WasmJsBuffer = undefined;
var galloc: std.mem.Allocator = undefined;
pub fn init(alloc: std.mem.Allocator) void {
galloc = alloc;
js_buffer.init(alloc);
promises = ds.CompactUnorderedList(PromiseId, PromiseInternal).init(alloc);
promise_child_deps = ds.CompactManySinglyLinkedList(PromiseId, PromiseDepId, PromiseId).init(alloc);
}
pub fn deinit() void {
js_buffer.deinit();
}
pub fn getJsBuffer() *WasmJsBuffer {
return &js_buffer;
}
// Used to read and write to js.
// We have two buffers since it's common to write output while we are reading input from js.
pub const WasmJsBuffer = struct {
const Self = @This();
// Used to write data to js.
output_buf: std.ArrayList(u8),
output_writer: std.ArrayList(u8).Writer,
input_buf: std.ArrayList(u8),
pub fn init(self: *Self, alloc: std.mem.Allocator) void {
self.* = .{
.output_buf = std.ArrayList(u8).init(alloc),
.output_writer = undefined,
.input_buf = std.ArrayList(u8).init(alloc),
};
// Ensure buffers have capacity since we will be returning pointers to js.
self.output_buf.resize(1) catch unreachable;
self.input_buf.resize(1) catch unreachable;
// TODO: also get a writer that does appendSliceAssumeCapacity.
self.output_writer = self.output_buf.writer();
}
pub fn deinit(self: Self) void {
self.output_buf.deinit();
self.input_buf.deinit();
}
// After wasm execution, write the new input buffer ptr/cap and return the output buffer ptr.
pub fn writeResult(self: *Self) *const u8 {
self.output_buf.shrinkRetainingCapacity(0);
self.output_writer.writeIntLittle(u32, @intCast(u32, @ptrToInt(self.input_buf.items.ptr))) catch unreachable;
self.output_writer.writeIntLittle(u32, @intCast(u32, self.input_buf.capacity)) catch unreachable;
return &self.output_buf.items[0];
}
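// Result layout produced by writeResult above (derived from its two writes; the
// js side is assumed to read it in the same order):
//
//   bytes 0..3: little-endian u32 pointer to input_buf.items
//   bytes 4..7: little-endian u32 capacity of input_buf
//
// js then writes its reply into that region and the wasm side reads it back
// with the readIntAt/readF32At helpers below.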
pub fn appendInt(self: *Self, comptime T: type, i: T) void {
self.output_writer.writeIntLittle(T, i) catch unreachable;
}
pub fn writeIntAt(self: *Self, comptime T: type, idx: usize, i: T) void {
std.mem.writeIntLittle(T, @ptrCast(*[@sizeOf(T)]u8, &self.output_buf.items[idx]), i);
}
pub fn appendF32(self: *Self, f: f32) void {
self.output_writer.writeIntLittle(u32, @bitCast(u32, f)) catch unreachable;
}
pub fn writeF32At(self: *Self, idx: usize, f: f32) void {
std.mem.writeIntLittle(u32, @ptrCast(*[4]u8, &self.output_buf.items[idx]), @bitCast(u32, f));
}
pub fn readIntAt(self: *Self, comptime T: type, idx: usize) T {
return std.mem.readIntLittle(T, @ptrCast(*[@sizeOf(T)]u8, &self.input_buf.items[idx]));
}
pub fn readF32At(self: *Self, idx: usize) f32 {
return stdx.mem.readFloat32Little(@ptrCast(*[4]u8, &self.input_buf.items[idx]));
}
pub fn clearOutputWithSize(self: *Self, size: usize) void {
self.output_buf.clearRetainingCapacity();
self.output_buf.resize(size) catch unreachable;
}
pub fn clearOutput(self: *Self) void {
self.output_buf.clearRetainingCapacity();
}
pub fn getOutputPtr(self: *Self) [*]const u8 {
return self.output_buf.items.ptr;
}
};
pub const PromiseId = u32;
var promises: ds.CompactUnorderedList(PromiseId, PromiseInternal) = undefined;
const PromiseDepId = u32;
var promise_child_deps: ds.CompactManySinglyLinkedList(PromiseId, PromiseDepId, PromiseId) = undefined;
pub const NullId = ds.CompactNull(u32);
pub fn createPromise(comptime T: type) Promise(T) {
const id = promises.add(.{
.num_deps = 0,
.cur_resolved_deps = 0,
.child_deps_list_id = null,
.then_copy_to = null,
.data_ptr = undefined,
.auto_free = false,
.resolved = false,
.dynamic_size = std.meta.trait.isSlice(T),
}) catch unreachable;
return .{
.id = id,
};
}
pub fn createAndPromise(ids: []const PromiseId) Promise(void) {
const id = promises.add(.{
.num_deps = ids.len,
.cur_resolved_deps = 0,
.child_deps_list_id = null,
.then_copy_to = null,
.data_ptr = undefined,
.auto_free = false,
.resolved = false,
.dynamic_size = false,
}) catch unreachable;
for (ids) |parent_id| {
const p = promises.getPtrNoCheck(parent_id);
if (p.child_deps_list_id == null) {
p.child_deps_list_id = promise_child_deps.addListWithHead(id) catch unreachable;
} else {
const last = promise_child_deps.getListHead(p.child_deps_list_id.?).?;
_ = promise_child_deps.insertAfter(last, id) catch unreachable;
}
}
return .{
.id = id,
};
}
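// Hedged usage sketch (the variable names here are illustrative, not from this file):
//
//   var str: []const u8 = undefined;
//   const p = createPromise([]const u8).thenCopyTo(&str).autoFree();
//   // ... hand p.id to js; js later calls wasmResolvePromise(p.id, len) ...
//
// createAndPromise builds a promise that resolves only once every id in `ids`
// has resolved, mirroring Promise.all on the js side.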
export fn wasmEnsureFreeCapacity(size: u32, cur_input_len: u32) *const u8 {
// Must sync over the current input length or a realloc wouldn't know about the new data.
js_buffer.input_buf.items.len = cur_input_len;
js_buffer.input_buf.ensureUnusedCapacity(size) catch unreachable;
return js_buffer.writeResult();
}
// Called from js to resolve a promise.
export fn wasmResolvePromise(id: PromiseId, data_size: u32) void {
const p = promises.getPtrNoCheck(id);
if (p.dynamic_size) {
// We have to allocate heap memory for variable sized values.
const copy = stdx.heap.getDefaultAllocator().alloc(u8, data_size) catch unreachable;
js_buffer.input_buf.resize(data_size) catch unreachable;
std.mem.copy(u8, copy, js_buffer.input_buf.items[0..data_size]);
if (p.then_copy_to) |dst| {
stdx.mem.ptrCastAlign(*[]u8, dst).* = copy;
}
} else {
if (p.then_copy_to) |dst| {
const dst_slice = stdx.mem.ptrCastAlign([*]u8, dst)[0..data_size];
std.mem.copy(u8, dst_slice, js_buffer.input_buf.items[0..data_size]);
}
}
p.resolved = true;
if (p.child_deps_list_id) |list_id| {
var cur = promise_child_deps.getListHead(list_id).?;
while (cur != NullId) {
const child_id = promise_child_deps.getNoCheck(cur);
const child_p = promises.getPtrNoCheck(child_id);
child_p.cur_resolved_deps += 1;
if (child_p.cur_resolved_deps == child_p.num_deps) {
child_p.resolved = true;
}
cur = promise_child_deps.getNextIdNoCheck(cur);
}
}
if (p.auto_free) {
promises.remove(id);
}
}
pub fn resolvePromise(id: PromiseId, value: anytype) void {
const p = promises.getPtrNoCheck(id);
if (p.then_copy_to) |dst| {
stdx.mem.ptrCastAlign(*@TypeOf(value), dst).* = value;
}
p.resolved = true;
if (p.child_deps_list_id) |list_id| {
var cur = promise_child_deps.getListHead(list_id).?;
while (cur != NullId) {
const child_id = promise_child_deps.getNoCheck(cur);
const child_p = promises.getPtrNoCheck(child_id);
child_p.cur_resolved_deps += 1;
if (child_p.cur_resolved_deps == child_p.num_deps) {
child_p.resolved = true;
}
cur = promise_child_deps.getNextIdNoCheck(cur);
}
}
if (p.auto_free) {
promises.remove(id);
}
}
pub fn Promise(comptime T: type) type {
_ = T;
return struct {
const Self = @This();
id: PromiseId,
pub fn isResolved(self: Self) bool {
return promises.get(self.id).resolved;
}
pub fn thenCopyTo(self: Self, ptr: *T) Self {
promises.getPtrNoCheck(self.id).then_copy_to = ptr;
return self;
}
pub fn autoFree(self: Self) Self {
promises.getPtrNoCheck(self.id).auto_free = true;
return self;
}
};
}
const PromiseInternal = struct {
num_deps: u32,
cur_resolved_deps: u32,
child_deps_list_id: ?PromiseDepId,
then_copy_to: ?*anyopaque,
data_ptr: ds.SizedPtr,
auto_free: bool,
resolved: bool,
dynamic_size: bool,
};
const usize_len = @sizeOf(usize);
comptime {
// Conditionally export, or desktop builds will have the wrong malloc.
if (builtin.target.isWasm()) {
@export(malloc, .{ .name = "malloc", .linkage = .Strong });
@export(free, .{ .name = "free", .linkage = .Strong });
@export(realloc, .{ .name = "realloc", .linkage = .Strong });
@export(fabs, .{ .name = "fabs", .linkage = .Strong });
@export(sqrt, .{ .name = "sqrt", .linkage = .Strong });
@export(ldexp, .{ .name = "ldexp", .linkage = .Strong });
@export(pow, .{ .name = "pow", .linkage = .Strong });
@export(abs, .{ .name = "abs", .linkage = .Strong });
@export(memset, .{ .name = "memset", .linkage = .Strong });
@export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
}
}
/// libc malloc.
fn malloc(size: usize) callconv(.C) *anyopaque {
// Allocate a block, sized in usize words, large enough to hold the length header plus the user allocation.
const eff_size = 1 + (size + usize_len - 1) / usize_len;
const block = galloc.alloc(usize, eff_size) catch unreachable;
// Header stores the length.
block[0] = eff_size;
// Return the user allocation.
return &block[1];
}
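// Block layout shared by malloc/realloc/free above (one usize of header, then
// the user data; everything is counted in usize words):
//
//   block: [ len (in usize words, including this header) | user bytes ... ]
//                                                          ^ pointer returned to C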
/// libc fabs.
fn fabs(x: f64) callconv(.C) f64 {
return @fabs(x);
}
/// libc free.
fn free(ptr: ?*anyopaque) callconv(.C) void {
if (ptr == null) {
return;
}
const addr = @ptrToInt(ptr) - usize_len;
const block = @intToPtr([*]const usize, addr);
const len = block[0];
galloc.free(block[0..len]);
}
/// libc realloc.
fn realloc(ptr: ?*anyopaque, size: usize) callconv(.C) *anyopaque {
if (ptr == null) {
return malloc(size);
}
const eff_size = 1 + (size + usize_len - 1) / usize_len;
const addr = @ptrToInt(ptr.?) - usize_len;
const block = @intToPtr([*]usize, addr);
const len = block[0];
const slice: []usize = block[0..len];
const new_slice = galloc.realloc(slice, eff_size) catch unreachable;
new_slice[0] = eff_size;
return @ptrCast(*anyopaque, &new_slice[1]);
}
/// libc sqrt.
fn sqrt(x: f64) callconv(.C) f64 {
return std.math.sqrt(x);
}
/// libc ldexp.
fn ldexp(x: f64, n: i32) callconv(.C) f64 {
return std.math.ldexp(x, n);
}
/// libc pow.
fn pow(x: f64, y: f64) callconv(.C) f64 {
return std.math.pow(f64, x, y);
}
/// libc abs.
fn abs(x: i32) callconv(.C) i32 {
return std.math.absInt(x) catch unreachable;
}
/// libc memset.
fn memset(s: ?*anyopaque, val: i32, n: usize) callconv(.C) ?*anyopaque {
// Some user code may try to write to a bad location in wasm with n=0. Wasm doesn't allow that.
if (n > 0) {
const slice = @ptrCast([*]u8, s)[0..n];
std.mem.set(u8, slice, @intCast(u8, val));
}
return s;
}
/// libc memcpy.
fn memcpy(dst: ?*anyopaque, src: ?*anyopaque, n: usize) callconv(.C) ?*anyopaque {
const dst_slice = @ptrCast([*]u8, dst)[0..n];
const src_slice = @ptrCast([*]u8, src)[0..n];
std.mem.copy(u8, dst_slice, src_slice);
return dst;
} | stdx/wasm.zig |
const Mir = @This();
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const Air = @import("../../Air.zig");
const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
pub const Inst = struct {
tag: Tag,
cond: bits.Condition = .al,
/// The meaning of this depends on `tag`.
data: Data,
pub const Tag = enum(u16) {
/// Add
add,
/// Bitwise AND
@"and",
/// Arithmetic Shift Right
asr,
/// Branch
b,
/// Breakpoint
bkpt,
/// Branch with Link and Exchange
blx,
/// Branch and Exchange
bx,
/// Compare
cmp,
/// Pseudo-instruction: Argument
dbg_arg,
/// Pseudo-instruction: End of prologue
dbg_prologue_end,
/// Pseudo-instruction: Beginning of epilogue
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
/// Bitwise Exclusive OR
eor,
/// Load Register
ldr,
/// Pseudo-instruction: Load pointer to stack argument offset
ldr_ptr_stack_argument,
/// Load Register
ldr_stack_argument,
/// Load Register Byte
ldrb,
/// Load Register Byte
ldrb_stack_argument,
/// Load Register Halfword
ldrh,
/// Load Register Halfword
ldrh_stack_argument,
/// Load Register Signed Byte
ldrsb,
/// Load Register Signed Byte
ldrsb_stack_argument,
/// Load Register Signed Halfword
ldrsh,
/// Load Register Signed Halfword
ldrsh_stack_argument,
/// Logical Shift Left
lsl,
/// Logical Shift Right
lsr,
/// Move
mov,
/// Move
movw,
/// Move Top
movt,
/// Multiply
mul,
/// Bitwise NOT
mvn,
/// No Operation
nop,
/// Bitwise OR
orr,
/// Pop multiple registers from Stack
pop,
/// Push multiple registers to Stack
push,
/// Reverse Subtract
rsb,
/// Signed Bit Field Extract
sbfx,
/// Store Register
str,
/// Store Register Byte
strb,
/// Store Register Halfword
strh,
/// Subtract
sub,
/// Supervisor Call
svc,
/// Unsigned Bit Field Extract
ubfx,
};
/// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// how to interpret the data within.
pub const Data = union {
/// No additional data
///
/// Used by e.g. nop
nop: void,
/// Another instruction
///
/// Used by e.g. b
inst: Index,
/// A 16-bit immediate value.
///
/// Used by e.g. bkpt
imm16: u16,
/// A 24-bit immediate value.
///
/// Used by e.g. svc
imm24: u24,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. load_memory
payload: u32,
/// A register
///
/// Used by e.g. blx
reg: Register,
/// A register and a stack offset
///
/// Used by e.g. ldr_stack_argument
r_stack_offset: struct {
rt: Register,
stack_offset: u32,
},
/// A register and a 16-bit unsigned immediate
///
/// Used by e.g. movw
r_imm16: struct {
rd: Register,
imm16: u16,
},
/// Two registers and a shift amount
///
/// Used by e.g. lsl
rr_shift: struct {
rd: Register,
rm: Register,
shift_amount: bits.Instruction.ShiftAmount,
},
/// Two registers and an operand
///
/// Used by e.g. sub
rr_op: struct {
rd: Register,
rn: Register,
op: bits.Instruction.Operand,
},
/// Two registers and an offset
///
/// Used by e.g. ldr
rr_offset: struct {
rt: Register,
rn: Register,
offset: bits.Instruction.OffsetArgs,
},
/// Two registers and an extra load/store offset
///
/// Used by e.g. ldrh
rr_extra_offset: struct {
rt: Register,
rn: Register,
offset: bits.Instruction.ExtraLoadStoreOffsetArgs,
},
/// Two registers and a lsb (range 0-31) and a width (range
/// 1-32)
///
/// Used by e.g. sbfx
rr_lsb_width: struct {
rd: Register,
rn: Register,
lsb: u5,
width: u6,
},
/// Three registers
///
/// Used by e.g. mul
rrr: struct {
rd: Register,
rn: Register,
rm: Register,
},
/// An unordered list of registers
///
/// Used by e.g. push
register_list: bits.Instruction.RegisterList,
/// Debug info: line and column
///
/// Used by e.g. dbg_line
dbg_line_column: struct {
line: u32,
column: u32,
},
/// Debug info: argument
///
/// Used by e.g. dbg_arg
dbg_arg_info: struct {
air_inst: Air.Inst.Index,
arg_index: u32,
},
};
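// Illustrative pairing of Tag and Data (a sketch inferred from the field docs
// above; the emit code that consumes these lives elsewhere): an `add`
// instruction would typically carry `rr_op` data, e.g.
//
//   .{ .tag = .add, .data = .{ .rr_op = .{ .rd = .r0, .rn = .r1, .op = op } } }
//
// while `svc` carries `imm24` and `push`/`pop` carry a `register_list`.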
// Make sure we don't accidentally make instructions bigger than expected.
// Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
comptime {
if (builtin.mode != .Debug) {
assert(@sizeOf(Data) == 8);
}
}
};
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
}
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
const fields = std.meta.fields(T);
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
@field(result, field.name) = switch (field.field_type) {
u32 => mir.extra[i],
i32 => @bitCast(i32, mir.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
}
return .{
.data = result,
.end = i,
};
} | src/arch/arm/Mir.zig |
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
pub extern fn __extendsfdf2(a: f32) f64 {
return @inlineCall(extendXfYf2, f64, f32, @bitCast(u32, a));
}
pub extern fn __extenddftf2(a: f64) f128 {
return @inlineCall(extendXfYf2, f128, f64, @bitCast(u64, a));
}
pub extern fn __extendsftf2(a: f32) f128 {
return @inlineCall(extendXfYf2, f128, f32, @bitCast(u32, a));
}
pub extern fn __extendhfsf2(a: u16) f32 {
return @inlineCall(extendXfYf2, f32, f16, a);
}
const CHAR_BIT = 8;
fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: @IntType(false, @typeInfo(src_t).Float.bits)) dst_t {
@setRuntimeSafety(builtin.is_test);
const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
const SrcShift = std.math.Log2Int(src_rep_t);
const DstShift = std.math.Log2Int(dst_rep_t);
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
const srcBits = @sizeOf(src_t) * CHAR_BIT;
const srcExpBits = srcBits - srcSigBits - 1;
const srcInfExp = (1 << srcExpBits) - 1;
const srcExpBias = srcInfExp >> 1;
const srcMinNormal = 1 << srcSigBits;
const srcInfinity = srcInfExp << srcSigBits;
const srcSignMask = 1 << (srcSigBits + srcExpBits);
const srcAbsMask = srcSignMask - 1;
const srcQNaN = 1 << (srcSigBits - 1);
const srcNaNCode = srcQNaN - 1;
const dstBits = @sizeOf(dst_t) * CHAR_BIT;
const dstExpBits = dstBits - dstSigBits - 1;
const dstInfExp = (1 << dstExpBits) - 1;
const dstExpBias = dstInfExp >> 1;
const dstMinNormal: dst_rep_t = @as(dst_rep_t, 1) << dstSigBits;
// Break a into a sign and representation of the absolute value
const aRep: src_rep_t = @bitCast(src_rep_t, a);
const aAbs: src_rep_t = aRep & srcAbsMask;
const sign: src_rep_t = aRep & srcSignMask;
var absResult: dst_rep_t = undefined;
if (aAbs -% srcMinNormal < srcInfinity - srcMinNormal) {
// a is a normal number.
// Extend to the destination type by shifting the significand and
// exponent into the proper position and rebiasing the exponent.
absResult = @as(dst_rep_t, aAbs) << (dstSigBits - srcSigBits);
absResult += (dstExpBias - srcExpBias) << dstSigBits;
} else if (aAbs >= srcInfinity) {
// a is NaN or infinity.
// Conjure the result by beginning with infinity, then setting the qNaN
// bit (if needed) and right-aligning the rest of the trailing NaN
// payload field.
absResult = dstInfExp << dstSigBits;
absResult |= @as(dst_rep_t, aAbs & srcQNaN) << (dstSigBits - srcSigBits);
absResult |= @as(dst_rep_t, aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
} else if (aAbs != 0) {
// a is denormal.
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const scale: u32 = @clz(src_rep_t, aAbs) -
@clz(src_rep_t, @as(src_rep_t, srcMinNormal));
absResult = @as(dst_rep_t, aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
absResult ^= dstMinNormal;
const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1;
absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits;
} else {
// a is zero.
absResult = 0;
}
// Apply the signbit to (dst_t)abs(a).
const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @as(dst_rep_t, sign) << (dstBits - srcBits);
return @bitCast(dst_t, result);
}
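// Worked example for the normal-number path above (f32 -> f64, arithmetic only):
// srcSigBits = 23, dstSigBits = 52, srcExpBias = 127, dstExpBias = 1023. The
// significand is shifted left by 52 - 23 = 29 bits, and adding
// (1023 - 127) << 52 rewrites the stored exponent e + 127 as e + 1023 without
// touching the significand bits.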
test "import extendXfYf2" {
_ = @import("extendXfYf2_test.zig");
} | lib/std/special/compiler_rt/extendXfYf2.zig |
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;
// Never freed until exit
pub var global_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
// Reset before each matching input file is processed
pub var temp_arena = TempAllocator.init(std.heap.page_allocator);
/// A version of std.heap.ArenaAllocator that can be reset without freeing (all of) the underlying memory, allowing it to
/// be reused again. This is useful when a program has a "top level" loop where the allocator can be reset, such as the
/// main loop of a game.
///
/// Each time the allocator is reset, it may choose to allocate a single contiguous block that it expects should be large
/// enough to cover all usage until the next reset, based on usage during previous iterations (low-pass filtered to avoid
/// thrashing).
pub const TempAllocator = struct {
child_allocator: Allocator,
state: State,
/// Inner state of TempAllocator. Can be stored rather than the entire TempAllocator
/// as a memory-saving optimization.
pub const State = struct {
buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}),
end_index: usize = 0,
usage_estimate: usize = 0,
prev_usage: usize = 0,
pub fn promote(self: State, child_allocator: Allocator) TempAllocator {
return .{
.child_allocator = child_allocator,
.state = self,
};
}
};
pub fn allocator(self: *TempAllocator) Allocator {
return Allocator.init(self, alloc, resize, free);
}
const BufNode = std.SinglyLinkedList([]u8).Node;
pub fn init(child_allocator: Allocator) TempAllocator {
return (State{}).promote(child_allocator);
}
pub fn deinit(self: TempAllocator) void {
var it = self.state.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
const next_it = node.next;
self.child_allocator.free(node.data);
it = next_it;
}
}
pub fn reset(self: *TempAllocator, min_capacity: usize) !void {
// The "half-life" for usage_estimate reacting to changes in usage is:
// ~11 cycles after an increase
// ~710 cycles after a decrease
// If the initial capacity node overflows two cycles in a row, it will be expanded on the second reset.
try self.resetAdvanced(min_capacity, 1, 64, 1024);
}
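// Hedged usage sketch (the loop body and sizes are assumptions, not part of
// this file): the intended pattern is one reset per "top level" iteration.
//
//   var alloc = temp_arena.allocator();
//   while (processNextInput()) {
//       // ... per-iteration scratch allocations via `alloc` ...
//       try temp_arena.reset(64 * 1024);
//   }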
pub fn resetAdvanced(self: *TempAllocator, min_capacity: usize, comptime usage_contraction_rate: u16, comptime usage_expansion_rate: u16, comptime fast_usage_expansion_rate: u16) !void {
if (self.state.buffer_list.first) |first_node| {
var usage = self.state.end_index;
var it = first_node.next;
while (it) |node| {
usage += node.data.len - @sizeOf(BufNode);
const next_it = node.next;
self.child_allocator.free(node.data);
it = next_it;
}
first_node.next = null;
self.state.end_index = 0;
const capacity = first_node.data.len - @sizeOf(BufNode);
const new_usage_estimate = self.computeUsageEstimate(usage, capacity, usage_contraction_rate, usage_expansion_rate, fast_usage_expansion_rate);
self.state.usage_estimate = new_usage_estimate;
self.state.prev_usage = usage;
if (new_usage_estimate > capacity or (new_usage_estimate * 3 < capacity and capacity > padAndExpandSize(min_capacity))) {
const target_capacity = @maximum(min_capacity, new_usage_estimate);
const bigger_buf_size = padAndExpandSize(target_capacity);
if (self.child_allocator.resize(first_node.data, bigger_buf_size)) |buf| {
first_node.data = buf;
} else {
self.child_allocator.free(first_node.data);
self.state.buffer_list.first = null;
_ = try self.createNode(0, target_capacity);
}
}
} else {
_ = try self.createNode(0, @maximum(min_capacity, self.state.usage_estimate));
}
}
fn computeUsageEstimate(self: *TempAllocator, usage: usize, capacity: usize, comptime usage_contraction_rate: u16, comptime usage_expansion_rate: u16, comptime fast_usage_expansion_rate: u16) usize {
const last_usage_estimate = self.state.usage_estimate;
if (last_usage_estimate == 0) {
return usage;
} else if (usage > last_usage_estimate) {
if (usage > capacity and self.state.prev_usage > capacity) {
const delta = @maximum(usage, self.state.prev_usage) - last_usage_estimate;
return last_usage_estimate + scaleUsageDelta(delta, fast_usage_expansion_rate);
} else {
const avg_usage = usage / 2 + self.state.prev_usage / 2;
if (avg_usage > last_usage_estimate) {
return last_usage_estimate + scaleUsageDelta(avg_usage - last_usage_estimate, usage_expansion_rate);
} else {
return last_usage_estimate;
}
}
} else if (usage < last_usage_estimate) {
return last_usage_estimate - scaleUsageDelta(last_usage_estimate - usage, usage_contraction_rate);
} else {
return last_usage_estimate;
}
}
fn scaleUsageDelta(delta: usize, comptime scale: usize) usize {
return @maximum(1, if (delta >= (1 << 20)) delta / 1024 * scale else delta * scale / 1024);
}
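// Filter math behind the half-lives quoted in reset(): each reset moves the
// estimate toward the observed usage by scale/1024 of the remaining gap, so the
// gap halves in roughly log(2)/log(1024/(1024-scale)) resets — about 11 resets
// for the expansion rate of 64 and about 710 resets for the contraction rate
// of 1, matching the comment above.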
fn padAndExpandSize(size: usize) usize {
const padded_size = size + @sizeOf(BufNode) + 16;
return padded_size + padded_size / 2;
}
fn createNode(self: *TempAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const len = padAndExpandSize(prev_len + minimum_size);
const buf = try self.child_allocator.rawAlloc(len, @alignOf(BufNode), 1, @returnAddress());
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
.next = null,
};
self.state.buffer_list.prepend(buf_node);
self.state.end_index = 0;
return buf_node;
}
fn alloc(self: *TempAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
while (true) {
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
const adjusted_addr = mem.alignForward(addr, ptr_align);
const adjusted_index = self.state.end_index + (adjusted_addr - addr);
const new_end_index = adjusted_index + n;
if (new_end_index <= cur_buf.len) {
const result = cur_buf[adjusted_index..new_end_index];
self.state.end_index = new_end_index;
return result;
}
const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
// Try to grow the buffer in-place
cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) orelse {
// Allocate a new node if that's not possible
cur_node = try self.createNode(cur_buf.len, n + ptr_align);
continue;
};
}
}
fn resize(self: *TempAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
_ = buf_align;
_ = len_align;
_ = ret_addr;
const cur_node = self.state.buffer_list.first orelse return null;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
if (@ptrToInt(cur_buf.ptr) + self.state.end_index != @ptrToInt(buf.ptr) + buf.len) {
if (new_len > buf.len) return null;
return new_len;
}
if (buf.len >= new_len) {
self.state.end_index -= buf.len - new_len;
return new_len;
} else if (cur_buf.len - self.state.end_index >= new_len - buf.len) {
self.state.end_index += new_len - buf.len;
return new_len;
} else {
return null;
}
}
fn free(self: *TempAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
_ = buf_align;
_ = ret_addr;
const cur_node = self.state.buffer_list.first orelse return;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
if (@ptrToInt(cur_buf.ptr) + self.state.end_index == @ptrToInt(buf.ptr) + buf.len) {
self.state.end_index -= buf.len;
}
}
}; | limp/allocators.zig |
const std = @import("std");
const build_options = @import("build_options");
const Config = @import("config.zig");
const DocumentStore = @import("document_store.zig");
const DebugAllocator = @import("debug_allocator.zig");
const readRequestHeader = @import("header.zig").readRequestHeader;
const data = @import("data/" ++ build_options.data_version ++ ".zig");
const types = @import("types.zig");
const analysis = @import("analysis.zig");
// Code is largely based off of https://github.com/andersfr/zig-lsp/blob/master/server.zig
var stdout: std.fs.File.OutStream = undefined;
var allocator: *std.mem.Allocator = undefined;
var document_store: DocumentStore = undefined;
const initialize_response =
\\,"result":{"capabilities":{"signatureHelpProvider":{"triggerCharacters":["(",","]},"textDocumentSync":1,"completionProvider":{"resolveProvider":false,"triggerCharacters":[".",":","@"]},"documentHighlightProvider":false,"codeActionProvider":false,"workspace":{"workspaceFolders":{"supported":true}}}}}
;
const not_implemented_response =
\\,"error":{"code":-32601,"message":"NotImplemented"}}
;
const null_result_response =
\\,"result":null}
;
const empty_result_response =
\\,"result":{}}
;
const empty_array_response =
\\,"result":[]}
;
const edit_not_applied_response =
\\,"result":{"applied":false,"failureReason":"feature not implemented"}}
;
const no_completions_response =
\\,"result":{"isIncomplete":false,"items":[]}}
;
/// Sends a request or response
fn send(reqOrRes: var) !void {
// The most memory we'll probably need
var mem_buffer: [1024 * 128]u8 = undefined;
var fbs = std.io.fixedBufferStream(&mem_buffer);
try std.json.stringify(reqOrRes, std.json.StringifyOptions{}, fbs.outStream());
try stdout.print("Content-Length: {}\r\n\r\n", .{fbs.pos});
try stdout.writeAll(fbs.getWritten());
}
fn log(comptime fmt: []const u8, args: var) !void {
// Disable logs on Release modes.
if (std.builtin.mode != .Debug) return;
var message = try std.fmt.allocPrint(allocator, fmt, args);
defer allocator.free(message);
try send(types.Notification{
.method = "window/logMessage",
.params = .{
.LogMessageParams = .{
.@"type" = .Log,
.message = message,
},
},
});
}
fn respondGeneric(id: i64, response: []const u8) !void {
const id_digits = blk: {
if (id == 0) break :blk 1;
var digits: usize = 1;
var value = @divTrunc(id, 10);
while (value != 0) : (value = @divTrunc(value, 10)) {
digits += 1;
}
break :blk digits;
};
// Number of characters that will be printed from this string: len - 3,
// since the escaped `{{` prints as a single brace and the `{}` placeholder is replaced by the id (whose digits are counted separately).
const json_fmt = "{{\"jsonrpc\":\"2.0\",\"id\":{}";
try stdout.print("Content-Length: {}\r\n\r\n" ++ json_fmt, .{ response.len + id_digits + json_fmt.len - 3, id });
try stdout.writeAll(response);
}
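// Worked example of the length arithmetic above: for id = 5, json_fmt prints as
// `{"jsonrpc":"2.0","id":` (json_fmt.len - 3 characters), then 1 digit for the
// id, then the caller-supplied `response` tail — hence
// response.len + id_digits + json_fmt.len - 3.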
// TODO: Is this correct or can we get a better end?
fn astLocationToRange(loc: std.zig.ast.Tree.Location) types.Range {
return .{
.start = .{
.line = @intCast(i64, loc.line),
.character = @intCast(i64, loc.column),
},
.end = .{
.line = @intCast(i64, loc.line),
.character = @intCast(i64, loc.column),
},
};
}
fn publishDiagnostics(handle: DocumentStore.Handle, config: Config) !void {
const tree = try handle.tree(allocator);
defer tree.deinit();
// Use an arena for our local memory allocations.
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
var diagnostics = std.ArrayList(types.Diagnostic).init(&arena.allocator);
var error_it = tree.errors.iterator(0);
while (error_it.next()) |err| {
const loc = tree.tokenLocation(0, err.loc());
var mem_buffer: [256]u8 = undefined;
var fbs = std.io.fixedBufferStream(&mem_buffer);
try tree.renderError(err, fbs.outStream());
try diagnostics.append(.{
.range = astLocationToRange(loc),
.severity = .Error,
.code = @tagName(err.*),
.source = "zls",
.message = try std.mem.dupe(&arena.allocator, u8, fbs.getWritten()),
// .relatedInformation = undefined
});
}
if (tree.errors.len == 0) {
var decls = tree.root_node.decls.iterator(0);
while (decls.next()) |decl_ptr| {
var decl = decl_ptr.*;
switch (decl.id) {
.FnProto => blk: {
const func = decl.cast(std.zig.ast.Node.FnProto).?;
const is_extern = func.extern_export_inline_token != null;
if (is_extern)
break :blk;
if (config.warn_style) {
if (func.name_token) |name_token| {
const loc = tree.tokenLocation(0, name_token);
const is_type_function = analysis.isTypeFunction(tree, func);
const func_name = tree.tokenSlice(name_token);
if (!is_type_function and !analysis.isCamelCase(func_name)) {
try diagnostics.append(.{
.range = astLocationToRange(loc),
.severity = .Information,
.code = "BadStyle",
.source = "zls",
.message = "Functions should be camelCase",
});
} else if (is_type_function and !analysis.isPascalCase(func_name)) {
try diagnostics.append(.{
.range = astLocationToRange(loc),
.severity = .Information,
.code = "BadStyle",
.source = "zls",
.message = "Type functions should be PascalCase",
});
}
}
}
},
else => {},
}
}
}
try send(types.Notification{
.method = "textDocument/publishDiagnostics",
.params = .{
.PublishDiagnosticsParams = .{
.uri = handle.uri(),
.diagnostics = diagnostics.items,
},
},
});
}
fn containerToCompletion(list: *std.ArrayList(types.CompletionItem), tree: *std.zig.ast.Tree, container: *std.zig.ast.Node, config: Config) !void {
var index: usize = 0;
while (container.iterate(index)) |child_node| : (index+=1) {
if (analysis.isNodePublic(tree, child_node)) {
try nodeToCompletion(list, tree, child_node, config);
}
}
}
fn nodeToCompletion(list: *std.ArrayList(types.CompletionItem), tree: *std.zig.ast.Tree, node: *std.zig.ast.Node, config: Config) error{OutOfMemory}!void {
var doc = if (try analysis.getDocComments(list.allocator, tree, node)) |doc_comments|
types.MarkupContent{
.kind = .Markdown,
.value = doc_comments,
}
else
null;
switch (node.id) {
.ErrorSetDecl, .Root, .ContainerDecl => {
try containerToCompletion(list, tree, node, config);
},
.FnProto => {
const func = node.cast(std.zig.ast.Node.FnProto).?;
if (func.name_token) |name_token| {
const insert_text = if (config.enable_snippets)
try analysis.getFunctionSnippet(list.allocator, tree, func)
else
null;
const is_type_function = analysis.isTypeFunction(tree, func);
try list.append(.{
.label = tree.tokenSlice(name_token),
.kind = if (is_type_function) .Struct else .Function,
.documentation = doc,
.detail = analysis.getFunctionSignature(tree, func),
.insertText = insert_text,
.insertTextFormat = if (config.enable_snippets) .Snippet else .PlainText,
});
}
},
.VarDecl => {
const var_decl = node.cast(std.zig.ast.Node.VarDecl).?;
const is_const = tree.tokens.at(var_decl.mut_token).id == .Keyword_const;
try list.append(.{
.label = tree.tokenSlice(var_decl.name_token),
.kind = if (is_const) .Constant else .Variable,
.documentation = doc,
.detail = analysis.getVariableSignature(tree, var_decl),
});
},
.ParamDecl => {
const param = node.cast(std.zig.ast.Node.ParamDecl).?;
if (param.name_token) |name_token|
try list.append(.{
.label = tree.tokenSlice(name_token),
.kind = .Constant,
.documentation = doc,
.detail = analysis.getParamSignature(tree, param),
});
},
.PrefixOp => {
try list.append(.{
.label = "len",
.kind = .Field,
});
try list.append(.{
.label = "ptr",
.kind = .Field,
});
},
.StringLiteral => {
try list.append(.{
.label = "len",
.kind = .Field,
});
},
else => if (analysis.nodeToString(tree, node)) |string| {
try list.append(.{
.label = string,
.kind = .Field,
.documentation = doc,
});
},
}
}
fn completeGlobal(id: i64, pos_index: usize, handle: DocumentStore.Handle, config: Config) !void {
var tree = try handle.tree(allocator);
defer tree.deinit();
// We use a local arena allocator to deallocate all temporary data without iterating
var arena = std.heap.ArenaAllocator.init(allocator);
var completions = std.ArrayList(types.CompletionItem).init(&arena.allocator);
// Deallocate all temporary data.
defer arena.deinit();
// var decls = tree.root_node.decls.iterator(0);
var decls = try analysis.declsFromIndex(&arena.allocator, tree, pos_index);
for (decls) |decl_ptr| {
var decl = decl_ptr.*;
try nodeToCompletion(&completions, tree, decl_ptr, config);
}
try send(types.Response{
.id = .{ .Integer = id },
.result = .{
.CompletionList = .{
.isIncomplete = false,
.items = completions.items,
},
},
});
}
fn completeFieldAccess(id: i64, handle: *DocumentStore.Handle, position: types.Position, line_start_idx: usize, config: Config) !void {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
var analysis_ctx = try document_store.analysisContext(handle, &arena, position);
defer analysis_ctx.deinit();
var completions = std.ArrayList(types.CompletionItem).init(&arena.allocator);
const line = try handle.document.getLine(@intCast(usize, position.line));
var tokenizer = std.zig.Tokenizer.init(line[line_start_idx..]);
// var decls = try analysis.declsFromIndex(&arena.allocator, analysis_ctx.tree, try handle.document.positionToIndex(position));
if (analysis.getFieldAccessTypeNode(&analysis_ctx, &tokenizer)) |node| {
try nodeToCompletion(&completions, analysis_ctx.tree, node, config);
}
try send(types.Response{
.id = .{ .Integer = id },
.result = .{
.CompletionList = .{
.isIncomplete = false,
.items = completions.items,
},
},
});
}
// Compute builtin completions at comptime.
const builtin_completions = block: {
@setEvalBranchQuota(3_500);
const CompletionList = [data.builtins.len]types.CompletionItem;
var with_snippets: CompletionList = undefined;
var without_snippets: CompletionList = undefined;
for (data.builtins) |builtin, i| {
const cutoff = std.mem.indexOf(u8, builtin, "(") orelse builtin.len;
const base_completion = types.CompletionItem{
.label = builtin[0..cutoff],
.kind = .Function,
.filterText = builtin[1..cutoff],
.detail = data.builtin_details[i],
.documentation = .{
.kind = .Markdown,
.value = data.builtin_docs[i],
},
};
with_snippets[i] = base_completion;
with_snippets[i].insertText = builtin[1..];
with_snippets[i].insertTextFormat = .Snippet;
without_snippets[i] = base_completion;
without_snippets[i].insertText = builtin[1..cutoff];
}
break :block [2]CompletionList{
without_snippets, with_snippets,
};
};
const PositionContext = union(enum) {
builtin,
comment,
string_literal,
field_access: usize,
var_access,
other,
empty,
};
const token_separators = [_]u8{
' ', '\t', '(', ')', '[', ']',
'{', '}', '|', '=', '!', ';',
',', '?', ':', '%', '+', '*',
'>', '<', '~', '-', '/', '&',
};
fn documentPositionContext(doc: types.TextDocument, pos_index: usize) PositionContext {
// First extract the whole current line up to the cursor.
var curr_position = pos_index;
while (curr_position > 0) : (curr_position -= 1) {
if (doc.text[curr_position - 1] == '\n') break;
}
var line = doc.text[curr_position .. pos_index + 1];
// Strip any leading whitespace.
var skipped_ws: usize = 0;
while (skipped_ws < line.len and (line[skipped_ws] == ' ' or line[skipped_ws] == '\t')) : (skipped_ws += 1) {}
if (skipped_ws >= line.len) return .empty;
line = line[skipped_ws..];
// Quick exit for comment lines and multi line string literals.
if (line.len >= 2 and line[0] == '/' and line[1] == '/')
return .comment;
if (line.len >= 2 and line[0] == '\\' and line[1] == '\\')
return .string_literal;
// TODO: This does not detect if we are in a string literal over multiple lines.
// Find out what context we are in.
// Go over the current line character by character
// and determine the context.
curr_position = 0;
var expr_start: usize = skipped_ws;
var new_token = true;
var context: PositionContext = .other;
var string_pop_ctx: PositionContext = .other;
while (curr_position < line.len) : (curr_position += 1) {
const c = line[curr_position];
const next_char = if (curr_position < line.len - 1) line[curr_position + 1] else null;
if (context != .string_literal and c == '"') {
expr_start = curr_position + skipped_ws;
context = .string_literal;
continue;
}
if (context == .string_literal) {
// Skip over escaped quotes
if (c == '\\' and next_char != null and next_char.? == '"') {
curr_position += 1;
} else if (c == '"') {
context = string_pop_ctx;
string_pop_ctx = .other;
new_token = true;
}
continue;
}
if (c == '/' and next_char != null and next_char.? == '/') {
context = .comment;
break;
}
if (std.mem.indexOfScalar(u8, &token_separators, c) != null) {
expr_start = curr_position + skipped_ws + 1;
new_token = true;
context = .other;
continue;
}
if (c == '.' and (!new_token or context == .string_literal)) {
new_token = true;
if (next_char != null and next_char.? == '.') continue;
context = .{ .field_access = expr_start };
continue;
}
if (new_token) {
const access_ctx: PositionContext = if (context == .field_access)
.{ .field_access = expr_start }
else
.var_access;
new_token = false;
if (c == '_' or std.ascii.isAlpha(c)) {
context = access_ctx;
} else if (c == '@') {
// This checks for @"..." identifiers by controlling
// the context the string will set after it is over.
if (next_char != null and next_char.? == '"') {
string_pop_ctx = access_ctx;
}
context = .builtin;
} else {
context = .other;
}
continue;
}
if (context == .field_access or context == .var_access or context == .builtin) {
if (c != '_' and !std.ascii.isAlNum(c)) {
context = .other;
}
continue;
}
context = .other;
}
return context;
}
fn processJsonRpc(parser: *std.json.Parser, json: []const u8, config: Config) !void {
var tree = try parser.parse(json);
defer tree.deinit();
const root = tree.root;
std.debug.assert(root.Object.getValue("method") != null);
const method = root.Object.getValue("method").?.String;
const id = if (root.Object.getValue("id")) |id| id.Integer else 0;
const params = root.Object.getValue("params").?.Object;
// Core
if (std.mem.eql(u8, method, "initialize")) {
try respondGeneric(id, initialize_response);
} else if (std.mem.eql(u8, method, "initialized")) {
// noop
} else if (std.mem.eql(u8, method, "$/cancelRequest")) {
// noop
}
// File changes
else if (std.mem.eql(u8, method, "textDocument/didOpen")) {
const document = params.getValue("textDocument").?.Object;
const uri = document.getValue("uri").?.String;
const text = document.getValue("text").?.String;
const handle = try document_store.openDocument(uri, text);
try publishDiagnostics(handle.*, config);
} else if (std.mem.eql(u8, method, "textDocument/didChange")) {
const text_document = params.getValue("textDocument").?.Object;
const uri = text_document.getValue("uri").?.String;
const content_changes = params.getValue("contentChanges").?.Array;
const handle = document_store.getHandle(uri) orelse {
try log("Trying to change non existent document {}", .{uri});
return;
};
try document_store.applyChanges(handle, content_changes);
try publishDiagnostics(handle.*, config);
} else if (std.mem.eql(u8, method, "textDocument/didSave")) {
// noop
} else if (std.mem.eql(u8, method, "textDocument/didClose")) {
const document = params.getValue("textDocument").?.Object;
const uri = document.getValue("uri").?.String;
document_store.closeDocument(uri);
}
// Autocomplete / Signatures
else if (std.mem.eql(u8, method, "textDocument/completion")) {
const text_document = params.getValue("textDocument").?.Object;
const uri = text_document.getValue("uri").?.String;
const position = params.getValue("position").?.Object;
const handle = document_store.getHandle(uri) orelse {
try log("Trying to complete in non existent document {}", .{uri});
return;
};
const pos = types.Position{
.line = position.getValue("line").?.Integer,
.character = position.getValue("character").?.Integer - 1,
};
if (pos.character >= 0) {
const pos_index = try handle.document.positionToIndex(pos);
const pos_context = documentPositionContext(handle.document, pos_index);
switch (pos_context) {
.builtin => try send(types.Response{
.id = .{ .Integer = id },
.result = .{
.CompletionList = .{
.isIncomplete = false,
.items = builtin_completions[@boolToInt(config.enable_snippets)][0..],
},
},
}),
.var_access, .empty => try completeGlobal(id, pos_index, handle.*, config),
.field_access => |start_idx| try completeFieldAccess(id, handle, pos, start_idx, config),
else => try respondGeneric(id, no_completions_response),
}
} else {
try respondGeneric(id, no_completions_response);
}
} else if (std.mem.eql(u8, method, "textDocument/signatureHelp")) {
// try respondGeneric(id,
// \\,"result":{"signatures":[{
// \\"label": "nameOfFunction(aNumber: u8)",
// \\"documentation": {"kind": "markdown", "value": "Description of the function in **Markdown**!"},
// \\"parameters": [
// \\{"label": [15, 27], "documentation": {"kind": "markdown", "value": "An argument"}}
// \\]
// \\}]}}
// );
try respondGeneric(id,
\\,"result":{"signatures":[]}}
);
} else if (root.Object.getValue("id")) |_| {
try log("Method with return value not implemented: {}", .{method});
try respondGeneric(id, not_implemented_response);
} else {
try log("Method without return value not implemented: {}", .{method});
}
}
var debug_alloc_state: DebugAllocator = undefined;
// We can now use if (debug_alloc) |dbg| { ... } as a comptime check.
const debug_alloc: ?*DebugAllocator = if (build_options.allocation_info) &debug_alloc_state else null;
pub fn main() anyerror!void {
// TODO: Use a better purpose general allocator once std has one.
// Probably after the generic composable allocators PR?
// This is not too bad for now since most allocations happen in local arenas.
allocator = std.heap.page_allocator;
if (build_options.allocation_info) {
// TODO: Use a better debugging allocator, track size in bytes, memory reserved etc..
// Initialize the leak counting allocator.
debug_alloc_state = DebugAllocator.init(allocator);
allocator = &debug_alloc_state.allocator;
}
// Init buffer for stdin read
var buffer = std.ArrayList(u8).init(allocator);
defer buffer.deinit();
try buffer.resize(4096);
// Init global vars
const stdin = std.io.getStdIn().inStream();
stdout = std.io.getStdOut().outStream();
// Read the configuration, if any.
const config_parse_options = std.json.ParseOptions{ .allocator = allocator };
var config = Config{};
defer std.json.parseFree(Config, config, config_parse_options);
// TODO: Investigate using std.fs.Watch to detect writes to the config and reload it.
config_read: {
var exec_dir_bytes: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const exec_dir_path = std.fs.selfExeDirPath(&exec_dir_bytes) catch break :config_read;
var exec_dir = std.fs.cwd().openDir(exec_dir_path, .{}) catch break :config_read;
defer exec_dir.close();
const conf_file = exec_dir.openFile("zls.json", .{}) catch break :config_read;
defer conf_file.close();
// Max 1MB
const file_buf = conf_file.inStream().readAllAlloc(allocator, 0x1000000) catch break :config_read;
defer allocator.free(file_buf);
// TODO: Better errors? Doesn't seem like std.json can provide us positions or context.
config = std.json.parse(Config, &std.json.TokenStream.init(file_buf), config_parse_options) catch |err| {
std.debug.warn("Error while parsing configuration file: {}\nUsing default config.\n", .{err});
break :config_read;
};
}
if (config.zig_lib_path) |zig_lib_path| {
if (!std.fs.path.isAbsolute(zig_lib_path)) {
std.debug.warn("zig library path is not absolute, defaulting to null.\n", .{});
allocator.free(zig_lib_path);
config.zig_lib_path = null;
}
}
try document_store.init(allocator, config.zig_lib_path);
defer document_store.deinit();
// This JSON parser is passed to processJsonRpc and reset.
var json_parser = std.json.Parser.init(allocator, false);
defer json_parser.deinit();
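// Main loop: read the LSP header to learn the Content-Length, then read exactly
// that many bytes of JSON-RPC payload and hand it to processJsonRpc.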
while (true) {
const headers = readRequestHeader(allocator, stdin) catch |err| {
try log("{}; exiting!", .{@errorName(err)});
return;
};
defer headers.deinit(allocator);
const buf = try allocator.alloc(u8, headers.content_length);
defer allocator.free(buf);
try stdin.readNoEof(buf);
try processJsonRpc(&json_parser, buf, config);
json_parser.reset();
if (debug_alloc) |dbg| {
try log("{}", .{dbg.info});
}
}
} | src/main.zig |
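//! GraphemeBase: a generated lookup table for a Unicode code point property
//! (presumably Grapheme_Base, given the name). The flags below cover code
//! points lo..hi (32..201546); the runs of `true` values appear to be emitted
//! from the Unicode Character Database rather than written by hand.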
const std = @import("std");
const mem = std.mem;
const GraphemeBase = @This();
allocator: *mem.Allocator,
array: []bool,
lo: u21 = 32,
hi: u21 = 201546,
pub fn init(allocator: *mem.Allocator) !GraphemeBase {
var instance = GraphemeBase{
.allocator = allocator,
.array = try allocator.alloc(bool, 201515),
};
mem.set(bool, instance.array, false);
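// Assumption: each index below is an offset from `lo`, i.e. `codepoint - 32`,
// so `instance.array[0]` flags U+0020 (SPACE).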
var index: u21 = 0;
instance.array[0] = true;
index = 1;
while (index <= 3) : (index += 1) {
instance.array[index] = true;
}
instance.array[4] = true;
index = 5;
while (index <= 7) : (index += 1) {
instance.array[index] = true;
}
instance.array[8] = true;
instance.array[9] = true;
instance.array[10] = true;
instance.array[11] = true;
instance.array[12] = true;
instance.array[13] = true;
index = 14;
while (index <= 15) : (index += 1) {
instance.array[index] = true;
}
index = 16;
while (index <= 25) : (index += 1) {
instance.array[index] = true;
}
index = 26;
while (index <= 27) : (index += 1) {
instance.array[index] = true;
}
index = 28;
while (index <= 30) : (index += 1) {
instance.array[index] = true;
}
index = 31;
while (index <= 32) : (index += 1) {
instance.array[index] = true;
}
index = 33;
while (index <= 58) : (index += 1) {
instance.array[index] = true;
}
instance.array[59] = true;
instance.array[60] = true;
instance.array[61] = true;
instance.array[62] = true;
instance.array[63] = true;
instance.array[64] = true;
index = 65;
while (index <= 90) : (index += 1) {
instance.array[index] = true;
}
instance.array[91] = true;
instance.array[92] = true;
instance.array[93] = true;
instance.array[94] = true;
instance.array[128] = true;
instance.array[129] = true;
index = 130;
while (index <= 133) : (index += 1) {
instance.array[index] = true;
}
instance.array[134] = true;
instance.array[135] = true;
instance.array[136] = true;
instance.array[137] = true;
instance.array[138] = true;
instance.array[139] = true;
instance.array[140] = true;
instance.array[142] = true;
instance.array[143] = true;
instance.array[144] = true;
instance.array[145] = true;
index = 146;
while (index <= 147) : (index += 1) {
instance.array[index] = true;
}
instance.array[148] = true;
instance.array[149] = true;
index = 150;
while (index <= 151) : (index += 1) {
instance.array[index] = true;
}
instance.array[152] = true;
instance.array[153] = true;
instance.array[154] = true;
instance.array[155] = true;
index = 156;
while (index <= 158) : (index += 1) {
instance.array[index] = true;
}
instance.array[159] = true;
index = 160;
while (index <= 182) : (index += 1) {
instance.array[index] = true;
}
instance.array[183] = true;
index = 184;
while (index <= 214) : (index += 1) {
instance.array[index] = true;
}
instance.array[215] = true;
index = 216;
while (index <= 410) : (index += 1) {
instance.array[index] = true;
}
instance.array[411] = true;
index = 412;
while (index <= 415) : (index += 1) {
instance.array[index] = true;
}
index = 416;
while (index <= 419) : (index += 1) {
instance.array[index] = true;
}
index = 420;
while (index <= 627) : (index += 1) {
instance.array[index] = true;
}
instance.array[628] = true;
index = 629;
while (index <= 655) : (index += 1) {
instance.array[index] = true;
}
index = 656;
while (index <= 673) : (index += 1) {
instance.array[index] = true;
}
index = 674;
while (index <= 677) : (index += 1) {
instance.array[index] = true;
}
index = 678;
while (index <= 689) : (index += 1) {
instance.array[index] = true;
}
index = 690;
while (index <= 703) : (index += 1) {
instance.array[index] = true;
}
index = 704;
while (index <= 708) : (index += 1) {
instance.array[index] = true;
}
index = 709;
while (index <= 715) : (index += 1) {
instance.array[index] = true;
}
instance.array[716] = true;
instance.array[717] = true;
instance.array[718] = true;
index = 719;
while (index <= 735) : (index += 1) {
instance.array[index] = true;
}
index = 848;
while (index <= 851) : (index += 1) {
instance.array[index] = true;
}
instance.array[852] = true;
instance.array[853] = true;
index = 854;
while (index <= 855) : (index += 1) {
instance.array[index] = true;
}
instance.array[858] = true;
index = 859;
while (index <= 861) : (index += 1) {
instance.array[index] = true;
}
instance.array[862] = true;
instance.array[863] = true;
index = 868;
while (index <= 869) : (index += 1) {
instance.array[index] = true;
}
instance.array[870] = true;
instance.array[871] = true;
index = 872;
while (index <= 874) : (index += 1) {
instance.array[index] = true;
}
instance.array[876] = true;
index = 878;
while (index <= 897) : (index += 1) {
instance.array[index] = true;
}
index = 899;
while (index <= 981) : (index += 1) {
instance.array[index] = true;
}
instance.array[982] = true;
index = 983;
while (index <= 1121) : (index += 1) {
instance.array[index] = true;
}
instance.array[1122] = true;
index = 1130;
while (index <= 1295) : (index += 1) {
instance.array[index] = true;
}
index = 1297;
while (index <= 1334) : (index += 1) {
instance.array[index] = true;
}
instance.array[1337] = true;
index = 1338;
while (index <= 1343) : (index += 1) {
instance.array[index] = true;
}
index = 1344;
while (index <= 1384) : (index += 1) {
instance.array[index] = true;
}
instance.array[1385] = true;
instance.array[1386] = true;
index = 1389;
while (index <= 1390) : (index += 1) {
instance.array[index] = true;
}
instance.array[1391] = true;
instance.array[1438] = true;
instance.array[1440] = true;
instance.array[1443] = true;
instance.array[1446] = true;
index = 1456;
while (index <= 1482) : (index += 1) {
instance.array[index] = true;
}
index = 1487;
while (index <= 1490) : (index += 1) {
instance.array[index] = true;
}
index = 1491;
while (index <= 1492) : (index += 1) {
instance.array[index] = true;
}
index = 1510;
while (index <= 1512) : (index += 1) {
instance.array[index] = true;
}
index = 1513;
while (index <= 1514) : (index += 1) {
instance.array[index] = true;
}
instance.array[1515] = true;
index = 1516;
while (index <= 1517) : (index += 1) {
instance.array[index] = true;
}
index = 1518;
while (index <= 1519) : (index += 1) {
instance.array[index] = true;
}
instance.array[1531] = true;
index = 1534;
while (index <= 1535) : (index += 1) {
instance.array[index] = true;
}
index = 1536;
while (index <= 1567) : (index += 1) {
instance.array[index] = true;
}
instance.array[1568] = true;
index = 1569;
while (index <= 1578) : (index += 1) {
instance.array[index] = true;
}
index = 1600;
while (index <= 1609) : (index += 1) {
instance.array[index] = true;
}
index = 1610;
while (index <= 1613) : (index += 1) {
instance.array[index] = true;
}
index = 1614;
while (index <= 1615) : (index += 1) {
instance.array[index] = true;
}
index = 1617;
while (index <= 1715) : (index += 1) {
instance.array[index] = true;
}
instance.array[1716] = true;
instance.array[1717] = true;
instance.array[1726] = true;
index = 1733;
while (index <= 1734) : (index += 1) {
instance.array[index] = true;
}
instance.array[1737] = true;
index = 1742;
while (index <= 1743) : (index += 1) {
instance.array[index] = true;
}
index = 1744;
while (index <= 1753) : (index += 1) {
instance.array[index] = true;
}
index = 1754;
while (index <= 1756) : (index += 1) {
instance.array[index] = true;
}
index = 1757;
while (index <= 1758) : (index += 1) {
instance.array[index] = true;
}
instance.array[1759] = true;
index = 1760;
while (index <= 1773) : (index += 1) {
instance.array[index] = true;
}
instance.array[1776] = true;
index = 1778;
while (index <= 1807) : (index += 1) {
instance.array[index] = true;
}
index = 1837;
while (index <= 1925) : (index += 1) {
instance.array[index] = true;
}
instance.array[1937] = true;
index = 1952;
while (index <= 1961) : (index += 1) {
instance.array[index] = true;
}
index = 1962;
while (index <= 1994) : (index += 1) {
instance.array[index] = true;
}
index = 2004;
while (index <= 2005) : (index += 1) {
instance.array[index] = true;
}
instance.array[2006] = true;
index = 2007;
while (index <= 2009) : (index += 1) {
instance.array[index] = true;
}
instance.array[2010] = true;
index = 2014;
while (index <= 2015) : (index += 1) {
instance.array[index] = true;
}
index = 2016;
while (index <= 2037) : (index += 1) {
instance.array[index] = true;
}
instance.array[2042] = true;
instance.array[2052] = true;
instance.array[2056] = true;
index = 2064;
while (index <= 2078) : (index += 1) {
instance.array[index] = true;
}
index = 2080;
while (index <= 2104) : (index += 1) {
instance.array[index] = true;
}
instance.array[2110] = true;
index = 2112;
while (index <= 2122) : (index += 1) {
instance.array[index] = true;
}
index = 2176;
while (index <= 2196) : (index += 1) {
instance.array[index] = true;
}
index = 2198;
while (index <= 2215) : (index += 1) {
instance.array[index] = true;
}
instance.array[2275] = true;
index = 2276;
while (index <= 2329) : (index += 1) {
instance.array[index] = true;
}
instance.array[2331] = true;
instance.array[2333] = true;
index = 2334;
while (index <= 2336) : (index += 1) {
instance.array[index] = true;
}
index = 2345;
while (index <= 2348) : (index += 1) {
instance.array[index] = true;
}
index = 2350;
while (index <= 2351) : (index += 1) {
instance.array[index] = true;
}
instance.array[2352] = true;
index = 2360;
while (index <= 2369) : (index += 1) {
instance.array[index] = true;
}
index = 2372;
while (index <= 2373) : (index += 1) {
instance.array[index] = true;
}
index = 2374;
while (index <= 2383) : (index += 1) {
instance.array[index] = true;
}
instance.array[2384] = true;
instance.array[2385] = true;
index = 2386;
while (index <= 2400) : (index += 1) {
instance.array[index] = true;
}
index = 2402;
while (index <= 2403) : (index += 1) {
instance.array[index] = true;
}
index = 2405;
while (index <= 2412) : (index += 1) {
instance.array[index] = true;
}
index = 2415;
while (index <= 2416) : (index += 1) {
instance.array[index] = true;
}
index = 2419;
while (index <= 2440) : (index += 1) {
instance.array[index] = true;
}
index = 2442;
while (index <= 2448) : (index += 1) {
instance.array[index] = true;
}
instance.array[2450] = true;
index = 2454;
while (index <= 2457) : (index += 1) {
instance.array[index] = true;
}
instance.array[2461] = true;
index = 2463;
while (index <= 2464) : (index += 1) {
instance.array[index] = true;
}
index = 2471;
while (index <= 2472) : (index += 1) {
instance.array[index] = true;
}
index = 2475;
while (index <= 2476) : (index += 1) {
instance.array[index] = true;
}
instance.array[2478] = true;
index = 2492;
while (index <= 2493) : (index += 1) {
instance.array[index] = true;
}
index = 2495;
while (index <= 2497) : (index += 1) {
instance.array[index] = true;
}
index = 2502;
while (index <= 2511) : (index += 1) {
instance.array[index] = true;
}
index = 2512;
while (index <= 2513) : (index += 1) {
instance.array[index] = true;
}
index = 2514;
while (index <= 2515) : (index += 1) {
instance.array[index] = true;
}
index = 2516;
while (index <= 2521) : (index += 1) {
instance.array[index] = true;
}
instance.array[2522] = true;
instance.array[2523] = true;
instance.array[2524] = true;
instance.array[2525] = true;
instance.array[2531] = true;
index = 2533;
while (index <= 2538) : (index += 1) {
instance.array[index] = true;
}
index = 2543;
while (index <= 2544) : (index += 1) {
instance.array[index] = true;
}
index = 2547;
while (index <= 2568) : (index += 1) {
instance.array[index] = true;
}
index = 2570;
while (index <= 2576) : (index += 1) {
instance.array[index] = true;
}
index = 2578;
while (index <= 2579) : (index += 1) {
instance.array[index] = true;
}
index = 2581;
while (index <= 2582) : (index += 1) {
instance.array[index] = true;
}
index = 2584;
while (index <= 2585) : (index += 1) {
instance.array[index] = true;
}
index = 2590;
while (index <= 2592) : (index += 1) {
instance.array[index] = true;
}
index = 2617;
while (index <= 2620) : (index += 1) {
instance.array[index] = true;
}
instance.array[2622] = true;
index = 2630;
while (index <= 2639) : (index += 1) {
instance.array[index] = true;
}
index = 2642;
while (index <= 2644) : (index += 1) {
instance.array[index] = true;
}
instance.array[2646] = true;
instance.array[2659] = true;
index = 2661;
while (index <= 2669) : (index += 1) {
instance.array[index] = true;
}
index = 2671;
while (index <= 2673) : (index += 1) {
instance.array[index] = true;
}
index = 2675;
while (index <= 2696) : (index += 1) {
instance.array[index] = true;
}
index = 2698;
while (index <= 2704) : (index += 1) {
instance.array[index] = true;
}
index = 2706;
while (index <= 2707) : (index += 1) {
instance.array[index] = true;
}
index = 2709;
while (index <= 2713) : (index += 1) {
instance.array[index] = true;
}
instance.array[2717] = true;
index = 2718;
while (index <= 2720) : (index += 1) {
instance.array[index] = true;
}
instance.array[2729] = true;
index = 2731;
while (index <= 2732) : (index += 1) {
instance.array[index] = true;
}
instance.array[2736] = true;
index = 2752;
while (index <= 2753) : (index += 1) {
instance.array[index] = true;
}
index = 2758;
while (index <= 2767) : (index += 1) {
instance.array[index] = true;
}
instance.array[2768] = true;
instance.array[2769] = true;
instance.array[2777] = true;
index = 2786;
while (index <= 2787) : (index += 1) {
instance.array[index] = true;
}
index = 2789;
while (index <= 2796) : (index += 1) {
instance.array[index] = true;
}
index = 2799;
while (index <= 2800) : (index += 1) {
instance.array[index] = true;
}
index = 2803;
while (index <= 2824) : (index += 1) {
instance.array[index] = true;
}
index = 2826;
while (index <= 2832) : (index += 1) {
instance.array[index] = true;
}
index = 2834;
while (index <= 2835) : (index += 1) {
instance.array[index] = true;
}
index = 2837;
while (index <= 2841) : (index += 1) {
instance.array[index] = true;
}
instance.array[2845] = true;
instance.array[2848] = true;
index = 2855;
while (index <= 2856) : (index += 1) {
instance.array[index] = true;
}
index = 2859;
while (index <= 2860) : (index += 1) {
instance.array[index] = true;
}
index = 2876;
while (index <= 2877) : (index += 1) {
instance.array[index] = true;
}
index = 2879;
while (index <= 2881) : (index += 1) {
instance.array[index] = true;
}
index = 2886;
while (index <= 2895) : (index += 1) {
instance.array[index] = true;
}
instance.array[2896] = true;
instance.array[2897] = true;
index = 2898;
while (index <= 2903) : (index += 1) {
instance.array[index] = true;
}
instance.array[2915] = true;
index = 2917;
while (index <= 2922) : (index += 1) {
instance.array[index] = true;
}
index = 2926;
while (index <= 2928) : (index += 1) {
instance.array[index] = true;
}
index = 2930;
while (index <= 2933) : (index += 1) {
instance.array[index] = true;
}
index = 2937;
while (index <= 2938) : (index += 1) {
instance.array[index] = true;
}
instance.array[2940] = true;
index = 2942;
while (index <= 2943) : (index += 1) {
instance.array[index] = true;
}
index = 2947;
while (index <= 2948) : (index += 1) {
instance.array[index] = true;
}
index = 2952;
while (index <= 2954) : (index += 1) {
instance.array[index] = true;
}
index = 2958;
while (index <= 2969) : (index += 1) {
instance.array[index] = true;
}
instance.array[2975] = true;
index = 2977;
while (index <= 2978) : (index += 1) {
instance.array[index] = true;
}
index = 2982;
while (index <= 2984) : (index += 1) {
instance.array[index] = true;
}
index = 2986;
while (index <= 2988) : (index += 1) {
instance.array[index] = true;
}
instance.array[2992] = true;
index = 3014;
while (index <= 3023) : (index += 1) {
instance.array[index] = true;
}
index = 3024;
while (index <= 3026) : (index += 1) {
instance.array[index] = true;
}
index = 3027;
while (index <= 3032) : (index += 1) {
instance.array[index] = true;
}
instance.array[3033] = true;
instance.array[3034] = true;
index = 3041;
while (index <= 3043) : (index += 1) {
instance.array[index] = true;
}
index = 3045;
while (index <= 3052) : (index += 1) {
instance.array[index] = true;
}
index = 3054;
while (index <= 3056) : (index += 1) {
instance.array[index] = true;
}
index = 3058;
while (index <= 3080) : (index += 1) {
instance.array[index] = true;
}
index = 3082;
while (index <= 3097) : (index += 1) {
instance.array[index] = true;
}
instance.array[3101] = true;
index = 3105;
while (index <= 3108) : (index += 1) {
instance.array[index] = true;
}
index = 3128;
while (index <= 3130) : (index += 1) {
instance.array[index] = true;
}
index = 3136;
while (index <= 3137) : (index += 1) {
instance.array[index] = true;
}
index = 3142;
while (index <= 3151) : (index += 1) {
instance.array[index] = true;
}
instance.array[3159] = true;
index = 3160;
while (index <= 3166) : (index += 1) {
instance.array[index] = true;
}
instance.array[3167] = true;
instance.array[3168] = true;
index = 3170;
while (index <= 3171) : (index += 1) {
instance.array[index] = true;
}
instance.array[3172] = true;
index = 3173;
while (index <= 3180) : (index += 1) {
instance.array[index] = true;
}
index = 3182;
while (index <= 3184) : (index += 1) {
instance.array[index] = true;
}
index = 3186;
while (index <= 3208) : (index += 1) {
instance.array[index] = true;
}
index = 3210;
while (index <= 3219) : (index += 1) {
instance.array[index] = true;
}
index = 3221;
while (index <= 3225) : (index += 1) {
instance.array[index] = true;
}
instance.array[3229] = true;
instance.array[3230] = true;
index = 3232;
while (index <= 3233) : (index += 1) {
instance.array[index] = true;
}
index = 3235;
while (index <= 3236) : (index += 1) {
instance.array[index] = true;
}
index = 3239;
while (index <= 3240) : (index += 1) {
instance.array[index] = true;
}
index = 3242;
while (index <= 3243) : (index += 1) {
instance.array[index] = true;
}
instance.array[3262] = true;
index = 3264;
while (index <= 3265) : (index += 1) {
instance.array[index] = true;
}
index = 3270;
while (index <= 3279) : (index += 1) {
instance.array[index] = true;
}
index = 3281;
while (index <= 3282) : (index += 1) {
instance.array[index] = true;
}
index = 3298;
while (index <= 3299) : (index += 1) {
instance.array[index] = true;
}
index = 3300;
while (index <= 3308) : (index += 1) {
instance.array[index] = true;
}
index = 3310;
while (index <= 3312) : (index += 1) {
instance.array[index] = true;
}
index = 3314;
while (index <= 3354) : (index += 1) {
instance.array[index] = true;
}
instance.array[3357] = true;
index = 3359;
while (index <= 3360) : (index += 1) {
instance.array[index] = true;
}
index = 3366;
while (index <= 3368) : (index += 1) {
instance.array[index] = true;
}
index = 3370;
while (index <= 3372) : (index += 1) {
instance.array[index] = true;
}
instance.array[3374] = true;
instance.array[3375] = true;
index = 3380;
while (index <= 3382) : (index += 1) {
instance.array[index] = true;
}
index = 3384;
while (index <= 3390) : (index += 1) {
instance.array[index] = true;
}
index = 3391;
while (index <= 3393) : (index += 1) {
instance.array[index] = true;
}
index = 3398;
while (index <= 3407) : (index += 1) {
instance.array[index] = true;
}
index = 3408;
while (index <= 3416) : (index += 1) {
instance.array[index] = true;
}
instance.array[3417] = true;
index = 3418;
while (index <= 3423) : (index += 1) {
instance.array[index] = true;
}
index = 3426;
while (index <= 3427) : (index += 1) {
instance.array[index] = true;
}
index = 3429;
while (index <= 3446) : (index += 1) {
instance.array[index] = true;
}
index = 3450;
while (index <= 3473) : (index += 1) {
instance.array[index] = true;
}
index = 3475;
while (index <= 3483) : (index += 1) {
instance.array[index] = true;
}
instance.array[3485] = true;
index = 3488;
while (index <= 3494) : (index += 1) {
instance.array[index] = true;
}
index = 3504;
while (index <= 3505) : (index += 1) {
instance.array[index] = true;
}
index = 3512;
while (index <= 3518) : (index += 1) {
instance.array[index] = true;
}
index = 3526;
while (index <= 3535) : (index += 1) {
instance.array[index] = true;
}
index = 3538;
while (index <= 3539) : (index += 1) {
instance.array[index] = true;
}
instance.array[3540] = true;
index = 3553;
while (index <= 3600) : (index += 1) {
instance.array[index] = true;
}
index = 3602;
while (index <= 3603) : (index += 1) {
instance.array[index] = true;
}
instance.array[3615] = true;
index = 3616;
while (index <= 3621) : (index += 1) {
instance.array[index] = true;
}
instance.array[3622] = true;
instance.array[3631] = true;
index = 3632;
while (index <= 3641) : (index += 1) {
instance.array[index] = true;
}
index = 3642;
while (index <= 3643) : (index += 1) {
instance.array[index] = true;
}
index = 3681;
while (index <= 3682) : (index += 1) {
instance.array[index] = true;
}
instance.array[3684] = true;
index = 3686;
while (index <= 3690) : (index += 1) {
instance.array[index] = true;
}
index = 3692;
while (index <= 3715) : (index += 1) {
instance.array[index] = true;
}
instance.array[3717] = true;
index = 3719;
while (index <= 3728) : (index += 1) {
instance.array[index] = true;
}
index = 3730;
while (index <= 3731) : (index += 1) {
instance.array[index] = true;
}
instance.array[3741] = true;
index = 3744;
while (index <= 3748) : (index += 1) {
instance.array[index] = true;
}
instance.array[3750] = true;
index = 3760;
while (index <= 3769) : (index += 1) {
instance.array[index] = true;
}
index = 3772;
while (index <= 3775) : (index += 1) {
instance.array[index] = true;
}
instance.array[3808] = true;
index = 3809;
while (index <= 3811) : (index += 1) {
instance.array[index] = true;
}
index = 3812;
while (index <= 3826) : (index += 1) {
instance.array[index] = true;
}
instance.array[3827] = true;
instance.array[3828] = true;
index = 3829;
while (index <= 3831) : (index += 1) {
instance.array[index] = true;
}
index = 3834;
while (index <= 3839) : (index += 1) {
instance.array[index] = true;
}
index = 3840;
while (index <= 3849) : (index += 1) {
instance.array[index] = true;
}
index = 3850;
while (index <= 3859) : (index += 1) {
instance.array[index] = true;
}
instance.array[3860] = true;
instance.array[3862] = true;
instance.array[3864] = true;
instance.array[3866] = true;
instance.array[3867] = true;
instance.array[3868] = true;
instance.array[3869] = true;
index = 3870;
while (index <= 3871) : (index += 1) {
instance.array[index] = true;
}
index = 3872;
while (index <= 3879) : (index += 1) {
instance.array[index] = true;
}
index = 3881;
while (index <= 3916) : (index += 1) {
instance.array[index] = true;
}
instance.array[3935] = true;
instance.array[3941] = true;
index = 3944;
while (index <= 3948) : (index += 1) {
instance.array[index] = true;
}
index = 3998;
while (index <= 4005) : (index += 1) {
instance.array[index] = true;
}
index = 4007;
while (index <= 4012) : (index += 1) {
instance.array[index] = true;
}
index = 4014;
while (index <= 4015) : (index += 1) {
instance.array[index] = true;
}
index = 4016;
while (index <= 4020) : (index += 1) {
instance.array[index] = true;
}
index = 4021;
while (index <= 4024) : (index += 1) {
instance.array[index] = true;
}
index = 4025;
while (index <= 4026) : (index += 1) {
instance.array[index] = true;
}
index = 4064;
while (index <= 4106) : (index += 1) {
instance.array[index] = true;
}
index = 4107;
while (index <= 4108) : (index += 1) {
instance.array[index] = true;
}
instance.array[4113] = true;
instance.array[4120] = true;
index = 4123;
while (index <= 4124) : (index += 1) {
instance.array[index] = true;
}
instance.array[4127] = true;
index = 4128;
while (index <= 4137) : (index += 1) {
instance.array[index] = true;
}
index = 4138;
while (index <= 4143) : (index += 1) {
instance.array[index] = true;
}
index = 4144;
while (index <= 4149) : (index += 1) {
instance.array[index] = true;
}
index = 4150;
while (index <= 4151) : (index += 1) {
instance.array[index] = true;
}
index = 4154;
while (index <= 4157) : (index += 1) {
instance.array[index] = true;
}
instance.array[4161] = true;
index = 4162;
while (index <= 4164) : (index += 1) {
instance.array[index] = true;
}
index = 4165;
while (index <= 4166) : (index += 1) {
instance.array[index] = true;
}
index = 4167;
while (index <= 4173) : (index += 1) {
instance.array[index] = true;
}
index = 4174;
while (index <= 4176) : (index += 1) {
instance.array[index] = true;
}
index = 4181;
while (index <= 4193) : (index += 1) {
instance.array[index] = true;
}
index = 4195;
while (index <= 4196) : (index += 1) {
instance.array[index] = true;
}
index = 4199;
while (index <= 4204) : (index += 1) {
instance.array[index] = true;
}
instance.array[4206] = true;
instance.array[4207] = true;
index = 4208;
while (index <= 4217) : (index += 1) {
instance.array[index] = true;
}
index = 4218;
while (index <= 4220) : (index += 1) {
instance.array[index] = true;
}
index = 4222;
while (index <= 4223) : (index += 1) {
instance.array[index] = true;
}
index = 4224;
while (index <= 4261) : (index += 1) {
instance.array[index] = true;
}
instance.array[4263] = true;
instance.array[4269] = true;
index = 4272;
while (index <= 4314) : (index += 1) {
instance.array[index] = true;
}
instance.array[4315] = true;
instance.array[4316] = true;
index = 4317;
while (index <= 4319) : (index += 1) {
instance.array[index] = true;
}
index = 4320;
while (index <= 4648) : (index += 1) {
instance.array[index] = true;
}
index = 4650;
while (index <= 4653) : (index += 1) {
instance.array[index] = true;
}
index = 4656;
while (index <= 4662) : (index += 1) {
instance.array[index] = true;
}
instance.array[4664] = true;
index = 4666;
while (index <= 4669) : (index += 1) {
instance.array[index] = true;
}
index = 4672;
while (index <= 4712) : (index += 1) {
instance.array[index] = true;
}
index = 4714;
while (index <= 4717) : (index += 1) {
instance.array[index] = true;
}
index = 4720;
while (index <= 4752) : (index += 1) {
instance.array[index] = true;
}
index = 4754;
while (index <= 4757) : (index += 1) {
instance.array[index] = true;
}
index = 4760;
while (index <= 4766) : (index += 1) {
instance.array[index] = true;
}
instance.array[4768] = true;
index = 4770;
while (index <= 4773) : (index += 1) {
instance.array[index] = true;
}
index = 4776;
while (index <= 4790) : (index += 1) {
instance.array[index] = true;
}
index = 4792;
while (index <= 4848) : (index += 1) {
instance.array[index] = true;
}
index = 4850;
while (index <= 4853) : (index += 1) {
instance.array[index] = true;
}
index = 4856;
while (index <= 4922) : (index += 1) {
instance.array[index] = true;
}
index = 4928;
while (index <= 4936) : (index += 1) {
instance.array[index] = true;
}
index = 4937;
while (index <= 4956) : (index += 1) {
instance.array[index] = true;
}
index = 4960;
while (index <= 4975) : (index += 1) {
instance.array[index] = true;
}
index = 4976;
while (index <= 4985) : (index += 1) {
instance.array[index] = true;
}
index = 4992;
while (index <= 5077) : (index += 1) {
instance.array[index] = true;
}
index = 5080;
while (index <= 5085) : (index += 1) {
instance.array[index] = true;
}
instance.array[5088] = true;
index = 5089;
while (index <= 5708) : (index += 1) {
instance.array[index] = true;
}
instance.array[5709] = true;
instance.array[5710] = true;
index = 5711;
while (index <= 5727) : (index += 1) {
instance.array[index] = true;
}
instance.array[5728] = true;
index = 5729;
while (index <= 5754) : (index += 1) {
instance.array[index] = true;
}
instance.array[5755] = true;
instance.array[5756] = true;
index = 5760;
while (index <= 5834) : (index += 1) {
instance.array[index] = true;
}
index = 5835;
while (index <= 5837) : (index += 1) {
instance.array[index] = true;
}
index = 5838;
while (index <= 5840) : (index += 1) {
instance.array[index] = true;
}
index = 5841;
while (index <= 5848) : (index += 1) {
instance.array[index] = true;
}
index = 5856;
while (index <= 5868) : (index += 1) {
instance.array[index] = true;
}
index = 5870;
while (index <= 5873) : (index += 1) {
instance.array[index] = true;
}
index = 5888;
while (index <= 5905) : (index += 1) {
instance.array[index] = true;
}
index = 5909;
while (index <= 5910) : (index += 1) {
instance.array[index] = true;
}
index = 5920;
while (index <= 5937) : (index += 1) {
instance.array[index] = true;
}
index = 5952;
while (index <= 5964) : (index += 1) {
instance.array[index] = true;
}
index = 5966;
while (index <= 5968) : (index += 1) {
instance.array[index] = true;
}
index = 5984;
while (index <= 6035) : (index += 1) {
instance.array[index] = true;
}
instance.array[6038] = true;
index = 6046;
while (index <= 6053) : (index += 1) {
instance.array[index] = true;
}
index = 6055;
while (index <= 6056) : (index += 1) {
instance.array[index] = true;
}
index = 6068;
while (index <= 6070) : (index += 1) {
instance.array[index] = true;
}
instance.array[6071] = true;
index = 6072;
while (index <= 6074) : (index += 1) {
instance.array[index] = true;
}
instance.array[6075] = true;
instance.array[6076] = true;
index = 6080;
while (index <= 6089) : (index += 1) {
instance.array[index] = true;
}
index = 6096;
while (index <= 6105) : (index += 1) {
instance.array[index] = true;
}
index = 6112;
while (index <= 6117) : (index += 1) {
instance.array[index] = true;
}
instance.array[6118] = true;
index = 6119;
while (index <= 6122) : (index += 1) {
instance.array[index] = true;
}
index = 6128;
while (index <= 6137) : (index += 1) {
instance.array[index] = true;
}
index = 6144;
while (index <= 6178) : (index += 1) {
instance.array[index] = true;
}
instance.array[6179] = true;
index = 6180;
while (index <= 6232) : (index += 1) {
instance.array[index] = true;
}
index = 6240;
while (index <= 6244) : (index += 1) {
instance.array[index] = true;
}
index = 6247;
while (index <= 6280) : (index += 1) {
instance.array[index] = true;
}
instance.array[6282] = true;
index = 6288;
while (index <= 6357) : (index += 1) {
instance.array[index] = true;
}
index = 6368;
while (index <= 6398) : (index += 1) {
instance.array[index] = true;
}
index = 6403;
while (index <= 6406) : (index += 1) {
instance.array[index] = true;
}
index = 6409;
while (index <= 6411) : (index += 1) {
instance.array[index] = true;
}
index = 6416;
while (index <= 6417) : (index += 1) {
instance.array[index] = true;
}
index = 6419;
while (index <= 6424) : (index += 1) {
instance.array[index] = true;
}
instance.array[6432] = true;
index = 6436;
while (index <= 6437) : (index += 1) {
instance.array[index] = true;
}
index = 6438;
while (index <= 6447) : (index += 1) {
instance.array[index] = true;
}
index = 6448;
while (index <= 6477) : (index += 1) {
instance.array[index] = true;
}
index = 6480;
while (index <= 6484) : (index += 1) {
instance.array[index] = true;
}
index = 6496;
while (index <= 6539) : (index += 1) {
instance.array[index] = true;
}
index = 6544;
while (index <= 6569) : (index += 1) {
instance.array[index] = true;
}
index = 6576;
while (index <= 6585) : (index += 1) {
instance.array[index] = true;
}
instance.array[6586] = true;
index = 6590;
while (index <= 6623) : (index += 1) {
instance.array[index] = true;
}
index = 6624;
while (index <= 6646) : (index += 1) {
instance.array[index] = true;
}
index = 6649;
while (index <= 6650) : (index += 1) {
instance.array[index] = true;
}
index = 6654;
while (index <= 6655) : (index += 1) {
instance.array[index] = true;
}
index = 6656;
while (index <= 6708) : (index += 1) {
instance.array[index] = true;
}
instance.array[6709] = true;
instance.array[6711] = true;
instance.array[6721] = true;
index = 6723;
while (index <= 6724) : (index += 1) {
instance.array[index] = true;
}
index = 6733;
while (index <= 6738) : (index += 1) {
instance.array[index] = true;
}
index = 6752;
while (index <= 6761) : (index += 1) {
instance.array[index] = true;
}
index = 6768;
while (index <= 6777) : (index += 1) {
instance.array[index] = true;
}
index = 6784;
while (index <= 6790) : (index += 1) {
instance.array[index] = true;
}
instance.array[6791] = true;
index = 6792;
while (index <= 6797) : (index += 1) {
instance.array[index] = true;
}
instance.array[6884] = true;
index = 6885;
while (index <= 6931) : (index += 1) {
instance.array[index] = true;
}
instance.array[6939] = true;
index = 6941;
while (index <= 6945) : (index += 1) {
instance.array[index] = true;
}
index = 6947;
while (index <= 6948) : (index += 1) {
instance.array[index] = true;
}
index = 6949;
while (index <= 6955) : (index += 1) {
instance.array[index] = true;
}
index = 6960;
while (index <= 6969) : (index += 1) {
instance.array[index] = true;
}
index = 6970;
while (index <= 6976) : (index += 1) {
instance.array[index] = true;
}
index = 6977;
while (index <= 6986) : (index += 1) {
instance.array[index] = true;
}
index = 6996;
while (index <= 7004) : (index += 1) {
instance.array[index] = true;
}
instance.array[7010] = true;
index = 7011;
while (index <= 7040) : (index += 1) {
instance.array[index] = true;
}
instance.array[7041] = true;
index = 7046;
while (index <= 7047) : (index += 1) {
instance.array[index] = true;
}
instance.array[7050] = true;
index = 7054;
while (index <= 7055) : (index += 1) {
instance.array[index] = true;
}
index = 7056;
while (index <= 7065) : (index += 1) {
instance.array[index] = true;
}
index = 7066;
while (index <= 7109) : (index += 1) {
instance.array[index] = true;
}
instance.array[7111] = true;
index = 7114;
while (index <= 7116) : (index += 1) {
instance.array[index] = true;
}
instance.array[7118] = true;
index = 7122;
while (index <= 7123) : (index += 1) {
instance.array[index] = true;
}
index = 7132;
while (index <= 7135) : (index += 1) {
instance.array[index] = true;
}
index = 7136;
while (index <= 7171) : (index += 1) {
instance.array[index] = true;
}
index = 7172;
while (index <= 7179) : (index += 1) {
instance.array[index] = true;
}
index = 7188;
while (index <= 7189) : (index += 1) {
instance.array[index] = true;
}
index = 7195;
while (index <= 7199) : (index += 1) {
instance.array[index] = true;
}
index = 7200;
while (index <= 7209) : (index += 1) {
instance.array[index] = true;
}
index = 7213;
while (index <= 7215) : (index += 1) {
instance.array[index] = true;
}
index = 7216;
while (index <= 7225) : (index += 1) {
instance.array[index] = true;
}
index = 7226;
while (index <= 7255) : (index += 1) {
instance.array[index] = true;
}
index = 7256;
while (index <= 7261) : (index += 1) {
instance.array[index] = true;
}
index = 7262;
while (index <= 7263) : (index += 1) {
instance.array[index] = true;
}
index = 7264;
while (index <= 7272) : (index += 1) {
instance.array[index] = true;
}
index = 7280;
while (index <= 7322) : (index += 1) {
instance.array[index] = true;
}
index = 7325;
while (index <= 7327) : (index += 1) {
instance.array[index] = true;
}
index = 7328;
while (index <= 7335) : (index += 1) {
instance.array[index] = true;
}
instance.array[7347] = true;
instance.array[7361] = true;
index = 7369;
while (index <= 7372) : (index += 1) {
instance.array[index] = true;
}
index = 7374;
while (index <= 7379) : (index += 1) {
instance.array[index] = true;
}
index = 7381;
while (index <= 7382) : (index += 1) {
instance.array[index] = true;
}
instance.array[7383] = true;
instance.array[7386] = true;
index = 7392;
while (index <= 7435) : (index += 1) {
instance.array[index] = true;
}
index = 7436;
while (index <= 7498) : (index += 1) {
instance.array[index] = true;
}
index = 7499;
while (index <= 7511) : (index += 1) {
instance.array[index] = true;
}
instance.array[7512] = true;
index = 7513;
while (index <= 7546) : (index += 1) {
instance.array[index] = true;
}
index = 7547;
while (index <= 7583) : (index += 1) {
instance.array[index] = true;
}
index = 7648;
while (index <= 7925) : (index += 1) {
instance.array[index] = true;
}
index = 7928;
while (index <= 7933) : (index += 1) {
instance.array[index] = true;
}
index = 7936;
while (index <= 7973) : (index += 1) {
instance.array[index] = true;
}
index = 7976;
while (index <= 7981) : (index += 1) {
instance.array[index] = true;
}
index = 7984;
while (index <= 7991) : (index += 1) {
instance.array[index] = true;
}
instance.array[7993] = true;
instance.array[7995] = true;
instance.array[7997] = true;
index = 7999;
while (index <= 8029) : (index += 1) {
instance.array[index] = true;
}
index = 8032;
while (index <= 8084) : (index += 1) {
instance.array[index] = true;
}
index = 8086;
while (index <= 8092) : (index += 1) {
instance.array[index] = true;
}
instance.array[8093] = true;
instance.array[8094] = true;
index = 8095;
while (index <= 8097) : (index += 1) {
instance.array[index] = true;
}
index = 8098;
while (index <= 8100) : (index += 1) {
instance.array[index] = true;
}
index = 8102;
while (index <= 8108) : (index += 1) {
instance.array[index] = true;
}
index = 8109;
while (index <= 8111) : (index += 1) {
instance.array[index] = true;
}
index = 8112;
while (index <= 8115) : (index += 1) {
instance.array[index] = true;
}
index = 8118;
while (index <= 8123) : (index += 1) {
instance.array[index] = true;
}
index = 8125;
while (index <= 8127) : (index += 1) {
instance.array[index] = true;
}
index = 8128;
while (index <= 8140) : (index += 1) {
instance.array[index] = true;
}
index = 8141;
while (index <= 8143) : (index += 1) {
instance.array[index] = true;
}
index = 8146;
while (index <= 8148) : (index += 1) {
instance.array[index] = true;
}
index = 8150;
while (index <= 8156) : (index += 1) {
instance.array[index] = true;
}
index = 8157;
while (index <= 8158) : (index += 1) {
instance.array[index] = true;
}
index = 8160;
while (index <= 8170) : (index += 1) {
instance.array[index] = true;
}
index = 8176;
while (index <= 8181) : (index += 1) {
instance.array[index] = true;
}
index = 8182;
while (index <= 8183) : (index += 1) {
instance.array[index] = true;
}
instance.array[8184] = true;
instance.array[8185] = true;
instance.array[8186] = true;
index = 8187;
while (index <= 8188) : (index += 1) {
instance.array[index] = true;
}
instance.array[8189] = true;
instance.array[8190] = true;
instance.array[8191] = true;
index = 8192;
while (index <= 8199) : (index += 1) {
instance.array[index] = true;
}
instance.array[8207] = true;
index = 8208;
while (index <= 8216) : (index += 1) {
instance.array[index] = true;
}
instance.array[8217] = true;
instance.array[8218] = true;
index = 8219;
while (index <= 8222) : (index += 1) {
instance.array[index] = true;
}
index = 8223;
while (index <= 8224) : (index += 1) {
instance.array[index] = true;
}
index = 8225;
while (index <= 8227) : (index += 1) {
instance.array[index] = true;
}
instance.array[8228] = true;
instance.array[8229] = true;
instance.array[8230] = true;
index = 8231;
while (index <= 8241) : (index += 1) {
instance.array[index] = true;
}
instance.array[8242] = true;
instance.array[8243] = true;
instance.array[8244] = true;
index = 8245;
while (index <= 8254) : (index += 1) {
instance.array[index] = true;
}
instance.array[8255] = true;
instance.array[8272] = true;
instance.array[8273] = true;
index = 8276;
while (index <= 8281) : (index += 1) {
instance.array[index] = true;
}
index = 8282;
while (index <= 8284) : (index += 1) {
instance.array[index] = true;
}
instance.array[8285] = true;
instance.array[8286] = true;
instance.array[8287] = true;
index = 8288;
while (index <= 8297) : (index += 1) {
instance.array[index] = true;
}
index = 8298;
while (index <= 8300) : (index += 1) {
instance.array[index] = true;
}
instance.array[8301] = true;
instance.array[8302] = true;
index = 8304;
while (index <= 8316) : (index += 1) {
instance.array[index] = true;
}
index = 8320;
while (index <= 8351) : (index += 1) {
instance.array[index] = true;
}
index = 8416;
while (index <= 8417) : (index += 1) {
instance.array[index] = true;
}
instance.array[8418] = true;
index = 8419;
while (index <= 8422) : (index += 1) {
instance.array[index] = true;
}
instance.array[8423] = true;
index = 8424;
while (index <= 8425) : (index += 1) {
instance.array[index] = true;
}
index = 8426;
while (index <= 8435) : (index += 1) {
instance.array[index] = true;
}
instance.array[8436] = true;
instance.array[8437] = true;
index = 8438;
while (index <= 8439) : (index += 1) {
instance.array[index] = true;
}
instance.array[8440] = true;
index = 8441;
while (index <= 8445) : (index += 1) {
instance.array[index] = true;
}
index = 8446;
while (index <= 8451) : (index += 1) {
instance.array[index] = true;
}
instance.array[8452] = true;
instance.array[8453] = true;
instance.array[8454] = true;
instance.array[8455] = true;
instance.array[8456] = true;
instance.array[8457] = true;
index = 8458;
while (index <= 8461) : (index += 1) {
instance.array[index] = true;
}
instance.array[8462] = true;
index = 8463;
while (index <= 8468) : (index += 1) {
instance.array[index] = true;
}
index = 8469;
while (index <= 8472) : (index += 1) {
instance.array[index] = true;
}
instance.array[8473] = true;
index = 8474;
while (index <= 8475) : (index += 1) {
instance.array[index] = true;
}
index = 8476;
while (index <= 8479) : (index += 1) {
instance.array[index] = true;
}
index = 8480;
while (index <= 8484) : (index += 1) {
instance.array[index] = true;
}
index = 8485;
while (index <= 8489) : (index += 1) {
instance.array[index] = true;
}
instance.array[8490] = true;
instance.array[8491] = true;
index = 8492;
while (index <= 8493) : (index += 1) {
instance.array[index] = true;
}
instance.array[8494] = true;
instance.array[8495] = true;
index = 8496;
while (index <= 8511) : (index += 1) {
instance.array[index] = true;
}
index = 8512;
while (index <= 8546) : (index += 1) {
instance.array[index] = true;
}
index = 8547;
while (index <= 8548) : (index += 1) {
instance.array[index] = true;
}
index = 8549;
while (index <= 8552) : (index += 1) {
instance.array[index] = true;
}
instance.array[8553] = true;
index = 8554;
while (index <= 8555) : (index += 1) {
instance.array[index] = true;
}
index = 8560;
while (index <= 8564) : (index += 1) {
instance.array[index] = true;
}
index = 8565;
while (index <= 8569) : (index += 1) {
instance.array[index] = true;
}
index = 8570;
while (index <= 8571) : (index += 1) {
instance.array[index] = true;
}
index = 8572;
while (index <= 8575) : (index += 1) {
instance.array[index] = true;
}
instance.array[8576] = true;
index = 8577;
while (index <= 8578) : (index += 1) {
instance.array[index] = true;
}
instance.array[8579] = true;
index = 8580;
while (index <= 8581) : (index += 1) {
instance.array[index] = true;
}
instance.array[8582] = true;
index = 8583;
while (index <= 8589) : (index += 1) {
instance.array[index] = true;
}
instance.array[8590] = true;
index = 8591;
while (index <= 8621) : (index += 1) {
instance.array[index] = true;
}
index = 8622;
while (index <= 8623) : (index += 1) {
instance.array[index] = true;
}
index = 8624;
while (index <= 8625) : (index += 1) {
instance.array[index] = true;
}
instance.array[8626] = true;
instance.array[8627] = true;
instance.array[8628] = true;
index = 8629;
while (index <= 8659) : (index += 1) {
instance.array[index] = true;
}
index = 8660;
while (index <= 8927) : (index += 1) {
instance.array[index] = true;
}
index = 8928;
while (index <= 8935) : (index += 1) {
instance.array[index] = true;
}
instance.array[8936] = true;
instance.array[8937] = true;
instance.array[8938] = true;
instance.array[8939] = true;
index = 8940;
while (index <= 8959) : (index += 1) {
instance.array[index] = true;
}
index = 8960;
while (index <= 8961) : (index += 1) {
instance.array[index] = true;
}
index = 8962;
while (index <= 8968) : (index += 1) {
instance.array[index] = true;
}
instance.array[8969] = true;
instance.array[8970] = true;
index = 8971;
while (index <= 9051) : (index += 1) {
instance.array[index] = true;
}
instance.array[9052] = true;
index = 9053;
while (index <= 9082) : (index += 1) {
instance.array[index] = true;
}
index = 9083;
while (index <= 9107) : (index += 1) {
instance.array[index] = true;
}
index = 9108;
while (index <= 9147) : (index += 1) {
instance.array[index] = true;
}
index = 9148;
while (index <= 9153) : (index += 1) {
instance.array[index] = true;
}
index = 9154;
while (index <= 9222) : (index += 1) {
instance.array[index] = true;
}
index = 9248;
while (index <= 9258) : (index += 1) {
instance.array[index] = true;
}
index = 9280;
while (index <= 9339) : (index += 1) {
instance.array[index] = true;
}
index = 9340;
while (index <= 9417) : (index += 1) {
instance.array[index] = true;
}
index = 9418;
while (index <= 9439) : (index += 1) {
instance.array[index] = true;
}
index = 9440;
while (index <= 9622) : (index += 1) {
instance.array[index] = true;
}
instance.array[9623] = true;
index = 9624;
while (index <= 9632) : (index += 1) {
instance.array[index] = true;
}
instance.array[9633] = true;
index = 9634;
while (index <= 9687) : (index += 1) {
instance.array[index] = true;
}
index = 9688;
while (index <= 9695) : (index += 1) {
instance.array[index] = true;
}
index = 9696;
while (index <= 9806) : (index += 1) {
instance.array[index] = true;
}
instance.array[9807] = true;
index = 9808;
while (index <= 10055) : (index += 1) {
instance.array[index] = true;
}
instance.array[10056] = true;
instance.array[10057] = true;
instance.array[10058] = true;
instance.array[10059] = true;
instance.array[10060] = true;
instance.array[10061] = true;
instance.array[10062] = true;
instance.array[10063] = true;
instance.array[10064] = true;
instance.array[10065] = true;
instance.array[10066] = true;
instance.array[10067] = true;
instance.array[10068] = true;
instance.array[10069] = true;
index = 10070;
while (index <= 10099) : (index += 1) {
instance.array[index] = true;
}
index = 10100;
while (index <= 10143) : (index += 1) {
instance.array[index] = true;
}
index = 10144;
while (index <= 10148) : (index += 1) {
instance.array[index] = true;
}
instance.array[10149] = true;
instance.array[10150] = true;
index = 10151;
while (index <= 10181) : (index += 1) {
instance.array[index] = true;
}
instance.array[10182] = true;
instance.array[10183] = true;
instance.array[10184] = true;
instance.array[10185] = true;
instance.array[10186] = true;
instance.array[10187] = true;
instance.array[10188] = true;
instance.array[10189] = true;
instance.array[10190] = true;
instance.array[10191] = true;
index = 10192;
while (index <= 10207) : (index += 1) {
instance.array[index] = true;
}
index = 10208;
while (index <= 10463) : (index += 1) {
instance.array[index] = true;
}
index = 10464;
while (index <= 10594) : (index += 1) {
instance.array[index] = true;
}
instance.array[10595] = true;
instance.array[10596] = true;
instance.array[10597] = true;
instance.array[10598] = true;
instance.array[10599] = true;
instance.array[10600] = true;
instance.array[10601] = true;
instance.array[10602] = true;
instance.array[10603] = true;
instance.array[10604] = true;
instance.array[10605] = true;
instance.array[10606] = true;
instance.array[10607] = true;
instance.array[10608] = true;
instance.array[10609] = true;
instance.array[10610] = true;
instance.array[10611] = true;
instance.array[10612] = true;
instance.array[10613] = true;
instance.array[10614] = true;
instance.array[10615] = true;
instance.array[10616] = true;
index = 10617;
while (index <= 10679) : (index += 1) {
instance.array[index] = true;
}
instance.array[10680] = true;
instance.array[10681] = true;
instance.array[10682] = true;
instance.array[10683] = true;
index = 10684;
while (index <= 10715) : (index += 1) {
instance.array[index] = true;
}
instance.array[10716] = true;
instance.array[10717] = true;
index = 10718;
while (index <= 10975) : (index += 1) {
instance.array[index] = true;
}
index = 10976;
while (index <= 11023) : (index += 1) {
instance.array[index] = true;
}
index = 11024;
while (index <= 11044) : (index += 1) {
instance.array[index] = true;
}
index = 11045;
while (index <= 11046) : (index += 1) {
instance.array[index] = true;
}
index = 11047;
while (index <= 11052) : (index += 1) {
instance.array[index] = true;
}
index = 11053;
while (index <= 11091) : (index += 1) {
instance.array[index] = true;
}
index = 11094;
while (index <= 11125) : (index += 1) {
instance.array[index] = true;
}
index = 11127;
while (index <= 11231) : (index += 1) {
instance.array[index] = true;
}
index = 11232;
while (index <= 11278) : (index += 1) {
instance.array[index] = true;
}
index = 11280;
while (index <= 11326) : (index += 1) {
instance.array[index] = true;
}
index = 11328;
while (index <= 11355) : (index += 1) {
instance.array[index] = true;
}
index = 11356;
while (index <= 11357) : (index += 1) {
instance.array[index] = true;
}
index = 11358;
while (index <= 11460) : (index += 1) {
instance.array[index] = true;
}
index = 11461;
while (index <= 11466) : (index += 1) {
instance.array[index] = true;
}
index = 11467;
while (index <= 11470) : (index += 1) {
instance.array[index] = true;
}
index = 11474;
while (index <= 11475) : (index += 1) {
instance.array[index] = true;
}
index = 11481;
while (index <= 11484) : (index += 1) {
instance.array[index] = true;
}
instance.array[11485] = true;
index = 11486;
while (index <= 11487) : (index += 1) {
instance.array[index] = true;
}
index = 11488;
while (index <= 11525) : (index += 1) {
instance.array[index] = true;
}
instance.array[11527] = true;
instance.array[11533] = true;
index = 11536;
while (index <= 11591) : (index += 1) {
instance.array[index] = true;
}
instance.array[11599] = true;
instance.array[11600] = true;
index = 11616;
while (index <= 11638) : (index += 1) {
instance.array[index] = true;
}
index = 11648;
while (index <= 11654) : (index += 1) {
instance.array[index] = true;
}
index = 11656;
while (index <= 11662) : (index += 1) {
instance.array[index] = true;
}
index = 11664;
while (index <= 11670) : (index += 1) {
instance.array[index] = true;
}
index = 11672;
while (index <= 11678) : (index += 1) {
instance.array[index] = true;
}
index = 11680;
while (index <= 11686) : (index += 1) {
instance.array[index] = true;
}
index = 11688;
while (index <= 11694) : (index += 1) {
instance.array[index] = true;
}
index = 11696;
while (index <= 11702) : (index += 1) {
instance.array[index] = true;
}
index = 11704;
while (index <= 11710) : (index += 1) {
instance.array[index] = true;
}
index = 11744;
while (index <= 11745) : (index += 1) {
instance.array[index] = true;
}
instance.array[11746] = true;
instance.array[11747] = true;
instance.array[11748] = true;
instance.array[11749] = true;
index = 11750;
while (index <= 11752) : (index += 1) {
instance.array[index] = true;
}
instance.array[11753] = true;
instance.array[11754] = true;
instance.array[11755] = true;
instance.array[11756] = true;
instance.array[11757] = true;
index = 11758;
while (index <= 11766) : (index += 1) {
instance.array[index] = true;
}
instance.array[11767] = true;
index = 11768;
while (index <= 11769) : (index += 1) {
instance.array[index] = true;
}
instance.array[11770] = true;
instance.array[11771] = true;
instance.array[11772] = true;
instance.array[11773] = true;
index = 11774;
while (index <= 11775) : (index += 1) {
instance.array[index] = true;
}
instance.array[11776] = true;
instance.array[11777] = true;
instance.array[11778] = true;
instance.array[11779] = true;
instance.array[11780] = true;
instance.array[11781] = true;
instance.array[11782] = true;
instance.array[11783] = true;
instance.array[11784] = true;
instance.array[11785] = true;
index = 11786;
while (index <= 11790) : (index += 1) {
instance.array[index] = true;
}
instance.array[11791] = true;
index = 11792;
while (index <= 11801) : (index += 1) {
instance.array[index] = true;
}
index = 11802;
while (index <= 11803) : (index += 1) {
instance.array[index] = true;
}
index = 11804;
while (index <= 11807) : (index += 1) {
instance.array[index] = true;
}
instance.array[11808] = true;
instance.array[11809] = true;
instance.array[11810] = true;
index = 11811;
while (index <= 11823) : (index += 1) {
instance.array[index] = true;
}
index = 11824;
while (index <= 11825) : (index += 1) {
instance.array[index] = true;
}
instance.array[11826] = true;
index = 11872;
while (index <= 11897) : (index += 1) {
instance.array[index] = true;
}
index = 11899;
while (index <= 11987) : (index += 1) {
instance.array[index] = true;
}
index = 12000;
while (index <= 12213) : (index += 1) {
instance.array[index] = true;
}
index = 12240;
while (index <= 12251) : (index += 1) {
instance.array[index] = true;
}
instance.array[12256] = true;
index = 12257;
while (index <= 12259) : (index += 1) {
instance.array[index] = true;
}
instance.array[12260] = true;
instance.array[12261] = true;
instance.array[12262] = true;
instance.array[12263] = true;
instance.array[12264] = true;
instance.array[12265] = true;
instance.array[12266] = true;
instance.array[12267] = true;
instance.array[12268] = true;
instance.array[12269] = true;
instance.array[12270] = true;
instance.array[12271] = true;
instance.array[12272] = true;
instance.array[12273] = true;
index = 12274;
while (index <= 12275) : (index += 1) {
instance.array[index] = true;
}
instance.array[12276] = true;
instance.array[12277] = true;
instance.array[12278] = true;
instance.array[12279] = true;
instance.array[12280] = true;
instance.array[12281] = true;
instance.array[12282] = true;
instance.array[12283] = true;
instance.array[12284] = true;
instance.array[12285] = true;
index = 12286;
while (index <= 12287) : (index += 1) {
instance.array[index] = true;
}
instance.array[12288] = true;
index = 12289;
while (index <= 12297) : (index += 1) {
instance.array[index] = true;
}
instance.array[12304] = true;
index = 12305;
while (index <= 12309) : (index += 1) {
instance.array[index] = true;
}
index = 12310;
while (index <= 12311) : (index += 1) {
instance.array[index] = true;
}
index = 12312;
while (index <= 12314) : (index += 1) {
instance.array[index] = true;
}
instance.array[12315] = true;
instance.array[12316] = true;
instance.array[12317] = true;
index = 12318;
while (index <= 12319) : (index += 1) {
instance.array[index] = true;
}
index = 12321;
while (index <= 12406) : (index += 1) {
instance.array[index] = true;
}
index = 12411;
while (index <= 12412) : (index += 1) {
instance.array[index] = true;
}
index = 12413;
while (index <= 12414) : (index += 1) {
instance.array[index] = true;
}
instance.array[12415] = true;
instance.array[12416] = true;
index = 12417;
while (index <= 12506) : (index += 1) {
instance.array[index] = true;
}
instance.array[12507] = true;
index = 12508;
while (index <= 12510) : (index += 1) {
instance.array[index] = true;
}
instance.array[12511] = true;
index = 12517;
while (index <= 12559) : (index += 1) {
instance.array[index] = true;
}
index = 12561;
while (index <= 12654) : (index += 1) {
instance.array[index] = true;
}
index = 12656;
while (index <= 12657) : (index += 1) {
instance.array[index] = true;
}
index = 12658;
while (index <= 12661) : (index += 1) {
instance.array[index] = true;
}
index = 12662;
while (index <= 12671) : (index += 1) {
instance.array[index] = true;
}
index = 12672;
while (index <= 12703) : (index += 1) {
instance.array[index] = true;
}
index = 12704;
while (index <= 12739) : (index += 1) {
instance.array[index] = true;
}
index = 12752;
while (index <= 12767) : (index += 1) {
instance.array[index] = true;
}
index = 12768;
while (index <= 12798) : (index += 1) {
instance.array[index] = true;
}
index = 12800;
while (index <= 12809) : (index += 1) {
instance.array[index] = true;
}
index = 12810;
while (index <= 12839) : (index += 1) {
instance.array[index] = true;
}
index = 12840;
while (index <= 12847) : (index += 1) {
instance.array[index] = true;
}
instance.array[12848] = true;
index = 12849;
while (index <= 12863) : (index += 1) {
instance.array[index] = true;
}
index = 12864;
while (index <= 12895) : (index += 1) {
instance.array[index] = true;
}
index = 12896;
while (index <= 12905) : (index += 1) {
instance.array[index] = true;
}
index = 12906;
while (index <= 12944) : (index += 1) {
instance.array[index] = true;
}
index = 12945;
while (index <= 12959) : (index += 1) {
instance.array[index] = true;
}
index = 12960;
while (index <= 13279) : (index += 1) {
instance.array[index] = true;
}
index = 13280;
while (index <= 19871) : (index += 1) {
instance.array[index] = true;
}
index = 19872;
while (index <= 19935) : (index += 1) {
instance.array[index] = true;
}
index = 19936;
while (index <= 40924) : (index += 1) {
instance.array[index] = true;
}
index = 40928;
while (index <= 40948) : (index += 1) {
instance.array[index] = true;
}
instance.array[40949] = true;
index = 40950;
while (index <= 42092) : (index += 1) {
instance.array[index] = true;
}
index = 42096;
while (index <= 42150) : (index += 1) {
instance.array[index] = true;
}
index = 42160;
while (index <= 42199) : (index += 1) {
instance.array[index] = true;
}
index = 42200;
while (index <= 42205) : (index += 1) {
instance.array[index] = true;
}
index = 42206;
while (index <= 42207) : (index += 1) {
instance.array[index] = true;
}
index = 42208;
while (index <= 42475) : (index += 1) {
instance.array[index] = true;
}
instance.array[42476] = true;
index = 42477;
while (index <= 42479) : (index += 1) {
instance.array[index] = true;
}
index = 42480;
while (index <= 42495) : (index += 1) {
instance.array[index] = true;
}
index = 42496;
while (index <= 42505) : (index += 1) {
instance.array[index] = true;
}
index = 42506;
while (index <= 42507) : (index += 1) {
instance.array[index] = true;
}
index = 42528;
while (index <= 42573) : (index += 1) {
instance.array[index] = true;
}
instance.array[42574] = true;
instance.array[42579] = true;
instance.array[42590] = true;
instance.array[42591] = true;
index = 42592;
while (index <= 42619) : (index += 1) {
instance.array[index] = true;
}
index = 42620;
while (index <= 42621) : (index += 1) {
instance.array[index] = true;
}
index = 42624;
while (index <= 42693) : (index += 1) {
instance.array[index] = true;
}
index = 42694;
while (index <= 42703) : (index += 1) {
instance.array[index] = true;
}
index = 42706;
while (index <= 42711) : (index += 1) {
instance.array[index] = true;
}
index = 42720;
while (index <= 42742) : (index += 1) {
instance.array[index] = true;
}
index = 42743;
while (index <= 42751) : (index += 1) {
instance.array[index] = true;
}
index = 42752;
while (index <= 42753) : (index += 1) {
instance.array[index] = true;
}
index = 42754;
while (index <= 42831) : (index += 1) {
instance.array[index] = true;
}
instance.array[42832] = true;
index = 42833;
while (index <= 42855) : (index += 1) {
instance.array[index] = true;
}
instance.array[42856] = true;
index = 42857;
while (index <= 42858) : (index += 1) {
instance.array[index] = true;
}
index = 42859;
while (index <= 42862) : (index += 1) {
instance.array[index] = true;
}
instance.array[42863] = true;
index = 42864;
while (index <= 42911) : (index += 1) {
instance.array[index] = true;
}
index = 42914;
while (index <= 42922) : (index += 1) {
instance.array[index] = true;
}
index = 42965;
while (index <= 42966) : (index += 1) {
instance.array[index] = true;
}
instance.array[42967] = true;
index = 42968;
while (index <= 42969) : (index += 1) {
instance.array[index] = true;
}
instance.array[42970] = true;
index = 42971;
while (index <= 42977) : (index += 1) {
instance.array[index] = true;
}
index = 42979;
while (index <= 42981) : (index += 1) {
instance.array[index] = true;
}
index = 42983;
while (index <= 42986) : (index += 1) {
instance.array[index] = true;
}
index = 42988;
while (index <= 43010) : (index += 1) {
instance.array[index] = true;
}
index = 43011;
while (index <= 43012) : (index += 1) {
instance.array[index] = true;
}
instance.array[43015] = true;
index = 43016;
while (index <= 43019) : (index += 1) {
instance.array[index] = true;
}
index = 43024;
while (index <= 43029) : (index += 1) {
instance.array[index] = true;
}
index = 43030;
while (index <= 43031) : (index += 1) {
instance.array[index] = true;
}
instance.array[43032] = true;
instance.array[43033] = true;
index = 43040;
while (index <= 43091) : (index += 1) {
instance.array[index] = true;
}
index = 43092;
while (index <= 43095) : (index += 1) {
instance.array[index] = true;
}
index = 43104;
while (index <= 43105) : (index += 1) {
instance.array[index] = true;
}
index = 43106;
while (index <= 43155) : (index += 1) {
instance.array[index] = true;
}
index = 43156;
while (index <= 43171) : (index += 1) {
instance.array[index] = true;
}
index = 43182;
while (index <= 43183) : (index += 1) {
instance.array[index] = true;
}
index = 43184;
while (index <= 43193) : (index += 1) {
instance.array[index] = true;
}
index = 43218;
while (index <= 43223) : (index += 1) {
instance.array[index] = true;
}
index = 43224;
while (index <= 43226) : (index += 1) {
instance.array[index] = true;
}
instance.array[43227] = true;
instance.array[43228] = true;
index = 43229;
while (index <= 43230) : (index += 1) {
instance.array[index] = true;
}
index = 43232;
while (index <= 43241) : (index += 1) {
instance.array[index] = true;
}
index = 43242;
while (index <= 43269) : (index += 1) {
instance.array[index] = true;
}
index = 43278;
while (index <= 43279) : (index += 1) {
instance.array[index] = true;
}
index = 43280;
while (index <= 43302) : (index += 1) {
instance.array[index] = true;
}
index = 43314;
while (index <= 43315) : (index += 1) {
instance.array[index] = true;
}
instance.array[43327] = true;
index = 43328;
while (index <= 43356) : (index += 1) {
instance.array[index] = true;
}
instance.array[43363] = true;
index = 43364;
while (index <= 43410) : (index += 1) {
instance.array[index] = true;
}
index = 43412;
while (index <= 43413) : (index += 1) {
instance.array[index] = true;
}
index = 43418;
while (index <= 43419) : (index += 1) {
instance.array[index] = true;
}
index = 43422;
while (index <= 43424) : (index += 1) {
instance.array[index] = true;
}
index = 43425;
while (index <= 43437) : (index += 1) {
instance.array[index] = true;
}
instance.array[43439] = true;
index = 43440;
while (index <= 43449) : (index += 1) {
instance.array[index] = true;
}
index = 43454;
while (index <= 43455) : (index += 1) {
instance.array[index] = true;
}
index = 43456;
while (index <= 43460) : (index += 1) {
instance.array[index] = true;
}
instance.array[43462] = true;
index = 43463;
while (index <= 43471) : (index += 1) {
instance.array[index] = true;
}
index = 43472;
while (index <= 43481) : (index += 1) {
instance.array[index] = true;
}
index = 43482;
while (index <= 43486) : (index += 1) {
instance.array[index] = true;
}
index = 43488;
while (index <= 43528) : (index += 1) {
instance.array[index] = true;
}
index = 43535;
while (index <= 43536) : (index += 1) {
instance.array[index] = true;
}
index = 43539;
while (index <= 43540) : (index += 1) {
instance.array[index] = true;
}
index = 43552;
while (index <= 43554) : (index += 1) {
instance.array[index] = true;
}
index = 43556;
while (index <= 43563) : (index += 1) {
instance.array[index] = true;
}
instance.array[43565] = true;
index = 43568;
while (index <= 43577) : (index += 1) {
instance.array[index] = true;
}
index = 43580;
while (index <= 43583) : (index += 1) {
instance.array[index] = true;
}
index = 43584;
while (index <= 43599) : (index += 1) {
instance.array[index] = true;
}
instance.array[43600] = true;
index = 43601;
while (index <= 43606) : (index += 1) {
instance.array[index] = true;
}
index = 43607;
while (index <= 43609) : (index += 1) {
instance.array[index] = true;
}
instance.array[43610] = true;
instance.array[43611] = true;
instance.array[43613] = true;
index = 43614;
while (index <= 43663) : (index += 1) {
instance.array[index] = true;
}
instance.array[43665] = true;
index = 43669;
while (index <= 43670) : (index += 1) {
instance.array[index] = true;
}
index = 43673;
while (index <= 43677) : (index += 1) {
instance.array[index] = true;
}
instance.array[43680] = true;
instance.array[43682] = true;
index = 43707;
while (index <= 43708) : (index += 1) {
instance.array[index] = true;
}
instance.array[43709] = true;
index = 43710;
while (index <= 43711) : (index += 1) {
instance.array[index] = true;
}
index = 43712;
while (index <= 43722) : (index += 1) {
instance.array[index] = true;
}
instance.array[43723] = true;
index = 43726;
while (index <= 43727) : (index += 1) {
instance.array[index] = true;
}
index = 43728;
while (index <= 43729) : (index += 1) {
instance.array[index] = true;
}
instance.array[43730] = true;
index = 43731;
while (index <= 43732) : (index += 1) {
instance.array[index] = true;
}
instance.array[43733] = true;
index = 43745;
while (index <= 43750) : (index += 1) {
instance.array[index] = true;
}
index = 43753;
while (index <= 43758) : (index += 1) {
instance.array[index] = true;
}
index = 43761;
while (index <= 43766) : (index += 1) {
instance.array[index] = true;
}
index = 43776;
while (index <= 43782) : (index += 1) {
instance.array[index] = true;
}
index = 43784;
while (index <= 43790) : (index += 1) {
instance.array[index] = true;
}
index = 43792;
while (index <= 43834) : (index += 1) {
instance.array[index] = true;
}
instance.array[43835] = true;
index = 43836;
while (index <= 43839) : (index += 1) {
instance.array[index] = true;
}
index = 43840;
while (index <= 43848) : (index += 1) {
instance.array[index] = true;
}
instance.array[43849] = true;
index = 43850;
while (index <= 43851) : (index += 1) {
instance.array[index] = true;
}
index = 43856;
while (index <= 43935) : (index += 1) {
instance.array[index] = true;
}
index = 43936;
while (index <= 43970) : (index += 1) {
instance.array[index] = true;
}
index = 43971;
while (index <= 43972) : (index += 1) {
instance.array[index] = true;
}
index = 43974;
while (index <= 43975) : (index += 1) {
instance.array[index] = true;
}
index = 43977;
while (index <= 43978) : (index += 1) {
instance.array[index] = true;
}
instance.array[43979] = true;
instance.array[43980] = true;
index = 43984;
while (index <= 43993) : (index += 1) {
instance.array[index] = true;
}
index = 44000;
while (index <= 55171) : (index += 1) {
instance.array[index] = true;
}
index = 55184;
while (index <= 55206) : (index += 1) {
instance.array[index] = true;
}
index = 55211;
while (index <= 55259) : (index += 1) {
instance.array[index] = true;
}
index = 63712;
while (index <= 64077) : (index += 1) {
instance.array[index] = true;
}
index = 64080;
while (index <= 64185) : (index += 1) {
instance.array[index] = true;
}
index = 64224;
while (index <= 64230) : (index += 1) {
instance.array[index] = true;
}
index = 64243;
while (index <= 64247) : (index += 1) {
instance.array[index] = true;
}
instance.array[64253] = true;
index = 64255;
while (index <= 64264) : (index += 1) {
instance.array[index] = true;
}
instance.array[64265] = true;
index = 64266;
while (index <= 64278) : (index += 1) {
instance.array[index] = true;
}
index = 64280;
while (index <= 64284) : (index += 1) {
instance.array[index] = true;
}
instance.array[64286] = true;
index = 64288;
while (index <= 64289) : (index += 1) {
instance.array[index] = true;
}
index = 64291;
while (index <= 64292) : (index += 1) {
instance.array[index] = true;
}
index = 64294;
while (index <= 64401) : (index += 1) {
instance.array[index] = true;
}
index = 64402;
while (index <= 64417) : (index += 1) {
instance.array[index] = true;
}
index = 64435;
while (index <= 64797) : (index += 1) {
instance.array[index] = true;
}
instance.array[64798] = true;
instance.array[64799] = true;
index = 64816;
while (index <= 64879) : (index += 1) {
instance.array[index] = true;
}
index = 64882;
while (index <= 64935) : (index += 1) {
instance.array[index] = true;
}
index = 64976;
while (index <= 64987) : (index += 1) {
instance.array[index] = true;
}
instance.array[64988] = true;
instance.array[64989] = true;
index = 65008;
while (index <= 65014) : (index += 1) {
instance.array[index] = true;
}
instance.array[65015] = true;
instance.array[65016] = true;
instance.array[65017] = true;
instance.array[65040] = true;
index = 65041;
while (index <= 65042) : (index += 1) {
instance.array[index] = true;
}
index = 65043;
while (index <= 65044) : (index += 1) {
instance.array[index] = true;
}
instance.array[65045] = true;
instance.array[65046] = true;
instance.array[65047] = true;
instance.array[65048] = true;
instance.array[65049] = true;
instance.array[65050] = true;
instance.array[65051] = true;
instance.array[65052] = true;
instance.array[65053] = true;
instance.array[65054] = true;
instance.array[65055] = true;
instance.array[65056] = true;
instance.array[65057] = true;
instance.array[65058] = true;
instance.array[65059] = true;
instance.array[65060] = true;
index = 65061;
while (index <= 65062) : (index += 1) {
instance.array[index] = true;
}
instance.array[65063] = true;
instance.array[65064] = true;
index = 65065;
while (index <= 65068) : (index += 1) {
instance.array[index] = true;
}
index = 65069;
while (index <= 65071) : (index += 1) {
instance.array[index] = true;
}
index = 65072;
while (index <= 65074) : (index += 1) {
instance.array[index] = true;
}
index = 65076;
while (index <= 65079) : (index += 1) {
instance.array[index] = true;
}
instance.array[65080] = true;
instance.array[65081] = true;
instance.array[65082] = true;
instance.array[65083] = true;
instance.array[65084] = true;
instance.array[65085] = true;
instance.array[65086] = true;
index = 65087;
while (index <= 65089) : (index += 1) {
instance.array[index] = true;
}
instance.array[65090] = true;
instance.array[65091] = true;
index = 65092;
while (index <= 65094) : (index += 1) {
instance.array[index] = true;
}
instance.array[65096] = true;
instance.array[65097] = true;
index = 65098;
while (index <= 65099) : (index += 1) {
instance.array[index] = true;
}
index = 65104;
while (index <= 65108) : (index += 1) {
instance.array[index] = true;
}
index = 65110;
while (index <= 65244) : (index += 1) {
instance.array[index] = true;
}
index = 65249;
while (index <= 65251) : (index += 1) {
instance.array[index] = true;
}
instance.array[65252] = true;
index = 65253;
while (index <= 65255) : (index += 1) {
instance.array[index] = true;
}
instance.array[65256] = true;
instance.array[65257] = true;
instance.array[65258] = true;
instance.array[65259] = true;
instance.array[65260] = true;
instance.array[65261] = true;
index = 65262;
while (index <= 65263) : (index += 1) {
instance.array[index] = true;
}
index = 65264;
while (index <= 65273) : (index += 1) {
instance.array[index] = true;
}
index = 65274;
while (index <= 65275) : (index += 1) {
instance.array[index] = true;
}
index = 65276;
while (index <= 65278) : (index += 1) {
instance.array[index] = true;
}
index = 65279;
while (index <= 65280) : (index += 1) {
instance.array[index] = true;
}
index = 65281;
while (index <= 65306) : (index += 1) {
instance.array[index] = true;
}
instance.array[65307] = true;
instance.array[65308] = true;
instance.array[65309] = true;
instance.array[65310] = true;
instance.array[65311] = true;
instance.array[65312] = true;
index = 65313;
while (index <= 65338) : (index += 1) {
instance.array[index] = true;
}
instance.array[65339] = true;
instance.array[65340] = true;
instance.array[65341] = true;
instance.array[65342] = true;
instance.array[65343] = true;
instance.array[65344] = true;
instance.array[65345] = true;
instance.array[65346] = true;
instance.array[65347] = true;
index = 65348;
while (index <= 65349) : (index += 1) {
instance.array[index] = true;
}
index = 65350;
while (index <= 65359) : (index += 1) {
instance.array[index] = true;
}
instance.array[65360] = true;
index = 65361;
while (index <= 65405) : (index += 1) {
instance.array[index] = true;
}
index = 65408;
while (index <= 65438) : (index += 1) {
instance.array[index] = true;
}
index = 65442;
while (index <= 65447) : (index += 1) {
instance.array[index] = true;
}
index = 65450;
while (index <= 65455) : (index += 1) {
instance.array[index] = true;
}
index = 65458;
while (index <= 65463) : (index += 1) {
instance.array[index] = true;
}
index = 65466;
while (index <= 65468) : (index += 1) {
instance.array[index] = true;
}
index = 65472;
while (index <= 65473) : (index += 1) {
instance.array[index] = true;
}
instance.array[65474] = true;
instance.array[65475] = true;
instance.array[65476] = true;
index = 65477;
while (index <= 65478) : (index += 1) {
instance.array[index] = true;
}
instance.array[65480] = true;
index = 65481;
while (index <= 65484) : (index += 1) {
instance.array[index] = true;
}
index = 65485;
while (index <= 65486) : (index += 1) {
instance.array[index] = true;
}
index = 65500;
while (index <= 65501) : (index += 1) {
instance.array[index] = true;
}
index = 65504;
while (index <= 65515) : (index += 1) {
instance.array[index] = true;
}
index = 65517;
while (index <= 65542) : (index += 1) {
instance.array[index] = true;
}
index = 65544;
while (index <= 65562) : (index += 1) {
instance.array[index] = true;
}
index = 65564;
while (index <= 65565) : (index += 1) {
instance.array[index] = true;
}
index = 65567;
while (index <= 65581) : (index += 1) {
instance.array[index] = true;
}
index = 65584;
while (index <= 65597) : (index += 1) {
instance.array[index] = true;
}
index = 65632;
while (index <= 65754) : (index += 1) {
instance.array[index] = true;
}
index = 65760;
while (index <= 65762) : (index += 1) {
instance.array[index] = true;
}
index = 65767;
while (index <= 65811) : (index += 1) {
instance.array[index] = true;
}
index = 65815;
while (index <= 65823) : (index += 1) {
instance.array[index] = true;
}
index = 65824;
while (index <= 65876) : (index += 1) {
instance.array[index] = true;
}
index = 65877;
while (index <= 65880) : (index += 1) {
instance.array[index] = true;
}
index = 65881;
while (index <= 65897) : (index += 1) {
instance.array[index] = true;
}
index = 65898;
while (index <= 65899) : (index += 1) {
instance.array[index] = true;
}
index = 65900;
while (index <= 65902) : (index += 1) {
instance.array[index] = true;
}
index = 65904;
while (index <= 65916) : (index += 1) {
instance.array[index] = true;
}
instance.array[65920] = true;
index = 65968;
while (index <= 66012) : (index += 1) {
instance.array[index] = true;
}
index = 66144;
while (index <= 66172) : (index += 1) {
instance.array[index] = true;
}
index = 66176;
while (index <= 66224) : (index += 1) {
instance.array[index] = true;
}
index = 66241;
while (index <= 66267) : (index += 1) {
instance.array[index] = true;
}
index = 66272;
while (index <= 66303) : (index += 1) {
instance.array[index] = true;
}
index = 66304;
while (index <= 66307) : (index += 1) {
instance.array[index] = true;
}
index = 66317;
while (index <= 66336) : (index += 1) {
instance.array[index] = true;
}
instance.array[66337] = true;
index = 66338;
while (index <= 66345) : (index += 1) {
instance.array[index] = true;
}
instance.array[66346] = true;
index = 66352;
while (index <= 66389) : (index += 1) {
instance.array[index] = true;
}
index = 66400;
while (index <= 66429) : (index += 1) {
instance.array[index] = true;
}
instance.array[66431] = true;
index = 66432;
while (index <= 66467) : (index += 1) {
instance.array[index] = true;
}
index = 66472;
while (index <= 66479) : (index += 1) {
instance.array[index] = true;
}
instance.array[66480] = true;
index = 66481;
while (index <= 66485) : (index += 1) {
instance.array[index] = true;
}
index = 66528;
while (index <= 66607) : (index += 1) {
instance.array[index] = true;
}
index = 66608;
while (index <= 66685) : (index += 1) {
instance.array[index] = true;
}
index = 66688;
while (index <= 66697) : (index += 1) {
instance.array[index] = true;
}
index = 66704;
while (index <= 66739) : (index += 1) {
instance.array[index] = true;
}
index = 66744;
while (index <= 66779) : (index += 1) {
instance.array[index] = true;
}
index = 66784;
while (index <= 66823) : (index += 1) {
instance.array[index] = true;
}
index = 66832;
while (index <= 66883) : (index += 1) {
instance.array[index] = true;
}
instance.array[66895] = true;
index = 67040;
while (index <= 67350) : (index += 1) {
instance.array[index] = true;
}
index = 67360;
while (index <= 67381) : (index += 1) {
instance.array[index] = true;
}
index = 67392;
while (index <= 67399) : (index += 1) {
instance.array[index] = true;
}
index = 67552;
while (index <= 67557) : (index += 1) {
instance.array[index] = true;
}
instance.array[67560] = true;
index = 67562;
while (index <= 67605) : (index += 1) {
instance.array[index] = true;
}
index = 67607;
while (index <= 67608) : (index += 1) {
instance.array[index] = true;
}
instance.array[67612] = true;
index = 67615;
while (index <= 67637) : (index += 1) {
instance.array[index] = true;
}
instance.array[67639] = true;
index = 67640;
while (index <= 67647) : (index += 1) {
instance.array[index] = true;
}
index = 67648;
while (index <= 67670) : (index += 1) {
instance.array[index] = true;
}
index = 67671;
while (index <= 67672) : (index += 1) {
instance.array[index] = true;
}
index = 67673;
while (index <= 67679) : (index += 1) {
instance.array[index] = true;
}
index = 67680;
while (index <= 67710) : (index += 1) {
instance.array[index] = true;
}
index = 67719;
while (index <= 67727) : (index += 1) {
instance.array[index] = true;
}
index = 67776;
while (index <= 67794) : (index += 1) {
instance.array[index] = true;
}
index = 67796;
while (index <= 67797) : (index += 1) {
instance.array[index] = true;
}
index = 67803;
while (index <= 67807) : (index += 1) {
instance.array[index] = true;
}
index = 67808;
while (index <= 67829) : (index += 1) {
instance.array[index] = true;
}
index = 67830;
while (index <= 67835) : (index += 1) {
instance.array[index] = true;
}
instance.array[67839] = true;
index = 67840;
while (index <= 67865) : (index += 1) {
instance.array[index] = true;
}
instance.array[67871] = true;
index = 67936;
while (index <= 67991) : (index += 1) {
instance.array[index] = true;
}
index = 67996;
while (index <= 67997) : (index += 1) {
instance.array[index] = true;
}
index = 67998;
while (index <= 67999) : (index += 1) {
instance.array[index] = true;
}
index = 68000;
while (index <= 68015) : (index += 1) {
instance.array[index] = true;
}
index = 68018;
while (index <= 68063) : (index += 1) {
instance.array[index] = true;
}
instance.array[68064] = true;
index = 68080;
while (index <= 68083) : (index += 1) {
instance.array[index] = true;
}
index = 68085;
while (index <= 68087) : (index += 1) {
instance.array[index] = true;
}
index = 68089;
while (index <= 68117) : (index += 1) {
instance.array[index] = true;
}
index = 68128;
while (index <= 68136) : (index += 1) {
instance.array[index] = true;
}
index = 68144;
while (index <= 68152) : (index += 1) {
instance.array[index] = true;
}
index = 68160;
while (index <= 68188) : (index += 1) {
instance.array[index] = true;
}
index = 68189;
while (index <= 68190) : (index += 1) {
instance.array[index] = true;
}
instance.array[68191] = true;
index = 68192;
while (index <= 68220) : (index += 1) {
instance.array[index] = true;
}
index = 68221;
while (index <= 68223) : (index += 1) {
instance.array[index] = true;
}
index = 68256;
while (index <= 68263) : (index += 1) {
instance.array[index] = true;
}
instance.array[68264] = true;
index = 68265;
while (index <= 68292) : (index += 1) {
instance.array[index] = true;
}
index = 68299;
while (index <= 68303) : (index += 1) {
instance.array[index] = true;
}
index = 68304;
while (index <= 68310) : (index += 1) {
instance.array[index] = true;
}
index = 68320;
while (index <= 68373) : (index += 1) {
instance.array[index] = true;
}
index = 68377;
while (index <= 68383) : (index += 1) {
instance.array[index] = true;
}
index = 68384;
while (index <= 68405) : (index += 1) {
instance.array[index] = true;
}
index = 68408;
while (index <= 68415) : (index += 1) {
instance.array[index] = true;
}
index = 68416;
while (index <= 68434) : (index += 1) {
instance.array[index] = true;
}
index = 68440;
while (index <= 68447) : (index += 1) {
instance.array[index] = true;
}
index = 68448;
while (index <= 68465) : (index += 1) {
instance.array[index] = true;
}
index = 68473;
while (index <= 68476) : (index += 1) {
instance.array[index] = true;
}
index = 68489;
while (index <= 68495) : (index += 1) {
instance.array[index] = true;
}
index = 68576;
while (index <= 68648) : (index += 1) {
instance.array[index] = true;
}
index = 68704;
while (index <= 68754) : (index += 1) {
instance.array[index] = true;
}
index = 68768;
while (index <= 68818) : (index += 1) {
instance.array[index] = true;
}
index = 68826;
while (index <= 68831) : (index += 1) {
instance.array[index] = true;
}
index = 68832;
while (index <= 68867) : (index += 1) {
instance.array[index] = true;
}
index = 68880;
while (index <= 68889) : (index += 1) {
instance.array[index] = true;
}
index = 69184;
while (index <= 69214) : (index += 1) {
instance.array[index] = true;
}
index = 69216;
while (index <= 69257) : (index += 1) {
instance.array[index] = true;
}
instance.array[69261] = true;
index = 69264;
while (index <= 69265) : (index += 1) {
instance.array[index] = true;
}
index = 69344;
while (index <= 69372) : (index += 1) {
instance.array[index] = true;
}
index = 69373;
while (index <= 69382) : (index += 1) {
instance.array[index] = true;
}
instance.array[69383] = true;
index = 69392;
while (index <= 69413) : (index += 1) {
instance.array[index] = true;
}
index = 69425;
while (index <= 69428) : (index += 1) {
instance.array[index] = true;
}
index = 69429;
while (index <= 69433) : (index += 1) {
instance.array[index] = true;
}
index = 69520;
while (index <= 69540) : (index += 1) {
instance.array[index] = true;
}
index = 69541;
while (index <= 69547) : (index += 1) {
instance.array[index] = true;
}
index = 69568;
while (index <= 69590) : (index += 1) {
instance.array[index] = true;
}
instance.array[69600] = true;
instance.array[69602] = true;
index = 69603;
while (index <= 69655) : (index += 1) {
instance.array[index] = true;
}
index = 69671;
while (index <= 69677) : (index += 1) {
instance.array[index] = true;
}
index = 69682;
while (index <= 69701) : (index += 1) {
instance.array[index] = true;
}
index = 69702;
while (index <= 69711) : (index += 1) {
instance.array[index] = true;
}
instance.array[69730] = true;
index = 69731;
while (index <= 69775) : (index += 1) {
instance.array[index] = true;
}
index = 69776;
while (index <= 69778) : (index += 1) {
instance.array[index] = true;
}
index = 69783;
while (index <= 69784) : (index += 1) {
instance.array[index] = true;
}
index = 69787;
while (index <= 69788) : (index += 1) {
instance.array[index] = true;
}
index = 69790;
while (index <= 69793) : (index += 1) {
instance.array[index] = true;
}
index = 69808;
while (index <= 69832) : (index += 1) {
instance.array[index] = true;
}
index = 69840;
while (index <= 69849) : (index += 1) {
instance.array[index] = true;
}
index = 69859;
while (index <= 69894) : (index += 1) {
instance.array[index] = true;
}
instance.array[69900] = true;
index = 69910;
while (index <= 69919) : (index += 1) {
instance.array[index] = true;
}
index = 69920;
while (index <= 69923) : (index += 1) {
instance.array[index] = true;
}
instance.array[69924] = true;
index = 69925;
while (index <= 69926) : (index += 1) {
instance.array[index] = true;
}
instance.array[69927] = true;
index = 69936;
while (index <= 69970) : (index += 1) {
instance.array[index] = true;
}
index = 69972;
while (index <= 69973) : (index += 1) {
instance.array[index] = true;
}
instance.array[69974] = true;
instance.array[69986] = true;
index = 69987;
while (index <= 70034) : (index += 1) {
instance.array[index] = true;
}
index = 70035;
while (index <= 70037) : (index += 1) {
instance.array[index] = true;
}
index = 70047;
while (index <= 70048) : (index += 1) {
instance.array[index] = true;
}
index = 70049;
while (index <= 70052) : (index += 1) {
instance.array[index] = true;
}
index = 70053;
while (index <= 70056) : (index += 1) {
instance.array[index] = true;
}
instance.array[70061] = true;
instance.array[70062] = true;
index = 70064;
while (index <= 70073) : (index += 1) {
instance.array[index] = true;
}
instance.array[70074] = true;
instance.array[70075] = true;
instance.array[70076] = true;
index = 70077;
while (index <= 70079) : (index += 1) {
instance.array[index] = true;
}
index = 70081;
while (index <= 70100) : (index += 1) {
instance.array[index] = true;
}
index = 70112;
while (index <= 70129) : (index += 1) {
instance.array[index] = true;
}
index = 70131;
while (index <= 70155) : (index += 1) {
instance.array[index] = true;
}
index = 70156;
while (index <= 70158) : (index += 1) {
instance.array[index] = true;
}
index = 70162;
while (index <= 70163) : (index += 1) {
instance.array[index] = true;
}
instance.array[70165] = true;
index = 70168;
while (index <= 70173) : (index += 1) {
instance.array[index] = true;
}
index = 70240;
while (index <= 70246) : (index += 1) {
instance.array[index] = true;
}
instance.array[70248] = true;
index = 70250;
while (index <= 70253) : (index += 1) {
instance.array[index] = true;
}
index = 70255;
while (index <= 70269) : (index += 1) {
instance.array[index] = true;
}
index = 70271;
while (index <= 70280) : (index += 1) {
instance.array[index] = true;
}
instance.array[70281] = true;
index = 70288;
while (index <= 70334) : (index += 1) {
instance.array[index] = true;
}
index = 70336;
while (index <= 70338) : (index += 1) {
instance.array[index] = true;
}
index = 70352;
while (index <= 70361) : (index += 1) {
instance.array[index] = true;
}
index = 70370;
while (index <= 70371) : (index += 1) {
instance.array[index] = true;
}
index = 70373;
while (index <= 70380) : (index += 1) {
instance.array[index] = true;
}
index = 70383;
while (index <= 70384) : (index += 1) {
instance.array[index] = true;
}
index = 70387;
while (index <= 70408) : (index += 1) {
instance.array[index] = true;
}
index = 70410;
while (index <= 70416) : (index += 1) {
instance.array[index] = true;
}
index = 70418;
while (index <= 70419) : (index += 1) {
instance.array[index] = true;
}
index = 70421;
while (index <= 70425) : (index += 1) {
instance.array[index] = true;
}
instance.array[70429] = true;
instance.array[70431] = true;
index = 70433;
while (index <= 70436) : (index += 1) {
instance.array[index] = true;
}
index = 70439;
while (index <= 70440) : (index += 1) {
instance.array[index] = true;
}
index = 70443;
while (index <= 70445) : (index += 1) {
instance.array[index] = true;
}
instance.array[70448] = true;
index = 70461;
while (index <= 70465) : (index += 1) {
instance.array[index] = true;
}
index = 70466;
while (index <= 70467) : (index += 1) {
instance.array[index] = true;
}
index = 70624;
while (index <= 70676) : (index += 1) {
instance.array[index] = true;
}
index = 70677;
while (index <= 70679) : (index += 1) {
instance.array[index] = true;
}
index = 70688;
while (index <= 70689) : (index += 1) {
instance.array[index] = true;
}
instance.array[70693] = true;
index = 70695;
while (index <= 70698) : (index += 1) {
instance.array[index] = true;
}
index = 70699;
while (index <= 70703) : (index += 1) {
instance.array[index] = true;
}
index = 70704;
while (index <= 70713) : (index += 1) {
instance.array[index] = true;
}
index = 70714;
while (index <= 70715) : (index += 1) {
instance.array[index] = true;
}
instance.array[70717] = true;
index = 70719;
while (index <= 70721) : (index += 1) {
instance.array[index] = true;
}
index = 70752;
while (index <= 70799) : (index += 1) {
instance.array[index] = true;
}
index = 70801;
while (index <= 70802) : (index += 1) {
instance.array[index] = true;
}
instance.array[70809] = true;
index = 70811;
while (index <= 70812) : (index += 1) {
instance.array[index] = true;
}
instance.array[70814] = true;
instance.array[70817] = true;
index = 70820;
while (index <= 70821) : (index += 1) {
instance.array[index] = true;
}
instance.array[70822] = true;
instance.array[70823] = true;
index = 70832;
while (index <= 70841) : (index += 1) {
instance.array[index] = true;
}
index = 71008;
while (index <= 71054) : (index += 1) {
instance.array[index] = true;
}
index = 71056;
while (index <= 71057) : (index += 1) {
instance.array[index] = true;
}
index = 71064;
while (index <= 71067) : (index += 1) {
instance.array[index] = true;
}
instance.array[71070] = true;
index = 71073;
while (index <= 71095) : (index += 1) {
instance.array[index] = true;
}
index = 71096;
while (index <= 71099) : (index += 1) {
instance.array[index] = true;
}
index = 71136;
while (index <= 71183) : (index += 1) {
instance.array[index] = true;
}
index = 71184;
while (index <= 71186) : (index += 1) {
instance.array[index] = true;
}
index = 71195;
while (index <= 71196) : (index += 1) {
instance.array[index] = true;
}
instance.array[71198] = true;
index = 71201;
while (index <= 71203) : (index += 1) {
instance.array[index] = true;
}
instance.array[71204] = true;
index = 71216;
while (index <= 71225) : (index += 1) {
instance.array[index] = true;
}
index = 71232;
while (index <= 71244) : (index += 1) {
instance.array[index] = true;
}
index = 71264;
while (index <= 71306) : (index += 1) {
instance.array[index] = true;
}
instance.array[71308] = true;
index = 71310;
while (index <= 71311) : (index += 1) {
instance.array[index] = true;
}
instance.array[71318] = true;
instance.array[71320] = true;
index = 71328;
while (index <= 71337) : (index += 1) {
instance.array[index] = true;
}
index = 71392;
while (index <= 71418) : (index += 1) {
instance.array[index] = true;
}
index = 71424;
while (index <= 71425) : (index += 1) {
instance.array[index] = true;
}
instance.array[71430] = true;
index = 71440;
while (index <= 71449) : (index += 1) {
instance.array[index] = true;
}
index = 71450;
while (index <= 71451) : (index += 1) {
instance.array[index] = true;
}
index = 71452;
while (index <= 71454) : (index += 1) {
instance.array[index] = true;
}
instance.array[71455] = true;
index = 71648;
while (index <= 71691) : (index += 1) {
instance.array[index] = true;
}
index = 71692;
while (index <= 71694) : (index += 1) {
instance.array[index] = true;
}
instance.array[71704] = true;
instance.array[71707] = true;
index = 71808;
while (index <= 71871) : (index += 1) {
instance.array[index] = true;
}
index = 71872;
while (index <= 71881) : (index += 1) {
instance.array[index] = true;
}
index = 71882;
while (index <= 71890) : (index += 1) {
instance.array[index] = true;
}
index = 71903;
while (index <= 71910) : (index += 1) {
instance.array[index] = true;
}
instance.array[71913] = true;
index = 71916;
while (index <= 71923) : (index += 1) {
instance.array[index] = true;
}
index = 71925;
while (index <= 71926) : (index += 1) {
instance.array[index] = true;
}
index = 71928;
while (index <= 71951) : (index += 1) {
instance.array[index] = true;
}
index = 71953;
while (index <= 71957) : (index += 1) {
instance.array[index] = true;
}
index = 71959;
while (index <= 71960) : (index += 1) {
instance.array[index] = true;
}
instance.array[71965] = true;
instance.array[71967] = true;
instance.array[71968] = true;
instance.array[71969] = true;
instance.array[71970] = true;
index = 71972;
while (index <= 71974) : (index += 1) {
instance.array[index] = true;
}
index = 71984;
while (index <= 71993) : (index += 1) {
instance.array[index] = true;
}
index = 72064;
while (index <= 72071) : (index += 1) {
instance.array[index] = true;
}
index = 72074;
while (index <= 72112) : (index += 1) {
instance.array[index] = true;
}
index = 72113;
while (index <= 72115) : (index += 1) {
instance.array[index] = true;
}
index = 72124;
while (index <= 72127) : (index += 1) {
instance.array[index] = true;
}
instance.array[72129] = true;
instance.array[72130] = true;
instance.array[72131] = true;
instance.array[72132] = true;
instance.array[72160] = true;
index = 72171;
while (index <= 72210) : (index += 1) {
instance.array[index] = true;
}
instance.array[72217] = true;
instance.array[72218] = true;
index = 72223;
while (index <= 72230) : (index += 1) {
instance.array[index] = true;
}
instance.array[72240] = true;
index = 72247;
while (index <= 72248) : (index += 1) {
instance.array[index] = true;
}
index = 72252;
while (index <= 72297) : (index += 1) {
instance.array[index] = true;
}
instance.array[72311] = true;
index = 72314;
while (index <= 72316) : (index += 1) {
instance.array[index] = true;
}
instance.array[72317] = true;
index = 72318;
while (index <= 72322) : (index += 1) {
instance.array[index] = true;
}
index = 72352;
while (index <= 72408) : (index += 1) {
instance.array[index] = true;
}
index = 72672;
while (index <= 72680) : (index += 1) {
instance.array[index] = true;
}
index = 72682;
while (index <= 72718) : (index += 1) {
instance.array[index] = true;
}
instance.array[72719] = true;
instance.array[72734] = true;
instance.array[72736] = true;
index = 72737;
while (index <= 72741) : (index += 1) {
instance.array[index] = true;
}
index = 72752;
while (index <= 72761) : (index += 1) {
instance.array[index] = true;
}
index = 72762;
while (index <= 72780) : (index += 1) {
instance.array[index] = true;
}
index = 72784;
while (index <= 72785) : (index += 1) {
instance.array[index] = true;
}
index = 72786;
while (index <= 72815) : (index += 1) {
instance.array[index] = true;
}
instance.array[72841] = true;
instance.array[72849] = true;
instance.array[72852] = true;
index = 72928;
while (index <= 72934) : (index += 1) {
instance.array[index] = true;
}
index = 72936;
while (index <= 72937) : (index += 1) {
instance.array[index] = true;
}
index = 72939;
while (index <= 72976) : (index += 1) {
instance.array[index] = true;
}
instance.array[72998] = true;
index = 73008;
while (index <= 73017) : (index += 1) {
instance.array[index] = true;
}
index = 73024;
while (index <= 73029) : (index += 1) {
instance.array[index] = true;
}
index = 73031;
while (index <= 73032) : (index += 1) {
instance.array[index] = true;
}
index = 73034;
while (index <= 73065) : (index += 1) {
instance.array[index] = true;
}
index = 73066;
while (index <= 73070) : (index += 1) {
instance.array[index] = true;
}
index = 73075;
while (index <= 73076) : (index += 1) {
instance.array[index] = true;
}
instance.array[73078] = true;
instance.array[73080] = true;
index = 73088;
while (index <= 73097) : (index += 1) {
instance.array[index] = true;
}
index = 73408;
while (index <= 73426) : (index += 1) {
instance.array[index] = true;
}
index = 73429;
while (index <= 73430) : (index += 1) {
instance.array[index] = true;
}
index = 73431;
while (index <= 73432) : (index += 1) {
instance.array[index] = true;
}
instance.array[73616] = true;
index = 73632;
while (index <= 73652) : (index += 1) {
instance.array[index] = true;
}
index = 73653;
while (index <= 73660) : (index += 1) {
instance.array[index] = true;
}
index = 73661;
while (index <= 73664) : (index += 1) {
instance.array[index] = true;
}
index = 73665;
while (index <= 73681) : (index += 1) {
instance.array[index] = true;
}
instance.array[73695] = true;
index = 73696;
while (index <= 74617) : (index += 1) {
instance.array[index] = true;
}
index = 74720;
while (index <= 74830) : (index += 1) {
instance.array[index] = true;
}
index = 74832;
while (index <= 74836) : (index += 1) {
instance.array[index] = true;
}
index = 74848;
while (index <= 75043) : (index += 1) {
instance.array[index] = true;
}
index = 77792;
while (index <= 78862) : (index += 1) {
instance.array[index] = true;
}
index = 82912;
while (index <= 83494) : (index += 1) {
instance.array[index] = true;
}
index = 92128;
while (index <= 92696) : (index += 1) {
instance.array[index] = true;
}
index = 92704;
while (index <= 92734) : (index += 1) {
instance.array[index] = true;
}
index = 92736;
while (index <= 92745) : (index += 1) {
instance.array[index] = true;
}
index = 92750;
while (index <= 92751) : (index += 1) {
instance.array[index] = true;
}
index = 92848;
while (index <= 92877) : (index += 1) {
instance.array[index] = true;
}
instance.array[92885] = true;
index = 92896;
while (index <= 92943) : (index += 1) {
instance.array[index] = true;
}
index = 92951;
while (index <= 92955) : (index += 1) {
instance.array[index] = true;
}
index = 92956;
while (index <= 92959) : (index += 1) {
instance.array[index] = true;
}
index = 92960;
while (index <= 92963) : (index += 1) {
instance.array[index] = true;
}
instance.array[92964] = true;
instance.array[92965] = true;
index = 92976;
while (index <= 92985) : (index += 1) {
instance.array[index] = true;
}
index = 92987;
while (index <= 92993) : (index += 1) {
instance.array[index] = true;
}
index = 92995;
while (index <= 93015) : (index += 1) {
instance.array[index] = true;
}
index = 93021;
while (index <= 93039) : (index += 1) {
instance.array[index] = true;
}
index = 93728;
while (index <= 93791) : (index += 1) {
instance.array[index] = true;
}
index = 93792;
while (index <= 93814) : (index += 1) {
instance.array[index] = true;
}
index = 93815;
while (index <= 93818) : (index += 1) {
instance.array[index] = true;
}
index = 93920;
while (index <= 93994) : (index += 1) {
instance.array[index] = true;
}
instance.array[94000] = true;
index = 94001;
while (index <= 94055) : (index += 1) {
instance.array[index] = true;
}
index = 94067;
while (index <= 94079) : (index += 1) {
instance.array[index] = true;
}
index = 94144;
while (index <= 94145) : (index += 1) {
instance.array[index] = true;
}
instance.array[94146] = true;
instance.array[94147] = true;
index = 94160;
while (index <= 94161) : (index += 1) {
instance.array[index] = true;
}
index = 94176;
while (index <= 100311) : (index += 1) {
instance.array[index] = true;
}
index = 100320;
while (index <= 101557) : (index += 1) {
instance.array[index] = true;
}
index = 101600;
while (index <= 101608) : (index += 1) {
instance.array[index] = true;
}
index = 110560;
while (index <= 110846) : (index += 1) {
instance.array[index] = true;
}
index = 110896;
while (index <= 110898) : (index += 1) {
instance.array[index] = true;
}
index = 110916;
while (index <= 110919) : (index += 1) {
instance.array[index] = true;
}
index = 110928;
while (index <= 111323) : (index += 1) {
instance.array[index] = true;
}
index = 113632;
while (index <= 113738) : (index += 1) {
instance.array[index] = true;
}
index = 113744;
while (index <= 113756) : (index += 1) {
instance.array[index] = true;
}
index = 113760;
while (index <= 113768) : (index += 1) {
instance.array[index] = true;
}
index = 113776;
while (index <= 113785) : (index += 1) {
instance.array[index] = true;
}
instance.array[113788] = true;
instance.array[113791] = true;
index = 118752;
while (index <= 118997) : (index += 1) {
instance.array[index] = true;
}
index = 119008;
while (index <= 119046) : (index += 1) {
instance.array[index] = true;
}
index = 119049;
while (index <= 119108) : (index += 1) {
instance.array[index] = true;
}
instance.array[119110] = true;
index = 119114;
while (index <= 119116) : (index += 1) {
instance.array[index] = true;
}
instance.array[119117] = true;
index = 119139;
while (index <= 119140) : (index += 1) {
instance.array[index] = true;
}
index = 119148;
while (index <= 119177) : (index += 1) {
instance.array[index] = true;
}
index = 119182;
while (index <= 119240) : (index += 1) {
instance.array[index] = true;
}
index = 119264;
while (index <= 119329) : (index += 1) {
instance.array[index] = true;
}
instance.array[119333] = true;
index = 119488;
while (index <= 119507) : (index += 1) {
instance.array[index] = true;
}
index = 119520;
while (index <= 119606) : (index += 1) {
instance.array[index] = true;
}
index = 119616;
while (index <= 119640) : (index += 1) {
instance.array[index] = true;
}
index = 119776;
while (index <= 119860) : (index += 1) {
instance.array[index] = true;
}
index = 119862;
while (index <= 119932) : (index += 1) {
instance.array[index] = true;
}
index = 119934;
while (index <= 119935) : (index += 1) {
instance.array[index] = true;
}
instance.array[119938] = true;
index = 119941;
while (index <= 119942) : (index += 1) {
instance.array[index] = true;
}
index = 119945;
while (index <= 119948) : (index += 1) {
instance.array[index] = true;
}
index = 119950;
while (index <= 119961) : (index += 1) {
instance.array[index] = true;
}
instance.array[119963] = true;
index = 119965;
while (index <= 119971) : (index += 1) {
instance.array[index] = true;
}
index = 119973;
while (index <= 120037) : (index += 1) {
instance.array[index] = true;
}
index = 120039;
while (index <= 120042) : (index += 1) {
instance.array[index] = true;
}
index = 120045;
while (index <= 120052) : (index += 1) {
instance.array[index] = true;
}
index = 120054;
while (index <= 120060) : (index += 1) {
instance.array[index] = true;
}
index = 120062;
while (index <= 120089) : (index += 1) {
instance.array[index] = true;
}
index = 120091;
while (index <= 120094) : (index += 1) {
instance.array[index] = true;
}
index = 120096;
while (index <= 120100) : (index += 1) {
instance.array[index] = true;
}
instance.array[120102] = true;
index = 120106;
while (index <= 120112) : (index += 1) {
instance.array[index] = true;
}
index = 120114;
while (index <= 120453) : (index += 1) {
instance.array[index] = true;
}
index = 120456;
while (index <= 120480) : (index += 1) {
instance.array[index] = true;
}
instance.array[120481] = true;
index = 120482;
while (index <= 120506) : (index += 1) {
instance.array[index] = true;
}
instance.array[120507] = true;
index = 120508;
while (index <= 120538) : (index += 1) {
instance.array[index] = true;
}
instance.array[120539] = true;
index = 120540;
while (index <= 120564) : (index += 1) {
instance.array[index] = true;
}
instance.array[120565] = true;
index = 120566;
while (index <= 120596) : (index += 1) {
instance.array[index] = true;
}
instance.array[120597] = true;
index = 120598;
while (index <= 120622) : (index += 1) {
instance.array[index] = true;
}
instance.array[120623] = true;
index = 120624;
while (index <= 120654) : (index += 1) {
instance.array[index] = true;
}
instance.array[120655] = true;
index = 120656;
while (index <= 120680) : (index += 1) {
instance.array[index] = true;
}
instance.array[120681] = true;
index = 120682;
while (index <= 120712) : (index += 1) {
instance.array[index] = true;
}
instance.array[120713] = true;
index = 120714;
while (index <= 120738) : (index += 1) {
instance.array[index] = true;
}
instance.array[120739] = true;
index = 120740;
while (index <= 120747) : (index += 1) {
instance.array[index] = true;
}
index = 120750;
while (index <= 120799) : (index += 1) {
instance.array[index] = true;
}
index = 120800;
while (index <= 121311) : (index += 1) {
instance.array[index] = true;
}
index = 121367;
while (index <= 121370) : (index += 1) {
instance.array[index] = true;
}
index = 121421;
while (index <= 121428) : (index += 1) {
instance.array[index] = true;
}
index = 121430;
while (index <= 121443) : (index += 1) {
instance.array[index] = true;
}
index = 121445;
while (index <= 121446) : (index += 1) {
instance.array[index] = true;
}
index = 121447;
while (index <= 121451) : (index += 1) {
instance.array[index] = true;
}
index = 123104;
while (index <= 123148) : (index += 1) {
instance.array[index] = true;
}
index = 123159;
while (index <= 123165) : (index += 1) {
instance.array[index] = true;
}
index = 123168;
while (index <= 123177) : (index += 1) {
instance.array[index] = true;
}
instance.array[123182] = true;
instance.array[123183] = true;
index = 123552;
while (index <= 123595) : (index += 1) {
instance.array[index] = true;
}
index = 123600;
while (index <= 123609) : (index += 1) {
instance.array[index] = true;
}
instance.array[123615] = true;
index = 124896;
while (index <= 125092) : (index += 1) {
instance.array[index] = true;
}
index = 125095;
while (index <= 125103) : (index += 1) {
instance.array[index] = true;
}
index = 125152;
while (index <= 125219) : (index += 1) {
instance.array[index] = true;
}
instance.array[125227] = true;
index = 125232;
while (index <= 125241) : (index += 1) {
instance.array[index] = true;
}
index = 125246;
while (index <= 125247) : (index += 1) {
instance.array[index] = true;
}
index = 126033;
while (index <= 126091) : (index += 1) {
instance.array[index] = true;
}
instance.array[126092] = true;
index = 126093;
while (index <= 126095) : (index += 1) {
instance.array[index] = true;
}
instance.array[126096] = true;
index = 126097;
while (index <= 126100) : (index += 1) {
instance.array[index] = true;
}
index = 126177;
while (index <= 126221) : (index += 1) {
instance.array[index] = true;
}
instance.array[126222] = true;
index = 126223;
while (index <= 126237) : (index += 1) {
instance.array[index] = true;
}
index = 126432;
while (index <= 126435) : (index += 1) {
instance.array[index] = true;
}
index = 126437;
while (index <= 126463) : (index += 1) {
instance.array[index] = true;
}
index = 126465;
while (index <= 126466) : (index += 1) {
instance.array[index] = true;
}
instance.array[126468] = true;
instance.array[126471] = true;
index = 126473;
while (index <= 126482) : (index += 1) {
instance.array[index] = true;
}
index = 126484;
while (index <= 126487) : (index += 1) {
instance.array[index] = true;
}
instance.array[126489] = true;
instance.array[126491] = true;
instance.array[126498] = true;
instance.array[126503] = true;
instance.array[126505] = true;
instance.array[126507] = true;
index = 126509;
while (index <= 126511) : (index += 1) {
instance.array[index] = true;
}
index = 126513;
while (index <= 126514) : (index += 1) {
instance.array[index] = true;
}
instance.array[126516] = true;
instance.array[126519] = true;
instance.array[126521] = true;
instance.array[126523] = true;
instance.array[126525] = true;
instance.array[126527] = true;
index = 126529;
while (index <= 126530) : (index += 1) {
instance.array[index] = true;
}
instance.array[126532] = true;
index = 126535;
while (index <= 126538) : (index += 1) {
instance.array[index] = true;
}
index = 126540;
while (index <= 126546) : (index += 1) {
instance.array[index] = true;
}
index = 126548;
while (index <= 126551) : (index += 1) {
instance.array[index] = true;
}
index = 126553;
while (index <= 126556) : (index += 1) {
instance.array[index] = true;
}
instance.array[126558] = true;
index = 126560;
while (index <= 126569) : (index += 1) {
instance.array[index] = true;
}
index = 126571;
while (index <= 126587) : (index += 1) {
instance.array[index] = true;
}
index = 126593;
while (index <= 126595) : (index += 1) {
instance.array[index] = true;
}
index = 126597;
while (index <= 126601) : (index += 1) {
instance.array[index] = true;
}
index = 126603;
while (index <= 126619) : (index += 1) {
instance.array[index] = true;
}
index = 126672;
while (index <= 126673) : (index += 1) {
instance.array[index] = true;
}
index = 126944;
while (index <= 126987) : (index += 1) {
instance.array[index] = true;
}
index = 126992;
while (index <= 127091) : (index += 1) {
instance.array[index] = true;
}
index = 127104;
while (index <= 127118) : (index += 1) {
instance.array[index] = true;
}
index = 127121;
while (index <= 127135) : (index += 1) {
instance.array[index] = true;
}
index = 127137;
while (index <= 127151) : (index += 1) {
instance.array[index] = true;
}
index = 127153;
while (index <= 127189) : (index += 1) {
instance.array[index] = true;
}
index = 127200;
while (index <= 127212) : (index += 1) {
instance.array[index] = true;
}
index = 127213;
while (index <= 127373) : (index += 1) {
instance.array[index] = true;
}
index = 127430;
while (index <= 127458) : (index += 1) {
instance.array[index] = true;
}
index = 127472;
while (index <= 127515) : (index += 1) {
instance.array[index] = true;
}
index = 127520;
while (index <= 127528) : (index += 1) {
instance.array[index] = true;
}
index = 127536;
while (index <= 127537) : (index += 1) {
instance.array[index] = true;
}
index = 127552;
while (index <= 127557) : (index += 1) {
instance.array[index] = true;
}
index = 127712;
while (index <= 127962) : (index += 1) {
instance.array[index] = true;
}
index = 127963;
while (index <= 127967) : (index += 1) {
instance.array[index] = true;
}
index = 127968;
while (index <= 128695) : (index += 1) {
instance.array[index] = true;
}
index = 128704;
while (index <= 128716) : (index += 1) {
instance.array[index] = true;
}
index = 128720;
while (index <= 128732) : (index += 1) {
instance.array[index] = true;
}
index = 128736;
while (index <= 128851) : (index += 1) {
instance.array[index] = true;
}
index = 128864;
while (index <= 128952) : (index += 1) {
instance.array[index] = true;
}
index = 128960;
while (index <= 128971) : (index += 1) {
instance.array[index] = true;
}
index = 128992;
while (index <= 129003) : (index += 1) {
instance.array[index] = true;
}
index = 129008;
while (index <= 129063) : (index += 1) {
instance.array[index] = true;
}
index = 129072;
while (index <= 129081) : (index += 1) {
instance.array[index] = true;
}
index = 129088;
while (index <= 129127) : (index += 1) {
instance.array[index] = true;
}
index = 129136;
while (index <= 129165) : (index += 1) {
instance.array[index] = true;
}
index = 129168;
while (index <= 129169) : (index += 1) {
instance.array[index] = true;
}
index = 129248;
while (index <= 129368) : (index += 1) {
instance.array[index] = true;
}
index = 129370;
while (index <= 129451) : (index += 1) {
instance.array[index] = true;
}
index = 129453;
while (index <= 129587) : (index += 1) {
instance.array[index] = true;
}
index = 129600;
while (index <= 129613) : (index += 1) {
instance.array[index] = true;
}
index = 129616;
while (index <= 129620) : (index += 1) {
instance.array[index] = true;
}
index = 129624;
while (index <= 129626) : (index += 1) {
instance.array[index] = true;
}
index = 129632;
while (index <= 129638) : (index += 1) {
instance.array[index] = true;
}
index = 129648;
while (index <= 129672) : (index += 1) {
instance.array[index] = true;
}
index = 129680;
while (index <= 129686) : (index += 1) {
instance.array[index] = true;
}
index = 129696;
while (index <= 129698) : (index += 1) {
instance.array[index] = true;
}
index = 129712;
while (index <= 129718) : (index += 1) {
instance.array[index] = true;
}
index = 129760;
while (index <= 129906) : (index += 1) {
instance.array[index] = true;
}
index = 129908;
while (index <= 129962) : (index += 1) {
instance.array[index] = true;
}
index = 130000;
while (index <= 130009) : (index += 1) {
instance.array[index] = true;
}
index = 131040;
while (index <= 173757) : (index += 1) {
instance.array[index] = true;
}
index = 173792;
while (index <= 177940) : (index += 1) {
instance.array[index] = true;
}
index = 177952;
while (index <= 178173) : (index += 1) {
instance.array[index] = true;
}
index = 178176;
while (index <= 183937) : (index += 1) {
instance.array[index] = true;
}
index = 183952;
while (index <= 191424) : (index += 1) {
instance.array[index] = true;
}
index = 194528;
while (index <= 195069) : (index += 1) {
instance.array[index] = true;
}
index = 196576;
while (index <= 201514) : (index += 1) {
instance.array[index] = true;
}
// Autogenerated table for struct GraphemeBase (code point kind: Grapheme_Base).
return instance;
}
pub fn deinit(self: *GraphemeBase) void {
self.allocator.free(self.array);
}
// isGraphemeBase checks if cp is of the kind Grapheme_Base.
pub fn isGraphemeBase(self: GraphemeBase, cp: u21) bool {
if (cp < self.lo or cp > self.hi) return false;
const index = cp - self.lo;
return if (index >= self.array.len) false else self.array[index];
} | src/components/autogen/DerivedCoreProperties/GraphemeBase.zig |
const std = @import("std");
const assert = std.debug.assert;
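/// Selects how Layer.feedForward clamps each weighted input term: ReLU clamps
/// the term to [0, maxInt(OutputType)], while Identity only saturates it to
/// OutputType's representable range.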
pub const ActivationFunction = enum {
ReLU,
Identity,
};
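/// A fully connected, integer-quantized layer with comptime-known dimensions.
/// As written, weights are indexed [output][input], biases are indexed per
/// input, and the activation/saturation is applied to every weighted term
/// individually before it is accumulated into the output neuron.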
pub fn Layer(comptime InputType_: type, comptime inputs_size_: usize, comptime OutputType_: type, comptime outputs_size_: usize, comptime activation_function: ActivationFunction) type {
return struct {
const SelfType = @This();
const InputType: type = InputType_;
const inputs_size: usize = inputs_size_;
const OutputType: type = OutputType_;
const outputs_size: usize = outputs_size_;
weights: [outputs_size_][inputs_size_]SelfType.InputType = undefined,
biases: [inputs_size_]SelfType.InputType = undefined,
pub fn feedForward(self: *const SelfType, inputs: [*]SelfType.InputType, outputs: [*]SelfType.OutputType) void {
comptime var neuron_index: usize = 0;
inline while (neuron_index < outputs_size_) : (neuron_index += 1) {
var input_index: usize = 0;
var neuron_result: SelfType.OutputType = 0;
while (input_index < inputs_size_) : (input_index += 1) {
var input_result: SelfType.InputType = self.weights[neuron_index][input_index] * inputs[input_index] + self.biases[input_index];
input_result = switch (activation_function) {
.ReLU => std.math.min(@as(SelfType.InputType, std.math.maxInt(SelfType.OutputType)), std.math.max(0, input_result)),
.Identity => std.math.max(@as(SelfType.InputType, std.math.minInt(SelfType.OutputType)), std.math.min(@as(SelfType.InputType, std.math.maxInt(SelfType.OutputType)), input_result)),
};
neuron_result += @intCast(SelfType.OutputType, input_result);
}
outputs[neuron_index] = neuron_result;
}
}
};
}
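/// Chains a comptime tuple of Layer values: each layer's outputs are buffered
/// on the stack and fed to the next layer, and the final layer writes directly
/// into `outputs`.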
pub fn Network(comptime layer_list: anytype) type {
return struct {
const SelfType = @This();
const InputType = @TypeOf(layer_list[0]).InputType;
const inputs_size = @TypeOf(layer_list[0]).inputs_size;
const OutputType = @TypeOf(layer_list[layer_list.len - 1]).OutputType;
const outputs_size = @TypeOf(layer_list[layer_list.len - 1]).outputs_size;
layers: @TypeOf(layer_list) = layer_list,
pub fn feedForward(self: *const SelfType, inputs: [*]SelfType.InputType, outputs: [*]SelfType.OutputType) void {
comptime assert(self.layers.len > 0);
var layer_inputs = inputs;
comptime var layer_index: usize = 0;
inline while (layer_index < self.layers.len - 1) : (layer_index += 1) {
const layer = self.layers[layer_index];
var layer_outputs: [@TypeOf(layer).outputs_size]InputType = undefined;
layer.feedForward(layer_inputs, &layer_outputs);
layer_inputs = &layer_outputs;
}
self.layers[layer_index].feedForward(layer_inputs, outputs);
}
};
}
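/// Runs the given networks side by side on disjoint slices of one input
/// buffer, each writing to its own slice of the output buffer. The group's
/// total input/output sizes are the sums over all member networks.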
pub fn ParallelNetworkGroup(comptime network_list: anytype) type {
return struct {
const SelfType = @This();
const InputType = @TypeOf(network_list[0]).InputType;
const inputs_size = comptime calculateInputsSize();
const OutputType = @TypeOf(network_list[0]).OutputType;
const outputs_size = comptime calculateOutputsSize();
networks: @TypeOf(network_list) = network_list,
pub fn feedForward(self: *const SelfType, inputs: [*]SelfType.InputType, outputs: [*]SelfType.OutputType) void {
comptime assert(self.networks.len > 0);
comptime var inputs_index = 0;
comptime var outputs_index = 0;
comptime var network_index: usize = 0;
inline while (network_index < network_list.len) : (network_index += 1) {
const network = &self.networks[network_index];
comptime assert(@TypeOf(network.*).InputType == SelfType.InputType);
comptime assert(@TypeOf(network.*).OutputType == SelfType.OutputType);
network.feedForward(inputs + inputs_index, outputs + outputs_index);
inputs_index += @TypeOf(network.layers[0]).inputs_size;
outputs_index += @TypeOf(network.layers[network.layers.len - 1]).outputs_size;
}
}
fn calculateInputsSize() usize {
var inputs_size_counter: usize = 0;
comptime var network_index = 0;
inline while (network_index < network_list.len) : (network_index += 1) {
inputs_size_counter += @TypeOf(network_list[network_index]).inputs_size;
}
return inputs_size_counter;
}
fn calculateOutputsSize() usize {
var outputs_size_counter: usize = 0;
comptime var network_index: usize = 0;
inline while (network_index < network_list.len) : (network_index += 1) {
outputs_size_counter += @TypeOf(network_list[network_index]).outputs_size;
}
return outputs_size_counter;
}
};
}
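/// Chains whole networks the way Network chains layers: the output of each
/// network is fed as the input of the next.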
pub fn SerialNetworkGroup(comptime network_list: anytype) type {
return struct {
const SelfType = @This();
const FirstNetworkType = @TypeOf(network_list[0]);
const InputType = FirstNetworkType.InputType;
const inputs_size = FirstNetworkType.inputs_size;
const LastNetworkType = @TypeOf(network_list[network_list.len - 1]);
const OutputType = LastNetworkType.OutputType;
const outputs_size = LastNetworkType.outputs_size;
networks: @TypeOf(network_list) = network_list,
pub fn feedForward(self: *const SelfType, inputs: [*]SelfType.InputType, outputs: [*]SelfType.OutputType) void {
comptime assert(self.networks.len > 0);
var network_inputs = inputs;
comptime var network_index: usize = 0;
inline while (network_index < network_list.len - 1) : (network_index += 1) {
const network = &self.networks[network_index];
const network_type = @TypeOf(network.*);
var network_outputs: [network_type.outputs_size]network_type.OutputType = undefined;
network.feedForward(network_inputs, &network_outputs);
network_inputs = &network_outputs;
}
self.networks[network_index].feedForward(network_inputs, outputs);
}
};
}
//const possible_king_squares = 64;
//const possible_non_king_piece_color_squares = 5 * 2 * 64; // No +1 for the captured piece from the Shogi NNUE implementation
//const halfkp_size = possible_king_squares * possible_non_king_piece_color_squares;
//
//const WhiteInputLayer = Layer(i16, halfkp_size, i8, halfkp_size);
//const WhiteInputAffineLayer = Layer(i8, halfkp_size, i8, 256);
//const white_input_network = Network(.{ readWhiteInputLayer(), readWhiteInputAffineLayer() }){};
//
//const BlackInputLayer = Layer(i16, halfkp_size, i8, halfkp_size);
//const BlackAffineLayer = Layer(i8, halfkp_size, i8, 256);
//const black_input_network = Network(.{ readBlackInputLayer(), readBlackInputAffineLayer() }){};
//
//const board_input_network = ParallelNetworkGroup(.{ white_input_network, black_input_network }){};
//
//const HiddenLayer1 = Layer(i8, 2 * 256, i8, 32 * 32);
//const HiddenLayer2 = Layer(i8, 32 * 32, i8, 32);
//const OutputLayer = Layer(i8, 32, i32, 1);
//const evaluation_hidden_network = Network(.{ readHiddenLayer1(), readHiddenLayer2(), readOutputLayer() }){};
//
//pub const halfkp_2x256_32_32_network = SerialNetworkGroup(.{ board_input_network, evaluation_hidden_network });
test "Layer Identity Test" {
const layer = Layer(i16, 2, i8, 2, .Identity){
.weights = [2][2]i16{
[2]i16{ -50, 4 },
[2]i16{ 3, 4 },
},
.biases = [2]i16{ 10, 50 },
};
var inputs = [2]i16{ 2, 3 };
var outputs: [2]i8 = undefined;
layer.feedForward(&inputs, &outputs);
assert(outputs[0] == -28);
assert(outputs[1] == 78);
}
test "Layer ReLU Test" {
const layer = Layer(i16, 2, i8, 2, .ReLU){
.weights = [2][2]i16{
[2]i16{ -50, 4 },
[2]i16{ 3, 4 },
},
.biases = [2]i16{ 10, 50 },
};
var inputs = [2]i16{ 2, 3 };
var outputs: [2]i8 = undefined;
layer.feedForward(&inputs, &outputs);
assert(outputs[0] == 62);
assert(outputs[1] == 78);
}
test "Network Test" {
const layer1 = Layer(i16, 2, i16, 2, .Identity){
.weights = [2][2]i16{
[2]i16{ 2, 4 },
[2]i16{ 3, 4 },
},
.biases = [2]i16{ 10, 50 },
};
const layer2 = Layer(i16, 2, i16, 1, .Identity){
.weights = [1][2]i16{
[2]i16{ 3, 5 },
},
.biases = [2]i16{ 1, 2 },
};
const network = Network(.{ layer1, layer2 }){};
var inputs = [2]i16{ 2, 3 };
var outputs: [1]i16 = undefined;
network.feedForward(&inputs, &outputs);
assert(outputs[0] == 621);
}
test "Parallel Network Test" {
const layer1 = Layer(i16, 2, i16, 2, .Identity){
.weights = [2][2]i16{
[2]i16{ 2, 4 },
[2]i16{ 3, 4 },
},
.biases = [2]i16{ 10, 50 },
};
const layer2n1 = Layer(i16, 2, i16, 1, .Identity){
.weights = [1][2]i16{
[2]i16{ 3, 5 },
},
.biases = [2]i16{ 1, 2 },
};
const layer2n2 = Layer(i16, 2, i16, 1, .Identity){
.weights = [1][2]i16{
[2]i16{ 3, 6 },
},
.biases = [2]i16{ 1, 2 },
};
const network1 = Network(.{ layer1, layer2n1 }){};
const network2 = Network(.{ layer1, layer2n2 }){};
const parallel_networks = ParallelNetworkGroup(.{ network1, network2 }){};
var inputs = [4]i16{ 2, 3, 2, 3 };
var outputs: [2]i16 = undefined;
parallel_networks.feedForward(&inputs, &outputs);
assert(outputs[0] == 621);
assert(outputs[1] == 699);
}
test "Serial Network Test" {
const layer1 = Layer(i16, 2, i16, 2, .Identity){
.weights = [2][2]i16{
[2]i16{ 2, 4 },
[2]i16{ 3, 4 },
},
.biases = [2]i16{ 10, 50 },
};
const layer2 = Layer(i16, 2, i16, 1, .Identity){
.weights = [1][2]i16{
[2]i16{ 3, 5 },
},
.biases = [2]i16{ 1, 2 },
};
const network1 = Network(.{layer1}){};
const network2 = Network(.{layer2}){};
const serial_networks = SerialNetworkGroup(.{ network1, network2 }){};
var inputs = [2]i16{ 2, 3 };
var outputs: [1]i16 = undefined;
serial_networks.feedForward(&inputs, &outputs);
assert(outputs[0] == 621);
}
test "Composed Parallel and Serial Networks Test" {
const layer1g1 = Layer(i16, 2, i16, 2, .Identity){
.weights = [2][2]i16{
[2]i16{ 2, 4 },
[2]i16{ 3, 4 },
},
.biases = [2]i16{ 10, 50 },
};
const layer2n1g1 = Layer(i16, 2, i16, 1, .Identity){
.weights = [1][2]i16{
[2]i16{ 3, 5 },
},
.biases = [2]i16{ 1, 2 },
};
const layer2n2g1 = Layer(i16, 2, i16, 1, .Identity){
.weights = [1][2]i16{
[2]i16{ 3, 6 },
},
.biases = [2]i16{ 1, 2 },
};
const network1g1 = Network(.{ layer1g1, layer2n1g1 }){};
const network2g1 = Network(.{ layer1g1, layer2n2g1 }){};
const parallel_networks = ParallelNetworkGroup(.{ network1g1, network2g1 }){};
const layer1g2 = Layer(i16, 2, i16, 1, .Identity){
.weights = [1][2]i16{
[2]i16{ -1, 1 },
},
.biases = [2]i16{ 1, 6 },
};
const networkg2 = Network(.{layer1g2}){};
const composed_network = SerialNetworkGroup(.{ parallel_networks, networkg2 }){};
var inputs = [4]i16{ 2, 3, 2, 3 };
var outputs: [1]i16 = undefined;
composed_network.feedForward(&inputs, &outputs);
assert(outputs[0] == 85);
} | src/nnue/nn.zig |
const std = @import("std");
usingnamespace @import("imgui.zig");
pub const icons = @import("font_awesome.zig");
extern fn _ogImage(user_texture_id: ImTextureID, size: ImVec2, uv0: ImVec2, uv1: ImVec2) void;
extern fn _ogImageButton(user_texture_id: ImTextureID, size: ImVec2, uv0: ImVec2, uv1: ImVec2, frame_padding: c_int) bool;
/// only true if down this frame and not down the previous frame
pub fn ogKeyPressed(key: usize) bool {
return igGetIO().KeysDown[key] and igGetIO().KeysDownDuration[key] == 0;
}
/// true the entire time the key is down
pub fn ogKeyDown(key: usize) bool {
return igGetIO().KeysDown[key];
}
/// true only the frame the key is released
pub fn ogKeyUp(key: usize) bool {
return !igGetIO().KeysDown[key] and igGetIO().KeysDownDuration[key] == -1 and igGetIO().KeysDownDurationPrev[key] >= 0;
}
pub fn ogButton(label: [*c]const u8) bool {
return igButton(label, .{});
}
pub fn ogImage(texture: ImTextureID, width: i32, height: i32) void {
const white = ImVec4{ .x = 1, .y = 1, .z = 1, .w = 1 };
const size = ImVec2{ .x = @intToFloat(f32, width), .y = @intToFloat(f32, height) };
// TODO: remove when windows can handle passing ImVec4s
// igImage(texture, size, ImVec2{}, ImVec2{ .x = 1, .y = 1 }, white, .{});
_ogImage(texture, size, .{}, .{ .x = 1, .y = 1 });
}
pub fn ogImageButton(texture: ImTextureID, size: ImVec2, uv0: ImVec2, uv1: ImVec2, frame_padding: c_int, bg_col: ImVec4, tint_col: ImVec4) bool {
// TODO: remove when windows can handle passing ImVec4s
// return igImageButton(texture, size, uv0, uv1, frame_padding, bg_col, tint_col);
return _ogImageButton(texture, size, uv0, uv1, frame_padding);
}
pub fn ogGetCursorScreenPos() ImVec2 {
var pos = ImVec2{};
igGetCursorScreenPos(&pos);
return pos;
}
pub fn ogGetCursorPos() ImVec2 {
var pos = ImVec2{};
igGetCursorPos(&pos);
return pos;
}
pub fn ogGetMouseDragDelta(button: ImGuiMouseButton, lock_threshold: f32) ImVec2 {
var pos = ImVec2{};
igGetMouseDragDelta(&pos, button, lock_threshold);
return pos;
}
/// returns the drag delta of the mouse buttons that is dragging
pub fn ogGetAnyMouseDragDelta() ImVec2 {
var drag_delta = ImVec2{};
if (igIsMouseDragging(ImGuiMouseButton_Left, 0)) {
igGetMouseDragDelta(&drag_delta, ImGuiMouseButton_Left, 0);
} else {
igGetMouseDragDelta(&drag_delta, ImGuiMouseButton_Right, 0);
}
return drag_delta;
}
/// returns true if any mouse is dragging
pub fn ogIsAnyMouseDragging() bool {
return igIsMouseDragging(ImGuiMouseButton_Left, 0) or igIsMouseDragging(ImGuiMouseButton_Right, 0);
}
pub fn ogGetContentRegionAvail() ImVec2 {
var pos = ImVec2{};
igGetContentRegionAvail(&pos);
return pos;
}
pub fn ogGetWindowContentRegionMax() ImVec2 {
var max = ImVec2{};
igGetWindowContentRegionMax(&max);
return max;
}
pub fn ogGetItemRectSize() ImVec2 {
var size = ImVec2{};
igGetItemRectSize(&size);
return size;
}
pub fn ogGetWindowCenter() ImVec2 {
var max = ogGetWindowContentRegionMax();
max.x /= 2;
max.y /= 2;
return max;
}
pub fn ogAddQuad(draw_list: [*c]ImDrawList, tl: ImVec2, size: f32, col: ImU32, thickness: f32) void {
ImDrawList_AddQuad(draw_list, .{ .x = tl.x, .y = tl.y }, .{ .x = tl.x + size, .y = tl.y }, .{ .x = tl.x + size, .y = tl.y + size }, .{ .x = tl.x, .y = tl.y + size }, col, thickness);
}
pub fn ogAddQuadFilled(draw_list: [*c]ImDrawList, tl: ImVec2, size: f32, col: ImU32) void {
ImDrawList_AddQuadFilled(draw_list, .{ .x = tl.x, .y = tl.y }, .{ .x = tl.x + size, .y = tl.y }, .{ .x = tl.x + size, .y = tl.y + size }, .{ .x = tl.x, .y = tl.y + size }, col);
}
/// adds a rect outline with possibly non-matched width/height to the draw list
pub fn ogAddRect(draw_list: [*c]ImDrawList, tl: ImVec2, size: ImVec2, col: ImU32, thickness: f32) void {
ImDrawList_AddQuad(draw_list, .{ .x = tl.x, .y = tl.y }, .{ .x = tl.x + size.x, .y = tl.y }, .{ .x = tl.x + size.x, .y = tl.y + size.y }, .{ .x = tl.x, .y = tl.y + size.y }, col, thickness);
}
/// adds a rect with possibly non-matched width/height to the draw list
pub fn ogAddRectFilled(draw_list: [*c]ImDrawList, tl: ImVec2, size: ImVec2, col: ImU32) void {
ImDrawList_AddQuadFilled(draw_list, .{ .x = tl.x, .y = tl.y }, .{ .x = tl.x + size.x, .y = tl.y }, .{ .x = tl.x + size.x, .y = tl.y + size.y }, .{ .x = tl.x, .y = tl.y + size.y }, col);
}
pub fn ogInputText(label: [*c]const u8, buf: [*c]u8, buf_size: usize) bool {
return igInputText(label, buf, buf_size, ImGuiInputTextFlags_None, null, null);
}
/// adds an unformatted (igTextUnformatted) tooltip with a specific wrap width
pub fn ogUnformattedTooltip(text_wrap_pos: f32, text: [*c]const u8) void {
if (igIsItemHovered(ImGuiHoveredFlags_None)) {
igBeginTooltip();
defer igEndTooltip();
igPushTextWrapPos(igGetFontSize() * text_wrap_pos);
igTextUnformatted(text, null);
igPopTextWrapPos();
}
}
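/// wraps igDragScalar, picking the ImGui data type from T; unsigned integer
/// types are displayed with the "%u" format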
pub fn ogDrag(comptime T: type, label: [*c]const u8, p_data: *T, v_speed: f32, p_min: T, p_max: T) bool {
if (std.meta.trait.isUnsignedInt(T)) {
return ogDragUnsignedFormat(T, label, p_data, v_speed, p_min, p_max, "%u");
}
return ogDragSigned(T, label, p_data, v_speed, p_min, p_max);
}
pub fn ogDragUnsignedFormat(comptime T: type, label: [*c]const u8, p_data: *T, v_speed: f32, p_min: T, p_max: T, format: [*c]const u8) bool {
std.debug.assert(std.meta.trait.isUnsignedInt(T));
var min = p_min;
var max = p_max;
const data_type = switch (T) {
u8 => ImGuiDataType_U8,
u16 => ImGuiDataType_U16,
u32 => ImGuiDataType_U32,
usize => ImGuiDataType_U64,
else => unreachable,
};
return igDragScalar(label, data_type, p_data, v_speed, &min, &max, format, 1);
}
pub fn ogDragSigned(comptime T: type, label: [*c]const u8, p_data: *T, v_speed: f32, p_min: T, p_max: T) bool {
std.debug.assert(std.meta.trait.isSignedInt(T));
var min = p_min;
var max = p_max;
const data_type = switch (T) {
i16 => ImGuiDataType_S16,
i32 => ImGuiDataType_S32,
f32 => ImGuiDataType_Float,
else => unreachable,
};
return igDragScalar(label, data_type, p_data, v_speed, &min, &max, null, 1);
}
pub fn ogColorConvertU32ToFloat4(in: ImU32) ImVec4 {
var col = ImVec4{};
igColorConvertU32ToFloat4(&col, in);
return col;
} | src/deps/imgui/wrapper.zig |
const std = @import("std");
const mach = @import("mach");
const gpu = @import("gpu");
const glfw = @import("glfw");
const zm = @import("zmath");
const Vertex = @import("cube_mesh.zig").Vertex;
const vertices = @import("cube_mesh.zig").vertices;
const App = mach.App(*FrameParams, .{});
const UniformBufferObject = struct {
mat: zm.Mat,
};
var timer: std.time.Timer = undefined;
pub fn main() !void {
timer = try std.time.Timer.start();
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var allocator = gpa.allocator();
const ctx = try allocator.create(FrameParams);
var app = try App.init(allocator, ctx, .{});
app.window.setKeyCallback(struct {
fn callback(window: glfw.Window, key: glfw.Key, scancode: i32, action: glfw.Action, mods: glfw.Mods) void {
_ = scancode;
_ = mods;
if (action == .press) {
switch (key) {
.space => window.setShouldClose(true),
else => {},
}
}
}
}.callback);
try app.window.setSizeLimits(.{ .width = 20, .height = 20 }, .{ .width = null, .height = null });
const vs_module = app.device.createShaderModule(&.{
.label = "my vertex shader",
.code = .{ .wgsl = @embedFile("vert.wgsl") },
});
const vertex_attributes = [_]gpu.VertexAttribute{
.{ .format = .float32x4, .offset = @offsetOf(Vertex, "pos"), .shader_location = 0 },
.{ .format = .float32x2, .offset = @offsetOf(Vertex, "uv"), .shader_location = 1 },
};
const vertex_buffer_layout = gpu.VertexBufferLayout{
.array_stride = @sizeOf(Vertex),
.step_mode = .vertex,
.attribute_count = vertex_attributes.len,
.attributes = &vertex_attributes,
};
const fs_module = app.device.createShaderModule(&.{
.label = "my fragment shader",
.code = .{ .wgsl = @embedFile("frag.wgsl") },
});
const blend = gpu.BlendState{
.color = .{
.operation = .add,
.src_factor = .one,
.dst_factor = .zero,
},
.alpha = .{
.operation = .add,
.src_factor = .one,
.dst_factor = .zero,
},
};
const color_target = gpu.ColorTargetState{
.format = app.swap_chain_format,
.blend = &blend,
.write_mask = gpu.ColorWriteMask.all,
};
const fragment = gpu.FragmentState{
.module = fs_module,
.entry_point = "main",
.targets = &.{color_target},
.constants = null,
};
const bgle_buffer = gpu.BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, true, 0);
const bgle_sampler = gpu.BindGroupLayout.Entry.sampler(1, .{ .fragment = true }, .filtering);
const bgle_textureview = gpu.BindGroupLayout.Entry.texture(2, .{ .fragment = true }, .float, .dimension_2d, false);
const bgl = app.device.createBindGroupLayout(
&gpu.BindGroupLayout.Descriptor{
.entries = &.{ bgle_buffer, bgle_sampler, bgle_textureview },
},
);
defer bgl.release();
const bind_group_layouts = [_]gpu.BindGroupLayout{bgl};
const pipeline_layout = app.device.createPipelineLayout(&.{
.bind_group_layouts = &bind_group_layouts,
});
const pipeline_descriptor = gpu.RenderPipeline.Descriptor{
.fragment = &fragment,
.layout = pipeline_layout,
.depth_stencil = &.{
.format = .depth24_plus,
.depth_write_enabled = true,
.depth_compare = .less,
},
.vertex = .{
.module = vs_module,
.entry_point = "main",
.buffers = &.{vertex_buffer_layout},
},
.multisample = .{
.count = 1,
.mask = 0xFFFFFFFF,
.alpha_to_coverage_enabled = false,
},
.primitive = .{
.front_face = .ccw,
.cull_mode = .back,
.topology = .triangle_list,
.strip_index_format = .none,
},
};
const vertex_buffer = app.device.createBuffer(&.{
.usage = .{ .vertex = true },
.size = @sizeOf(Vertex) * vertices.len,
.mapped_at_creation = true,
});
var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
vertex_buffer.unmap();
defer vertex_buffer.release();
const uniform_buffer = app.device.createBuffer(&.{
.usage = .{ .copy_dst = true, .uniform = true },
.size = @sizeOf(UniformBufferObject),
.mapped_at_creation = false,
});
defer uniform_buffer.release();
// The texture to put on the cube
const cube_texture = app.device.createTexture(&gpu.Texture.Descriptor{
.usage = .{ .texture_binding = true, .copy_dst = true },
.size = .{ .width = app.current_desc.width, .height = app.current_desc.height },
.format = app.swap_chain_format,
});
defer cube_texture.release();
// The texture on which we render
const cube_texture_render = app.device.createTexture(&gpu.Texture.Descriptor{
.usage = .{ .render_attachment = true, .copy_src = true },
.size = .{ .width = app.current_desc.width, .height = app.current_desc.height },
.format = app.swap_chain_format,
});
defer cube_texture_render.release();
const sampler = app.device.createSampler(&gpu.Sampler.Descriptor{
.mag_filter = .linear,
.min_filter = .linear,
});
defer sampler.release();
const cube_texture_view = cube_texture.createView(&gpu.TextureView.Descriptor{
.format = app.swap_chain_format,
.dimension = .dimension_2d,
.mip_level_count = 1,
.array_layer_count = 1,
});
defer cube_texture_view.release();
const cube_texture_view_render = cube_texture_render.createView(&gpu.TextureView.Descriptor{
.format = app.swap_chain_format,
.dimension = .dimension_2d,
.mip_level_count = 1,
.array_layer_count = 1,
});
defer cube_texture_view_render.release();
const bind_group = app.device.createBindGroup(
&gpu.BindGroup.Descriptor{
.layout = bgl,
.entries = &.{
gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
gpu.BindGroup.Entry.sampler(1, sampler),
gpu.BindGroup.Entry.textureView(2, cube_texture_view),
},
},
);
defer bind_group.release();
ctx.* = FrameParams{
.pipeline = app.device.createRenderPipeline(&pipeline_descriptor),
.queue = app.device.getQueue(),
.vertex_buffer = vertex_buffer,
.uniform_buffer = uniform_buffer,
.bind_group = bind_group,
.depth_texture = null,
.cube_texture = cube_texture,
.cube_texture_view = cube_texture_view,
.cube_texture_render = cube_texture_render,
.cube_texture_view_render = cube_texture_view_render,
.sampler = sampler,
.bgl = bgl,
};
vs_module.release();
fs_module.release();
pipeline_layout.release();
try app.run(.{ .frame = frame, .resize = resize });
ctx.depth_texture.?.release();
}
const FrameParams = struct {
pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
vertex_buffer: gpu.Buffer,
uniform_buffer: gpu.Buffer,
bind_group: gpu.BindGroup,
depth_texture: ?gpu.Texture,
cube_texture: gpu.Texture,
cube_texture_view: gpu.TextureView,
cube_texture_render: gpu.Texture,
cube_texture_view_render: gpu.TextureView,
sampler: gpu.Sampler,
bgl: gpu.BindGroupLayout,
};
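// Each frame the cube is drawn twice: once into the swap chain for display and
// once into an offscreen texture. The offscreen result of the previous frame is
// copied into cube_texture, which the shader samples as the cube's surface,
// producing the feedback ("fractal") effect this example is named after.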
fn frame(app: *App, params: *FrameParams) !void {
const cube_view = params.cube_texture_view_render;
const back_buffer_view = app.swap_chain.?.getCurrentTextureView();
const cube_color_attachment = gpu.RenderPassColorAttachment{
.view = cube_view,
.resolve_target = null,
.clear_value = gpu.Color{ .r = 0.5, .g = 0.5, .b = 0.5, .a = 1 },
.load_op = .clear,
.store_op = .store,
};
const color_attachment = gpu.RenderPassColorAttachment{
.view = back_buffer_view,
.resolve_target = null,
.clear_value = gpu.Color{ .r = 0.5, .g = 0.5, .b = 0.5, .a = 1 },
.load_op = .clear,
.store_op = .store,
};
const depth_stencil_attachment = gpu.RenderPassDepthStencilAttachment{
.view = params.depth_texture.?.createView(&gpu.TextureView.Descriptor{
.format = .depth24_plus,
.dimension = .dimension_2d,
.array_layer_count = 1,
.mip_level_count = 1,
}),
.depth_load_op = .clear,
.depth_store_op = .store,
.depth_clear_value = 1.0,
.stencil_load_op = .none,
.stencil_store_op = .none,
};
const encoder = app.device.createCommandEncoder(null);
const cube_render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{cube_color_attachment},
.depth_stencil_attachment = &depth_stencil_attachment,
};
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
.depth_stencil_attachment = &depth_stencil_attachment,
};
{
const time = @intToFloat(f32, timer.read()) / @as(f32, std.time.ns_per_s);
const model = zm.mul(zm.rotationX(time * (std.math.pi / 2.0)), zm.rotationZ(time * (std.math.pi / 2.0)));
const view = zm.lookAtRh(
zm.f32x4(0, -4, 0, 1),
zm.f32x4(0, 0, 0, 1),
zm.f32x4(0, 0, 1, 0),
);
const proj = zm.perspectiveFovRh(
(std.math.pi * 2.0 / 5.0),
@intToFloat(f32, app.current_desc.width) / @intToFloat(f32, app.current_desc.height),
1,
100,
);
const ubo = UniformBufferObject{
.mat = zm.transpose(zm.mul(zm.mul(model, view), proj)),
};
encoder.writeBuffer(params.uniform_buffer, 0, UniformBufferObject, &.{ubo});
}
const pass = encoder.beginRenderPass(&render_pass_info);
pass.setPipeline(params.pipeline);
pass.setBindGroup(0, params.bind_group, &.{0});
pass.setVertexBuffer(0, params.vertex_buffer, 0, @sizeOf(Vertex) * vertices.len);
pass.draw(vertices.len, 1, 0, 0);
pass.end();
pass.release();
encoder.copyTextureToTexture(
&gpu.ImageCopyTexture{
.texture = params.cube_texture_render,
},
&gpu.ImageCopyTexture{
.texture = params.cube_texture,
},
&.{ .width = app.current_desc.width, .height = app.current_desc.height },
);
const cube_pass = encoder.beginRenderPass(&cube_render_pass_info);
cube_pass.setPipeline(params.pipeline);
cube_pass.setBindGroup(0, params.bind_group, &.{0});
cube_pass.setVertexBuffer(0, params.vertex_buffer, 0, @sizeOf(Vertex) * vertices.len);
cube_pass.draw(vertices.len, 1, 0, 0);
cube_pass.end();
cube_pass.release();
var command = encoder.finish(null);
encoder.release();
params.queue.submit(&.{command});
command.release();
app.swap_chain.?.present();
back_buffer_view.release();
}
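// Recreates all size-dependent resources (depth texture, cube textures/views
// and the bind group) whenever the framebuffer size changes.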
fn resize(app: *App, params: *FrameParams, width: u32, height: u32) !void {
if (params.depth_texture != null) {
params.depth_texture.?.release();
params.depth_texture = app.device.createTexture(&gpu.Texture.Descriptor{
.usage = .{ .render_attachment = true },
.size = .{ .width = width, .height = height },
.format = .depth24_plus,
});
params.cube_texture.release();
params.cube_texture = app.device.createTexture(&gpu.Texture.Descriptor{
.usage = .{ .texture_binding = true, .copy_dst = true },
.size = .{ .width = width, .height = height },
.format = app.swap_chain_format,
});
params.cube_texture_render.release();
params.cube_texture_render = app.device.createTexture(&gpu.Texture.Descriptor{
.usage = .{ .render_attachment = true, .copy_src = true },
.size = .{ .width = width, .height = height },
.format = app.swap_chain_format,
});
params.cube_texture_view.release();
params.cube_texture_view = params.cube_texture.createView(&gpu.TextureView.Descriptor{
.format = app.swap_chain_format,
.dimension = .dimension_2d,
.mip_level_count = 1,
.array_layer_count = 1,
});
params.cube_texture_view_render.release();
params.cube_texture_view_render = params.cube_texture_render.createView(&gpu.TextureView.Descriptor{
.format = app.swap_chain_format,
.dimension = .dimension_2d,
.mip_level_count = 1,
.array_layer_count = 1,
});
params.bind_group.release();
params.bind_group = app.device.createBindGroup(
&gpu.BindGroup.Descriptor{
.layout = params.bgl,
.entries = &.{
gpu.BindGroup.Entry.buffer(0, params.uniform_buffer, 0, @sizeOf(UniformBufferObject)),
gpu.BindGroup.Entry.sampler(1, params.sampler),
gpu.BindGroup.Entry.textureView(2, params.cube_texture_view),
},
},
);
} else {
// The first time resize is called, width and height are set to 0
params.depth_texture = app.device.createTexture(&gpu.Texture.Descriptor{
.usage = .{ .render_attachment = true },
.size = .{ .width = app.current_desc.width, .height = app.current_desc.height },
.format = .depth24_plus,
});
}
} | examples/fractal-cube/main.zig |
//--------------------------------------------------------------------------------
// Section: Types (14)
//--------------------------------------------------------------------------------
pub const CALLFRAMEINFO = extern struct {
iMethod: u32,
fHasInValues: BOOL,
fHasInOutValues: BOOL,
fHasOutValues: BOOL,
fDerivesFromIDispatch: BOOL,
cInInterfacesMax: i32,
cInOutInterfacesMax: i32,
cOutInterfacesMax: i32,
cTopLevelInInterfaces: i32,
iid: Guid,
cMethod: u32,
cParams: u32,
};
pub const CALLFRAMEPARAMINFO = extern struct {
fIn: BOOLEAN,
fOut: BOOLEAN,
stackOffset: u32,
cbParam: u32,
};
pub const CALLFRAME_COPY = enum(i32) {
NESTED = 1,
INDEPENDENT = 2,
};
pub const CALLFRAME_COPY_NESTED = CALLFRAME_COPY.NESTED;
pub const CALLFRAME_COPY_INDEPENDENT = CALLFRAME_COPY.INDEPENDENT;
pub const CALLFRAME_FREE = enum(i32) {
NONE = 0,
IN = 1,
INOUT = 2,
OUT = 4,
TOP_INOUT = 8,
TOP_OUT = 16,
ALL = 31,
};
pub const CALLFRAME_FREE_NONE = CALLFRAME_FREE.NONE;
pub const CALLFRAME_FREE_IN = CALLFRAME_FREE.IN;
pub const CALLFRAME_FREE_INOUT = CALLFRAME_FREE.INOUT;
pub const CALLFRAME_FREE_OUT = CALLFRAME_FREE.OUT;
pub const CALLFRAME_FREE_TOP_INOUT = CALLFRAME_FREE.TOP_INOUT;
pub const CALLFRAME_FREE_TOP_OUT = CALLFRAME_FREE.TOP_OUT;
pub const CALLFRAME_FREE_ALL = CALLFRAME_FREE.ALL;
pub const CALLFRAME_NULL = enum(i32) {
NONE = 0,
INOUT = 2,
OUT = 4,
ALL = 6,
};
pub const CALLFRAME_NULL_NONE = CALLFRAME_NULL.NONE;
pub const CALLFRAME_NULL_INOUT = CALLFRAME_NULL.INOUT;
pub const CALLFRAME_NULL_OUT = CALLFRAME_NULL.OUT;
pub const CALLFRAME_NULL_ALL = CALLFRAME_NULL.ALL;
pub const CALLFRAME_WALK = enum(i32) {
IN = 1,
INOUT = 2,
OUT = 4,
};
pub const CALLFRAME_WALK_IN = CALLFRAME_WALK.IN;
pub const CALLFRAME_WALK_INOUT = CALLFRAME_WALK.INOUT;
pub const CALLFRAME_WALK_OUT = CALLFRAME_WALK.OUT;
pub const CALLFRAME_MARSHALCONTEXT = extern struct {
fIn: BOOLEAN,
dwDestContext: u32,
pvDestContext: ?*anyopaque,
punkReserved: ?*IUnknown,
guidTransferSyntax: Guid,
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICallFrame_Value = Guid.initString("d573b4b0-894e-11d2-b8b6-00c04fb9618a");
pub const IID_ICallFrame = &IID_ICallFrame_Value;
pub const ICallFrame = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetInfo: fn(
self: *const ICallFrame,
pInfo: ?*CALLFRAMEINFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetIIDAndMethod: fn(
self: *const ICallFrame,
pIID: ?*Guid,
piMethod: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetNames: fn(
self: *const ICallFrame,
pwszInterface: ?*?PWSTR,
pwszMethod: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetStackLocation: fn(
self: *const ICallFrame,
) callconv(@import("std").os.windows.WINAPI) ?*anyopaque,
SetStackLocation: fn(
self: *const ICallFrame,
pvStack: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void,
SetReturnValue: fn(
self: *const ICallFrame,
hr: HRESULT,
) callconv(@import("std").os.windows.WINAPI) void,
GetReturnValue: fn(
self: *const ICallFrame,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetParamInfo: fn(
self: *const ICallFrame,
iparam: u32,
pInfo: ?*CALLFRAMEPARAMINFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetParam: fn(
self: *const ICallFrame,
iparam: u32,
pvar: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetParam: fn(
self: *const ICallFrame,
iparam: u32,
pvar: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Copy: fn(
self: *const ICallFrame,
copyControl: CALLFRAME_COPY,
pWalker: ?*ICallFrameWalker,
ppFrame: ?*?*ICallFrame,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Free: fn(
self: *const ICallFrame,
pframeArgsDest: ?*ICallFrame,
pWalkerDestFree: ?*ICallFrameWalker,
pWalkerCopy: ?*ICallFrameWalker,
freeFlags: u32,
pWalkerFree: ?*ICallFrameWalker,
nullFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
FreeParam: fn(
self: *const ICallFrame,
iparam: u32,
freeFlags: u32,
pWalkerFree: ?*ICallFrameWalker,
nullFlags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
WalkFrame: fn(
self: *const ICallFrame,
walkWhat: u32,
pWalker: ?*ICallFrameWalker,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMarshalSizeMax: fn(
self: *const ICallFrame,
pmshlContext: ?*CALLFRAME_MARSHALCONTEXT,
mshlflags: MSHLFLAGS,
pcbBufferNeeded: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Marshal: fn(
self: *const ICallFrame,
pmshlContext: ?*CALLFRAME_MARSHALCONTEXT,
mshlflags: MSHLFLAGS,
pBuffer: [*]u8,
cbBuffer: u32,
pcbBufferUsed: ?*u32,
pdataRep: ?*u32,
prpcFlags: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Unmarshal: fn(
self: *const ICallFrame,
pBuffer: [*]u8,
cbBuffer: u32,
dataRep: u32,
pcontext: ?*CALLFRAME_MARSHALCONTEXT,
pcbUnmarshalled: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ReleaseMarshalData: fn(
self: *const ICallFrame,
pBuffer: [*]u8,
cbBuffer: u32,
ibFirstRelease: u32,
dataRep: u32,
pcontext: ?*CALLFRAME_MARSHALCONTEXT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Invoke: fn(
self: *const ICallFrame,
pvReceiver: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetInfo(self: *const T, pInfo: ?*CALLFRAMEINFO) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetInfo(@ptrCast(*const ICallFrame, self), pInfo);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetIIDAndMethod(self: *const T, pIID: ?*Guid, piMethod: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetIIDAndMethod(@ptrCast(*const ICallFrame, self), pIID, piMethod);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetNames(self: *const T, pwszInterface: ?*?PWSTR, pwszMethod: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetNames(@ptrCast(*const ICallFrame, self), pwszInterface, pwszMethod);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetStackLocation(self: *const T) callconv(.Inline) ?*anyopaque {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetStackLocation(@ptrCast(*const ICallFrame, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_SetStackLocation(self: *const T, pvStack: ?*anyopaque) callconv(.Inline) void {
return @ptrCast(*const ICallFrame.VTable, self.vtable).SetStackLocation(@ptrCast(*const ICallFrame, self), pvStack);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_SetReturnValue(self: *const T, hr: HRESULT) callconv(.Inline) void {
return @ptrCast(*const ICallFrame.VTable, self.vtable).SetReturnValue(@ptrCast(*const ICallFrame, self), hr);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetReturnValue(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetReturnValue(@ptrCast(*const ICallFrame, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetParamInfo(self: *const T, iparam: u32, pInfo: ?*CALLFRAMEPARAMINFO) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetParamInfo(@ptrCast(*const ICallFrame, self), iparam, pInfo);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_SetParam(self: *const T, iparam: u32, pvar: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).SetParam(@ptrCast(*const ICallFrame, self), iparam, pvar);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetParam(self: *const T, iparam: u32, pvar: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetParam(@ptrCast(*const ICallFrame, self), iparam, pvar);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_Copy(self: *const T, copyControl: CALLFRAME_COPY, pWalker: ?*ICallFrameWalker, ppFrame: ?*?*ICallFrame) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).Copy(@ptrCast(*const ICallFrame, self), copyControl, pWalker, ppFrame);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_Free(self: *const T, pframeArgsDest: ?*ICallFrame, pWalkerDestFree: ?*ICallFrameWalker, pWalkerCopy: ?*ICallFrameWalker, freeFlags: u32, pWalkerFree: ?*ICallFrameWalker, nullFlags: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).Free(@ptrCast(*const ICallFrame, self), pframeArgsDest, pWalkerDestFree, pWalkerCopy, freeFlags, pWalkerFree, nullFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_FreeParam(self: *const T, iparam: u32, freeFlags: u32, pWalkerFree: ?*ICallFrameWalker, nullFlags: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).FreeParam(@ptrCast(*const ICallFrame, self), iparam, freeFlags, pWalkerFree, nullFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_WalkFrame(self: *const T, walkWhat: u32, pWalker: ?*ICallFrameWalker) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).WalkFrame(@ptrCast(*const ICallFrame, self), walkWhat, pWalker);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_GetMarshalSizeMax(self: *const T, pmshlContext: ?*CALLFRAME_MARSHALCONTEXT, mshlflags: MSHLFLAGS, pcbBufferNeeded: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).GetMarshalSizeMax(@ptrCast(*const ICallFrame, self), pmshlContext, mshlflags, pcbBufferNeeded);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_Marshal(self: *const T, pmshlContext: ?*CALLFRAME_MARSHALCONTEXT, mshlflags: MSHLFLAGS, pBuffer: [*]u8, cbBuffer: u32, pcbBufferUsed: ?*u32, pdataRep: ?*u32, prpcFlags: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).Marshal(@ptrCast(*const ICallFrame, self), pmshlContext, mshlflags, pBuffer, cbBuffer, pcbBufferUsed, pdataRep, prpcFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_Unmarshal(self: *const T, pBuffer: [*]u8, cbBuffer: u32, dataRep: u32, pcontext: ?*CALLFRAME_MARSHALCONTEXT, pcbUnmarshalled: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).Unmarshal(@ptrCast(*const ICallFrame, self), pBuffer, cbBuffer, dataRep, pcontext, pcbUnmarshalled);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_ReleaseMarshalData(self: *const T, pBuffer: [*]u8, cbBuffer: u32, ibFirstRelease: u32, dataRep: u32, pcontext: ?*CALLFRAME_MARSHALCONTEXT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).ReleaseMarshalData(@ptrCast(*const ICallFrame, self), pBuffer, cbBuffer, ibFirstRelease, dataRep, pcontext);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrame_Invoke(self: *const T, pvReceiver: ?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrame.VTable, self.vtable).Invoke(@ptrCast(*const ICallFrame, self), pvReceiver);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICallIndirect_Value = Guid.initString("d573b4b1-894e-11d2-b8b6-00c04fb9618a");
pub const IID_ICallIndirect = &IID_ICallIndirect_Value;
pub const ICallIndirect = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CallIndirect: fn(
self: *const ICallIndirect,
phrReturn: ?*HRESULT,
iMethod: u32,
pvArgs: ?*anyopaque,
cbArgs: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMethodInfo: fn(
self: *const ICallIndirect,
iMethod: u32,
pInfo: ?*CALLFRAMEINFO,
pwszMethod: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetStackSize: fn(
self: *const ICallIndirect,
iMethod: u32,
cbArgs: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetIID: fn(
self: *const ICallIndirect,
piid: ?*Guid,
pfDerivesFromIDispatch: ?*BOOL,
pcMethod: ?*u32,
pwszInterface: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallIndirect_CallIndirect(self: *const T, phrReturn: ?*HRESULT, iMethod: u32, pvArgs: ?*anyopaque, cbArgs: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallIndirect.VTable, self.vtable).CallIndirect(@ptrCast(*const ICallIndirect, self), phrReturn, iMethod, pvArgs, cbArgs);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallIndirect_GetMethodInfo(self: *const T, iMethod: u32, pInfo: ?*CALLFRAMEINFO, pwszMethod: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallIndirect.VTable, self.vtable).GetMethodInfo(@ptrCast(*const ICallIndirect, self), iMethod, pInfo, pwszMethod);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallIndirect_GetStackSize(self: *const T, iMethod: u32, cbArgs: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallIndirect.VTable, self.vtable).GetStackSize(@ptrCast(*const ICallIndirect, self), iMethod, cbArgs);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallIndirect_GetIID(self: *const T, piid: ?*Guid, pfDerivesFromIDispatch: ?*BOOL, pcMethod: ?*u32, pwszInterface: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallIndirect.VTable, self.vtable).GetIID(@ptrCast(*const ICallIndirect, self), piid, pfDerivesFromIDispatch, pcMethod, pwszInterface);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICallInterceptor_Value = Guid.initString("60c7ca75-896d-11d2-b8b6-00c04fb9618a");
pub const IID_ICallInterceptor = &IID_ICallInterceptor_Value;
pub const ICallInterceptor = extern struct {
pub const VTable = extern struct {
base: ICallIndirect.VTable,
RegisterSink: fn(
self: *const ICallInterceptor,
psink: ?*ICallFrameEvents,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetRegisteredSink: fn(
self: *const ICallInterceptor,
ppsink: ?*?*ICallFrameEvents,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace ICallIndirect.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallInterceptor_RegisterSink(self: *const T, psink: ?*ICallFrameEvents) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallInterceptor.VTable, self.vtable).RegisterSink(@ptrCast(*const ICallInterceptor, self), psink);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallInterceptor_GetRegisteredSink(self: *const T, ppsink: ?*?*ICallFrameEvents) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallInterceptor.VTable, self.vtable).GetRegisteredSink(@ptrCast(*const ICallInterceptor, self), ppsink);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICallFrameEvents_Value = Guid.initString("fd5e0843-fc91-11d0-97d7-00c04fb9618a");
pub const IID_ICallFrameEvents = &IID_ICallFrameEvents_Value;
pub const ICallFrameEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnCall: fn(
self: *const ICallFrameEvents,
pFrame: ?*ICallFrame,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrameEvents_OnCall(self: *const T, pFrame: ?*ICallFrame) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrameEvents.VTable, self.vtable).OnCall(@ptrCast(*const ICallFrameEvents, self), pFrame);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICallUnmarshal_Value = Guid.initString("5333b003-2e42-11d2-b89d-00c04fb9618a");
pub const IID_ICallUnmarshal = &IID_ICallUnmarshal_Value;
pub const ICallUnmarshal = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Unmarshal: fn(
self: *const ICallUnmarshal,
iMethod: u32,
pBuffer: [*]u8,
cbBuffer: u32,
fForceBufferCopy: BOOL,
dataRep: u32,
pcontext: ?*CALLFRAME_MARSHALCONTEXT,
pcbUnmarshalled: ?*u32,
ppFrame: ?*?*ICallFrame,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ReleaseMarshalData: fn(
self: *const ICallUnmarshal,
iMethod: u32,
pBuffer: [*]u8,
cbBuffer: u32,
ibFirstRelease: u32,
dataRep: u32,
pcontext: ?*CALLFRAME_MARSHALCONTEXT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallUnmarshal_Unmarshal(self: *const T, iMethod: u32, pBuffer: [*]u8, cbBuffer: u32, fForceBufferCopy: BOOL, dataRep: u32, pcontext: ?*CALLFRAME_MARSHALCONTEXT, pcbUnmarshalled: ?*u32, ppFrame: ?*?*ICallFrame) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallUnmarshal.VTable, self.vtable).Unmarshal(@ptrCast(*const ICallUnmarshal, self), iMethod, pBuffer, cbBuffer, fForceBufferCopy, dataRep, pcontext, pcbUnmarshalled, ppFrame);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallUnmarshal_ReleaseMarshalData(self: *const T, iMethod: u32, pBuffer: [*]u8, cbBuffer: u32, ibFirstRelease: u32, dataRep: u32, pcontext: ?*CALLFRAME_MARSHALCONTEXT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallUnmarshal.VTable, self.vtable).ReleaseMarshalData(@ptrCast(*const ICallUnmarshal, self), iMethod, pBuffer, cbBuffer, ibFirstRelease, dataRep, pcontext);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICallFrameWalker_Value = Guid.initString("08b23919-392d-11d2-b8a4-00c04fb9618a");
pub const IID_ICallFrameWalker = &IID_ICallFrameWalker_Value;
pub const ICallFrameWalker = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnWalkInterface: fn(
self: *const ICallFrameWalker,
iid: ?*const Guid,
ppvInterface: ?*?*anyopaque,
fIn: BOOL,
fOut: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICallFrameWalker_OnWalkInterface(self: *const T, iid: ?*const Guid, ppvInterface: ?*?*anyopaque, fIn: BOOL, fOut: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ICallFrameWalker.VTable, self.vtable).OnWalkInterface(@ptrCast(*const ICallFrameWalker, self), iid, ppvInterface, fIn, fOut);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IInterfaceRelated_Value = Guid.initString("d1fb5a79-7706-11d1-adba-00c04fc2adc0");
pub const IID_IInterfaceRelated = &IID_IInterfaceRelated_Value;
pub const IInterfaceRelated = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetIID: fn(
self: *const IInterfaceRelated,
iid: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetIID: fn(
self: *const IInterfaceRelated,
piid: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IInterfaceRelated_SetIID(self: *const T, iid: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IInterfaceRelated.VTable, self.vtable).SetIID(@ptrCast(*const IInterfaceRelated, self), iid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IInterfaceRelated_GetIID(self: *const T, piid: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IInterfaceRelated.VTable, self.vtable).GetIID(@ptrCast(*const IInterfaceRelated, self), piid);
}
};}
pub usingnamespace MethodMixin(@This());
};
//--------------------------------------------------------------------------------
// Section: Functions (2)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows5.0'
pub extern "ole32" fn CoGetInterceptor(
iidIntercepted: ?*const Guid,
punkOuter: ?*IUnknown,
iid: ?*const Guid,
ppv: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "ole32" fn CoGetInterceptorFromTypeInfo(
iidIntercepted: ?*const Guid,
punkOuter: ?*IUnknown,
typeInfo: ?*ITypeInfo,
iid: ?*const Guid,
ppv: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (9)
//--------------------------------------------------------------------------------
const Guid = @import("../../zig.zig").Guid;
const BOOL = @import("../../foundation.zig").BOOL;
const BOOLEAN = @import("../../foundation.zig").BOOLEAN;
const HRESULT = @import("../../foundation.zig").HRESULT;
const ITypeInfo = @import("../../system/com.zig").ITypeInfo;
const IUnknown = @import("../../system/com.zig").IUnknown;
const MSHLFLAGS = @import("../../system/com.zig").MSHLFLAGS;
const PWSTR = @import("../../foundation.zig").PWSTR;
const VARIANT = @import("../../system/com.zig").VARIANT;
test {
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | win32/system/com/call_obj.zig |
const std = @import("std");
const fixedBufferStream = std.io.fixedBufferStream;
const testing = std.testing;
const style = @import("style.zig");
const Style = style.Style;
const FontStyle = style.FontStyle;
const Color = style.Color;
pub const Esc = "\x1B";
pub const Csi = Esc ++ "[";
pub const Reset = Csi ++ "0m";
const font_style_codes = std.ComptimeStringMap([]const u8, .{
.{ "bold", "1" },
.{ "dim", "2" },
.{ "italic", "3" },
.{ "underline", "4" },
.{ "slowblink", "5" },
.{ "rapidblink", "6" },
.{ "reverse", "7" },
.{ "hidden", "8" },
.{ "crossedout", "9" },
.{ "fraktur", "20" },
.{ "overline", "53" },
});
/// Update the current style of the ANSI terminal
/// Tries to use as little escape codes as possible
pub fn updateStyle(writer: anytype, new: Style, old: ?Style) !void {
if (old) |sty| if (new.eql(sty)) return;
if (new.isDefault()) return try resetStyle(writer);
// A reset is required if the new font style has attributes not
// present in the old style or if the old style is not known
const reset_required = if (old) |sty| !sty.font_style.subsetOf(new.font_style) else true;
if (reset_required) try resetStyle(writer);
// Start the escape sequence
try writer.writeAll(Csi);
var written_something = false;
// Font styles
const write_styles = if (reset_required) new.font_style else new.font_style.without(old.?.font_style);
inline for (std.meta.fields(FontStyle)) |field| {
if (@field(write_styles, field.name)) {
comptime const code = font_style_codes.get(field.name).?;
if (written_something) {
try writer.writeAll(";");
} else {
written_something = true;
}
try writer.writeAll(code);
}
}
// Foreground color
if (reset_required and new.foreground != .Default or old != null and !old.?.foreground.eql(new.foreground)) {
if (written_something) {
try writer.writeAll(";");
} else {
written_something = true;
}
switch (new.foreground) {
.Default => try writer.writeAll("39"),
.Black => try writer.writeAll("30"),
.Red => try writer.writeAll("31"),
.Green => try writer.writeAll("32"),
.Yellow => try writer.writeAll("33"),
.Blue => try writer.writeAll("34"),
.Magenta => try writer.writeAll("35"),
.Cyan => try writer.writeAll("36"),
.White => try writer.writeAll("37"),
.Fixed => |fixed| try writer.print("38;5;{}", .{fixed}),
.Grey => |grey| try writer.print("38;2;{};{};{}", .{ grey, grey, grey }),
.RGB => |rgb| try writer.print("38;2;{};{};{}", .{ rgb.r, rgb.g, rgb.b }),
}
}
// Background color
if (reset_required and new.background != .Default or old != null and !old.?.background.eql(new.background)) {
if (written_something) {
try writer.writeAll(";");
} else {
written_something = true;
}
switch (new.background) {
.Default => try writer.writeAll("49"),
.Black => try writer.writeAll("40"),
.Red => try writer.writeAll("41"),
.Green => try writer.writeAll("42"),
.Yellow => try writer.writeAll("43"),
.Blue => try writer.writeAll("44"),
.Magenta => try writer.writeAll("45"),
.Cyan => try writer.writeAll("46"),
.White => try writer.writeAll("47"),
.Fixed => |fixed| try writer.print("48;5;{}", .{fixed}),
.Grey => |grey| try writer.print("48;2;{};{};{}", .{ grey, grey, grey }),
.RGB => |rgb| try writer.print("48;2;{};{};{}", .{ rgb.r, rgb.g, rgb.b }),
}
}
// End the escape sequence
try writer.writeAll("m");
}
test "same style default, no update" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{}, Style{});
const expected = "";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "same style non-default, no update" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
const sty = Style{
.foreground = Color.Green,
};
try updateStyle(fixed_buf_stream.writer(), sty, sty);
const expected = "";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "reset to default, old null" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{}, null);
const expected = "\x1B[0m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "reset to default, old non-null" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{}, Style{
.font_style = FontStyle.bold,
});
const expected = "\x1B[0m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "bold style" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{
.font_style = FontStyle.bold,
}, Style{});
const expected = "\x1B[1m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "add bold style" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{
.font_style = FontStyle{ .bold = true, .italic = true },
}, Style{
.font_style = FontStyle.italic,
});
const expected = "\x1B[1m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "reset required font style" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{
.font_style = FontStyle.bold,
}, Style{
.font_style = FontStyle{ .bold = true, .underline = true },
});
const expected = "\x1B[0m\x1B[1m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "reset required color style" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{
.foreground = Color.Red,
}, null);
const expected = "\x1B[0m\x1B[31m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "no reset required color style" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{
.foreground = Color.Red,
}, Style{});
const expected = "\x1B[31m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "no reset required add color style" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try updateStyle(fixed_buf_stream.writer(), Style{
.foreground = Color.Red,
.background = Color.Magenta,
}, Style{
.background = Color.Magenta,
});
const expected = "\x1B[31m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
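/// Reset all active font styles and colors back to the terminal defaults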
pub fn resetStyle(writer: anytype) !void {
try writer.writeAll(Csi ++ "0m");
}
test "reset style" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
try resetStyle(fixed_buf_stream.writer());
const expected = "\x1B[0m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
}
test "Grey foreground color" {
var buf: [1024]u8 = undefined;
var fixed_buf_stream = fixedBufferStream(&buf);
var new_style = Style{};
new_style.foreground = Color{ .Grey = 1 };
try updateStyle(fixed_buf_stream.writer(), new_style, Style{});
const expected = "\x1B[38;2;1;1;1m";
const actual = fixed_buf_stream.getWritten();
testing.expectEqualSlices(u8, expected, actual);
} | src/format.zig |
const clap = @import("clap");
const format = @import("format");
const it = @import("ziter");
const std = @import("std");
const ston = @import("ston");
const util = @import("util");
const debug = std.debug;
const fmt = std.fmt;
const fs = std.fs;
const heap = std.heap;
const io = std.io;
const log = std.log;
const math = std.math;
const mem = std.mem;
const os = std.os;
const rand = std.rand;
const testing = std.testing;
const Program = @This();
allocator: mem.Allocator,
options: struct {
seed: u64,
type: Type,
method: Method,
},
pokedex: Set = Set{},
pokemons: Pokemons = Pokemons{},
static_mons: StaticMons = StaticMons{},
given_mons: StaticMons = StaticMons{},
hollow_mons: StaticMons = StaticMons{},
hidden_hollows: HiddenHollows = HiddenHollows{},
const Method = enum {
random,
@"same-stats",
@"simular-stats",
@"legendary-with-legendary",
};
const Type = enum {
random,
same,
};
pub const main = util.generateMain(Program);
pub const version = "0.0.0";
pub const description =
\\Randomizes static, given, and hollow Pokémon. Doesn't work for hg and ss yet.
\\
;
pub const params = &[_]clap.Param(clap.Help){
clap.parseParam("-h, --help Display this help text and exit. ") catch unreachable,
clap.parseParam("-s, --seed <INT> The seed to use for random numbers. A random seed will be picked if this is not specified.") catch unreachable,
clap.parseParam("-m, --method <random|same-stats|simular-stats|legendary-with-legendary> The method used to pick the new static Pokémon. (default: random) ") catch unreachable,
clap.parseParam("-t, --types <random|same> Which type each static pokemon should be. (default: random) ") catch unreachable,
clap.parseParam("-v, --version Output version information and exit. ") catch unreachable,
};
pub fn init(allocator: mem.Allocator, args: anytype) !Program {
const type_arg = args.option("--types") orelse "random";
const types = std.meta.stringToEnum(Type, type_arg) orelse {
log.err("--types does not support '{s}'", .{type_arg});
return error.InvalidArgument;
};
const method_arg = args.option("--method") orelse "random";
const method = std.meta.stringToEnum(Method, method_arg) orelse {
log.err("--method does not support '{s}'", .{method_arg});
return error.InvalidArgument;
};
return Program{
.allocator = allocator,
.options = .{
.seed = try util.args.seed(args),
.type = types,
.method = method,
},
};
}
pub fn run(
program: *Program,
comptime Reader: type,
comptime Writer: type,
stdio: util.CustomStdIoStreams(Reader, Writer),
) anyerror!void {
try format.io(program.allocator, stdio.in, stdio.out, program, useGame);
try program.randomize();
try program.output(stdio.out);
}
fn output(program: *Program, writer: anytype) !void {
try ston.serialize(writer, .{
.static_pokemons = program.static_mons,
.given_pokemons = program.given_mons,
});
for (program.hidden_hollows.values()) |hollow, i| {
const hollow_key = program.hidden_hollows.keys()[i];
for (hollow.values()) |group, j| {
const group_key = hollow.keys()[j];
for (group.values()) |pokemon, g| {
const pokemon_key = group.keys()[g];
const si = pokemon.species_index orelse continue;
try ston.serialize(writer, .{
.hidden_hollows = ston.index(hollow_key, .{
.groups = ston.index(group_key, .{
.pokemons = ston.index(pokemon_key, .{
.species = program.hollow_mons.get(si).?.species,
}),
}),
}),
});
}
}
}
}
fn useGame(program: *Program, parsed: format.Game) !void {
const allocator = program.allocator;
switch (parsed) {
.pokedex => |pokedex| {
_ = try program.pokedex.put(allocator, pokedex.index, {});
return error.DidNotConsumeData;
},
.pokemons => |pokemons| {
const pokemon_kv = try program.pokemons.getOrPutValue(allocator, pokemons.index, .{});
const pokemon = pokemon_kv.value_ptr;
switch (pokemons.value) {
.stats => |stats| pokemon.stats[@enumToInt(stats)] = stats.value(),
.types => |types| _ = try pokemon.types.put(allocator, types.value, {}),
.growth_rate => |growth_rate| pokemon.growth_rate = growth_rate,
.catch_rate => |catch_rate| pokemon.catch_rate = catch_rate,
.gender_ratio => |gender_ratio| pokemon.gender_ratio = gender_ratio,
.pokedex_entry => |pokedex_entry| pokemon.pokedex_entry = pokedex_entry,
.egg_groups => |groups| {
// TODO: Should we save both egg groups?
if (groups.index == 0)
pokemon.egg_group = groups.value;
},
.evos => |evos| switch (evos.value) {
.target => |target| _ = try pokemon.evos.put(allocator, target, {}),
.method,
.param,
=> return error.DidNotConsumeData,
},
.base_exp_yield,
.ev_yield,
.items,
.egg_cycles,
.base_friendship,
.abilities,
.color,
.moves,
.tms,
.hms,
.name,
=> return error.DidNotConsumeData,
}
return error.DidNotConsumeData;
},
.static_pokemons => |pokemons| switch (pokemons.value) {
.species => |species| {
_ = try program.static_mons.put(allocator, pokemons.index, .{ .species = species });
return;
},
.level => return error.DidNotConsumeData,
},
.given_pokemons => |pokemons| switch (pokemons.value) {
.species => |species| {
_ = try program.given_mons.put(allocator, pokemons.index, .{ .species = species });
return;
},
.level => return error.DidNotConsumeData,
},
.hidden_hollows => |hollows| {
const groups = (try program.hidden_hollows.getOrPutValue(
allocator,
hollows.index,
.{},
)).value_ptr;
switch (hollows.value) {
.groups => |group| {
const pokemons = (try groups.getOrPutValue(allocator, group.index, .{}))
.value_ptr;
switch (group.value) {
.pokemons => |mon| {
const pokemon = (try pokemons.getOrPutValue(
allocator,
mon.index,
.{},
)).value_ptr;
switch (mon.value) {
.species => |species| {
const index = @intCast(u16, program.hollow_mons.count());
_ = try program.hollow_mons.put(
allocator,
index,
.{ .species = species },
);
pokemon.species_index = index;
},
}
return;
},
}
},
.items => return error.DidNotConsumeData,
}
},
.version,
.game_title,
.gamecode,
.instant_text,
.starters,
.text_delays,
.trainers,
.moves,
.abilities,
.types,
.tms,
.hms,
.items,
.maps,
.wild_pokemons,
.pokeball_items,
.text,
=> return error.DidNotConsumeData,
}
unreachable;
}
fn randomize(program: *Program) !void {
const allocator = program.allocator;
const random = rand.DefaultPrng.init(program.options.seed).random();
const species = try getPokedexPokemons(allocator, program.pokemons, program.pokedex);
for ([_]StaticMons{
program.static_mons,
program.given_mons,
program.hollow_mons,
}) |static_mons| {
switch (program.options.method) {
.random => switch (program.options.type) {
.random => {
const max = species.count();
if (max == 0)
return;
for (static_mons.values()) |*static|
static.species = util.random.item(random, species.keys()).?.*;
},
.same => {
const by_type = try getSpeciesByType(allocator, program.pokemons, species);
for (static_mons.values()) |*static| {
const pokemon = program.pokemons.get(static.species).?;
const type_max = pokemon.types.count();
if (type_max == 0)
continue;
const t = util.random.item(random, pokemon.types.keys()).?.*;
const pokemons = by_type.get(t).?;
static.species = util.random.item(random, pokemons.keys()).?.*;
}
},
},
.@"same-stats", .@"simular-stats" => {
const by_type = switch (program.options.type) {
// When we do random, we should never actually touch the 'by_type'
// table, so let's just avoid doing the work of constructing it :)
.random => undefined,
.same => try getSpeciesByType(allocator, program.pokemons, species),
};
var simular = std.ArrayList(u16).init(allocator);
for (static_mons.values()) |*static| {
defer simular.shrinkRetainingCapacity(0);
// If the static Pokémon does not exist in the program
// we received, then there is no way for us to compare
// its stats with other Pokémons. The only thing we can
// assume is that the Pokémon it currently is
// is similar/same as itself.
const prev_pokemon = program.pokemons.get(static.species) orelse continue;
var min = @intCast(i64, it.fold(&prev_pokemon.stats, @as(usize, 0), foldu8));
var max = min;
// For same-stats, we can just make this loop run once, which will
// make the simular list only contain pokemons with the same stats.
const condition = if (program.options.method == .@"simular-stats")
@as(usize, 25)
else
@as(usize, 1);
while (simular.items.len < condition) : ({
min -= 5;
max += 5;
}) {
switch (program.options.type) {
.random => for (species.keys()) |s| {
const pokemon = program.pokemons.get(s).?;
const total = @intCast(i64, it.fold(&pokemon.stats, @as(usize, 0), foldu8));
if (min <= total and total <= max)
try simular.append(s);
},
.same => {
// If this Pokémon has no type (for some reason), then we
// cannot pick a pokemon of the same type. The only thing
// we can assume is that the Pokémon is the same type
// as itself, and therefore just use that as the similar
// Pokémon.
const type_max = prev_pokemon.types.count();
if (type_max == 0) {
try simular.append(static.species);
break;
}
for (prev_pokemon.types.keys()) |t| {
const pokemons_of_type = by_type.get(t).?;
for (pokemons_of_type.keys()) |s| {
const pokemon = program.pokemons.get(s).?;
const total = @intCast(i64, it.fold(&pokemon.stats, @as(usize, 0), foldu8));
if (min <= total and total <= max)
try simular.append(s);
}
}
},
}
}
static.species = util.random.item(random, simular.items).?.*;
}
},
.@"legendary-with-legendary" => {
// There is no way to specify in game that a Pokemon is a legendary.
// There are therefore two methods we can use to pick legendaries:
// 1. Have a table of Pokemons which are legendaries.
// - This does not work with roms that have been hacked
// in a way that changes which Pokemons should be considered
// legendary
// 2. Find legendaries by looking at their stats, evolution line
// and other patterns common for legendaries
//
// I have chosen the latter method.
// First, lets give each Pokemon a "legendary rating" which
// is a measure as to how many "legendary" criteria this
// Pokemon fits into. This rating can be negative.
var ratings = std.AutoArrayHashMap(u16, isize).init(allocator);
for (species.keys()) |s| {
const pokemon = program.pokemons.get(s).?;
const rating = (try ratings.getOrPutValue(s, 0)).value_ptr;
// Legendaries generally have a "slow" or "medium_slow"
// growth rate.
rating.* += @as(isize, @boolToInt(pokemon.growth_rate == .slow or
pokemon.growth_rate == .medium_slow));
// They generally have a catch rate of 45 or less
rating.* += @as(isize, @boolToInt(pokemon.catch_rate <= 45));
// They tend to not have a gender (255 in gender_ratio means
// genderless).
rating.* += @as(isize, @boolToInt(pokemon.gender_ratio == 255));
// Most are part of the "undiscovered" egg group
rating.* += @as(isize, @boolToInt(pokemon.egg_group == .undiscovered));
// And they don't evolve from anything. Subtract
// score from this Pokemons evolutions.
for (pokemon.evos.keys()) |evo| {
const evo_rating = (try ratings.getOrPutValue(evo, 0)).value_ptr;
evo_rating.* -= 10;
rating.* -= 10;
}
}
const rating_to_be_legendary = blk: {
var res: isize = 0;
for (ratings.values()) |rating|
res = math.max(res, rating);
// Not all legendaries match all criteria. Let's
// allow for legendaries that miss on criteria.
break :blk res - 1;
};
var legendaries = Set{};
var rest = Set{};
for (ratings.values()) |rating, i| {
const rating_key = ratings.keys()[i];
if (rating >= rating_to_be_legendary) {
_ = try legendaries.put(allocator, rating_key, {});
} else {
_ = try rest.put(allocator, rating_key, {});
}
}
const legendaries_by_type = switch (program.options.type) {
.random => undefined,
.same => try getSpeciesByType(allocator, program.pokemons, legendaries),
};
const rest_by_type = switch (program.options.type) {
.random => undefined,
.same => try getSpeciesByType(allocator, program.pokemons, rest),
};
for (static_mons.values()) |*static| {
const pokemon = program.pokemons.get(static.species) orelse continue;
const rating = ratings.get(static.species) orelse continue;
const pick_from = switch (program.options.type) {
.random => if (rating >= rating_to_be_legendary) legendaries else rest,
.same => blk: {
const type_max = pokemon.types.count();
if (type_max == 0)
continue;
const types = pokemon.types;
const picked_type = util.random.item(random, types.keys()).?.*;
const pick_from_by_type = if (rating >= rating_to_be_legendary) legendaries_by_type else rest_by_type;
break :blk pick_from_by_type.get(picked_type) orelse continue;
},
};
const max = pick_from.count();
if (max == 0)
continue;
static.species = util.random.item(random, pick_from.keys()).?.*;
}
},
}
}
}
const Pokemons = std.AutoArrayHashMapUnmanaged(u16, Pokemon);
const Set = std.AutoArrayHashMapUnmanaged(u16, void);
const SpeciesByType = std.AutoArrayHashMapUnmanaged(u16, Set);
const StaticMons = std.AutoArrayHashMapUnmanaged(u16, StaticMon);
const HiddenHollows = std.AutoArrayHashMapUnmanaged(u16, HollowGroups);
const HollowGroups = std.AutoArrayHashMapUnmanaged(u8, HollowPokemons);
const HollowPokemons = std.AutoArrayHashMapUnmanaged(u8, HollowMon);
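// Returns the set of species that are catchable (catch_rate != 0) and whose pokedex entry
// is present in the given pokedex set.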
fn getPokedexPokemons(allocator: mem.Allocator, pokemons: Pokemons, pokedex: Set) !Set {
var res = Set{};
errdefer res.deinit(allocator);
for (pokemons.values()) |pokemon, i| {
const species = pokemons.keys()[i];
if (pokemon.catch_rate == 0)
continue;
if (pokedex.get(pokemon.pokedex_entry) == null)
continue;
_ = try res.put(allocator, species, {});
}
return res;
}
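// Groups the given species by each of their types, so a random pick can later be restricted
// to a single type.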
fn getSpeciesByType(allocator: mem.Allocator, pokemons: Pokemons, _species: Set) !SpeciesByType {
var res = SpeciesByType{};
errdefer {
for (res.values()) |*v|
v.deinit(allocator);
res.deinit(allocator);
}
for (_species.keys()) |s| {
const pokemon = pokemons.get(s) orelse continue;
for (pokemon.types.keys()) |t| {
const set = try res.getOrPutValue(allocator, t, .{});
_ = try set.value_ptr.put(allocator, s, {});
}
}
return res;
}
fn foldu8(a: usize, b: u8) usize {
return a + b;
}
const HollowMon = struct {
species_index: ?u16 = null,
};
const Pokemon = struct {
stats: [6]u8 = [_]u8{0} ** 6,
pokedex_entry: u16 = math.maxInt(u16),
catch_rate: usize = 1,
growth_rate: format.GrowthRate = .fast,
gender_ratio: usize = math.maxInt(usize),
egg_group: format.EggGroup = .invalid,
types: Set = Set{},
evos: Set = Set{},
};
const StaticMon = struct {
species: u16,
};
fn testIt(comptime prefix: []const u8) !void {
const H = struct {
fn pokemon(
comptime id: []const u8,
comptime stat: []const u8,
comptime t1: []const u8,
comptime t2: []const u8,
comptime growth_rate: []const u8,
comptime catch_rate: []const u8,
comptime gender_ratio: []const u8,
comptime egg_groups: []const u8,
comptime evo: ?[]const u8,
) []const u8 {
return ".pokedex[" ++ id ++ "].height=0\n" ++
".pokemons[" ++ id ++ "].pokedex_entry=" ++ id ++ "\n" ++
".pokemons[" ++ id ++ "].stats.hp=" ++ stat ++ "\n" ++
".pokemons[" ++ id ++ "].stats.attack=" ++ stat ++ "\n" ++
".pokemons[" ++ id ++ "].stats.defense=" ++ stat ++ "\n" ++
".pokemons[" ++ id ++ "].stats.speed=" ++ stat ++ "\n" ++
".pokemons[" ++ id ++ "].stats.sp_attack=" ++ stat ++ "\n" ++
".pokemons[" ++ id ++ "].stats.sp_defense=" ++ stat ++ "\n" ++
".pokemons[" ++ id ++ "].types[0]=" ++ t1 ++ "\n" ++
".pokemons[" ++ id ++ "].types[1]=" ++ t2 ++ "\n" ++
".pokemons[" ++ id ++ "].growth_rate=" ++ growth_rate ++ "\n" ++
".pokemons[" ++ id ++ "].catch_rate=" ++ catch_rate ++ "\n" ++
".pokemons[" ++ id ++ "].gender_ratio=" ++ gender_ratio ++ "\n" ++
".pokemons[" ++ id ++ "].egg_groups[0]=" ++ egg_groups ++ "\n" ++
if (evo) |e| ".pokemons[" ++ id ++ "].evos[0].target=" ++ e ++ "\n" else "";
}
fn static(
comptime id: []const u8,
comptime species: []const u8,
) []const u8 {
return prefix ++ "[" ++ id ++ "].species=" ++ species ++ "\n";
}
};
const legendaries = comptime H.pokemon("0", "10", "15", "2", "slow", "3", "255", "undiscovered", null) ++
H.pokemon("1", "10", "13", "2", "slow", "3", "255", "undiscovered", null) ++
H.pokemon("2", "10", "10", "2", "slow", "3", "255", "undiscovered", null) ++
H.pokemon("3", "10", "13", "13", "slow", "3", "255", "undiscovered", null) ++
H.pokemon("4", "11", "11", "11", "slow", "3", "255", "undiscovered", null) ++
H.pokemon("5", "11", "5", "5", "slow", "3", "255", "undiscovered", null) ++
H.pokemon("6", "11", "15", "15", "slow", "3", "255", "undiscovered", null) ++
H.pokemon("7", "12", "16", "14", "slow", "3", "254", "undiscovered", null) ++
H.pokemon("8", "12", "16", "14", "slow", "3", "0", "undiscovered", null) ++
H.pokemon("9", "12", "11", "11", "slow", "3", "255", "water1", null);
const pseudo_legendaries = comptime H.pokemon("10", "10", "16", "16", "slow", "45", "127", "water1", "11") ++
H.pokemon("11", "10", "16", "2", "slow", "45", "127", "water1", null) ++
H.pokemon("12", "10", "5", "4", "slow", "45", "127", "monster", "13") ++
H.pokemon("13", "10", "5", "17", "slow", "45", "127", "monster", null) ++
H.pokemon("14", "11", "16", "16", "slow", "45", "127", "dragon", "15") ++
H.pokemon("15", "11", "16", "2", "slow", "45", "127", "dragon", null) ++
H.pokemon("16", "11", "8", "14", "slow", "3", "255", "mineral", "17") ++
H.pokemon("17", "11", "8", "14", "slow", "3", "255", "mineral", null) ++
H.pokemon("18", "12", "16", "4", "slow", "45", "127", "monster", "19") ++
H.pokemon("19", "12", "16", "4", "slow", "45", "127", "monster", null) ++
H.pokemon("20", "12", "17", "16", "slow", "45", "127", "dragon", "21") ++
H.pokemon("21", "12", "17", "16", "slow", "45", "127", "dragon", null);
const pokemons_to_not_pick_ever = comptime H.pokemon("22", "12", "11", "11", "slow", "0", "255", "water1", null) ++
H.pokemon("23", "12", "17", "16", "slow", "0", "127", "dragon", null);
const result_prefix = legendaries ++ pseudo_legendaries ++ pokemons_to_not_pick_ever;
const test_string = comptime result_prefix ++
H.static("0", "0") ++
H.static("1", "1") ++
H.static("2", "2") ++
H.static("3", "3") ++
H.static("4", "4") ++
H.static("5", "21");
try util.testing.testProgram(Program, &[_][]const u8{"--seed=0"}, test_string, result_prefix ++
prefix ++ "[0].species=7\n" ++
prefix ++ "[1].species=8\n" ++
prefix ++ "[2].species=7\n" ++
prefix ++ "[3].species=0\n" ++
prefix ++ "[4].species=10\n" ++
prefix ++ "[5].species=0\n");
try util.testing.testProgram(Program, &[_][]const u8{ "--seed=0", "--types=same" }, test_string, result_prefix ++
prefix ++ "[0].species=0\n" ++
prefix ++ "[1].species=1\n" ++
prefix ++ "[2].species=2\n" ++
prefix ++ "[3].species=3\n" ++
prefix ++ "[4].species=4\n" ++
prefix ++ "[5].species=13\n");
try util.testing.testProgram(Program, &[_][]const u8{ "--seed=1", "--method=same-stats" }, test_string, result_prefix ++
prefix ++ "[0].species=12\n" ++
prefix ++ "[1].species=11\n" ++
prefix ++ "[2].species=0\n" ++
prefix ++ "[3].species=11\n" ++
prefix ++ "[4].species=5\n" ++
prefix ++ "[5].species=19\n");
try util.testing.testProgram(Program, &[_][]const u8{ "--seed=1", "--method=same-stats", "--types=same" }, test_string, result_prefix ++
prefix ++ "[0].species=11\n" ++
prefix ++ "[1].species=2\n" ++
prefix ++ "[2].species=2\n" ++
prefix ++ "[3].species=3\n" ++
prefix ++ "[4].species=4\n" ++
prefix ++ "[5].species=18\n");
try util.testing.testProgram(Program, &[_][]const u8{ "--seed=2", "--method=simular-stats" }, test_string, result_prefix ++
prefix ++ "[0].species=10\n" ++
prefix ++ "[1].species=0\n" ++
prefix ++ "[2].species=4\n" ++
prefix ++ "[3].species=1\n" ++
prefix ++ "[4].species=3\n" ++
prefix ++ "[5].species=15\n");
try util.testing.testProgram(Program, &[_][]const u8{ "--seed=2", "--method=simular-stats", "--types=same" }, test_string, result_prefix ++
prefix ++ "[0].species=15\n" ++
prefix ++ "[1].species=3\n" ++
prefix ++ "[2].species=1\n" ++
prefix ++ "[3].species=3\n" ++
prefix ++ "[4].species=4\n" ++
prefix ++ "[5].species=14\n");
try util.testing.testProgram(Program, &[_][]const u8{ "--seed=3", "--method=legendary-with-legendary" }, test_string, result_prefix ++
prefix ++ "[0].species=0\n" ++
prefix ++ "[1].species=6\n" ++
prefix ++ "[2].species=8\n" ++
prefix ++ "[3].species=8\n" ++
prefix ++ "[4].species=6\n" ++
prefix ++ "[5].species=11\n");
try util.testing.testProgram(Program, &[_][]const u8{ "--seed=4", "--method=legendary-with-legendary", "--types=same" }, test_string, result_prefix ++
prefix ++ "[0].species=1\n" ++
prefix ++ "[1].species=3\n" ++
prefix ++ "[2].species=2\n" ++
prefix ++ "[3].species=3\n" ++
prefix ++ "[4].species=4\n" ++
prefix ++ "[5].species=21\n");
}
test "tm35-rand-static" {
try testIt(".static_pokemons");
try testIt(".given_pokemons");
try testIt(".hidden_hollows[0].groups[0].pokemons");
} | src/randomizers/tm35-rand-static.zig |
const AtomicOrder = @import("builtin").AtomicOrder;
const Vector = @import("vector.zig").Vector(f32);
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const warn = std.debug.warn;
const allocator = std.heap.page_allocator;
const Math = std.math;
const json = std.json;
// Location for MMIO, Vector pointer and vector length will be put there.
var arg_ptr = [_]usize{0, 0};
const SELF_INTERSECTION_THRESHOLD: f32 = 0.001;
const Ray = struct {
point: Vector,
vector: Vector,
};
const ObjectType = enum {
Plane,
Sphere,
};
const Scene = struct {
const Camera = struct {
point: Vector,
vector: Vector,
fov: f32,
};
camera: Camera,
const Object = struct {
@"type": ObjectType,
point: Vector,
color: Vector,
specular: f32,
lambert: f32,
ambient: f32,
radius: f32 = 0.0,
normal: Vector = Vector.new(0.0, 0.0, 0.0),
};
objects: []Object,
lights: []Vector,
checker: []Vector,
};
export fn __wbindgen_global_argument_ptr() usize {
@fence(AtomicOrder.SeqCst);
return @ptrToInt(&arg_ptr);
}
export fn __wbindgen_free_u8(ptr: u32, len: u32) void {
@fence(AtomicOrder.SeqCst);
allocator.free(@intToPtr([*]u8, ptr)[0..len]);
}
export fn __wbindgen_free_f32(ptr: u32, len: u32) void {
@fence(AtomicOrder.SeqCst);
allocator.free(@intToPtr([*]volatile f32, ptr)[0..len]);
}
export fn __wbindgen_malloc_u8(len: u32) usize {
@fence(AtomicOrder.SeqCst);
const mem = allocator.alloc(u8, len) catch unreachable;
return @ptrToInt(mem.ptr);
}
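// Returns true if `a` is a valid hit (beyond the self-intersection threshold) that is closer
// than `b`; a null distance means "no intersection".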
fn closer(a: ?f32, b: ?f32) bool {
if (a != null and b != null)
return (a.? > SELF_INTERSECTION_THRESHOLD and a.? < b.?);
if (a == null and b == null)
return false;
return (a orelse 0.0) > SELF_INTERSECTION_THRESHOLD;
}
const IntersectionResult = struct {
distance: ?f32,
object: ?Scene.Object,
};
fn intersect_scene(ray: Ray, scene: Scene) IntersectionResult {
var closest = IntersectionResult{ .distance = null, .object = null };
for (scene.objects) |object| {
var distance = object_intersection(object, ray);
if (closer(distance, closest.distance)) {
closest = IntersectionResult{
.distance = distance,
.object = object
};
}
}
return closest;
}
fn object_intersection(object: Scene.Object, ray: Ray) ?f32 {
return switch (object.type) {
ObjectType.Sphere => blk: {
const eye_to_center = object.point.subtract(ray.point);
const v = eye_to_center.dot_product(ray.vector);
const eo_dot = eye_to_center.dot_product(eye_to_center);
const discriminant = (object.radius * object.radius) - eo_dot + (v * v);
if (discriminant < 0.0) {
return null;
}
const distance = v - Math.sqrt(discriminant);
if (distance > SELF_INTERSECTION_THRESHOLD) {
return distance;
}
break :blk null;
},
ObjectType.Plane => blk: {
const neg_norm = object.normal.negate();
const denom = neg_norm.dot_product(ray.vector);
if (denom <= 0.0) {
return null;
}
const interm = object.point.subtract(ray.point);
break :blk interm.dot_product(neg_norm) / denom;
},
};
}
fn plane_color_at(point_at_time: Vector, plane: Scene.Object, scene: Scene) Vector {
// Point from plane origin
// This is a complete hack to make up for my sad lack of lin alg. knowledge
const from_origin = point_at_time.subtract(plane.point);
const width = 2.0;
var px = Vector.new(0.0, 1.0, 0.0);
var py = Vector.new(0.0, 0.0, 1.0);
if (plane.normal.z != 0.0) {
py = Vector.new(1.0, 0.0, 1.0);
}
if (plane.normal.y != 0.0) {
px = Vector.new(0.0, 0.0, 1.0);
py = Vector.new(1.0, 0.0, 0.0);
}
const cx = px.dot_product(from_origin);
const cy = py.dot_product(from_origin);
const x_cond = (cx < 0.0 and @rem(cx, width) < -width / 2.0) or (cx > 0.0 and @rem(cx, width) < width / 2.0);
const y_cond = (cy < 0.0 and @rem(cy, width) < -width / 2.0) or (cy > 0.0 and @rem(cy, width) < width / 2.0);
if ((x_cond and !y_cond) or (y_cond and !x_cond)) {
return scene.checker[0].scale(1.0);
}
return scene.checker[1].scale(1.0);
}
fn get_normal(object: Scene.Object, pos: Vector) Vector {
return switch (object.type) {
ObjectType.Sphere => pos.subtract(object.point).unit(),
ObjectType.Plane => object.normal.unit(),
};
}
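// Computes the shaded color at a hit point: sums lambert contributions from lights visible
// from the point, adds a recursive specular reflection (via trace), and an ambient term.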
fn surface(
ray: Ray,
scene: Scene,
object: Scene.Object,
point_at_time: Vector,
normal: Vector,
depth: usize,
) Vector {
var lambert = object.lambert;
var specular = object.specular;
var ambient = object.ambient;
var b = switch (object.type) {
ObjectType.Sphere => object.color.scale(1.0),
ObjectType.Plane => plane_color_at(point_at_time, object, scene),
};
var c = Vector.zero();
var lambert_amount: f32 = 0.0;
if (lambert > 0.0) {
for (scene.lights) |light| {
if (!is_light_visible(point_at_time, scene, light)) {
continue;
}
const contribution = light.subtract(point_at_time).unit().dot_product(normal);
if (contribution > 0.0) {
lambert_amount += contribution;
}
}
}
if (specular > 0.0) {
const reflected_ray = Ray{
.point = point_at_time,
.vector = ray.vector.reflect_through(normal),
};
const reflected_color = trace(reflected_ray, scene, depth + 1);
if (reflected_color != null) {
c = c.add(reflected_color.?.scale(specular));
}
}
lambert_amount = min(lambert_amount, 1.0);
return c.add3(b.scale(lambert_amount * lambert), b.scale(ambient));
}
fn trace(ray: Ray, scene: Scene, depth: usize) ?Vector {
if (depth > 2) {
return null;
}
var dist_object = intersect_scene(ray, scene);
return if (dist_object.distance) |distance| (
if (dist_object.object) |collision| blk: {
const point_in_time = ray.point.add(ray.vector.scale(distance));
break :blk surface(
ray,
scene,
collision,
point_in_time,
get_normal(collision, point_in_time),
depth
);
} else Vector.zero()
) else Vector.zero();
}
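// Parses the JSON scene, builds a simple pinhole camera from the fov, and traces one primary
// ray per pixel into a width*height*4 RGBA buffer of f32 values in the 0..255 range
// (alpha fixed at 255).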
fn render(scene_str: []const u8, width: u32, height: u32) []f32 {
const options = json.ParseOptions{ .allocator = allocator };
const scene = json.parse(Scene, &json.TokenStream.init(scene_str), options) catch unreachable;
defer json.parseFree(Scene, scene, options);
const camera = scene.camera;
const eye_vector = camera.vector.subtract(camera.point).unit();
const vp_right = eye_vector.cross_product(Vector.up()).unit();
const vp_up = vp_right.cross_product(eye_vector).unit();
const fov_radians = Math.pi * (camera.fov / 2.0) / 180.0;
const height_width_ratio = @intToFloat(f32, height) / @intToFloat(f32, width);
const half_width = Math.tan(fov_radians);
const half_height = height_width_ratio * half_width;
const camera_width = half_width * 2.0;
const camera_height = half_height * 2.0;
const pixel_width = camera_width / (@intToFloat(f32, width) - 1.0);
const pixel_height = camera_height / (@intToFloat(f32, height) - 1.0);
var ray = Ray{
.point = camera.point,
.vector = Vector.up(),
};
var len = width * height * 4;
var x: u32 = 0;
var y: u32 = 0;
var result = allocator.alloc(f32, len) catch unreachable;
while (y < height) {
x = 0;
while (x < width) {
const i = 4 * (y * width + x);
const x_comp = vp_right.scale((@intToFloat(f32, x) * pixel_width) - half_width);
const y_comp = vp_up.scale((@intToFloat(f32, y) * pixel_height) - half_height);
ray.vector = eye_vector.add3(x_comp, y_comp).unit();
const color = trace(ray, scene, 0) orelse Vector.new(0.0, 0.0, 0.0);
result[i + 0] = @floatCast(f32, color.x);
result[i + 1] = @floatCast(f32, color.y);
result[i + 2] = @floatCast(f32, color.z);
result[i + 3] = 255.0;
x += 1;
}
y += 1;
}
return result;
}
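// A point is considered lit when the ray from the point toward the light hits nothing closer
// than the light itself.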
fn is_light_visible(point: Vector, scene: Scene, light: Vector) bool {
const point_to_light_vector = light.subtract(point);
const distance_to_light = point_to_light_vector.length();
const ray = Ray {
.point = point,
.vector = point_to_light_vector.unit(),
};
const res = intersect_scene(ray, scene);
return if (res.distance != null) res.distance.? > distance_to_light else true;
}
fn min(a: f32, b: f32) f32 {
return if (a > b) b else a;
}
export fn binding(retptr: u32, ptr_scene: u32, len_scene: u32, width: u32, height: u32) void {
@fence(AtomicOrder.SeqCst);
const input = @intToPtr([*]u8, ptr_scene)[0..len_scene];
defer allocator.free(input);
var result = render(input, width, height);
@intToPtr([*]volatile usize, retptr).* = @ptrToInt(result.ptr);
@intToPtr([*]volatile usize, retptr + 4).* = width * height * 4;
}
const testing = std.testing;
test "json.parse" {
const test_json =
\\ {
\\ "camera": {
\\ "point":{"x":0,"y":0,"z":7},
\\ "vector":{"x":0,"y":0,"z":0},
\\ "fov":70
\\ },
\\ "objects":[
\\ {"type":"Sphere","point":{"x":0.17989201943833377,"y":0.11992801295888919,"z":2.9946016198056125},"color":{"x":0,"y":0,"z":0},"specular":0.7,"lambert":0.5,"ambient":0.3,"radius":1},
\\ {"type":"Sphere","point":{"x":-0.17989201943833377,"y":-1.9964010798704084,"z":-2.9946016198056125},"color":{"x":0,"y":0,"z":0},"specular":0.7,"lambert":0.5,"ambient":0.3,"radius":1},
\\ {"type":"Sphere","point":{"x":0,"y":0,"z":0},"color":{"x":255,"y":255,"z":255},"specular":0.25,"lambert":0.72,"ambient":0.26,"radius":1.5},
\\ {"type":"Plane","point":{"x":0,"y":5,"z":0},"normal":{"x":0,"y":-1,"z":0},"color":{"x":200,"y":200,"z":200},"specular":0,"lambert":0.9,"ambient":0.2},
\\ {"type":"Plane","point":{"x":0,"y":-5,"z":0},"normal":{"x":0,"y":1,"z":0},"color":{"x":100,"y":100,"z":100},"specular":0,"lambert":0.9,"ambient":0.2},
\\ {"type":"Plane","point":{"x":-5,"y":0,"z":0},"normal":{"x":1,"y":0,"z":0},"color":{"x":100,"y":100,"z":100},"specular":0,"lambert":0.9,"ambient":0.2},
\\ {"type":"Plane","point":{"x":5,"y":0,"z":0},"normal":{"x":-1,"y":0,"z":0},"color":{"x":100,"y":100,"z":100},"specular":0,"lambert":0.9,"ambient":0.2},
\\ {"type":"Plane","point":{"x":0,"y":0,"z":-12},"normal":{"x":0,"y":0,"z":1},"color":{"x":100,"y":100,"z":100},"specular":0,"lambert":0.9,"ambient":0.2},
\\ {"type":"Plane","point":{"x":0,"y":0,"z":12},"normal":{"x":0,"y":0,"z":-1},"color":{"x":100,"y":100,"z":100},"specular":0,"lambert":0.9,"ambient":0.2}
\\ ],
\\ "checker":[
\\ {"x":50,"y":0,"z":89},
\\ {"x":92,"y":209,"z":92}
\\ ],
\\ "lights":[
\\ {"x":3,"y":3,"z":5}
\\ ]
\\ }
;
var result = render(test_json, 10, 10);
// Smoke test: just check that rendering completes and fills the 10x10 RGBA buffer.
testing.expect(result.len == 400);
} | zigray/lib.zig |
const std = @import("std");
const builtin = @import("builtin");
const io = std.io;
const os = std.os;
const fs = std.fs;
const time = @import("../Zig-PSP/src/psp/os/time.zig");
pub const Level = enum {
const Self = @This();
Trace,
Debug,
Info,
Warn,
Error,
Fatal,
fn toString(self: Self) []const u8 {
return switch (self) {
Level.Trace => "TRACE",
Level.Debug => "DEBUG",
Level.Info => "INFO",
Level.Warn => "WARN",
Level.Error => "ERROR",
Level.Fatal => "FATAL",
};
}
};
var level: Level = Level.Trace;
var quiet: bool = false;
var file: fs.File = undefined;
var start: i64 = 0;
pub fn init() !void {
start = time.milliTimestamp();
file = try fs.cwd().createFile("./log.txt", fs.File.CreateFlags {.truncate = true});
}
pub fn deinit() void {
file.close();
}
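// Writes "[LEVEL]: message" plus a newline to log.txt; messages below the current level,
// or any message while quiet is set, are dropped.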
pub fn log(lv: Level, comptime fmt: []const u8, args: anytype) !void {
if (@enumToInt(lv) < @enumToInt(level)) {
return;
}
if (!quiet) {
//try std.fmt.format(file.writer(), "[{}]", .{time.milliTimestamp() - start});
try std.fmt.format(file.writer(), "[{}]", .{lv.toString()});
try std.fmt.format(file.writer(), ": ", .{});
try std.fmt.format(file.writer(), fmt, args);
try std.fmt.format(file.writer(), "\n", .{});
}
}
pub fn setLevel(lv: Level) void {
level = lv;
}
pub fn trace(comptime fmt: []const u8, args: anytype) void {
log(Level.Trace, fmt, args) catch return;
}
pub fn debug(comptime fmt: []const u8, args: anytype) void {
log(Level.Debug, fmt, args) catch return;
}
pub fn info(comptime fmt: []const u8, args: anytype) void {
log(Level.Info, fmt, args) catch return;
}
pub fn warn(comptime fmt: []const u8, args: anytype) void {
log(Level.Warn, fmt, args) catch return;
}
pub fn err(comptime fmt: []const u8, args: anytype) void {
log(Level.Error, fmt, args) catch return;
}
pub fn fatal(comptime fmt: []const u8, args: anytype) void {
log(Level.Fatal, fmt, args) catch return;
} | src/Utils/logger.zig |
const std = @import("std");
const mem = std.mem;
const ArrayList = std.ArrayList;
const warn = std.debug.warn;
pub const replacement_rune: i32 = 0xfffd;
pub const max_rune: i32 = 0x10ffff;
// 0xd800-0xdc00 encodes the high 10 bits of a pair.
// 0xdc00-0xe000 encodes the low 10 bits of a pair.
// the value is those 20 bits plus 0x10000.
const surr1: i32 = 0xd800;
const surr2: i32 = 0xdc00;
const surr3: i32 = 0xe000;
const surrSelf: i32 = 0x10000;
// isSurrogate reports whether the specified Unicode code point
// can appear in a surrogate pair.
pub fn isSurrogate(r: i32) bool {
return surr1 <= r and r < surr3;
}
// ArrayUTF16 holds an array/slice of UTF-16 code units. The API of this
// package avoids using raw []u16 to simplify management and freeing of memory.
pub const ArrayUTF16 = ArrayList(u16);
pub const ArrayUTF8 = ArrayList(i32);
// decodeRune returns the UTF-16 decoding of a surrogate pair.
// If the pair is not a valid UTF-16 surrogate pair, decodeRune returns
// the Unicode replacement code point U+FFFD.
pub fn decodeRune(r1: i32, r2: i32) i32 {
if (surr1 <= r1 and r1 < surr2 and surr2 <= r2 and r2 < surr3) {
return (((r1 - surr1) << 10) | (r2 - surr2)) + surrSelf;
}
return replacement_rune;
}
pub const Pair = struct {
r1: i32,
r2: i32,
};
// encodeRune returns the UTF-16 surrogate pair r1, r2 for the given rune.
// If the rune is not a valid Unicode code point or does not need encoding,
// encodeRune returns U+FFFD, U+FFFD.
pub fn encodeRune(r: i32) Pair {
if (r < surrSelf or r > max_rune) {
return Pair{ .r1 = replacement_rune, .r2 = replacement_rune };
}
const rn = r - surrSelf;
return Pair{ .r1 = surr1 + ((rn >> 10) & 0x3ff), .r2 = surr2 + (rn & 0x3ff) };
}
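// Example round-trip: U+1F600 encodes to the surrogate pair 0xd83d/0xde00 (per the constants
// above) and decodes back to the same code point. Assumes std.testing.expect returns void,
// as in the Zig version this file targets.
test "encodeRune/decodeRune round trip" {
const pair = encodeRune(0x1F600);
std.testing.expect(pair.r1 == 0xd83d);
std.testing.expect(pair.r2 == 0xde00);
std.testing.expect(decodeRune(pair.r1, pair.r2) == 0x1F600);
// Code points below surrSelf need no surrogate pair; encodeRune signals this by
// returning the replacement rune for both halves.
std.testing.expect(encodeRune(0x41).r1 == replacement_rune);
}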
// encode returns the UTF-16 encoding of the Unicode code point sequence s. It
// is up to the caller to deinit the returned list when done.
pub fn encode(allocator: *mem.Allocator, s: []const i32) !ArrayUTF16 {
var n: usize = s.len;
for (s) |v| {
if (v >= surrSelf) {
n += 1;
}
}
var list = ArrayUTF16.init(allocator);
try list.resize(n);
n = 0;
for (s) |v| {
if (0 <= v and v < surr1 or surr3 <= v and v < surrSelf) {
list.set(n, @intCast(u16, v));
n += 1;
} else if (surrSelf <= v and v <= max_rune) {
const r = encodeRune(v);
list.set(n, @intCast(u16, r.r1));
list.set(n + 1, @intCast(u16, r.r2));
n += 2;
} else {
list.set(n, @intCast(u16, replacement_rune));
n += 1;
}
}
list.shrink(n);
return list;
}
// decode returns the Unicode code point sequence represented
// by the UTF-16 encoding s.
pub fn decode(a: *mem.Allocator, s: []u16) !ArrayUTF8 {
var list = ArrayUTF8.init(a);
try list.resize(s.len);
var n: usize = 0;
var i: usize = 0;
while (i < s.len) : (i += 1) {
const r = @intCast(i32, s[i]);
if (r < surr1 or surr3 <= r) {
//normal rune
list.set(n, r);
} else if (surr1 <= r and r < surr2 and i + 1 < s.len and surr2 <= s[i + 1] and s[i + 1] < surr3) {
// valid surrogate sequence
list.set(n, decodeRune(r, @intCast(i32, s[i + 1])));
i += 1;
} else {
list.set(n, replacement_rune);
}
n += 1;
}
list.shrink(n);
return list;
} | src/unicode/utf16/index.zig |
const zt = @import("zt");
const main = @import("../main.zig");
usingnamespace @import("imgui");
usingnamespace zt.custom_components;
var rotation: f32 = 0.0;
var scale: f32 = 1.0;
var lineStart: zt.math.Vec2 = .{ .x = -200, .y = 200 };
var lineEnd: zt.math.Vec2 = .{ .x = 200, .y = 200 };
var startColor: zt.math.Vec4 = zt.math.Vec4.white;
var endColor: zt.math.Vec4 = zt.math.Vec4.white;
var thickness: f32 = 2;
var radius: f32 = 10;
pub fn update(ctx: *main.SampleApplication.Context) void {
control(ctx);
var render = ctx.data.render;
var io = igGetIO();
// It's important to set the render size, then the camera. This applies the matrices used to display all the sprites.
render.updateRenderSize(io.*.DisplaySize);
render.updateCamera(.{}, scale, rotation);
var y: i32 = -4;
var x: i32 = -4;
while (y <= 4) : (y += 1) {
while (x <= 4) : (x += 1) {
const pos = zt.math.vec2(32 * @intToFloat(f32, x), 32 * @intToFloat(f32, y));
render.sprite(ctx.data.sheet, pos, 0, zt.math.vec2(32, 32), zt.math.Vec4.white, zt.math.vec2(0.5, 0.5), zt.math.rect(16, 0, 16, 16));
}
x = -4;
}
// Text in the renderer is very rudimentary and not really intended for ingame use in world space.
// All the renderer does is add text to imgui's background drawlist. As such you should transform world to screen space yourself.
const message =
\\Sadly, text is forwarded by imgui and not affected by matrices.
\\Note this also means nothing but imgui itself can be infront of this text.
\\But fear not! It wouldn't be difficult to implement your own text if needed.
;
render.text(render.worldToScreen(.{ .x = -128 - 16, .y = -190 }), message, zt.math.Vec4.white);
// You can also draw lines. In this example I want a solid white line, so the source rect is somewhere on the sheet
// that is pure white.
render.line(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), lineStart, lineEnd, 0, thickness, startColor, endColor);
// Surround the grid with a hollow rect.
render.rectangleHollow(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), zt.math.rect(-128, -128, 256, 256), 0, 2.0, zt.math.vec4(0.0, 1.0, 0.0, 1.0));
// Draw a circle, again sampling the solid-white region of the sheet.
render.circle(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), zt.math.vec2(0, 200), radius, 0, zt.math.vec4(0.0, 1.0, 0.0, 1.0));
// Renderer also has no clue when a frame ends, so you must manually flush at the end of every render cycle.
// It is not required to do this between texture changes, as this is handled internally.
render.flush();
}
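// Draws the ImGui settings window that exposes the camera and primitive parameters used by update above.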
fn control(ctx: *main.SampleApplication.Context) void {
var io = igGetIO();
igSetNextWindowPos(io.*.DisplaySize, ImGuiCond_Appearing, .{ .x = 1, .y = 1 });
if (igBegin("Renderer Demo Settings", null, ImGuiWindowFlags_None)) {
igPushItemWidth(igGetWindowWidth() * 0.5);
_ = igDragFloat("Camera Rotation", &rotation, 0.02, zt.math.toRadians(-360.0), zt.math.toRadians(360.0), "%.3f", ImGuiSliderFlags_None);
_ = igDragFloat("Camera Zoom", &scale, 0.02, 0.1, 16, "%.3f", ImGuiSliderFlags_None);
igSeparator();
_ = ztEditDrag("Line Start", 0.1, &lineStart);
_ = ztEditDrag("Line End", 0.1, &lineEnd);
_ = ztEditDrag("Line Thickness", 0.1, &thickness);
_ = ztEditDrag("Line Start color", 0.1, &startColor);
_ = ztEditDrag("Line End color", 0.1, &endColor);
igSeparator();
_ = ztEditDrag("Circle Radius", 0.1, &radius);
_ = ztEditDrag("Circle Resolution", 0.1, &ctx.data.render.resolution);
igPopItemWidth();
}
igEnd();
} | example/src/scenes/renderer.zig |
const Self = @This();
const build_options = @import("build_options");
const std = @import("std");
const wlr = @import("wlroots");
const wl = @import("wayland").server.wl;
const pixman = @import("pixman");
const log = @import("log.zig");
const server = &@import("main.zig").server;
const util = @import("util.zig");
const Cursor = @import("Cursor.zig");
const Seat = @import("Seat.zig");
const View = @import("View.zig");
constraint: *wlr.PointerConstraintV1,
cursor: *Cursor,
destroy: wl.Listener(*wlr.PointerConstraintV1) = wl.Listener(*wlr.PointerConstraintV1).init(handleDestroy),
set_region: wl.Listener(void) = wl.Listener(void).init(handleSetRegion),
pub fn init(self: *Self, constraint: *wlr.PointerConstraintV1) void {
const seat = @intToPtr(*Seat, constraint.seat.data);
self.* = .{
.constraint = constraint,
.cursor = &seat.cursor,
};
self.constraint.data = @ptrToInt(self);
self.constraint.events.destroy.add(&self.destroy);
self.constraint.events.set_region.add(&self.set_region);
if (seat.focused == .view and seat.focused.view.surface == self.constraint.surface) {
self.setAsActive();
}
}
pub fn setAsActive(self: *Self) void {
if (self.cursor.constraint == self.constraint) return;
if (self.cursor.constraint) |constraint| {
constraint.sendDeactivated();
}
self.cursor.constraint = self.constraint;
// TODO: This is the same hack sway uses to deal with the fact that this
// function may be called in response to a surface commit but before the
// wlroots pointer constraints implementation's commit handler has been called.
// This logic is duplicated from that commit handler.
if (self.constraint.current.region.notEmpty()) {
_ = self.constraint.region.intersect(&self.constraint.surface.input_region, &self.constraint.current.region);
} else {
_ = self.constraint.region.copy(&self.constraint.surface.input_region);
}
self.constrainToRegion();
self.constraint.sendActivated();
}
fn constrainToRegion(self: *Self) void {
if (self.cursor.constraint != self.constraint) return;
if (View.fromWlrSurface(self.constraint.surface)) |view| {
const output = view.output;
const output_box = server.root.output_layout.getBox(output.wlr_output).?;
const surface_lx = @intToFloat(f64, output_box.x + view.current.box.x - view.surface_box.x);
const surface_ly = @intToFloat(f64, output_box.y + view.current.box.y - view.surface_box.y);
const sx = @floatToInt(c_int, self.cursor.wlr_cursor.x - surface_lx);
const sy = @floatToInt(c_int, self.cursor.wlr_cursor.y - surface_ly);
// If the cursor is not already inside the constraint region, warp
// it to an arbitrary point inside the constraint region.
if (!self.constraint.region.containsPoint(sx, sy, null)) {
const rects = self.constraint.region.rectangles();
if (rects.len > 0) {
const new_lx = surface_lx + @intToFloat(f64, rects[0].x1 + rects[0].x2) / 2;
const new_ly = surface_ly + @intToFloat(f64, rects[0].y1 + rects[0].y2) / 2;
self.cursor.wlr_cursor.warpClosest(null, new_lx, new_ly);
}
}
}
}
fn handleDestroy(listener: *wl.Listener(*wlr.PointerConstraintV1), constraint: *wlr.PointerConstraintV1) void {
const self = @fieldParentPtr(Self, "destroy", listener);
self.destroy.link.remove();
self.set_region.link.remove();
if (self.cursor.constraint == self.constraint) {
warpToHint(self.cursor);
self.cursor.constraint = null;
}
util.gpa.destroy(self);
}
fn handleSetRegion(listener: *wl.Listener(void)) void {
const self = @fieldParentPtr(Self, "set_region", listener);
self.constrainToRegion();
}
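// If the active constraint committed a cursor hint, warp the cursor to that hint in layout coordinates.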
pub fn warpToHint(cursor: *Cursor) void {
if (cursor.constraint) |constraint| {
if (constraint.current.committed.cursor_hint) {
if (View.fromWlrSurface(constraint.surface)) |view| {
const output = view.output;
const output_box = server.root.output_layout.getBox(output.wlr_output).?;
const surface_lx = @intToFloat(f64, output_box.x + view.current.box.x - view.surface_box.x);
const surface_ly = @intToFloat(f64, output_box.y + view.current.box.y - view.surface_box.y);
_ = cursor.wlr_cursor.warp(
null,
surface_lx + constraint.current.cursor_hint.x,
surface_ly + constraint.current.cursor_hint.y,
);
}
}
}
} | source/river-0.1.0/river/PointerConstraint.zig |
pub const Pos = packed struct {
line: usize,
col: usize,
index: usize,
};
pub const Token = struct {
pos: Pos,
tkn_type: TokenType,
value: []const u8,
};
pub const TokenType = enum(u8) {
Identifier, // id
Equal, // =
Let, // 'let'
Integer, // refers to ints
IntKeyword, // 'int'
Float, // refer to floats
FloatKeyword, // 'float'
String, // refer to strings
StringKeyword, // 'str'
Char, // refers to chars
CharKeyword, // 'char'
True, // 'true'
False, // 'false'
BoolKeyword, // 'bool'
Newline, // ! '\n' Weird Token
SemiColon, // ;
Colon, // :
Function, // 'fn'
LeftParen, // (
RightParen, // )
LeftCurly, // {
RightCurly, // }
LeftSQRBrackets, // [
RightSQRBrackets, // ]
Return, // 'return'
Import, // 'import'
Include, // 'include'
If, // 'if'
Else, // 'else'
For, // 'for'
While, // 'while'
Greater, // >
GreaterEqual, // >=
Less, // <
LessEqual, // <=
Dot, // .
Not, // "!"
And, // 'and'
Or, // 'or'
DoubleQuotes, // "
Quote, // '
Comma, // ,
Default, // '_'
Public, // 'pub'
Divider, // |
Mutable, // 'mut'
Match, // 'match'
As, // 'as'
EqualEqual, // ==
Break, // 'break'
Plus, // +
Minus, // -
Star, // *
Div, // /
AddEqual, // +=
SubEqual, // -=
MultEqual, // *=
DivEqual, // /=
Struct, // 'struct'
Ref, // 'ref'
Void, // 'void'
Skip, // 'continue' alternative
Defer, // `defer`
Nil, // `nil`
Long, // long
LongKeyword, // `long`
Type, // type
Block, // c code block
Builtin, // zig like builtins @identifer
pub fn describe(self: TokenType) []const u8 {
return @tagName(self);
}
};
pub const Type = enum(u8) {
_int,
_isize,
_usize,
_u8,
_u16,
_u32,
_u64,
_i8,
_i16,
_i32,
_i64,
_f32,
_f64,
_f128,
_char,
_void,
_struct,
_none,
pub fn describe(self: Type) []const u8 {
return @tagName(self);
}
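// Maps a primitive type to its C spelling, used when emitting C code.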
// zig fmt: off
pub fn typeToC(self: Type) []const u8 {
switch (self) {
._isize => return "long long",
._usize => return "size_t",
._int => return "int",
._f32 => return "float",
._f64 => return "double",
._f128 => return "long double",
._char => return "char",
._void => return "void",
._i8 => return "int8_t",
._i16 => return "int16_t",
._i32 => return "int32_t",
._i64 => return "int64_t",
._u8 => return "uint8_t",
._u16 => return "uint16_t",
._u32 => return "uint32_t",
._u64 => return "uint64_t",
// Note: user-defined struct types have no single C spelling; "struct" is a placeholder here.
._struct => return "struct",
._none => return "NULL",
}
}
// zig fmt: on
}; | src/lexer/tokens.zig |
const Print = @This();
const std = @import("std");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const leb128 = std.leb;
const link = @import("../../link.zig");
const log = std.log.scoped(.codegen);
const math = std.math;
const mem = std.mem;
const Air = @import("../../Air.zig");
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const DW = std.dwarf;
const Encoder = bits.Encoder;
const ErrorMsg = Module.ErrorMsg;
const MCValue = @import("CodeGen.zig").MCValue;
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
const Instruction = bits.Instruction;
const Register = bits.Register;
const Type = @import("../../type.zig").Type;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
mir: Mir,
bin_file: *link.File,
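/// Prints a human-readable listing of the MIR instructions together with size statistics
/// and, where available, the originating AIR instruction for each group of MIR instructions.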
pub fn printMir(print: *const Print, w: anytype, mir_to_air_map: std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index), air: Air) !void {
const instruction_bytes = print.mir.instructions.len *
// Here we don't use @sizeOf(Mir.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
(@sizeOf(Mir.Inst.Tag) + 2 + 8);
const extra_bytes = print.mir.extra.len * @sizeOf(u32);
const total_bytes = @sizeOf(Mir) + instruction_bytes + extra_bytes;
// zig fmt: off
std.debug.print(
\\# Total MIR bytes: {}
\\# MIR Instructions: {d} ({})
\\# MIR Extra Data: {d} ({})
\\
, .{
fmtIntSizeBin(total_bytes),
print.mir.instructions.len, fmtIntSizeBin(instruction_bytes),
print.mir.extra.len, fmtIntSizeBin(extra_bytes),
});
// zig fmt: on
const mir_tags = print.mir.instructions.items(.tag);
for (mir_tags) |tag, index| {
const inst = @intCast(u32, index);
if (mir_to_air_map.get(inst)) |air_index| {
try w.print("air index %{} ({}) for following mir inst(s)\n", .{ air_index, air.instructions.items(.tag)[air_index] });
}
try w.writeAll(" ");
switch (tag) {
.adc => try print.mirArith(.adc, inst, w),
.add => try print.mirArith(.add, inst, w),
.sub => try print.mirArith(.sub, inst, w),
.xor => try print.mirArith(.xor, inst, w),
.@"and" => try print.mirArith(.@"and", inst, w),
.@"or" => try print.mirArith(.@"or", inst, w),
.sbb => try print.mirArith(.sbb, inst, w),
.cmp => try print.mirArith(.cmp, inst, w),
.mov => try print.mirArith(.mov, inst, w),
.adc_mem_imm => try print.mirArithMemImm(.adc, inst, w),
.add_mem_imm => try print.mirArithMemImm(.add, inst, w),
.sub_mem_imm => try print.mirArithMemImm(.sub, inst, w),
.xor_mem_imm => try print.mirArithMemImm(.xor, inst, w),
.and_mem_imm => try print.mirArithMemImm(.@"and", inst, w),
.or_mem_imm => try print.mirArithMemImm(.@"or", inst, w),
.sbb_mem_imm => try print.mirArithMemImm(.sbb, inst, w),
.cmp_mem_imm => try print.mirArithMemImm(.cmp, inst, w),
.mov_mem_imm => try print.mirArithMemImm(.mov, inst, w),
.adc_scale_src => try print.mirArithScaleSrc(.adc, inst, w),
.add_scale_src => try print.mirArithScaleSrc(.add, inst, w),
.sub_scale_src => try print.mirArithScaleSrc(.sub, inst, w),
.xor_scale_src => try print.mirArithScaleSrc(.xor, inst, w),
.and_scale_src => try print.mirArithScaleSrc(.@"and", inst, w),
.or_scale_src => try print.mirArithScaleSrc(.@"or", inst, w),
.sbb_scale_src => try print.mirArithScaleSrc(.sbb, inst, w),
.cmp_scale_src => try print.mirArithScaleSrc(.cmp, inst, w),
.mov_scale_src => try print.mirArithScaleSrc(.mov, inst, w),
.adc_scale_dst => try print.mirArithScaleDst(.adc, inst, w),
.add_scale_dst => try print.mirArithScaleDst(.add, inst, w),
.sub_scale_dst => try print.mirArithScaleDst(.sub, inst, w),
.xor_scale_dst => try print.mirArithScaleDst(.xor, inst, w),
.and_scale_dst => try print.mirArithScaleDst(.@"and", inst, w),
.or_scale_dst => try print.mirArithScaleDst(.@"or", inst, w),
.sbb_scale_dst => try print.mirArithScaleDst(.sbb, inst, w),
.cmp_scale_dst => try print.mirArithScaleDst(.cmp, inst, w),
.mov_scale_dst => try print.mirArithScaleDst(.mov, inst, w),
.adc_scale_imm => try print.mirArithScaleImm(.adc, inst, w),
.add_scale_imm => try print.mirArithScaleImm(.add, inst, w),
.sub_scale_imm => try print.mirArithScaleImm(.sub, inst, w),
.xor_scale_imm => try print.mirArithScaleImm(.xor, inst, w),
.and_scale_imm => try print.mirArithScaleImm(.@"and", inst, w),
.or_scale_imm => try print.mirArithScaleImm(.@"or", inst, w),
.sbb_scale_imm => try print.mirArithScaleImm(.sbb, inst, w),
.cmp_scale_imm => try print.mirArithScaleImm(.cmp, inst, w),
.mov_scale_imm => try print.mirArithScaleImm(.mov, inst, w),
.adc_mem_index_imm => try print.mirArithMemIndexImm(.adc, inst, w),
.add_mem_index_imm => try print.mirArithMemIndexImm(.add, inst, w),
.sub_mem_index_imm => try print.mirArithMemIndexImm(.sub, inst, w),
.xor_mem_index_imm => try print.mirArithMemIndexImm(.xor, inst, w),
.and_mem_index_imm => try print.mirArithMemIndexImm(.@"and", inst, w),
.or_mem_index_imm => try print.mirArithMemIndexImm(.@"or", inst, w),
.sbb_mem_index_imm => try print.mirArithMemIndexImm(.sbb, inst, w),
.cmp_mem_index_imm => try print.mirArithMemIndexImm(.cmp, inst, w),
.mov_mem_index_imm => try print.mirArithMemIndexImm(.mov, inst, w),
.movabs => try print.mirMovabs(inst, w),
.lea => try print.mirLea(inst, w),
.lea_pie => try print.mirLeaPie(inst, w),
.imul_complex => try print.mirIMulComplex(inst, w),
.push => try print.mirPushPop(.push, inst, w),
.pop => try print.mirPushPop(.pop, inst, w),
.jmp => try print.mirJmpCall(.jmp, inst, w),
.call => try print.mirJmpCall(.call, inst, w),
.cond_jmp_greater_less => try print.mirCondJmp(.cond_jmp_greater_less, inst, w),
.cond_jmp_above_below => try print.mirCondJmp(.cond_jmp_above_below, inst, w),
.cond_jmp_eq_ne => try print.mirCondJmp(.cond_jmp_eq_ne, inst, w),
.cond_set_byte_greater_less => try print.mirCondSetByte(.cond_set_byte_greater_less, inst, w),
.cond_set_byte_above_below => try print.mirCondSetByte(.cond_set_byte_above_below, inst, w),
.cond_set_byte_eq_ne => try print.mirCondSetByte(.cond_set_byte_eq_ne, inst, w),
.@"test" => try print.mirTest(inst, w),
.brk => try w.writeAll("brk\n"),
.ret => try w.writeAll("ret\n"),
.nop => try w.writeAll("nop\n"),
.syscall => try w.writeAll("syscall\n"),
.call_extern => try print.mirCallExtern(inst, w),
.dbg_line, .dbg_prologue_end, .dbg_epilogue_begin, .arg_dbg_info => try w.print("{s}\n", .{@tagName(tag)}),
.push_regs_from_callee_preserved_regs => try print.mirPushPopRegsFromCalleePreservedRegs(.push, inst, w),
.pop_regs_from_callee_preserved_regs => try print.mirPushPopRegsFromCalleePreservedRegs(.pop, inst, w),
else => {
try w.print("TODO emit asm for {s}\n", .{@tagName(tag)});
},
}
}
}
fn mirPushPop(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
switch (ops.flags) {
0b00 => {
// PUSH/POP reg
try w.print("{s} {s}", .{ @tagName(tag), @tagName(ops.reg1) });
},
0b01 => {
// PUSH/POP r/m64
const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("{s} [{s} + {d}]", .{ @tagName(tag), @tagName(ops.reg1), imm });
},
0b10 => {
const imm = print.mir.instructions.items(.data)[inst].imm;
// PUSH imm32
assert(tag == .push);
try w.print("{s} {d}", .{ @tagName(tag), imm });
},
0b11 => unreachable,
}
try w.writeByte('\n');
}
fn mirPushPopRegsFromCalleePreservedRegs(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const payload = print.mir.instructions.items(.data)[inst].payload;
const data = print.mir.extraData(Mir.RegsToPushOrPop, payload).data;
const regs = data.regs;
var disp: u32 = data.disp + 8;
if (regs == 0) return w.writeAll("no regs from callee_preserved_regs\n");
var printed_first_reg = false;
for (bits.callee_preserved_regs) |reg, i| {
if ((regs >> @intCast(u5, i)) & 1 == 0) continue;
if (printed_first_reg) try w.writeAll(" ");
printed_first_reg = true;
if (tag == .push) {
try w.print("mov qword ptr [{s} + {d}], {s}", .{
@tagName(ops.reg1),
@bitCast(u32, -@intCast(i32, disp)),
@tagName(reg.to64()),
});
} else {
try w.print("mov {s}, qword ptr [{s} + {d}]", .{
@tagName(reg.to64()),
@tagName(ops.reg1),
@bitCast(u32, -@intCast(i32, disp)),
});
}
disp += 8;
try w.writeByte('\n');
}
}
fn mirJmpCall(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
try w.print("{s} ", .{@tagName(tag)});
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const flag = @truncate(u1, ops.flags);
if (flag == 0) {
return w.writeAll("TODO target\n");
}
if (ops.reg1 == .none) {
// JMP/CALL [imm]
const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("[{x}]\n", .{imm});
return;
}
// JMP/CALL reg
try w.print("{s}\n", .{@tagName(ops.reg1)});
}
fn mirCondJmp(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
_ = print;
_ = tag;
_ = inst;
try w.writeAll("TODO print mirCondJmp\n");
}
fn mirCondSetByte(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
_ = tag;
_ = inst;
_ = print;
try w.writeAll("TODO print mirCondSetByte\n");
}
fn mirTest(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
_ = print;
_ = inst;
try w.writeAll("TODO print mirTest\n");
}
fn mirArith(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
try w.writeAll(@tagName(tag));
try w.writeByte(' ');
switch (ops.flags) {
0b00 => {
if (ops.reg2 == .none) {
const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("{s}, {d}", .{ @tagName(ops.reg1), imm });
} else try w.print("{s}, {s}", .{ @tagName(ops.reg1), @tagName(ops.reg2) });
},
0b01 => {
const imm = print.mir.instructions.items(.data)[inst].imm;
if (ops.reg2 == .none) {
try w.print("{s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
16 => try w.print("word ptr ", .{}),
32 => try w.print("dword ptr ", .{}),
64 => try w.print("qword ptr ", .{}),
else => unreachable,
}
try w.print("[ds:{d}]", .{imm});
} else {
try w.print("{s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
16 => try w.print("word ptr ", .{}),
32 => try w.print("dword ptr ", .{}),
64 => try w.print("qword ptr ", .{}),
else => unreachable,
}
try w.print("[{s} + {d}]", .{ @tagName(ops.reg2), imm });
}
},
0b10 => {
const imm = print.mir.instructions.items(.data)[inst].imm;
if (ops.reg2 == .none) {
try w.writeAll("unused variant");
} else {
switch (ops.reg2.size()) {
8 => try w.print("byte ptr ", .{}),
16 => try w.print("word ptr ", .{}),
32 => try w.print("dword ptr ", .{}),
64 => try w.print("qword ptr ", .{}),
else => unreachable,
}
try w.print("[{s} + {d}], {s}", .{ @tagName(ops.reg1), imm, @tagName(ops.reg2) });
}
},
0b11 => {
try w.writeAll("unused variant");
},
}
try w.writeByte('\n');
}
fn mirArithMemImm(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const payload = print.mir.instructions.items(.data)[inst].payload;
const imm_pair = print.mir.extraData(Mir.ImmPair, payload).data;
try w.print("{s} ", .{@tagName(tag)});
switch (ops.flags) {
0b00 => try w.print("byte ptr ", .{}),
0b01 => try w.print("word ptr ", .{}),
0b10 => try w.print("dword ptr ", .{}),
0b11 => try w.print("qword ptr ", .{}),
}
try w.print("[{s} + {d}], {d}\n", .{ @tagName(ops.reg1), imm_pair.dest_off, imm_pair.operand });
}
fn mirArithScaleSrc(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const scale = ops.flags;
// OP reg1, [reg2 + scale*rcx + imm32]
const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("{s} {s}, [{s} + {d}*rcx + {d}]\n", .{ @tagName(tag), @tagName(ops.reg1), @tagName(ops.reg2), scale, imm });
}
fn mirArithScaleDst(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const scale = ops.flags;
const imm = print.mir.instructions.items(.data)[inst].imm;
if (ops.reg2 == .none) {
// OP [reg1 + scale*rax + 0], imm32
return w.print("{s} [{s} + {d}*rax + 0], {d}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm });
}
// OP [reg1 + scale*rax + imm32], reg2
try w.print("{s} [{s} + {d}*rax + {d}], {s}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm, @tagName(ops.reg2) });
}
fn mirArithScaleImm(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const scale = ops.flags;
const payload = print.mir.instructions.items(.data)[inst].payload;
const imm_pair = print.mir.extraData(Mir.ImmPair, payload).data;
try w.print("{s} [{s} + {d}*rax + {d}], {d}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm_pair.dest_off, imm_pair.operand });
}
fn mirArithMemIndexImm(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const payload = print.mir.instructions.items(.data)[inst].payload;
const imm_pair = print.mir.extraData(Mir.ImmPair, payload).data;
try w.print("{s} ", .{@tagName(tag)});
switch (ops.flags) {
0b00 => try w.print("byte ptr ", .{}),
0b01 => try w.print("word ptr ", .{}),
0b10 => try w.print("dword ptr ", .{}),
0b11 => try w.print("qword ptr ", .{}),
}
try w.print("[{s} + 1*rax + {d}], {d}\n", .{ @tagName(ops.reg1), imm_pair.dest_off, imm_pair.operand });
}
fn mirMovabs(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
const tag = print.mir.instructions.items(.tag)[inst];
assert(tag == .movabs);
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const is_64 = ops.reg1.size() == 64;
const imm: i128 = if (is_64) blk: {
const payload = print.mir.instructions.items(.data)[inst].payload;
const imm64 = print.mir.extraData(Mir.Imm64, payload).data;
break :blk imm64.decode();
} else print.mir.instructions.items(.data)[inst].imm;
        if (ops.flags == 0b00) {
            // movabs reg, imm64
            return w.print("movabs {s}, {d}\n", .{ @tagName(ops.reg1), imm });
        }
if (ops.reg1 == .none) {
try w.writeAll("movabs moffs64, rax\n");
} else {
// movabs rax, moffs64
try w.writeAll("movabs rax, moffs64\n");
}
}
fn mirIMulComplex(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
const tag = print.mir.instructions.items(.tag)[inst];
assert(tag == .imul_complex);
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
switch (ops.flags) {
0b00 => {
try w.print("imul {s}, {s}\n", .{ @tagName(ops.reg1), @tagName(ops.reg2) });
},
0b10 => {
const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("imul {s}, {s}, {d}\n", .{ @tagName(ops.reg1), @tagName(ops.reg2), imm });
},
else => return w.writeAll("TODO implement imul\n"),
}
}
fn mirLea(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
try w.writeAll("lea ");
switch (ops.flags) {
0b00 => {
const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("{s} [", .{@tagName(ops.reg1)});
if (ops.reg2 != .none) {
try w.print("{s} + ", .{@tagName(ops.reg2)});
} else {
try w.print("ds:", .{});
}
try w.print("{d}]", .{imm});
},
0b01 => {
try w.print("{s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
16 => try w.print("word ptr ", .{}),
32 => try w.print("dword ptr ", .{}),
64 => try w.print("qword ptr ", .{}),
else => unreachable,
}
try w.print("[rip + 0x0] ", .{});
const payload = print.mir.instructions.items(.data)[inst].payload;
const imm = print.mir.extraData(Mir.Imm64, payload).data.decode();
try w.print("target@{x}", .{imm});
},
0b10 => {
const imm = print.mir.instructions.items(.data)[inst].imm;
try w.print("{s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
16 => try w.print("word ptr ", .{}),
32 => try w.print("dword ptr ", .{}),
64 => try w.print("qword ptr ", .{}),
else => unreachable,
}
try w.print("[rbp + rcx + {d}]", .{imm});
},
0b11 => {
try w.writeAll("unused variant");
},
}
try w.writeAll("\n");
}
fn mirLeaPie(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const load_reloc = print.mir.instructions.items(.data)[inst].load_reloc;
try w.print("lea {s}, ", .{@tagName(ops.reg1)});
switch (ops.reg1.size()) {
8 => try w.print("byte ptr ", .{}),
16 => try w.print("word ptr ", .{}),
32 => try w.print("dword ptr ", .{}),
64 => try w.print("qword ptr ", .{}),
else => unreachable,
}
try w.print("[rip + 0x0] ", .{});
if (print.bin_file.cast(link.File.MachO)) |macho_file| {
const target = macho_file.locals.items[load_reloc.sym_index];
const target_name = macho_file.getString(target.n_strx);
try w.print("target@{s}", .{target_name});
} else {
try w.print("TODO lea PIE for other backends", .{});
}
return w.writeByte('\n');
}
fn mirCallExtern(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {
_ = print;
_ = inst;
return w.writeAll("TODO call_extern");
} | src/arch/x86_64/PrintMir.zig |
const std = @import("std");
const c = @import("internal/c.zig");
const internal = @import("internal/internal.zig");
const log = std.log.scoped(.git);
const git = @import("git.zig");
pub const Remote = opaque {
/// Free the memory associated with a remote.
pub fn deinit(self: *Remote) !void {
log.debug("Remote.deinit called", .{});
c.git_remote_free(@ptrCast(*c.git_remote, self));
log.debug("remote closed successfully", .{});
}
/// Create a copy of an existing remote. All internal strings are also duplicated. Callbacks are not duplicated.
pub fn duplicate(self: *Remote) !*Remote {
log.debug("Remote.duplicate called", .{});
var remote: *Remote = undefined;
try internal.wrapCall("git_remote_dup", .{
@ptrCast(*?*c.git_remote, &remote),
@ptrCast(*c.git_remote, self),
});
log.debug("successfully duplicated remote", .{});
return remote;
}
/// Get the remote's repository.
pub fn getOwner(self: *const Remote) ?*git.Repository {
log.debug("Remote.getOwner called", .{});
const ret = @ptrCast(
?*git.Repository,
c.git_remote_owner(@ptrCast(*const c.git_remote, self)),
);
log.debug("owner: {*}", .{ret});
return ret;
}
/// Get the remote's name.
pub fn getName(self: *const Remote) ?[:0]const u8 {
log.debug("Remote.getName called", .{});
const ret = if (c.git_remote_name(@ptrCast(*const c.git_remote, self))) |r|
std.mem.sliceTo(r, 0)
else
null;
log.debug("name: {s}", .{ret});
return ret;
}
/// Get the remote's url
///
/// If url.*.insteadOf has been configured for this URL, it will return the modified URL.
pub fn getUrl(self: *const Remote) ?[:0]const u8 {
log.debug("Remote.getUrl called", .{});
const ret = if (c.git_remote_url(@ptrCast(*const c.git_remote, self))) |r|
std.mem.sliceTo(r, 0)
else
null;
log.debug("url: {s}", .{ret});
return ret;
}
/// Get the remote's url for pushing.
pub fn getPushUrl(self: *const Remote) ?[:0]const u8 {
log.debug("Remote.getPushUrl called", .{});
const ret = if (c.git_remote_pushurl(@ptrCast(*const c.git_remote, self))) |r|
std.mem.sliceTo(r, 0)
else
null;
log.debug("push url: {s}", .{ret});
return ret;
}
/// Get the remote's list of fetch refspecs.
///
    /// The memory is owned by the caller and should be freed with StrArray.deinit.
pub fn getFetchRefspecs(self: *const Remote) !git.StrArray {
log.debug("Remote.getFetchRefspecs called", .{});
var ret: git.StrArray = .{};
try internal.wrapCall("git_remote_get_fetch_refspecs", .{
@ptrCast(*c.git_strarray, &ret),
@ptrCast(*const c.git_remote, self),
});
log.debug("successfully got fetch refspecs", .{});
return ret;
}
/// Get the remote's list of push refspecs.
///
    /// The memory is owned by the caller and should be freed with StrArray.deinit.
pub fn getPushRefspecs(self: *const Remote) !git.StrArray {
log.debug("Remote.getPushRefspecs called", .{});
var ret: git.StrArray = .{};
try internal.wrapCall("git_remote_get_push_refspecs", .{
@ptrCast(*c.git_strarray, &ret),
@ptrCast(*const c.git_remote, self),
});
log.debug("successfully got push refspecs", .{});
return ret;
}
/// Get the number of refspecs for a remote.
pub fn getRefspecCount(self: *const Remote) usize {
log.debug("Remote.getRefspecsCount called", .{});
const ret = c.git_remote_refspec_count(@ptrCast(*const c.git_remote, self));
log.debug("refspec count: {}", .{ret});
return ret;
}
/// Get a refspec from the remote
///
/// ## Parameters
/// * `n` - The refspec to get.
pub fn getRefspec(self: *const Remote, n: usize) ?*const git.Refspec {
log.debug("Remote.getRefspec called", .{});
const ret = @ptrCast(
?*const git.Refspec,
c.git_remote_get_refspec(@ptrCast(*const c.git_remote, self), n),
);
log.debug("got refspec: {*}", .{ret});
return ret;
}
/// Open a connection to a remote.
///
/// ## Parameters
/// * `direction` - Fetch if you want to fetch or push if you want to push.
/// * `callbacks` - The callbacks to use for this connection.
/// * `proxy_opts` - Proxy settings.
/// * `custom_headers` - Extra HTTP headers to use in this connection.
pub fn connect(
self: *Remote,
direction: git.Direction,
callbacks: git.RemoteCallbacks,
proxy_opts: git.ProxyOptions,
custom_headers: git.StrArray,
) !void {
log.debug("Remote.connect called, direction: {}, proxy_opts: {}", .{ direction, proxy_opts });
const c_proxy_opts = internal.make_c_option.proxyOptions(proxy_opts);
try internal.wrapCall("git_remote_connect", .{
@ptrCast(*c.git_remote, self),
@enumToInt(direction),
@ptrCast(*const c.git_remote_callbacks, &callbacks),
&c_proxy_opts,
@ptrCast(*const c.git_strarray, &custom_headers),
});
log.debug("successfully made connection to remote", .{});
}
/// Get the remote repository's reference advertisement list
///
/// Get the list of references with which the server responds to a new connection.
///
/// The remote (or more exactly its transport) must have connected to the remote repository. This list is available
/// as soon as the connection to the remote is initiated and it remains available after disconnecting.
///
/// The memory belongs to the remote. The pointer will be valid as long as a new connection is not initiated, but
/// it is recommended that you make a copy in order to make use of the data.
pub fn ls(self: *Remote) ![]*const Head {
log.debug("Remote.ls called", .{});
var head_ptr: [*]*const Head = undefined;
var head_n: usize = undefined;
try internal.wrapCall("git_remote_ls", .{
@ptrCast(*?[*]?*const c.git_remote_head, &head_ptr),
&head_n,
@ptrCast(*c.git_remote, self),
});
log.debug("successfully found heads", .{});
return head_ptr[0..head_n];
}
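    // Usage sketch, not part of the upstream libgit2 binding: connect, log the advertised
    // heads, then disconnect. The helper name is hypothetical and the direction is taken as
    // a parameter so this sketch only relies on declarations visible in this file; error
    // handling is reduced to `try`/`catch {}` for brevity.
    fn exampleLogHeads(self: *Remote, direction: git.Direction) !void {
        try self.connect(direction, .{}, .{}, .{});
        defer self.disconnect() catch {};
        for (try self.ls()) |head| {
            log.debug("advertised ref: {s} (local: {})", .{ head.getName(), head.isLocal() });
        }
    }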
/// Description of a reference advertised by a remote server, given out on `ls` calls.
pub const Head = extern struct {
/// use `isLocal()`
z_local: c_int,
oid: git.Oid,
loid: git.Oid,
/// use `getName()`
z_name: [*:0]u8,
/// use `getSymrefTarget()`
z_symref_target: ?[*:0]u8,
        /// Whether the reference is available locally.
pub fn isLocal(self: Head) bool {
return self.z_local != 0;
}
pub fn getName(self: Head) [:0]const u8 {
return std.mem.sliceTo(self.z_name, 0);
}
        /// If the server sends a symref mapping for this ref, this will point to the target.
pub fn getSymrefTarget(self: Head) ?[:0]const u8 {
log.debug("Remote.getUrl called", .{});
const symref = if (self.z_symref_target) |s|
std.mem.sliceTo(s, 0)
else
null;
log.debug("symref: {s}", .{symref});
return symref;
}
test {
try std.testing.expectEqual(@sizeOf(c.git_remote_head), @sizeOf(Head));
try std.testing.expectEqual(@bitSizeOf(c.git_remote_head), @bitSizeOf(Head));
}
comptime {
std.testing.refAllDecls(@This());
}
};
/// Check whether the remote is connected.
///
/// Check whether the remote's underlying transport is connected to the remote host.
pub fn connected(self: *const Remote) bool {
log.debug("Remote.connected called", .{});
const res = c.git_remote_connected(@ptrCast(*const c.git_remote, self)) != 0;
log.debug("connected: {}", .{res});
return res;
}
/// Cancel the operation.
///
/// At certain points in its operation, the network code checks whether the operation has been cancelled and if so
/// stops the operation.
pub fn stop(self: *Remote) !void {
log.debug("Remote.stop called", .{});
try internal.wrapCall("git_remote_stop", .{
@ptrCast(*c.git_remote, self),
});
log.debug("successfully stopped remote operation", .{});
}
/// Disconnect from the remote
///
/// Close the connection to the remote.
pub fn disconnect(self: *Remote) !void {
log.debug("Remote.diconnect called", .{});
try internal.wrapCall("git_remote_disconnect", .{
@ptrCast(*c.git_remote, self),
});
log.debug("successfully disconnected remote", .{});
}
/// Download and index the packfile
///
/// Connect to the remote if it hasn't been done yet, negotiate with the remote git which objects are missing,
/// download and index the packfile.
///
    /// The .idx file will be created and both it and the packfile will be renamed to their final names.
///
/// ## Parameters
/// * `refspecs` - The refspecs to use for this negotiation and download. Use an empty array to use the base refspecs
/// * `options` - The options to use for this fetch
pub fn download(self: *Remote, refspecs: git.StrArray, options: FetchOptions) !void {
log.debug("Remote.download called, options: {}", .{options});
const c_options = internal.make_c_option.fetchOptions(options);
try internal.wrapCall("git_remote_download", .{
@ptrCast(*c.git_remote, self),
@ptrCast(*const c.git_strarray, &refspecs),
&c_options,
});
log.debug("successfully downloaded remote", .{});
}
/// Create a packfile and send it to the server
///
/// Connect to the remote if it hasn't been done yet, negotiate with the remote git which objects are missing, create a
/// packfile with the missing objects and send it.
///
/// ## Parameters
/// * refspecs - the refspecs to use for this negotiation and upload. Use an empty array to use the base refspecs.
/// * options - the options to use for this push.
pub fn upload(self: *Remote, refspecs: git.StrArray, options: PushOptions) !void {
log.debug("Remote.upload called, options: {}", .{options});
const c_options = internal.make_c_option.pushOptions(options);
try internal.wrapCall("git_remote_upload", .{
@ptrCast(*c.git_remote, self),
@ptrCast(*const c.git_strarray, &refspecs),
&c_options,
});
log.debug("successfully completed upload", .{});
}
/// Update the tips to the new state.
///
/// ## Parameters
/// * `callbacks` - The callback structure to use
/// * `update_fetchhead` - Whether to write to FETCH_HEAD. Pass true to behave like git.
/// * `download_tags` - What the behaviour for downloading tags is for this fetch.
/// This is ignored for push.
/// This must be the same value passed to `Remote.download()`.
/// * `reflog_message` - The message to insert into the reflogs.
/// If `null` and fetching, the default is "fetch <name>", where <name> is the name of the remote
/// (or its url, for in-memory remotes).
/// This parameter is ignored when pushing.
pub fn updateTips(
self: *Remote,
callbacks: RemoteCallbacks,
        update_fetchhead: bool,
download_tags: RemoteAutoTagOption,
reflog_message: ?[:0]const u8,
) !void {
log.debug("Remote.updateTips called, update_fetchhead: {}, download_tags: {}, reflog_message: {s}", .{
            update_fetchhead,
download_tags,
reflog_message,
});
const c_reflog_message = if (reflog_message) |s| s.ptr else null;
try internal.wrapCall("git_remote_update_tips", .{
@ptrCast(*c.git_remote, self),
@ptrCast(*const c.git_remote_callbacks, &callbacks),
            @boolToInt(update_fetchhead),
@enumToInt(download_tags),
c_reflog_message,
});
log.debug("successfully updated tips", .{});
}
/// Download new data and update tips.
///
/// ## Parameters
/// * `refspecs` - The refspecs to use for this fetch. Pass an empty array to use the base refspecs.
/// * `options` - Options to use for this fetch.
/// * `reflog_message` - The message to insert into the reflogs. If `null`, the default is "fetch".
pub fn fetch(
self: *Remote,
refspecs: git.StrArray,
options: FetchOptions,
reflog_message: ?[:0]const u8,
) !void {
log.debug("Remote.fetch called, options: {}, reflog_message: {s}", .{ options, reflog_message });
const c_reflog_message = if (reflog_message) |s| s.ptr else null;
const c_options = internal.make_c_option.fetchOptions(options);
try internal.wrapCall("git_remote_fetch", .{
@ptrCast(*c.git_remote, self),
@ptrCast(*const c.git_strarray, &refspecs),
&c_options,
c_reflog_message,
});
log.debug("successfully fetched remote", .{});
}
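    // Usage sketch, not part of the upstream binding: fetch using the remote's configured
    // (base) refspecs, default fetch options, and the default reflog message, as described
    // in the doc comment above. The helper name is hypothetical.
    fn exampleFetchWithDefaults(self: *Remote) !void {
        // An empty refspec array selects the base refspecs; `null` keeps the default
        // "fetch" reflog message.
        return self.fetch(.{}, .{}, null);
    }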
/// Prune tracking refs that are no longer present on remote.
///
/// ## Parameters
/// * `callbacks` - Callbacks to use for this prune.
pub fn prune(self: *Remote, callbacks: RemoteCallbacks) !void {
log.debug("Remote.prune called", .{});
try internal.wrapCall("git_remote_prune", .{
@ptrCast(*c.git_remote, self),
@ptrCast(*const c.git_remote_callbacks, &callbacks),
});
log.debug("successfully pruned remote", .{});
}
    /// Perform a push.
///
/// ## Parameters
/// * `refspecs` - The refspecs to use for pushing. If an empty array is provided, the configured refspecs will be used.
/// * `options` - The options to use for this push.
pub fn push(self: *Remote, refspecs: git.StrArray, options: PushOptions) !void {
log.debug("Remote.push called, options: {}", .{options});
const c_options = internal.make_c_option.pushOptions(options);
try internal.wrapCall("git_remote_push", .{
@ptrCast(*c.git_remote, self),
@ptrCast(*const c.git_strarray, &refspecs),
&c_options,
});
log.debug("successfully pushed remote", .{});
}
/// Get the statistics structure that is filled in by the fetch operation.
pub fn getStats(self: *Remote) *const git.IndexerProgress {
log.debug("Remote.getStats called", .{});
const ret = @ptrCast(
*const git.IndexerProgress,
c.git_remote_stats(@ptrCast(*c.git_remote, self)),
);
log.debug("successfully got statistics", .{});
return ret;
}
/// Retrieve the tag auto-follow setting.
pub fn getAutotag(self: *const Remote) RemoteAutoTagOption {
log.debug("Remote.getAutotag called", .{});
const ret = @intToEnum(
RemoteAutoTagOption,
c.git_remote_autotag(@ptrCast(*const c.git_remote, self)),
);
log.debug("autotag setting: {}", .{ret});
return ret;
}
/// Retrieve the ref-prune setting.
pub fn getPruneRefSetting(self: *const Remote) bool {
log.debug("Remote.getPruneRefSetting called", .{});
const ret = c.git_remote_prune_refs(@ptrCast(*const c.git_remote, self)) != 0;
log.debug("prune ref: {}", .{ret});
return ret;
}
/// Retrieve the name of the remote's default branch
///
/// The default branch of a repository is the branch which HEAD points to. If the remote does not support reporting this
/// information directly, it performs the guess as git does; that is, if there are multiple branches which point to the
/// same commit, the first one is chosen. If the master branch is a candidate, it wins.
///
/// This function must only be called after connecting.
pub fn defaultBranch(self: *Remote) !git.Buf {
log.debug("Remote.defaultBranch called", .{});
var buf = git.Buf{};
try internal.wrapCall("git_remote_default_branch", .{
@ptrCast(*c.git_buf, &buf),
@ptrCast(*c.git_remote, self),
});
log.debug("successfully found default branch: {s}", .{buf.toSlice()});
return buf;
}
comptime {
std.testing.refAllDecls(@This());
}
};
/// Automatic tag following option.
pub const RemoteAutoTagOption = enum(c_uint) {
/// Use the setting from the configuration.
unspecified = 0,
/// Ask the server for tags pointing to objects we're already downloading.
auto,
/// Don't ask for any tags beyond the refspecs.
none,
/// Ask for all the tags.
all,
};
/// Remote creation options structure.
pub const RemoteCreateOptions = struct {
/// The repository that should own the remote.
/// Setting this to `null` results in a detached remote.
repository: ?*git.Repository = null,
/// The remote's name.
/// Setting this to `null` results in an in-memory/anonymous remote.
name: ?[:0]const u8 = null,
/// The fetchspec the remote should use.
fetchspec: ?[:0]const u8 = null,
/// Additional flags for the remote
flags: RemoteCreateFlags = .{},
/// Remote creation options flags.
pub const RemoteCreateFlags = packed struct {
/// Ignore the repository apply.insteadOf configuration.
skip_insteadof: bool = false,
/// Don't build a fetchspec from the name if none is set.
skip_default_fetchspec: bool = false,
z_padding: u30 = 0,
pub fn format(
value: RemoteCreateFlags,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
return internal.formatWithoutFields(
value,
options,
writer,
&.{"z_padding"},
);
}
test {
try std.testing.expectEqual(@sizeOf(c_uint), @sizeOf(RemoteCreateFlags));
try std.testing.expectEqual(@bitSizeOf(c_uint), @bitSizeOf(RemoteCreateFlags));
}
comptime {
std.testing.refAllDecls(@This());
}
};
comptime {
std.testing.refAllDecls(@This());
}
};
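// Construction sketch (illustrative only, not upstream API): an anonymous remote description
// with a custom fetchspec that skips the insteadOf rewriting. The refspec string is a
// placeholder, not a recommendation.
test "RemoteCreateOptions usage sketch" {
    const opts = RemoteCreateOptions{
        .fetchspec = "+refs/heads/*:refs/remotes/origin/*",
        .flags = .{ .skip_insteadof = true },
    };
    try std.testing.expect(opts.repository == null);
    try std.testing.expect(opts.name == null);
}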
/// Fetch options structure.
pub const FetchOptions = struct {
/// Callbacks to use for this fetch operation.
callbacks: RemoteCallbacks = .{},
/// Whether to perform a prune after the fetch.
prune: FetchPrune = .unspecified,
/// Whether to write the results to FETCH_HEAD. Defaults to on. Leave this default to behave like git.
update_fetchhead: bool = true,
    /// Determines how to behave regarding tags on the remote, such as auto-downloading tags for objects we're
/// downloading or downloading all of them. The default is to auto-follow tags.
download_tags: RemoteAutoTagOption = .unspecified,
    /// Proxy options to use, by default no proxy is used.
proxy_opts: git.ProxyOptions = .{},
/// Extra headers for this fetch operation.
custom_headers: git.StrArray = .{},
/// Acceptable prune settings from the configuration.
pub const FetchPrune = enum(c_uint) {
/// Use the setting from the configuration.
unspecified = 0,
/// Force pruning on.
prune,
/// Force pruning off.
no_prune,
};
comptime {
std.testing.refAllDecls(@This());
}
};
pub const PushOptions = struct {
/// If the transport being used to push to the remote requires the creation of a pack file, this controls the
/// number of worker threads used by the packbuilder when creating that pack file to be sent to the remote.
/// If set to 0, the packbuilder will auto-detect the number of threads to create. The default value is 1.
pb_parallelism: c_uint = 1,
    /// Callbacks to use for this push operation.
    callbacks: RemoteCallbacks = .{},
    /// Proxy options to use, by default no proxy is used.
    proxy_opts: git.ProxyOptions = .{},
    /// Extra headers for this push operation.
    custom_headers: git.StrArray = .{},
};
/// Set the callbacks to be called by the remote when informing the user about the progress of the network operations.
pub const RemoteCallbacks = extern struct {
    version: c_uint = c.GIT_REMOTE_CALLBACKS_VERSION,
/// Textual progress from the remote. Text send over the progress side-band will be passed to this function (this is
/// the 'counting objects' output).
///
/// Return a negative value to cancel the network operation.
///
/// ## Parameters
/// * `str` - The message from the transport
/// * `len` - The length of the message
/// * `payload` - Payload provided by the caller
sideband_progress: ?fn (str: [*:0]const u8, len: c_uint, payload: ?*anyopaque) callconv(.C) c_int = null,
/// Completion is called when different parts of the download process are done (currently unused).
completion: ?fn (completion_type: RemoteCompletion, payload: ?*anyopaque) callconv(.C) c_int = null,
/// This will be called if the remote host requires authentication in order to connect to it.
///
/// Return 0 for success, < 0 to indicate an error, > 0 to indicate no credential was acquired
/// Returning `errorToCInt(GitError.Passthrough)` will make libgit2 behave as though this field isn't set.
///
/// ## Parameters
/// * `out` - The newly created credential object.
/// * `url` - The resource for which we are demanding a credential.
    /// * `username_from_url` - The username that was embedded in a "user@host" remote url, or `null` if not included.
/// * `allowed_types` - A bitmask stating which credential types are OK to return.
/// * `payload` - The payload provided when specifying this callback.
credentials: ?fn (
out: **git.Credential,
url: [*:0]const u8,
username_from_url: [*:0]const u8,
allowed_types: git.CredentialType,
payload: ?*anyopaque,
) callconv(.C) c_int = null,
/// If cert verification fails, this will be called to let the user make the final decision of whether to allow the
/// connection to proceed. Returns 0 to allow the connection or a negative value to indicate an error.
///
/// Return 0 to proceed with the connection, < 0 to fail the connection or > 0 to indicate that the callback refused
/// to act and that the existing validity determination should be honored
///
/// ## Parameters
/// * `cert` - The host certificate
/// * `valid` - Whether the libgit2 checks (OpenSSL or WinHTTP) think this certificate is valid.
/// * `host` - Hostname of the host libgit2 connected to
/// * `payload` - Payload provided by the caller
certificate_check: ?fn (
cert: *git.Certificate,
valid: bool,
host: [*:0]const u8,
payload: ?*anyopaque,
) callconv(.C) c_int = null,
/// During the download of new data, this will be regularly called with the current count of progress done by the
/// indexer.
///
/// Return a value less than 0 to cancel the indexing or download.
///
/// ## Parameters
/// * `stats` - Structure containing information about the state of the transfer
/// * `payload` - Payload provided by the caller
transfer_progress: ?fn (stats: *const git.IndexerProgress, payload: ?*anyopaque) callconv(.C) c_int = null,
/// Each time a reference is updated locally, this function will be called with information about it.
update_tips: ?fn (
refname: [*:0]const u8,
a: *const git.Oid,
b: *const git.Oid,
payload: ?*anyopaque,
) callconv(.C) c_int = null,
/// Function to call with progress information during pack building. Be aware that this is called inline with pack
    /// building operations, so performance may be affected.
pack_progress: ?fn (stage: git.PackbuilderStage, current: u32, total: u32, payload: ?*anyopaque) callconv(.C) c_int = null,
    /// Function to call with progress information during the upload portion of a push. Be aware that this is called
/// inline with pack building operations, so performance may be affected.
push_transfer_progress: ?fn (current: c_uint, total: c_uint, size: usize, payload: ?*anyopaque) callconv(.C) c_int = null,
/// Callback used to inform of the update status from the remote.
///
/// Called for each updated reference on push. If `status` is not `null`, the update was rejected by the remote server
/// and `status` contains the reason given.
///
/// 0 on success, otherwise an error
///
/// ## Parameters
/// * `refname` - Refname specifying to the remote ref
/// * `status` - Status message sent from the remote
/// * `data` - Data provided by the caller
push_update_reference: ?fn (
refname: [*:0]const u8,
status: ?[*:0]const u8,
data: ?*anyopaque,
) callconv(.C) c_int = null,
/// Called once between the negotiation step and the upload. It provides information about what updates will be
/// performed.
/// Callback used to inform of upcoming updates.
///
/// ## Parameters
/// * `updates` - An array containing the updates which will be sent as commands to the destination.
/// * `len` - Number of elements in `updates`
/// * `payload` - Payload provided by the caller
push_negotiation: ?fn (updates: [*]*const PushUpdate, len: usize, payload: ?*anyopaque) callconv(.C) c_int = null,
/// Create the transport to use for this operation. Leave `null` to auto-detect.
transport: ?fn (out: **git.Transport, owner: *Remote, param: ?*anyopaque) callconv(.C) c_int = null,
    /// This will be passed to each of the callbacks in this struct as the last parameter.
payload: ?*anyopaque = null,
/// Resolve URL before connecting to remote. The returned URL will be used to connect to the remote instead.
/// This callback is deprecated; users should use git_remote_ready_cb and configure the instance URL instead.
///
/// Return 0 on success, `errorToCInt(GitError.Passthrough)` or an error
/// If you return `errorToCInt(GitError.Passthrough)`, you don't need to write anything to url_resolved.
///
/// ## Parameters
/// * `url_resolved` - The buffer to write the resolved URL to
/// * `url` - The URL to resolve
/// * `direction` - Direction of the resolution
/// * `payload` - Payload provided by the caller
resolve_url: ?fn (
url_resolved: *git.Buf,
url: [*:0]const u8,
direction: git.Direction,
payload: ?*anyopaque,
) callconv(.C) c_int = null,
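    // Callback sketch (hypothetical helper, not part of the upstream binding): a
    // transfer_progress implementation that only notes that progress arrived and never
    // cancels. It could be plugged in with `.{ .transfer_progress = exampleTransferProgress }`.
    fn exampleTransferProgress(stats: *const git.IndexerProgress, payload: ?*anyopaque) callconv(.C) c_int {
        _ = stats;
        _ = payload;
        log.debug("transfer progress callback invoked", .{});
        // Returning 0 keeps the operation running; a negative value would cancel it.
        return 0;
    }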
/// Argument to the completion callback which tells it which operation finished.
pub const RemoteCompletion = enum(c_uint) {
download,
indexing,
@"error",
};
/// Represents an update which will be performed on the remote during push
pub const PushUpdate = extern struct {
/// The source name of the reference
src_refname: [*:0]const u8,
/// The name of the reference to update on the server
dst_refname: [*:0]const u8,
/// The current target of the reference
src: git.Oid,
/// The new target for the reference
dst: git.Oid,
test {
try std.testing.expectEqual(@sizeOf(c.git_push_update), @sizeOf(PushUpdate));
try std.testing.expectEqual(@bitSizeOf(c.git_push_update), @bitSizeOf(PushUpdate));
}
comptime {
std.testing.refAllDecls(@This());
}
};
test {
try std.testing.expectEqual(@sizeOf(c.git_remote_callbacks), @sizeOf(RemoteCallbacks));
try std.testing.expectEqual(@bitSizeOf(c.git_remote_callbacks), @bitSizeOf(RemoteCallbacks));
}
comptime {
std.testing.refAllDecls(@This());
}
};
comptime {
std.testing.refAllDecls(@This());
} | src/remote.zig |
const std = @import("std");
const hasher = std.hash.Fnv1a_64;
const cuckoo = @import("./src/cuckoofilter.zig");
fn fingerprint8(x: []const u8) u8 {
return x[0];
}
fn fingerprint32(x: []const u8) u32 {
// Just a sample strategy, not suitable for all types
// of input. Imagine if you were adding images to the
// filter: all fingerprints would be the same because
// most formats have a standard header. In that case
// you want to make sure to use the actual graphical
// data to pluck your fingerprint from.
return @bytesToSlice(u32, x[0..@sizeOf(u32)])[0];
}
pub fn main() !void {
// Assume we want to keep track of max 1 Million items.
const universe_size = 1000000;
// Let's use Filter8, a filter with 1 byte long
// fingerprints and a 3% max *false positive* error rate.
// Note: Cuckoo filters cannot produce false negatives.
// Error % information:
_ = cuckoo.Filter8.MaxError;
// ╚═> 3.125e-02 (~0.03, i.e. 3%)
_ = cuckoo.Filter16.MaxError;
// ╚═> 1.22070312e-04 (~0.0001, i.e. 0.01%)
_ = cuckoo.Filter32.MaxError;
// ╚═> 9.31322574e-10 (~0.000000001, i.e. 0.0000001%)
// First let's calculate how big the filter has to be:
const memsize = comptime cuckoo.Filter8.size_for(universe_size);
// The value of memsize has to be a power of two and it
// is *strongly* recommended to keep the fill rate of a
// filter under 80%. size_for() will pad the number for
// you automatically and then round up to the closest
// power of 2. size_for_exactly() will not apply any
// padding before rounding up.
// Use capacity() to know how many items a slice of memory
// can store for the given filter type.
_ = cuckoo.Filter8.capacity(memsize); // => 2097152
// Note: this function will return the theoretical maximum
// capacity, without subtracting any padding. It's smart
// to adjust your expectations to match how much memory
// you have to allocate anyway, but don't get too greedy.
// I say `theoretical` because an overfilled filter will
// start refusing inserts with a TooFull error.
// This is how you allocate static memory for the filter:
var memory: [memsize]u8 align(cuckoo.Filter8.Align) = undefined;
// Note: the filter benefits from a specific alignment
// (which differs from type to type) so you must specify it
// when allocating memory. Failing to do so will result in
// a comptime error.
// Instantiating a filter
var cf8 = try cuckoo.Filter8.init(memory[0..]);
//
// FILTER USAGE
//
const banana_h = hasher.hash("banana");
const banana_fp = fingerprint8("banana");
const apple_h = hasher.hash("apple");
const apple_fp = fingerprint8("apple");
_ = try cf8.maybe_contains(banana_h, banana_fp); // => false
_ = try cf8.count(); // => 0
try cf8.add(banana_h, banana_fp);
_ = try cf8.maybe_contains(banana_h, banana_fp); // => true
_ = try cf8.maybe_contains(apple_h, apple_fp); // => false
_ = try cf8.count(); // => 1
try cf8.remove(banana_h, banana_fp);
_ = try cf8.maybe_contains(banana_h, banana_fp); // => false
_ = try cf8.count(); // => 0
// The filter can also be used with dynamic memory.
// It's up to you to manage that via an allocator.
const example_allocator = std.heap.c_allocator;
// Don't forget to free the memory afterwards.
const memsize32 = comptime cuckoo.Filter32.size_for_exactly(64);
var dyn_memory = try example_allocator.alignedAlloc(u8, cuckoo.Filter32.Align, memsize32);
defer example_allocator.free(dyn_memory);
var dyn_cf32 = try example_allocator.create(cuckoo.Filter32);
defer example_allocator.destroy(dyn_cf32);
dyn_cf32.* = try cuckoo.Filter32.init(dyn_memory);
// When restoring a persisted filter, you should only persist the individual fields
// as, for example, .buckets is a slice that points to `dyn_memory` which would be
// invalid upon restore (wrong pointer) and just a waste of space when stored.
// Upon loading, to reconnect the filter and its `dyn_memory`, use bytesToBuckets.
// Here's an example (which is not necessary to make this script work, as we just created
// the entire filter):
//
// dyn_cf32.buckets = cuckoo.Filter32.bytesToBuckets(dyn_memory);
//
// USAGE FAILURE SCENARIOS
//
// 1. Adding too many colliding items (because of bad entropy or
// because you are adding multiple copies of the same item)
const pear_h = hasher.hash("pear");
const pear_fp = fingerprint32("pear");
try dyn_cf32.add(pear_h, pear_fp);
try dyn_cf32.add(pear_h, pear_fp);
try dyn_cf32.add(pear_h, pear_fp);
try dyn_cf32.add(pear_h, pear_fp);
try dyn_cf32.add(pear_h, pear_fp);
// No more space for items with equal hash and fp,
// next insert will fail.
dyn_cf32.add(pear_h, pear_fp) catch |err| switch (err) {
error.TooFull => std.debug.warn("yep, too full\n"),
else => unreachable,
};
// Other inserts that don't collide can still succeed
const orange_h = hasher.hash("orange");
const orange_fp = fingerprint32("orange");
try dyn_cf32.add(orange_h, orange_fp);
// 2. You can only delete elements that were inserted before.
// Trying to delete a non-existing item has a chance of
// breaking the filter (makes false negatives possible).
// Deleting a non-existing item can either cause the
// deletion of another colliding item or fail to find
// a matching fingerprint in the filter. In the second
// case the filter locks down and returns an error for
// all operations, as it is now impossible to know what
// the correct state would be.
dyn_cf32.remove(0, 0) catch |err| switch (err) {
error.Broken => std.debug.warn(".remove, broken\n"),
};
_ = dyn_cf32.is_broken(); // => true
    dyn_cf32.add(orange_h, orange_fp) catch |err| switch (err) {
error.Broken => std.debug.warn(".add, broken\n"),
error.TooFull => {},
};
if (dyn_cf32.count()) |_| {
std.debug.warn(".count, works\n"); // won't be printed
} else |err| switch (err) {
error.Broken => std.debug.warn(".count, broken\n")
}
// Since searching does not mutate the filter, if the item
// is found, no error is returned:
_ = try dyn_cf32.maybe_contains(orange_h, orange_fp); // => true
// But if an item is not found, we don't know if it was wrongly
// deleted or not, so the filter has to return an error in order
// to ensure that it does not return a false negative response.
if (dyn_cf32.maybe_contains(0, 0)) |_| {
std.debug.warn(".maybe_contains, works\n"); // won't be printed
} else |err| switch (err) {
error.Broken => std.debug.warn(".maybe_contains, broken\n")
}
// You should *NEVER* get into that situation. If you do, it's
// a programming error. If your program runs in an environment
// where a request that involves the filter might be repeated
// (e.g. web servers), mark each request by a unique ID and
// keep some kind of commit log to ensure you don't run the
// same request twice, as it's semantically wrong to expect
// idempotence from Cuckoo filter commands.
// 3. Other small errors could be trying to pass to init memory
// with the wrong alignment or a wrong buffer size. Try to
// use the provided functions (i.e. size_for, size_for_exactly)
// to always have your buffers be the right size. You can
// also use those functions to reason about your data and even
// opt not to use a filter if the tradeoff is not worth it.
if (cuckoo.Filter8.init(memory[1..13])) |_| {
std.debug.warn(".init, works\n"); // won't be printed
} else |err| switch (err) {
error.BadLength => std.debug.warn(".init failed, use .size_for()!\n")
}
//
// FIXING TOO FULL
//
// Filter8 and Filter16 have 4 element-wide buckets,
// while Filter32 has 2 element-wide buckets.
// Each fingerprint has two buckets that can be used to
// house it. This means that you can have, respectively,
// up to 8 (F8, F16) and 4 (F32) collisions/copies before both
// buckets fill completely and you get TooFull. In practice,
// you get an extra chance because of how filters work internally.
// There's a special slot that houses a single fingerprint that
// could not find space in one of its 2 candidate slots.
// The problem is that once that "safety" slot is filled, the
    // filter becomes much more susceptible to collisions and is forced
// to return TooFull when in fact it could try to make space.
// If you are also deleting elements from the filter, and
// not just adding them, this is what you can do to try and
// recover from that situation.
// Returns true if the safety slot is occupied.
var bad_situation = dyn_cf32.is_toofull();
// Note that you might not have ever received a TooFull error for
// this function to return true. In our previous example with
// dyn_cf32, it took us 5 insertions to obtain a TooFull error.
// This function would return true after 4.
// Try to fix the situation:
if (bad_situation) {
dyn_cf32.fix_toofull() catch |err| switch (err) {
error.Broken => {},
error.TooFull => {},
};
}
// With this function you can only fix TooFull, not Broken.
// If fix_toofull returns TooFull, it means that it failed.
// In practice you will need to free more elements before
// being able to fix the situation, but in theory calling
// the function multiple times might eventually fix the
// situation (i.e. it can make progress each call).
// That said, going back to practical usage, you are probably
// in a problematic situation when it gets to that point.
// To ensure you never have to deal with these problems,
// make sure you:
// (1) Never overfill/undersize a filter.
// (2) Get entropy right for the fingerprinting function.
//
// A trick to get (2) right is to pluck it not out of the
// original element, but out of hash2(element). Just make sure
    // you use a different hashing function, independent from the
    // first one, otherwise you're going to still end up with too
    // little entropy, and be aware of the increased computational
    // cost. Secondary hashing might be worth it for semi-structured
// data where you might find it hard to know if you're plucking
// "variable" data or part of the structure (e.g. JSON data),
// since the latter is bound to have lower entropy.
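    //
    // A sketch of that trick (assuming your std version exposes std.hash.Crc32, which
    // only stands in here for "any second, independent hash function"):
    const pear_fp_alt: u32 = std.hash.Crc32.hash("pear");
    _ = pear_fp_alt; // illustration only, not used further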
//
// PRNG Stuff
//
// Cuckoo Filters need a random number generator to decide which
// fingerprint to evict when a given bucket pair is full. This
// library provides a default implementation that uses the Zig
// standard library's Xoroshiro implementation, seeded by default to 42.
// If your application has short-lived sessions, a static seed won't be
// good enough, as it will basically result in giving out the same
// number over and over again, similarly to what is shown in that one
// dilbert strip. To fix that use seed_default_prng:
var buf: [8]u8 = undefined;
try std.crypto.randomBytes(buf[0..]);
const seed = std.mem.readIntSliceLittle(u64, buf[0..8]);
cuckoo.seed_default_prng(seed);
// Additionally, you might also want to provide your own PRNG
// implementation, either because you have specific needs (CSPRNG) or
// because you might want to make the filter fully deterministic and thus
// need to be able to persist and restore the PRNG's state.
// You can customize the PRNG of each filter by providing an appropriate
// function pointer:
dyn_cf32.rand_fn = DilbertRandom;
// From now on `dyn_cf32` will stop using the default implementation
// (shared by default by all filters) and will instead only use the
// provided function. If you use this functionality, *make sure to
// set the function pointer again when loading the filter from disk*.
// If you're fine with the default implementation and want more control
// than just seeding, use .get_default_prng_state() and
// .set_default_prng_state(), but beware that you are modifying a
// "singleton" struct used by all filters. If you are in a multi-threaded
// context this might cause problems if you are executing
// .add / .delete / .fix_toofull and altering the prng singleton at the
// same time. In that case you will have to customize .rand_fn
}
fn DilbertRandom() u1 {
return 1;
} | example.zig |
const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
const target = b.standardTargetOptions(.{});
var main_tests = b.addTest("src/main.zig");
main_tests.setBuildMode(mode);
main_tests.setTarget(target);
link(b, main_tests, .{});
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&main_tests.step);
}
pub const LinuxWindowManager = enum {
X11,
Wayland,
};
pub const Options = struct {
/// Not supported on macOS.
vulkan: bool = true,
/// Only respected on macOS.
metal: bool = true,
/// Deprecated on macOS.
opengl: bool = false,
/// Not supported on macOS.
gles: bool = false,
/// Only respected on Linux.
linux_window_manager: LinuxWindowManager = .X11,
};
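// Usage sketch (not part of this build script's public surface): how a consumer step might
// request the Wayland backend with OpenGL enabled. The function and `exe` are hypothetical;
// the real entry point is `link` below.
fn exampleLinkWayland(b: *Builder, exe: *std.build.LibExeObjStep) void {
    link(b, exe, .{
        .linux_window_manager = .Wayland,
        .opengl = true,
    });
}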
pub fn link(b: *Builder, step: *std.build.LibExeObjStep, options: Options) void {
var arena = std.heap.ArenaAllocator.init(b.allocator);
defer arena.deinit();
const lib = b.addStaticLibrary("engine", "src/main.zig");
lib.setBuildMode(step.build_mode);
lib.setTarget(step.target);
const target = (std.zig.system.NativeTargetInfo.detect(b.allocator, step.target) catch unreachable).target;
switch (target.os.tag) {
.windows => {
var sources = std.ArrayList([]const u8).init(&arena.allocator);
for ([_][]const u8{
// Windows-specific sources
"upstream/glfw/src/win32_thread.c",
"upstream/glfw/src/wgl_context.c",
"upstream/glfw/src/win32_init.c",
"upstream/glfw/src/win32_monitor.c",
"upstream/glfw/src/win32_time.c",
"upstream/glfw/src/win32_joystick.c",
"upstream/glfw/src/win32_window.c",
// General sources
"upstream/glfw/src/monitor.c",
"upstream/glfw/src/init.c",
"upstream/glfw/src/vulkan.c",
"upstream/glfw/src/input.c",
"upstream/glfw/src/osmesa_context.c",
"upstream/glfw/src/egl_context.c",
"upstream/glfw/src/context.c",
"upstream/glfw/src/window.c",
}) |path| {
var abs_path = std.fs.path.join(&arena.allocator, &.{ thisDir(), path }) catch unreachable;
sources.append(abs_path) catch unreachable;
}
lib.addCSourceFiles(sources.items, &.{"-D_GLFW_WIN32"});
},
.macos => {
includeSdkMacOS(b, lib);
var sources = std.ArrayList([]const u8).init(&arena.allocator);
for ([_][]const u8{
// MacOS-specific sources
"upstream/glfw/src/cocoa_joystick.m",
"upstream/glfw/src/cocoa_init.m",
"upstream/glfw/src/cocoa_window.m",
"upstream/glfw/src/cocoa_time.c",
"upstream/glfw/src/cocoa_monitor.m",
"upstream/glfw/src/nsgl_context.m",
"upstream/glfw/src/posix_thread.c",
// General sources
"upstream/glfw/src/monitor.c",
"upstream/glfw/src/init.c",
"upstream/glfw/src/vulkan.c",
"upstream/glfw/src/input.c",
"upstream/glfw/src/osmesa_context.c",
"upstream/glfw/src/egl_context.c",
"upstream/glfw/src/context.c",
"upstream/glfw/src/window.c",
}) |path| {
var abs_path = std.fs.path.join(&arena.allocator, &.{ thisDir(), path }) catch unreachable;
sources.append(abs_path) catch unreachable;
}
lib.addCSourceFiles(sources.items, &.{"-D_GLFW_COCOA"});
},
else => {
// Assume Linux-like
includeSdkLinuxX8664(b, step);
// TODO(slimsag): for now, Linux must be built with glibc, not musl:
//
// ```
// ld.lld: error: cannot create a copy relocation for symbol stderr
// thread 2004762 panic: attempt to unwrap error: LLDReportedFailure
// ```
step.target.abi = .gnu;
lib.setTarget(step.target);
var general_sources = std.ArrayList([]const u8).init(&arena.allocator);
const flag = switch (options.linux_window_manager) {
.X11 => "-D_GLFW_X11",
.Wayland => "_D_GLFW_WAYLAND",
};
for ([_][]const u8{
// General Linux-like sources
"upstream/glfw/src/posix_time.c",
"upstream/glfw/src/posix_thread.c",
"upstream/glfw/src/linux_joystick.c",
// General sources
"upstream/glfw/src/monitor.c",
"upstream/glfw/src/init.c",
"upstream/glfw/src/vulkan.c",
"upstream/glfw/src/input.c",
"upstream/glfw/src/osmesa_context.c",
"upstream/glfw/src/egl_context.c",
"upstream/glfw/src/context.c",
"upstream/glfw/src/window.c",
}) |path| {
var abs_path = std.fs.path.join(&arena.allocator, &.{ thisDir(), path }) catch unreachable;
general_sources.append(abs_path) catch unreachable;
}
lib.addCSourceFiles(general_sources.items, &.{flag});
switch (options.linux_window_manager) {
.X11 => {
var x11_sources = std.ArrayList([]const u8).init(&arena.allocator);
for ([_][]const u8{
"upstream/glfw/src/x11_init.c",
"upstream/glfw/src/x11_window.c",
"upstream/glfw/src/x11_monitor.c",
"upstream/glfw/src/xkb_unicode.c",
"upstream/glfw/src/glx_context.c",
}) |path| {
var abs_path = std.fs.path.join(&arena.allocator, &.{ thisDir(), path }) catch unreachable;
x11_sources.append(abs_path) catch unreachable;
}
lib.addCSourceFiles(x11_sources.items, &.{flag});
},
.Wayland => {
var wayland_sources = std.ArrayList([]const u8).init(&arena.allocator);
for ([_][]const u8{
"upstream/glfw/src/wl_monitor.c",
"upstream/glfw/src/wl_window.c",
"upstream/glfw/src/wl_init.c",
}) |path| {
var abs_path = std.fs.path.join(&arena.allocator, &.{ thisDir(), path }) catch unreachable;
wayland_sources.append(abs_path) catch unreachable;
}
lib.addCSourceFiles(wayland_sources.items, &.{flag});
},
}
},
}
linkGLFW(b, lib, options);
lib.install();
step.linkLibrary(lib);
linkGLFW(b, step, options);
}
fn thisDir() []const u8 {
return std.fs.path.dirname(@src().file) orelse ".";
}
fn linkGLFW(b: *Builder, step: *std.build.LibExeObjStep, options: Options) void {
var include_dir = std.fs.path.join(b.allocator, &.{ thisDir(), "upstream/glfw/include" }) catch unreachable;
defer b.allocator.free(include_dir);
step.addIncludeDir(include_dir);
step.linkLibC();
const target = (std.zig.system.NativeTargetInfo.detect(b.allocator, step.target) catch unreachable).target;
switch (target.os.tag) {
.windows => {
step.linkSystemLibrary("gdi32");
if (options.opengl) {
step.linkSystemLibrary("opengl32");
}
if (options.gles) {
// TODO(slimsag): does anyone want GLESv1/GLESv3 options?
step.linkSystemLibrary("GLESv2");
}
},
.macos => {
includeSdkMacOS(b, step);
step.linkFramework("Cocoa");
step.linkFramework("IOKit");
step.linkFramework("CoreFoundation");
if (options.metal) {
step.linkFramework("Metal");
}
if (options.opengl) {
step.linkFramework("OpenGL");
}
},
else => {
// Assume Linux-like
includeSdkLinuxX8664(b, step);
switch (options.linux_window_manager) {
.X11 => {
step.linkSystemLibrary("X11");
step.linkSystemLibrary("xcb");
step.linkSystemLibrary("Xau");
step.linkSystemLibrary("Xdmcp");
},
.Wayland => step.linkSystemLibrary("wayland-client"),
}
// Note: no need to link against vulkan, GLFW finds it dynamically at runtime.
// https://www.glfw.org/docs/3.3/vulkan_guide.html#vulkan_loader
if (options.opengl) {
step.linkSystemLibrary("GL");
}
if (options.gles) {
// TODO(slimsag): does anyone want GLESv1/GLESv3 options?
step.linkSystemLibrary("GLESv2");
}
},
}
}
fn includeSdkMacOS(b: *Builder, step: *std.build.LibExeObjStep) void {
const sdk_root_dir = getSdkRoot(b.allocator, "sdk-macos-11.3") catch unreachable;
defer b.allocator.free(sdk_root_dir);
var sdk_root_frameworks = std.fs.path.join(b.allocator, &.{ sdk_root_dir, "root/System/Library/Frameworks" }) catch unreachable;
defer b.allocator.free(sdk_root_frameworks);
step.addFrameworkDir(sdk_root_frameworks);
var sdk_root_includes = std.fs.path.join(b.allocator, &.{ sdk_root_dir, "root/usr/include" }) catch unreachable;
defer b.allocator.free(sdk_root_includes);
step.addSystemIncludeDir(sdk_root_includes);
var sdk_root_libs = std.fs.path.join(b.allocator, &.{ sdk_root_dir, "root/usr/lib" }) catch unreachable;
defer b.allocator.free(sdk_root_libs);
step.addLibPath(sdk_root_libs);
// TODO(slimsag): Without setting sysroot, zld fails to resolve /usr/lib/libobjc.A.dylib when specifying -Dtarget=x86_64-macos
// Presumably has something to do with https://github.com/ziglang/zig/issues/6996 - I think zld doesn't consider addLibPath/addFrameworkDir
// resolution as part of dependant libs: https://github.com/ziglang/zig/blob/2d855745f91852af92ad970feef96e55919993d3/src/link/MachO/Dylib.zig#L477-L483
var sdk_sysroot = std.fs.path.join(b.allocator, &.{ sdk_root_dir, "root/" }) catch unreachable;
b.sysroot = sdk_sysroot; // TODO(slimsag): leaks, b.sysroot doesn't get free'd by builder?
}
fn includeSdkLinuxX8664(b: *Builder, step: *std.build.LibExeObjStep) void {
const sdk_root_dir = getSdkRoot(b.allocator, "sdk-linux-x86_64") catch unreachable;
defer b.allocator.free(sdk_root_dir);
var sdk_root_includes = std.fs.path.join(b.allocator, &.{ sdk_root_dir, "root/usr/include" }) catch unreachable;
defer b.allocator.free(sdk_root_includes);
step.addSystemIncludeDir(sdk_root_includes);
var sdk_root_libs = std.fs.path.join(b.allocator, &.{ sdk_root_dir, "root/usr/lib/x86_64-linux-gnu" }) catch unreachable;
defer b.allocator.free(sdk_root_libs);
step.addLibPath(sdk_root_libs);
}
/// Caller owns returned memory.
fn getSdkRoot(allocator: *std.mem.Allocator, comptime name: []const u8) ![]const u8 {
// Find the directory where the SDK should be located. We'll consider two locations:
//
// 1. $SDK_PATH/<name> (if set, e.g. for testing changes to SDKs easily)
// 2. <appdata>/<name> (default)
//
// Where `<name>` is the name of the SDK, e.g. `sdk-macos-11.3`.
var sdk_root_dir: []const u8 = undefined;
var sdk_path_dir: []const u8 = undefined;
defer allocator.free(sdk_path_dir);
if (std.process.getEnvVarOwned(allocator, "SDK_PATH")) |sdk_path| {
sdk_path_dir = sdk_path;
sdk_root_dir = try std.fs.path.join(allocator, &.{ sdk_path, name });
} else |err| switch (err) {
error.EnvironmentVariableNotFound => {
sdk_path_dir = try std.fs.getAppDataDir(allocator, "mach");
sdk_root_dir = try std.fs.path.join(allocator, &.{ sdk_path_dir, name });
},
else => |e| return e,
}
// If the SDK exists, return it. Otherwise, clone it.
    if (std.fs.openDirAbsolute(sdk_root_dir, .{})) |_| {
return sdk_root_dir;
} else |err| return switch (err) {
error.FileNotFound => {
std.log.info("cloning required sdk..\ngit clone https://github.com/hexops/{s} '{s}'..\n", .{ name, sdk_root_dir });
if (std.mem.eql(u8, name, "sdk-macos-11.3")) {
if (!try confirmAppleSDKAgreement(allocator)) @panic("cannot continue");
}
try std.fs.cwd().makePath(sdk_path_dir);
const argv = &[_][]const u8{ "git", "clone", "https://github.com/hexops/" ++ name };
const child = try std.ChildProcess.init(argv, allocator);
child.cwd = sdk_path_dir;
child.stdin = std.io.getStdOut();
child.stderr = std.io.getStdErr();
child.stdout = std.io.getStdOut();
try child.spawn();
_ = try child.wait();
return sdk_root_dir;
},
else => err,
};
}
fn confirmAppleSDKAgreement(allocator: *std.mem.Allocator) !bool {
if (std.process.getEnvVarOwned(allocator, "AGREE")) |agree| {
defer allocator.free(agree);
return std.mem.eql(u8, agree, "true");
} else |err| switch (err) {
error.EnvironmentVariableNotFound => {},
else => |e| return e,
}
const stdin = std.io.getStdIn().reader();
const stdout = std.io.getStdOut().writer();
var buf: [10]u8 = undefined;
try stdout.print("This SDK is distributed under the terms of the Xcode and Apple SDKs agreement:\n", .{});
try stdout.print(" https://www.apple.com/legal/sla/docs/xcode.pdf\n", .{});
try stdout.print("\n", .{});
try stdout.print("Do you agree to those terms? [Y/n] ", .{});
if (try stdin.readUntilDelimiterOrEof(buf[0..], '\n')) |user_input| {
try stdout.print("\n", .{});
var in = user_input;
if (in[in.len - 1] == '\r') in = in[0 .. in.len - 1];
return std.mem.eql(u8, in, "y") or std.mem.eql(u8, in, "Y") or std.mem.eql(u8, in, "yes") or std.mem.eql(u8, in, "");
} else {
return false;
}
} | glfw/build.zig |
const std = @import("std");
const input = @import("input.zig");
pub fn run(allocator: std.mem.Allocator, stdout: anytype) anyerror!void {
const side_length = 100;
{
var input_ = try input.readFile("inputs/day15");
defer input_.deinit();
const result = try part1(side_length, allocator, &input_);
try stdout.print("15a: {}\n", .{ result });
std.debug.assert(result == 592);
}
{
var input_ = try input.readFile("inputs/day15");
defer input_.deinit();
const result = try part2(side_length, allocator, &input_);
try stdout.print("15b: {}\n", .{ result });
std.debug.assert(result == 2897);
}
}
const max_level = 9;
const Level = std.math.IntFittingRange(0, max_level);
const Sum = std.math.IntFittingRange(0, max_level * (2 * 100) * 25);
fn part1(comptime side_length: usize, allocator: std.mem.Allocator, input_: anytype) !Sum {
var levels = try parseInput(side_length, input_);
const SolveContext = struct {
levels: *[side_length][side_length]Level,
fn getLevel(self: *const @This(), row: usize, col: usize) Level {
return self.levels[row][col];
}
};
return solve(side_length, allocator, SolveContext { .levels = &levels });
}
fn part2(comptime side_length: usize, allocator: std.mem.Allocator, input_: anytype) !Sum {
var levels = try parseInput(side_length, input_);
const SolveContext = struct {
levels: *[side_length][side_length]Level,
fn getLevel(self: *const @This(), row: usize, col: usize) Level {
return @intCast(Level, ((@as(u8, self.levels[row % side_length][col % side_length]) - 1) + row / side_length + col / side_length) % 9 + 1);
}
};
return solve(side_length * 5, allocator, SolveContext { .levels = &levels });
}
fn parseInput(comptime side_length: usize, input_: anytype) ![side_length][side_length]Level {
var levels: [side_length][side_length]Level = [_][side_length]Level { [_]Level { max_level } ** side_length } ** side_length;
var row_i: usize = 0;
while (try input_.next()) |line| {
for (line) |c, col_i| {
if (c < '0' or c > '9') {
return error.InvalidInput;
}
levels[row_i][col_i] = @intCast(Level, c - '0');
}
row_i += 1;
}
return levels;
}
fn Coord(comptime side_length: usize) type {
return struct {
row: std.math.IntFittingRange(0, side_length - 1),
col: std.math.IntFittingRange(0, side_length - 1),
};
}
const SumNode = struct {
sum: Sum,
level: Level,
visited: bool,
};
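// Dijkstra's shortest-path search over the grid: nodes are cells, the edge weight is the
// destination cell's risk level, and the priority queue is ordered by the best known sum.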
fn solve(comptime side_length: usize, allocator: std.mem.Allocator, context: anytype) !Sum {
var sum: [side_length][side_length]SumNode = undefined;
for (sum) |*row, row_i| {
for (row) |*node, col_i| {
node.sum = std.math.maxInt(Sum);
node.level = context.getLevel(row_i, col_i);
node.visited = false;
}
}
sum[0][0].sum = 0;
var to_visit = std.PriorityQueue(Coord(side_length), *const [side_length][side_length]SumNode, comptime sortCoord(side_length)).init(allocator, &sum);
defer to_visit.deinit();
try to_visit.add(.{ .row = 0, .col = 0 });
while (to_visit.removeOrNull()) |coord| {
const current_sum = sum[coord.row][coord.col].sum;
if (coord.row == side_length - 1 and coord.col == side_length - 1) {
return current_sum;
}
if (sum[coord.row][coord.col].visited) {
continue;
}
sum[coord.row][coord.col].visited = true;
if (coord.row > 0) {
const up = Coord(side_length) { .row = coord.row - 1, .col = coord.col };
if (!sum[up.row][up.col].visited) {
const new_sum = std.math.min(sum[up.row][up.col].sum, current_sum + sum[up.row][up.col].level);
if (new_sum < sum[up.row][up.col].sum) {
sum[up.row][up.col].sum = new_sum;
try queueForVisiting(side_length, &to_visit, up);
}
}
}
if (coord.col > 0) {
const left = Coord(side_length) { .row = coord.row, .col = coord.col - 1 };
if (!sum[left.row][left.col].visited) {
const new_sum = std.math.min(sum[left.row][left.col].sum, current_sum + sum[left.row][left.col].level);
if (new_sum < sum[left.row][left.col].sum) {
sum[left.row][left.col].sum = new_sum;
try queueForVisiting(side_length, &to_visit, left);
}
}
}
if (coord.col < side_length - 1) {
const right = Coord(side_length) { .row = coord.row, .col = coord.col + 1 };
if (!sum[right.row][right.col].visited) {
const new_sum = std.math.min(sum[right.row][right.col].sum, current_sum + sum[right.row][right.col].level);
if (new_sum < sum[right.row][right.col].sum) {
sum[right.row][right.col].sum = new_sum;
try queueForVisiting(side_length, &to_visit, right);
}
}
}
if (coord.row < side_length - 1) {
const down = Coord(side_length) { .row = coord.row + 1, .col = coord.col };
if (!sum[down.row][down.col].visited) {
const new_sum = std.math.min(sum[down.row][down.col].sum, current_sum + sum[down.row][down.col].level);
if (new_sum < sum[down.row][down.col].sum) {
sum[down.row][down.col].sum = new_sum;
try queueForVisiting(side_length, &to_visit, down);
}
}
}
}
return error.InvalidInput;
}
fn sortCoord(comptime side_length: usize) fn(context: *const [side_length][side_length]SumNode, Coord(side_length), Coord(side_length)) std.math.Order {
const impl = struct {
fn inner(context: *const [side_length][side_length]SumNode, a: Coord(side_length), b: Coord(side_length)) std.math.Order {
return std.math.order(context[a.row][a.col].sum, context[b.row][b.col].sum);
}
};
return impl.inner;
}
fn queueForVisiting(
comptime side_length: usize,
to_visit: *std.PriorityQueue(Coord(side_length), *const [side_length][side_length]SumNode, sortCoord(side_length)),
new_coord: Coord(side_length),
) !void {
try to_visit.add(new_coord);
}
test "day 15 example 1" {
const input_ =
\\1163751742
\\1381373672
\\2136511328
\\3694931569
\\7463417111
\\1319128137
\\1359912421
\\3125421639
\\1293138521
\\2311944581
;
const side_length = 10;
try std.testing.expectEqual(@as(usize, 40), try part1(side_length, std.testing.allocator, &input.readString(input_)));
try std.testing.expectEqual(@as(usize, 315), try part2(side_length, std.testing.allocator, &input.readString(input_)));
} | src/day15.zig |
const std = @import("std");
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
const parseInt = std.fmt.parseInt;
pub fn main() !void {
var gpalloc = std.heap.GeneralPurposeAllocator(.{}){};
var allocator = &gpalloc.allocator;
defer std.debug.assert(!gpalloc.deinit());
const file = try std.fs.cwd().openFile("../inputs/03.txt", .{});
defer file.close();
const reader = file.reader();
var zeros = [_]u32{0} ** 12;
var gamma = [_]u8{'0'} ** 12;
var epsilon = [_]u8{'1'} ** 12;
var lines = ArrayList([]u8).init(allocator);
defer {
for (lines.items) |line| {
allocator.free(line);
}
lines.deinit();
}
while (try reader.readUntilDelimiterOrEofAlloc(allocator, '\n', 12)) |line| {
try lines.append(line);
for (line) |char, i| {
if (char == '0') zeros[i] += 1;
}
}
const majority = lines.items.len / 2;
for (zeros) |zero_count, i| {
if (zero_count < majority) {
gamma[i] = '1';
epsilon[i] = '0';
}
}
const width = lines.items[0].len;
const gamma_num = try parseInt(u32, gamma[0..width], 2);
const epsilon_num = try parseInt(u32, epsilon[0..width], 2);
std.log.info("part 1: {d}", .{gamma_num * epsilon_num});
const o2_lines = try reduceToSingle(allocator, lines.items, 0, '1');
defer allocator.free(o2_lines);
const o2_rating = try parseInt(u32, o2_lines[0], 2);
const co2_lines = try reduceToSingle(allocator, lines.items, 0, '0');
defer allocator.free(co2_lines);
const co2_rating = try parseInt(u32, co2_lines[0], 2);
std.log.info("part 2: {d}", .{o2_rating * co2_rating});
}
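// Recursively filters `lines` one bit position at a time until a single line
// remains. For the O2 rating (`tiebreak == '1'`) the most common bit at `index`
// is kept, with ties resolved to '1'; for the CO2 rating (`tiebreak == '0'`)
// the least common bit is kept, with ties resolved to '0'.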
fn reduceToSingle(allocator: *Allocator, lines: []const []u8, index: u32, tiebreak: u8) ![]const []u8 {
if (lines.len == 1) return lines;
var ones: u32 = 0;
for (lines) |line| {
if (line[index] == '1') ones += 1;
}
const majority = try std.math.divCeil(usize, lines.len, 2);
const o2_rule = tiebreak == '1' and ones >= majority;
const co2_rule = tiebreak == '0' and ones < majority;
const keep: u8 = if (o2_rule or co2_rule) '1' else '0';
var new_lines = ArrayList([]u8).init(allocator);
for (lines) |line| {
if (line[index] == keep) {
try new_lines.append(line);
}
}
// If we're past the first iteration, we need to free the intermediate lines slices.
if (index > 0) allocator.free(lines);
return reduceToSingle(allocator, new_lines.toOwnedSlice(), index + 1, tiebreak);
} | 2021/zig/03.zig |
extern "wapc" fn __guest_request(operation_ptr: [*]u8, payload_ptr: [*]u8) void;
extern "wapc" fn __guest_response(ptr: [*]u8, len: usize) void;
extern "wapc" fn __guest_error(ptr: [*]u8, len: usize) void;
extern "wapc" fn __host_call(binding_ptr: [*]const u8, binding_len: usize, namespace_ptr: [*]const u8, namespace_len: usize, operation_ptr: [*]const u8, operation_len: usize, payload_ptr: [*]const u8, payload_len: usize) bool;
extern "wapc" fn __host_response_len() usize;
extern "wapc" fn __host_response(ptr: [*]u8) void;
extern "wapc" fn __host_error_len() usize;
extern "wapc" fn __host_error(ptr: [*]u8) void;
const std = @import("std");
const mem = std.mem;
const heap = std.heap;
pub const Function = struct {
name: []const u8,
invoke: fn (
allocator: mem.Allocator,
payload: []u8,
) anyerror!?[]u8,
};
fn guestError(allocator: mem.Allocator, err: anyerror) !void {
var message = std.ArrayList(u8).init(allocator);
defer message.deinit();
try message.appendSlice("guest error: ");
try message.appendSlice(@errorName(err));
    __guest_error(message.items.ptr, message.items.len);
}
fn functionNotFoundError(allocator: mem.Allocator, operation: []u8) !void {
var message = std.ArrayList(u8).init(allocator);
defer message.deinit();
try message.appendSlice("Could not find function ");
try message.appendSlice(operation);
    __guest_error(message.items.ptr, message.items.len);
}
pub fn handleCall(allocator: mem.Allocator, operation_size: usize, payload_size: usize, comptime fns: []const Function) bool {
var operation_buf = allocator.alloc(u8, operation_size) catch return false;
defer allocator.free(operation_buf);
var payload_buf = allocator.alloc(u8, payload_size) catch return false;
defer allocator.free(payload_buf);
__guest_request(operation_buf.ptr, payload_buf.ptr);
inline for (fns) |function| {
if (mem.eql(u8, operation_buf, function.name)) {
const response_maybe = function.invoke(allocator, payload_buf) catch |err| {
guestError(allocator, err) catch return false;
return false;
};
if (response_maybe) |response| {
defer allocator.free(response);
__guest_response(response.ptr, response.len);
} else {
__guest_response(@intToPtr([*]u8, 1), 0);
}
return true;
}
}
functionNotFoundError(allocator, operation_buf) catch return false;
return false;
}
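// Illustrative usage sketch (an assumption about the surrounding guest module,
// not part of this file): a waPC guest typically exports `__guest_call` and
// dispatches through `handleCall` with its registered functions, roughly:
//
//     fn echo(allocator: mem.Allocator, payload: []u8) anyerror!?[]u8 {
//         // return an allocator-owned copy so handleCall's `allocator.free(response)` is valid.
//         return try allocator.dupe(u8, payload);
//     }
//
//     export fn __guest_call(operation_size: usize, payload_size: usize) bool {
//         var gpa = heap.GeneralPurposeAllocator(.{}){};
//         return handleCall(gpa.allocator(), operation_size, payload_size, &[_]Function{
//             .{ .name = "echo", .invoke = echo },
//         });
//     }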
pub fn hostCall(allocator: mem.Allocator, binding: []const u8, namespace: []const u8, operation: []const u8, payload: []const u8) ![]u8 {
const result = __host_call(binding.ptr, binding.len, namespace.ptr, namespace.len, operation.ptr, operation.len, payload.ptr, payload.len);
if (!result) {
var message = std.ArrayList(u8).init(allocator);
defer message.deinit();
try message.appendSlice("Host error: ");
// ask the host what happened
const error_len = __host_error_len();
const host_message = try allocator.alloc(u8, error_len);
defer allocator.free(host_message);
__host_error(host_message.ptr);
try message.appendSlice(host_message);
// echo back the host error from the guest
        __guest_error(message.items.ptr, message.items.len);
return error.HostError;
}
const response_len = __host_response_len();
const response = try allocator.alloc(u8, response_len);
__host_response(response.ptr);
return response;
} | wapc.zig |
const std = @import("std");
const common = @import("common.zig");
const Line = common.Line;
const Point = common.Point;
const expect = std.testing.expect;
const Allocator = std.mem.Allocator;
const test_allocator = std.testing.allocator;
const ArrayList = std.ArrayList;
const AutoArrayHashMap = std.AutoArrayHashMap;
// creates a hash representation (u64) of a point given by x and y coordinates.
fn hash_point(x: i32, y: i32) u64 {
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHashStrat(&hasher, Point{ .x = x, .y = y }, .Deep);
return hasher.final();
}
// produces an inclusive range from start to end; it works for both increasing and decreasing ranges.
fn range(allocator: *Allocator, start: i32, end: i32) anyerror!ArrayList(i32) {
const d = end - start;
const d_norm = if (d < 0) @as(i32, -1) else if (d > 0) @as(i32, 1) else @as(i32, 0);
const len = std.math.absCast(d) + 1;
var i: i32 = 0;
var n = start;
var arr = ArrayList(i32).init(allocator);
while (i < len) : (i += 1) {
try arr.append(n);
n += d_norm;
}
return arr;
}
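// illustrative test (added here as an assumption, mirroring the existing test
// style below): `range` is inclusive on both ends and supports both directions.
test "range is inclusive and works in both directions" {
    var increasing = try range(test_allocator, 1, 3);
    defer increasing.deinit();
    try expect(std.mem.eql(i32, increasing.items, &[_]i32{ 1, 2, 3 }));
    var decreasing = try range(test_allocator, 3, 1);
    defer decreasing.deinit();
    try expect(std.mem.eql(i32, decreasing.items, &[_]i32{ 3, 2, 1 }));
}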
// generates the coordinates of a horizontal or vertical line.
// 1. generates a range for both the x and y axes;
// 2. iterates over the ranges in a nested loop, generating the coordinates;
// 3. hashes each coordinate using the `hash_point` function;
// 4. returns the list of hashed coordinates.
fn horizontal_coordinates(allocator: *Allocator, line: Line) anyerror!ArrayList(u64) {
var result = ArrayList(u64).init(allocator);
var i_range = try range(allocator, line.start.x, line.end.x);
defer i_range.deinit();
var j_range = try range(allocator, line.start.y, line.end.y);
defer j_range.deinit();
for (i_range.items) |i| {
for (j_range.items) |j| {
try result.append(hash_point(i, j));
}
}
return result;
}
// PART 1
// 1. iterates over all lines:
// 1.1 generates coordinates for horizontal/vertical lines only, using `horizontal_coordinates`;
// 1.2 stores the coordinates in an `ArrayHashMap`, using the coordinate hash as key and a counter as value;
// 1.2.1 every time a coordinate repeats, its counter is incremented.
// 2. iterates over the `ArrayHashMap` and counts the coordinates with at least 2 occurrences;
pub fn count_line_overlaps(allocator: *Allocator, lines: ArrayList(Line)) anyerror!usize {
var occurrencies = AutoArrayHashMap(u64, i32).init(allocator);
defer occurrencies.deinit();
for (lines.items) |line| {
        // skip lines that are neither horizontal nor vertical (diagonals).
if (line.start.x != line.end.x and line.start.y != line.end.y) {
continue;
}
var coordinates = try horizontal_coordinates(allocator, line);
defer coordinates.deinit();
for (coordinates.items) |coordinate| {
var curr = occurrencies.get(coordinate) orelse 0;
try occurrencies.put(coordinate, curr + 1);
}
}
var iter = occurrencies.iterator();
var count: usize = 0;
while (iter.next()) |entry| {
if (entry.value_ptr.* > 1) {
count += 1;
}
}
return count;
}
// decides whether the line is horizontal/vertical or diagonal, and then
// generates the coordinates using the matching function.
fn line_coordinates(allocator: *Allocator, line: Line) anyerror!ArrayList(u64) {
if (line.start.x != line.end.x and line.start.y != line.end.y) {
return vertical_coordinates(allocator, line);
}
return horizontal_coordinates(allocator, line);
}
// generates coordinates for diagonal lines (the non-horizontal/vertical case routed here by `line_coordinates`).
// 1. calculates the x and y "movements", i.e. the per-step direction along each axis;
// 2. iterates, stepping x and y, until either reaches the corresponding line.end coordinate;
// 3. appends the end point itself, since the line is inclusive.
fn vertical_coordinates(allocator: *Allocator, line: Line) anyerror!ArrayList(u64) {
var result = ArrayList(u64).init(allocator);
const x_move = if (line.start.x > line.end.x) @as(i32, -1) else @as(i32, 1);
const y_move = if (line.start.y > line.end.y) @as(i32, -1) else @as(i32, 1);
var x: i32 = line.start.x;
var y: i32 = line.start.y;
while (x != line.end.x and y != line.end.y) {
try result.append(hash_point(x, y));
x += x_move;
y += y_move;
}
// add the end point.
try result.append(hash_point(line.end.x, line.end.y));
return result;
}
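// illustrative test (added as an assumption, following the existing test style):
// a diagonal line yields exactly one coordinate per step, endpoints included.
test "diagonal line coordinates" {
    const diagonal = Line { .start = Point { .x = 1, .y = 1 }, .end = Point { .x = 3, .y = 3 } };
    var coordinates = try line_coordinates(test_allocator, diagonal);
    defer coordinates.deinit();
    try expect(coordinates.items.len == 3);
}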
// PART 2
// 1. iterates over all lines:
// 1.1 generates coordinates for horizontal/vertical and diagonal lines;
// 1.2 stores the coordinates in an `ArrayHashMap`, using the coordinate hash as key and a counter as value;
// 1.2.1 every time a coordinate repeats, its counter is incremented.
// 2. iterates over the `ArrayHashMap` and counts the coordinates with at least 2 occurrences;
pub fn count_line_overlaps_with_diagonal(allocator: *Allocator, lines: ArrayList(Line)) anyerror!usize {
var occurrencies = AutoArrayHashMap(u64, i32).init(allocator);
defer occurrencies.deinit();
for (lines.items) |line| {
        // generate coordinates for every line, including diagonals.
var coordinates: ArrayList(u64) = try line_coordinates(allocator, line);
defer coordinates.deinit();
for (coordinates.items) |coordinate| {
var curr = occurrencies.get(coordinate) orelse 0;
try occurrencies.put(coordinate, curr + 1);
}
}
var iter = occurrencies.iterator();
var count: usize = 0;
while (iter.next()) |entry| {
if (entry.value_ptr.* > 1) {
count += 1;
}
}
return count;
}
test "line overlap" {
var lines = ArrayList(Line).init(test_allocator);
defer lines.deinit();
try lines.append(Line { .start = Point { .x = 0, .y = 9 }, .end = Point { .x = 5, .y = 9 } });
try lines.append(Line { .start = Point { .x = 8, .y = 0 }, .end = Point { .x = 0, .y = 8 } });
try lines.append(Line { .start = Point { .x = 9, .y = 4 }, .end = Point { .x = 3, .y = 4 } });
try lines.append(Line { .start = Point { .x = 2, .y = 2 }, .end = Point { .x = 2, .y = 1 } });
try lines.append(Line { .start = Point { .x = 7, .y = 0 }, .end = Point { .x = 7, .y = 4 } });
try lines.append(Line { .start = Point { .x = 6, .y = 4 }, .end = Point { .x = 2, .y = 0 } });
try lines.append(Line { .start = Point { .x = 0, .y = 9 }, .end = Point { .x = 2, .y = 9 } });
try lines.append(Line { .start = Point { .x = 3, .y = 4 }, .end = Point { .x = 1, .y = 4 } });
try lines.append(Line { .start = Point { .x = 0, .y = 0 }, .end = Point { .x = 8, .y = 8 } });
try lines.append(Line { .start = Point { .x = 5, .y = 5 }, .end = Point { .x = 8, .y = 2 } });
var points_with_overlap = try count_line_overlaps(test_allocator, lines);
try expect(points_with_overlap == 5);
var points_with_overlap_diagonal = try count_line_overlaps_with_diagonal(test_allocator, lines);
try expect(points_with_overlap_diagonal == 12);
} | day5/src/solution.zig |
pub const GUID_STRING_SIZE = @as(u32, 40);
pub const DATA_NOT_AVAILABLE = @as(u32, 4294967295);
pub const MTXDM_E_ENLISTRESOURCEFAILED = @as(u32, 2147803392);
pub const CRR_NO_REASON_SUPPLIED = @as(u32, 0);
pub const CRR_LIFETIME_LIMIT = @as(u32, 4294967295);
pub const CRR_ACTIVATION_LIMIT = @as(u32, 4294967294);
pub const CRR_CALL_LIMIT = @as(u32, 4294967293);
pub const CRR_MEMORY_LIMIT = @as(u32, 4294967292);
pub const CRR_RECYCLED_FROM_UI = @as(u32, 4294967291);
//--------------------------------------------------------------------------------
// Section: Types (211)
//--------------------------------------------------------------------------------
const CLSID_SecurityIdentity_Value = @import("../zig.zig").Guid.initString("ecabb0a5-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_SecurityIdentity = &CLSID_SecurityIdentity_Value;
const CLSID_SecurityCallers_Value = @import("../zig.zig").Guid.initString("ecabb0a6-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_SecurityCallers = &CLSID_SecurityCallers_Value;
const CLSID_SecurityCallContext_Value = @import("../zig.zig").Guid.initString("ecabb0a7-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_SecurityCallContext = &CLSID_SecurityCallContext_Value;
const CLSID_GetSecurityCallContextAppObject_Value = @import("../zig.zig").Guid.initString("ecabb0a8-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_GetSecurityCallContextAppObject = &CLSID_GetSecurityCallContextAppObject_Value;
const CLSID_Dummy30040732_Value = @import("../zig.zig").Guid.initString("ecabb0a9-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_Dummy30040732 = &CLSID_Dummy30040732_Value;
const CLSID_TransactionContext_Value = @import("../zig.zig").Guid.initString("7999fc25-d3c6-11cf-acab-00a024a55aef");
pub const CLSID_TransactionContext = &CLSID_TransactionContext_Value;
const CLSID_TransactionContextEx_Value = @import("../zig.zig").Guid.initString("5cb66670-d3d4-11cf-acab-00a024a55aef");
pub const CLSID_TransactionContextEx = &CLSID_TransactionContextEx_Value;
const CLSID_ByotServerEx_Value = @import("../zig.zig").Guid.initString("ecabb0aa-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_ByotServerEx = &CLSID_ByotServerEx_Value;
const CLSID_CServiceConfig_Value = @import("../zig.zig").Guid.initString("ecabb0c8-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_CServiceConfig = &CLSID_CServiceConfig_Value;
const CLSID_ServicePool_Value = @import("../zig.zig").Guid.initString("ecabb0c9-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_ServicePool = &CLSID_ServicePool_Value;
const CLSID_ServicePoolConfig_Value = @import("../zig.zig").Guid.initString("ecabb0ca-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_ServicePoolConfig = &CLSID_ServicePoolConfig_Value;
const CLSID_SharedProperty_Value = @import("../zig.zig").Guid.initString("2a005c05-a5de-11cf-9e66-00aa00a3f464");
pub const CLSID_SharedProperty = &CLSID_SharedProperty_Value;
const CLSID_SharedPropertyGroup_Value = @import("../zig.zig").Guid.initString("2a005c0b-a5de-11cf-9e66-00aa00a3f464");
pub const CLSID_SharedPropertyGroup = &CLSID_SharedPropertyGroup_Value;
const CLSID_SharedPropertyGroupManager_Value = @import("../zig.zig").Guid.initString("2a005c11-a5de-11cf-9e66-00aa00a3f464");
pub const CLSID_SharedPropertyGroupManager = &CLSID_SharedPropertyGroupManager_Value;
const CLSID_COMEvents_Value = @import("../zig.zig").Guid.initString("ecabb0ab-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_COMEvents = &CLSID_COMEvents_Value;
const CLSID_CoMTSLocator_Value = @import("../zig.zig").Guid.initString("ecabb0ac-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_CoMTSLocator = &CLSID_CoMTSLocator_Value;
const CLSID_MtsGrp_Value = @import("../zig.zig").Guid.initString("4b2e958d-0393-11d1-b1ab-00aa00ba3258");
pub const CLSID_MtsGrp = &CLSID_MtsGrp_Value;
const CLSID_ComServiceEvents_Value = @import("../zig.zig").Guid.initString("ecabb0c3-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_ComServiceEvents = &CLSID_ComServiceEvents_Value;
const CLSID_ComSystemAppEventData_Value = @import("../zig.zig").Guid.initString("ecabb0c6-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_ComSystemAppEventData = &CLSID_ComSystemAppEventData_Value;
const CLSID_CRMClerk_Value = @import("../zig.zig").Guid.initString("ecabb0bd-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_CRMClerk = &CLSID_CRMClerk_Value;
const CLSID_CRMRecoveryClerk_Value = @import("../zig.zig").Guid.initString("ecabb0be-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_CRMRecoveryClerk = &CLSID_CRMRecoveryClerk_Value;
const CLSID_LBEvents_Value = @import("../zig.zig").Guid.initString("ecabb0c1-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_LBEvents = &CLSID_LBEvents_Value;
const CLSID_MessageMover_Value = @import("../zig.zig").Guid.initString("ecabb0bf-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_MessageMover = &CLSID_MessageMover_Value;
const CLSID_DispenserManager_Value = @import("../zig.zig").Guid.initString("ecabb0c0-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_DispenserManager = &CLSID_DispenserManager_Value;
const CLSID_PoolMgr_Value = @import("../zig.zig").Guid.initString("ecabafb5-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_PoolMgr = &CLSID_PoolMgr_Value;
const CLSID_EventServer_Value = @import("../zig.zig").Guid.initString("ecabafbc-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_EventServer = &CLSID_EventServer_Value;
const CLSID_TrackerServer_Value = @import("../zig.zig").Guid.initString("ecabafb9-7f19-11d2-978e-0000f8757e2a");
pub const CLSID_TrackerServer = &CLSID_TrackerServer_Value;
const CLSID_AppDomainHelper_Value = @import("../zig.zig").Guid.initString("ef24f689-14f8-4d92-b4af-d7b1f0e70fd4");
pub const CLSID_AppDomainHelper = &CLSID_AppDomainHelper_Value;
const CLSID_ClrAssemblyLocator_Value = @import("../zig.zig").Guid.initString("458aa3b5-265a-4b75-bc05-9bea4630cf18");
pub const CLSID_ClrAssemblyLocator = &CLSID_ClrAssemblyLocator_Value;
const CLSID_COMAdminCatalog_Value = @import("../zig.zig").Guid.initString("f618c514-dfb8-11d1-a2cf-00805fc79235");
pub const CLSID_COMAdminCatalog = &CLSID_COMAdminCatalog_Value;
const CLSID_COMAdminCatalogObject_Value = @import("../zig.zig").Guid.initString("f618c515-dfb8-11d1-a2cf-00805fc79235");
pub const CLSID_COMAdminCatalogObject = &CLSID_COMAdminCatalogObject_Value;
const CLSID_COMAdminCatalogCollection_Value = @import("../zig.zig").Guid.initString("f618c516-dfb8-11d1-a2cf-00805fc79235");
pub const CLSID_COMAdminCatalogCollection = &CLSID_COMAdminCatalogCollection_Value;
// TODO: this type is limited to platform 'windows5.0'
const IID_ICOMAdminCatalog_Value = @import("../zig.zig").Guid.initString("dd662187-dfc2-11d1-a2cf-00805fc79235");
pub const IID_ICOMAdminCatalog = &IID_ICOMAdminCatalog_Value;
pub const ICOMAdminCatalog = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
GetCollection: fn(
self: *const ICOMAdminCatalog,
bstrCollName: ?BSTR,
ppCatalogCollection: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Connect: fn(
self: *const ICOMAdminCatalog,
bstrCatalogServerName: ?BSTR,
ppCatalogCollection: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_MajorVersion: fn(
self: *const ICOMAdminCatalog,
plMajorVersion: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_MinorVersion: fn(
self: *const ICOMAdminCatalog,
plMinorVersion: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCollectionByQuery: fn(
self: *const ICOMAdminCatalog,
bstrCollName: ?BSTR,
ppsaVarQuery: ?*?*SAFEARRAY,
ppCatalogCollection: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ImportComponent: fn(
self: *const ICOMAdminCatalog,
bstrApplIDOrName: ?BSTR,
bstrCLSIDOrProgID: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
InstallComponent: fn(
self: *const ICOMAdminCatalog,
bstrApplIDOrName: ?BSTR,
bstrDLL: ?BSTR,
bstrTLB: ?BSTR,
bstrPSDLL: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ShutdownApplication: fn(
self: *const ICOMAdminCatalog,
bstrApplIDOrName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ExportApplication: fn(
self: *const ICOMAdminCatalog,
bstrApplIDOrName: ?BSTR,
bstrApplicationFile: ?BSTR,
lOptions: COMAdminApplicationExportOptions,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
InstallApplication: fn(
self: *const ICOMAdminCatalog,
bstrApplicationFile: ?BSTR,
bstrDestinationDirectory: ?BSTR,
lOptions: COMAdminApplicationInstallOptions,
bstrUserId: ?BSTR,
bstrPassword: ?BSTR,
bstrRSN: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
StopRouter: fn(
self: *const ICOMAdminCatalog,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RefreshRouter: fn(
self: *const ICOMAdminCatalog,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
StartRouter: fn(
self: *const ICOMAdminCatalog,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reserved1: fn(
self: *const ICOMAdminCatalog,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reserved2: fn(
self: *const ICOMAdminCatalog,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
InstallMultipleComponents: fn(
self: *const ICOMAdminCatalog,
bstrApplIDOrName: ?BSTR,
ppsaVarFileNames: ?*?*SAFEARRAY,
ppsaVarCLSIDs: ?*?*SAFEARRAY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMultipleComponentsInfo: fn(
self: *const ICOMAdminCatalog,
bstrApplIdOrName: ?BSTR,
ppsaVarFileNames: ?*?*SAFEARRAY,
ppsaVarCLSIDs: ?*?*SAFEARRAY,
ppsaVarClassNames: ?*?*SAFEARRAY,
ppsaVarFileFlags: ?*?*SAFEARRAY,
ppsaVarComponentFlags: ?*?*SAFEARRAY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RefreshComponents: fn(
self: *const ICOMAdminCatalog,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BackupREGDB: fn(
self: *const ICOMAdminCatalog,
bstrBackupFilePath: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RestoreREGDB: fn(
self: *const ICOMAdminCatalog,
bstrBackupFilePath: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
QueryApplicationFile: fn(
self: *const ICOMAdminCatalog,
bstrApplicationFile: ?BSTR,
pbstrApplicationName: ?*?BSTR,
pbstrApplicationDescription: ?*?BSTR,
pbHasUsers: ?*i16,
pbIsProxy: ?*i16,
ppsaVarFileNames: ?*?*SAFEARRAY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
StartApplication: fn(
self: *const ICOMAdminCatalog,
bstrApplIdOrName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ServiceCheck: fn(
self: *const ICOMAdminCatalog,
lService: i32,
plStatus: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
InstallMultipleEventClasses: fn(
self: *const ICOMAdminCatalog,
bstrApplIdOrName: ?BSTR,
ppsaVarFileNames: ?*?*SAFEARRAY,
ppsaVarCLSIDS: ?*?*SAFEARRAY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
InstallEventClass: fn(
self: *const ICOMAdminCatalog,
bstrApplIdOrName: ?BSTR,
bstrDLL: ?BSTR,
bstrTLB: ?BSTR,
bstrPSDLL: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetEventClassesForIID: fn(
self: *const ICOMAdminCatalog,
bstrIID: ?BSTR,
ppsaVarCLSIDs: ?*?*SAFEARRAY,
ppsaVarProgIDs: ?*?*SAFEARRAY,
ppsaVarDescriptions: ?*?*SAFEARRAY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_GetCollection(self: *const T, bstrCollName: ?BSTR, ppCatalogCollection: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).GetCollection(@ptrCast(*const ICOMAdminCatalog, self), bstrCollName, ppCatalogCollection);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_Connect(self: *const T, bstrCatalogServerName: ?BSTR, ppCatalogCollection: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).Connect(@ptrCast(*const ICOMAdminCatalog, self), bstrCatalogServerName, ppCatalogCollection);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_get_MajorVersion(self: *const T, plMajorVersion: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).get_MajorVersion(@ptrCast(*const ICOMAdminCatalog, self), plMajorVersion);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_get_MinorVersion(self: *const T, plMinorVersion: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).get_MinorVersion(@ptrCast(*const ICOMAdminCatalog, self), plMinorVersion);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_GetCollectionByQuery(self: *const T, bstrCollName: ?BSTR, ppsaVarQuery: ?*?*SAFEARRAY, ppCatalogCollection: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).GetCollectionByQuery(@ptrCast(*const ICOMAdminCatalog, self), bstrCollName, ppsaVarQuery, ppCatalogCollection);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_ImportComponent(self: *const T, bstrApplIDOrName: ?BSTR, bstrCLSIDOrProgID: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).ImportComponent(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIDOrName, bstrCLSIDOrProgID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_InstallComponent(self: *const T, bstrApplIDOrName: ?BSTR, bstrDLL: ?BSTR, bstrTLB: ?BSTR, bstrPSDLL: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).InstallComponent(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIDOrName, bstrDLL, bstrTLB, bstrPSDLL);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_ShutdownApplication(self: *const T, bstrApplIDOrName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).ShutdownApplication(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIDOrName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_ExportApplication(self: *const T, bstrApplIDOrName: ?BSTR, bstrApplicationFile: ?BSTR, lOptions: COMAdminApplicationExportOptions) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).ExportApplication(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIDOrName, bstrApplicationFile, lOptions);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_InstallApplication(self: *const T, bstrApplicationFile: ?BSTR, bstrDestinationDirectory: ?BSTR, lOptions: COMAdminApplicationInstallOptions, bstrUserId: ?BSTR, bstrPassword: ?BSTR, bstrRSN: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).InstallApplication(@ptrCast(*const ICOMAdminCatalog, self), bstrApplicationFile, bstrDestinationDirectory, lOptions, bstrUserId, bstrPassword, bstrRSN);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_StopRouter(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).StopRouter(@ptrCast(*const ICOMAdminCatalog, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_RefreshRouter(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).RefreshRouter(@ptrCast(*const ICOMAdminCatalog, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_StartRouter(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).StartRouter(@ptrCast(*const ICOMAdminCatalog, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_Reserved1(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).Reserved1(@ptrCast(*const ICOMAdminCatalog, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_Reserved2(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).Reserved2(@ptrCast(*const ICOMAdminCatalog, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_InstallMultipleComponents(self: *const T, bstrApplIDOrName: ?BSTR, ppsaVarFileNames: ?*?*SAFEARRAY, ppsaVarCLSIDs: ?*?*SAFEARRAY) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).InstallMultipleComponents(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIDOrName, ppsaVarFileNames, ppsaVarCLSIDs);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_GetMultipleComponentsInfo(self: *const T, bstrApplIdOrName: ?BSTR, ppsaVarFileNames: ?*?*SAFEARRAY, ppsaVarCLSIDs: ?*?*SAFEARRAY, ppsaVarClassNames: ?*?*SAFEARRAY, ppsaVarFileFlags: ?*?*SAFEARRAY, ppsaVarComponentFlags: ?*?*SAFEARRAY) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).GetMultipleComponentsInfo(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIdOrName, ppsaVarFileNames, ppsaVarCLSIDs, ppsaVarClassNames, ppsaVarFileFlags, ppsaVarComponentFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_RefreshComponents(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).RefreshComponents(@ptrCast(*const ICOMAdminCatalog, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_BackupREGDB(self: *const T, bstrBackupFilePath: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).BackupREGDB(@ptrCast(*const ICOMAdminCatalog, self), bstrBackupFilePath);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_RestoreREGDB(self: *const T, bstrBackupFilePath: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).RestoreREGDB(@ptrCast(*const ICOMAdminCatalog, self), bstrBackupFilePath);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_QueryApplicationFile(self: *const T, bstrApplicationFile: ?BSTR, pbstrApplicationName: ?*?BSTR, pbstrApplicationDescription: ?*?BSTR, pbHasUsers: ?*i16, pbIsProxy: ?*i16, ppsaVarFileNames: ?*?*SAFEARRAY) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).QueryApplicationFile(@ptrCast(*const ICOMAdminCatalog, self), bstrApplicationFile, pbstrApplicationName, pbstrApplicationDescription, pbHasUsers, pbIsProxy, ppsaVarFileNames);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_StartApplication(self: *const T, bstrApplIdOrName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).StartApplication(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIdOrName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_ServiceCheck(self: *const T, lService: i32, plStatus: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).ServiceCheck(@ptrCast(*const ICOMAdminCatalog, self), lService, plStatus);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_InstallMultipleEventClasses(self: *const T, bstrApplIdOrName: ?BSTR, ppsaVarFileNames: ?*?*SAFEARRAY, ppsaVarCLSIDS: ?*?*SAFEARRAY) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).InstallMultipleEventClasses(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIdOrName, ppsaVarFileNames, ppsaVarCLSIDS);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_InstallEventClass(self: *const T, bstrApplIdOrName: ?BSTR, bstrDLL: ?BSTR, bstrTLB: ?BSTR, bstrPSDLL: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).InstallEventClass(@ptrCast(*const ICOMAdminCatalog, self), bstrApplIdOrName, bstrDLL, bstrTLB, bstrPSDLL);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog_GetEventClassesForIID(self: *const T, bstrIID: ?BSTR, ppsaVarCLSIDs: ?*?*SAFEARRAY, ppsaVarProgIDs: ?*?*SAFEARRAY, ppsaVarDescriptions: ?*?*SAFEARRAY) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog.VTable, self.vtable).GetEventClassesForIID(@ptrCast(*const ICOMAdminCatalog, self), bstrIID, ppsaVarCLSIDs, ppsaVarProgIDs, ppsaVarDescriptions);
}
};}
pub usingnamespace MethodMixin(@This());
};
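// Note on the generated pattern above: each interface is an extern struct whose
// only field is a pointer to its VTable, and `MethodMixin` surfaces the vtable
// slots as namespaced wrapper methods. Assuming an `ICOMAdminCatalog` pointer
// obtained elsewhere (e.g. via CoCreateInstance on CLSID_COMAdminCatalog), a
// call looks roughly like `catalog.ICOMAdminCatalog_GetCollection(name, &coll)`.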
pub const COMAdminInUse = enum(i32) {
NotInUse = 0,
InUseByCatalog = 1,
InUseByRegistryUnknown = 2,
InUseByRegistryProxyStub = 3,
InUseByRegistryTypeLib = 4,
InUseByRegistryClsid = 5,
};
pub const COMAdminNotInUse = COMAdminInUse.NotInUse;
pub const COMAdminInUseByCatalog = COMAdminInUse.InUseByCatalog;
pub const COMAdminInUseByRegistryUnknown = COMAdminInUse.InUseByRegistryUnknown;
pub const COMAdminInUseByRegistryProxyStub = COMAdminInUse.InUseByRegistryProxyStub;
pub const COMAdminInUseByRegistryTypeLib = COMAdminInUse.InUseByRegistryTypeLib;
pub const COMAdminInUseByRegistryClsid = COMAdminInUse.InUseByRegistryClsid;
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ICOMAdminCatalog2_Value = @import("../zig.zig").Guid.initString("790c6e0b-9194-4cc9-9426-a48a63185696");
pub const IID_ICOMAdminCatalog2 = &IID_ICOMAdminCatalog2_Value;
pub const ICOMAdminCatalog2 = extern struct {
pub const VTable = extern struct {
base: ICOMAdminCatalog.VTable,
GetCollectionByQuery2: fn(
self: *const ICOMAdminCatalog2,
bstrCollectionName: ?BSTR,
pVarQueryStrings: ?*VARIANT,
ppCatalogCollection: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetApplicationInstanceIDFromProcessID: fn(
self: *const ICOMAdminCatalog2,
lProcessID: i32,
pbstrApplicationInstanceID: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ShutdownApplicationInstances: fn(
self: *const ICOMAdminCatalog2,
pVarApplicationInstanceID: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PauseApplicationInstances: fn(
self: *const ICOMAdminCatalog2,
pVarApplicationInstanceID: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ResumeApplicationInstances: fn(
self: *const ICOMAdminCatalog2,
pVarApplicationInstanceID: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RecycleApplicationInstances: fn(
self: *const ICOMAdminCatalog2,
pVarApplicationInstanceID: ?*VARIANT,
lReasonCode: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AreApplicationInstancesPaused: fn(
self: *const ICOMAdminCatalog2,
pVarApplicationInstanceID: ?*VARIANT,
pVarBoolPaused: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DumpApplicationInstance: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationInstanceID: ?BSTR,
bstrDirectory: ?BSTR,
lMaxImages: i32,
pbstrDumpFile: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_IsApplicationInstanceDumpSupported: fn(
self: *const ICOMAdminCatalog2,
pVarBoolDumpSupported: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateServiceForApplication: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationIDOrName: ?BSTR,
bstrServiceName: ?BSTR,
bstrStartType: ?BSTR,
bstrErrorControl: ?BSTR,
bstrDependencies: ?BSTR,
bstrRunAs: ?BSTR,
bstrPassword: ?BSTR,
bDesktopOk: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DeleteServiceForApplication: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationIDOrName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetPartitionID: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationIDOrName: ?BSTR,
pbstrPartitionID: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetPartitionName: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationIDOrName: ?BSTR,
pbstrPartitionName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_CurrentPartition: fn(
self: *const ICOMAdminCatalog2,
bstrPartitionIDOrName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_CurrentPartitionID: fn(
self: *const ICOMAdminCatalog2,
pbstrPartitionID: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_CurrentPartitionName: fn(
self: *const ICOMAdminCatalog2,
pbstrPartitionName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_GlobalPartitionID: fn(
self: *const ICOMAdminCatalog2,
pbstrGlobalPartitionID: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
FlushPartitionCache: fn(
self: *const ICOMAdminCatalog2,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CopyApplications: fn(
self: *const ICOMAdminCatalog2,
bstrSourcePartitionIDOrName: ?BSTR,
pVarApplicationID: ?*VARIANT,
bstrDestinationPartitionIDOrName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CopyComponents: fn(
self: *const ICOMAdminCatalog2,
bstrSourceApplicationIDOrName: ?BSTR,
pVarCLSIDOrProgID: ?*VARIANT,
bstrDestinationApplicationIDOrName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
MoveComponents: fn(
self: *const ICOMAdminCatalog2,
bstrSourceApplicationIDOrName: ?BSTR,
pVarCLSIDOrProgID: ?*VARIANT,
bstrDestinationApplicationIDOrName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AliasComponent: fn(
self: *const ICOMAdminCatalog2,
bstrSrcApplicationIDOrName: ?BSTR,
bstrCLSIDOrProgID: ?BSTR,
bstrDestApplicationIDOrName: ?BSTR,
bstrNewProgId: ?BSTR,
bstrNewClsid: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsSafeToDelete: fn(
self: *const ICOMAdminCatalog2,
bstrDllName: ?BSTR,
pCOMAdminInUse: ?*COMAdminInUse,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ImportUnconfiguredComponents: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationIDOrName: ?BSTR,
pVarCLSIDOrProgID: ?*VARIANT,
pVarComponentType: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PromoteUnconfiguredComponents: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationIDOrName: ?BSTR,
pVarCLSIDOrProgID: ?*VARIANT,
pVarComponentType: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ImportComponents: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationIDOrName: ?BSTR,
pVarCLSIDOrProgID: ?*VARIANT,
pVarComponentType: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Is64BitCatalogServer: fn(
self: *const ICOMAdminCatalog2,
pbIs64Bit: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ExportPartition: fn(
self: *const ICOMAdminCatalog2,
bstrPartitionIDOrName: ?BSTR,
bstrPartitionFileName: ?BSTR,
lOptions: COMAdminApplicationExportOptions,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
InstallPartition: fn(
self: *const ICOMAdminCatalog2,
bstrFileName: ?BSTR,
bstrDestDirectory: ?BSTR,
lOptions: COMAdminApplicationInstallOptions,
bstrUserID: ?BSTR,
bstrPassword: ?BSTR,
bstrRSN: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
QueryApplicationFile2: fn(
self: *const ICOMAdminCatalog2,
bstrApplicationFile: ?BSTR,
ppFilesForImport: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetComponentVersionCount: fn(
self: *const ICOMAdminCatalog2,
bstrCLSIDOrProgID: ?BSTR,
plVersionCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace ICOMAdminCatalog.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_GetCollectionByQuery2(self: *const T, bstrCollectionName: ?BSTR, pVarQueryStrings: ?*VARIANT, ppCatalogCollection: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).GetCollectionByQuery2(@ptrCast(*const ICOMAdminCatalog2, self), bstrCollectionName, pVarQueryStrings, ppCatalogCollection);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_GetApplicationInstanceIDFromProcessID(self: *const T, lProcessID: i32, pbstrApplicationInstanceID: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).GetApplicationInstanceIDFromProcessID(@ptrCast(*const ICOMAdminCatalog2, self), lProcessID, pbstrApplicationInstanceID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_ShutdownApplicationInstances(self: *const T, pVarApplicationInstanceID: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).ShutdownApplicationInstances(@ptrCast(*const ICOMAdminCatalog2, self), pVarApplicationInstanceID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_PauseApplicationInstances(self: *const T, pVarApplicationInstanceID: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).PauseApplicationInstances(@ptrCast(*const ICOMAdminCatalog2, self), pVarApplicationInstanceID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_ResumeApplicationInstances(self: *const T, pVarApplicationInstanceID: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).ResumeApplicationInstances(@ptrCast(*const ICOMAdminCatalog2, self), pVarApplicationInstanceID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_RecycleApplicationInstances(self: *const T, pVarApplicationInstanceID: ?*VARIANT, lReasonCode: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).RecycleApplicationInstances(@ptrCast(*const ICOMAdminCatalog2, self), pVarApplicationInstanceID, lReasonCode);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_AreApplicationInstancesPaused(self: *const T, pVarApplicationInstanceID: ?*VARIANT, pVarBoolPaused: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).AreApplicationInstancesPaused(@ptrCast(*const ICOMAdminCatalog2, self), pVarApplicationInstanceID, pVarBoolPaused);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_DumpApplicationInstance(self: *const T, bstrApplicationInstanceID: ?BSTR, bstrDirectory: ?BSTR, lMaxImages: i32, pbstrDumpFile: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).DumpApplicationInstance(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationInstanceID, bstrDirectory, lMaxImages, pbstrDumpFile);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_get_IsApplicationInstanceDumpSupported(self: *const T, pVarBoolDumpSupported: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).get_IsApplicationInstanceDumpSupported(@ptrCast(*const ICOMAdminCatalog2, self), pVarBoolDumpSupported);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_CreateServiceForApplication(self: *const T, bstrApplicationIDOrName: ?BSTR, bstrServiceName: ?BSTR, bstrStartType: ?BSTR, bstrErrorControl: ?BSTR, bstrDependencies: ?BSTR, bstrRunAs: ?BSTR, bstrPassword: ?BSTR, bDesktopOk: i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).CreateServiceForApplication(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationIDOrName, bstrServiceName, bstrStartType, bstrErrorControl, bstrDependencies, bstrRunAs, bstrPassword, bDesktopOk);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_DeleteServiceForApplication(self: *const T, bstrApplicationIDOrName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).DeleteServiceForApplication(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationIDOrName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_GetPartitionID(self: *const T, bstrApplicationIDOrName: ?BSTR, pbstrPartitionID: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).GetPartitionID(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationIDOrName, pbstrPartitionID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_GetPartitionName(self: *const T, bstrApplicationIDOrName: ?BSTR, pbstrPartitionName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).GetPartitionName(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationIDOrName, pbstrPartitionName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_put_CurrentPartition(self: *const T, bstrPartitionIDOrName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).put_CurrentPartition(@ptrCast(*const ICOMAdminCatalog2, self), bstrPartitionIDOrName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_get_CurrentPartitionID(self: *const T, pbstrPartitionID: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).get_CurrentPartitionID(@ptrCast(*const ICOMAdminCatalog2, self), pbstrPartitionID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_get_CurrentPartitionName(self: *const T, pbstrPartitionName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).get_CurrentPartitionName(@ptrCast(*const ICOMAdminCatalog2, self), pbstrPartitionName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_get_GlobalPartitionID(self: *const T, pbstrGlobalPartitionID: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).get_GlobalPartitionID(@ptrCast(*const ICOMAdminCatalog2, self), pbstrGlobalPartitionID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_FlushPartitionCache(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).FlushPartitionCache(@ptrCast(*const ICOMAdminCatalog2, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_CopyApplications(self: *const T, bstrSourcePartitionIDOrName: ?BSTR, pVarApplicationID: ?*VARIANT, bstrDestinationPartitionIDOrName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).CopyApplications(@ptrCast(*const ICOMAdminCatalog2, self), bstrSourcePartitionIDOrName, pVarApplicationID, bstrDestinationPartitionIDOrName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_CopyComponents(self: *const T, bstrSourceApplicationIDOrName: ?BSTR, pVarCLSIDOrProgID: ?*VARIANT, bstrDestinationApplicationIDOrName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).CopyComponents(@ptrCast(*const ICOMAdminCatalog2, self), bstrSourceApplicationIDOrName, pVarCLSIDOrProgID, bstrDestinationApplicationIDOrName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_MoveComponents(self: *const T, bstrSourceApplicationIDOrName: ?BSTR, pVarCLSIDOrProgID: ?*VARIANT, bstrDestinationApplicationIDOrName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).MoveComponents(@ptrCast(*const ICOMAdminCatalog2, self), bstrSourceApplicationIDOrName, pVarCLSIDOrProgID, bstrDestinationApplicationIDOrName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_AliasComponent(self: *const T, bstrSrcApplicationIDOrName: ?BSTR, bstrCLSIDOrProgID: ?BSTR, bstrDestApplicationIDOrName: ?BSTR, bstrNewProgId: ?BSTR, bstrNewClsid: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).AliasComponent(@ptrCast(*const ICOMAdminCatalog2, self), bstrSrcApplicationIDOrName, bstrCLSIDOrProgID, bstrDestApplicationIDOrName, bstrNewProgId, bstrNewClsid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_IsSafeToDelete(self: *const T, bstrDllName: ?BSTR, pCOMAdminInUse: ?*COMAdminInUse) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).IsSafeToDelete(@ptrCast(*const ICOMAdminCatalog2, self), bstrDllName, pCOMAdminInUse);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_ImportUnconfiguredComponents(self: *const T, bstrApplicationIDOrName: ?BSTR, pVarCLSIDOrProgID: ?*VARIANT, pVarComponentType: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).ImportUnconfiguredComponents(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationIDOrName, pVarCLSIDOrProgID, pVarComponentType);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_PromoteUnconfiguredComponents(self: *const T, bstrApplicationIDOrName: ?BSTR, pVarCLSIDOrProgID: ?*VARIANT, pVarComponentType: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).PromoteUnconfiguredComponents(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationIDOrName, pVarCLSIDOrProgID, pVarComponentType);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_ImportComponents(self: *const T, bstrApplicationIDOrName: ?BSTR, pVarCLSIDOrProgID: ?*VARIANT, pVarComponentType: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).ImportComponents(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationIDOrName, pVarCLSIDOrProgID, pVarComponentType);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_get_Is64BitCatalogServer(self: *const T, pbIs64Bit: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).get_Is64BitCatalogServer(@ptrCast(*const ICOMAdminCatalog2, self), pbIs64Bit);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_ExportPartition(self: *const T, bstrPartitionIDOrName: ?BSTR, bstrPartitionFileName: ?BSTR, lOptions: COMAdminApplicationExportOptions) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).ExportPartition(@ptrCast(*const ICOMAdminCatalog2, self), bstrPartitionIDOrName, bstrPartitionFileName, lOptions);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_InstallPartition(self: *const T, bstrFileName: ?BSTR, bstrDestDirectory: ?BSTR, lOptions: COMAdminApplicationInstallOptions, bstrUserID: ?BSTR, bstrPassword: ?BSTR, bstrRSN: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).InstallPartition(@ptrCast(*const ICOMAdminCatalog2, self), bstrFileName, bstrDestDirectory, lOptions, bstrUserID, bstrPassword, bstrRSN);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_QueryApplicationFile2(self: *const T, bstrApplicationFile: ?BSTR, ppFilesForImport: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).QueryApplicationFile2(@ptrCast(*const ICOMAdminCatalog2, self), bstrApplicationFile, ppFilesForImport);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMAdminCatalog2_GetComponentVersionCount(self: *const T, bstrCLSIDOrProgID: ?BSTR, plVersionCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMAdminCatalog2.VTable, self.vtable).GetComponentVersionCount(@ptrCast(*const ICOMAdminCatalog2, self), bstrCLSIDOrProgID, plVersionCount);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICatalogObject_Value = @import("../zig.zig").Guid.initString("6eb22871-8a19-11d0-81b6-00a0c9231c29");
pub const IID_ICatalogObject = &IID_ICatalogObject_Value;
pub const ICatalogObject = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Value: fn(
self: *const ICatalogObject,
bstrPropName: ?BSTR,
pvarRetVal: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Value: fn(
self: *const ICatalogObject,
bstrPropName: ?BSTR,
val: VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Key: fn(
self: *const ICatalogObject,
pvarRetVal: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Name: fn(
self: *const ICatalogObject,
pvarRetVal: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsPropertyReadOnly: fn(
self: *const ICatalogObject,
bstrPropName: ?BSTR,
pbRetVal: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Valid: fn(
self: *const ICatalogObject,
pbRetVal: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsPropertyWriteOnly: fn(
self: *const ICatalogObject,
bstrPropName: ?BSTR,
pbRetVal: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogObject_get_Value(self: *const T, bstrPropName: ?BSTR, pvarRetVal: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogObject.VTable, self.vtable).get_Value(@ptrCast(*const ICatalogObject, self), bstrPropName, pvarRetVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogObject_put_Value(self: *const T, bstrPropName: ?BSTR, val: VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogObject.VTable, self.vtable).put_Value(@ptrCast(*const ICatalogObject, self), bstrPropName, val);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogObject_get_Key(self: *const T, pvarRetVal: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogObject.VTable, self.vtable).get_Key(@ptrCast(*const ICatalogObject, self), pvarRetVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogObject_get_Name(self: *const T, pvarRetVal: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogObject.VTable, self.vtable).get_Name(@ptrCast(*const ICatalogObject, self), pvarRetVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogObject_IsPropertyReadOnly(self: *const T, bstrPropName: ?BSTR, pbRetVal: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogObject.VTable, self.vtable).IsPropertyReadOnly(@ptrCast(*const ICatalogObject, self), bstrPropName, pbRetVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogObject_get_Valid(self: *const T, pbRetVal: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogObject.VTable, self.vtable).get_Valid(@ptrCast(*const ICatalogObject, self), pbRetVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogObject_IsPropertyWriteOnly(self: *const T, bstrPropName: ?BSTR, pbRetVal: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogObject.VTable, self.vtable).IsPropertyWriteOnly(@ptrCast(*const ICatalogObject, self), bstrPropName, pbRetVal);
}
};}
pub usingnamespace MethodMixin(@This());
};
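// Usage sketch (illustrative, not part of the generated bindings): the MethodMixin
// above exposes each vtable slot as a method on `*const ICatalogObject`, with the
// interface name prefixed to avoid collisions. The helper below assumes the caller
// already obtained the object (typically from ICatalogCollection.get_Item) and owns
// the BSTR property name; on success the caller must clear the VARIANT (e.g. with
// VariantClear from OLE Automation, not called here).
fn exampleCatalogObjectGetValue(obj: *const ICatalogObject, prop_name: BSTR, out_value: *VARIANT) HRESULT {
    return obj.ICatalogObject_get_Value(prop_name, out_value);
}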
// TODO: this type is limited to platform 'windows5.0'
const IID_ICatalogCollection_Value = @import("../zig.zig").Guid.initString("6eb22872-8a19-11d0-81b6-00a0c9231c29");
pub const IID_ICatalogCollection = &IID_ICatalogCollection_Value;
pub const ICatalogCollection = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ICatalogCollection,
ppEnumVariant: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Item: fn(
self: *const ICatalogCollection,
lIndex: i32,
ppCatalogObject: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ICatalogCollection,
plObjectCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Remove: fn(
self: *const ICatalogCollection,
lIndex: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Add: fn(
self: *const ICatalogCollection,
ppCatalogObject: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Populate: fn(
self: *const ICatalogCollection,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SaveChanges: fn(
self: *const ICatalogCollection,
pcChanges: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCollection: fn(
self: *const ICatalogCollection,
bstrCollName: ?BSTR,
varObjectKey: VARIANT,
ppCatalogCollection: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Name: fn(
self: *const ICatalogCollection,
pVarNamel: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_AddEnabled: fn(
self: *const ICatalogCollection,
pVarBool: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_RemoveEnabled: fn(
self: *const ICatalogCollection,
pVarBool: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetUtilInterface: fn(
self: *const ICatalogCollection,
ppIDispatch: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_DataStoreMajorVersion: fn(
self: *const ICatalogCollection,
plMajorVersion: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_DataStoreMinorVersion: fn(
self: *const ICatalogCollection,
plMinorVersionl: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PopulateByKey: fn(
self: *const ICatalogCollection,
psaKeys: ?*SAFEARRAY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PopulateByQuery: fn(
self: *const ICatalogCollection,
bstrQueryString: ?BSTR,
lQueryType: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get__NewEnum(self: *const T, ppEnumVariant: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get__NewEnum(@ptrCast(*const ICatalogCollection, self), ppEnumVariant);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get_Item(self: *const T, lIndex: i32, ppCatalogObject: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get_Item(@ptrCast(*const ICatalogCollection, self), lIndex, ppCatalogObject);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get_Count(self: *const T, plObjectCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get_Count(@ptrCast(*const ICatalogCollection, self), plObjectCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_Remove(self: *const T, lIndex: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).Remove(@ptrCast(*const ICatalogCollection, self), lIndex);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_Add(self: *const T, ppCatalogObject: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).Add(@ptrCast(*const ICatalogCollection, self), ppCatalogObject);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_Populate(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).Populate(@ptrCast(*const ICatalogCollection, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_SaveChanges(self: *const T, pcChanges: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).SaveChanges(@ptrCast(*const ICatalogCollection, self), pcChanges);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_GetCollection(self: *const T, bstrCollName: ?BSTR, varObjectKey: VARIANT, ppCatalogCollection: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).GetCollection(@ptrCast(*const ICatalogCollection, self), bstrCollName, varObjectKey, ppCatalogCollection);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get_Name(self: *const T, pVarNamel: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get_Name(@ptrCast(*const ICatalogCollection, self), pVarNamel);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get_AddEnabled(self: *const T, pVarBool: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get_AddEnabled(@ptrCast(*const ICatalogCollection, self), pVarBool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get_RemoveEnabled(self: *const T, pVarBool: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get_RemoveEnabled(@ptrCast(*const ICatalogCollection, self), pVarBool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_GetUtilInterface(self: *const T, ppIDispatch: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).GetUtilInterface(@ptrCast(*const ICatalogCollection, self), ppIDispatch);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get_DataStoreMajorVersion(self: *const T, plMajorVersion: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get_DataStoreMajorVersion(@ptrCast(*const ICatalogCollection, self), plMajorVersion);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_get_DataStoreMinorVersion(self: *const T, plMinorVersionl: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).get_DataStoreMinorVersion(@ptrCast(*const ICatalogCollection, self), plMinorVersionl);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_PopulateByKey(self: *const T, psaKeys: ?*SAFEARRAY) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).PopulateByKey(@ptrCast(*const ICatalogCollection, self), psaKeys);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICatalogCollection_PopulateByQuery(self: *const T, bstrQueryString: ?BSTR, lQueryType: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICatalogCollection.VTable, self.vtable).PopulateByQuery(@ptrCast(*const ICatalogCollection, self), bstrQueryString, lQueryType);
}
};}
pub usingnamespace MethodMixin(@This());
};
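// Usage sketch (illustrative, not part of the generated bindings): a typical read
// path is Populate() followed by get_Count()/get_Item(); get_Item hands back an
// IDispatch that callers usually QueryInterface to ICatalogObject. Error handling
// here is reduced to the HRESULT sign check (a negative value means failure).
fn exampleCatalogCollectionCount(coll: *const ICatalogCollection) ?i32 {
    if (coll.ICatalogCollection_Populate() < 0) return null;
    var count: i32 = 0;
    if (coll.ICatalogCollection_get_Count(&count) < 0) return null;
    return count;
}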
pub const COMAdminComponentType = enum(i32) {
@"32BitComponent" = 1,
@"64BitComponent" = 2,
};
pub const COMAdmin32BitComponent = COMAdminComponentType.@"32BitComponent";
pub const COMAdmin64BitComponent = COMAdminComponentType.@"64BitComponent";
pub const COMAdminApplicationInstallOptions = enum(i32) {
NoUsers = 0,
Users = 1,
ForceOverwriteOfFiles = 2,
};
pub const COMAdminInstallNoUsers = COMAdminApplicationInstallOptions.NoUsers;
pub const COMAdminInstallUsers = COMAdminApplicationInstallOptions.Users;
pub const COMAdminInstallForceOverwriteOfFiles = COMAdminApplicationInstallOptions.ForceOverwriteOfFiles;
pub const COMAdminApplicationExportOptions = enum(i32) {
NoUsers = 0,
Users = 1,
ApplicationProxy = 2,
ForceOverwriteOfFiles = 4,
In10Format = 16,
};
pub const COMAdminExportNoUsers = COMAdminApplicationExportOptions.NoUsers;
pub const COMAdminExportUsers = COMAdminApplicationExportOptions.Users;
pub const COMAdminExportApplicationProxy = COMAdminApplicationExportOptions.ApplicationProxy;
pub const COMAdminExportForceOverwriteOfFiles = COMAdminApplicationExportOptions.ForceOverwriteOfFiles;
pub const COMAdminExportIn10Format = COMAdminApplicationExportOptions.In10Format;
pub const COMAdminThreadingModels = enum(i32) {
Apartment = 0,
Free = 1,
Main = 2,
Both = 3,
Neutral = 4,
NotSpecified = 5,
};
pub const COMAdminThreadingModelApartment = COMAdminThreadingModels.Apartment;
pub const COMAdminThreadingModelFree = COMAdminThreadingModels.Free;
pub const COMAdminThreadingModelMain = COMAdminThreadingModels.Main;
pub const COMAdminThreadingModelBoth = COMAdminThreadingModels.Both;
pub const COMAdminThreadingModelNeutral = COMAdminThreadingModels.Neutral;
pub const COMAdminThreadingModelNotSpecified = COMAdminThreadingModels.NotSpecified;
pub const COMAdminTransactionOptions = enum(i32) {
Ignored = 0,
None = 1,
Supported = 2,
Required = 3,
RequiresNew = 4,
};
pub const COMAdminTransactionIgnored = COMAdminTransactionOptions.Ignored;
pub const COMAdminTransactionNone = COMAdminTransactionOptions.None;
pub const COMAdminTransactionSupported = COMAdminTransactionOptions.Supported;
pub const COMAdminTransactionRequired = COMAdminTransactionOptions.Required;
pub const COMAdminTransactionRequiresNew = COMAdminTransactionOptions.RequiresNew;
pub const COMAdminTxIsolationLevelOptions = enum(i32) {
Any = 0,
ReadUnCommitted = 1,
ReadCommitted = 2,
RepeatableRead = 3,
Serializable = 4,
};
pub const COMAdminTxIsolationLevelAny = COMAdminTxIsolationLevelOptions.Any;
pub const COMAdminTxIsolationLevelReadUnCommitted = COMAdminTxIsolationLevelOptions.ReadUnCommitted;
pub const COMAdminTxIsolationLevelReadCommitted = COMAdminTxIsolationLevelOptions.ReadCommitted;
pub const COMAdminTxIsolationLevelRepeatableRead = COMAdminTxIsolationLevelOptions.RepeatableRead;
pub const COMAdminTxIsolationLevelSerializable = COMAdminTxIsolationLevelOptions.Serializable;
pub const COMAdminSynchronizationOptions = enum(i32) {
Ignored = 0,
None = 1,
Supported = 2,
Required = 3,
RequiresNew = 4,
};
pub const COMAdminSynchronizationIgnored = COMAdminSynchronizationOptions.Ignored;
pub const COMAdminSynchronizationNone = COMAdminSynchronizationOptions.None;
pub const COMAdminSynchronizationSupported = COMAdminSynchronizationOptions.Supported;
pub const COMAdminSynchronizationRequired = COMAdminSynchronizationOptions.Required;
pub const COMAdminSynchronizationRequiresNew = COMAdminSynchronizationOptions.RequiresNew;
pub const COMAdminActivationOptions = enum(i32) {
Inproc = 0,
Local = 1,
};
pub const COMAdminActivationInproc = COMAdminActivationOptions.Inproc;
pub const COMAdminActivationLocal = COMAdminActivationOptions.Local;
pub const COMAdminAccessChecksLevelOptions = enum(i32) {
Level = 0,
ComponentLevel = 1,
};
pub const COMAdminAccessChecksApplicationLevel = COMAdminAccessChecksLevelOptions.Level;
pub const COMAdminAccessChecksApplicationComponentLevel = COMAdminAccessChecksLevelOptions.ComponentLevel;
pub const COMAdminAuthenticationLevelOptions = enum(i32) {
Default = 0,
None = 1,
Connect = 2,
Call = 3,
Packet = 4,
Integrity = 5,
Privacy = 6,
};
pub const COMAdminAuthenticationDefault = COMAdminAuthenticationLevelOptions.Default;
pub const COMAdminAuthenticationNone = COMAdminAuthenticationLevelOptions.None;
pub const COMAdminAuthenticationConnect = COMAdminAuthenticationLevelOptions.Connect;
pub const COMAdminAuthenticationCall = COMAdminAuthenticationLevelOptions.Call;
pub const COMAdminAuthenticationPacket = COMAdminAuthenticationLevelOptions.Packet;
pub const COMAdminAuthenticationIntegrity = COMAdminAuthenticationLevelOptions.Integrity;
pub const COMAdminAuthenticationPrivacy = COMAdminAuthenticationLevelOptions.Privacy;
pub const COMAdminImpersonationLevelOptions = enum(i32) {
Anonymous = 1,
Identify = 2,
Impersonate = 3,
Delegate = 4,
};
pub const COMAdminImpersonationAnonymous = COMAdminImpersonationLevelOptions.Anonymous;
pub const COMAdminImpersonationIdentify = COMAdminImpersonationLevelOptions.Identify;
pub const COMAdminImpersonationImpersonate = COMAdminImpersonationLevelOptions.Impersonate;
pub const COMAdminImpersonationDelegate = COMAdminImpersonationLevelOptions.Delegate;
pub const COMAdminAuthenticationCapabilitiesOptions = enum(i32) {
None = 0,
SecureReference = 2,
StaticCloaking = 32,
DynamicCloaking = 64,
};
pub const COMAdminAuthenticationCapabilitiesNone = COMAdminAuthenticationCapabilitiesOptions.None;
pub const COMAdminAuthenticationCapabilitiesSecureReference = COMAdminAuthenticationCapabilitiesOptions.SecureReference;
pub const COMAdminAuthenticationCapabilitiesStaticCloaking = COMAdminAuthenticationCapabilitiesOptions.StaticCloaking;
pub const COMAdminAuthenticationCapabilitiesDynamicCloaking = COMAdminAuthenticationCapabilitiesOptions.DynamicCloaking;
pub const COMAdminOS = enum(i32) {
NotInitialized = 0,
Windows3_1 = 1,
Windows9x = 2,
Windows2000 = 3,
Windows2000AdvancedServer = 4,
Windows2000Unknown = 5,
Unknown = 6,
WindowsXPPersonal = 11,
WindowsXPProfessional = 12,
WindowsNETStandardServer = 13,
WindowsNETEnterpriseServer = 14,
WindowsNETDatacenterServer = 15,
WindowsNETWebServer = 16,
WindowsLonghornPersonal = 17,
WindowsLonghornProfessional = 18,
WindowsLonghornStandardServer = 19,
WindowsLonghornEnterpriseServer = 20,
WindowsLonghornDatacenterServer = 21,
WindowsLonghornWebServer = 22,
Windows7Personal = 23,
Windows7Professional = 24,
Windows7StandardServer = 25,
Windows7EnterpriseServer = 26,
Windows7DatacenterServer = 27,
Windows7WebServer = 28,
Windows8Personal = 29,
Windows8Professional = 30,
Windows8StandardServer = 31,
Windows8EnterpriseServer = 32,
Windows8DatacenterServer = 33,
Windows8WebServer = 34,
WindowsBluePersonal = 35,
WindowsBlueProfessional = 36,
WindowsBlueStandardServer = 37,
WindowsBlueEnterpriseServer = 38,
WindowsBlueDatacenterServer = 39,
WindowsBlueWebServer = 40,
};
pub const COMAdminOSNotInitialized = COMAdminOS.NotInitialized;
pub const COMAdminOSWindows3_1 = COMAdminOS.Windows3_1;
pub const COMAdminOSWindows9x = COMAdminOS.Windows9x;
pub const COMAdminOSWindows2000 = COMAdminOS.Windows2000;
pub const COMAdminOSWindows2000AdvancedServer = COMAdminOS.Windows2000AdvancedServer;
pub const COMAdminOSWindows2000Unknown = COMAdminOS.Windows2000Unknown;
pub const COMAdminOSUnknown = COMAdminOS.Unknown;
pub const COMAdminOSWindowsXPPersonal = COMAdminOS.WindowsXPPersonal;
pub const COMAdminOSWindowsXPProfessional = COMAdminOS.WindowsXPProfessional;
pub const COMAdminOSWindowsNETStandardServer = COMAdminOS.WindowsNETStandardServer;
pub const COMAdminOSWindowsNETEnterpriseServer = COMAdminOS.WindowsNETEnterpriseServer;
pub const COMAdminOSWindowsNETDatacenterServer = COMAdminOS.WindowsNETDatacenterServer;
pub const COMAdminOSWindowsNETWebServer = COMAdminOS.WindowsNETWebServer;
pub const COMAdminOSWindowsLonghornPersonal = COMAdminOS.WindowsLonghornPersonal;
pub const COMAdminOSWindowsLonghornProfessional = COMAdminOS.WindowsLonghornProfessional;
pub const COMAdminOSWindowsLonghornStandardServer = COMAdminOS.WindowsLonghornStandardServer;
pub const COMAdminOSWindowsLonghornEnterpriseServer = COMAdminOS.WindowsLonghornEnterpriseServer;
pub const COMAdminOSWindowsLonghornDatacenterServer = COMAdminOS.WindowsLonghornDatacenterServer;
pub const COMAdminOSWindowsLonghornWebServer = COMAdminOS.WindowsLonghornWebServer;
pub const COMAdminOSWindows7Personal = COMAdminOS.Windows7Personal;
pub const COMAdminOSWindows7Professional = COMAdminOS.Windows7Professional;
pub const COMAdminOSWindows7StandardServer = COMAdminOS.Windows7StandardServer;
pub const COMAdminOSWindows7EnterpriseServer = COMAdminOS.Windows7EnterpriseServer;
pub const COMAdminOSWindows7DatacenterServer = COMAdminOS.Windows7DatacenterServer;
pub const COMAdminOSWindows7WebServer = COMAdminOS.Windows7WebServer;
pub const COMAdminOSWindows8Personal = COMAdminOS.Windows8Personal;
pub const COMAdminOSWindows8Professional = COMAdminOS.Windows8Professional;
pub const COMAdminOSWindows8StandardServer = COMAdminOS.Windows8StandardServer;
pub const COMAdminOSWindows8EnterpriseServer = COMAdminOS.Windows8EnterpriseServer;
pub const COMAdminOSWindows8DatacenterServer = COMAdminOS.Windows8DatacenterServer;
pub const COMAdminOSWindows8WebServer = COMAdminOS.Windows8WebServer;
pub const COMAdminOSWindowsBluePersonal = COMAdminOS.WindowsBluePersonal;
pub const COMAdminOSWindowsBlueProfessional = COMAdminOS.WindowsBlueProfessional;
pub const COMAdminOSWindowsBlueStandardServer = COMAdminOS.WindowsBlueStandardServer;
pub const COMAdminOSWindowsBlueEnterpriseServer = COMAdminOS.WindowsBlueEnterpriseServer;
pub const COMAdminOSWindowsBlueDatacenterServer = COMAdminOS.WindowsBlueDatacenterServer;
pub const COMAdminOSWindowsBlueWebServer = COMAdminOS.WindowsBlueWebServer;
pub const COMAdminServiceOptions = enum(i32) {
    // NOTE: the terse member name mirrors the upstream generated bindings; this
    // value corresponds to COMAdminServiceLoadBalanceRouter (aliased just below).
    r = 1,
};
pub const COMAdminServiceLoadBalanceRouter = COMAdminServiceOptions.r;
pub const COMAdminServiceStatusOptions = enum(i32) {
Stopped = 0,
StartPending = 1,
StopPending = 2,
Running = 3,
ContinuePending = 4,
PausePending = 5,
Paused = 6,
UnknownState = 7,
};
pub const COMAdminServiceStopped = COMAdminServiceStatusOptions.Stopped;
pub const COMAdminServiceStartPending = COMAdminServiceStatusOptions.StartPending;
pub const COMAdminServiceStopPending = COMAdminServiceStatusOptions.StopPending;
pub const COMAdminServiceRunning = COMAdminServiceStatusOptions.Running;
pub const COMAdminServiceContinuePending = COMAdminServiceStatusOptions.ContinuePending;
pub const COMAdminServicePausePending = COMAdminServiceStatusOptions.PausePending;
pub const COMAdminServicePaused = COMAdminServiceStatusOptions.Paused;
pub const COMAdminServiceUnknownState = COMAdminServiceStatusOptions.UnknownState;
pub const COMAdminQCMessageAuthenticateOptions = enum(i32) {
SecureApps = 0,
Off = 1,
On = 2,
};
pub const COMAdminQCMessageAuthenticateSecureApps = COMAdminQCMessageAuthenticateOptions.SecureApps;
pub const COMAdminQCMessageAuthenticateOff = COMAdminQCMessageAuthenticateOptions.Off;
pub const COMAdminQCMessageAuthenticateOn = COMAdminQCMessageAuthenticateOptions.On;
pub const COMAdminFileFlags = enum(i32) {
Loadable = 1,
COM = 2,
ContainsPS = 4,
ContainsComp = 8,
ContainsTLB = 16,
SelfReg = 32,
SelfUnReg = 64,
UnloadableDLL = 128,
DoesNotExist = 256,
AlreadyInstalled = 512,
BadTLB = 1024,
GetClassObjFailed = 2048,
ClassNotAvailable = 4096,
Registrar = 8192,
NoRegistrar = 16384,
DLLRegsvrFailed = 32768,
RegTLBFailed = 65536,
RegistrarFailed = 131072,
Error = 262144,
};
pub const COMAdminFileFlagLoadable = COMAdminFileFlags.Loadable;
pub const COMAdminFileFlagCOM = COMAdminFileFlags.COM;
pub const COMAdminFileFlagContainsPS = COMAdminFileFlags.ContainsPS;
pub const COMAdminFileFlagContainsComp = COMAdminFileFlags.ContainsComp;
pub const COMAdminFileFlagContainsTLB = COMAdminFileFlags.ContainsTLB;
pub const COMAdminFileFlagSelfReg = COMAdminFileFlags.SelfReg;
pub const COMAdminFileFlagSelfUnReg = COMAdminFileFlags.SelfUnReg;
pub const COMAdminFileFlagUnloadableDLL = COMAdminFileFlags.UnloadableDLL;
pub const COMAdminFileFlagDoesNotExist = COMAdminFileFlags.DoesNotExist;
pub const COMAdminFileFlagAlreadyInstalled = COMAdminFileFlags.AlreadyInstalled;
pub const COMAdminFileFlagBadTLB = COMAdminFileFlags.BadTLB;
pub const COMAdminFileFlagGetClassObjFailed = COMAdminFileFlags.GetClassObjFailed;
pub const COMAdminFileFlagClassNotAvailable = COMAdminFileFlags.ClassNotAvailable;
pub const COMAdminFileFlagRegistrar = COMAdminFileFlags.Registrar;
pub const COMAdminFileFlagNoRegistrar = COMAdminFileFlags.NoRegistrar;
pub const COMAdminFileFlagDLLRegsvrFailed = COMAdminFileFlags.DLLRegsvrFailed;
pub const COMAdminFileFlagRegTLBFailed = COMAdminFileFlags.RegTLBFailed;
pub const COMAdminFileFlagRegistrarFailed = COMAdminFileFlags.RegistrarFailed;
pub const COMAdminFileFlagError = COMAdminFileFlags.Error;
pub const COMAdminComponentFlags = enum(i32) {
TypeInfoFound = 1,
COMPlusPropertiesFound = 2,
ProxyFound = 4,
InterfacesFound = 8,
AlreadyInstalled = 16,
NotInApplication = 32,
};
pub const COMAdminCompFlagTypeInfoFound = COMAdminComponentFlags.TypeInfoFound;
pub const COMAdminCompFlagCOMPlusPropertiesFound = COMAdminComponentFlags.COMPlusPropertiesFound;
pub const COMAdminCompFlagProxyFound = COMAdminComponentFlags.ProxyFound;
pub const COMAdminCompFlagInterfacesFound = COMAdminComponentFlags.InterfacesFound;
pub const COMAdminCompFlagAlreadyInstalled = COMAdminComponentFlags.AlreadyInstalled;
pub const COMAdminCompFlagNotInApplication = COMAdminComponentFlags.NotInApplication;
pub const COMAdminErrorCodes = enum(i32) {
ObjectErrors = -2146368511,
ObjectInvalid = -2146368510,
KeyMissing = -2146368509,
AlreadyInstalled = -2146368508,
AppFileWriteFail = -2146368505,
AppFileReadFail = -2146368504,
AppFileVersion = -2146368503,
BadPath = -2146368502,
ApplicationExists = -2146368501,
RoleExists = -2146368500,
CantCopyFile = -2146368499,
NoUser = -2146368497,
InvalidUserids = -2146368496,
NoRegistryCLSID = -2146368495,
BadRegistryProgID = -2146368494,
AuthenticationLevel = -2146368493,
UserPasswdNotValid = -2146368492,
CLSIDOrIIDMismatch = -2146368488,
RemoteInterface = -2146368487,
DllRegisterServer = -2146368486,
NoServerShare = -2146368485,
DllLoadFailed = -2146368483,
BadRegistryLibID = -2146368482,
AppDirNotFound = -2146368481,
RegistrarFailed = -2146368477,
CompFileDoesNotExist = -2146368476,
CompFileLoadDLLFail = -2146368475,
CompFileGetClassObj = -2146368474,
CompFileClassNotAvail = -2146368473,
CompFileBadTLB = -2146368472,
CompFileNotInstallable = -2146368471,
NotChangeable = -2146368470,
NotDeletable = -2146368469,
Session = -2146368468,
CompMoveLocked = -2146368467,
CompMoveBadDest = -2146368466,
RegisterTLB = -2146368464,
SystemApp = -2146368461,
CompFileNoRegistrar = -2146368460,
CoReqCompInstalled = -2146368459,
ServiceNotInstalled = -2146368458,
PropertySaveFailed = -2146368457,
ObjectExists = -2146368456,
ComponentExists = -2146368455,
RegFileCorrupt = -2146368453,
PropertyOverflow = -2146368452,
NotInRegistry = -2146368450,
ObjectNotPoolable = -2146368449,
ApplidMatchesClsid = -2146368442,
RoleDoesNotExist = -2146368441,
StartAppNeedsComponents = -2146368440,
RequiresDifferentPlatform = -2146368439,
QueuingServiceNotAvailable = -2146367998,
ObjectParentMissing = -2146367480,
ObjectDoesNotExist = -2146367479,
CanNotExportAppProxy = -2146368438,
CanNotStartApp = -2146368437,
CanNotExportSystemApp = -2146368436,
CanNotSubscribeToComponent = -2146368435,
AppNotRunning = -2146367478,
EventClassCannotBeSubscriber = -2146368434,
LibAppProxyIncompatible = -2146368433,
BasePartitionOnly = -2146368432,
DuplicatePartitionName = -2146368425,
PartitionInUse = -2146368423,
ImportedComponentsNotAllowed = -2146368421,
RegdbNotInitialized = -2146368398,
RegdbNotOpen = -2146368397,
RegdbSystemErr = -2146368396,
RegdbAlreadyRunning = -2146368395,
MigVersionNotSupported = -2146368384,
MigSchemaNotFound = -2146368383,
CatBitnessMismatch = -2146368382,
CatUnacceptableBitness = -2146368381,
CatWrongAppBitnessBitness = -2146368380,
CatPauseResumeNotSupported = -2146368379,
CatServerFault = -2146368378,
CantRecycleLibraryApps = -2146367473,
CantRecycleServiceApps = -2146367471,
ProcessAlreadyRecycled = -2146367470,
PausedProcessMayNotBeRecycled = -2146367469,
InvalidPartition = -2146367477,
PartitionMsiOnly = -2146367463,
StartAppDisabled = -2146368431,
CompMoveSource = -2146367460,
CompMoveDest = -2146367459,
CompMovePrivate = -2146367458,
CannotCopyEventClass = -2146367456,
};
pub const COMAdminErrObjectErrors = COMAdminErrorCodes.ObjectErrors;
pub const COMAdminErrObjectInvalid = COMAdminErrorCodes.ObjectInvalid;
pub const COMAdminErrKeyMissing = COMAdminErrorCodes.KeyMissing;
pub const COMAdminErrAlreadyInstalled = COMAdminErrorCodes.AlreadyInstalled;
pub const COMAdminErrAppFileWriteFail = COMAdminErrorCodes.AppFileWriteFail;
pub const COMAdminErrAppFileReadFail = COMAdminErrorCodes.AppFileReadFail;
pub const COMAdminErrAppFileVersion = COMAdminErrorCodes.AppFileVersion;
pub const COMAdminErrBadPath = COMAdminErrorCodes.BadPath;
pub const COMAdminErrApplicationExists = COMAdminErrorCodes.ApplicationExists;
pub const COMAdminErrRoleExists = COMAdminErrorCodes.RoleExists;
pub const COMAdminErrCantCopyFile = COMAdminErrorCodes.CantCopyFile;
pub const COMAdminErrNoUser = COMAdminErrorCodes.NoUser;
pub const COMAdminErrInvalidUserids = COMAdminErrorCodes.InvalidUserids;
pub const COMAdminErrNoRegistryCLSID = COMAdminErrorCodes.NoRegistryCLSID;
pub const COMAdminErrBadRegistryProgID = COMAdminErrorCodes.BadRegistryProgID;
pub const COMAdminErrAuthenticationLevel = COMAdminErrorCodes.AuthenticationLevel;
pub const COMAdminErrUserPasswdNotValid = COMAdminErrorCodes.UserPasswdNotValid;
pub const COMAdminErrCLSIDOrIIDMismatch = COMAdminErrorCodes.CLSIDOrIIDMismatch;
pub const COMAdminErrRemoteInterface = COMAdminErrorCodes.RemoteInterface;
pub const COMAdminErrDllRegisterServer = COMAdminErrorCodes.DllRegisterServer;
pub const COMAdminErrNoServerShare = COMAdminErrorCodes.NoServerShare;
pub const COMAdminErrDllLoadFailed = COMAdminErrorCodes.DllLoadFailed;
pub const COMAdminErrBadRegistryLibID = COMAdminErrorCodes.BadRegistryLibID;
pub const COMAdminErrAppDirNotFound = COMAdminErrorCodes.AppDirNotFound;
pub const COMAdminErrRegistrarFailed = COMAdminErrorCodes.RegistrarFailed;
pub const COMAdminErrCompFileDoesNotExist = COMAdminErrorCodes.CompFileDoesNotExist;
pub const COMAdminErrCompFileLoadDLLFail = COMAdminErrorCodes.CompFileLoadDLLFail;
pub const COMAdminErrCompFileGetClassObj = COMAdminErrorCodes.CompFileGetClassObj;
pub const COMAdminErrCompFileClassNotAvail = COMAdminErrorCodes.CompFileClassNotAvail;
pub const COMAdminErrCompFileBadTLB = COMAdminErrorCodes.CompFileBadTLB;
pub const COMAdminErrCompFileNotInstallable = COMAdminErrorCodes.CompFileNotInstallable;
pub const COMAdminErrNotChangeable = COMAdminErrorCodes.NotChangeable;
pub const COMAdminErrNotDeletable = COMAdminErrorCodes.NotDeletable;
pub const COMAdminErrSession = COMAdminErrorCodes.Session;
pub const COMAdminErrCompMoveLocked = COMAdminErrorCodes.CompMoveLocked;
pub const COMAdminErrCompMoveBadDest = COMAdminErrorCodes.CompMoveBadDest;
pub const COMAdminErrRegisterTLB = COMAdminErrorCodes.RegisterTLB;
pub const COMAdminErrSystemApp = COMAdminErrorCodes.SystemApp;
pub const COMAdminErrCompFileNoRegistrar = COMAdminErrorCodes.CompFileNoRegistrar;
pub const COMAdminErrCoReqCompInstalled = COMAdminErrorCodes.CoReqCompInstalled;
pub const COMAdminErrServiceNotInstalled = COMAdminErrorCodes.ServiceNotInstalled;
pub const COMAdminErrPropertySaveFailed = COMAdminErrorCodes.PropertySaveFailed;
pub const COMAdminErrObjectExists = COMAdminErrorCodes.ObjectExists;
pub const COMAdminErrComponentExists = COMAdminErrorCodes.ComponentExists;
pub const COMAdminErrRegFileCorrupt = COMAdminErrorCodes.RegFileCorrupt;
pub const COMAdminErrPropertyOverflow = COMAdminErrorCodes.PropertyOverflow;
pub const COMAdminErrNotInRegistry = COMAdminErrorCodes.NotInRegistry;
pub const COMAdminErrObjectNotPoolable = COMAdminErrorCodes.ObjectNotPoolable;
pub const COMAdminErrApplidMatchesClsid = COMAdminErrorCodes.ApplidMatchesClsid;
pub const COMAdminErrRoleDoesNotExist = COMAdminErrorCodes.RoleDoesNotExist;
pub const COMAdminErrStartAppNeedsComponents = COMAdminErrorCodes.StartAppNeedsComponents;
pub const COMAdminErrRequiresDifferentPlatform = COMAdminErrorCodes.RequiresDifferentPlatform;
pub const COMAdminErrQueuingServiceNotAvailable = COMAdminErrorCodes.QueuingServiceNotAvailable;
pub const COMAdminErrObjectParentMissing = COMAdminErrorCodes.ObjectParentMissing;
pub const COMAdminErrObjectDoesNotExist = COMAdminErrorCodes.ObjectDoesNotExist;
pub const COMAdminErrCanNotExportAppProxy = COMAdminErrorCodes.CanNotExportAppProxy;
pub const COMAdminErrCanNotStartApp = COMAdminErrorCodes.CanNotStartApp;
pub const COMAdminErrCanNotExportSystemApp = COMAdminErrorCodes.CanNotExportSystemApp;
pub const COMAdminErrCanNotSubscribeToComponent = COMAdminErrorCodes.CanNotSubscribeToComponent;
pub const COMAdminErrAppNotRunning = COMAdminErrorCodes.AppNotRunning;
pub const COMAdminErrEventClassCannotBeSubscriber = COMAdminErrorCodes.EventClassCannotBeSubscriber;
pub const COMAdminErrLibAppProxyIncompatible = COMAdminErrorCodes.LibAppProxyIncompatible;
pub const COMAdminErrBasePartitionOnly = COMAdminErrorCodes.BasePartitionOnly;
pub const COMAdminErrDuplicatePartitionName = COMAdminErrorCodes.DuplicatePartitionName;
pub const COMAdminErrPartitionInUse = COMAdminErrorCodes.PartitionInUse;
pub const COMAdminErrImportedComponentsNotAllowed = COMAdminErrorCodes.ImportedComponentsNotAllowed;
pub const COMAdminErrRegdbNotInitialized = COMAdminErrorCodes.RegdbNotInitialized;
pub const COMAdminErrRegdbNotOpen = COMAdminErrorCodes.RegdbNotOpen;
pub const COMAdminErrRegdbSystemErr = COMAdminErrorCodes.RegdbSystemErr;
pub const COMAdminErrRegdbAlreadyRunning = COMAdminErrorCodes.RegdbAlreadyRunning;
pub const COMAdminErrMigVersionNotSupported = COMAdminErrorCodes.MigVersionNotSupported;
pub const COMAdminErrMigSchemaNotFound = COMAdminErrorCodes.MigSchemaNotFound;
pub const COMAdminErrCatBitnessMismatch = COMAdminErrorCodes.CatBitnessMismatch;
pub const COMAdminErrCatUnacceptableBitness = COMAdminErrorCodes.CatUnacceptableBitness;
pub const COMAdminErrCatWrongAppBitnessBitness = COMAdminErrorCodes.CatWrongAppBitnessBitness;
pub const COMAdminErrCatPauseResumeNotSupported = COMAdminErrorCodes.CatPauseResumeNotSupported;
pub const COMAdminErrCatServerFault = COMAdminErrorCodes.CatServerFault;
pub const COMAdminErrCantRecycleLibraryApps = COMAdminErrorCodes.CantRecycleLibraryApps;
pub const COMAdminErrCantRecycleServiceApps = COMAdminErrorCodes.CantRecycleServiceApps;
pub const COMAdminErrProcessAlreadyRecycled = COMAdminErrorCodes.ProcessAlreadyRecycled;
pub const COMAdminErrPausedProcessMayNotBeRecycled = COMAdminErrorCodes.PausedProcessMayNotBeRecycled;
pub const COMAdminErrInvalidPartition = COMAdminErrorCodes.InvalidPartition;
pub const COMAdminErrPartitionMsiOnly = COMAdminErrorCodes.PartitionMsiOnly;
pub const COMAdminErrStartAppDisabled = COMAdminErrorCodes.StartAppDisabled;
pub const COMAdminErrCompMoveSource = COMAdminErrorCodes.CompMoveSource;
pub const COMAdminErrCompMoveDest = COMAdminErrorCodes.CompMoveDest;
pub const COMAdminErrCompMovePrivate = COMAdminErrorCodes.CompMovePrivate;
pub const COMAdminErrCannotCopyEventClass = COMAdminErrorCodes.CannotCopyEventClass;
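// Usage sketch (illustrative): COMAdminErrorCodes members are the signed HRESULT
// values returned by the catalog APIs, so a raw HRESULT can be compared against
// them directly after an integer conversion. @enumToInt matches the pre-0.11 Zig
// style used throughout this file; adjust if the toolchain differs.
fn exampleIsObjectMissing(hr: HRESULT) bool {
    return hr == @enumToInt(COMAdminErrorCodes.ObjectDoesNotExist);
}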
// TODO: this type is limited to platform 'windows5.0'
const IID_ISecurityIdentityColl_Value = @import("../zig.zig").Guid.initString("cafc823c-b441-11d1-b82b-0000f8757e2a");
pub const IID_ISecurityIdentityColl = &IID_ISecurityIdentityColl_Value;
pub const ISecurityIdentityColl = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ISecurityIdentityColl,
plCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Item: fn(
self: *const ISecurityIdentityColl,
name: ?BSTR,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ISecurityIdentityColl,
ppEnum: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityIdentityColl_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityIdentityColl.VTable, self.vtable).get_Count(@ptrCast(*const ISecurityIdentityColl, self), plCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityIdentityColl_get_Item(self: *const T, name: ?BSTR, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityIdentityColl.VTable, self.vtable).get_Item(@ptrCast(*const ISecurityIdentityColl, self), name, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityIdentityColl_get__NewEnum(self: *const T, ppEnum: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityIdentityColl.VTable, self.vtable).get__NewEnum(@ptrCast(*const ISecurityIdentityColl, self), ppEnum);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISecurityCallersColl_Value = @import("../zig.zig").Guid.initString("cafc823d-b441-11d1-b82b-0000f8757e2a");
pub const IID_ISecurityCallersColl = &IID_ISecurityCallersColl_Value;
pub const ISecurityCallersColl = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ISecurityCallersColl,
plCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Item: fn(
self: *const ISecurityCallersColl,
lIndex: i32,
pObj: ?*?*ISecurityIdentityColl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ISecurityCallersColl,
ppEnum: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallersColl_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallersColl.VTable, self.vtable).get_Count(@ptrCast(*const ISecurityCallersColl, self), plCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallersColl_get_Item(self: *const T, lIndex: i32, pObj: ?*?*ISecurityIdentityColl) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallersColl.VTable, self.vtable).get_Item(@ptrCast(*const ISecurityCallersColl, self), lIndex, pObj);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallersColl_get__NewEnum(self: *const T, ppEnum: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallersColl.VTable, self.vtable).get__NewEnum(@ptrCast(*const ISecurityCallersColl, self), ppEnum);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISecurityCallContext_Value = @import("../zig.zig").Guid.initString("cafc823e-b441-11d1-b82b-0000f8757e2a");
pub const IID_ISecurityCallContext = &IID_ISecurityCallContext_Value;
pub const ISecurityCallContext = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ISecurityCallContext,
plCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Item: fn(
self: *const ISecurityCallContext,
name: ?BSTR,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ISecurityCallContext,
ppEnum: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsCallerInRole: fn(
self: *const ISecurityCallContext,
bstrRole: ?BSTR,
pfInRole: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsSecurityEnabled: fn(
self: *const ISecurityCallContext,
pfIsEnabled: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsUserInRole: fn(
self: *const ISecurityCallContext,
pUser: ?*VARIANT,
bstrRole: ?BSTR,
pfInRole: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallContext_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallContext.VTable, self.vtable).get_Count(@ptrCast(*const ISecurityCallContext, self), plCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallContext_get_Item(self: *const T, name: ?BSTR, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallContext.VTable, self.vtable).get_Item(@ptrCast(*const ISecurityCallContext, self), name, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallContext_get__NewEnum(self: *const T, ppEnum: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallContext.VTable, self.vtable).get__NewEnum(@ptrCast(*const ISecurityCallContext, self), ppEnum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallContext_IsCallerInRole(self: *const T, bstrRole: ?BSTR, pfInRole: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallContext.VTable, self.vtable).IsCallerInRole(@ptrCast(*const ISecurityCallContext, self), bstrRole, pfInRole);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallContext_IsSecurityEnabled(self: *const T, pfIsEnabled: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallContext.VTable, self.vtable).IsSecurityEnabled(@ptrCast(*const ISecurityCallContext, self), pfIsEnabled);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityCallContext_IsUserInRole(self: *const T, pUser: ?*VARIANT, bstrRole: ?BSTR, pfInRole: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityCallContext.VTable, self.vtable).IsUserInRole(@ptrCast(*const ISecurityCallContext, self), pUser, bstrRole, pfInRole);
}
};}
pub usingnamespace MethodMixin(@This());
};
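// Usage sketch (illustrative, not part of the generated bindings): a role check via
// the mixin wrapper. The i16 out parameter is a VARIANT_BOOL, so any non-zero value
// means the caller is in the role. Acquiring the security call context itself
// (normally through IGetSecurityCallContext below, or CoGetCallContext) is assumed
// and out of scope here.
fn exampleIsCallerInRole(ctx: *const ISecurityCallContext, role: BSTR) bool {
    var in_role: i16 = 0;
    if (ctx.ISecurityCallContext_IsCallerInRole(role, &in_role) < 0) return false;
    return in_role != 0;
}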
// TODO: this type is limited to platform 'windows5.0'
const IID_IGetSecurityCallContext_Value = @import("../zig.zig").Guid.initString("cafc823f-b441-11d1-b82b-0000f8757e2a");
pub const IID_IGetSecurityCallContext = &IID_IGetSecurityCallContext_Value;
pub const IGetSecurityCallContext = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
GetSecurityCallContext: fn(
self: *const IGetSecurityCallContext,
ppObject: ?*?*ISecurityCallContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetSecurityCallContext_GetSecurityCallContext(self: *const T, ppObject: ?*?*ISecurityCallContext) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetSecurityCallContext.VTable, self.vtable).GetSecurityCallContext(@ptrCast(*const IGetSecurityCallContext, self), ppObject);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_SecurityProperty_Value = @import("../zig.zig").Guid.initString("e74a7215-014d-11d1-a63c-00a0c911b4e0");
pub const IID_SecurityProperty = &IID_SecurityProperty_Value;
pub const SecurityProperty = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
GetDirectCallerName: fn(
self: *const SecurityProperty,
bstrUserName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDirectCreatorName: fn(
self: *const SecurityProperty,
bstrUserName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOriginalCallerName: fn(
self: *const SecurityProperty,
bstrUserName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOriginalCreatorName: fn(
self: *const SecurityProperty,
bstrUserName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn SecurityProperty_GetDirectCallerName(self: *const T, bstrUserName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const SecurityProperty.VTable, self.vtable).GetDirectCallerName(@ptrCast(*const SecurityProperty, self), bstrUserName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn SecurityProperty_GetDirectCreatorName(self: *const T, bstrUserName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const SecurityProperty.VTable, self.vtable).GetDirectCreatorName(@ptrCast(*const SecurityProperty, self), bstrUserName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn SecurityProperty_GetOriginalCallerName(self: *const T, bstrUserName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const SecurityProperty.VTable, self.vtable).GetOriginalCallerName(@ptrCast(*const SecurityProperty, self), bstrUserName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn SecurityProperty_GetOriginalCreatorName(self: *const T, bstrUserName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const SecurityProperty.VTable, self.vtable).GetOriginalCreatorName(@ptrCast(*const SecurityProperty, self), bstrUserName);
}
};}
pub usingnamespace MethodMixin(@This());
};
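// Usage sketch (illustrative): the four SecurityProperty getters all return a
// caller-owned BSTR. On success the string must eventually be released with
// SysFreeString (OLE Automation); that call is assumed to be available to the
// caller and is not made here.
fn exampleDirectCallerName(sp: *const SecurityProperty, out_name: *?BSTR) HRESULT {
    return sp.SecurityProperty_GetDirectCallerName(out_name);
}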
// TODO: this type is limited to platform 'windows5.0'
const IID_ContextInfo_Value = @import("../zig.zig").Guid.initString("19a5a02c-0ac8-11d2-b286-00c04f8ef934");
pub const IID_ContextInfo = &IID_ContextInfo_Value;
pub const ContextInfo = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
IsInTransaction: fn(
self: *const ContextInfo,
pbIsInTx: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTransaction: fn(
self: *const ContextInfo,
ppTx: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTransactionId: fn(
self: *const ContextInfo,
pbstrTxId: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetActivityId: fn(
self: *const ContextInfo,
pbstrActivityId: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetContextId: fn(
self: *const ContextInfo,
pbstrCtxId: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo_IsInTransaction(self: *const T, pbIsInTx: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo.VTable, self.vtable).IsInTransaction(@ptrCast(*const ContextInfo, self), pbIsInTx);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo_GetTransaction(self: *const T, ppTx: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo.VTable, self.vtable).GetTransaction(@ptrCast(*const ContextInfo, self), ppTx);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo_GetTransactionId(self: *const T, pbstrTxId: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo.VTable, self.vtable).GetTransactionId(@ptrCast(*const ContextInfo, self), pbstrTxId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo_GetActivityId(self: *const T, pbstrActivityId: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo.VTable, self.vtable).GetActivityId(@ptrCast(*const ContextInfo, self), pbstrActivityId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo_GetContextId(self: *const T, pbstrCtxId: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo.VTable, self.vtable).GetContextId(@ptrCast(*const ContextInfo, self), pbstrCtxId);
}
};}
pub usingnamespace MethodMixin(@This());
};
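// Usage sketch (illustrative, not part of the generated bindings): transaction
// introspection via ContextInfo. The VARIANT_BOOL out parameter is non-zero when a
// transaction is active; the id strings returned by the Get*Id methods are
// caller-owned BSTRs.
fn exampleInTransaction(info: *const ContextInfo) bool {
    var in_tx: i16 = 0;
    if (info.ContextInfo_IsInTransaction(&in_tx) < 0) return false;
    return in_tx != 0;
}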
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ContextInfo2_Value = @import("../zig.zig").Guid.initString("c99d6e75-2375-11d4-8331-00c04f605588");
pub const IID_ContextInfo2 = &IID_ContextInfo2_Value;
pub const ContextInfo2 = extern struct {
pub const VTable = extern struct {
base: ContextInfo.VTable,
GetPartitionId: fn(
self: *const ContextInfo2,
__MIDL__ContextInfo20000: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetApplicationId: fn(
self: *const ContextInfo2,
__MIDL__ContextInfo20001: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetApplicationInstanceId: fn(
self: *const ContextInfo2,
__MIDL__ContextInfo20002: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace ContextInfo.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo2_GetPartitionId(self: *const T, __MIDL__ContextInfo20000: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo2.VTable, self.vtable).GetPartitionId(@ptrCast(*const ContextInfo2, self), __MIDL__ContextInfo20000);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo2_GetApplicationId(self: *const T, __MIDL__ContextInfo20001: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo2.VTable, self.vtable).GetApplicationId(@ptrCast(*const ContextInfo2, self), __MIDL__ContextInfo20001);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ContextInfo2_GetApplicationInstanceId(self: *const T, __MIDL__ContextInfo20002: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ContextInfo2.VTable, self.vtable).GetApplicationInstanceId(@ptrCast(*const ContextInfo2, self), __MIDL__ContextInfo20002);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ObjectContext_Value = @import("../zig.zig").Guid.initString("74c08646-cedb-11cf-8b49-00aa00b8a790");
pub const IID_ObjectContext = &IID_ObjectContext_Value;
pub const ObjectContext = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
CreateInstance: fn(
self: *const ObjectContext,
bstrProgID: ?BSTR,
pObject: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetComplete: fn(
self: *const ObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetAbort: fn(
self: *const ObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnableCommit: fn(
self: *const ObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DisableCommit: fn(
self: *const ObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsInTransaction: fn(
self: *const ObjectContext,
pbIsInTx: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsSecurityEnabled: fn(
self: *const ObjectContext,
pbIsEnabled: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsCallerInRole: fn(
self: *const ObjectContext,
bstrRole: ?BSTR,
pbInRole: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ObjectContext,
plCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Item: fn(
self: *const ObjectContext,
name: ?BSTR,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ObjectContext,
ppEnum: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Security: fn(
self: *const ObjectContext,
ppSecurityProperty: ?*?*SecurityProperty,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_ContextInfo: fn(
self: *const ObjectContext,
ppContextInfo: ?*?*ContextInfo,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_CreateInstance(self: *const T, bstrProgID: ?BSTR, pObject: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).CreateInstance(@ptrCast(*const ObjectContext, self), bstrProgID, pObject);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_SetComplete(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).SetComplete(@ptrCast(*const ObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_SetAbort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).SetAbort(@ptrCast(*const ObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_EnableCommit(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).EnableCommit(@ptrCast(*const ObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_DisableCommit(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).DisableCommit(@ptrCast(*const ObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_IsInTransaction(self: *const T, pbIsInTx: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).IsInTransaction(@ptrCast(*const ObjectContext, self), pbIsInTx);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_IsSecurityEnabled(self: *const T, pbIsEnabled: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).IsSecurityEnabled(@ptrCast(*const ObjectContext, self), pbIsEnabled);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_IsCallerInRole(self: *const T, bstrRole: ?BSTR, pbInRole: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).IsCallerInRole(@ptrCast(*const ObjectContext, self), bstrRole, pbInRole);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_get_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).get_Count(@ptrCast(*const ObjectContext, self), plCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_get_Item(self: *const T, name: ?BSTR, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).get_Item(@ptrCast(*const ObjectContext, self), name, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_get__NewEnum(self: *const T, ppEnum: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).get__NewEnum(@ptrCast(*const ObjectContext, self), ppEnum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_get_Security(self: *const T, ppSecurityProperty: ?*?*SecurityProperty) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).get_Security(@ptrCast(*const ObjectContext, self), ppSecurityProperty);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectContext_get_ContextInfo(self: *const T, ppContextInfo: ?*?*ContextInfo) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectContext.VTable, self.vtable).get_ContextInfo(@ptrCast(*const ObjectContext, self), ppContextInfo);
}
};}
pub usingnamespace MethodMixin(@This());
};
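// Usage sketch (illustrative, not part of the generated bindings): the classic
// MTS/COM+ completion pattern. A served component votes on its transaction outcome
// by calling SetComplete on success or SetAbort on failure before returning.
// Obtaining the ObjectContext (normally via GetObjectContext from the COM+ services
// runtime) is assumed here.
fn exampleVoteOnOutcome(ctx: *const ObjectContext, work_succeeded: bool) HRESULT {
    if (work_succeeded) {
        return ctx.ObjectContext_SetComplete();
    }
    return ctx.ObjectContext_SetAbort();
}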
// TODO: this type is limited to platform 'windows5.0'
const IID_ITransactionContextEx_Value = @import("../zig.zig").Guid.initString("7999fc22-d3c6-11cf-acab-00a024a55aef");
pub const IID_ITransactionContextEx = &IID_ITransactionContextEx_Value;
pub const ITransactionContextEx = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateInstance: fn(
self: *const ITransactionContextEx,
rclsid: ?*const Guid,
riid: ?*const Guid,
pObject: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Commit: fn(
self: *const ITransactionContextEx,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Abort: fn(
self: *const ITransactionContextEx,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionContextEx_CreateInstance(self: *const T, rclsid: ?*const Guid, riid: ?*const Guid, pObject: ?*?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionContextEx.VTable, self.vtable).CreateInstance(@ptrCast(*const ITransactionContextEx, self), rclsid, riid, pObject);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionContextEx_Commit(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionContextEx.VTable, self.vtable).Commit(@ptrCast(*const ITransactionContextEx, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionContextEx_Abort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionContextEx.VTable, self.vtable).Abort(@ptrCast(*const ITransactionContextEx, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ITransactionContext_Value = @import("../zig.zig").Guid.initString("7999fc21-d3c6-11cf-acab-00a024a55aef");
pub const IID_ITransactionContext = &IID_ITransactionContext_Value;
pub const ITransactionContext = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
CreateInstance: fn(
self: *const ITransactionContext,
pszProgId: ?BSTR,
pObject: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Commit: fn(
self: *const ITransactionContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Abort: fn(
self: *const ITransactionContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionContext_CreateInstance(self: *const T, pszProgId: ?BSTR, pObject: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionContext.VTable, self.vtable).CreateInstance(@ptrCast(*const ITransactionContext, self), pszProgId, pObject);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionContext_Commit(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionContext.VTable, self.vtable).Commit(@ptrCast(*const ITransactionContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionContext_Abort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionContext.VTable, self.vtable).Abort(@ptrCast(*const ITransactionContext, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
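// ITransactionContext is the automation-friendly (IDispatch-based) counterpart
// of ITransactionContextEx above: CreateInstance takes the component's ProgID
// as a BSTR and returns the created object wrapped in a VARIANT rather than as
// a raw interface pointer, while Commit and Abort behave the same way.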
// TODO: this type is limited to platform 'windows5.0'
const IID_ICreateWithTransactionEx_Value = @import("../zig.zig").Guid.initString("455acf57-5345-11d2-99cf-00c04f797bc9");
pub const IID_ICreateWithTransactionEx = &IID_ICreateWithTransactionEx_Value;
pub const ICreateWithTransactionEx = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateInstance: fn(
self: *const ICreateWithTransactionEx,
pTransaction: ?*ITransaction,
rclsid: ?*const Guid,
riid: ?*const Guid,
pObject: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICreateWithTransactionEx_CreateInstance(self: *const T, pTransaction: ?*ITransaction, rclsid: ?*const Guid, riid: ?*const Guid, pObject: ?*?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const ICreateWithTransactionEx.VTable, self.vtable).CreateInstance(@ptrCast(*const ICreateWithTransactionEx, self), pTransaction, rclsid, riid, pObject);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ICreateWithLocalTransaction_Value = @import("../zig.zig").Guid.initString("227ac7a8-8423-42ce-b7cf-03061ec9aaa3");
pub const IID_ICreateWithLocalTransaction = &IID_ICreateWithLocalTransaction_Value;
pub const ICreateWithLocalTransaction = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateInstanceWithSysTx: fn(
self: *const ICreateWithLocalTransaction,
pTransaction: ?*IUnknown,
rclsid: ?*const Guid,
riid: ?*const Guid,
pObject: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICreateWithLocalTransaction_CreateInstanceWithSysTx(self: *const T, pTransaction: ?*IUnknown, rclsid: ?*const Guid, riid: ?*const Guid, pObject: ?*?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const ICreateWithLocalTransaction.VTable, self.vtable).CreateInstanceWithSysTx(@ptrCast(*const ICreateWithLocalTransaction, self), pTransaction, rclsid, riid, pObject);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICreateWithTipTransactionEx_Value = @import("../zig.zig").Guid.initString("455acf59-5345-11d2-99cf-00c04f797bc9");
pub const IID_ICreateWithTipTransactionEx = &IID_ICreateWithTipTransactionEx_Value;
pub const ICreateWithTipTransactionEx = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateInstance: fn(
self: *const ICreateWithTipTransactionEx,
bstrTipUrl: ?BSTR,
rclsid: ?*const Guid,
riid: ?*const Guid,
pObject: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICreateWithTipTransactionEx_CreateInstance(self: *const T, bstrTipUrl: ?BSTR, rclsid: ?*const Guid, riid: ?*const Guid, pObject: ?*?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const ICreateWithTipTransactionEx.VTable, self.vtable).CreateInstance(@ptrCast(*const ICreateWithTipTransactionEx, self), bstrTipUrl, rclsid, riid, pObject);
}
};}
pub usingnamespace MethodMixin(@This());
};
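// COMSVCSEVENTINFO is the common event header passed as `pInfo` to every
// IComXxxEvents callback below. Roughly: cbSize is the structure size in
// bytes, dwPid the id of the process raising the event, lTime/lMicroTime the
// event timestamp (seconds plus a microsecond component), perfCount a
// high-resolution performance-counter reading for ordering, guidApp the COM+
// application id, and sMachineName the originating machine name.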
pub const COMSVCSEVENTINFO = extern struct {
cbSize: u32,
dwPid: u32,
lTime: i64,
lMicroTime: i32,
perfCount: i64,
guidApp: Guid,
sMachineName: ?PWSTR,
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IComLTxEvents_Value = @import("../zig.zig").Guid.initString("605cf82c-578e-4298-975d-82babcd9e053");
pub const IID_IComLTxEvents = &IID_IComLTxEvents_Value;
pub const IComLTxEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnLtxTransactionStart: fn(
self: *const IComLTxEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidLtx: Guid,
tsid: Guid,
fRoot: BOOL,
nIsolationLevel: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnLtxTransactionPrepare: fn(
self: *const IComLTxEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidLtx: Guid,
fVote: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnLtxTransactionAbort: fn(
self: *const IComLTxEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidLtx: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnLtxTransactionCommit: fn(
self: *const IComLTxEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidLtx: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnLtxTransactionPromote: fn(
self: *const IComLTxEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidLtx: Guid,
txnId: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComLTxEvents_OnLtxTransactionStart(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidLtx: Guid, tsid: Guid, fRoot: BOOL, nIsolationLevel: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComLTxEvents.VTable, self.vtable).OnLtxTransactionStart(@ptrCast(*const IComLTxEvents, self), pInfo, guidLtx, tsid, fRoot, nIsolationLevel);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComLTxEvents_OnLtxTransactionPrepare(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidLtx: Guid, fVote: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComLTxEvents.VTable, self.vtable).OnLtxTransactionPrepare(@ptrCast(*const IComLTxEvents, self), pInfo, guidLtx, fVote);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComLTxEvents_OnLtxTransactionAbort(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidLtx: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComLTxEvents.VTable, self.vtable).OnLtxTransactionAbort(@ptrCast(*const IComLTxEvents, self), pInfo, guidLtx);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComLTxEvents_OnLtxTransactionCommit(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidLtx: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComLTxEvents.VTable, self.vtable).OnLtxTransactionCommit(@ptrCast(*const IComLTxEvents, self), pInfo, guidLtx);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComLTxEvents_OnLtxTransactionPromote(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidLtx: Guid, txnId: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComLTxEvents.VTable, self.vtable).OnLtxTransactionPromote(@ptrCast(*const IComLTxEvents, self), pInfo, guidLtx, txnId);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComUserEvent_Value = @import("../zig.zig").Guid.initString("683130a4-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComUserEvent = &IID_IComUserEvent_Value;
pub const IComUserEvent = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnUserEvent: fn(
self: *const IComUserEvent,
pInfo: ?*COMSVCSEVENTINFO,
pvarEvent: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComUserEvent_OnUserEvent(self: *const T, pInfo: ?*COMSVCSEVENTINFO, pvarEvent: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComUserEvent.VTable, self.vtable).OnUserEvent(@ptrCast(*const IComUserEvent, self), pInfo, pvarEvent);
}
};}
pub usingnamespace MethodMixin(@This());
};
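// Implementation sketch (hypothetical): a Zig sink for one of these callback
// interfaces is an extern struct whose first field is the vtable pointer, with
// every slot filled by a WINAPI function. The IUnknown slots are assumed here
// to be named QueryInterface/AddRef/Release as in the rest of these bindings,
// and their stub implementations are omitted.
//
//     const UserEventSink = extern struct {
//         vtable: *const IComUserEvent.VTable,
//
//         const vtbl = IComUserEvent.VTable{
//             .base = .{
//                 .QueryInterface = queryInterface, // stubs not shown
//                 .AddRef = addRef,
//                 .Release = release,
//             },
//             .OnUserEvent = onUserEvent,
//         };
//
//         fn onUserEvent(
//             self: *const IComUserEvent,
//             pInfo: ?*COMSVCSEVENTINFO,
//             pvarEvent: ?*VARIANT,
//         ) callconv(@import("std").os.windows.WINAPI) HRESULT {
//             _ = self; _ = pInfo; _ = pvarEvent;
//             return 0; // S_OK: acknowledge the event without acting on it
//         }
//     };
//
//     // var sink = UserEventSink{ .vtable = &UserEventSink.vtbl };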
// TODO: this type is limited to platform 'windows5.0'
const IID_IComThreadEvents_Value = @import("../zig.zig").Guid.initString("683130a5-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComThreadEvents = &IID_IComThreadEvents_Value;
pub const IComThreadEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnThreadStart: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
dwThread: u32,
            dwThreadCnt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadTerminate: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
dwThread: u32,
            dwThreadCnt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadBindToApartment: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
AptID: u64,
dwActCnt: u32,
dwLowCnt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadUnBind: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
AptID: u64,
dwActCnt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadWorkEnque: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
MsgWorkID: u64,
QueueLen: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadWorkPrivate: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
MsgWorkID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadWorkPublic: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
MsgWorkID: u64,
QueueLen: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadWorkRedirect: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
MsgWorkID: u64,
QueueLen: u32,
ThreadNum: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadWorkReject: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
ThreadID: u64,
MsgWorkID: u64,
QueueLen: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadAssignApartment: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
AptID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnThreadUnassignApartment: fn(
self: *const IComThreadEvents,
pInfo: ?*COMSVCSEVENTINFO,
AptID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IComThreadEvents_OnThreadStart(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, dwThread: u32, dwThreadCnt: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadStart(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, dwThread, dwThreadCnt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IComThreadEvents_OnThreadTerminate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, dwThread: u32, dwThreadCnt: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadTerminate(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, dwThread, dwThreadCnt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadBindToApartment(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, AptID: u64, dwActCnt: u32, dwLowCnt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadBindToApartment(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, AptID, dwActCnt, dwLowCnt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadUnBind(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, AptID: u64, dwActCnt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadUnBind(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, AptID, dwActCnt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadWorkEnque(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, MsgWorkID: u64, QueueLen: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadWorkEnque(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, MsgWorkID, QueueLen);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadWorkPrivate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, MsgWorkID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadWorkPrivate(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, MsgWorkID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadWorkPublic(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, MsgWorkID: u64, QueueLen: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadWorkPublic(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, MsgWorkID, QueueLen);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadWorkRedirect(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, MsgWorkID: u64, QueueLen: u32, ThreadNum: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadWorkRedirect(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, MsgWorkID, QueueLen, ThreadNum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadWorkReject(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ThreadID: u64, MsgWorkID: u64, QueueLen: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadWorkReject(@ptrCast(*const IComThreadEvents, self), pInfo, ThreadID, MsgWorkID, QueueLen);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadAssignApartment(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, AptID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadAssignApartment(@ptrCast(*const IComThreadEvents, self), pInfo, guidActivity, AptID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComThreadEvents_OnThreadUnassignApartment(self: *const T, pInfo: ?*COMSVCSEVENTINFO, AptID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComThreadEvents.VTable, self.vtable).OnThreadUnassignApartment(@ptrCast(*const IComThreadEvents, self), pInfo, AptID);
}
};}
pub usingnamespace MethodMixin(@This());
};
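// The IComThreadEvents callbacks above trace worker-thread activity in the
// host process: thread start and termination, binding and unbinding of
// threads to apartments, queueing, dispatch, redirection and rejection of
// work items, and apartment assignment. ThreadID, AptID and MsgWorkID appear
// to be opaque 64-bit identifiers used to correlate related events.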
// TODO: this type is limited to platform 'windows5.0'
const IID_IComAppEvents_Value = @import("../zig.zig").Guid.initString("683130a6-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComAppEvents = &IID_IComAppEvents_Value;
pub const IComAppEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnAppActivation: fn(
self: *const IComAppEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnAppShutdown: fn(
self: *const IComAppEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnAppForceShutdown: fn(
self: *const IComAppEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComAppEvents_OnAppActivation(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComAppEvents.VTable, self.vtable).OnAppActivation(@ptrCast(*const IComAppEvents, self), pInfo, guidApp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComAppEvents_OnAppShutdown(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComAppEvents.VTable, self.vtable).OnAppShutdown(@ptrCast(*const IComAppEvents, self), pInfo, guidApp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComAppEvents_OnAppForceShutdown(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComAppEvents.VTable, self.vtable).OnAppForceShutdown(@ptrCast(*const IComAppEvents, self), pInfo, guidApp);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComInstanceEvents_Value = @import("../zig.zig").Guid.initString("683130a7-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComInstanceEvents = &IID_IComInstanceEvents_Value;
pub const IComInstanceEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjectCreate: fn(
self: *const IComInstanceEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
clsid: ?*const Guid,
tsid: ?*const Guid,
CtxtID: u64,
ObjectID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjectDestroy: fn(
self: *const IComInstanceEvents,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComInstanceEvents_OnObjectCreate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, clsid: ?*const Guid, tsid: ?*const Guid, CtxtID: u64, ObjectID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComInstanceEvents.VTable, self.vtable).OnObjectCreate(@ptrCast(*const IComInstanceEvents, self), pInfo, guidActivity, clsid, tsid, CtxtID, ObjectID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComInstanceEvents_OnObjectDestroy(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComInstanceEvents.VTable, self.vtable).OnObjectDestroy(@ptrCast(*const IComInstanceEvents, self), pInfo, CtxtID);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComTransactionEvents_Value = @import("../zig.zig").Guid.initString("683130a8-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComTransactionEvents = &IID_IComTransactionEvents_Value;
pub const IComTransactionEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnTransactionStart: fn(
self: *const IComTransactionEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
tsid: ?*const Guid,
fRoot: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnTransactionPrepare: fn(
self: *const IComTransactionEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
fVoteYes: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnTransactionAbort: fn(
self: *const IComTransactionEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnTransactionCommit: fn(
self: *const IComTransactionEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransactionEvents_OnTransactionStart(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid, tsid: ?*const Guid, fRoot: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransactionEvents.VTable, self.vtable).OnTransactionStart(@ptrCast(*const IComTransactionEvents, self), pInfo, guidTx, tsid, fRoot);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransactionEvents_OnTransactionPrepare(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid, fVoteYes: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransactionEvents.VTable, self.vtable).OnTransactionPrepare(@ptrCast(*const IComTransactionEvents, self), pInfo, guidTx, fVoteYes);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransactionEvents_OnTransactionAbort(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransactionEvents.VTable, self.vtable).OnTransactionAbort(@ptrCast(*const IComTransactionEvents, self), pInfo, guidTx);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransactionEvents_OnTransactionCommit(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransactionEvents.VTable, self.vtable).OnTransactionCommit(@ptrCast(*const IComTransactionEvents, self), pInfo, guidTx);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComMethodEvents_Value = @import("../zig.zig").Guid.initString("683130a9-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComMethodEvents = &IID_IComMethodEvents_Value;
pub const IComMethodEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnMethodCall: fn(
self: *const IComMethodEvents,
pInfo: ?*COMSVCSEVENTINFO,
oid: u64,
guidCid: ?*const Guid,
guidRid: ?*const Guid,
iMeth: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnMethodReturn: fn(
self: *const IComMethodEvents,
pInfo: ?*COMSVCSEVENTINFO,
oid: u64,
guidCid: ?*const Guid,
guidRid: ?*const Guid,
iMeth: u32,
hresult: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnMethodException: fn(
self: *const IComMethodEvents,
pInfo: ?*COMSVCSEVENTINFO,
oid: u64,
guidCid: ?*const Guid,
guidRid: ?*const Guid,
iMeth: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMethodEvents_OnMethodCall(self: *const T, pInfo: ?*COMSVCSEVENTINFO, oid: u64, guidCid: ?*const Guid, guidRid: ?*const Guid, iMeth: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMethodEvents.VTable, self.vtable).OnMethodCall(@ptrCast(*const IComMethodEvents, self), pInfo, oid, guidCid, guidRid, iMeth);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMethodEvents_OnMethodReturn(self: *const T, pInfo: ?*COMSVCSEVENTINFO, oid: u64, guidCid: ?*const Guid, guidRid: ?*const Guid, iMeth: u32, hresult: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMethodEvents.VTable, self.vtable).OnMethodReturn(@ptrCast(*const IComMethodEvents, self), pInfo, oid, guidCid, guidRid, iMeth, hresult);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMethodEvents_OnMethodException(self: *const T, pInfo: ?*COMSVCSEVENTINFO, oid: u64, guidCid: ?*const Guid, guidRid: ?*const Guid, iMeth: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMethodEvents.VTable, self.vtable).OnMethodException(@ptrCast(*const IComMethodEvents, self), pInfo, oid, guidCid, guidRid, iMeth);
}
};}
pub usingnamespace MethodMixin(@This());
};
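// OnMethodCall, OnMethodReturn and OnMethodException bracket a single method
// invocation on a component: oid identifies the object instance, guidCid and
// guidRid carry the class and interface GUIDs, iMeth is the method's v-table
// slot, and OnMethodReturn additionally reports the HRESULT the method
// returned.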
// TODO: this type is limited to platform 'windows5.0'
const IID_IComObjectEvents_Value = @import("../zig.zig").Guid.initString("683130aa-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComObjectEvents = &IID_IComObjectEvents_Value;
pub const IComObjectEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjectActivate: fn(
self: *const IComObjectEvents,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
ObjectID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjectDeactivate: fn(
self: *const IComObjectEvents,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
ObjectID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnDisableCommit: fn(
self: *const IComObjectEvents,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnEnableCommit: fn(
self: *const IComObjectEvents,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnSetComplete: fn(
self: *const IComObjectEvents,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnSetAbort: fn(
self: *const IComObjectEvents,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectEvents_OnObjectActivate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64, ObjectID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectEvents.VTable, self.vtable).OnObjectActivate(@ptrCast(*const IComObjectEvents, self), pInfo, CtxtID, ObjectID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectEvents_OnObjectDeactivate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64, ObjectID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectEvents.VTable, self.vtable).OnObjectDeactivate(@ptrCast(*const IComObjectEvents, self), pInfo, CtxtID, ObjectID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectEvents_OnDisableCommit(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectEvents.VTable, self.vtable).OnDisableCommit(@ptrCast(*const IComObjectEvents, self), pInfo, CtxtID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectEvents_OnEnableCommit(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectEvents.VTable, self.vtable).OnEnableCommit(@ptrCast(*const IComObjectEvents, self), pInfo, CtxtID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectEvents_OnSetComplete(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectEvents.VTable, self.vtable).OnSetComplete(@ptrCast(*const IComObjectEvents, self), pInfo, CtxtID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectEvents_OnSetAbort(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectEvents.VTable, self.vtable).OnSetAbort(@ptrCast(*const IComObjectEvents, self), pInfo, CtxtID);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComResourceEvents_Value = @import("../zig.zig").Guid.initString("683130ab-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComResourceEvents = &IID_IComResourceEvents_Value;
pub const IComResourceEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnResourceCreate: fn(
self: *const IComResourceEvents,
pInfo: ?*COMSVCSEVENTINFO,
ObjectID: u64,
pszType: ?[*:0]const u16,
resId: u64,
enlisted: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnResourceAllocate: fn(
self: *const IComResourceEvents,
pInfo: ?*COMSVCSEVENTINFO,
ObjectID: u64,
pszType: ?[*:0]const u16,
resId: u64,
enlisted: BOOL,
NumRated: u32,
Rating: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnResourceRecycle: fn(
self: *const IComResourceEvents,
pInfo: ?*COMSVCSEVENTINFO,
ObjectID: u64,
pszType: ?[*:0]const u16,
resId: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnResourceDestroy: fn(
self: *const IComResourceEvents,
pInfo: ?*COMSVCSEVENTINFO,
ObjectID: u64,
hr: HRESULT,
pszType: ?[*:0]const u16,
resId: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnResourceTrack: fn(
self: *const IComResourceEvents,
pInfo: ?*COMSVCSEVENTINFO,
ObjectID: u64,
pszType: ?[*:0]const u16,
resId: u64,
enlisted: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComResourceEvents_OnResourceCreate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ObjectID: u64, pszType: ?[*:0]const u16, resId: u64, enlisted: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComResourceEvents.VTable, self.vtable).OnResourceCreate(@ptrCast(*const IComResourceEvents, self), pInfo, ObjectID, pszType, resId, enlisted);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComResourceEvents_OnResourceAllocate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ObjectID: u64, pszType: ?[*:0]const u16, resId: u64, enlisted: BOOL, NumRated: u32, Rating: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComResourceEvents.VTable, self.vtable).OnResourceAllocate(@ptrCast(*const IComResourceEvents, self), pInfo, ObjectID, pszType, resId, enlisted, NumRated, Rating);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComResourceEvents_OnResourceRecycle(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ObjectID: u64, pszType: ?[*:0]const u16, resId: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComResourceEvents.VTable, self.vtable).OnResourceRecycle(@ptrCast(*const IComResourceEvents, self), pInfo, ObjectID, pszType, resId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComResourceEvents_OnResourceDestroy(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ObjectID: u64, hr: HRESULT, pszType: ?[*:0]const u16, resId: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComResourceEvents.VTable, self.vtable).OnResourceDestroy(@ptrCast(*const IComResourceEvents, self), pInfo, ObjectID, hr, pszType, resId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComResourceEvents_OnResourceTrack(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ObjectID: u64, pszType: ?[*:0]const u16, resId: u64, enlisted: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComResourceEvents.VTable, self.vtable).OnResourceTrack(@ptrCast(*const IComResourceEvents, self), pInfo, ObjectID, pszType, resId, enlisted);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComSecurityEvents_Value = @import("../zig.zig").Guid.initString("683130ac-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComSecurityEvents = &IID_IComSecurityEvents_Value;
pub const IComSecurityEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnAuthenticate: fn(
self: *const IComSecurityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
ObjectID: u64,
guidIID: ?*const Guid,
iMeth: u32,
cbByteOrig: u32,
pSidOriginalUser: [*:0]u8,
cbByteCur: u32,
pSidCurrentUser: [*:0]u8,
            bCurrentUserImpersonatingInProc: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnAuthenticateFail: fn(
self: *const IComSecurityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
ObjectID: u64,
guidIID: ?*const Guid,
iMeth: u32,
cbByteOrig: u32,
pSidOriginalUser: [*:0]u8,
cbByteCur: u32,
pSidCurrentUser: [*:0]u8,
            bCurrentUserImpersonatingInProc: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IComSecurityEvents_OnAuthenticate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, ObjectID: u64, guidIID: ?*const Guid, iMeth: u32, cbByteOrig: u32, pSidOriginalUser: [*:0]u8, cbByteCur: u32, pSidCurrentUser: [*:0]u8, bCurrentUserImpersonatingInProc: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IComSecurityEvents.VTable, self.vtable).OnAuthenticate(@ptrCast(*const IComSecurityEvents, self), pInfo, guidActivity, ObjectID, guidIID, iMeth, cbByteOrig, pSidOriginalUser, cbByteCur, pSidCurrentUser, bCurrentUserImpersonatingInProc);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IComSecurityEvents_OnAuthenticateFail(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, ObjectID: u64, guidIID: ?*const Guid, iMeth: u32, cbByteOrig: u32, pSidOriginalUser: [*:0]u8, cbByteCur: u32, pSidCurrentUser: [*:0]u8, bCurrentUserImpersonatingInProc: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IComSecurityEvents.VTable, self.vtable).OnAuthenticateFail(@ptrCast(*const IComSecurityEvents, self), pInfo, guidActivity, ObjectID, guidIID, iMeth, cbByteOrig, pSidOriginalUser, cbByteCur, pSidCurrentUser, bCurrentUserImpersonatingInProc);
}
};}
pub usingnamespace MethodMixin(@This());
};
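// Note on OnAuthenticate/OnAuthenticateFail above: cbByteOrig and cbByteCur
// give the byte lengths of pSidOriginalUser and pSidCurrentUser, which are raw
// binary security identifiers (SIDs) rather than NUL-terminated strings, so
// the sentinel-terminated pointer type is only nominal and the explicit length
// parameters should be used when reading them.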
// TODO: this type is limited to platform 'windows5.0'
const IID_IComObjectPoolEvents_Value = @import("../zig.zig").Guid.initString("683130ad-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComObjectPoolEvents = &IID_IComObjectPoolEvents_Value;
pub const IComObjectPoolEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjPoolPutObject: fn(
self: *const IComObjectPoolEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
nReason: i32,
dwAvailable: u32,
oid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolGetObject: fn(
self: *const IComObjectPoolEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
guidObject: ?*const Guid,
dwAvailable: u32,
oid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolRecycleToTx: fn(
self: *const IComObjectPoolEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
guidObject: ?*const Guid,
guidTx: ?*const Guid,
objid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolGetFromTx: fn(
self: *const IComObjectPoolEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
guidObject: ?*const Guid,
guidTx: ?*const Guid,
objid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents_OnObjPoolPutObject(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, nReason: i32, dwAvailable: u32, oid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents.VTable, self.vtable).OnObjPoolPutObject(@ptrCast(*const IComObjectPoolEvents, self), pInfo, guidObject, nReason, dwAvailable, oid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents_OnObjPoolGetObject(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, guidObject: ?*const Guid, dwAvailable: u32, oid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents.VTable, self.vtable).OnObjPoolGetObject(@ptrCast(*const IComObjectPoolEvents, self), pInfo, guidActivity, guidObject, dwAvailable, oid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents_OnObjPoolRecycleToTx(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, guidObject: ?*const Guid, guidTx: ?*const Guid, objid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents.VTable, self.vtable).OnObjPoolRecycleToTx(@ptrCast(*const IComObjectPoolEvents, self), pInfo, guidActivity, guidObject, guidTx, objid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents_OnObjPoolGetFromTx(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, guidObject: ?*const Guid, guidTx: ?*const Guid, objid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents.VTable, self.vtable).OnObjPoolGetFromTx(@ptrCast(*const IComObjectPoolEvents, self), pInfo, guidActivity, guidObject, guidTx, objid);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComObjectPoolEvents2_Value = @import("../zig.zig").Guid.initString("683130ae-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComObjectPoolEvents2 = &IID_IComObjectPoolEvents2_Value;
pub const IComObjectPoolEvents2 = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjPoolCreateObject: fn(
self: *const IComObjectPoolEvents2,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
dwObjsCreated: u32,
oid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolDestroyObject: fn(
self: *const IComObjectPoolEvents2,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
dwObjsCreated: u32,
oid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolCreateDecision: fn(
self: *const IComObjectPoolEvents2,
pInfo: ?*COMSVCSEVENTINFO,
dwThreadsWaiting: u32,
dwAvail: u32,
dwCreated: u32,
dwMin: u32,
dwMax: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolTimeout: fn(
self: *const IComObjectPoolEvents2,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
guidActivity: ?*const Guid,
dwTimeout: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolCreatePool: fn(
self: *const IComObjectPoolEvents2,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
dwMin: u32,
dwMax: u32,
dwTimeout: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents2_OnObjPoolCreateObject(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, dwObjsCreated: u32, oid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents2.VTable, self.vtable).OnObjPoolCreateObject(@ptrCast(*const IComObjectPoolEvents2, self), pInfo, guidObject, dwObjsCreated, oid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents2_OnObjPoolDestroyObject(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, dwObjsCreated: u32, oid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents2.VTable, self.vtable).OnObjPoolDestroyObject(@ptrCast(*const IComObjectPoolEvents2, self), pInfo, guidObject, dwObjsCreated, oid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents2_OnObjPoolCreateDecision(self: *const T, pInfo: ?*COMSVCSEVENTINFO, dwThreadsWaiting: u32, dwAvail: u32, dwCreated: u32, dwMin: u32, dwMax: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents2.VTable, self.vtable).OnObjPoolCreateDecision(@ptrCast(*const IComObjectPoolEvents2, self), pInfo, dwThreadsWaiting, dwAvail, dwCreated, dwMin, dwMax);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents2_OnObjPoolTimeout(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, guidActivity: ?*const Guid, dwTimeout: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents2.VTable, self.vtable).OnObjPoolTimeout(@ptrCast(*const IComObjectPoolEvents2, self), pInfo, guidObject, guidActivity, dwTimeout);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPoolEvents2_OnObjPoolCreatePool(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, dwMin: u32, dwMax: u32, dwTimeout: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPoolEvents2.VTable, self.vtable).OnObjPoolCreatePool(@ptrCast(*const IComObjectPoolEvents2, self), pInfo, guidObject, dwMin, dwMax, dwTimeout);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComObjectConstructionEvents_Value = @import("../zig.zig").Guid.initString("683130af-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComObjectConstructionEvents = &IID_IComObjectConstructionEvents_Value;
pub const IComObjectConstructionEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjectConstruct: fn(
self: *const IComObjectConstructionEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
sConstructString: ?[*:0]const u16,
oid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectConstructionEvents_OnObjectConstruct(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, sConstructString: ?[*:0]const u16, oid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectConstructionEvents.VTable, self.vtable).OnObjectConstruct(@ptrCast(*const IComObjectConstructionEvents, self), pInfo, guidObject, sConstructString, oid);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComActivityEvents_Value = @import("../zig.zig").Guid.initString("683130b0-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComActivityEvents = &IID_IComActivityEvents_Value;
pub const IComActivityEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnActivityCreate: fn(
self: *const IComActivityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnActivityDestroy: fn(
self: *const IComActivityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnActivityEnter: fn(
self: *const IComActivityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidCurrent: ?*const Guid,
guidEntered: ?*const Guid,
dwThread: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnActivityTimeout: fn(
self: *const IComActivityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidCurrent: ?*const Guid,
guidEntered: ?*const Guid,
dwThread: u32,
dwTimeout: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnActivityReenter: fn(
self: *const IComActivityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidCurrent: ?*const Guid,
dwThread: u32,
dwCallDepth: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnActivityLeave: fn(
self: *const IComActivityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidCurrent: ?*const Guid,
guidLeft: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnActivityLeaveSame: fn(
self: *const IComActivityEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidCurrent: ?*const Guid,
dwCallDepth: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComActivityEvents_OnActivityCreate(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComActivityEvents.VTable, self.vtable).OnActivityCreate(@ptrCast(*const IComActivityEvents, self), pInfo, guidActivity);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComActivityEvents_OnActivityDestroy(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComActivityEvents.VTable, self.vtable).OnActivityDestroy(@ptrCast(*const IComActivityEvents, self), pInfo, guidActivity);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComActivityEvents_OnActivityEnter(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidCurrent: ?*const Guid, guidEntered: ?*const Guid, dwThread: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComActivityEvents.VTable, self.vtable).OnActivityEnter(@ptrCast(*const IComActivityEvents, self), pInfo, guidCurrent, guidEntered, dwThread);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComActivityEvents_OnActivityTimeout(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidCurrent: ?*const Guid, guidEntered: ?*const Guid, dwThread: u32, dwTimeout: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComActivityEvents.VTable, self.vtable).OnActivityTimeout(@ptrCast(*const IComActivityEvents, self), pInfo, guidCurrent, guidEntered, dwThread, dwTimeout);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComActivityEvents_OnActivityReenter(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidCurrent: ?*const Guid, dwThread: u32, dwCallDepth: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComActivityEvents.VTable, self.vtable).OnActivityReenter(@ptrCast(*const IComActivityEvents, self), pInfo, guidCurrent, dwThread, dwCallDepth);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComActivityEvents_OnActivityLeave(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidCurrent: ?*const Guid, guidLeft: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComActivityEvents.VTable, self.vtable).OnActivityLeave(@ptrCast(*const IComActivityEvents, self), pInfo, guidCurrent, guidLeft);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComActivityEvents_OnActivityLeaveSame(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidCurrent: ?*const Guid, dwCallDepth: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComActivityEvents.VTable, self.vtable).OnActivityLeaveSame(@ptrCast(*const IComActivityEvents, self), pInfo, guidCurrent, dwCallDepth);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComIdentityEvents_Value = @import("../zig.zig").Guid.initString("683130b1-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComIdentityEvents = &IID_IComIdentityEvents_Value;
pub const IComIdentityEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnIISRequestInfo: fn(
self: *const IComIdentityEvents,
pInfo: ?*COMSVCSEVENTINFO,
ObjId: u64,
pszClientIP: ?[*:0]const u16,
pszServerIP: ?[*:0]const u16,
pszURL: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComIdentityEvents_OnIISRequestInfo(self: *const T, pInfo: ?*COMSVCSEVENTINFO, ObjId: u64, pszClientIP: ?[*:0]const u16, pszServerIP: ?[*:0]const u16, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IComIdentityEvents.VTable, self.vtable).OnIISRequestInfo(@ptrCast(*const IComIdentityEvents, self), pInfo, ObjId, pszClientIP, pszServerIP, pszURL);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComQCEvents_Value = @import("../zig.zig").Guid.initString("683130b2-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComQCEvents = &IID_IComQCEvents_Value;
pub const IComQCEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnQCRecord: fn(
self: *const IComQCEvents,
pInfo: ?*COMSVCSEVENTINFO,
objid: u64,
szQueue: *[60]u16,
guidMsgId: ?*const Guid,
guidWorkFlowId: ?*const Guid,
msmqhr: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnQCQueueOpen: fn(
self: *const IComQCEvents,
pInfo: ?*COMSVCSEVENTINFO,
szQueue: *[60]u16,
QueueID: u64,
hr: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnQCReceive: fn(
self: *const IComQCEvents,
pInfo: ?*COMSVCSEVENTINFO,
QueueID: u64,
guidMsgId: ?*const Guid,
guidWorkFlowId: ?*const Guid,
hr: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnQCReceiveFail: fn(
self: *const IComQCEvents,
pInfo: ?*COMSVCSEVENTINFO,
QueueID: u64,
msmqhr: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnQCMoveToReTryQueue: fn(
self: *const IComQCEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidMsgId: ?*const Guid,
guidWorkFlowId: ?*const Guid,
RetryIndex: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnQCMoveToDeadQueue: fn(
self: *const IComQCEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidMsgId: ?*const Guid,
guidWorkFlowId: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnQCPlayback: fn(
self: *const IComQCEvents,
pInfo: ?*COMSVCSEVENTINFO,
objid: u64,
guidMsgId: ?*const Guid,
guidWorkFlowId: ?*const Guid,
hr: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComQCEvents_OnQCRecord(self: *const T, pInfo: ?*COMSVCSEVENTINFO, objid: u64, szQueue: *[60]u16, guidMsgId: ?*const Guid, guidWorkFlowId: ?*const Guid, msmqhr: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComQCEvents.VTable, self.vtable).OnQCRecord(@ptrCast(*const IComQCEvents, self), pInfo, objid, szQueue, guidMsgId, guidWorkFlowId, msmqhr);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComQCEvents_OnQCQueueOpen(self: *const T, pInfo: ?*COMSVCSEVENTINFO, szQueue: *[60]u16, QueueID: u64, hr: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComQCEvents.VTable, self.vtable).OnQCQueueOpen(@ptrCast(*const IComQCEvents, self), pInfo, szQueue, QueueID, hr);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComQCEvents_OnQCReceive(self: *const T, pInfo: ?*COMSVCSEVENTINFO, QueueID: u64, guidMsgId: ?*const Guid, guidWorkFlowId: ?*const Guid, hr: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComQCEvents.VTable, self.vtable).OnQCReceive(@ptrCast(*const IComQCEvents, self), pInfo, QueueID, guidMsgId, guidWorkFlowId, hr);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComQCEvents_OnQCReceiveFail(self: *const T, pInfo: ?*COMSVCSEVENTINFO, QueueID: u64, msmqhr: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComQCEvents.VTable, self.vtable).OnQCReceiveFail(@ptrCast(*const IComQCEvents, self), pInfo, QueueID, msmqhr);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComQCEvents_OnQCMoveToReTryQueue(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidMsgId: ?*const Guid, guidWorkFlowId: ?*const Guid, RetryIndex: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComQCEvents.VTable, self.vtable).OnQCMoveToReTryQueue(@ptrCast(*const IComQCEvents, self), pInfo, guidMsgId, guidWorkFlowId, RetryIndex);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComQCEvents_OnQCMoveToDeadQueue(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidMsgId: ?*const Guid, guidWorkFlowId: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComQCEvents.VTable, self.vtable).OnQCMoveToDeadQueue(@ptrCast(*const IComQCEvents, self), pInfo, guidMsgId, guidWorkFlowId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComQCEvents_OnQCPlayback(self: *const T, pInfo: ?*COMSVCSEVENTINFO, objid: u64, guidMsgId: ?*const Guid, guidWorkFlowId: ?*const Guid, hr: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComQCEvents.VTable, self.vtable).OnQCPlayback(@ptrCast(*const IComQCEvents, self), pInfo, objid, guidMsgId, guidWorkFlowId, hr);
}
};}
pub usingnamespace MethodMixin(@This());
};
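// The queued-components (QC) events above follow a message through MSMQ:
// recording, queue open, receive (or receive failure), moves to the retry and
// dead-letter queues, and finally playback against the target object. szQueue
// is passed as a fixed 60-element UTF-16 buffer, and msmqhr/hr report the
// MSMQ or playback HRESULT for the step in question.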
// TODO: this type is limited to platform 'windows5.0'
const IID_IComExceptionEvents_Value = @import("../zig.zig").Guid.initString("683130b3-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComExceptionEvents = &IID_IComExceptionEvents_Value;
pub const IComExceptionEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnExceptionUser: fn(
self: *const IComExceptionEvents,
pInfo: ?*COMSVCSEVENTINFO,
code: u32,
address: u64,
pszStackTrace: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComExceptionEvents_OnExceptionUser(self: *const T, pInfo: ?*COMSVCSEVENTINFO, code: u32, address: u64, pszStackTrace: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IComExceptionEvents.VTable, self.vtable).OnExceptionUser(@ptrCast(*const IComExceptionEvents, self), pInfo, code, address, pszStackTrace);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_ILBEvents_Value = @import("../zig.zig").Guid.initString("683130b4-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_ILBEvents = &IID_ILBEvents_Value;
pub const ILBEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
TargetUp: fn(
self: *const ILBEvents,
bstrServerName: ?BSTR,
bstrClsidEng: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
TargetDown: fn(
self: *const ILBEvents,
bstrServerName: ?BSTR,
bstrClsidEng: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EngineDefined: fn(
self: *const ILBEvents,
bstrPropName: ?BSTR,
varPropValue: ?*VARIANT,
bstrClsidEng: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ILBEvents_TargetUp(self: *const T, bstrServerName: ?BSTR, bstrClsidEng: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ILBEvents.VTable, self.vtable).TargetUp(@ptrCast(*const ILBEvents, self), bstrServerName, bstrClsidEng);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ILBEvents_TargetDown(self: *const T, bstrServerName: ?BSTR, bstrClsidEng: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ILBEvents.VTable, self.vtable).TargetDown(@ptrCast(*const ILBEvents, self), bstrServerName, bstrClsidEng);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ILBEvents_EngineDefined(self: *const T, bstrPropName: ?BSTR, varPropValue: ?*VARIANT, bstrClsidEng: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ILBEvents.VTable, self.vtable).EngineDefined(@ptrCast(*const ILBEvents, self), bstrPropName, varPropValue, bstrClsidEng);
}
};}
pub usingnamespace MethodMixin(@This());
};
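// ILBEvents reports component load balancing activity. Unlike the other event
// interfaces in this group it carries no COMSVCSEVENTINFO header (and no
// platform TODO): TargetUp and TargetDown announce a routing target, named by
// server name and load-balancing engine CLSID as BSTRs, coming online or going
// offline, and EngineDefined passes through an engine-defined property/value
// pair.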
// TODO: this type is limited to platform 'windows5.0'
const IID_IComCRMEvents_Value = @import("../zig.zig").Guid.initString("683130b5-2e50-11d2-98a5-00c04f8ee1c4");
pub const IID_IComCRMEvents = &IID_IComCRMEvents_Value;
pub const IComCRMEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnCRMRecoveryStart: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMRecoveryDone: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMCheckpoint: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMBegin: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
guidActivity: Guid,
guidTx: Guid,
szProgIdCompensator: *[64]u16,
szDescription: *[64]u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMPrepare: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMCommit: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMAbort: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMIndoubt: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMDone: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMRelease: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMAnalyze: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
dwCrmRecordType: u32,
dwRecordSize: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMWrite: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
fVariants: BOOL,
dwRecordSize: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMForget: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMForce: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnCRMDeliver: fn(
self: *const IComCRMEvents,
pInfo: ?*COMSVCSEVENTINFO,
guidClerkCLSID: Guid,
fVariants: BOOL,
dwRecordSize: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMRecoveryStart(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMRecoveryStart(@ptrCast(*const IComCRMEvents, self), pInfo, guidApp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMRecoveryDone(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMRecoveryDone(@ptrCast(*const IComCRMEvents, self), pInfo, guidApp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMCheckpoint(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMCheckpoint(@ptrCast(*const IComCRMEvents, self), pInfo, guidApp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMBegin(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid, guidActivity: Guid, guidTx: Guid, szProgIdCompensator: *[64]u16, szDescription: *[64]u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMBegin(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID, guidActivity, guidTx, szProgIdCompensator, szDescription);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMPrepare(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMPrepare(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMCommit(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMCommit(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMAbort(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMAbort(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMIndoubt(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMIndoubt(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMDone(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMDone(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMRelease(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMRelease(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMAnalyze(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid, dwCrmRecordType: u32, dwRecordSize: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMAnalyze(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID, dwCrmRecordType, dwRecordSize);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMWrite(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid, fVariants: BOOL, dwRecordSize: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMWrite(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID, fVariants, dwRecordSize);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMForget(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMForget(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMForce(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMForce(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComCRMEvents_OnCRMDeliver(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidClerkCLSID: Guid, fVariants: BOOL, dwRecordSize: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComCRMEvents.VTable, self.vtable).OnCRMDeliver(@ptrCast(*const IComCRMEvents, self), pInfo, guidClerkCLSID, fVariants, dwRecordSize);
}
};}
pub usingnamespace MethodMixin(@This());
};
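// Illustrative sketch, not part of the generated bindings: OnCRMBegin passes two
// fixed 64-element UTF-16 buffers (the compensator ProgID and a description). A
// handler might trim them at the first NUL and convert to UTF-8 for logging. The
// helper name and the output buffer size are assumptions for demonstration only.
fn exampleCrmFixedBufferToUtf8(buf16: *[64]u16, out_utf8: *[192]u8) usize {
    const std = @import("std");
    // The buffer is fixed-size and may not be fully used; stop at the first NUL if
    // one is present, otherwise take all 64 code units.
    const used: []const u16 = if (std.mem.indexOfScalar(u16, buf16, 0)) |nul| buf16[0..nul] else buf16;
    // Worst case is 3 UTF-8 bytes per UTF-16 code unit, so 192 output bytes always suffice.
    return std.unicode.utf16leToUtf8(out_utf8, used) catch 0;
}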
// TODO: this type is limited to platform 'windows5.0'
const IID_IComMethod2Events_Value = @import("../zig.zig").Guid.initString("fb388aaa-567d-4024-af8e-6e93ee748573");
pub const IID_IComMethod2Events = &IID_IComMethod2Events_Value;
pub const IComMethod2Events = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnMethodCall2: fn(
self: *const IComMethod2Events,
pInfo: ?*COMSVCSEVENTINFO,
oid: u64,
guidCid: ?*const Guid,
guidRid: ?*const Guid,
dwThread: u32,
iMeth: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnMethodReturn2: fn(
self: *const IComMethod2Events,
pInfo: ?*COMSVCSEVENTINFO,
oid: u64,
guidCid: ?*const Guid,
guidRid: ?*const Guid,
dwThread: u32,
iMeth: u32,
hresult: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnMethodException2: fn(
self: *const IComMethod2Events,
pInfo: ?*COMSVCSEVENTINFO,
oid: u64,
guidCid: ?*const Guid,
guidRid: ?*const Guid,
dwThread: u32,
iMeth: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMethod2Events_OnMethodCall2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, oid: u64, guidCid: ?*const Guid, guidRid: ?*const Guid, dwThread: u32, iMeth: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMethod2Events.VTable, self.vtable).OnMethodCall2(@ptrCast(*const IComMethod2Events, self), pInfo, oid, guidCid, guidRid, dwThread, iMeth);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMethod2Events_OnMethodReturn2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, oid: u64, guidCid: ?*const Guid, guidRid: ?*const Guid, dwThread: u32, iMeth: u32, hresult: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMethod2Events.VTable, self.vtable).OnMethodReturn2(@ptrCast(*const IComMethod2Events, self), pInfo, oid, guidCid, guidRid, dwThread, iMeth, hresult);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMethod2Events_OnMethodException2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, oid: u64, guidCid: ?*const Guid, guidRid: ?*const Guid, dwThread: u32, iMeth: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMethod2Events.VTable, self.vtable).OnMethodException2(@ptrCast(*const IComMethod2Events, self), pInfo, oid, guidCid, guidRid, dwThread, iMeth);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IComTrackingInfoEvents_Value = @import("../zig.zig").Guid.initString("4e6cdcc9-fb25-4fd5-9cc5-c9f4b6559cec");
pub const IID_IComTrackingInfoEvents = &IID_IComTrackingInfoEvents_Value;
pub const IComTrackingInfoEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnNewTrackingInfo: fn(
self: *const IComTrackingInfoEvents,
pToplevelCollection: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTrackingInfoEvents_OnNewTrackingInfo(self: *const T, pToplevelCollection: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTrackingInfoEvents.VTable, self.vtable).OnNewTrackingInfo(@ptrCast(*const IComTrackingInfoEvents, self), pToplevelCollection);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const TRACKING_COLL_TYPE = enum(i32) {
PROCESSES = 0,
APPLICATIONS = 1,
COMPONENTS = 2,
};
pub const TRKCOLL_PROCESSES = TRACKING_COLL_TYPE.PROCESSES;
pub const TRKCOLL_APPLICATIONS = TRACKING_COLL_TYPE.APPLICATIONS;
pub const TRKCOLL_COMPONENTS = TRACKING_COLL_TYPE.COMPONENTS;
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IComTrackingInfoCollection_Value = @import("../zig.zig").Guid.initString("c266c677-c9ad-49ab-9fd9-d9661078588a");
pub const IID_IComTrackingInfoCollection = &IID_IComTrackingInfoCollection_Value;
pub const IComTrackingInfoCollection = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Type: fn(
self: *const IComTrackingInfoCollection,
pType: ?*TRACKING_COLL_TYPE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Count: fn(
self: *const IComTrackingInfoCollection,
pCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Item: fn(
self: *const IComTrackingInfoCollection,
ulIndex: u32,
riid: ?*const Guid,
ppv: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTrackingInfoCollection_Type(self: *const T, pType: ?*TRACKING_COLL_TYPE) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTrackingInfoCollection.VTable, self.vtable).Type(@ptrCast(*const IComTrackingInfoCollection, self), pType);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTrackingInfoCollection_Count(self: *const T, pCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTrackingInfoCollection.VTable, self.vtable).Count(@ptrCast(*const IComTrackingInfoCollection, self), pCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTrackingInfoCollection_Item(self: *const T, ulIndex: u32, riid: ?*const Guid, ppv: ?*?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTrackingInfoCollection.VTable, self.vtable).Item(@ptrCast(*const IComTrackingInfoCollection, self), ulIndex, riid, ppv);
}
};}
pub usingnamespace MethodMixin(@This());
};
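// Illustrative sketch, not part of the generated bindings: once a tracking collection
// pointer is available (COM+ passes the top-level collection to
// IComTrackingInfoEvents.OnNewTrackingInfo), the MethodMixin wrappers above can be
// called directly on it. Requesting each item as IComTrackingInfoObject, the helper
// name, and the FAILED-style `hr < 0` check (assuming HRESULT is the signed 32-bit
// value used throughout these bindings) are assumptions; IUnknown_Release is assumed
// to be the wrapper exposed by IUnknown.MethodMixin.
fn exampleEnumerateTrackingCollection(coll: *IComTrackingInfoCollection) HRESULT {
    var kind: TRACKING_COLL_TYPE = undefined;
    var count: u32 = 0;
    var hr = coll.IComTrackingInfoCollection_Type(&kind);
    if (hr < 0) return hr;
    hr = coll.IComTrackingInfoCollection_Count(&count);
    if (hr < 0) return hr;
    var i: u32 = 0;
    while (i < count) : (i += 1) {
        var raw: ?*anyopaque = null;
        hr = coll.IComTrackingInfoCollection_Item(i, IID_IComTrackingInfoObject, &raw);
        if (hr < 0) return hr;
        if (raw) |p| {
            // Each item would be inspected through IComTrackingInfoObject before release.
            const obj = @ptrCast(*IComTrackingInfoObject, @alignCast(@alignOf(IComTrackingInfoObject), p));
            _ = obj.IUnknown_Release();
        }
    }
    return 0;
}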
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IComTrackingInfoObject_Value = @import("../zig.zig").Guid.initString("116e42c5-d8b1-47bf-ab1e-c895ed3e2372");
pub const IID_IComTrackingInfoObject = &IID_IComTrackingInfoObject_Value;
pub const IComTrackingInfoObject = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetValue: fn(
self: *const IComTrackingInfoObject,
szPropertyName: ?PWSTR,
pvarOut: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTrackingInfoObject_GetValue(self: *const T, szPropertyName: ?PWSTR, pvarOut: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTrackingInfoObject.VTable, self.vtable).GetValue(@ptrCast(*const IComTrackingInfoObject, self), szPropertyName, pvarOut);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IComTrackingInfoProperties_Value = @import("../zig.zig").Guid.initString("789b42be-6f6b-443a-898e-67abf390aa14");
pub const IID_IComTrackingInfoProperties = &IID_IComTrackingInfoProperties_Value;
pub const IComTrackingInfoProperties = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
PropCount: fn(
self: *const IComTrackingInfoProperties,
pCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetPropName: fn(
self: *const IComTrackingInfoProperties,
ulIndex: u32,
ppszPropName: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTrackingInfoProperties_PropCount(self: *const T, pCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTrackingInfoProperties.VTable, self.vtable).PropCount(@ptrCast(*const IComTrackingInfoProperties, self), pCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTrackingInfoProperties_GetPropName(self: *const T, ulIndex: u32, ppszPropName: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTrackingInfoProperties.VTable, self.vtable).GetPropName(@ptrCast(*const IComTrackingInfoProperties, self), ulIndex, ppszPropName);
}
};}
pub usingnamespace MethodMixin(@This());
};
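// Illustrative sketch, not part of the generated bindings: a tracking info object
// exposes named properties; IComTrackingInfoProperties enumerates the names and
// IComTrackingInfoObject.GetValue reads each one into a VARIANT. The helper name is
// an assumption, and per COM convention the returned name strings and the VARIANT
// contents would still need to be freed (CoTaskMemFree / VariantClear, not shown here).
fn exampleDumpTrackingProperties(obj: *IComTrackingInfoObject, props: *IComTrackingInfoProperties) HRESULT {
    var count: u32 = 0;
    var hr = props.IComTrackingInfoProperties_PropCount(&count);
    if (hr < 0) return hr;
    var i: u32 = 0;
    while (i < count) : (i += 1) {
        var name: ?PWSTR = null;
        hr = props.IComTrackingInfoProperties_GetPropName(i, &name);
        if (hr < 0) return hr;
        var value: VARIANT = undefined;
        hr = obj.IComTrackingInfoObject_GetValue(name, &value);
        if (hr < 0) return hr;
        // Inspect `value` here before releasing it.
    }
    return 0;
}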
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IComApp2Events_Value = @import("../zig.zig").Guid.initString("1290bc1a-b219-418d-b078-5934ded08242");
pub const IID_IComApp2Events = &IID_IComApp2Events_Value;
pub const IComApp2Events = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnAppActivation2: fn(
self: *const IComApp2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
guidProcess: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnAppShutdown2: fn(
self: *const IComApp2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnAppForceShutdown2: fn(
self: *const IComApp2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnAppPaused2: fn(
self: *const IComApp2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
bPaused: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnAppRecycle2: fn(
self: *const IComApp2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidApp: Guid,
guidProcess: Guid,
lReason: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComApp2Events_OnAppActivation2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid, guidProcess: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComApp2Events.VTable, self.vtable).OnAppActivation2(@ptrCast(*const IComApp2Events, self), pInfo, guidApp, guidProcess);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComApp2Events_OnAppShutdown2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComApp2Events.VTable, self.vtable).OnAppShutdown2(@ptrCast(*const IComApp2Events, self), pInfo, guidApp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComApp2Events_OnAppForceShutdown2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComApp2Events.VTable, self.vtable).OnAppForceShutdown2(@ptrCast(*const IComApp2Events, self), pInfo, guidApp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComApp2Events_OnAppPaused2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid, bPaused: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComApp2Events.VTable, self.vtable).OnAppPaused2(@ptrCast(*const IComApp2Events, self), pInfo, guidApp, bPaused);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComApp2Events_OnAppRecycle2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidApp: Guid, guidProcess: Guid, lReason: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComApp2Events.VTable, self.vtable).OnAppRecycle2(@ptrCast(*const IComApp2Events, self), pInfo, guidApp, guidProcess, lReason);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComTransaction2Events_Value = @import("../zig.zig").Guid.initString("a136f62a-2f94-4288-86e0-d8a1fa4c0299");
pub const IID_IComTransaction2Events = &IID_IComTransaction2Events_Value;
pub const IComTransaction2Events = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnTransactionStart2: fn(
self: *const IComTransaction2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
tsid: ?*const Guid,
fRoot: BOOL,
nIsolationLevel: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnTransactionPrepare2: fn(
self: *const IComTransaction2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
fVoteYes: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnTransactionAbort2: fn(
self: *const IComTransaction2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnTransactionCommit2: fn(
self: *const IComTransaction2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidTx: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransaction2Events_OnTransactionStart2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid, tsid: ?*const Guid, fRoot: BOOL, nIsolationLevel: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransaction2Events.VTable, self.vtable).OnTransactionStart2(@ptrCast(*const IComTransaction2Events, self), pInfo, guidTx, tsid, fRoot, nIsolationLevel);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransaction2Events_OnTransactionPrepare2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid, fVoteYes: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransaction2Events.VTable, self.vtable).OnTransactionPrepare2(@ptrCast(*const IComTransaction2Events, self), pInfo, guidTx, fVoteYes);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransaction2Events_OnTransactionAbort2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransaction2Events.VTable, self.vtable).OnTransactionAbort2(@ptrCast(*const IComTransaction2Events, self), pInfo, guidTx);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComTransaction2Events_OnTransactionCommit2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidTx: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComTransaction2Events.VTable, self.vtable).OnTransactionCommit2(@ptrCast(*const IComTransaction2Events, self), pInfo, guidTx);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComInstance2Events_Value = @import("../zig.zig").Guid.initString("20e3bf07-b506-4ad5-a50c-d2ca5b9c158e");
pub const IID_IComInstance2Events = &IID_IComInstance2Events_Value;
pub const IComInstance2Events = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjectCreate2: fn(
self: *const IComInstance2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
clsid: ?*const Guid,
tsid: ?*const Guid,
CtxtID: u64,
ObjectID: u64,
guidPartition: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjectDestroy2: fn(
self: *const IComInstance2Events,
pInfo: ?*COMSVCSEVENTINFO,
CtxtID: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComInstance2Events_OnObjectCreate2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, clsid: ?*const Guid, tsid: ?*const Guid, CtxtID: u64, ObjectID: u64, guidPartition: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComInstance2Events.VTable, self.vtable).OnObjectCreate2(@ptrCast(*const IComInstance2Events, self), pInfo, guidActivity, clsid, tsid, CtxtID, ObjectID, guidPartition);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComInstance2Events_OnObjectDestroy2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, CtxtID: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComInstance2Events.VTable, self.vtable).OnObjectDestroy2(@ptrCast(*const IComInstance2Events, self), pInfo, CtxtID);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComObjectPool2Events_Value = @import("../zig.zig").Guid.initString("65bf6534-85ea-4f64-8cf4-3d974b2ab1cf");
pub const IID_IComObjectPool2Events = &IID_IComObjectPool2Events_Value;
pub const IComObjectPool2Events = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjPoolPutObject2: fn(
self: *const IComObjectPool2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
nReason: i32,
dwAvailable: u32,
oid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolGetObject2: fn(
self: *const IComObjectPool2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
guidObject: ?*const Guid,
dwAvailable: u32,
oid: u64,
guidPartition: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolRecycleToTx2: fn(
self: *const IComObjectPool2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
guidObject: ?*const Guid,
guidTx: ?*const Guid,
objid: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnObjPoolGetFromTx2: fn(
self: *const IComObjectPool2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidActivity: ?*const Guid,
guidObject: ?*const Guid,
guidTx: ?*const Guid,
objid: u64,
guidPartition: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPool2Events_OnObjPoolPutObject2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, nReason: i32, dwAvailable: u32, oid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPool2Events.VTable, self.vtable).OnObjPoolPutObject2(@ptrCast(*const IComObjectPool2Events, self), pInfo, guidObject, nReason, dwAvailable, oid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPool2Events_OnObjPoolGetObject2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, guidObject: ?*const Guid, dwAvailable: u32, oid: u64, guidPartition: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPool2Events.VTable, self.vtable).OnObjPoolGetObject2(@ptrCast(*const IComObjectPool2Events, self), pInfo, guidActivity, guidObject, dwAvailable, oid, guidPartition);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPool2Events_OnObjPoolRecycleToTx2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, guidObject: ?*const Guid, guidTx: ?*const Guid, objid: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPool2Events.VTable, self.vtable).OnObjPoolRecycleToTx2(@ptrCast(*const IComObjectPool2Events, self), pInfo, guidActivity, guidObject, guidTx, objid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectPool2Events_OnObjPoolGetFromTx2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidActivity: ?*const Guid, guidObject: ?*const Guid, guidTx: ?*const Guid, objid: u64, guidPartition: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectPool2Events.VTable, self.vtable).OnObjPoolGetFromTx2(@ptrCast(*const IComObjectPool2Events, self), pInfo, guidActivity, guidObject, guidTx, objid, guidPartition);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IComObjectConstruction2Events_Value = @import("../zig.zig").Guid.initString("4b5a7827-8df2-45c0-8f6f-57ea1f856a9f");
pub const IID_IComObjectConstruction2Events = &IID_IComObjectConstruction2Events_Value;
pub const IComObjectConstruction2Events = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnObjectConstruct2: fn(
self: *const IComObjectConstruction2Events,
pInfo: ?*COMSVCSEVENTINFO,
guidObject: ?*const Guid,
sConstructString: ?[*:0]const u16,
oid: u64,
guidPartition: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComObjectConstruction2Events_OnObjectConstruct2(self: *const T, pInfo: ?*COMSVCSEVENTINFO, guidObject: ?*const Guid, sConstructString: ?[*:0]const u16, oid: u64, guidPartition: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IComObjectConstruction2Events.VTable, self.vtable).OnObjectConstruct2(@ptrCast(*const IComObjectConstruction2Events, self), pInfo, guidObject, sConstructString, oid, guidPartition);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISystemAppEventData_Value = @import("../zig.zig").Guid.initString("d6d48a3c-d5c5-49e7-8c74-99e4889ed52f");
pub const IID_ISystemAppEventData = &IID_ISystemAppEventData_Value;
pub const ISystemAppEventData = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Startup: fn(
self: *const ISystemAppEventData,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnDataChanged: fn(
self: *const ISystemAppEventData,
dwPID: u32,
dwMask: u32,
dwNumberSinks: u32,
bstrDwMethodMask: ?BSTR,
dwReason: u32,
u64TraceHandle: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISystemAppEventData_Startup(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISystemAppEventData.VTable, self.vtable).Startup(@ptrCast(*const ISystemAppEventData, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISystemAppEventData_OnDataChanged(self: *const T, dwPID: u32, dwMask: u32, dwNumberSinks: u32, bstrDwMethodMask: ?BSTR, dwReason: u32, u64TraceHandle: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const ISystemAppEventData.VTable, self.vtable).OnDataChanged(@ptrCast(*const ISystemAppEventData, self), dwPID, dwMask, dwNumberSinks, bstrDwMethodMask, dwReason, u64TraceHandle);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IMtsEvents_Value = @import("../zig.zig").Guid.initString("bacedf4d-74ab-11d0-b162-00aa00ba3258");
pub const IID_IMtsEvents = &IID_IMtsEvents_Value;
pub const IMtsEvents = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_PackageName: fn(
self: *const IMtsEvents,
pVal: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_PackageGuid: fn(
self: *const IMtsEvents,
pVal: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PostEvent: fn(
self: *const IMtsEvents,
vEvent: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_FireEvents: fn(
self: *const IMtsEvents,
pVal: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProcessID: fn(
self: *const IMtsEvents,
id: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEvents_get_PackageName(self: *const T, pVal: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEvents.VTable, self.vtable).get_PackageName(@ptrCast(*const IMtsEvents, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEvents_get_PackageGuid(self: *const T, pVal: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEvents.VTable, self.vtable).get_PackageGuid(@ptrCast(*const IMtsEvents, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEvents_PostEvent(self: *const T, vEvent: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEvents.VTable, self.vtable).PostEvent(@ptrCast(*const IMtsEvents, self), vEvent);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEvents_get_FireEvents(self: *const T, pVal: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEvents.VTable, self.vtable).get_FireEvents(@ptrCast(*const IMtsEvents, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEvents_GetProcessID(self: *const T, id: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEvents.VTable, self.vtable).GetProcessID(@ptrCast(*const IMtsEvents, self), id);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IMtsEventInfo_Value = @import("../zig.zig").Guid.initString("d56c3dc1-8482-11d0-b170-00aa00ba3258");
pub const IID_IMtsEventInfo = &IID_IMtsEventInfo_Value;
pub const IMtsEventInfo = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Names: fn(
self: *const IMtsEventInfo,
pUnk: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_DisplayName: fn(
self: *const IMtsEventInfo,
sDisplayName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_EventID: fn(
self: *const IMtsEventInfo,
sGuidEventID: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const IMtsEventInfo,
lCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Value: fn(
self: *const IMtsEventInfo,
sKey: ?BSTR,
pVal: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEventInfo_get_Names(self: *const T, pUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEventInfo.VTable, self.vtable).get_Names(@ptrCast(*const IMtsEventInfo, self), pUnk);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEventInfo_get_DisplayName(self: *const T, sDisplayName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEventInfo.VTable, self.vtable).get_DisplayName(@ptrCast(*const IMtsEventInfo, self), sDisplayName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEventInfo_get_EventID(self: *const T, sGuidEventID: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEventInfo.VTable, self.vtable).get_EventID(@ptrCast(*const IMtsEventInfo, self), sGuidEventID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEventInfo_get_Count(self: *const T, lCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEventInfo.VTable, self.vtable).get_Count(@ptrCast(*const IMtsEventInfo, self), lCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsEventInfo_get_Value(self: *const T, sKey: ?BSTR, pVal: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsEventInfo.VTable, self.vtable).get_Value(@ptrCast(*const IMtsEventInfo, self), sKey, pVal);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IMTSLocator_Value = @import("../zig.zig").Guid.initString("d19b8bfd-7f88-11d0-b16e-00aa00ba3258");
pub const IID_IMTSLocator = &IID_IMTSLocator_Value;
pub const IMTSLocator = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
GetEventDispatcher: fn(
self: *const IMTSLocator,
pUnk: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMTSLocator_GetEventDispatcher(self: *const T, pUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IMTSLocator.VTable, self.vtable).GetEventDispatcher(@ptrCast(*const IMTSLocator, self), pUnk);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IMtsGrp_Value = @import("../zig.zig").Guid.initString("4b2e958c-0393-11d1-b1ab-00aa00ba3258");
pub const IID_IMtsGrp = &IID_IMtsGrp_Value;
pub const IMtsGrp = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const IMtsGrp,
pVal: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Item: fn(
self: *const IMtsGrp,
lIndex: i32,
ppUnkDispatcher: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Refresh: fn(
self: *const IMtsGrp,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsGrp_get_Count(self: *const T, pVal: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsGrp.VTable, self.vtable).get_Count(@ptrCast(*const IMtsGrp, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsGrp_Item(self: *const T, lIndex: i32, ppUnkDispatcher: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsGrp.VTable, self.vtable).Item(@ptrCast(*const IMtsGrp, self), lIndex, ppUnkDispatcher);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMtsGrp_Refresh(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IMtsGrp.VTable, self.vtable).Refresh(@ptrCast(*const IMtsGrp, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
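// Illustrative sketch, not part of the generated bindings: IMtsGrp is a collection of
// event dispatchers. A caller would typically Refresh it, read Count, then walk Item
// to obtain each dispatcher as an IUnknown. The helper name is an assumption, and
// IUnknown_Release is assumed to be the wrapper exposed by IUnknown.MethodMixin.
fn exampleWalkMtsGroup(grp: *IMtsGrp) HRESULT {
    var hr = grp.IMtsGrp_Refresh();
    if (hr < 0) return hr;
    var count: i32 = 0;
    hr = grp.IMtsGrp_get_Count(&count);
    if (hr < 0) return hr;
    var i: i32 = 0;
    while (i < count) : (i += 1) {
        var dispatcher: ?*IUnknown = null;
        hr = grp.IMtsGrp_Item(i, &dispatcher);
        if (hr < 0) return hr;
        if (dispatcher) |unk| {
            // The dispatcher could now be queried for IMtsEvents; it is simply released here.
            _ = unk.IUnknown_Release();
        }
    }
    return 0;
}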
// TODO: this type is limited to platform 'windows5.0'
const IID_IMessageMover_Value = @import("../zig.zig").Guid.initString("588a085a-b795-11d1-8054-00c04fc340ee");
pub const IID_IMessageMover = &IID_IMessageMover_Value;
pub const IMessageMover = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_SourcePath: fn(
self: *const IMessageMover,
pVal: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_SourcePath: fn(
self: *const IMessageMover,
newVal: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_DestPath: fn(
self: *const IMessageMover,
pVal: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_DestPath: fn(
self: *const IMessageMover,
newVal: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_CommitBatchSize: fn(
self: *const IMessageMover,
pVal: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_CommitBatchSize: fn(
self: *const IMessageMover,
newVal: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
MoveMessages: fn(
self: *const IMessageMover,
plMessagesMoved: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMessageMover_get_SourcePath(self: *const T, pVal: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMessageMover.VTable, self.vtable).get_SourcePath(@ptrCast(*const IMessageMover, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMessageMover_put_SourcePath(self: *const T, newVal: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMessageMover.VTable, self.vtable).put_SourcePath(@ptrCast(*const IMessageMover, self), newVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMessageMover_get_DestPath(self: *const T, pVal: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMessageMover.VTable, self.vtable).get_DestPath(@ptrCast(*const IMessageMover, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMessageMover_put_DestPath(self: *const T, newVal: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IMessageMover.VTable, self.vtable).put_DestPath(@ptrCast(*const IMessageMover, self), newVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMessageMover_get_CommitBatchSize(self: *const T, pVal: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IMessageMover.VTable, self.vtable).get_CommitBatchSize(@ptrCast(*const IMessageMover, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMessageMover_put_CommitBatchSize(self: *const T, newVal: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IMessageMover.VTable, self.vtable).put_CommitBatchSize(@ptrCast(*const IMessageMover, self), newVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMessageMover_MoveMessages(self: *const T, plMessagesMoved: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IMessageMover.VTable, self.vtable).MoveMessages(@ptrCast(*const IMessageMover, self), plMessagesMoved);
}
};}
pub usingnamespace MethodMixin(@This());
};
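// Illustrative sketch, not part of the generated bindings: IMessageMover is driven by
// setting the source and destination queue paths and then calling MoveMessages. The
// BSTR arguments are assumed to have been allocated by the caller (for example with
// SysAllocString, which is not part of this file); the helper name is an assumption.
fn exampleMoveMessages(mover: *IMessageMover, source: ?BSTR, dest: ?BSTR) HRESULT {
    var hr = mover.IMessageMover_put_SourcePath(source);
    if (hr < 0) return hr;
    hr = mover.IMessageMover_put_DestPath(dest);
    if (hr < 0) return hr;
    var moved: i32 = 0;
    hr = mover.IMessageMover_MoveMessages(&moved);
    // On success, `moved` reports how many messages were transferred.
    return hr;
}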
const IID_IEventServerTrace_Value = @import("../zig.zig").Guid.initString("9a9f12b8-80af-47ab-a579-35ea57725370");
pub const IID_IEventServerTrace = &IID_IEventServerTrace_Value;
pub const IEventServerTrace = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
StartTraceGuid: fn(
self: *const IEventServerTrace,
bstrguidEvent: ?BSTR,
bstrguidFilter: ?BSTR,
lPidFilter: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
StopTraceGuid: fn(
self: *const IEventServerTrace,
bstrguidEvent: ?BSTR,
bstrguidFilter: ?BSTR,
lPidFilter: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumTraceGuid: fn(
self: *const IEventServerTrace,
plCntGuids: ?*i32,
pbstrGuidList: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEventServerTrace_StartTraceGuid(self: *const T, bstrguidEvent: ?BSTR, bstrguidFilter: ?BSTR, lPidFilter: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEventServerTrace.VTable, self.vtable).StartTraceGuid(@ptrCast(*const IEventServerTrace, self), bstrguidEvent, bstrguidFilter, lPidFilter);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEventServerTrace_StopTraceGuid(self: *const T, bstrguidEvent: ?BSTR, bstrguidFilter: ?BSTR, lPidFilter: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEventServerTrace.VTable, self.vtable).StopTraceGuid(@ptrCast(*const IEventServerTrace, self), bstrguidEvent, bstrguidFilter, lPidFilter);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEventServerTrace_EnumTraceGuid(self: *const T, plCntGuids: ?*i32, pbstrGuidList: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IEventServerTrace.VTable, self.vtable).EnumTraceGuid(@ptrCast(*const IEventServerTrace, self), plCntGuids, pbstrGuidList);
}
};}
pub usingnamespace MethodMixin(@This());
};
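// Illustrative sketch, not part of the generated bindings: a trace session is started
// and stopped with the same (event GUID, filter GUID, PID filter) triple, passed as
// caller-allocated BSTRs. The helper name is an assumption for demonstration only.
fn exampleTraceAroundCall(trace: *IEventServerTrace, event_guid: ?BSTR, filter_guid: ?BSTR, pid_filter: i32) HRESULT {
    var hr = trace.IEventServerTrace_StartTraceGuid(event_guid, filter_guid, pid_filter);
    if (hr < 0) return hr;
    // ... the traced work would happen here ...
    return trace.IEventServerTrace_StopTraceGuid(event_guid, filter_guid, pid_filter);
}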
pub const RECYCLE_INFO = extern struct {
guidCombaseProcessIdentifier: Guid,
ProcessStartTime: i64,
dwRecycleLifetimeLimit: u32,
dwRecycleMemoryLimit: u32,
dwRecycleExpirationTimeout: u32,
};
pub const DUMPTYPE = enum(i32) {
FULL = 0,
MINI = 1,
NONE = 2,
};
pub const DUMPTYPE_FULL = DUMPTYPE.FULL;
pub const DUMPTYPE_MINI = DUMPTYPE.MINI;
pub const DUMPTYPE_NONE = DUMPTYPE.NONE;
pub const HANG_INFO = extern struct {
fAppHangMonitorEnabled: BOOL,
fTerminateOnHang: BOOL,
DumpType: DUMPTYPE,
dwHangTimeout: u32,
dwDumpCount: u32,
dwInfoMsgCount: u32,
};
pub const COMPLUS_APPTYPE = enum(i32) {
UNKNOWN = -1,
SERVER = 1,
LIBRARY = 0,
SWC = 2,
};
pub const APPTYPE_UNKNOWN = COMPLUS_APPTYPE.UNKNOWN;
pub const APPTYPE_SERVER = COMPLUS_APPTYPE.SERVER;
pub const APPTYPE_LIBRARY = COMPLUS_APPTYPE.LIBRARY;
pub const APPTYPE_SWC = COMPLUS_APPTYPE.SWC;
pub const CAppStatistics = extern struct {
m_cTotalCalls: u32,
m_cTotalInstances: u32,
m_cTotalClasses: u32,
m_cCallsPerSecond: u32,
};
pub const CAppData = extern struct {
m_idApp: u32,
m_szAppGuid: [40]u16,
m_dwAppProcessId: u32,
m_AppStatistics: CAppStatistics,
};
pub const CCLSIDData = extern struct {
m_clsid: Guid,
m_cReferences: u32,
m_cBound: u32,
m_cPooled: u32,
m_cInCall: u32,
m_dwRespTime: u32,
m_cCallsCompleted: u32,
m_cCallsFailed: u32,
};
pub const CCLSIDData2 = extern struct {
m_clsid: Guid,
m_appid: Guid,
m_partid: Guid,
m_pwszAppName: ?PWSTR,
m_pwszCtxName: ?PWSTR,
m_eAppType: COMPLUS_APPTYPE,
m_cReferences: u32,
m_cBound: u32,
m_cPooled: u32,
m_cInCall: u32,
m_dwRespTime: u32,
m_cCallsCompleted: u32,
m_cCallsFailed: u32,
};
pub const GetAppTrackerDataFlags = enum(i32) {
PROCESS_EXE_NAME = 1,
LIBRARY_APPS = 2,
SWC = 4,
CLASS_NAME = 8,
APPLICATION_NAME = 16,
};
pub const GATD_INCLUDE_PROCESS_EXE_NAME = GetAppTrackerDataFlags.PROCESS_EXE_NAME;
pub const GATD_INCLUDE_LIBRARY_APPS = GetAppTrackerDataFlags.LIBRARY_APPS;
pub const GATD_INCLUDE_SWC = GetAppTrackerDataFlags.SWC;
pub const GATD_INCLUDE_CLASS_NAME = GetAppTrackerDataFlags.CLASS_NAME;
pub const GATD_INCLUDE_APPLICATION_NAME = GetAppTrackerDataFlags.APPLICATION_NAME;
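// Illustrative sketch, not part of the generated bindings: the GATD_* values are bit
// flags, so the u32 `Flags` parameters of IGetAppTrackerData (defined further below)
// take an OR of the enum values. The helper name is an assumption; @enumToInt and the
// two-argument @intCast match the Zig version this file targets.
fn exampleCombineTrackerFlags() u32 {
    const with_exe = @enumToInt(GetAppTrackerDataFlags.PROCESS_EXE_NAME);
    const with_app_name = @enumToInt(GetAppTrackerDataFlags.APPLICATION_NAME);
    return @intCast(u32, with_exe | with_app_name);
}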
pub const ApplicationProcessSummary = extern struct {
PartitionIdPrimaryApplication: Guid,
ApplicationIdPrimaryApplication: Guid,
ApplicationInstanceId: Guid,
ProcessId: u32,
Type: COMPLUS_APPTYPE,
ProcessExeName: ?PWSTR,
IsService: BOOL,
IsPaused: BOOL,
IsRecycled: BOOL,
};
pub const ApplicationProcessStatistics = extern struct {
NumCallsOutstanding: u32,
NumTrackedComponents: u32,
NumComponentInstances: u32,
AvgCallsPerSecond: u32,
Reserved1: u32,
Reserved2: u32,
Reserved3: u32,
Reserved4: u32,
};
pub const ApplicationProcessRecycleInfo = extern struct {
IsRecyclable: BOOL,
IsRecycled: BOOL,
TimeRecycled: FILETIME,
TimeToTerminate: FILETIME,
RecycleReasonCode: i32,
IsPendingRecycle: BOOL,
HasAutomaticLifetimeRecycling: BOOL,
TimeForAutomaticRecycling: FILETIME,
MemoryLimitInKB: u32,
MemoryUsageInKBLastCheck: u32,
ActivationLimit: u32,
NumActivationsLastReported: u32,
CallLimit: u32,
NumCallsLastReported: u32,
};
pub const ApplicationSummary = extern struct {
ApplicationInstanceId: Guid,
PartitionId: Guid,
ApplicationId: Guid,
Type: COMPLUS_APPTYPE,
ApplicationName: ?PWSTR,
NumTrackedComponents: u32,
NumComponentInstances: u32,
};
pub const ComponentSummary = extern struct {
ApplicationInstanceId: Guid,
PartitionId: Guid,
ApplicationId: Guid,
Clsid: Guid,
ClassName: ?PWSTR,
ApplicationName: ?PWSTR,
};
pub const ComponentStatistics = extern struct {
NumInstances: u32,
NumBoundReferences: u32,
NumPooledObjects: u32,
NumObjectsInCall: u32,
AvgResponseTimeInMs: u32,
NumCallsCompletedRecent: u32,
NumCallsFailedRecent: u32,
NumCallsCompletedTotal: u32,
NumCallsFailedTotal: u32,
Reserved1: u32,
Reserved2: u32,
Reserved3: u32,
Reserved4: u32,
};
pub const ComponentHangMonitorInfo = extern struct {
IsMonitored: BOOL,
TerminateOnHang: BOOL,
AvgCallThresholdInMs: u32,
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IGetAppTrackerData_Value = @import("../zig.zig").Guid.initString("507c3ac8-3e12-4cb0-9366-653d3e050638");
pub const IID_IGetAppTrackerData = &IID_IGetAppTrackerData_Value;
pub const IGetAppTrackerData = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetApplicationProcesses: fn(
self: *const IGetAppTrackerData,
PartitionId: ?*const Guid,
ApplicationId: ?*const Guid,
Flags: u32,
NumApplicationProcesses: ?*u32,
ApplicationProcesses: ?[*]?*ApplicationProcessSummary,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetApplicationProcessDetails: fn(
self: *const IGetAppTrackerData,
ApplicationInstanceId: ?*const Guid,
ProcessId: u32,
Flags: u32,
Summary: ?*ApplicationProcessSummary,
Statistics: ?*ApplicationProcessStatistics,
RecycleInfo: ?*ApplicationProcessRecycleInfo,
AnyComponentsHangMonitored: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetApplicationsInProcess: fn(
self: *const IGetAppTrackerData,
ApplicationInstanceId: ?*const Guid,
ProcessId: u32,
PartitionId: ?*const Guid,
Flags: u32,
NumApplicationsInProcess: ?*u32,
Applications: ?[*]?*ApplicationSummary,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetComponentsInProcess: fn(
self: *const IGetAppTrackerData,
ApplicationInstanceId: ?*const Guid,
ProcessId: u32,
PartitionId: ?*const Guid,
ApplicationId: ?*const Guid,
Flags: u32,
NumComponentsInProcess: ?*u32,
Components: ?[*]?*ComponentSummary,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetComponentDetails: fn(
self: *const IGetAppTrackerData,
ApplicationInstanceId: ?*const Guid,
ProcessId: u32,
Clsid: ?*const Guid,
Flags: u32,
Summary: ?*ComponentSummary,
Statistics: ?*ComponentStatistics,
HangMonitorInfo: ?*ComponentHangMonitorInfo,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTrackerDataAsCollectionObject: fn(
self: *const IGetAppTrackerData,
TopLevelCollection: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetSuggestedPollingInterval: fn(
self: *const IGetAppTrackerData,
PollingIntervalInSeconds: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetAppTrackerData_GetApplicationProcesses(self: *const T, PartitionId: ?*const Guid, ApplicationId: ?*const Guid, Flags: u32, NumApplicationProcesses: ?*u32, ApplicationProcesses: ?[*]?*ApplicationProcessSummary) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetAppTrackerData.VTable, self.vtable).GetApplicationProcesses(@ptrCast(*const IGetAppTrackerData, self), PartitionId, ApplicationId, Flags, NumApplicationProcesses, ApplicationProcesses);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetAppTrackerData_GetApplicationProcessDetails(self: *const T, ApplicationInstanceId: ?*const Guid, ProcessId: u32, Flags: u32, Summary: ?*ApplicationProcessSummary, Statistics: ?*ApplicationProcessStatistics, RecycleInfo: ?*ApplicationProcessRecycleInfo, AnyComponentsHangMonitored: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetAppTrackerData.VTable, self.vtable).GetApplicationProcessDetails(@ptrCast(*const IGetAppTrackerData, self), ApplicationInstanceId, ProcessId, Flags, Summary, Statistics, RecycleInfo, AnyComponentsHangMonitored);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetAppTrackerData_GetApplicationsInProcess(self: *const T, ApplicationInstanceId: ?*const Guid, ProcessId: u32, PartitionId: ?*const Guid, Flags: u32, NumApplicationsInProcess: ?*u32, Applications: ?[*]?*ApplicationSummary) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetAppTrackerData.VTable, self.vtable).GetApplicationsInProcess(@ptrCast(*const IGetAppTrackerData, self), ApplicationInstanceId, ProcessId, PartitionId, Flags, NumApplicationsInProcess, Applications);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetAppTrackerData_GetComponentsInProcess(self: *const T, ApplicationInstanceId: ?*const Guid, ProcessId: u32, PartitionId: ?*const Guid, ApplicationId: ?*const Guid, Flags: u32, NumComponentsInProcess: ?*u32, Components: ?[*]?*ComponentSummary) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetAppTrackerData.VTable, self.vtable).GetComponentsInProcess(@ptrCast(*const IGetAppTrackerData, self), ApplicationInstanceId, ProcessId, PartitionId, ApplicationId, Flags, NumComponentsInProcess, Components);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetAppTrackerData_GetComponentDetails(self: *const T, ApplicationInstanceId: ?*const Guid, ProcessId: u32, Clsid: ?*const Guid, Flags: u32, Summary: ?*ComponentSummary, Statistics: ?*ComponentStatistics, HangMonitorInfo: ?*ComponentHangMonitorInfo) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetAppTrackerData.VTable, self.vtable).GetComponentDetails(@ptrCast(*const IGetAppTrackerData, self), ApplicationInstanceId, ProcessId, Clsid, Flags, Summary, Statistics, HangMonitorInfo);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetAppTrackerData_GetTrackerDataAsCollectionObject(self: *const T, TopLevelCollection: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetAppTrackerData.VTable, self.vtable).GetTrackerDataAsCollectionObject(@ptrCast(*const IGetAppTrackerData, self), TopLevelCollection);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetAppTrackerData_GetSuggestedPollingInterval(self: *const T, PollingIntervalInSeconds: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetAppTrackerData.VTable, self.vtable).GetSuggestedPollingInterval(@ptrCast(*const IGetAppTrackerData, self), PollingIntervalInSeconds);
}
};}
pub usingnamespace MethodMixin(@This());
};
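// Illustrative sketch, not part of the generated bindings: the tracker data is meant
// to be polled, and the interface itself reports a suggested interval. The helper
// name and the fallback value are assumptions; a Flags argument for the other methods
// would be built from the GATD_* values shown above.
fn examplePollingIntervalSeconds(tracker: *IGetAppTrackerData) u32 {
    var seconds: u32 = 0;
    const hr = tracker.IGetAppTrackerData_GetSuggestedPollingInterval(&seconds);
    if (hr < 0) return 30; // arbitrary fallback when the call fails
    return seconds;
}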
// TODO: this type is limited to platform 'windows5.0'
const IID_IDispenserManager_Value = @import("../zig.zig").Guid.initString("5cb31e10-2b5f-11cf-be10-00aa00a2fa25");
pub const IID_IDispenserManager = &IID_IDispenserManager_Value;
pub const IDispenserManager = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
RegisterDispenser: fn(
self: *const IDispenserManager,
__MIDL__IDispenserManager0000: ?*IDispenserDriver,
szDispenserName: ?[*:0]const u16,
__MIDL__IDispenserManager0001: ?*?*IHolder,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetContext: fn(
self: *const IDispenserManager,
__MIDL__IDispenserManager0002: ?*usize,
__MIDL__IDispenserManager0003: ?*usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserManager_RegisterDispenser(self: *const T, __MIDL__IDispenserManager0000: ?*IDispenserDriver, szDispenserName: ?[*:0]const u16, __MIDL__IDispenserManager0001: ?*?*IHolder) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserManager.VTable, self.vtable).RegisterDispenser(@ptrCast(*const IDispenserManager, self), __MIDL__IDispenserManager0000, szDispenserName, __MIDL__IDispenserManager0001);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserManager_GetContext(self: *const T, __MIDL__IDispenserManager0002: ?*usize, __MIDL__IDispenserManager0003: ?*usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserManager.VTable, self.vtable).GetContext(@ptrCast(*const IDispenserManager, self), __MIDL__IDispenserManager0002, __MIDL__IDispenserManager0003);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IHolder_Value = @import("../zig.zig").Guid.initString("bf6a1850-2b45-11cf-be10-00aa00a2fa25");
pub const IID_IHolder = &IID_IHolder_Value;
pub const IHolder = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
AllocResource: fn(
self: *const IHolder,
__MIDL__IHolder0000: usize,
__MIDL__IHolder0001: ?*usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
FreeResource: fn(
self: *const IHolder,
__MIDL__IHolder0002: usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
TrackResource: fn(
self: *const IHolder,
__MIDL__IHolder0003: usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
TrackResourceS: fn(
self: *const IHolder,
__MIDL__IHolder0004: ?*u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
UntrackResource: fn(
self: *const IHolder,
__MIDL__IHolder0005: usize,
__MIDL__IHolder0006: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
UntrackResourceS: fn(
self: *const IHolder,
__MIDL__IHolder0007: ?*u16,
__MIDL__IHolder0008: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Close: fn(
self: *const IHolder,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RequestDestroyResource: fn(
self: *const IHolder,
__MIDL__IHolder0009: usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_AllocResource(self: *const T, __MIDL__IHolder0000: usize, __MIDL__IHolder0001: ?*usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).AllocResource(@ptrCast(*const IHolder, self), __MIDL__IHolder0000, __MIDL__IHolder0001);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_FreeResource(self: *const T, __MIDL__IHolder0002: usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).FreeResource(@ptrCast(*const IHolder, self), __MIDL__IHolder0002);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_TrackResource(self: *const T, __MIDL__IHolder0003: usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).TrackResource(@ptrCast(*const IHolder, self), __MIDL__IHolder0003);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_TrackResourceS(self: *const T, __MIDL__IHolder0004: ?*u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).TrackResourceS(@ptrCast(*const IHolder, self), __MIDL__IHolder0004);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_UntrackResource(self: *const T, __MIDL__IHolder0005: usize, __MIDL__IHolder0006: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).UntrackResource(@ptrCast(*const IHolder, self), __MIDL__IHolder0005, __MIDL__IHolder0006);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_UntrackResourceS(self: *const T, __MIDL__IHolder0007: ?*u16, __MIDL__IHolder0008: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).UntrackResourceS(@ptrCast(*const IHolder, self), __MIDL__IHolder0007, __MIDL__IHolder0008);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_Close(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).Close(@ptrCast(*const IHolder, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IHolder_RequestDestroyResource(self: *const T, __MIDL__IHolder0009: usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IHolder.VTable, self.vtable).RequestDestroyResource(@ptrCast(*const IHolder, self), __MIDL__IHolder0009);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IDispenserDriver_Value = @import("../zig.zig").Guid.initString("208b3651-2b48-11cf-be10-00aa00a2fa25");
pub const IID_IDispenserDriver = &IID_IDispenserDriver_Value;
pub const IDispenserDriver = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateResource: fn(
self: *const IDispenserDriver,
ResTypId: usize,
pResId: ?*usize,
pSecsFreeBeforeDestroy: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RateResource: fn(
self: *const IDispenserDriver,
ResTypId: usize,
ResId: usize,
fRequiresTransactionEnlistment: BOOL,
pRating: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnlistResource: fn(
self: *const IDispenserDriver,
ResId: usize,
TransId: usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ResetResource: fn(
self: *const IDispenserDriver,
ResId: usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DestroyResource: fn(
self: *const IDispenserDriver,
ResId: usize,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DestroyResourceS: fn(
self: *const IDispenserDriver,
ResId: ?*u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserDriver_CreateResource(self: *const T, ResTypId: usize, pResId: ?*usize, pSecsFreeBeforeDestroy: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserDriver.VTable, self.vtable).CreateResource(@ptrCast(*const IDispenserDriver, self), ResTypId, pResId, pSecsFreeBeforeDestroy);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserDriver_RateResource(self: *const T, ResTypId: usize, ResId: usize, fRequiresTransactionEnlistment: BOOL, pRating: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserDriver.VTable, self.vtable).RateResource(@ptrCast(*const IDispenserDriver, self), ResTypId, ResId, fRequiresTransactionEnlistment, pRating);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserDriver_EnlistResource(self: *const T, ResId: usize, TransId: usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserDriver.VTable, self.vtable).EnlistResource(@ptrCast(*const IDispenserDriver, self), ResId, TransId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserDriver_ResetResource(self: *const T, ResId: usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserDriver.VTable, self.vtable).ResetResource(@ptrCast(*const IDispenserDriver, self), ResId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserDriver_DestroyResource(self: *const T, ResId: usize) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserDriver.VTable, self.vtable).DestroyResource(@ptrCast(*const IDispenserDriver, self), ResId);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IDispenserDriver_DestroyResourceS(self: *const T, ResId: ?*u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IDispenserDriver.VTable, self.vtable).DestroyResourceS(@ptrCast(*const IDispenserDriver, self), ResId);
}
};}
pub usingnamespace MethodMixin(@This());
};
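// Illustrative usage sketch (not part of the generated bindings): a resource
// dispenser hands its IDispenserDriver to the dispenser manager and receives
// an IHolder that it later uses to track/untrack individual resources. The
// dispenser name below is an example value only.
fn registerExampleDispenser(
    manager: *const IDispenserManager,
    driver: ?*IDispenserDriver,
    out_holder: *?*IHolder,
) HRESULT {
    const dispenser_name = @import("std").unicode.utf8ToUtf16LeStringLiteral("Example Dispenser");
    // The mixin wrapper forwards to the RegisterDispenser vtable slot.
    return manager.IDispenserManager_RegisterDispenser(driver, dispenser_name, out_holder);
}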
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ITransactionProxy_Value = @import("../zig.zig").Guid.initString("02558374-df2e-4dae-bd6b-1d5c994f9bdc");
pub const IID_ITransactionProxy = &IID_ITransactionProxy_Value;
pub const ITransactionProxy = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Commit: fn(
self: *const ITransactionProxy,
guid: Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Abort: fn(
self: *const ITransactionProxy,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Promote: fn(
self: *const ITransactionProxy,
pTransaction: ?*?*ITransaction,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateVoter: fn(
self: *const ITransactionProxy,
pTxAsync: ?*ITransactionVoterNotifyAsync2,
ppBallot: ?*?*ITransactionVoterBallotAsync2,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetIsolationLevel: fn(
self: *const ITransactionProxy,
__MIDL__ITransactionProxy0000: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetIdentifier: fn(
self: *const ITransactionProxy,
pbstrIdentifier: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsReusable: fn(
self: *const ITransactionProxy,
pfIsReusable: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProxy_Commit(self: *const T, guid: Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProxy.VTable, self.vtable).Commit(@ptrCast(*const ITransactionProxy, self), guid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProxy_Abort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProxy.VTable, self.vtable).Abort(@ptrCast(*const ITransactionProxy, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProxy_Promote(self: *const T, pTransaction: ?*?*ITransaction) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProxy.VTable, self.vtable).Promote(@ptrCast(*const ITransactionProxy, self), pTransaction);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProxy_CreateVoter(self: *const T, pTxAsync: ?*ITransactionVoterNotifyAsync2, ppBallot: ?*?*ITransactionVoterBallotAsync2) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProxy.VTable, self.vtable).CreateVoter(@ptrCast(*const ITransactionProxy, self), pTxAsync, ppBallot);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProxy_GetIsolationLevel(self: *const T, __MIDL__ITransactionProxy0000: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProxy.VTable, self.vtable).GetIsolationLevel(@ptrCast(*const ITransactionProxy, self), __MIDL__ITransactionProxy0000);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProxy_GetIdentifier(self: *const T, pbstrIdentifier: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProxy.VTable, self.vtable).GetIdentifier(@ptrCast(*const ITransactionProxy, self), pbstrIdentifier);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProxy_IsReusable(self: *const T, pfIsReusable: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProxy.VTable, self.vtable).IsReusable(@ptrCast(*const ITransactionProxy, self), pfIsReusable);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IContextSecurityPerimeter_Value = @import("../zig.zig").Guid.initString("a7549a29-a7c4-42e1-8dc1-7e3d748dc24a");
pub const IID_IContextSecurityPerimeter = &IID_IContextSecurityPerimeter_Value;
pub const IContextSecurityPerimeter = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetPerimeterFlag: fn(
self: *const IContextSecurityPerimeter,
pFlag: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetPerimeterFlag: fn(
self: *const IContextSecurityPerimeter,
fFlag: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextSecurityPerimeter_GetPerimeterFlag(self: *const T, pFlag: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextSecurityPerimeter.VTable, self.vtable).GetPerimeterFlag(@ptrCast(*const IContextSecurityPerimeter, self), pFlag);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextSecurityPerimeter_SetPerimeterFlag(self: *const T, fFlag: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextSecurityPerimeter.VTable, self.vtable).SetPerimeterFlag(@ptrCast(*const IContextSecurityPerimeter, self), fFlag);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_ITxProxyHolder_Value = @import("../zig.zig").Guid.initString("13d86f31-0139-41af-bcad-c7d50435fe9f");
pub const IID_ITxProxyHolder = &IID_ITxProxyHolder_Value;
pub const ITxProxyHolder = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetIdentifier: fn(
self: *const ITxProxyHolder,
pGuidLtx: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) void,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITxProxyHolder_GetIdentifier(self: *const T, pGuidLtx: ?*Guid) callconv(.Inline) void {
return @ptrCast(*const ITxProxyHolder.VTable, self.vtable).GetIdentifier(@ptrCast(*const ITxProxyHolder, self), pGuidLtx);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IObjectContext_Value = @import("../zig.zig").Guid.initString("51372ae0-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IObjectContext = &IID_IObjectContext_Value;
pub const IObjectContext = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateInstance: fn(
self: *const IObjectContext,
rclsid: ?*const Guid,
riid: ?*const Guid,
ppv: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetComplete: fn(
self: *const IObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetAbort: fn(
self: *const IObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnableCommit: fn(
self: *const IObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DisableCommit: fn(
self: *const IObjectContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsInTransaction: fn(
self: *const IObjectContext,
) callconv(@import("std").os.windows.WINAPI) BOOL,
IsSecurityEnabled: fn(
self: *const IObjectContext,
) callconv(@import("std").os.windows.WINAPI) BOOL,
IsCallerInRole: fn(
self: *const IObjectContext,
bstrRole: ?BSTR,
pfIsInRole: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_CreateInstance(self: *const T, rclsid: ?*const Guid, riid: ?*const Guid, ppv: ?*?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContext.VTable, self.vtable).CreateInstance(@ptrCast(*const IObjectContext, self), rclsid, riid, ppv);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_SetComplete(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContext.VTable, self.vtable).SetComplete(@ptrCast(*const IObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_SetAbort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContext.VTable, self.vtable).SetAbort(@ptrCast(*const IObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_EnableCommit(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContext.VTable, self.vtable).EnableCommit(@ptrCast(*const IObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_DisableCommit(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContext.VTable, self.vtable).DisableCommit(@ptrCast(*const IObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_IsInTransaction(self: *const T) callconv(.Inline) BOOL {
return @ptrCast(*const IObjectContext.VTable, self.vtable).IsInTransaction(@ptrCast(*const IObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_IsSecurityEnabled(self: *const T) callconv(.Inline) BOOL {
return @ptrCast(*const IObjectContext.VTable, self.vtable).IsSecurityEnabled(@ptrCast(*const IObjectContext, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContext_IsCallerInRole(self: *const T, bstrRole: ?BSTR, pfIsInRole: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContext.VTable, self.vtable).IsCallerInRole(@ptrCast(*const IObjectContext, self), bstrRole, pfIsInRole);
}
};}
pub usingnamespace MethodMixin(@This());
};
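// Minimal usage sketch (illustrative only): a COM+ component that obtained its
// object context elsewhere (e.g. via CoGetObjectContext, which is not declared
// in this file) votes on the transaction outcome through the mixin wrappers.
fn finishTransactionalWork(ctx: *const IObjectContext, ok: bool) HRESULT {
    if (!ok) return ctx.IObjectContext_SetAbort();
    return ctx.IObjectContext_SetComplete();
}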
// TODO: this type is limited to platform 'windows5.0'
const IID_IObjectControl_Value = @import("../zig.zig").Guid.initString("51372aec-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IObjectControl = &IID_IObjectControl_Value;
pub const IObjectControl = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Activate: fn(
self: *const IObjectControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Deactivate: fn(
self: *const IObjectControl,
) callconv(@import("std").os.windows.WINAPI) void,
CanBePooled: fn(
self: *const IObjectControl,
) callconv(@import("std").os.windows.WINAPI) BOOL,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectControl_Activate(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectControl.VTable, self.vtable).Activate(@ptrCast(*const IObjectControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectControl_Deactivate(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IObjectControl.VTable, self.vtable).Deactivate(@ptrCast(*const IObjectControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectControl_CanBePooled(self: *const T) callconv(.Inline) BOOL {
return @ptrCast(*const IObjectControl.VTable, self.vtable).CanBePooled(@ptrCast(*const IObjectControl, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IEnumNames_Value = @import("../zig.zig").Guid.initString("51372af2-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IEnumNames = &IID_IEnumNames_Value;
pub const IEnumNames = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Next: fn(
self: *const IEnumNames,
celt: u32,
rgname: ?*?BSTR,
pceltFetched: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Skip: fn(
self: *const IEnumNames,
celt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reset: fn(
self: *const IEnumNames,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Clone: fn(
self: *const IEnumNames,
ppenum: ?*?*IEnumNames,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumNames_Next(self: *const T, celt: u32, rgname: ?*?BSTR, pceltFetched: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumNames.VTable, self.vtable).Next(@ptrCast(*const IEnumNames, self), celt, rgname, pceltFetched);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumNames_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumNames.VTable, self.vtable).Skip(@ptrCast(*const IEnumNames, self), celt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumNames_Reset(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumNames.VTable, self.vtable).Reset(@ptrCast(*const IEnumNames, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumNames_Clone(self: *const T, ppenum: ?*?*IEnumNames) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumNames.VTable, self.vtable).Clone(@ptrCast(*const IEnumNames, self), ppenum);
}
};}
pub usingnamespace MethodMixin(@This());
};
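// Illustrative sketch (not generated): the usual COM enumerator loop, pulling
// one name per call until Next stops filling the output. Each returned BSTR
// would normally be freed with SysFreeString; that is omitted here.
fn countNames(names: *const IEnumNames) u32 {
    var count: u32 = 0;
    while (true) {
        var name: ?BSTR = null;
        var fetched: u32 = 0;
        if (names.IEnumNames_Next(1, &name, &fetched) != 0 or fetched == 0) break;
        count += 1;
    }
    return count;
}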
// TODO: this type is limited to platform 'windows5.0'
const IID_ISecurityProperty_Value = @import("../zig.zig").Guid.initString("51372aea-cae7-11cf-be81-00aa00a2fa25");
pub const IID_ISecurityProperty = &IID_ISecurityProperty_Value;
pub const ISecurityProperty = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetDirectCreatorSID: fn(
self: *const ISecurityProperty,
pSID: ?*?PSID,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOriginalCreatorSID: fn(
self: *const ISecurityProperty,
pSID: ?*?PSID,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDirectCallerSID: fn(
self: *const ISecurityProperty,
pSID: ?*?PSID,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOriginalCallerSID: fn(
self: *const ISecurityProperty,
pSID: ?*?PSID,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ReleaseSID: fn(
self: *const ISecurityProperty,
pSID: ?PSID,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityProperty_GetDirectCreatorSID(self: *const T, pSID: ?*?PSID) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityProperty.VTable, self.vtable).GetDirectCreatorSID(@ptrCast(*const ISecurityProperty, self), pSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityProperty_GetOriginalCreatorSID(self: *const T, pSID: ?*?PSID) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityProperty.VTable, self.vtable).GetOriginalCreatorSID(@ptrCast(*const ISecurityProperty, self), pSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityProperty_GetDirectCallerSID(self: *const T, pSID: ?*?PSID) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityProperty.VTable, self.vtable).GetDirectCallerSID(@ptrCast(*const ISecurityProperty, self), pSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityProperty_GetOriginalCallerSID(self: *const T, pSID: ?*?PSID) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityProperty.VTable, self.vtable).GetOriginalCallerSID(@ptrCast(*const ISecurityProperty, self), pSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISecurityProperty_ReleaseSID(self: *const T, pSID: ?PSID) callconv(.Inline) HRESULT {
return @ptrCast(*const ISecurityProperty.VTable, self.vtable).ReleaseSID(@ptrCast(*const ISecurityProperty, self), pSID);
}
};}
pub usingnamespace MethodMixin(@This());
};
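// Illustrative sketch (not generated): SIDs handed out by ISecurityProperty
// stay owned by the context and are released through ReleaseSID on the same
// interface once the caller is done inspecting them.
fn originalCallerSidIsAvailable(sec: *const ISecurityProperty) bool {
    var sid: ?PSID = null;
    if (sec.ISecurityProperty_GetOriginalCallerSID(&sid) < 0) return false;
    defer _ = sec.ISecurityProperty_ReleaseSID(sid);
    return sid != null;
}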
// TODO: this type is limited to platform 'windows5.0'
const IID_ObjectControl_Value = @import("../zig.zig").Guid.initString("7dc41850-0c31-11d0-8b79-00aa00b8a790");
pub const IID_ObjectControl = &IID_ObjectControl_Value;
pub const ObjectControl = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Activate: fn(
self: *const ObjectControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Deactivate: fn(
self: *const ObjectControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CanBePooled: fn(
self: *const ObjectControl,
pbPoolable: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectControl_Activate(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectControl.VTable, self.vtable).Activate(@ptrCast(*const ObjectControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectControl_Deactivate(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectControl.VTable, self.vtable).Deactivate(@ptrCast(*const ObjectControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ObjectControl_CanBePooled(self: *const T, pbPoolable: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ObjectControl.VTable, self.vtable).CanBePooled(@ptrCast(*const ObjectControl, self), pbPoolable);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISharedProperty_Value = @import("../zig.zig").Guid.initString("2a005c01-a5de-11cf-9e66-00aa00a3f464");
pub const IID_ISharedProperty = &IID_ISharedProperty_Value;
pub const ISharedProperty = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Value: fn(
self: *const ISharedProperty,
pVal: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_Value: fn(
self: *const ISharedProperty,
val: VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedProperty_get_Value(self: *const T, pVal: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedProperty.VTable, self.vtable).get_Value(@ptrCast(*const ISharedProperty, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedProperty_put_Value(self: *const T, val: VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedProperty.VTable, self.vtable).put_Value(@ptrCast(*const ISharedProperty, self), val);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISharedPropertyGroup_Value = @import("../zig.zig").Guid.initString("2a005c07-a5de-11cf-9e66-00aa00a3f464");
pub const IID_ISharedPropertyGroup = &IID_ISharedPropertyGroup_Value;
pub const ISharedPropertyGroup = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
CreatePropertyByPosition: fn(
self: *const ISharedPropertyGroup,
Index: i32,
fExists: ?*i16,
ppProp: ?*?*ISharedProperty,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_PropertyByPosition: fn(
self: *const ISharedPropertyGroup,
Index: i32,
ppProperty: ?*?*ISharedProperty,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateProperty: fn(
self: *const ISharedPropertyGroup,
Name: ?BSTR,
fExists: ?*i16,
ppProp: ?*?*ISharedProperty,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Property: fn(
self: *const ISharedPropertyGroup,
Name: ?BSTR,
ppProperty: ?*?*ISharedProperty,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedPropertyGroup_CreatePropertyByPosition(self: *const T, Index: i32, fExists: ?*i16, ppProp: ?*?*ISharedProperty) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedPropertyGroup.VTable, self.vtable).CreatePropertyByPosition(@ptrCast(*const ISharedPropertyGroup, self), Index, fExists, ppProp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedPropertyGroup_get_PropertyByPosition(self: *const T, Index: i32, ppProperty: ?*?*ISharedProperty) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedPropertyGroup.VTable, self.vtable).get_PropertyByPosition(@ptrCast(*const ISharedPropertyGroup, self), Index, ppProperty);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedPropertyGroup_CreateProperty(self: *const T, Name: ?BSTR, fExists: ?*i16, ppProp: ?*?*ISharedProperty) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedPropertyGroup.VTable, self.vtable).CreateProperty(@ptrCast(*const ISharedPropertyGroup, self), Name, fExists, ppProp);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedPropertyGroup_get_Property(self: *const T, Name: ?BSTR, ppProperty: ?*?*ISharedProperty) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedPropertyGroup.VTable, self.vtable).get_Property(@ptrCast(*const ISharedPropertyGroup, self), Name, ppProperty);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISharedPropertyGroupManager_Value = @import("../zig.zig").Guid.initString("2a005c0d-a5de-11cf-9e66-00aa00a3f464");
pub const IID_ISharedPropertyGroupManager = &IID_ISharedPropertyGroupManager_Value;
pub const ISharedPropertyGroupManager = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
CreatePropertyGroup: fn(
self: *const ISharedPropertyGroupManager,
Name: ?BSTR,
dwIsoMode: ?*i32,
dwRelMode: ?*i32,
fExists: ?*i16,
ppGroup: ?*?*ISharedPropertyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Group: fn(
self: *const ISharedPropertyGroupManager,
Name: ?BSTR,
ppGroup: ?*?*ISharedPropertyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ISharedPropertyGroupManager,
retval: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedPropertyGroupManager_CreatePropertyGroup(self: *const T, Name: ?BSTR, dwIsoMode: ?*i32, dwRelMode: ?*i32, fExists: ?*i16, ppGroup: ?*?*ISharedPropertyGroup) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedPropertyGroupManager.VTable, self.vtable).CreatePropertyGroup(@ptrCast(*const ISharedPropertyGroupManager, self), Name, dwIsoMode, dwRelMode, fExists, ppGroup);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedPropertyGroupManager_get_Group(self: *const T, Name: ?BSTR, ppGroup: ?*?*ISharedPropertyGroup) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedPropertyGroupManager.VTable, self.vtable).get_Group(@ptrCast(*const ISharedPropertyGroupManager, self), Name, ppGroup);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISharedPropertyGroupManager_get__NewEnum(self: *const T, retval: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISharedPropertyGroupManager.VTable, self.vtable).get__NewEnum(@ptrCast(*const ISharedPropertyGroupManager, self), retval);
}
};}
pub usingnamespace MethodMixin(@This());
};
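// Illustrative sketch (not generated): the shared property hierarchy is walked
// manager -> group -> property. Group and property names are caller-supplied
// BSTRs (e.g. from SysAllocString); isolation/release modes are in/out values.
fn createSharedProperty(
    mgr: *const ISharedPropertyGroupManager,
    group_name: ?BSTR,
    prop_name: ?BSTR,
    iso_mode: *i32,
    rel_mode: *i32,
    out_prop: *?*ISharedProperty,
) HRESULT {
    var exists: i16 = 0;
    var group: ?*ISharedPropertyGroup = null;
    const hr = mgr.ISharedPropertyGroupManager_CreatePropertyGroup(group_name, iso_mode, rel_mode, &exists, &group);
    if (hr < 0) return hr;
    // The group returned above creates (or reopens) the named property.
    return group.?.ISharedPropertyGroup_CreateProperty(prop_name, &exists, out_prop);
}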
// TODO: this type is limited to platform 'windows5.0'
const IID_IObjectConstruct_Value = @import("../zig.zig").Guid.initString("41c4f8b3-7439-11d2-98cb-00c04f8ee1c4");
pub const IID_IObjectConstruct = &IID_IObjectConstruct_Value;
pub const IObjectConstruct = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Construct: fn(
self: *const IObjectConstruct,
pCtorObj: ?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectConstruct_Construct(self: *const T, pCtorObj: ?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectConstruct.VTable, self.vtable).Construct(@ptrCast(*const IObjectConstruct, self), pCtorObj);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IObjectConstructString_Value = @import("../zig.zig").Guid.initString("41c4f8b2-7439-11d2-98cb-00c04f8ee1c4");
pub const IID_IObjectConstructString = &IID_IObjectConstructString_Value;
pub const IObjectConstructString = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_ConstructString: fn(
self: *const IObjectConstructString,
pVal: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectConstructString_get_ConstructString(self: *const T, pVal: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectConstructString.VTable, self.vtable).get_ConstructString(@ptrCast(*const IObjectConstructString, self), pVal);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IObjectContextActivity_Value = @import("../zig.zig").Guid.initString("51372afc-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IObjectContextActivity = &IID_IObjectContextActivity_Value;
pub const IObjectContextActivity = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetActivityId: fn(
self: *const IObjectContextActivity,
pGUID: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextActivity_GetActivityId(self: *const T, pGUID: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextActivity.VTable, self.vtable).GetActivityId(@ptrCast(*const IObjectContextActivity, self), pGUID);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IObjectContextInfo_Value = @import("../zig.zig").Guid.initString("75b52ddb-e8ed-11d1-93ad-00aa00ba3258");
pub const IID_IObjectContextInfo = &IID_IObjectContextInfo_Value;
pub const IObjectContextInfo = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
IsInTransaction: fn(
self: *const IObjectContextInfo,
) callconv(@import("std").os.windows.WINAPI) BOOL,
GetTransaction: fn(
self: *const IObjectContextInfo,
pptrans: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTransactionId: fn(
self: *const IObjectContextInfo,
pGuid: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetActivityId: fn(
self: *const IObjectContextInfo,
pGUID: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetContextId: fn(
self: *const IObjectContextInfo,
pGuid: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo_IsInTransaction(self: *const T) callconv(.Inline) BOOL {
return @ptrCast(*const IObjectContextInfo.VTable, self.vtable).IsInTransaction(@ptrCast(*const IObjectContextInfo, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo_GetTransaction(self: *const T, pptrans: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextInfo.VTable, self.vtable).GetTransaction(@ptrCast(*const IObjectContextInfo, self), pptrans);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo_GetTransactionId(self: *const T, pGuid: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextInfo.VTable, self.vtable).GetTransactionId(@ptrCast(*const IObjectContextInfo, self), pGuid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo_GetActivityId(self: *const T, pGUID: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextInfo.VTable, self.vtable).GetActivityId(@ptrCast(*const IObjectContextInfo, self), pGUID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo_GetContextId(self: *const T, pGuid: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextInfo.VTable, self.vtable).GetContextId(@ptrCast(*const IObjectContextInfo, self), pGuid);
}
};}
pub usingnamespace MethodMixin(@This());
};
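// Illustrative sketch (not generated): the id getters fill a caller-provided
// Guid; here the transaction id is only returned when a transaction exists.
fn currentTransactionId(info: *const IObjectContextInfo) ?Guid {
    if (info.IObjectContextInfo_IsInTransaction() == 0) return null;
    var id: Guid = undefined;
    if (info.IObjectContextInfo_GetTransactionId(&id) < 0) return null;
    return id;
}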
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IObjectContextInfo2_Value = @import("../zig.zig").Guid.initString("594be71a-4bc4-438b-9197-cfd176248b09");
pub const IID_IObjectContextInfo2 = &IID_IObjectContextInfo2_Value;
pub const IObjectContextInfo2 = extern struct {
pub const VTable = extern struct {
base: IObjectContextInfo.VTable,
GetPartitionId: fn(
self: *const IObjectContextInfo2,
pGuid: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetApplicationId: fn(
self: *const IObjectContextInfo2,
pGuid: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetApplicationInstanceId: fn(
self: *const IObjectContextInfo2,
pGuid: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IObjectContextInfo.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo2_GetPartitionId(self: *const T, pGuid: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextInfo2.VTable, self.vtable).GetPartitionId(@ptrCast(*const IObjectContextInfo2, self), pGuid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo2_GetApplicationId(self: *const T, pGuid: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextInfo2.VTable, self.vtable).GetApplicationId(@ptrCast(*const IObjectContextInfo2, self), pGuid);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextInfo2_GetApplicationInstanceId(self: *const T, pGuid: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextInfo2.VTable, self.vtable).GetApplicationInstanceId(@ptrCast(*const IObjectContextInfo2, self), pGuid);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ITransactionStatus_Value = @import("../zig.zig").Guid.initString("61f589e8-3724-4898-a0a4-664ae9e1d1b4");
pub const IID_ITransactionStatus = &IID_ITransactionStatus_Value;
pub const ITransactionStatus = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetTransactionStatus: fn(
self: *const ITransactionStatus,
hrStatus: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTransactionStatus: fn(
self: *const ITransactionStatus,
pHrStatus: ?*HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionStatus_SetTransactionStatus(self: *const T, hrStatus: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionStatus.VTable, self.vtable).SetTransactionStatus(@ptrCast(*const ITransactionStatus, self), hrStatus);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionStatus_GetTransactionStatus(self: *const T, pHrStatus: ?*HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionStatus.VTable, self.vtable).GetTransactionStatus(@ptrCast(*const ITransactionStatus, self), pHrStatus);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IObjectContextTip_Value = @import("../zig.zig").Guid.initString("92fd41ca-bad9-11d2-9a2d-00c04f797bc9");
pub const IID_IObjectContextTip = &IID_IObjectContextTip_Value;
pub const IObjectContextTip = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetTipUrl: fn(
self: *const IObjectContextTip,
pTipUrl: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjectContextTip_GetTipUrl(self: *const T, pTipUrl: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IObjectContextTip.VTable, self.vtable).GetTipUrl(@ptrCast(*const IObjectContextTip, self), pTipUrl);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IPlaybackControl_Value = @import("../zig.zig").Guid.initString("51372afd-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IPlaybackControl = &IID_IPlaybackControl_Value;
pub const IPlaybackControl = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
FinalClientRetry: fn(
self: *const IPlaybackControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
FinalServerRetry: fn(
self: *const IPlaybackControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IPlaybackControl_FinalClientRetry(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IPlaybackControl.VTable, self.vtable).FinalClientRetry(@ptrCast(*const IPlaybackControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IPlaybackControl_FinalServerRetry(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IPlaybackControl.VTable, self.vtable).FinalServerRetry(@ptrCast(*const IPlaybackControl, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IGetContextProperties_Value = @import("../zig.zig").Guid.initString("51372af4-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IGetContextProperties = &IID_IGetContextProperties_Value;
pub const IGetContextProperties = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Count: fn(
self: *const IGetContextProperties,
plCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProperty: fn(
self: *const IGetContextProperties,
name: ?BSTR,
pProperty: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumNames: fn(
self: *const IGetContextProperties,
ppenum: ?*?*IEnumNames,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetContextProperties_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetContextProperties.VTable, self.vtable).Count(@ptrCast(*const IGetContextProperties, self), plCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetContextProperties_GetProperty(self: *const T, name: ?BSTR, pProperty: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetContextProperties.VTable, self.vtable).GetProperty(@ptrCast(*const IGetContextProperties, self), name, pProperty);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IGetContextProperties_EnumNames(self: *const T, ppenum: ?*?*IEnumNames) callconv(.Inline) HRESULT {
return @ptrCast(*const IGetContextProperties.VTable, self.vtable).EnumNames(@ptrCast(*const IGetContextProperties, self), ppenum);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const TransactionVote = enum(i32) {
Commit = 0,
Abort = 1,
};
pub const TxCommit = TransactionVote.Commit;
pub const TxAbort = TransactionVote.Abort;
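// Small sanity check (illustrative): the enum layout above mirrors the SDK's
// TransactionVote values, with TxCommit/TxAbort kept as flat aliases.
test "TransactionVote values" {
    const testing = @import("std").testing;
    try testing.expectEqual(@as(i32, 0), @enumToInt(TxCommit));
    try testing.expectEqual(@as(i32, 1), @enumToInt(TxAbort));
}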
// TODO: this type is limited to platform 'windows5.0'
const IID_IContextState_Value = @import("../zig.zig").Guid.initString("3c05e54b-a42a-11d2-afc4-00c04f8ee1c4");
pub const IID_IContextState = &IID_IContextState_Value;
pub const IContextState = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetDeactivateOnReturn: fn(
self: *const IContextState,
bDeactivate: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDeactivateOnReturn: fn(
self: *const IContextState,
pbDeactivate: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMyTransactionVote: fn(
self: *const IContextState,
txVote: TransactionVote,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMyTransactionVote: fn(
self: *const IContextState,
ptxVote: ?*TransactionVote,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextState_SetDeactivateOnReturn(self: *const T, bDeactivate: i16) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextState.VTable, self.vtable).SetDeactivateOnReturn(@ptrCast(*const IContextState, self), bDeactivate);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextState_GetDeactivateOnReturn(self: *const T, pbDeactivate: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextState.VTable, self.vtable).GetDeactivateOnReturn(@ptrCast(*const IContextState, self), pbDeactivate);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextState_SetMyTransactionVote(self: *const T, txVote: TransactionVote) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextState.VTable, self.vtable).SetMyTransactionVote(@ptrCast(*const IContextState, self), txVote);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextState_GetMyTransactionVote(self: *const T, ptxVote: ?*TransactionVote) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextState.VTable, self.vtable).GetMyTransactionVote(@ptrCast(*const IContextState, self), ptxVote);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IPoolManager_Value = @import("../zig.zig").Guid.initString("0a469861-5a91-43a0-99b6-d5e179bb0631");
pub const IID_IPoolManager = &IID_IPoolManager_Value;
pub const IPoolManager = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
ShutdownPool: fn(
self: *const IPoolManager,
CLSIDOrProgID: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IPoolManager_ShutdownPool(self: *const T, CLSIDOrProgID: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IPoolManager.VTable, self.vtable).ShutdownPool(@ptrCast(*const IPoolManager, self), CLSIDOrProgID);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISelectCOMLBServer_Value = @import("../zig.zig").Guid.initString("dcf443f4-3f8a-4872-b9f0-369a796d12d6");
pub const IID_ISelectCOMLBServer = &IID_ISelectCOMLBServer_Value;
pub const ISelectCOMLBServer = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Init: fn(
self: *const ISelectCOMLBServer,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetLBServer: fn(
self: *const ISelectCOMLBServer,
pUnk: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISelectCOMLBServer_Init(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISelectCOMLBServer.VTable, self.vtable).Init(@ptrCast(*const ISelectCOMLBServer, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISelectCOMLBServer_GetLBServer(self: *const T, pUnk: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISelectCOMLBServer.VTable, self.vtable).GetLBServer(@ptrCast(*const ISelectCOMLBServer, self), pUnk);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICOMLBArguments_Value = @import("../zig.zig").Guid.initString("3a0f150f-8ee5-4b94-b40e-aef2f9e42ed2");
pub const IID_ICOMLBArguments = &IID_ICOMLBArguments_Value;
pub const ICOMLBArguments = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetCLSID: fn(
self: *const ICOMLBArguments,
pCLSID: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetCLSID: fn(
self: *const ICOMLBArguments,
pCLSID: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMachineName: fn(
self: *const ICOMLBArguments,
cchSvr: u32,
szServerName: [*:0]u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMachineName: fn(
self: *const ICOMLBArguments,
cchSvr: u32,
szServerName: [*:0]u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMLBArguments_GetCLSID(self: *const T, pCLSID: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMLBArguments.VTable, self.vtable).GetCLSID(@ptrCast(*const ICOMLBArguments, self), pCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMLBArguments_SetCLSID(self: *const T, pCLSID: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMLBArguments.VTable, self.vtable).SetCLSID(@ptrCast(*const ICOMLBArguments, self), pCLSID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMLBArguments_GetMachineName(self: *const T, cchSvr: u32, szServerName: [*:0]u16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMLBArguments.VTable, self.vtable).GetMachineName(@ptrCast(*const ICOMLBArguments, self), cchSvr, szServerName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICOMLBArguments_SetMachineName(self: *const T, cchSvr: u32, szServerName: [*:0]u16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICOMLBArguments.VTable, self.vtable).SetMachineName(@ptrCast(*const ICOMLBArguments, self), cchSvr, szServerName);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICrmLogControl_Value = @import("../zig.zig").Guid.initString("a0e174b3-d26e-11d2-8f84-00805fc7bcd9");
pub const IID_ICrmLogControl = &IID_ICrmLogControl_Value;
pub const ICrmLogControl = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_TransactionUOW: fn(
self: *const ICrmLogControl,
pVal: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RegisterCompensator: fn(
self: *const ICrmLogControl,
lpcwstrProgIdCompensator: ?[*:0]const u16,
lpcwstrDescription: ?[*:0]const u16,
lCrmRegFlags: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
WriteLogRecordVariants: fn(
self: *const ICrmLogControl,
pLogRecord: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ForceLog: fn(
self: *const ICrmLogControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ForgetLogRecord: fn(
self: *const ICrmLogControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ForceTransactionToAbort: fn(
self: *const ICrmLogControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
WriteLogRecord: fn(
self: *const ICrmLogControl,
rgBlob: [*]BLOB,
cBlob: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmLogControl_get_TransactionUOW(self: *const T, pVal: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmLogControl.VTable, self.vtable).get_TransactionUOW(@ptrCast(*const ICrmLogControl, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmLogControl_RegisterCompensator(self: *const T, lpcwstrProgIdCompensator: ?[*:0]const u16, lpcwstrDescription: ?[*:0]const u16, lCrmRegFlags: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmLogControl.VTable, self.vtable).RegisterCompensator(@ptrCast(*const ICrmLogControl, self), lpcwstrProgIdCompensator, lpcwstrDescription, lCrmRegFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmLogControl_WriteLogRecordVariants(self: *const T, pLogRecord: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmLogControl.VTable, self.vtable).WriteLogRecordVariants(@ptrCast(*const ICrmLogControl, self), pLogRecord);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmLogControl_ForceLog(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmLogControl.VTable, self.vtable).ForceLog(@ptrCast(*const ICrmLogControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmLogControl_ForgetLogRecord(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmLogControl.VTable, self.vtable).ForgetLogRecord(@ptrCast(*const ICrmLogControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmLogControl_ForceTransactionToAbort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmLogControl.VTable, self.vtable).ForceTransactionToAbort(@ptrCast(*const ICrmLogControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmLogControl_WriteLogRecord(self: *const T, rgBlob: [*]BLOB, cBlob: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmLogControl.VTable, self.vtable).WriteLogRecord(@ptrCast(*const ICrmLogControl, self), rgBlob, cBlob);
}
};}
pub usingnamespace MethodMixin(@This());
};
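// Illustrative sketch (not generated): a CRM worker registers its compensator
// ProgID before writing log records. The ProgID string is hypothetical and the
// flag value (CRMREGFLAG_ALLPHASES = prepare | commit | abort phases) is an
// assumption here rather than a constant declared in this file.
fn registerExampleCompensator(log: *const ICrmLogControl) HRESULT {
    const unicode = @import("std").unicode;
    return log.ICrmLogControl_RegisterCompensator(
        unicode.utf8ToUtf16LeStringLiteral("Example.Compensator"), // hypothetical ProgID
        unicode.utf8ToUtf16LeStringLiteral("Example compensator description"),
        0x7, // CRMREGFLAG_ALLPHASES (prepare | commit | abort), assumed value
    );
}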
// TODO: this type is limited to platform 'windows5.0'
const IID_ICrmCompensatorVariants_Value = @import("../zig.zig").Guid.initString("f0baf8e4-7804-11d1-82e9-00a0c91eede9");
pub const IID_ICrmCompensatorVariants = &IID_ICrmCompensatorVariants_Value;
pub const ICrmCompensatorVariants = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetLogControlVariants: fn(
self: *const ICrmCompensatorVariants,
pLogControl: ?*ICrmLogControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BeginPrepareVariants: fn(
self: *const ICrmCompensatorVariants,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PrepareRecordVariants: fn(
self: *const ICrmCompensatorVariants,
pLogRecord: ?*VARIANT,
pbForget: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EndPrepareVariants: fn(
self: *const ICrmCompensatorVariants,
pbOkToPrepare: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BeginCommitVariants: fn(
self: *const ICrmCompensatorVariants,
bRecovery: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CommitRecordVariants: fn(
self: *const ICrmCompensatorVariants,
pLogRecord: ?*VARIANT,
pbForget: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EndCommitVariants: fn(
self: *const ICrmCompensatorVariants,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BeginAbortVariants: fn(
self: *const ICrmCompensatorVariants,
bRecovery: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AbortRecordVariants: fn(
self: *const ICrmCompensatorVariants,
pLogRecord: ?*VARIANT,
pbForget: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EndAbortVariants: fn(
self: *const ICrmCompensatorVariants,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_SetLogControlVariants(self: *const T, pLogControl: ?*ICrmLogControl) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).SetLogControlVariants(@ptrCast(*const ICrmCompensatorVariants, self), pLogControl);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_BeginPrepareVariants(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).BeginPrepareVariants(@ptrCast(*const ICrmCompensatorVariants, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_PrepareRecordVariants(self: *const T, pLogRecord: ?*VARIANT, pbForget: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).PrepareRecordVariants(@ptrCast(*const ICrmCompensatorVariants, self), pLogRecord, pbForget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_EndPrepareVariants(self: *const T, pbOkToPrepare: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).EndPrepareVariants(@ptrCast(*const ICrmCompensatorVariants, self), pbOkToPrepare);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_BeginCommitVariants(self: *const T, bRecovery: i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).BeginCommitVariants(@ptrCast(*const ICrmCompensatorVariants, self), bRecovery);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_CommitRecordVariants(self: *const T, pLogRecord: ?*VARIANT, pbForget: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).CommitRecordVariants(@ptrCast(*const ICrmCompensatorVariants, self), pLogRecord, pbForget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_EndCommitVariants(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).EndCommitVariants(@ptrCast(*const ICrmCompensatorVariants, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_BeginAbortVariants(self: *const T, bRecovery: i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).BeginAbortVariants(@ptrCast(*const ICrmCompensatorVariants, self), bRecovery);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_AbortRecordVariants(self: *const T, pLogRecord: ?*VARIANT, pbForget: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).AbortRecordVariants(@ptrCast(*const ICrmCompensatorVariants, self), pLogRecord, pbForget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensatorVariants_EndAbortVariants(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensatorVariants.VTable, self.vtable).EndAbortVariants(@ptrCast(*const ICrmCompensatorVariants, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const CrmLogRecordRead = extern struct {
dwCrmFlags: u32,
dwSequenceNumber: u32,
blobUserData: BLOB,
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICrmCompensator_Value = @import("../zig.zig").Guid.initString("bbc01830-8d3b-11d1-82ec-00a0c91eede9");
pub const IID_ICrmCompensator = &IID_ICrmCompensator_Value;
pub const ICrmCompensator = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetLogControl: fn(
self: *const ICrmCompensator,
pLogControl: ?*ICrmLogControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BeginPrepare: fn(
self: *const ICrmCompensator,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PrepareRecord: fn(
self: *const ICrmCompensator,
crmLogRec: CrmLogRecordRead,
pfForget: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EndPrepare: fn(
self: *const ICrmCompensator,
pfOkToPrepare: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BeginCommit: fn(
self: *const ICrmCompensator,
fRecovery: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CommitRecord: fn(
self: *const ICrmCompensator,
crmLogRec: CrmLogRecordRead,
pfForget: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EndCommit: fn(
self: *const ICrmCompensator,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BeginAbort: fn(
self: *const ICrmCompensator,
fRecovery: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AbortRecord: fn(
self: *const ICrmCompensator,
crmLogRec: CrmLogRecordRead,
pfForget: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EndAbort: fn(
self: *const ICrmCompensator,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_SetLogControl(self: *const T, pLogControl: ?*ICrmLogControl) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).SetLogControl(@ptrCast(*const ICrmCompensator, self), pLogControl);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_BeginPrepare(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).BeginPrepare(@ptrCast(*const ICrmCompensator, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_PrepareRecord(self: *const T, crmLogRec: CrmLogRecordRead, pfForget: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).PrepareRecord(@ptrCast(*const ICrmCompensator, self), crmLogRec, pfForget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_EndPrepare(self: *const T, pfOkToPrepare: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).EndPrepare(@ptrCast(*const ICrmCompensator, self), pfOkToPrepare);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_BeginCommit(self: *const T, fRecovery: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).BeginCommit(@ptrCast(*const ICrmCompensator, self), fRecovery);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_CommitRecord(self: *const T, crmLogRec: CrmLogRecordRead, pfForget: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).CommitRecord(@ptrCast(*const ICrmCompensator, self), crmLogRec, pfForget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_EndCommit(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).EndCommit(@ptrCast(*const ICrmCompensator, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_BeginAbort(self: *const T, fRecovery: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).BeginAbort(@ptrCast(*const ICrmCompensator, self), fRecovery);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_AbortRecord(self: *const T, crmLogRec: CrmLogRecordRead, pfForget: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).AbortRecord(@ptrCast(*const ICrmCompensator, self), crmLogRec, pfForget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmCompensator_EndAbort(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmCompensator.VTable, self.vtable).EndAbort(@ptrCast(*const ICrmCompensator, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
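// Example (illustrative sketch, not part of the generated bindings): driving
// one commit-phase delivery against an `*ICrmCompensator`. In practice the
// CRM infrastructure makes these calls; the function and variable names here
// are hypothetical.
fn exampleDriveCommit(comp: *ICrmCompensator, rec: CrmLogRecordRead) HRESULT {
    var hr = comp.ICrmCompensator_BeginCommit(0); // fRecovery = FALSE
    if (hr < 0) return hr;
    var forget: BOOL = 0;
    hr = comp.ICrmCompensator_CommitRecord(rec, &forget);
    if (hr < 0) return hr;
    return comp.ICrmCompensator_EndCommit();
}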
pub const CrmTransactionState = enum(i32) {
Active = 0,
Committed = 1,
Aborted = 2,
Indoubt = 3,
};
pub const TxState_Active = CrmTransactionState.Active;
pub const TxState_Committed = CrmTransactionState.Committed;
pub const TxState_Aborted = CrmTransactionState.Aborted;
pub const TxState_Indoubt = CrmTransactionState.Indoubt;
// TODO: this type is limited to platform 'windows5.0'
const IID_ICrmMonitorLogRecords_Value = @import("../zig.zig").Guid.initString("70c8e441-c7ed-11d1-82fb-00a0c91eede9");
pub const IID_ICrmMonitorLogRecords = &IID_ICrmMonitorLogRecords_Value;
pub const ICrmMonitorLogRecords = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ICrmMonitorLogRecords,
pVal: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_TransactionState: fn(
self: *const ICrmMonitorLogRecords,
pVal: ?*CrmTransactionState,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_StructuredRecords: fn(
self: *const ICrmMonitorLogRecords,
pVal: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetLogRecord: fn(
self: *const ICrmMonitorLogRecords,
dwIndex: u32,
pCrmLogRec: ?*CrmLogRecordRead,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetLogRecordVariants: fn(
self: *const ICrmMonitorLogRecords,
IndexNumber: VARIANT,
pLogRecord: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorLogRecords_get_Count(self: *const T, pVal: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorLogRecords.VTable, self.vtable).get_Count(@ptrCast(*const ICrmMonitorLogRecords, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorLogRecords_get_TransactionState(self: *const T, pVal: ?*CrmTransactionState) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorLogRecords.VTable, self.vtable).get_TransactionState(@ptrCast(*const ICrmMonitorLogRecords, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorLogRecords_get_StructuredRecords(self: *const T, pVal: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorLogRecords.VTable, self.vtable).get_StructuredRecords(@ptrCast(*const ICrmMonitorLogRecords, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorLogRecords_GetLogRecord(self: *const T, dwIndex: u32, pCrmLogRec: ?*CrmLogRecordRead) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorLogRecords.VTable, self.vtable).GetLogRecord(@ptrCast(*const ICrmMonitorLogRecords, self), dwIndex, pCrmLogRec);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorLogRecords_GetLogRecordVariants(self: *const T, IndexNumber: VARIANT, pLogRecord: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorLogRecords.VTable, self.vtable).GetLogRecordVariants(@ptrCast(*const ICrmMonitorLogRecords, self), IndexNumber, pLogRecord);
}
};}
pub usingnamespace MethodMixin(@This());
};
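// Example (illustrative sketch, not part of the generated bindings): walking
// the records exposed by an already-obtained `*ICrmMonitorLogRecords`. The
// function name is hypothetical; a non-negative Count is assumed.
fn exampleVisitLogRecords(mon: *ICrmMonitorLogRecords) HRESULT {
    var count: i32 = 0;
    var hr = mon.ICrmMonitorLogRecords_get_Count(&count);
    if (hr < 0) return hr;
    const n = @intCast(u32, count);
    var i: u32 = 0;
    while (i < n) : (i += 1) {
        var rec: CrmLogRecordRead = undefined;
        hr = mon.ICrmMonitorLogRecords_GetLogRecord(i, &rec);
        if (hr < 0) return hr;
        // rec.dwSequenceNumber and rec.blobUserData now describe record i.
    }
    return 0; // S_OK
}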
// TODO: this type is limited to platform 'windows5.0'
const IID_ICrmMonitorClerks_Value = @import("../zig.zig").Guid.initString("70c8e442-c7ed-11d1-82fb-00a0c91eede9");
pub const IID_ICrmMonitorClerks = &IID_ICrmMonitorClerks_Value;
pub const ICrmMonitorClerks = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
Item: fn(
self: *const ICrmMonitorClerks,
Index: VARIANT,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ICrmMonitorClerks,
pVal: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ICrmMonitorClerks,
pVal: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ProgIdCompensator: fn(
self: *const ICrmMonitorClerks,
Index: VARIANT,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Description: fn(
self: *const ICrmMonitorClerks,
Index: VARIANT,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
TransactionUOW: fn(
self: *const ICrmMonitorClerks,
Index: VARIANT,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ActivityId: fn(
self: *const ICrmMonitorClerks,
Index: VARIANT,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorClerks_Item(self: *const T, Index: VARIANT, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorClerks.VTable, self.vtable).Item(@ptrCast(*const ICrmMonitorClerks, self), Index, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorClerks_get__NewEnum(self: *const T, pVal: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorClerks.VTable, self.vtable).get__NewEnum(@ptrCast(*const ICrmMonitorClerks, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorClerks_get_Count(self: *const T, pVal: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorClerks.VTable, self.vtable).get_Count(@ptrCast(*const ICrmMonitorClerks, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorClerks_ProgIdCompensator(self: *const T, Index: VARIANT, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorClerks.VTable, self.vtable).ProgIdCompensator(@ptrCast(*const ICrmMonitorClerks, self), Index, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorClerks_Description(self: *const T, Index: VARIANT, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorClerks.VTable, self.vtable).Description(@ptrCast(*const ICrmMonitorClerks, self), Index, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorClerks_TransactionUOW(self: *const T, Index: VARIANT, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorClerks.VTable, self.vtable).TransactionUOW(@ptrCast(*const ICrmMonitorClerks, self), Index, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitorClerks_ActivityId(self: *const T, Index: VARIANT, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitorClerks.VTable, self.vtable).ActivityId(@ptrCast(*const ICrmMonitorClerks, self), Index, pItem);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ICrmMonitor_Value = @import("../zig.zig").Guid.initString("70c8e443-c7ed-11d1-82fb-00a0c91eede9");
pub const IID_ICrmMonitor = &IID_ICrmMonitor_Value;
pub const ICrmMonitor = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetClerks: fn(
self: *const ICrmMonitor,
pClerks: ?*?*ICrmMonitorClerks,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
HoldClerk: fn(
self: *const ICrmMonitor,
Index: VARIANT,
pItem: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitor_GetClerks(self: *const T, pClerks: ?*?*ICrmMonitorClerks) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitor.VTable, self.vtable).GetClerks(@ptrCast(*const ICrmMonitor, self), pClerks);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmMonitor_HoldClerk(self: *const T, Index: VARIANT, pItem: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmMonitor.VTable, self.vtable).HoldClerk(@ptrCast(*const ICrmMonitor, self), Index, pItem);
}
};}
pub usingnamespace MethodMixin(@This());
};
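// Example (illustrative sketch, not part of the generated bindings): fetching
// the clerk collection from an `*ICrmMonitor` and reading its size. Creating
// the monitor itself (normally CoCreateInstance of the CRM monitor class) is
// out of scope; IUnknown_Release is assumed to be provided by the IUnknown
// mixin, as elsewhere in these bindings.
fn exampleCountClerks(monitor: *ICrmMonitor) HRESULT {
    var clerks: ?*ICrmMonitorClerks = null;
    var hr = monitor.ICrmMonitor_GetClerks(&clerks);
    if (hr < 0) return hr;
    var count: i32 = 0;
    hr = clerks.?.ICrmMonitorClerks_get_Count(&count);
    _ = clerks.?.IUnknown_Release(); // drop the reference returned by GetClerks
    return hr;
}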
// TODO: this type is limited to platform 'windows5.0'
const IID_ICrmFormatLogRecords_Value = @import("../zig.zig").Guid.initString("9c51d821-c98b-11d1-82fb-00a0c91eede9");
pub const IID_ICrmFormatLogRecords = &IID_ICrmFormatLogRecords_Value;
pub const ICrmFormatLogRecords = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetColumnCount: fn(
self: *const ICrmFormatLogRecords,
plColumnCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetColumnHeaders: fn(
self: *const ICrmFormatLogRecords,
pHeaders: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetColumn: fn(
self: *const ICrmFormatLogRecords,
CrmLogRec: CrmLogRecordRead,
pFormattedLogRecord: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetColumnVariants: fn(
self: *const ICrmFormatLogRecords,
LogRecord: VARIANT,
pFormattedLogRecord: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmFormatLogRecords_GetColumnCount(self: *const T, plColumnCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmFormatLogRecords.VTable, self.vtable).GetColumnCount(@ptrCast(*const ICrmFormatLogRecords, self), plColumnCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmFormatLogRecords_GetColumnHeaders(self: *const T, pHeaders: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmFormatLogRecords.VTable, self.vtable).GetColumnHeaders(@ptrCast(*const ICrmFormatLogRecords, self), pHeaders);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmFormatLogRecords_GetColumn(self: *const T, CrmLogRec: CrmLogRecordRead, pFormattedLogRecord: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmFormatLogRecords.VTable, self.vtable).GetColumn(@ptrCast(*const ICrmFormatLogRecords, self), CrmLogRec, pFormattedLogRecord);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICrmFormatLogRecords_GetColumnVariants(self: *const T, LogRecord: VARIANT, pFormattedLogRecord: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ICrmFormatLogRecords.VTable, self.vtable).GetColumnVariants(@ptrCast(*const ICrmFormatLogRecords, self), LogRecord, pFormattedLogRecord);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const CSC_InheritanceConfig = enum(i32) {
    Inherit = 0,
    Ignore = 1,
};
pub const CSC_Inherit = CSC_InheritanceConfig.Inherit;
pub const CSC_Ignore = CSC_InheritanceConfig.Ignore;
pub const CSC_ThreadPool = enum(i32) {
ThreadPoolNone = 0,
ThreadPoolInherit = 1,
STAThreadPool = 2,
MTAThreadPool = 3,
};
pub const CSC_ThreadPoolNone = CSC_ThreadPool.ThreadPoolNone;
pub const CSC_ThreadPoolInherit = CSC_ThreadPool.ThreadPoolInherit;
pub const CSC_STAThreadPool = CSC_ThreadPool.STAThreadPool;
pub const CSC_MTAThreadPool = CSC_ThreadPool.MTAThreadPool;
pub const CSC_Binding = enum(i32) {
NoBinding = 0,
BindToPoolThread = 1,
};
pub const CSC_NoBinding = CSC_Binding.NoBinding;
pub const CSC_BindToPoolThread = CSC_Binding.BindToPoolThread;
pub const CSC_TransactionConfig = enum(i32) {
NoTransaction = 0,
IfContainerIsTransactional = 1,
CreateTransactionIfNecessary = 2,
NewTransaction = 3,
};
pub const CSC_NoTransaction = CSC_TransactionConfig.NoTransaction;
pub const CSC_IfContainerIsTransactional = CSC_TransactionConfig.IfContainerIsTransactional;
pub const CSC_CreateTransactionIfNecessary = CSC_TransactionConfig.CreateTransactionIfNecessary;
pub const CSC_NewTransaction = CSC_TransactionConfig.NewTransaction;
pub const CSC_SynchronizationConfig = enum(i32) {
NoSynchronization = 0,
IfContainerIsSynchronized = 1,
NewSynchronizationIfNecessary = 2,
NewSynchronization = 3,
};
pub const CSC_NoSynchronization = CSC_SynchronizationConfig.NoSynchronization;
pub const CSC_IfContainerIsSynchronized = CSC_SynchronizationConfig.IfContainerIsSynchronized;
pub const CSC_NewSynchronizationIfNecessary = CSC_SynchronizationConfig.NewSynchronizationIfNecessary;
pub const CSC_NewSynchronization = CSC_SynchronizationConfig.NewSynchronization;
pub const CSC_TrackerConfig = enum(i32) {
DontUseTracker = 0,
UseTracker = 1,
};
pub const CSC_DontUseTracker = CSC_TrackerConfig.DontUseTracker;
pub const CSC_UseTracker = CSC_TrackerConfig.UseTracker;
pub const CSC_PartitionConfig = enum(i32) {
NoPartition = 0,
InheritPartition = 1,
NewPartition = 2,
};
pub const CSC_NoPartition = CSC_PartitionConfig.NoPartition;
pub const CSC_InheritPartition = CSC_PartitionConfig.InheritPartition;
pub const CSC_NewPartition = CSC_PartitionConfig.NewPartition;
pub const CSC_IISIntrinsicsConfig = enum(i32) {
NoIISIntrinsics = 0,
InheritIISIntrinsics = 1,
};
pub const CSC_NoIISIntrinsics = CSC_IISIntrinsicsConfig.NoIISIntrinsics;
pub const CSC_InheritIISIntrinsics = CSC_IISIntrinsicsConfig.InheritIISIntrinsics;
pub const CSC_COMTIIntrinsicsConfig = enum(i32) {
NoCOMTIIntrinsics = 0,
InheritCOMTIIntrinsics = 1,
};
pub const CSC_NoCOMTIIntrinsics = CSC_COMTIIntrinsicsConfig.NoCOMTIIntrinsics;
pub const CSC_InheritCOMTIIntrinsics = CSC_COMTIIntrinsicsConfig.InheritCOMTIIntrinsics;
pub const CSC_SxsConfig = enum(i32) {
NoSxs = 0,
InheritSxs = 1,
NewSxs = 2,
};
pub const CSC_NoSxs = CSC_SxsConfig.NoSxs;
pub const CSC_InheritSxs = CSC_SxsConfig.InheritSxs;
pub const CSC_NewSxs = CSC_SxsConfig.NewSxs;
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceIISIntrinsicsConfig_Value = @import("../zig.zig").Guid.initString("1a0cf920-d452-46f4-bc36-48118d54ea52");
pub const IID_IServiceIISIntrinsicsConfig = &IID_IServiceIISIntrinsicsConfig_Value;
pub const IServiceIISIntrinsicsConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
IISIntrinsicsConfig: fn(
self: *const IServiceIISIntrinsicsConfig,
iisIntrinsicsConfig: CSC_IISIntrinsicsConfig,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceIISIntrinsicsConfig_IISIntrinsicsConfig(self: *const T, iisIntrinsicsConfig: CSC_IISIntrinsicsConfig) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceIISIntrinsicsConfig.VTable, self.vtable).IISIntrinsicsConfig(@ptrCast(*const IServiceIISIntrinsicsConfig, self), iisIntrinsicsConfig);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceComTIIntrinsicsConfig_Value = @import("../zig.zig").Guid.initString("09e6831e-04e1-4ed4-9d0f-e8b168bafeaf");
pub const IID_IServiceComTIIntrinsicsConfig = &IID_IServiceComTIIntrinsicsConfig_Value;
pub const IServiceComTIIntrinsicsConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
ComTIIntrinsicsConfig: fn(
self: *const IServiceComTIIntrinsicsConfig,
comtiIntrinsicsConfig: CSC_COMTIIntrinsicsConfig,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceComTIIntrinsicsConfig_ComTIIntrinsicsConfig(self: *const T, comtiIntrinsicsConfig: CSC_COMTIIntrinsicsConfig) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceComTIIntrinsicsConfig.VTable, self.vtable).ComTIIntrinsicsConfig(@ptrCast(*const IServiceComTIIntrinsicsConfig, self), comtiIntrinsicsConfig);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceSxsConfig_Value = @import("../zig.zig").Guid.initString("c7cd7379-f3f2-4634-811b-703281d73e08");
pub const IID_IServiceSxsConfig = &IID_IServiceSxsConfig_Value;
pub const IServiceSxsConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SxsConfig: fn(
self: *const IServiceSxsConfig,
scsConfig: CSC_SxsConfig,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SxsName: fn(
self: *const IServiceSxsConfig,
szSxsName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SxsDirectory: fn(
self: *const IServiceSxsConfig,
szSxsDirectory: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceSxsConfig_SxsConfig(self: *const T, scsConfig: CSC_SxsConfig) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceSxsConfig.VTable, self.vtable).SxsConfig(@ptrCast(*const IServiceSxsConfig, self), scsConfig);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceSxsConfig_SxsName(self: *const T, szSxsName: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceSxsConfig.VTable, self.vtable).SxsName(@ptrCast(*const IServiceSxsConfig, self), szSxsName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceSxsConfig_SxsDirectory(self: *const T, szSxsDirectory: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceSxsConfig.VTable, self.vtable).SxsDirectory(@ptrCast(*const IServiceSxsConfig, self), szSxsDirectory);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ICheckSxsConfig_Value = @import("../zig.zig").Guid.initString("0ff5a96f-11fc-47d1-baa6-25dd347e7242");
pub const IID_ICheckSxsConfig = &IID_ICheckSxsConfig_Value;
pub const ICheckSxsConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
IsSameSxsConfig: fn(
self: *const ICheckSxsConfig,
wszSxsName: ?[*:0]const u16,
wszSxsDirectory: ?[*:0]const u16,
wszSxsAppName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ICheckSxsConfig_IsSameSxsConfig(self: *const T, wszSxsName: ?[*:0]const u16, wszSxsDirectory: ?[*:0]const u16, wszSxsAppName: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const ICheckSxsConfig.VTable, self.vtable).IsSameSxsConfig(@ptrCast(*const ICheckSxsConfig, self), wszSxsName, wszSxsDirectory, wszSxsAppName);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceInheritanceConfig_Value = @import("../zig.zig").Guid.initString("92186771-d3b4-4d77-a8ea-ee842d586f35");
pub const IID_IServiceInheritanceConfig = &IID_IServiceInheritanceConfig_Value;
pub const IServiceInheritanceConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
ContainingContextTreatment: fn(
self: *const IServiceInheritanceConfig,
inheritanceConfig: CSC_InheritanceConfig,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceInheritanceConfig_ContainingContextTreatment(self: *const T, inheritanceConfig: CSC_InheritanceConfig) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceInheritanceConfig.VTable, self.vtable).ContainingContextTreatment(@ptrCast(*const IServiceInheritanceConfig, self), inheritanceConfig);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceThreadPoolConfig_Value = @import("../zig.zig").Guid.initString("186d89bc-f277-4bcc-80d5-4df7b836ef4a");
pub const IID_IServiceThreadPoolConfig = &IID_IServiceThreadPoolConfig_Value;
pub const IServiceThreadPoolConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SelectThreadPool: fn(
self: *const IServiceThreadPoolConfig,
threadPool: CSC_ThreadPool,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetBindingInfo: fn(
self: *const IServiceThreadPoolConfig,
binding: CSC_Binding,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceThreadPoolConfig_SelectThreadPool(self: *const T, threadPool: CSC_ThreadPool) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceThreadPoolConfig.VTable, self.vtable).SelectThreadPool(@ptrCast(*const IServiceThreadPoolConfig, self), threadPool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceThreadPoolConfig_SetBindingInfo(self: *const T, binding: CSC_Binding) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceThreadPoolConfig.VTable, self.vtable).SetBindingInfo(@ptrCast(*const IServiceThreadPoolConfig, self), binding);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceTransactionConfigBase_Value = @import("../zig.zig").Guid.initString("772b3fbe-6ffd-42fb-b5f8-8f9b260f3810");
pub const IID_IServiceTransactionConfigBase = &IID_IServiceTransactionConfigBase_Value;
pub const IServiceTransactionConfigBase = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
ConfigureTransaction: fn(
self: *const IServiceTransactionConfigBase,
transactionConfig: CSC_TransactionConfig,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsolationLevel: fn(
self: *const IServiceTransactionConfigBase,
option: COMAdminTxIsolationLevelOptions,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
TransactionTimeout: fn(
self: *const IServiceTransactionConfigBase,
ulTimeoutSec: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BringYourOwnTransaction: fn(
self: *const IServiceTransactionConfigBase,
szTipURL: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
NewTransactionDescription: fn(
self: *const IServiceTransactionConfigBase,
szTxDesc: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceTransactionConfigBase_ConfigureTransaction(self: *const T, transactionConfig: CSC_TransactionConfig) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceTransactionConfigBase.VTable, self.vtable).ConfigureTransaction(@ptrCast(*const IServiceTransactionConfigBase, self), transactionConfig);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceTransactionConfigBase_IsolationLevel(self: *const T, option: COMAdminTxIsolationLevelOptions) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceTransactionConfigBase.VTable, self.vtable).IsolationLevel(@ptrCast(*const IServiceTransactionConfigBase, self), option);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceTransactionConfigBase_TransactionTimeout(self: *const T, ulTimeoutSec: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceTransactionConfigBase.VTable, self.vtable).TransactionTimeout(@ptrCast(*const IServiceTransactionConfigBase, self), ulTimeoutSec);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceTransactionConfigBase_BringYourOwnTransaction(self: *const T, szTipURL: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceTransactionConfigBase.VTable, self.vtable).BringYourOwnTransaction(@ptrCast(*const IServiceTransactionConfigBase, self), szTipURL);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceTransactionConfigBase_NewTransactionDescription(self: *const T, szTxDesc: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceTransactionConfigBase.VTable, self.vtable).NewTransactionDescription(@ptrCast(*const IServiceTransactionConfigBase, self), szTxDesc);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceTransactionConfig_Value = @import("../zig.zig").Guid.initString("59f4c2a3-d3d7-4a31-b6e4-6ab3177c50b9");
pub const IID_IServiceTransactionConfig = &IID_IServiceTransactionConfig_Value;
pub const IServiceTransactionConfig = extern struct {
pub const VTable = extern struct {
base: IServiceTransactionConfigBase.VTable,
ConfigureBYOT: fn(
self: *const IServiceTransactionConfig,
pITxByot: ?*ITransaction,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IServiceTransactionConfigBase.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceTransactionConfig_ConfigureBYOT(self: *const T, pITxByot: ?*ITransaction) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceTransactionConfig.VTable, self.vtable).ConfigureBYOT(@ptrCast(*const IServiceTransactionConfig, self), pITxByot);
}
};}
pub usingnamespace MethodMixin(@This());
};
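// Example (illustrative sketch, not part of the generated bindings): asking a
// service configuration for a brand-new transaction with a 60 second timeout.
// The IServiceTransactionConfigBase_* wrappers are reachable through
// IServiceTransactionConfig because its MethodMixin chains to the base mixin.
// The function name and the timeout value are hypothetical.
fn exampleRequireNewTransaction(cfg: *IServiceTransactionConfig) HRESULT {
    const hr = cfg.IServiceTransactionConfigBase_ConfigureTransaction(CSC_NewTransaction);
    if (hr < 0) return hr;
    return cfg.IServiceTransactionConfigBase_TransactionTimeout(60);
}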
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceSysTxnConfig_Value = @import("../zig.zig").Guid.initString("33caf1a1-fcb8-472b-b45e-967448ded6d8");
pub const IID_IServiceSysTxnConfig = &IID_IServiceSysTxnConfig_Value;
pub const IServiceSysTxnConfig = extern struct {
pub const VTable = extern struct {
base: IServiceTransactionConfig.VTable,
ConfigureBYOTSysTxn: fn(
self: *const IServiceSysTxnConfig,
pTxProxy: ?*ITransactionProxy,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IServiceTransactionConfig.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceSysTxnConfig_ConfigureBYOTSysTxn(self: *const T, pTxProxy: ?*ITransactionProxy) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceSysTxnConfig.VTable, self.vtable).ConfigureBYOTSysTxn(@ptrCast(*const IServiceSysTxnConfig, self), pTxProxy);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceSynchronizationConfig_Value = @import("../zig.zig").Guid.initString("fd880e81-6dce-4c58-af83-a208846c0030");
pub const IID_IServiceSynchronizationConfig = &IID_IServiceSynchronizationConfig_Value;
pub const IServiceSynchronizationConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
ConfigureSynchronization: fn(
self: *const IServiceSynchronizationConfig,
synchConfig: CSC_SynchronizationConfig,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceSynchronizationConfig_ConfigureSynchronization(self: *const T, synchConfig: CSC_SynchronizationConfig) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceSynchronizationConfig.VTable, self.vtable).ConfigureSynchronization(@ptrCast(*const IServiceSynchronizationConfig, self), synchConfig);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceTrackerConfig_Value = @import("../zig.zig").Guid.initString("6c3a3e1d-0ba6-4036-b76f-d0404db816c9");
pub const IID_IServiceTrackerConfig = &IID_IServiceTrackerConfig_Value;
pub const IServiceTrackerConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
TrackerConfig: fn(
self: *const IServiceTrackerConfig,
trackerConfig: CSC_TrackerConfig,
szTrackerAppName: ?[*:0]const u16,
szTrackerCtxName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceTrackerConfig_TrackerConfig(self: *const T, trackerConfig: CSC_TrackerConfig, szTrackerAppName: ?[*:0]const u16, szTrackerCtxName: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceTrackerConfig.VTable, self.vtable).TrackerConfig(@ptrCast(*const IServiceTrackerConfig, self), trackerConfig, szTrackerAppName, szTrackerCtxName);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServicePartitionConfig_Value = @import("../zig.zig").Guid.initString("80182d03-5ea4-4831-ae97-55beffc2e590");
pub const IID_IServicePartitionConfig = &IID_IServicePartitionConfig_Value;
pub const IServicePartitionConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
PartitionConfig: fn(
self: *const IServicePartitionConfig,
partitionConfig: CSC_PartitionConfig,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PartitionID: fn(
self: *const IServicePartitionConfig,
guidPartitionID: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePartitionConfig_PartitionConfig(self: *const T, partitionConfig: CSC_PartitionConfig) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePartitionConfig.VTable, self.vtable).PartitionConfig(@ptrCast(*const IServicePartitionConfig, self), partitionConfig);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePartitionConfig_PartitionID(self: *const T, guidPartitionID: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePartitionConfig.VTable, self.vtable).PartitionID(@ptrCast(*const IServicePartitionConfig, self), guidPartitionID);
}
};}
pub usingnamespace MethodMixin(@This());
};
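// Example (illustrative sketch, not part of the generated bindings): directing
// activity creation into a specific COM+ partition. The partition GUID is
// supplied by the caller here; the function name is hypothetical.
fn exampleSelectPartition(cfg: *IServicePartitionConfig, partition_id: *const Guid) HRESULT {
    const hr = cfg.IServicePartitionConfig_PartitionConfig(CSC_NewPartition);
    if (hr < 0) return hr;
    return cfg.IServicePartitionConfig_PartitionID(partition_id);
}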
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceCall_Value = @import("../zig.zig").Guid.initString("bd3e2e12-42dd-40f4-a09a-95a50c58304b");
pub const IID_IServiceCall = &IID_IServiceCall_Value;
pub const IServiceCall = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnCall: fn(
self: *const IServiceCall,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceCall_OnCall(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceCall.VTable, self.vtable).OnCall(@ptrCast(*const IServiceCall, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IAsyncErrorNotify_Value = @import("../zig.zig").Guid.initString("fe6777fb-a674-4177-8f32-6d707e113484");
pub const IID_IAsyncErrorNotify = &IID_IAsyncErrorNotify_Value;
pub const IAsyncErrorNotify = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnError: fn(
self: *const IAsyncErrorNotify,
hr: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAsyncErrorNotify_OnError(self: *const T, hr: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IAsyncErrorNotify.VTable, self.vtable).OnError(@ptrCast(*const IAsyncErrorNotify, self), hr);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServiceActivity_Value = @import("../zig.zig").Guid.initString("67532e0c-9e2f-4450-a354-035633944e17");
pub const IID_IServiceActivity = &IID_IServiceActivity_Value;
pub const IServiceActivity = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SynchronousCall: fn(
self: *const IServiceActivity,
pIServiceCall: ?*IServiceCall,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AsynchronousCall: fn(
self: *const IServiceActivity,
pIServiceCall: ?*IServiceCall,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
BindToCurrentThread: fn(
self: *const IServiceActivity,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
UnbindFromThread: fn(
self: *const IServiceActivity,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceActivity_SynchronousCall(self: *const T, pIServiceCall: ?*IServiceCall) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceActivity.VTable, self.vtable).SynchronousCall(@ptrCast(*const IServiceActivity, self), pIServiceCall);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceActivity_AsynchronousCall(self: *const T, pIServiceCall: ?*IServiceCall) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceActivity.VTable, self.vtable).AsynchronousCall(@ptrCast(*const IServiceActivity, self), pIServiceCall);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceActivity_BindToCurrentThread(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceActivity.VTable, self.vtable).BindToCurrentThread(@ptrCast(*const IServiceActivity, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServiceActivity_UnbindFromThread(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IServiceActivity.VTable, self.vtable).UnbindFromThread(@ptrCast(*const IServiceActivity, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
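// Example (illustrative sketch, not part of the generated bindings): handing a
// batch work item to an activity. Both pointers are assumed valid; `call`
// would normally be a user-implemented COM object exposing IServiceCall.
fn exampleRunSynchronously(activity: *IServiceActivity, call: *IServiceCall) HRESULT {
    // SynchronousCall returns only after IServiceCall.OnCall has completed;
    // AsynchronousCall would queue the same work and return immediately.
    return activity.IServiceActivity_SynchronousCall(call);
}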
// TODO: this type is limited to platform 'windows5.0'
const IID_IThreadPoolKnobs_Value = @import("../zig.zig").Guid.initString("51372af7-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IThreadPoolKnobs = &IID_IThreadPoolKnobs_Value;
pub const IThreadPoolKnobs = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetMaxThreads: fn(
self: *const IThreadPoolKnobs,
plcMaxThreads: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCurrentThreads: fn(
self: *const IThreadPoolKnobs,
plcCurrentThreads: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaxThreads: fn(
self: *const IThreadPoolKnobs,
lcMaxThreads: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDeleteDelay: fn(
self: *const IThreadPoolKnobs,
pmsecDeleteDelay: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetDeleteDelay: fn(
self: *const IThreadPoolKnobs,
msecDeleteDelay: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaxQueuedRequests: fn(
self: *const IThreadPoolKnobs,
plcMaxQueuedRequests: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCurrentQueuedRequests: fn(
self: *const IThreadPoolKnobs,
plcCurrentQueuedRequests: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaxQueuedRequests: fn(
self: *const IThreadPoolKnobs,
lcMaxQueuedRequests: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMinThreads: fn(
self: *const IThreadPoolKnobs,
lcMinThreads: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetQueueDepth: fn(
self: *const IThreadPoolKnobs,
lcQueueDepth: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_GetMaxThreads(self: *const T, plcMaxThreads: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).GetMaxThreads(@ptrCast(*const IThreadPoolKnobs, self), plcMaxThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_GetCurrentThreads(self: *const T, plcCurrentThreads: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).GetCurrentThreads(@ptrCast(*const IThreadPoolKnobs, self), plcCurrentThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_SetMaxThreads(self: *const T, lcMaxThreads: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).SetMaxThreads(@ptrCast(*const IThreadPoolKnobs, self), lcMaxThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_GetDeleteDelay(self: *const T, pmsecDeleteDelay: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).GetDeleteDelay(@ptrCast(*const IThreadPoolKnobs, self), pmsecDeleteDelay);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_SetDeleteDelay(self: *const T, msecDeleteDelay: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).SetDeleteDelay(@ptrCast(*const IThreadPoolKnobs, self), msecDeleteDelay);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_GetMaxQueuedRequests(self: *const T, plcMaxQueuedRequests: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).GetMaxQueuedRequests(@ptrCast(*const IThreadPoolKnobs, self), plcMaxQueuedRequests);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_GetCurrentQueuedRequests(self: *const T, plcCurrentQueuedRequests: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).GetCurrentQueuedRequests(@ptrCast(*const IThreadPoolKnobs, self), plcCurrentQueuedRequests);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_SetMaxQueuedRequests(self: *const T, lcMaxQueuedRequests: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).SetMaxQueuedRequests(@ptrCast(*const IThreadPoolKnobs, self), lcMaxQueuedRequests);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_SetMinThreads(self: *const T, lcMinThreads: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).SetMinThreads(@ptrCast(*const IThreadPoolKnobs, self), lcMinThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IThreadPoolKnobs_SetQueueDepth(self: *const T, lcQueueDepth: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IThreadPoolKnobs.VTable, self.vtable).SetQueueDepth(@ptrCast(*const IThreadPoolKnobs, self), lcQueueDepth);
}
};}
pub usingnamespace MethodMixin(@This());
};
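// Example (illustrative sketch, not part of the generated bindings): doubling
// the pool's maximum thread count through IThreadPoolKnobs. The function name
// is hypothetical and error handling is reduced to HRESULT propagation.
fn exampleDoubleMaxThreads(knobs: *IThreadPoolKnobs) HRESULT {
    var max: i32 = 0;
    const hr = knobs.IThreadPoolKnobs_GetMaxThreads(&max);
    if (hr < 0) return hr;
    return knobs.IThreadPoolKnobs_SetMaxThreads(max * 2);
}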
const IID_IComStaThreadPoolKnobs_Value = @import("../zig.zig").Guid.initString("324b64fa-33b6-11d2-98b7-00c04f8ee1c4");
pub const IID_IComStaThreadPoolKnobs = &IID_IComStaThreadPoolKnobs_Value;
pub const IComStaThreadPoolKnobs = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetMinThreadCount: fn(
self: *const IComStaThreadPoolKnobs,
minThreads: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMinThreadCount: fn(
self: *const IComStaThreadPoolKnobs,
minThreads: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaxThreadCount: fn(
self: *const IComStaThreadPoolKnobs,
maxThreads: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaxThreadCount: fn(
self: *const IComStaThreadPoolKnobs,
maxThreads: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetActivityPerThread: fn(
self: *const IComStaThreadPoolKnobs,
activitiesPerThread: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetActivityPerThread: fn(
self: *const IComStaThreadPoolKnobs,
activitiesPerThread: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetActivityRatio: fn(
self: *const IComStaThreadPoolKnobs,
activityRatio: f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetActivityRatio: fn(
self: *const IComStaThreadPoolKnobs,
activityRatio: ?*f64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetThreadCount: fn(
self: *const IComStaThreadPoolKnobs,
pdwThreads: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetQueueDepth: fn(
self: *const IComStaThreadPoolKnobs,
pdwQDepth: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetQueueDepth: fn(
self: *const IComStaThreadPoolKnobs,
dwQDepth: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_SetMinThreadCount(self: *const T, minThreads: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).SetMinThreadCount(@ptrCast(*const IComStaThreadPoolKnobs, self), minThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_GetMinThreadCount(self: *const T, minThreads: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).GetMinThreadCount(@ptrCast(*const IComStaThreadPoolKnobs, self), minThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_SetMaxThreadCount(self: *const T, maxThreads: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).SetMaxThreadCount(@ptrCast(*const IComStaThreadPoolKnobs, self), maxThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_GetMaxThreadCount(self: *const T, maxThreads: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).GetMaxThreadCount(@ptrCast(*const IComStaThreadPoolKnobs, self), maxThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_SetActivityPerThread(self: *const T, activitiesPerThread: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).SetActivityPerThread(@ptrCast(*const IComStaThreadPoolKnobs, self), activitiesPerThread);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_GetActivityPerThread(self: *const T, activitiesPerThread: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).GetActivityPerThread(@ptrCast(*const IComStaThreadPoolKnobs, self), activitiesPerThread);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_SetActivityRatio(self: *const T, activityRatio: f64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).SetActivityRatio(@ptrCast(*const IComStaThreadPoolKnobs, self), activityRatio);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_GetActivityRatio(self: *const T, activityRatio: ?*f64) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).GetActivityRatio(@ptrCast(*const IComStaThreadPoolKnobs, self), activityRatio);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_GetThreadCount(self: *const T, pdwThreads: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).GetThreadCount(@ptrCast(*const IComStaThreadPoolKnobs, self), pdwThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_GetQueueDepth(self: *const T, pdwQDepth: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).GetQueueDepth(@ptrCast(*const IComStaThreadPoolKnobs, self), pdwQDepth);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs_SetQueueDepth(self: *const T, dwQDepth: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs.VTable, self.vtable).SetQueueDepth(@ptrCast(*const IComStaThreadPoolKnobs, self), dwQDepth);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IComMtaThreadPoolKnobs_Value = @import("../zig.zig").Guid.initString("f9a76d2e-76a5-43eb-a0c4-49bec8e48480");
pub const IID_IComMtaThreadPoolKnobs = &IID_IComMtaThreadPoolKnobs_Value;
pub const IComMtaThreadPoolKnobs = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
MTASetMaxThreadCount: fn(
self: *const IComMtaThreadPoolKnobs,
dwMaxThreads: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
MTAGetMaxThreadCount: fn(
self: *const IComMtaThreadPoolKnobs,
pdwMaxThreads: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
MTASetThrottleValue: fn(
self: *const IComMtaThreadPoolKnobs,
dwThrottle: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
MTAGetThrottleValue: fn(
self: *const IComMtaThreadPoolKnobs,
pdwThrottle: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMtaThreadPoolKnobs_MTASetMaxThreadCount(self: *const T, dwMaxThreads: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMtaThreadPoolKnobs.VTable, self.vtable).MTASetMaxThreadCount(@ptrCast(*const IComMtaThreadPoolKnobs, self), dwMaxThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMtaThreadPoolKnobs_MTAGetMaxThreadCount(self: *const T, pdwMaxThreads: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMtaThreadPoolKnobs.VTable, self.vtable).MTAGetMaxThreadCount(@ptrCast(*const IComMtaThreadPoolKnobs, self), pdwMaxThreads);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMtaThreadPoolKnobs_MTASetThrottleValue(self: *const T, dwThrottle: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMtaThreadPoolKnobs.VTable, self.vtable).MTASetThrottleValue(@ptrCast(*const IComMtaThreadPoolKnobs, self), dwThrottle);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComMtaThreadPoolKnobs_MTAGetThrottleValue(self: *const T, pdwThrottle: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComMtaThreadPoolKnobs.VTable, self.vtable).MTAGetThrottleValue(@ptrCast(*const IComMtaThreadPoolKnobs, self), pdwThrottle);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IComStaThreadPoolKnobs2_Value = @import("../zig.zig").Guid.initString("73707523-ff9a-4974-bf84-2108dc213740");
pub const IID_IComStaThreadPoolKnobs2 = &IID_IComStaThreadPoolKnobs2_Value;
pub const IComStaThreadPoolKnobs2 = extern struct {
pub const VTable = extern struct {
base: IComStaThreadPoolKnobs.VTable,
GetMaxCPULoad: fn(
self: *const IComStaThreadPoolKnobs2,
pdwLoad: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaxCPULoad: fn(
self: *const IComStaThreadPoolKnobs2,
pdwLoad: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCPUMetricEnabled: fn(
self: *const IComStaThreadPoolKnobs2,
pbMetricEnabled: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetCPUMetricEnabled: fn(
self: *const IComStaThreadPoolKnobs2,
bMetricEnabled: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCreateThreadsAggressively: fn(
self: *const IComStaThreadPoolKnobs2,
pbMetricEnabled: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetCreateThreadsAggressively: fn(
self: *const IComStaThreadPoolKnobs2,
bMetricEnabled: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaxCSR: fn(
self: *const IComStaThreadPoolKnobs2,
pdwCSR: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaxCSR: fn(
self: *const IComStaThreadPoolKnobs2,
dwCSR: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetWaitTimeForThreadCleanup: fn(
self: *const IComStaThreadPoolKnobs2,
pdwThreadCleanupWaitTime: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetWaitTimeForThreadCleanup: fn(
self: *const IComStaThreadPoolKnobs2,
dwThreadCleanupWaitTime: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IComStaThreadPoolKnobs.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_GetMaxCPULoad(self: *const T, pdwLoad: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).GetMaxCPULoad(@ptrCast(*const IComStaThreadPoolKnobs2, self), pdwLoad);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_SetMaxCPULoad(self: *const T, pdwLoad: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).SetMaxCPULoad(@ptrCast(*const IComStaThreadPoolKnobs2, self), pdwLoad);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_GetCPUMetricEnabled(self: *const T, pbMetricEnabled: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).GetCPUMetricEnabled(@ptrCast(*const IComStaThreadPoolKnobs2, self), pbMetricEnabled);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_SetCPUMetricEnabled(self: *const T, bMetricEnabled: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).SetCPUMetricEnabled(@ptrCast(*const IComStaThreadPoolKnobs2, self), bMetricEnabled);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_GetCreateThreadsAggressively(self: *const T, pbMetricEnabled: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).GetCreateThreadsAggressively(@ptrCast(*const IComStaThreadPoolKnobs2, self), pbMetricEnabled);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_SetCreateThreadsAggressively(self: *const T, bMetricEnabled: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).SetCreateThreadsAggressively(@ptrCast(*const IComStaThreadPoolKnobs2, self), bMetricEnabled);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_GetMaxCSR(self: *const T, pdwCSR: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).GetMaxCSR(@ptrCast(*const IComStaThreadPoolKnobs2, self), pdwCSR);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_SetMaxCSR(self: *const T, dwCSR: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).SetMaxCSR(@ptrCast(*const IComStaThreadPoolKnobs2, self), dwCSR);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_GetWaitTimeForThreadCleanup(self: *const T, pdwThreadCleanupWaitTime: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).GetWaitTimeForThreadCleanup(@ptrCast(*const IComStaThreadPoolKnobs2, self), pdwThreadCleanupWaitTime);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IComStaThreadPoolKnobs2_SetWaitTimeForThreadCleanup(self: *const T, dwThreadCleanupWaitTime: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IComStaThreadPoolKnobs2.VTable, self.vtable).SetWaitTimeForThreadCleanup(@ptrCast(*const IComStaThreadPoolKnobs2, self), dwThreadCleanupWaitTime);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IProcessInitializer_Value = @import("../zig.zig").Guid.initString("1113f52d-dc7f-4943-aed6-88d04027e32a");
pub const IID_IProcessInitializer = &IID_IProcessInitializer_Value;
pub const IProcessInitializer = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Startup: fn(
self: *const IProcessInitializer,
punkProcessControl: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Shutdown: fn(
self: *const IProcessInitializer,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IProcessInitializer_Startup(self: *const T, punkProcessControl: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IProcessInitializer.VTable, self.vtable).Startup(@ptrCast(*const IProcessInitializer, self), punkProcessControl);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IProcessInitializer_Shutdown(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IProcessInitializer.VTable, self.vtable).Shutdown(@ptrCast(*const IProcessInitializer, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
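// Usage sketch (added example, not part of the generated bindings): the
// `pub usingnamespace MethodMixin(@This())` line above injects the interface-prefixed
// wrappers into the struct, so they can be called directly on an interface pointer
// obtained elsewhere (e.g. from a COM activation call). `init_obj` is assumed to be a
// valid pointer supplied by the caller.
fn shutdownProcessInitializer(init_obj: *IProcessInitializer) HRESULT {
    // Routed through the mixin; equivalent to invoking Shutdown through init_obj.vtable.
    return init_obj.IProcessInitializer_Shutdown();
}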
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IServicePoolConfig_Value = @import("../zig.zig").Guid.initString("a9690656-5bca-470c-8451-250c1f43a33e");
pub const IID_IServicePoolConfig = &IID_IServicePoolConfig_Value;
pub const IServicePoolConfig = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_MaxPoolSize: fn(
self: *const IServicePoolConfig,
dwMaxPool: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_MaxPoolSize: fn(
self: *const IServicePoolConfig,
pdwMaxPool: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_MinPoolSize: fn(
self: *const IServicePoolConfig,
dwMinPool: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_MinPoolSize: fn(
self: *const IServicePoolConfig,
pdwMinPool: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_CreationTimeout: fn(
self: *const IServicePoolConfig,
dwCreationTimeout: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_CreationTimeout: fn(
self: *const IServicePoolConfig,
pdwCreationTimeout: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_TransactionAffinity: fn(
self: *const IServicePoolConfig,
fTxAffinity: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_TransactionAffinity: fn(
self: *const IServicePoolConfig,
pfTxAffinity: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
put_ClassFactory: fn(
self: *const IServicePoolConfig,
pFactory: ?*IClassFactory,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_ClassFactory: fn(
self: *const IServicePoolConfig,
pFactory: ?*?*IClassFactory,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_put_MaxPoolSize(self: *const T, dwMaxPool: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).put_MaxPoolSize(@ptrCast(*const IServicePoolConfig, self), dwMaxPool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_get_MaxPoolSize(self: *const T, pdwMaxPool: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).get_MaxPoolSize(@ptrCast(*const IServicePoolConfig, self), pdwMaxPool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_put_MinPoolSize(self: *const T, dwMinPool: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).put_MinPoolSize(@ptrCast(*const IServicePoolConfig, self), dwMinPool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_get_MinPoolSize(self: *const T, pdwMinPool: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).get_MinPoolSize(@ptrCast(*const IServicePoolConfig, self), pdwMinPool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_put_CreationTimeout(self: *const T, dwCreationTimeout: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).put_CreationTimeout(@ptrCast(*const IServicePoolConfig, self), dwCreationTimeout);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_get_CreationTimeout(self: *const T, pdwCreationTimeout: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).get_CreationTimeout(@ptrCast(*const IServicePoolConfig, self), pdwCreationTimeout);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_put_TransactionAffinity(self: *const T, fTxAffinity: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).put_TransactionAffinity(@ptrCast(*const IServicePoolConfig, self), fTxAffinity);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_get_TransactionAffinity(self: *const T, pfTxAffinity: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).get_TransactionAffinity(@ptrCast(*const IServicePoolConfig, self), pfTxAffinity);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_put_ClassFactory(self: *const T, pFactory: ?*IClassFactory) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).put_ClassFactory(@ptrCast(*const IServicePoolConfig, self), pFactory);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePoolConfig_get_ClassFactory(self: *const T, pFactory: ?*?*IClassFactory) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePoolConfig.VTable, self.vtable).get_ClassFactory(@ptrCast(*const IServicePoolConfig, self), pFactory);
}
};}
pub usingnamespace MethodMixin(@This());
};
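// Usage sketch (added example): the put_/get_ pairs above are COM property accessors
// (hence the "SpecialName" TODOs); through the MethodMixin they read like ordinary
// methods. `cfg` is assumed to be a valid pointer obtained from COM elsewhere.
fn growPool(cfg: *IServicePoolConfig, new_max: u32) HRESULT {
    var old_max: u32 = 0;
    const hr = cfg.IServicePoolConfig_get_MaxPoolSize(&old_max);
    if (hr < 0) return hr; // propagate a failing HRESULT unchanged
    return cfg.IServicePoolConfig_put_MaxPoolSize(if (new_max > old_max) new_max else old_max);
}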
// TODO: this type is limited to platform 'windows5.0'
const IID_IServicePool_Value = @import("../zig.zig").Guid.initString("b302df81-ea45-451e-99a2-09f9fd1b1e13");
pub const IID_IServicePool = &IID_IServicePool_Value;
pub const IServicePool = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Initialize: fn(
self: *const IServicePool,
pPoolConfig: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetObject: fn(
self: *const IServicePool,
riid: ?*const Guid,
ppv: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Shutdown: fn(
self: *const IServicePool,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePool_Initialize(self: *const T, pPoolConfig: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePool.VTable, self.vtable).Initialize(@ptrCast(*const IServicePool, self), pPoolConfig);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePool_GetObject(self: *const T, riid: ?*const Guid, ppv: ?*?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePool.VTable, self.vtable).GetObject(@ptrCast(*const IServicePool, self), riid, ppv);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IServicePool_Shutdown(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IServicePool.VTable, self.vtable).Shutdown(@ptrCast(*const IServicePool, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IManagedPooledObj_Value = @import("../zig.zig").Guid.initString("c5da4bea-1b42-4437-8926-b6a38860a770");
pub const IID_IManagedPooledObj = &IID_IManagedPooledObj_Value;
pub const IManagedPooledObj = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetHeld: fn(
self: *const IManagedPooledObj,
m_bHeld: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedPooledObj_SetHeld(self: *const T, m_bHeld: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedPooledObj.VTable, self.vtable).SetHeld(@ptrCast(*const IManagedPooledObj, self), m_bHeld);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IManagedPoolAction_Value = @import("../zig.zig").Guid.initString("da91b74e-5388-4783-949d-c1cd5fb00506");
pub const IID_IManagedPoolAction = &IID_IManagedPoolAction_Value;
pub const IManagedPoolAction = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
LastRelease: fn(
self: *const IManagedPoolAction,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedPoolAction_LastRelease(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedPoolAction.VTable, self.vtable).LastRelease(@ptrCast(*const IManagedPoolAction, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IManagedObjectInfo_Value = @import("../zig.zig").Guid.initString("1427c51a-4584-49d8-90a0-c50d8086cbe9");
pub const IID_IManagedObjectInfo = &IID_IManagedObjectInfo_Value;
pub const IManagedObjectInfo = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetIUnknown: fn(
self: *const IManagedObjectInfo,
pUnk: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetIObjectControl: fn(
self: *const IManagedObjectInfo,
pCtrl: ?*?*IObjectControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetInPool: fn(
self: *const IManagedObjectInfo,
bInPool: BOOL,
pPooledObj: ?*IManagedPooledObj,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetWrapperStrength: fn(
self: *const IManagedObjectInfo,
bStrong: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedObjectInfo_GetIUnknown(self: *const T, pUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedObjectInfo.VTable, self.vtable).GetIUnknown(@ptrCast(*const IManagedObjectInfo, self), pUnk);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedObjectInfo_GetIObjectControl(self: *const T, pCtrl: ?*?*IObjectControl) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedObjectInfo.VTable, self.vtable).GetIObjectControl(@ptrCast(*const IManagedObjectInfo, self), pCtrl);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedObjectInfo_SetInPool(self: *const T, bInPool: BOOL, pPooledObj: ?*IManagedPooledObj) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedObjectInfo.VTable, self.vtable).SetInPool(@ptrCast(*const IManagedObjectInfo, self), bInPool, pPooledObj);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedObjectInfo_SetWrapperStrength(self: *const T, bStrong: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedObjectInfo.VTable, self.vtable).SetWrapperStrength(@ptrCast(*const IManagedObjectInfo, self), bStrong);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IAppDomainHelper_Value = @import("../zig.zig").Guid.initString("c7b67079-8255-42c6-9ec0-6994a3548780");
pub const IID_IAppDomainHelper = &IID_IAppDomainHelper_Value;
pub const IAppDomainHelper = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
Initialize: fn(
self: *const IAppDomainHelper,
pUnkAD: ?*IUnknown,
__MIDL__IAppDomainHelper0000: isize,
pPool: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DoCallback: fn(
self: *const IAppDomainHelper,
pUnkAD: ?*IUnknown,
__MIDL__IAppDomainHelper0001: isize,
pPool: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppDomainHelper_Initialize(self: *const T, pUnkAD: ?*IUnknown, __MIDL__IAppDomainHelper0000: isize, pPool: ?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const IAppDomainHelper.VTable, self.vtable).Initialize(@ptrCast(*const IAppDomainHelper, self), pUnkAD, __MIDL__IAppDomainHelper0000, pPool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppDomainHelper_DoCallback(self: *const T, pUnkAD: ?*IUnknown, __MIDL__IAppDomainHelper0001: isize, pPool: ?*anyopaque) callconv(.Inline) HRESULT {
return @ptrCast(*const IAppDomainHelper.VTable, self.vtable).DoCallback(@ptrCast(*const IAppDomainHelper, self), pUnkAD, __MIDL__IAppDomainHelper0001, pPool);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IAssemblyLocator_Value = @import("../zig.zig").Guid.initString("391ffbb9-a8ee-432a-abc8-baa238dab90f");
pub const IID_IAssemblyLocator = &IID_IAssemblyLocator_Value;
pub const IAssemblyLocator = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
GetModules: fn(
self: *const IAssemblyLocator,
applicationDir: ?BSTR,
applicationName: ?BSTR,
assemblyName: ?BSTR,
pModules: ?*?*SAFEARRAY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAssemblyLocator_GetModules(self: *const T, applicationDir: ?BSTR, applicationName: ?BSTR, assemblyName: ?BSTR, pModules: ?*?*SAFEARRAY) callconv(.Inline) HRESULT {
return @ptrCast(*const IAssemblyLocator.VTable, self.vtable).GetModules(@ptrCast(*const IAssemblyLocator, self), applicationDir, applicationName, assemblyName, pModules);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IManagedActivationEvents_Value = @import("../zig.zig").Guid.initString("a5f325af-572f-46da-b8ab-827c3d95d99e");
pub const IID_IManagedActivationEvents = &IID_IManagedActivationEvents_Value;
pub const IManagedActivationEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateManagedStub: fn(
self: *const IManagedActivationEvents,
pInfo: ?*IManagedObjectInfo,
fDist: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DestroyManagedStub: fn(
self: *const IManagedActivationEvents,
pInfo: ?*IManagedObjectInfo,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedActivationEvents_CreateManagedStub(self: *const T, pInfo: ?*IManagedObjectInfo, fDist: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedActivationEvents.VTable, self.vtable).CreateManagedStub(@ptrCast(*const IManagedActivationEvents, self), pInfo, fDist);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IManagedActivationEvents_DestroyManagedStub(self: *const T, pInfo: ?*IManagedObjectInfo) callconv(.Inline) HRESULT {
return @ptrCast(*const IManagedActivationEvents.VTable, self.vtable).DestroyManagedStub(@ptrCast(*const IManagedActivationEvents, self), pInfo);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_ISendMethodEvents_Value = @import("../zig.zig").Guid.initString("2732fd59-b2b4-4d44-878c-8b8f09626008");
pub const IID_ISendMethodEvents = &IID_ISendMethodEvents_Value;
pub const ISendMethodEvents = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SendMethodCall: fn(
self: *const ISendMethodEvents,
pIdentity: ?*const anyopaque,
riid: ?*const Guid,
dwMeth: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SendMethodReturn: fn(
self: *const ISendMethodEvents,
pIdentity: ?*const anyopaque,
riid: ?*const Guid,
dwMeth: u32,
hrCall: HRESULT,
hrServer: HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISendMethodEvents_SendMethodCall(self: *const T, pIdentity: ?*const anyopaque, riid: ?*const Guid, dwMeth: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const ISendMethodEvents.VTable, self.vtable).SendMethodCall(@ptrCast(*const ISendMethodEvents, self), pIdentity, riid, dwMeth);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISendMethodEvents_SendMethodReturn(self: *const T, pIdentity: ?*const anyopaque, riid: ?*const Guid, dwMeth: u32, hrCall: HRESULT, hrServer: HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISendMethodEvents.VTable, self.vtable).SendMethodReturn(@ptrCast(*const ISendMethodEvents, self), pIdentity, riid, dwMeth, hrCall, hrServer);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ITransactionResourcePool_Value = @import("../zig.zig").Guid.initString("c5feb7c1-346a-11d1-b1cc-00aa00ba3258");
pub const IID_ITransactionResourcePool = &IID_ITransactionResourcePool_Value;
pub const ITransactionResourcePool = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
PutResource: fn(
self: *const ITransactionResourcePool,
pPool: ?*IObjPool,
pUnk: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetResource: fn(
self: *const ITransactionResourcePool,
pPool: ?*IObjPool,
ppUnk: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionResourcePool_PutResource(self: *const T, pPool: ?*IObjPool, pUnk: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionResourcePool.VTable, self.vtable).PutResource(@ptrCast(*const ITransactionResourcePool, self), pPool, pUnk);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionResourcePool_GetResource(self: *const T, pPool: ?*IObjPool, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionResourcePool.VTable, self.vtable).GetResource(@ptrCast(*const ITransactionResourcePool, self), pPool, ppUnk);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IMTSCall_Value = @import("../zig.zig").Guid.initString("51372aef-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IMTSCall = &IID_IMTSCall_Value;
pub const IMTSCall = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnCall: fn(
self: *const IMTSCall,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMTSCall_OnCall(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IMTSCall.VTable, self.vtable).OnCall(@ptrCast(*const IMTSCall, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IContextProperties_Value = @import("../zig.zig").Guid.initString("d396da85-bf8f-11d1-bbae-00c04fc2fa5f");
pub const IID_IContextProperties = &IID_IContextProperties_Value;
pub const IContextProperties = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Count: fn(
self: *const IContextProperties,
plCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProperty: fn(
self: *const IContextProperties,
name: ?BSTR,
pProperty: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumNames: fn(
self: *const IContextProperties,
ppenum: ?*?*IEnumNames,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetProperty: fn(
self: *const IContextProperties,
name: ?BSTR,
property: VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RemoveProperty: fn(
self: *const IContextProperties,
name: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextProperties_Count(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextProperties.VTable, self.vtable).Count(@ptrCast(*const IContextProperties, self), plCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextProperties_GetProperty(self: *const T, name: ?BSTR, pProperty: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextProperties.VTable, self.vtable).GetProperty(@ptrCast(*const IContextProperties, self), name, pProperty);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextProperties_EnumNames(self: *const T, ppenum: ?*?*IEnumNames) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextProperties.VTable, self.vtable).EnumNames(@ptrCast(*const IContextProperties, self), ppenum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextProperties_SetProperty(self: *const T, name: ?BSTR, property: VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextProperties.VTable, self.vtable).SetProperty(@ptrCast(*const IContextProperties, self), name, property);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IContextProperties_RemoveProperty(self: *const T, name: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IContextProperties.VTable, self.vtable).RemoveProperty(@ptrCast(*const IContextProperties, self), name);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IObjPool_Value = @import("../zig.zig").Guid.initString("7d8805a0-2ea7-11d1-b1cc-00aa00ba3258");
pub const IID_IObjPool = &IID_IObjPool_Value;
pub const IObjPool = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Reserved1: fn(
self: *const IObjPool,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved2: fn(
self: *const IObjPool,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved3: fn(
self: *const IObjPool,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved4: fn(
self: *const IObjPool,
) callconv(@import("std").os.windows.WINAPI) void,
PutEndTx: fn(
self: *const IObjPool,
pObj: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved5: fn(
self: *const IObjPool,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved6: fn(
self: *const IObjPool,
) callconv(@import("std").os.windows.WINAPI) void,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjPool_Reserved1(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IObjPool.VTable, self.vtable).Reserved1(@ptrCast(*const IObjPool, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjPool_Reserved2(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IObjPool.VTable, self.vtable).Reserved2(@ptrCast(*const IObjPool, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjPool_Reserved3(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IObjPool.VTable, self.vtable).Reserved3(@ptrCast(*const IObjPool, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjPool_Reserved4(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IObjPool.VTable, self.vtable).Reserved4(@ptrCast(*const IObjPool, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjPool_PutEndTx(self: *const T, pObj: ?*IUnknown) callconv(.Inline) void {
return @ptrCast(*const IObjPool.VTable, self.vtable).PutEndTx(@ptrCast(*const IObjPool, self), pObj);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjPool_Reserved5(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IObjPool.VTable, self.vtable).Reserved5(@ptrCast(*const IObjPool, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IObjPool_Reserved6(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IObjPool.VTable, self.vtable).Reserved6(@ptrCast(*const IObjPool, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ITransactionProperty_Value = @import("../zig.zig").Guid.initString("788ea814-87b1-11d1-bba6-00c04fc2fa5f");
pub const IID_ITransactionProperty = &IID_ITransactionProperty_Value;
pub const ITransactionProperty = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Reserved1: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved2: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved3: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved4: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved5: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved6: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved7: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved8: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved9: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
GetTransactionResourcePool: fn(
self: *const ITransactionProperty,
ppTxPool: ?*?*ITransactionResourcePool,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reserved10: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved11: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved12: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved13: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved14: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved15: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved16: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
Reserved17: fn(
self: *const ITransactionProperty,
) callconv(@import("std").os.windows.WINAPI) void,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved1(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved1(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved2(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved2(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved3(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved3(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved4(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved4(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved5(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved5(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved6(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved6(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved7(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved7(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved8(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved8(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved9(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved9(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_GetTransactionResourcePool(self: *const T, ppTxPool: ?*?*ITransactionResourcePool) callconv(.Inline) HRESULT {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).GetTransactionResourcePool(@ptrCast(*const ITransactionProperty, self), ppTxPool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved10(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved10(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved11(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved11(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved12(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved12(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved13(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved13(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved14(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved14(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved15(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved15(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved16(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved16(@ptrCast(*const ITransactionProperty, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITransactionProperty_Reserved17(self: *const T) callconv(.Inline) void {
return @ptrCast(*const ITransactionProperty.VTable, self.vtable).Reserved17(@ptrCast(*const ITransactionProperty, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.0'
const IID_IMTSActivity_Value = @import("../zig.zig").Guid.initString("51372af0-cae7-11cf-be81-00aa00a2fa25");
pub const IID_IMTSActivity = &IID_IMTSActivity_Value;
pub const IMTSActivity = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SynchronousCall: fn(
self: *const IMTSActivity,
pCall: ?*IMTSCall,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AsyncCall: fn(
self: *const IMTSActivity,
pCall: ?*IMTSCall,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reserved1: fn(
self: *const IMTSActivity,
) callconv(@import("std").os.windows.WINAPI) void,
BindToCurrentThread: fn(
self: *const IMTSActivity,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
UnbindFromThread: fn(
self: *const IMTSActivity,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMTSActivity_SynchronousCall(self: *const T, pCall: ?*IMTSCall) callconv(.Inline) HRESULT {
return @ptrCast(*const IMTSActivity.VTable, self.vtable).SynchronousCall(@ptrCast(*const IMTSActivity, self), pCall);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMTSActivity_AsyncCall(self: *const T, pCall: ?*IMTSCall) callconv(.Inline) HRESULT {
return @ptrCast(*const IMTSActivity.VTable, self.vtable).AsyncCall(@ptrCast(*const IMTSActivity, self), pCall);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMTSActivity_Reserved1(self: *const T) callconv(.Inline) void {
return @ptrCast(*const IMTSActivity.VTable, self.vtable).Reserved1(@ptrCast(*const IMTSActivity, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMTSActivity_BindToCurrentThread(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IMTSActivity.VTable, self.vtable).BindToCurrentThread(@ptrCast(*const IMTSActivity, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IMTSActivity_UnbindFromThread(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IMTSActivity.VTable, self.vtable).UnbindFromThread(@ptrCast(*const IMTSActivity, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const AutoSvcs_Error_Constants = enum(u32) {
mtsErrCtxAborted = 2147803138,
mtsErrCtxAborting = 2147803139,
mtsErrCtxNoContext = 2147803140,
mtsErrCtxNotRegistered = 2147803141,
mtsErrCtxSynchTimeout = 2147803142,
mtsErrCtxOldReference = 2147803143,
mtsErrCtxRoleNotFound = 2147803148,
mtsErrCtxNoSecurity = 2147803149,
mtsErrCtxWrongThread = 2147803150,
mtsErrCtxTMNotAvailable = 2147803151,
comQCErrApplicationNotQueued = 2148599296,
comQCErrNoQueueableInterfaces = 2148599297,
comQCErrQueuingServiceNotAvailable = 2148599298,
comQCErrQueueTransactMismatch = 2148599299,
comqcErrRecorderMarshalled = 2148599300,
comqcErrOutParam = 2148599301,
comqcErrRecorderNotTrusted = 2148599302,
comqcErrPSLoad = 2148599303,
comqcErrMarshaledObjSameTxn = 2148599304,
comqcErrInvalidMessage = 2148599376,
comqcErrMsmqSidUnavailable = 2148599377,
comqcErrWrongMsgExtension = 2148599378,
comqcErrMsmqServiceUnavailable = 2148599379,
comqcErrMsgNotAuthenticated = 2148599380,
comqcErrMsmqConnectorUsed = 2148599381,
comqcErrBadMarshaledObject = 2148599382,
};
pub const mtsErrCtxAborted = AutoSvcs_Error_Constants.mtsErrCtxAborted;
pub const mtsErrCtxAborting = AutoSvcs_Error_Constants.mtsErrCtxAborting;
pub const mtsErrCtxNoContext = AutoSvcs_Error_Constants.mtsErrCtxNoContext;
pub const mtsErrCtxNotRegistered = AutoSvcs_Error_Constants.mtsErrCtxNotRegistered;
pub const mtsErrCtxSynchTimeout = AutoSvcs_Error_Constants.mtsErrCtxSynchTimeout;
pub const mtsErrCtxOldReference = AutoSvcs_Error_Constants.mtsErrCtxOldReference;
pub const mtsErrCtxRoleNotFound = AutoSvcs_Error_Constants.mtsErrCtxRoleNotFound;
pub const mtsErrCtxNoSecurity = AutoSvcs_Error_Constants.mtsErrCtxNoSecurity;
pub const mtsErrCtxWrongThread = AutoSvcs_Error_Constants.mtsErrCtxWrongThread;
pub const mtsErrCtxTMNotAvailable = AutoSvcs_Error_Constants.mtsErrCtxTMNotAvailable;
pub const comQCErrApplicationNotQueued = AutoSvcs_Error_Constants.comQCErrApplicationNotQueued;
pub const comQCErrNoQueueableInterfaces = AutoSvcs_Error_Constants.comQCErrNoQueueableInterfaces;
pub const comQCErrQueuingServiceNotAvailable = AutoSvcs_Error_Constants.comQCErrQueuingServiceNotAvailable;
pub const comQCErrQueueTransactMismatch = AutoSvcs_Error_Constants.comQCErrQueueTransactMismatch;
pub const comqcErrRecorderMarshalled = AutoSvcs_Error_Constants.comqcErrRecorderMarshalled;
pub const comqcErrOutParam = AutoSvcs_Error_Constants.comqcErrOutParam;
pub const comqcErrRecorderNotTrusted = AutoSvcs_Error_Constants.comqcErrRecorderNotTrusted;
pub const comqcErrPSLoad = AutoSvcs_Error_Constants.comqcErrPSLoad;
pub const comqcErrMarshaledObjSameTxn = AutoSvcs_Error_Constants.comqcErrMarshaledObjSameTxn;
pub const comqcErrInvalidMessage = AutoSvcs_Error_Constants.comqcErrInvalidMessage;
pub const comqcErrMsmqSidUnavailable = AutoSvcs_Error_Constants.comqcErrMsmqSidUnavailable;
pub const comqcErrWrongMsgExtension = AutoSvcs_Error_Constants.comqcErrWrongMsgExtension;
pub const comqcErrMsmqServiceUnavailable = AutoSvcs_Error_Constants.comqcErrMsmqServiceUnavailable;
pub const comqcErrMsgNotAuthenticated = AutoSvcs_Error_Constants.comqcErrMsgNotAuthenticated;
pub const comqcErrMsmqConnectorUsed = AutoSvcs_Error_Constants.comqcErrMsmqConnectorUsed;
pub const comqcErrBadMarshaledObject = AutoSvcs_Error_Constants.comqcErrBadMarshaledObject;
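// Added note (sketch): the decimal values in AutoSvcs_Error_Constants are HRESULTs;
// for example mtsErrCtxAborted (2147803138) is 0x8004E002, i.e. CONTEXT_E_ABORTED.
test "AutoSvcs error constants are HRESULT codes" {
    try @import("std").testing.expectEqual(@as(u32, 0x8004E002), @enumToInt(mtsErrCtxAborted));
}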
pub const LockModes = enum(i32) {
SetGet = 0,
Method = 1,
};
pub const LockSetGet = LockModes.SetGet;
pub const LockMethod = LockModes.Method;
pub const ReleaseModes = enum(i32) {
Standard = 0,
Process = 1,
};
pub const Standard = ReleaseModes.Standard;
pub const Process = ReleaseModes.Process;
pub const CRMFLAGS = enum(i32) {
FORGETTARGET = 1,
WRITTENDURINGPREPARE = 2,
WRITTENDURINGCOMMIT = 4,
WRITTENDURINGABORT = 8,
WRITTENDURINGRECOVERY = 16,
WRITTENDURINGREPLAY = 32,
REPLAYINPROGRESS = 64,
};
pub const CRMFLAG_FORGETTARGET = CRMFLAGS.FORGETTARGET;
pub const CRMFLAG_WRITTENDURINGPREPARE = CRMFLAGS.WRITTENDURINGPREPARE;
pub const CRMFLAG_WRITTENDURINGCOMMIT = CRMFLAGS.WRITTENDURINGCOMMIT;
pub const CRMFLAG_WRITTENDURINGABORT = CRMFLAGS.WRITTENDURINGABORT;
pub const CRMFLAG_WRITTENDURINGRECOVERY = CRMFLAGS.WRITTENDURINGRECOVERY;
pub const CRMFLAG_WRITTENDURINGREPLAY = CRMFLAGS.WRITTENDURINGREPLAY;
pub const CRMFLAG_REPLAYINPROGRESS = CRMFLAGS.REPLAYINPROGRESS;
pub const CRMREGFLAGS = enum(i32) {
PREPAREPHASE = 1,
COMMITPHASE = 2,
ABORTPHASE = 4,
ALLPHASES = 7,
FAILIFINDOUBTSREMAIN = 16,
};
pub const CRMREGFLAG_PREPAREPHASE = CRMREGFLAGS.PREPAREPHASE;
pub const CRMREGFLAG_COMMITPHASE = CRMREGFLAGS.COMMITPHASE;
pub const CRMREGFLAG_ABORTPHASE = CRMREGFLAGS.ABORTPHASE;
pub const CRMREGFLAG_ALLPHASES = CRMREGFLAGS.ALLPHASES;
pub const CRMREGFLAG_FAILIFINDOUBTSREMAIN = CRMREGFLAGS.FAILIFINDOUBTSREMAIN;
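// Added check (sketch): CRMREGFLAG_ALLPHASES is the bitwise OR of the three phase
// flags, so registering for "all phases" equals combining them individually.
test "CRMREGFLAG_ALLPHASES combines the phase flags" {
    const all = @enumToInt(CRMREGFLAG_PREPAREPHASE) | @enumToInt(CRMREGFLAG_COMMITPHASE) | @enumToInt(CRMREGFLAG_ABORTPHASE);
    try @import("std").testing.expectEqual(@enumToInt(CRMREGFLAG_ALLPHASES), all);
}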
//--------------------------------------------------------------------------------
// Section: Functions (9)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "OLE32" fn CoGetDefaultContext(
aptType: APTTYPE,
riid: ?*const Guid,
ppv: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "comsvcs" fn CoCreateActivity(
pIUnknown: ?*IUnknown,
riid: ?*const Guid,
ppObj: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "comsvcs" fn CoEnterServiceDomain(
pConfigObject: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "comsvcs" fn CoLeaveServiceDomain(
pUnkStatus: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "comsvcs" fn GetManagedExtensions(
dwExts: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows5.0'
pub extern "comsvcs" fn SafeRef(
rid: ?*const Guid,
pUnk: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) ?*anyopaque;
// TODO: this type is limited to platform 'windows5.0'
pub extern "comsvcs" fn RecycleSurrogate(
lReasonCode: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows5.0'
pub extern "comsvcs" fn MTSCreateActivity(
riid: ?*const Guid,
ppobj: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows5.0'
pub extern "MTxDM" fn GetDispenserManager(
param0: ?*?*IDispenserManager,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
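// Usage sketch (added example, assumption: the process links against comsvcs.dll on
// Windows): the extern declarations above are callable like plain Zig functions.
// GetManagedExtensions is the simplest, taking only an out-parameter.
fn queryManagedExtensions() ?u32 {
    var exts: u32 = 0;
    const hr = GetManagedExtensions(&exts);
    if (hr < 0) return null; // a negative HRESULT signals failure
    return exts;
}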
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (17)
//--------------------------------------------------------------------------------
const Guid = @import("../zig.zig").Guid;
const APTTYPE = @import("../system/com.zig").APTTYPE;
const BLOB = @import("../system/com.zig").BLOB;
const BOOL = @import("../foundation.zig").BOOL;
const BSTR = @import("../foundation.zig").BSTR;
const FILETIME = @import("../foundation.zig").FILETIME;
const HRESULT = @import("../foundation.zig").HRESULT;
const IClassFactory = @import("../system/com.zig").IClassFactory;
const IDispatch = @import("../system/com.zig").IDispatch;
const ITransaction = @import("../system/distributed_transaction_coordinator.zig").ITransaction;
const ITransactionVoterBallotAsync2 = @import("../system/distributed_transaction_coordinator.zig").ITransactionVoterBallotAsync2;
const ITransactionVoterNotifyAsync2 = @import("../system/distributed_transaction_coordinator.zig").ITransactionVoterNotifyAsync2;
const IUnknown = @import("../system/com.zig").IUnknown;
const PSID = @import("../foundation.zig").PSID;
const PWSTR = @import("../foundation.zig").PWSTR;
const SAFEARRAY = @import("../system/com.zig").SAFEARRAY;
const VARIANT = @import("../system/com.zig").VARIANT;
test {
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | win32/system/component_services.zig |
const idt = @import("idt.zig");
// Interrupt Service Routines defined externally in assembly.
extern fn isr0() void;
extern fn isr1() void;
extern fn isr2() void;
extern fn isr3() void;
extern fn isr4() void;
extern fn isr5() void;
extern fn isr6() void;
extern fn isr7() void;
extern fn isr8() void;
extern fn isr9() void;
extern fn isr10() void;
extern fn isr11() void;
extern fn isr12() void;
extern fn isr13() void;
extern fn isr14() void;
extern fn isr15() void;
extern fn isr16() void;
extern fn isr17() void;
extern fn isr18() void;
extern fn isr19() void;
extern fn isr20() void;
extern fn isr21() void;
extern fn isr22() void;
extern fn isr23() void;
extern fn isr24() void;
extern fn isr25() void;
extern fn isr26() void;
extern fn isr27() void;
extern fn isr28() void;
extern fn isr29() void;
extern fn isr30() void;
extern fn isr31() void;
extern fn isr32() void;
extern fn isr33() void;
extern fn isr34() void;
extern fn isr35() void;
extern fn isr36() void;
extern fn isr37() void;
extern fn isr38() void;
extern fn isr39() void;
extern fn isr40() void;
extern fn isr41() void;
extern fn isr42() void;
extern fn isr43() void;
extern fn isr44() void;
extern fn isr45() void;
extern fn isr46() void;
extern fn isr47() void;
extern fn isr128() void;
////
// Install the CPU exception handlers (ISRs 0-31) in the IDT.
//
pub fn install_exceptions() void {
// Exceptions.
idt.setGate(0, idt.InterruptGateFlags, isr0);
idt.setGate(1, idt.InterruptGateFlags, isr1);
idt.setGate(2, idt.InterruptGateFlags, isr2);
idt.setGate(3, idt.InterruptGateFlags, isr3);
idt.setGate(4, idt.InterruptGateFlags, isr4);
idt.setGate(5, idt.InterruptGateFlags, isr5);
idt.setGate(6, idt.InterruptGateFlags, isr6);
idt.setGate(7, idt.InterruptGateFlags, isr7);
idt.setGate(8, idt.InterruptGateFlags, isr8);
idt.setGate(9, idt.InterruptGateFlags, isr9);
idt.setGate(10, idt.InterruptGateFlags, isr10);
idt.setGate(11, idt.InterruptGateFlags, isr11);
idt.setGate(12, idt.InterruptGateFlags, isr12);
idt.setGate(13, idt.InterruptGateFlags, isr13);
idt.setGate(14, idt.InterruptGateFlags, isr14);
idt.setGate(15, idt.InterruptGateFlags, isr15);
idt.setGate(16, idt.InterruptGateFlags, isr16);
idt.setGate(17, idt.InterruptGateFlags, isr17);
idt.setGate(18, idt.InterruptGateFlags, isr18);
idt.setGate(19, idt.InterruptGateFlags, isr19);
idt.setGate(20, idt.InterruptGateFlags, isr20);
idt.setGate(21, idt.InterruptGateFlags, isr21);
idt.setGate(22, idt.InterruptGateFlags, isr22);
idt.setGate(23, idt.InterruptGateFlags, isr23);
idt.setGate(24, idt.InterruptGateFlags, isr24);
idt.setGate(25, idt.InterruptGateFlags, isr25);
idt.setGate(26, idt.InterruptGateFlags, isr26);
idt.setGate(27, idt.InterruptGateFlags, isr27);
idt.setGate(28, idt.InterruptGateFlags, isr28);
idt.setGate(29, idt.InterruptGateFlags, isr29);
idt.setGate(30, idt.InterruptGateFlags, isr30);
idt.setGate(31, idt.InterruptGateFlags, isr31);
}
////
// Install the hardware IRQ handlers (ISRs 32-47) in the IDT.
//
pub fn install_irqs() void {
idt.setGate(32, idt.InterruptGateFlags, isr32);
idt.setGate(33, idt.InterruptGateFlags, isr33);
idt.setGate(34, idt.InterruptGateFlags, isr34);
idt.setGate(35, idt.InterruptGateFlags, isr35);
idt.setGate(36, idt.InterruptGateFlags, isr36);
idt.setGate(37, idt.InterruptGateFlags, isr37);
idt.setGate(38, idt.InterruptGateFlags, isr38);
idt.setGate(39, idt.InterruptGateFlags, isr39);
idt.setGate(40, idt.InterruptGateFlags, isr40);
idt.setGate(41, idt.InterruptGateFlags, isr41);
idt.setGate(42, idt.InterruptGateFlags, isr42);
idt.setGate(43, idt.InterruptGateFlags, isr43);
idt.setGate(44, idt.InterruptGateFlags, isr44);
idt.setGate(45, idt.InterruptGateFlags, isr45);
idt.setGate(46, idt.InterruptGateFlags, isr46);
idt.setGate(47, idt.InterruptGateFlags, isr47);
}
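////
// Convenience sketch (added example, not in the original file): install every handler
// group in one call. Assumes the IDT itself has already been initialized and loaded by
// the idt module; install_syscalls is defined just below.
//
pub fn install_all() void {
    install_exceptions();
    install_irqs();
    install_syscalls();
}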
////
// Install the syscall handler (ISR 128, i.e. int 0x80) in the IDT.
//
pub fn install_syscalls() void {
idt.setGate(128, idt.SYSCALL_GATE, isr128);
} | src/kernel/arch/x86/isr.zig |
const std = @import("std");
const lsp = @import("lsp");
pub fn main() !void {
comptime @setEvalBranchQuota(10_000);
const allocator = std.heap.page_allocator;
var server = try lsp.Server.init(allocator);
while (true) {
var message = try server.readMessage();
defer server.flushArena();
std.debug.print("{s}\n", .{message});
switch (message) {
.notification => |notification| {
switch (notification.params) {
.initialized => {
std.log.info("Successfully initialized!", .{});
try server.notify(.{
.show_message = .{
.@"type" = .info,
.message = "hello but from lsp!",
},
});
},
.did_open => |open| {
std.log.info("{s}!", .{open});
},
.did_change => |change| {
std.log.info("{s}", .{change.contentChanges[0].full.text});
},
else => @panic("NO!"),
}
},
.request => |request| {
switch (request.params) {
.initialize => |init| {
server.processInitialize(init);
if (init.capabilities.isSupported(.show_message_action_item)) {
// try server.
// TODO: Make requests serializable
}
try server.respond(request, .{
.initialize_result = .{
.offsetEncoding = if (server.offset_encoding == .utf8)
@as([]const u8, "utf-8")
else
"utf-16",
.serverInfo = .{
.name = "zls",
.version = "0.1.0",
},
.capabilities = .{
.signatureHelpProvider = .{
.triggerCharacters = &.{"("},
.retriggerCharacters = &.{","},
},
.textDocumentSync = .full,
.renameProvider = false,
.completionProvider = .{
.resolveProvider = false,
.triggerCharacters = &[_][]const u8{ ".", ":", "@" },
},
.documentHighlightProvider = false,
.hoverProvider = false,
.codeActionProvider = false,
.declarationProvider = false,
.definitionProvider = false,
.typeDefinitionProvider = false,
.implementationProvider = false,
.referencesProvider = false,
.documentSymbolProvider = false,
.colorProvider = false,
.documentFormattingProvider = false,
.documentRangeFormattingProvider = false,
.foldingRangeProvider = false,
.selectionRangeProvider = false,
.workspaceSymbolProvider = false,
.rangeProvider = false,
.documentProvider = false,
.workspace = .{
.workspaceFolders = .{
.supported = false,
.changeNotifications = false,
},
},
.semanticTokensProvider = null,
},
},
});
},
.completion => |comp| {
_ = comp;
try server.respond(request, .{
.completion = .{
.completion_list = .{
.isIncomplete = false,
.items = &[1]lsp.types.language_features.CompletionItem{
.{
                                            .label = "Joe mama",
.kind = .text,
.textEdit = null,
.filterText = null,
                                            .insertText = "Joe mama",
.insertTextFormat = .plaintext,
                                            .detail = "Joe mama.",
.documentation = .{ .kind = .markdown, .value =
\\A clever name used to insult another individual's mother.
\\It is a play on words that refers to the saying, "Yo mama!"
\\
\\Person 1: "Where's Joe?"\
\\Victim 1: "Joe? ... Joe who?"\
\\Person 1: "<NAME>!"\
\\Victim 1: *Proceeds to feel insulted*
},
},
},
},
},
});
},
// else => {},
}
},
}
}
} | example/main.zig |
const c = @import("c.zig");
usingnamespace @import("wren.zig");
// Common
pub extern fn wrenGetVersionNumber(...) c_int;
pub extern fn wrenInitConfiguration(configuration: [*c]Configuration) void;
pub extern fn wrenNewVM(configuration: [*c]Configuration) ?*VM;
pub extern fn wrenFreeVM(vm: ?*VM) void;
pub extern fn wrenCollectGarbage(vm: ?*VM) void;
pub extern fn wrenInterpret(vm: ?*VM, module: [*c]const u8, source: [*c]const u8) InterpretResult;
pub extern fn wrenMakeCallHandle(vm: ?*VM, signature: [*c]const u8) ?*Handle;
pub extern fn wrenCall(vm: ?*VM, method: ?*Handle) InterpretResult;
pub extern fn wrenReleaseHandle(vm: ?*VM, handle: ?*Handle) void;
pub extern fn wrenGetSlotCount(vm: ?*VM) c_int;
pub extern fn wrenEnsureSlots(vm: ?*VM, numSlots: c_int) void;
pub extern fn wrenGetSlotType(vm: ?*VM, slot: c_int) Type;
pub extern fn wrenGetSlotBool(vm: ?*VM, slot: c_int) bool;
pub extern fn wrenGetSlotBytes(vm: ?*VM, slot: c_int, length: [*c]c_int) [*c]const u8;
pub extern fn wrenGetSlotDouble(vm: ?*VM, slot: c_int) f64;
pub extern fn wrenGetSlotForeign(vm: ?*VM, slot: c_int) ?*c_void;
pub extern fn wrenGetSlotString(vm: ?*VM, slot: c_int) [*c]const u8;
pub extern fn wrenGetSlotHandle(vm: ?*VM, slot: c_int) ?*Handle;
pub extern fn wrenSetSlotBool(vm: ?*VM, slot: c_int, value: bool) void;
pub extern fn wrenSetSlotBytes(vm: ?*VM, slot: c_int, bytes: [*c]const u8, length: usize) void;
pub extern fn wrenSetSlotDouble(vm: ?*VM, slot: c_int, value: f64) void;
pub extern fn wrenSetSlotNewForeign(vm: ?*VM, slot: c_int, classSlot: c_int, size: usize) ?*c_void;
pub extern fn wrenSetSlotNewList(vm: ?*VM, slot: c_int) void;
pub extern fn wrenSetSlotNewMap(vm: ?*VM, slot: c_int) void;
pub extern fn wrenSetSlotNull(vm: ?*VM, slot: c_int) void;
pub extern fn wrenSetSlotString(vm: ?*VM, slot: c_int, text: [*c]const u8) void;
pub extern fn wrenSetSlotHandle(vm: ?*VM, slot: c_int, handle: ?*Handle) void;
pub extern fn wrenGetListCount(vm: ?*VM, slot: c_int) c_int;
pub extern fn wrenGetListElement(vm: ?*VM, listSlot: c_int, index: c_int, elementSlot: c_int) void;
pub extern fn wrenSetListElement(vm: ?*VM, listSlot: c_int, index: c_int, elementSlot: c_int) void;
pub extern fn wrenInsertInList(vm: ?*VM, listSlot: c_int, index: c_int, elementSlot: c_int) void;
pub extern fn wrenGetMapCount(vm: ?*VM, slot: c_int) c_int;
pub extern fn wrenGetMapContainsKey(vm: ?*VM, mapSlot: c_int, keySlot: c_int) bool;
pub extern fn wrenGetMapElement(vm: ?*VM, mapSlot: c_int, index: c_int, keySlot: c_int, valueSlot: c_int) void;
pub extern fn wrenGetMapValue(vm: ?*VM, mapSlot: c_int, keySlot: c_int, valueSlot: c_int) void;
pub extern fn wrenSetMapValue(vm: ?*VM, mapSlot: c_int, keySlot: c_int, valueSlot: c_int) void;
pub extern fn wrenRemoveMapValue(vm: ?*VM, mapSlot: c_int, keySlot: c_int, removedValueSlot: c_int) void;
pub extern fn wrenGetVariable(vm: ?*VM, module: [*c]const u8, name: [*c]const u8, slot: c_int) void;
pub extern fn wrenHasVariable(vm: ?*VM, module: [*c]const u8, name: [*c]const u8) bool;
pub extern fn wrenHasModule(vm: ?*VM, module: [*c]const u8) bool;
pub extern fn wrenAbortFiber(vm: ?*VM, slot: c_int) void;
pub extern fn wrenGetUserData(vm: ?*VM) ?*c_void;
pub extern fn wrenSetUserData(vm: ?*VM, userData: ?*c_void) void;
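// A minimal usage sketch of the bindings above (illustrative only; the `exampleRun`
// name and the "main" module name are choices of this sketch, not part of the Wren C API):
// configure a VM with defaults, interpret a source string, and free the VM afterwards.
pub fn exampleRun(source: [*c]const u8) InterpretResult {
var config: Configuration = undefined;
wrenInitConfiguration(&config);
const vm = wrenNewVM(&config);
defer wrenFreeVM(vm);
return wrenInterpret(vm, "main", source);
}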
// Meta extension
pub extern fn wrenMetaSource() [*c]const u8;
pub extern fn wrenMetaBindForeignMethod(vm: ?*VM, className: [*c]const u8, isStatic: bool, signature: [*c]const u8) ForeignMethodFnC;
// Random extension
pub extern fn wrenRandomSource() [*c]const u8;
pub extern fn wrenRandomBindForeignClass(vm: ?*VM, module: [*c]const u8, className: [*c]const u8) ForeignClassMethods;
pub extern fn wrenRandomBindForeignMethod(vm: ?*VM, className: [*c]const u8, isStatic: bool, signature: [*c]const u8) ForeignMethodFnC; | src/externs.zig |
pub const addr = 0x03;
pub const deref = 0x06;
pub const const1u = 0x08;
pub const const1s = 0x09;
pub const const2u = 0x0a;
pub const const2s = 0x0b;
pub const const4u = 0x0c;
pub const const4s = 0x0d;
pub const const8u = 0x0e;
pub const const8s = 0x0f;
pub const constu = 0x10;
pub const consts = 0x11;
pub const dup = 0x12;
pub const drop = 0x13;
pub const over = 0x14;
pub const pick = 0x15;
pub const swap = 0x16;
pub const rot = 0x17;
pub const xderef = 0x18;
pub const abs = 0x19;
pub const @"and" = 0x1a;
pub const div = 0x1b;
pub const minus = 0x1c;
pub const mod = 0x1d;
pub const mul = 0x1e;
pub const neg = 0x1f;
pub const not = 0x20;
pub const @"or" = 0x21;
pub const plus = 0x22;
pub const plus_uconst = 0x23;
pub const shl = 0x24;
pub const shr = 0x25;
pub const shra = 0x26;
pub const xor = 0x27;
pub const bra = 0x28;
pub const eq = 0x29;
pub const ge = 0x2a;
pub const gt = 0x2b;
pub const le = 0x2c;
pub const lt = 0x2d;
pub const ne = 0x2e;
pub const skip = 0x2f;
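// The lit0..lit31, reg0..reg31 and breg0..breg31 opcodes below encode their operand
// in the opcode itself: lit<n> = 0x30 + n, reg<n> = 0x50 + n, breg<n> = 0x70 + n.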
pub const lit0 = 0x30;
pub const lit1 = 0x31;
pub const lit2 = 0x32;
pub const lit3 = 0x33;
pub const lit4 = 0x34;
pub const lit5 = 0x35;
pub const lit6 = 0x36;
pub const lit7 = 0x37;
pub const lit8 = 0x38;
pub const lit9 = 0x39;
pub const lit10 = 0x3a;
pub const lit11 = 0x3b;
pub const lit12 = 0x3c;
pub const lit13 = 0x3d;
pub const lit14 = 0x3e;
pub const lit15 = 0x3f;
pub const lit16 = 0x40;
pub const lit17 = 0x41;
pub const lit18 = 0x42;
pub const lit19 = 0x43;
pub const lit20 = 0x44;
pub const lit21 = 0x45;
pub const lit22 = 0x46;
pub const lit23 = 0x47;
pub const lit24 = 0x48;
pub const lit25 = 0x49;
pub const lit26 = 0x4a;
pub const lit27 = 0x4b;
pub const lit28 = 0x4c;
pub const lit29 = 0x4d;
pub const lit30 = 0x4e;
pub const lit31 = 0x4f;
pub const reg0 = 0x50;
pub const reg1 = 0x51;
pub const reg2 = 0x52;
pub const reg3 = 0x53;
pub const reg4 = 0x54;
pub const reg5 = 0x55;
pub const reg6 = 0x56;
pub const reg7 = 0x57;
pub const reg8 = 0x58;
pub const reg9 = 0x59;
pub const reg10 = 0x5a;
pub const reg11 = 0x5b;
pub const reg12 = 0x5c;
pub const reg13 = 0x5d;
pub const reg14 = 0x5e;
pub const reg15 = 0x5f;
pub const reg16 = 0x60;
pub const reg17 = 0x61;
pub const reg18 = 0x62;
pub const reg19 = 0x63;
pub const reg20 = 0x64;
pub const reg21 = 0x65;
pub const reg22 = 0x66;
pub const reg23 = 0x67;
pub const reg24 = 0x68;
pub const reg25 = 0x69;
pub const reg26 = 0x6a;
pub const reg27 = 0x6b;
pub const reg28 = 0x6c;
pub const reg29 = 0x6d;
pub const reg30 = 0x6e;
pub const reg31 = 0x6f;
pub const breg0 = 0x70;
pub const breg1 = 0x71;
pub const breg2 = 0x72;
pub const breg3 = 0x73;
pub const breg4 = 0x74;
pub const breg5 = 0x75;
pub const breg6 = 0x76;
pub const breg7 = 0x77;
pub const breg8 = 0x78;
pub const breg9 = 0x79;
pub const breg10 = 0x7a;
pub const breg11 = 0x7b;
pub const breg12 = 0x7c;
pub const breg13 = 0x7d;
pub const breg14 = 0x7e;
pub const breg15 = 0x7f;
pub const breg16 = 0x80;
pub const breg17 = 0x81;
pub const breg18 = 0x82;
pub const breg19 = 0x83;
pub const breg20 = 0x84;
pub const breg21 = 0x85;
pub const breg22 = 0x86;
pub const breg23 = 0x87;
pub const breg24 = 0x88;
pub const breg25 = 0x89;
pub const breg26 = 0x8a;
pub const breg27 = 0x8b;
pub const breg28 = 0x8c;
pub const breg29 = 0x8d;
pub const breg30 = 0x8e;
pub const breg31 = 0x8f;
pub const regx = 0x90;
pub const fbreg = 0x91;
pub const bregx = 0x92;
pub const piece = 0x93;
pub const deref_size = 0x94;
pub const xderef_size = 0x95;
pub const nop = 0x96;
// DWARF 3 extensions.
pub const push_object_address = 0x97;
pub const call2 = 0x98;
pub const call4 = 0x99;
pub const call_ref = 0x9a;
pub const form_tls_address = 0x9b;
pub const call_frame_cfa = 0x9c;
pub const bit_piece = 0x9d;
// DWARF 4 extensions.
pub const implicit_value = 0x9e;
pub const stack_value = 0x9f;
// DWARF 5 extensions.
pub const implicit_pointer = 0xa0;
pub const addrx = 0xa1;
pub const constx = 0xa2;
pub const entry_value = 0xa3;
pub const const_type = 0xa4;
pub const regval_type = 0xa5;
pub const deref_type = 0xa6;
pub const xderef_type = 0xa7;
pub const convert = 0xa8;
pub const reinterpret = 0xa9;
pub const lo_user = 0xe0; // Implementation-defined range start.
pub const hi_user = 0xff; // Implementation-defined range end.
// GNU extensions.
pub const GNU_push_tls_address = 0xe0;
// The following is for marking variables that are uninitialized.
pub const GNU_uninit = 0xf0;
pub const GNU_encoded_addr = 0xf1;
// The GNU implicit pointer extension.
// See http://www.dwarfstd.org/ShowIssue.php?issue=100831.1&type=open .
pub const GNU_implicit_pointer = 0xf2;
// The GNU entry value extension.
// See http://www.dwarfstd.org/ShowIssue.php?issue=100909.1&type=open .
pub const GNU_entry_value = 0xf3;
// The GNU typed stack extension.
// See http://www.dwarfstd.org/doc/040408.1.html .
pub const GNU_const_type = 0xf4;
pub const GNU_regval_type = 0xf5;
pub const GNU_deref_type = 0xf6;
pub const GNU_convert = 0xf7;
pub const GNU_reinterpret = 0xf9;
// The GNU parameter ref extension.
pub const GNU_parameter_ref = 0xfa;
// Extension for Fission. See http://gcc.gnu.org/wiki/DebugFission.
pub const GNU_addr_index = 0xfb;
pub const GNU_const_index = 0xfc;
// HP extensions.
pub const HP_unknown = 0xe0; // Ouch, the same as GNU_push_tls_address.
pub const HP_is_value = 0xe1;
pub const HP_fltconst4 = 0xe2;
pub const HP_fltconst8 = 0xe3;
pub const HP_mod_range = 0xe4;
pub const HP_unmod_range = 0xe5;
pub const HP_tls = 0xe6;
// PGI (STMicroelectronics) extensions.
pub const PGI_omp_thread_num = 0xf8; | lib/std/dwarf/OP.zig |
const enums = @import("enums.zig");
pub const RootContext = struct {
const Self = @This();
// Implementations used by interfaces.
// Note that these are optional so we can have the "default" (nop) implementation.
/// onVmStart is called after the VM is created and _initialize is called.
/// During this call, hostcalls.getVmConfiguration is available and can be used to
/// retrieve the configuration set at vm_config.configuration in envoy.yaml
/// Note that onVmStart is called on only one RootContext:
/// a Wasm VM has a 1:N correspondence with RootContexts, and each RootContext
/// corresponds to one config.configuration entry, not to vm_config.configuration.
onVmStartImpl: ?fn (self: *Self, configuration_size: usize) bool = null,
/// onPluginStart is called after onVmStart and for each different plugin configurations.
/// During this call, hostcalls.getPluginConfiguration is available and can be used to
/// retrieve the configuration set at config.configuration in envoy.yaml
onPluginStartImpl: ?fn (self: *Self, configuration_size: usize) bool = null,
/// onPluginDone is called right before deinit is called.
/// Return false to indicate that it is still in a pending state with work left to do;
/// in that case, hostcalls.done must be called once the work is finished to invoke
/// deinit and the remaining cleanup in the host implementation.
onPluginDoneImpl: ?fn (self: *Self) bool = null,
/// onDelete is called when the host is deleting this context.
onDeleteImpl: ?fn (self: *Self) void = null,
/// newHttpContext is used for creating HttpContext for http filters.
/// Return null to indicate this RootContext is not for HTTP streams.
/// Deallocation of contexts created here should only be performed in HttpContext.onDelete.
newHttpContextImpl: ?fn (self: *Self, context_id: u32) ?*HttpContext = null,
/// newTcpContext is used for creating TcpContext for tcp filters.
/// Return null to indicate this RootContext is not for TCP streams.
/// Deallocation of contexts created here should only be performed in TcpContext.onDelete.
newTcpContextImpl: ?fn (self: *Self, context_id: u32) ?*TcpContext = null,
/// onQueueReady is called when the queue is ready after calling hostcalls.RegisterQueue.
/// Note that the queue is dequeued by another VM running in another thread, so it
/// may already be empty by the time onQueueReady is called.
onQueueReadyImpl: ?fn (self: *Self, queue_id: u32) void = null,
/// onTick is called periodically once the SetTickPeriod hostcall
/// has been called by this root context.
onTickImpl: ?fn (self: *Self) void = null,
/// onHttpCalloutResponse is called when a dispatched http call by hostcalls.dispatchHttpCall
/// has received a response.
onHttpCalloutResponseImpl: ?fn (self: *Self, callout_id: u32, num_headers: usize, body_size: usize, num_trailers: usize) void = null,
// The followings are only used by SDK internally. See state.zig.
pub fn onVmStart(self: *Self, configuration_size: usize) bool {
if (self.onVmStartImpl) |impl| {
return impl(self, configuration_size);
}
return true;
}
pub fn onPluginStart(self: *Self, configuration_size: usize) bool {
if (self.onPluginStartImpl) |impl| {
return impl(self, configuration_size);
}
return true;
}
pub fn onPluginDone(self: *Self) bool {
if (self.onPluginDoneImpl) |impl| {
return impl(self);
}
return true;
}
pub fn onDelete(self: *Self) void {
if (self.onDeleteImpl) |impl| {
return impl(self);
}
}
pub fn newTcpContext(self: *Self, context_id: u32) ?*TcpContext {
if (self.newTcpContextImpl) |impl| {
return impl(self, context_id);
}
return null;
}
pub fn newHttpContext(self: *Self, context_id: u32) ?*HttpContext {
if (self.newHttpContextImpl) |impl| {
return impl(self, context_id);
}
return null;
}
pub fn onQueueReady(self: *Self, queue_id: u32) void {
if (self.onQueueReadyImpl) |impl| {
impl(self, queue_id);
}
}
pub fn onTick(self: *Self) void {
if (self.onTickImpl) |impl| {
impl(self);
}
}
pub fn onHttpCalloutResponse(self: *Self, callout_id: u32, num_headers: usize, body_size: usize, num_trailers: usize) void {
if (self.onHttpCalloutResponseImpl) |impl| {
impl(self, callout_id, num_headers, body_size, num_trailers);
}
}
};
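// A minimal sketch (not part of the original SDK): with no overrides set, every
// callback falls back to the no-op default implemented above.
test "RootContext falls back to no-op defaults" {
var ctx = RootContext{};
if (!ctx.onVmStart(0)) unreachable;
if (!ctx.onPluginStart(0)) unreachable;
if (!ctx.onPluginDone()) unreachable;
if (ctx.newHttpContext(0) != null) unreachable;
}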
pub const TcpContext = struct {
const Self = @This();
// Implementations used by interfaces.
// Note that these types are optional so we can have the "default" (nop) implementation.
/// onNewConnection is called when the tcp connection is established between Down and Upstreams.
onNewConnectionImpl: ?fn (self: *Self) enums.Action = null,
/// onDownstreamData is called when a data frame arrives from the downstream connection.
onDownstreamDataImpl: ?fn (self: *Self, data_size: usize, end_of_stream: bool) enums.Action = null,
/// onDownstreamClose is called when the downstream connection is closed.
onDownstreamCloseImpl: ?fn (self: *Self, peer_type: enums.PeerType) void = null,
/// onUpstreamData is called when a data frame arrives from the upstream connection.
onUpstreamDataImpl: ?fn (self: *Self, data_size: usize, end_of_stream: bool) enums.Action = null,
/// onUpstreamClose is called when the upstream connection is closed.
onUpstreamCloseImpl: ?fn (self: *Self, peer_type: enums.PeerType) void = null,
/// onLog is called before the host calls onDelete.
/// You can retrieve the stream information (such as remote addresses, etc.) during this call.
/// Can be used for implementing a logging feature.
onLogImpl: ?fn (self: *Self) void = null,
/// onDelete is called when the host is deleting this context.
onDeleteImpl: ?fn (self: *Self) void = null,
/// onHttpCalloutResponse is called when a dispatched http call by hostcalls.dispatchHttpCall
/// has received a response.
onHttpCalloutResponseImpl: ?fn (self: *Self, callout_id: u32, num_headers: usize, body_size: usize, num_trailers: usize) void = null,
// The followings are only used by SDK internally. See state.zig.
pub fn onDownstreamData(self: *Self, data_size: usize, end_of_stream: bool) enums.Action {
if (self.onDownstreamDataImpl) |impl| {
return impl(self, data_size, end_of_stream);
}
return enums.Action.Continue;
}
pub fn onDownstreamClose(self: *Self, peer_type: enums.PeerType) void {
if (self.onDownstreamCloseImpl) |impl| {
impl(self, peer_type);
}
}
pub fn onNewConnection(self: *Self) enums.Action {
if (self.onNewConnectionImpl) |impl| {
return impl(self);
}
return enums.Action.Continue;
}
pub fn onUpstreamData(self: *Self, data_size: usize, end_of_stream: bool) enums.Action {
if (self.onUpstreamDataImpl) |impl| {
return impl(self, data_size, end_of_stream);
}
return enums.Action.Continue;
}
pub fn onUpstreamClose(self: *Self, peer_type: enums.PeerType) void {
if (self.onUpstreamCloseImpl) |impl| {
impl(self, peer_type);
}
}
pub fn onLog(self: *Self) void {
if (self.onLogImpl) |impl| {
impl(self);
}
}
pub fn onHttpCalloutResponse(self: *Self, callout_id: u32, num_headers: usize, body_size: usize, num_trailers: usize) void {
if (self.onHttpCalloutResponseImpl) |impl| {
impl(self, callout_id, num_headers, body_size, num_trailers);
}
}
pub fn onDelete(self: *Self) void {
if (self.onDeleteImpl) |impl| {
impl(self);
}
}
};
pub const HttpContext = struct {
const Self = @This();
// Implementations used by interfaces.
// Note that these types are optional so we can have the "default" (nop) implementation.
/// onHttpRequestHeaders is called when request headers arrive.
onHttpRequestHeadersImpl: ?fn (self: *Self, num_headers: usize, end_of_stream: bool) enums.Action = null,
/// onHttpRequestBody is called when a request body *frame* arrives.
/// Note that this may be called multiple times until end_of_stream = true is seen.
onHttpRequestBodyImpl: ?fn (self: *Self, body_size: usize, end_of_stream: bool) enums.Action = null,
/// onHttpRequestTrailers is called when request trailers arrive.
onHttpRequestTrailersImpl: ?fn (self: *Self, num_trailers: usize) enums.Action = null,
/// onHttpResponseHeaders is called when response headers arrive.
onHttpResponseHeadersImpl: ?fn (self: *Self, num_headers: usize, end_of_stream: bool) enums.Action = null,
/// onHttpResponseBody is called when a response body *frame* arrives.
/// Note that this is possibly called multiple times until we see end_of_stream = true,
onHttpResponseBodyImpl: ?fn (self: *Self, body_size: usize, end_of_stream: bool) enums.Action = null,
/// onHttpResponseTrailers is called when response trailers arrive.
onHttpResponseTrailersImpl: ?fn (self: *Self, num_trailers: usize) enums.Action = null,
/// onLog is called before the host calls onDelete.
/// You can retrieve the HTTP request/response information (such as headers, etc.) during this call.
/// Can be used for implementing a logging feature.
onLogImpl: ?fn (self: *Self) void = null,
/// onDelete is called when the host is deleting this context.
onDeleteImpl: ?fn (self: *Self) void = null,
/// onHttpCalloutResponse is called when a dispatched http call by hostcalls.dispatchHttpCall
/// has received a response.
onHttpCalloutResponseImpl: ?fn (self: *Self, callout_id: u32, num_headers: usize, body_size: usize, num_trailers: usize) void = null,
// The followings are only used by SDK internally. See state.zig.
pub fn onHttpRequestHeaders(self: *Self, num_headers: usize, end_of_stream: bool) enums.Action {
if (self.onHttpRequestHeadersImpl) |impl| {
return impl(self, num_headers, end_of_stream);
}
return enums.Action.Continue;
}
pub fn onHttpRequestBody(self: *Self, body_size: usize, end_of_stream: bool) enums.Action {
if (self.onHttpRequestBodyImpl) |impl| {
return impl(self, body_size, end_of_stream);
}
return enums.Action.Continue;
}
pub fn onHttpRequestTrailers(self: *Self, num_trailers: usize) enums.Action {
if (self.onHttpRequestTrailersImpl) |impl| {
return impl(self, num_trailers);
}
return enums.Action.Continue;
}
pub fn onHttpResponseHeaders(self: *Self, num_headers: usize, end_of_stream: bool) enums.Action {
if (self.onHttpResponseHeadersImpl) |impl| {
return impl(self, num_headers, end_of_stream);
}
return enums.Action.Continue;
}
pub fn onHttpResponseBody(self: *Self, body_size: usize, end_of_stream: bool) enums.Action {
if (self.onHttpResponseBodyImpl) |impl| {
return impl(self, body_size, end_of_stream);
}
return enums.Action.Continue;
}
pub fn onHttpResponseTrailers(self: *Self, num_trailers: usize) enums.Action {
if (self.onHttpResponseTrailersImpl) |impl| {
return impl(self, num_trailers);
}
return enums.Action.Continue;
}
pub fn onLog(self: *Self) void {
if (self.onLogImpl) |impl| {
impl(self);
}
}
pub fn onHttpCalloutResponse(self: *Self, callout_id: u32, num_headers: usize, body_size: usize, num_trailers: usize) void {
if (self.onHttpCalloutResponseImpl) |impl| {
impl(self, callout_id, num_headers, body_size, num_trailers);
}
}
pub fn onDelete(self: *Self) void {
if (self.onDeleteImpl) |impl| {
impl(self);
}
}
}; | lib/contexts.zig |
const aoc = @import("../aoc.zig");
const std = @import("std");
const BlackTiles = std.AutoHashMap(aoc.Coord, void);
const HexCoord = struct {
const NW = aoc.Coord.init(.{-1, -1});
const NE = aoc.Coord.init(.{-1, 1});
const SW = aoc.Coord.init(.{1, -1});
const SE = aoc.Coord.init(.{1, 1});
const W = aoc.Coord.init(.{0, -2});
const E = aoc.Coord.init(.{0, 2});
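// Doubled-width hex layout: an east/west step changes the column by 2, while each of the
// four diagonal steps changes row and column by 1, so every hex tile keeps a unique integer coordinate.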
underlying: aoc.Coord = aoc.PredefinedCoord.ORIGIN,
fn add(self: *const HexCoord, other: aoc.Coord) HexCoord {
return .{ .underlying = self.underlying.add(other) };
}
fn mutAdd(self: *HexCoord, other: aoc.Coord) void {
self.underlying.mutAdd(other);
}
pub fn neighbors(self: *const HexCoord, _: bool) NeighborIterator {
return NeighborIterator.init(self.*);
}
const NeighborIterator = struct {
idx: u8 = 0,
neighbors: [6]HexCoord,
fn init(center: HexCoord) NeighborIterator {
return .{
.neighbors = [_]HexCoord {
center.add(NW),
center.add(NE),
center.add(SW),
center.add(SE),
center.add(W),
center.add(E),
}
};
}
pub fn next(self: *NeighborIterator) ?HexCoord {
if (self.idx == self.neighbors.len) {
return null;
}
const res = self.neighbors[self.idx];
self.idx += 1;
return res;
}
};
};
pub fn run(problem: *aoc.Problem) !aoc.Solution {
var conway = aoc.Conway(HexCoord).init(problem.allocator);
defer conway.deinit();
while (problem.line()) |line| {
var coord = HexCoord {};
var idx: usize = 0;
while (idx < line.len) : (idx += 1) {
const delta = switch (line[idx]) {
'n' => blk: {
idx += 1;
break :blk switch (line[idx]) {
'w' => HexCoord.NW,
'e' => HexCoord.NE,
else => unreachable
};
},
's' => blk: {
idx += 1;
break :blk switch (line[idx]) {
'w' => HexCoord.SW,
'e' => HexCoord.SE,
else => unreachable
};
},
'w' => HexCoord.W,
'e' => HexCoord.E,
else => unreachable
};
coord.mutAdd(delta);
}
if (!conway.active_spots.remove(coord)) {
try conway.active_spots.put(coord, {});
}
}
const solution1 = conway.active_spots.count();
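// Part 2: run the hex cellular automaton for 100 days. A black (active) tile stays black
// with one or two black neighbors; a white tile becomes black with exactly two.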
var cycle: u8 = 0;
while (cycle < 100) : (cycle += 1) {
var iter = conway.stepIterator();
defer iter.deinit();
while (try iter.next()) {
try iter.setActive(
(iter.active and !(iter.active_neighbors == 0 or iter.active_neighbors > 2)) or
(!iter.active and iter.active_neighbors == 2)
);
}
}
const solution2 = conway.active_spots.count();
return problem.solution(solution1, solution2);
} | src/main/zig/2020/day24.zig |
const std = @import("std");
const ptk = @import("parser-toolkit");
const matchers = ptk.matchers;
const Allocator = std.mem.Allocator;
pub const Parser = struct {
core: ParserCore,
source: []const u8,
allocator: Allocator,
const TokenType = enum {
number,
identifier,
whitespace,
linefeed,
double_quoted_string,
@"(",
@")",
@",",
@"+",
@";",
define,
char,
};
const Pattern = ptk.Pattern(TokenType);
const Tokenizer = ptk.Tokenizer(TokenType, &[_]Pattern{
Pattern.create(.number, matchers.sequenceOf(.{ matchers.decimalNumber })),
Pattern.create(.number, matchers.sequenceOf(.{ matchers.decimalNumber, matchers.literal("."), matchers.decimalNumber })),
Pattern.create(.identifier, matchers.identifier),
Pattern.create(.linefeed, matchers.linefeed),
Pattern.create(.whitespace, matchers.whitespace),
Pattern.create(.@"+", matchers.literal("+")),
Pattern.create(.@"(", matchers.literal("(")),
Pattern.create(.@")", matchers.literal(")")),
Pattern.create(.@",", matchers.literal(",")),
Pattern.create(.@";", matchers.literal(";")),
Pattern.create(.define, matchers.literal("=")),
Pattern.create(.double_quoted_string, matchers.sequenceOf(.{
matchers.literal("\""),
matchers.takeNoneOf("\"\r\n"),
matchers.literal("\""),
})),
});
const ParserCore = ptk.ParserCore(Tokenizer, .{ .whitespace });
const ruleset = ptk.RuleSet(TokenType);
pub fn parse(allocator: Allocator, block: []const u8, fileName: ?[]const u8) !Block {
var tokenizer = Tokenizer.init(block, fileName);
var parser = Parser {
.core = ParserCore.init(&tokenizer),
.source = block,
.allocator = allocator
};
const root = try parser.acceptBlock();
errdefer allocator.free(root);
if ((try parser.core.peek()) != null) {
const str = parser.core.tokenizer.source[parser.core.tokenizer.offset..];
std.log.info("remaining: {s}", .{ str });
return error.SyntaxError;
}
return root;
}
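// A small end-to-end sketch of driving this parser (illustrative; the `exampleParse`
// name and the hard-coded source string are assumptions of this sketch, not part of
// the grammar above): parse one block, then release the returned statements.
pub fn exampleParse(allocator: Allocator) !void {
const block = try parse(allocator, "print(\"hello\");\n", null);
defer {
for (block) |stat| stat.deinitAll(allocator);
allocator.free(block);
}
}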
const Error = ParserCore.Error || std.mem.Allocator.Error || std.fmt.ParseFloatError;
pub const FunctionCall = struct {
name: []const u8,
args: []Expression
};
pub const Statement = union(enum) {
SetLocal: struct {
name: []const u8,
value: Expression
},
FunctionCall: FunctionCall,
pub fn deinit(self: *const Statement, allocator: Allocator) void {
switch (self.*) {
.FunctionCall => |stat| {
// assume the function's arguments were freed beforehand
allocator.free(stat.args);
},
else => {}
}
}
pub fn deinitAll(self: *const Statement, allocator: Allocator) void {
switch (self.*) {
.FunctionCall => |stat| {
for (stat.args) |*arg| arg.deinit(allocator);
},
else => {}
}
self.deinit(allocator);
}
};
pub const Number = std.math.big.Rational;
pub const Block = []Statement;
pub const Expression = union(enum) {
// TODO: pointers to expression instead of number
Add: struct { lhs: *Expression, rhs: *Expression },
FunctionCall: FunctionCall,
Number: Number,
Local: []const u8,
StringLiteral: []const u8,
pub fn deinit(self: *Expression, allocator: Allocator) void {
switch (self.*) {
.Add => |expr| {
//expr.lhs.deinit(allocator);
allocator.destroy(expr.lhs);
//expr.rhs.deinit(allocator);
allocator.destroy(expr.rhs);
},
.FunctionCall => |expr| {
for (expr.args) |*arg| arg.deinit(allocator);
allocator.free(expr.args);
},
.Number => |*number| {
number.deinit();
},
else => {}
}
}
};
pub fn acceptBlock(self: *Parser) Error!Block {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
var statements = std.ArrayList(Statement).init(self.allocator);
errdefer {
for (statements.items) |stat| stat.deinitAll(self.allocator);
statements.deinit();
}
while (self.acceptStatement()) |optStat| {
if (optStat) |stat| {
try statements.append(stat);
}
} else |err| {
switch (err) {
error.EndOfStream => {},
else => return err
}
}
return statements.toOwnedSlice();
}
pub fn acceptStatement(self: *Parser) Error!?Statement {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
if (self.acceptFunctionCall()) |call| {
if (call) |result| {
_ = try self.core.accept(comptime ruleset.oneOf(.{ .linefeed, .@";" }));
return Statement { .FunctionCall = result };
} else |err| return err;
} else {
if (self.acceptLocalDefinition()) |expr| {
if (expr) |result| {
_ = try self.core.accept(comptime ruleset.oneOf(.{ .linefeed, .@";" }));
return result;
} else |err| return err;
} else {
// std.log.info("{}", .{ try self.core.peek() });
_ = try self.core.accept(comptime ruleset.oneOf(.{ .linefeed, .@";" }));
return null;
}
}
}
pub fn acceptLocalDefinition(self: *Parser) ?Error!Statement {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
const id = self.core.accept(comptime ruleset.is(.identifier)) catch {
self.core.restoreState(state); return null; };
_ = self.core.accept(comptime ruleset.is(.define)) catch {
self.core.restoreState(state); return null; };
const expr = try self.acceptExpression();
return Statement {
.SetLocal = .{
.name = id.text,
.value = expr
}
};
}
pub fn acceptFunctionCall(self: *Parser) ?Error!FunctionCall {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
const id = self.core.accept(comptime ruleset.is(.identifier)) catch {
self.core.restoreState(state); return null; };
_ = self.core.accept(comptime ruleset.is(.@"(")) catch {
self.core.restoreState(state); return null; };
var arguments = std.ArrayList(Expression).init(self.allocator);
errdefer {
for (arguments.items) |*arg| arg.deinit(self.allocator);
arguments.deinit();
}
var first = true;
while (true) {
if (!first) {
_ = self.core.accept(comptime ruleset.is(.@",")) catch |err| switch (err) {
error.UnexpectedToken => break,
else => return err
};
}
const arg = self.acceptExpression() catch |err| if (first) switch (err) {
error.UnexpectedToken => break,
else => return err
} else return err;
try arguments.append(arg);
first = false;
}
_ = try self.core.accept(comptime ruleset.is(.@")"));
return FunctionCall { .name = id.text, .args = arguments.toOwnedSlice() };
}
pub fn acceptExpression(self: *Parser) Error!Expression {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
if (self.acceptFunctionCall()) |call| {
return Expression { .FunctionCall = try call };
} else {
return try self.acceptAddExpression();
}
}
pub fn acceptAddExpression(self: *Parser) Error!Expression {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
const lhs = try self.acceptVarExpression();
if (self.core.accept(comptime ruleset.is(.@"+"))) |_| {
const rhs = try self.acceptVarExpression();
const lhsDupe = try self.allocator.create(Expression);
errdefer self.allocator.destroy(lhsDupe);
lhsDupe.* = lhs;
const rhsDupe = try self.allocator.create(Expression);
errdefer self.allocator.destroy(rhsDupe);
rhsDupe.* = rhs;
return Expression { .Add = .{
.lhs = lhsDupe,
.rhs = rhsDupe
}};
} else |_| {
return lhs;
}
}
pub fn acceptVarExpression(self: *Parser) Error!Expression {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
if (self.core.accept(comptime ruleset.is(.identifier))) |token| {
return Expression { .Local = token.text };
} else |_| {
if (self.acceptNumber()) |number| {
return Expression { .Number = number };
} else |_| {
return Expression { .StringLiteral = try self.acceptStringLiteral() };
}
}
}
pub fn acceptStringLiteral(self: *Parser) Error![]const u8 {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
const token = try self.core.accept(comptime ruleset.is(.double_quoted_string));
const literal = token.text[1..token.text.len-1];
return literal;
}
// TODO: convert number to rationals
pub fn acceptNumber(self: *Parser) Error!Number {
const state = self.core.saveState();
errdefer self.core.restoreState(state);
const token = try self.core.accept(comptime ruleset.is(.number));
var rational = try Number.init(self.allocator);
rational.setFloatString(token.text) catch unreachable;
return rational;
}
}; | src/parser.zig |
const std = @import("std");
const Context = @import("context.zig").Context;
const Source = @import("context.zig").Source;
const SourceLocation = @import("context.zig").SourceLocation;
const SourceRange = @import("context.zig").SourceRange;
const Token = @import("tokenize.zig").Token;
const TokenType = @import("tokenize.zig").TokenType;
const Tokenizer = @import("tokenize.zig").Tokenizer;
const fail = @import("fail.zig").fail;
const BuiltinEnum = @import("builtins.zig").BuiltinEnum;
const BuiltinPackage = @import("builtins.zig").BuiltinPackage;
const parsePrintModule = @import("parse_print.zig").parsePrintModule;
pub const Curve = struct {
points: []const CurvePoint,
};
pub const CurvePoint = struct {
t: NumberLiteral,
value: NumberLiteral,
};
pub const Track = struct {
params: []const ModuleParam,
notes: []const TrackNote,
};
pub const TrackNote = struct {
t: NumberLiteral,
args_source_range: SourceRange,
args: []const CallArg,
};
pub const ParamType = union(enum) {
boolean,
buffer,
constant,
constant_or_buffer,
curve,
one_of: BuiltinEnum,
};
pub const ModuleParam = struct {
name: []const u8,
param_type: ParamType,
};
pub const ParsedModuleInfo = struct {
scope: *Scope,
locals: []const Local,
};
pub const Module = struct {
params: []const ModuleParam,
builtin_name: ?[]const u8,
zig_package_name: ?[]const u8, // only set for builtin modules
info: ?ParsedModuleInfo, // null for builtin modules
};
pub const Scope = struct {
parent: ?*const Scope,
statements: std.ArrayList(Statement),
};
pub const CallArg = struct {
param_name: []const u8,
param_name_token: Token,
value: *const Expression,
};
pub const Call = struct {
field_expr: *const Expression, // should be a literal_module
args: []const CallArg,
};
pub const TrackCall = struct {
track_expr: *const Expression,
speed: *const Expression,
scope: *Scope,
};
pub const Delay = struct {
num_samples: usize,
scope: *Scope,
};
pub const UnArithOp = enum {
abs,
cos,
neg,
sin,
sqrt,
};
pub const UnArith = struct {
op: UnArithOp,
a: *const Expression,
};
pub const BinArithOp = enum {
add,
div,
max,
min,
mul,
pow,
sub,
};
pub const BinArith = struct {
op: BinArithOp,
a: *const Expression,
b: *const Expression,
};
pub const Local = struct {
name: []const u8,
};
pub const Global = struct {
name: []const u8,
value: *const Expression,
};
pub const NumberLiteral = struct {
value: f32,
// copy the number literal verbatim from the script so we don't get things
// like 0.7 becoming 0.699999988079071
verbatim: []const u8,
};
pub const EnumLiteral = struct {
label: []const u8,
payload: ?*const Expression,
};
pub const ExpressionInner = union(enum) {
call: Call,
track_call: TrackCall,
delay: Delay,
literal_boolean: bool,
literal_number: NumberLiteral,
literal_enum_value: EnumLiteral,
literal_curve: usize,
literal_track: usize,
literal_module: usize,
un_arith: UnArith,
bin_arith: BinArith,
local: usize, // index into flat `locals` array
feedback, // only allowed within `delay` expressions
name: Token, // a name that isn't a local, so it can't be resolved until codegen
};
pub const Expression = struct {
source_range: SourceRange,
inner: ExpressionInner,
};
pub const Statement = union(enum) {
let_assignment: struct { local_index: usize, expression: *const Expression },
output: *const Expression,
feedback: *const Expression,
};
const ParseState = struct {
arena_allocator: *std.mem.Allocator,
tokenizer: Tokenizer,
globals: std.ArrayList(Global),
enums: std.ArrayList(BuiltinEnum),
curves: std.ArrayList(Curve),
tracks: std.ArrayList(Track),
modules: std.ArrayList(Module),
};
const ParseModuleState = struct {
params: []const ModuleParam,
locals: std.ArrayList(Local),
};
const ParseContext = union(enum) {
global,
module: ParseContextModule,
};
const ParseContextModule = struct {
ps_mod: *ParseModuleState,
scope: *const Scope,
};
// names that you can't use for params or locals because they are builtin functions or constants
const reserved_names = [_][]const u8{
"abs",
"cos",
"max",
"min",
"pi",
"pow",
"sample_rate",
"sin",
"sqrt",
};
fn defineCurve(ps: *ParseState) !usize {
var points = std.ArrayList(CurvePoint).init(ps.arena_allocator);
var maybe_last_t: ?f32 = null;
while (true) {
const token = try ps.tokenizer.next();
switch (token.tt) {
.kw_end => break,
.number => |t| {
if (maybe_last_t) |last_t| {
if (t <= last_t) {
return fail(ps.tokenizer.ctx, token.source_range, "time value must be greater than the previous time value", .{});
}
}
maybe_last_t = t;
const value_token = try ps.tokenizer.next();
const value = switch (value_token.tt) {
.number => |v| v,
else => return ps.tokenizer.failExpected("number", value_token),
};
try points.append(.{
.t = .{ .value = t, .verbatim = ps.tokenizer.ctx.source.getString(token.source_range) },
.value = .{ .value = value, .verbatim = ps.tokenizer.ctx.source.getString(value_token.source_range) },
});
},
else => return ps.tokenizer.failExpected("number or `end`", token),
}
}
const curve_index = ps.curves.items.len;
try ps.curves.append(.{
.points = points.toOwnedSlice(),
});
return curve_index;
}
fn expectParamType(ps: *ParseState, for_track: bool) !ParamType {
const type_token = try ps.tokenizer.next();
const type_name = ps.tokenizer.ctx.source.getString(type_token.source_range);
if (type_token.tt != .name) {
return ps.tokenizer.failExpected("param type", type_token);
}
const param_type: ParamType = blk: {
if (std.mem.eql(u8, type_name, "boolean")) break :blk .boolean;
if (std.mem.eql(u8, type_name, "constant")) break :blk .constant;
if (std.mem.eql(u8, type_name, "waveform")) break :blk .buffer;
if (std.mem.eql(u8, type_name, "cob")) break :blk .constant_or_buffer;
if (std.mem.eql(u8, type_name, "curve")) break :blk .curve;
for (ps.enums.items) |e| {
if (std.mem.eql(u8, e.name, type_name)) {
break :blk ParamType{ .one_of = e };
}
}
return ps.tokenizer.failExpected("param type", type_token);
};
if (for_track and (param_type == .buffer or param_type == .constant_or_buffer)) {
return fail(ps.tokenizer.ctx, type_token.source_range, "track param cannot be cob or waveform", .{});
}
return param_type;
}
fn parseParamDeclarations(ps: *ParseState, params: *std.ArrayList(ModuleParam), for_track: bool) !void {
while (true) {
const token = try ps.tokenizer.next();
switch (token.tt) {
.kw_begin => break,
.name => {
const param_name = ps.tokenizer.ctx.source.getString(token.source_range);
for (reserved_names) |name| {
if (std.mem.eql(u8, name, param_name)) {
return fail(ps.tokenizer.ctx, token.source_range, "`<` is a reserved name", .{});
}
}
for (params.items) |param| {
if (std.mem.eql(u8, param.name, param_name)) {
return fail(ps.tokenizer.ctx, token.source_range, "redeclaration of param `<`", .{});
}
}
try ps.tokenizer.expectNext(.sym_colon);
const param_type = try expectParamType(ps, for_track);
try ps.tokenizer.expectNext(.sym_comma);
try params.append(.{
.name = param_name,
.param_type = param_type,
});
},
else => return ps.tokenizer.failExpected("param declaration or `begin`", token),
}
}
}
fn defineTrack(ps: *ParseState) !usize {
var params = std.ArrayList(ModuleParam).init(ps.arena_allocator);
try parseParamDeclarations(ps, ¶ms, true);
var notes = std.ArrayList(TrackNote).init(ps.arena_allocator);
var maybe_last_t: ?f32 = null;
while (true) {
const token = try ps.tokenizer.next();
switch (token.tt) {
.kw_end => break,
.number => |t| {
if (maybe_last_t) |last_t| {
if (t <= last_t) {
return fail(ps.tokenizer.ctx, token.source_range, "time value must be greater than the previous time value", .{});
}
}
maybe_last_t = t;
const loc0 = ps.tokenizer.loc; // FIXME - not perfect - includes whitespace before the `(`
const args = try parseCallArgs(ps, .global);
try notes.append(.{
.t = .{ .value = t, .verbatim = ps.tokenizer.ctx.source.getString(token.source_range) },
.args_source_range = .{ .loc0 = loc0, .loc1 = ps.tokenizer.loc },
.args = args,
});
},
else => return ps.tokenizer.failExpected("number or `end`", token),
}
}
const track_index = ps.tracks.items.len;
try ps.tracks.append(.{
.params = params.toOwnedSlice(),
.notes = notes.toOwnedSlice(),
});
return track_index;
}
fn defineModule(ps: *ParseState) !usize {
var params = std.ArrayList(ModuleParam).init(ps.arena_allocator);
// all modules have an implicitly declared param called "sample_rate"
try params.append(.{ .name = "sample_rate", .param_type = .constant });
try parseParamDeclarations(ps, ¶ms, false);
// parse paint block
var ps_mod: ParseModuleState = .{
.params = params.toOwnedSlice(),
.locals = std.ArrayList(Local).init(ps.arena_allocator),
};
const top_scope = try parseStatements(ps, &ps_mod, null);
const module_index = ps.modules.items.len;
// FIXME a zig compiler bug prevents me from doing this all in one literal
// (it compiles but then segfaults at runtime)
var module: Module = .{
.builtin_name = null,
.zig_package_name = null,
.params = ps_mod.params,
.info = null,
};
module.info = .{
.scope = top_scope,
.locals = ps_mod.locals.toOwnedSlice(),
};
try ps.modules.append(module);
return module_index;
}
const ParseError = error{
Failed,
OutOfMemory,
};
fn parseCallArgs(ps: *ParseState, pc: ParseContext) ![]const CallArg {
try ps.tokenizer.expectNext(.sym_left_paren);
var args = std.ArrayList(CallArg).init(ps.arena_allocator);
var token = try ps.tokenizer.next();
while (token.tt != .sym_right_paren) {
if (args.items.len > 0) {
if (token.tt != .sym_comma) {
return ps.tokenizer.failExpected("`,` or `)`", token);
}
token = try ps.tokenizer.next();
}
if (token.tt != .name) {
return ps.tokenizer.failExpected("callee param name", token);
}
const param_name = ps.tokenizer.ctx.source.getString(token.source_range);
const equals_token = try ps.tokenizer.next();
if (equals_token.tt == .sym_equals) {
try args.append(.{
.param_name = param_name,
.param_name_token = token,
.value = try expectExpression(ps, pc),
});
token = try ps.tokenizer.next();
} else {
switch (pc) {
.module => |pcm| {
// shorthand param passing: `val` expands to `val=val`
const subexpr = try createExprWithSourceRange(ps, token.source_range, resolveName(ps, pc, token));
try args.append(.{
.param_name = param_name,
.param_name_token = token,
.value = subexpr,
});
token = equals_token;
},
else => {},
}
}
}
return args.toOwnedSlice();
}
fn parseTrackCall(ps: *ParseState, pcm: ParseContextModule) ParseError!TrackCall {
const track_expr = try expectExpression(ps, .{ .module = pcm });
try ps.tokenizer.expectNext(.sym_comma);
const speed_expr = try expectExpression(ps, .{ .module = pcm });
try ps.tokenizer.expectNext(.kw_begin);
const inner_scope = try parseStatements(ps, pcm.ps_mod, pcm.scope);
return TrackCall{
.track_expr = track_expr,
.speed = speed_expr,
.scope = inner_scope,
};
}
fn parseDelay(ps: *ParseState, pcm: ParseContextModule) ParseError!Delay {
// constant number for the number of delay samples (this is a limitation of my current delay implementation)
const num_samples = blk: {
const token = try ps.tokenizer.next();
if (token.tt != .number) {
return ps.tokenizer.failExpected("number", token);
}
const s = ps.tokenizer.ctx.source.getString(token.source_range);
const n = std.fmt.parseInt(usize, s, 10) catch {
return fail(ps.tokenizer.ctx, token.source_range, "malformatted integer", .{});
};
break :blk n;
};
// keyword `begin`
try ps.tokenizer.expectNext(.kw_begin);
// inner statements
const inner_scope = try parseStatements(ps, pcm.ps_mod, pcm.scope);
return Delay{
.num_samples = num_samples,
.scope = inner_scope,
};
}
fn createExprWithSourceRange(ps: *ParseState, source_range: SourceRange, inner: ExpressionInner) !*const Expression {
const expr = try ps.arena_allocator.create(Expression);
expr.* = .{
.source_range = source_range,
.inner = inner,
};
return expr;
}
fn createExpr(ps: *ParseState, loc0: SourceLocation, inner: ExpressionInner) !*const Expression {
// you pass the location of the start of the expression. this function will use the tokenizer's
// current location to set the expression's end location
return createExprWithSourceRange(ps, .{ .loc0 = loc0, .loc1 = ps.tokenizer.loc }, inner);
}
fn resolveName(ps: *ParseState, pc: ParseContext, token: Token) ExpressionInner {
// it's either a local...
switch (pc) {
.global => {},
.module => |pcm| {
const name = ps.tokenizer.ctx.source.getString(token.source_range);
var maybe_s: ?*const Scope = pcm.scope;
while (maybe_s) |sc| : (maybe_s = sc.parent) {
for (sc.statements.items) |statement| {
switch (statement) {
.let_assignment => |x| {
if (std.mem.eql(u8, pcm.ps_mod.locals.items[x.local_index].name, name)) {
return ExpressionInner{ .local = x.local_index };
}
},
else => {},
}
}
}
},
}
// ...or a name that will be resolved during codegen (param or global)
return .{ .name = token };
}
const BinaryOperator = struct {
symbol: TokenType,
priority: usize,
op: BinArithOp,
};
const binary_operators = [_]BinaryOperator{
.{ .symbol = .sym_plus, .priority = 1, .op = .add },
.{ .symbol = .sym_minus, .priority = 1, .op = .sub },
.{ .symbol = .sym_asterisk, .priority = 2, .op = .mul },
.{ .symbol = .sym_slash, .priority = 2, .op = .div },
};
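// `+` and `-` bind looser (priority 1) than `*` and `/` (priority 2); expectExpression2
// below climbs these priorities to get the usual arithmetic precedence.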
fn expectExpression(ps: *ParseState, pc: ParseContext) ParseError!*const Expression {
return expectExpression2(ps, pc, 0);
}
fn expectExpression2(ps: *ParseState, pc: ParseContext, priority: usize) ParseError!*const Expression {
var negate = false;
if ((try ps.tokenizer.peek()).tt == .sym_minus) {
_ = try ps.tokenizer.next(); // skip the peeked token
negate = true;
}
var a = try expectTerm(ps, pc);
const loc0 = a.source_range.loc0;
if ((try ps.tokenizer.peek()).tt == .sym_left_paren) {
switch (pc) {
.module => |pcm| {
const args = try parseCallArgs(ps, .{ .module = pcm });
a = try createExpr(ps, loc0, .{ .call = .{ .field_expr = a, .args = args } });
},
else => return fail(ps.tokenizer.ctx, a.source_range, "not a function", .{}),
}
}
if (negate) {
a = try createExpr(ps, loc0, .{ .un_arith = .{ .op = .neg, .a = a } });
}
while (true) {
const token = try ps.tokenizer.peek();
for (binary_operators) |bo| {
const T = @TagType(TokenType);
if (@as(T, token.tt) == @as(T, bo.symbol) and priority < bo.priority) {
_ = try ps.tokenizer.next(); // skip the peeked token
const b = try expectExpression2(ps, pc, bo.priority);
a = try createExpr(ps, loc0, .{ .bin_arith = .{ .op = bo.op, .a = a, .b = b } });
break;
}
} else {
break;
}
}
return a;
}
fn parseUnaryFunction(ps: *ParseState, pc: ParseContext, loc0: SourceLocation, op: UnArithOp) !*const Expression {
try ps.tokenizer.expectNext(.sym_left_paren);
const a = try expectExpression(ps, pc);
try ps.tokenizer.expectNext(.sym_right_paren);
return try createExpr(ps, loc0, .{ .un_arith = .{ .op = op, .a = a } });
}
fn parseBinaryFunction(ps: *ParseState, pc: ParseContext, loc0: SourceLocation, op: BinArithOp) !*const Expression {
try ps.tokenizer.expectNext(.sym_left_paren);
const a = try expectExpression(ps, pc);
try ps.tokenizer.expectNext(.sym_comma);
const b = try expectExpression(ps, pc);
try ps.tokenizer.expectNext(.sym_right_paren);
return try createExpr(ps, loc0, .{ .bin_arith = .{ .op = op, .a = a, .b = b } });
}
fn expectTerm(ps: *ParseState, pc: ParseContext) ParseError!*const Expression {
const token = try ps.tokenizer.next();
const loc0 = token.source_range.loc0;
switch (token.tt) {
.sym_left_paren => {
const a = try expectExpression(ps, pc);
try ps.tokenizer.expectNext(.sym_right_paren);
return a;
},
.kw_defmodule => {
const module_index = try defineModule(ps);
return try createExpr(ps, loc0, .{ .literal_module = module_index });
},
.kw_defcurve => {
const curve_index = try defineCurve(ps);
return try createExpr(ps, loc0, .{ .literal_curve = curve_index });
},
.kw_deftrack => {
const track_index = try defineTrack(ps);
return try createExpr(ps, loc0, .{ .literal_track = track_index });
},
.kw_from => {
switch (pc) {
.module => |pcm| {
const track_call = try parseTrackCall(ps, pcm);
return try createExpr(ps, loc0, .{ .track_call = track_call });
},
else => return fail(ps.tokenizer.ctx, token.source_range, "cannot call track outside of module context", .{}),
}
},
.name => {
const s = ps.tokenizer.ctx.source.getString(token.source_range);
// this list of builtins corresponds to the `reserved_names` list
if (std.mem.eql(u8, s, "abs")) {
return parseUnaryFunction(ps, pc, loc0, .abs);
} else if (std.mem.eql(u8, s, "cos")) {
return parseUnaryFunction(ps, pc, loc0, .cos);
} else if (std.mem.eql(u8, s, "max")) {
return parseBinaryFunction(ps, pc, loc0, .max);
} else if (std.mem.eql(u8, s, "min")) {
return parseBinaryFunction(ps, pc, loc0, .min);
} else if (std.mem.eql(u8, s, "pi")) {
return try createExpr(ps, loc0, .{ .literal_number = .{ .value = std.math.pi, .verbatim = "std.math.pi" } });
} else if (std.mem.eql(u8, s, "pow")) {
return parseBinaryFunction(ps, pc, loc0, .pow);
} else if (std.mem.eql(u8, s, "sin")) {
return parseUnaryFunction(ps, pc, loc0, .sin);
} else if (std.mem.eql(u8, s, "sqrt")) {
return parseUnaryFunction(ps, pc, loc0, .sqrt);
}
return try createExpr(ps, loc0, resolveName(ps, pc, token));
},
.kw_false => {
return try createExpr(ps, loc0, .{ .literal_boolean = false });
},
.kw_true => {
return try createExpr(ps, loc0, .{ .literal_boolean = true });
},
.number => |n| {
return try createExpr(ps, loc0, .{
.literal_number = .{
.value = n,
.verbatim = ps.tokenizer.ctx.source.getString(token.source_range),
},
});
},
.enum_value => {
const s = ps.tokenizer.ctx.source.getString(token.source_range);
const peeked_token = try ps.tokenizer.peek();
if (peeked_token.tt == .sym_left_paren) {
_ = try ps.tokenizer.next();
const payload = try expectExpression(ps, pc);
try ps.tokenizer.expectNext(.sym_right_paren);
const enum_literal: EnumLiteral = .{ .label = s, .payload = payload };
return try createExpr(ps, loc0, .{ .literal_enum_value = enum_literal });
} else {
const enum_literal: EnumLiteral = .{ .label = s, .payload = null };
return try createExprWithSourceRange(ps, token.source_range, .{ .literal_enum_value = enum_literal });
}
},
.kw_delay => {
switch (pc) {
.module => |pcm| {
const delay = try parseDelay(ps, pcm);
return try createExpr(ps, loc0, .{ .delay = delay });
},
else => return fail(ps.tokenizer.ctx, token.source_range, "cannot use delay outside of module context", .{}),
}
},
.kw_feedback => {
switch (pc) {
.module => return try createExpr(ps, loc0, .feedback),
else => return fail(ps.tokenizer.ctx, token.source_range, "cannot use feedback outside of module context", .{}),
}
},
else => return ps.tokenizer.failExpected("expression", token),
}
}
fn parseLocalDecl(ps: *ParseState, ps_mod: *ParseModuleState, scope: *Scope, name_token: Token) !void {
const name = ps.tokenizer.ctx.source.getString(name_token.source_range);
try ps.tokenizer.expectNext(.sym_equals);
for (reserved_names) |reserved_name| {
if (std.mem.eql(u8, name, reserved_name)) {
return fail(ps.tokenizer.ctx, name_token.source_range, "`<` is a reserved name", .{});
}
}
// note: locals are allowed to shadow globals, params, and even previous locals
const expr = try expectExpression(ps, .{ .module = .{ .ps_mod = ps_mod, .scope = scope } });
const local_index = ps_mod.locals.items.len;
try ps_mod.locals.append(.{
.name = name,
});
try scope.statements.append(.{
.let_assignment = .{
.local_index = local_index,
.expression = expr,
},
});
}
fn parseGlobalDecl(ps: *ParseState, name_token: Token) !void {
const name = ps.tokenizer.ctx.source.getString(name_token.source_range);
try ps.tokenizer.expectNext(.sym_equals);
for (reserved_names) |reserved_name| {
if (std.mem.eql(u8, name, reserved_name)) {
return fail(ps.tokenizer.ctx, name_token.source_range, "`<` is a reserved name", .{});
}
}
// globals are different from locals in that they are define-anywhere, where shadowing doesn't make sense
for (ps.globals.items) |global| {
if (std.mem.eql(u8, name, global.name)) {
return fail(ps.tokenizer.ctx, name_token.source_range, "redeclaration of global `<`", .{});
}
}
const expr = try expectExpression(ps, .global);
try ps.globals.append(.{
.name = name,
.value = expr,
});
}
fn parseStatements(ps: *ParseState, ps_mod: *ParseModuleState, parent_scope: ?*const Scope) !*Scope {
var scope = try ps.arena_allocator.create(Scope);
scope.* = .{
.parent = parent_scope,
.statements = std.ArrayList(Statement).init(ps.arena_allocator),
};
const pc: ParseContext = .{
.module = .{ .ps_mod = ps_mod, .scope = scope },
};
while (true) {
const token = try ps.tokenizer.next();
switch (token.tt) {
.kw_end => break,
.name => {
try parseLocalDecl(ps, ps_mod, scope, token);
},
.kw_out => {
const expr = try expectExpression(ps, pc);
try scope.statements.append(.{ .output = expr });
},
.kw_feedback => {
const expr = try expectExpression(ps, pc);
try scope.statements.append(.{ .feedback = expr });
},
else => return ps.tokenizer.failExpected("local declaration, `out`, `feedback` or `end`", token),
}
}
return scope;
}
pub const ParseResult = struct {
arena: std.heap.ArenaAllocator,
globals: []const Global,
curves: []const Curve,
tracks: []const Track,
modules: []const Module,
pub fn deinit(self: *ParseResult) void {
self.arena.deinit();
}
};
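// Typical call site (a sketch; `ctx` and `allocator` stand in for values the caller
// already has): `var result = try parse(ctx, allocator, null);` followed by
// `defer result.deinit();` once the parsed data is no longer needed.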
pub fn parse(
ctx: Context,
inner_allocator: *std.mem.Allocator,
dump_parse_out: ?std.io.StreamSource.OutStream,
) !ParseResult {
var arena = std.heap.ArenaAllocator.init(inner_allocator);
errdefer arena.deinit();
var ps: ParseState = .{
.arena_allocator = &arena.allocator,
.tokenizer = Tokenizer.init(ctx),
.globals = std.ArrayList(Global).init(&arena.allocator),
.enums = std.ArrayList(BuiltinEnum).init(&arena.allocator),
.curves = std.ArrayList(Curve).init(&arena.allocator),
.tracks = std.ArrayList(Track).init(&arena.allocator),
.modules = std.ArrayList(Module).init(&arena.allocator),
};
// add builtins
for (ctx.builtin_packages) |pkg| {
try ps.enums.appendSlice(pkg.enums);
for (pkg.builtins) |builtin| {
const module_index = ps.modules.items.len;
try ps.modules.append(.{
.builtin_name = builtin.name,
.zig_package_name = pkg.zig_package_name,
.params = builtin.params,
.info = null,
});
// add a global declaration for this builtin. hopefully this never comes up in a
// compile error, because this source range is bogus
const sr: SourceRange = .{
.loc0 = .{ .line = 0, .index = 0 },
.loc1 = .{ .line = 0, .index = 0 },
};
try ps.globals.append(.{
.name = builtin.name,
.value = try createExprWithSourceRange(&ps, sr, .{ .literal_module = module_index }),
});
}
}
// parse the file
while (true) {
const token = try ps.tokenizer.next();
switch (token.tt) {
.end_of_file => break,
.name => try parseGlobalDecl(&ps, token),
else => return ps.tokenizer.failExpected("declaration or end of file", token),
}
}
const modules = ps.modules.toOwnedSlice();
// diagnostic print
if (dump_parse_out) |out| {
for (modules) |module, module_index| {
parsePrintModule(out, ctx.source, modules, module_index, module) catch |err| std.debug.warn("parsePrintModule failed: {}\n", .{err});
}
}
return ParseResult{
.arena = arena,
.globals = ps.globals.toOwnedSlice(),
.curves = ps.curves.toOwnedSlice(),
.tracks = ps.tracks.toOwnedSlice(),
.modules = modules,
};
} | src/zangscript/parse.zig |
const std = @import("std");
const c = @cImport({
@cInclude("epoxy/gl.h"); // Include statically if possible
@cInclude("GLFW/glfw3.h"); // Redo build script so it is in zig and cross-platform
@cInclude("fontstash.h");
@cInclude("gl3corefontstash.h");
});
const zs = @import("zstack.zig");
const Piece = zs.Piece;
const BitSet = zs.BitSet;
const VirtualKey = zs.input.VirtualKey;
const Key = zs.input.Key;
const debug_opengl = true;
const stack_color = zs.piece.Color{
.r = 140,
.g = 140,
.b = 140,
.a = 255,
};
const well_color = zs.piece.Color{
.r = 180,
.g = 180,
.b = 180,
.a = 255,
};
fn checkShader(id: c_uint, status_type: c_uint) !void {
var status: c_int = undefined;
var buffer = [_]u8{0} ** 512;
c.glGetShaderiv(id, status_type, &status);
if (status != c.GL_TRUE) {
c.glGetShaderInfoLog(id, buffer.len - 1, null, &buffer[0]);
std.debug.warn("{}\n", buffer[0..]);
return error.GlShaderError;
}
}
fn checkProgram(id: c_uint, status_type: c_uint) !void {
var status: c_int = undefined;
var buffer = [_]u8{0} ** 512;
c.glGetProgramiv(id, status_type, &status);
if (status != c.GL_TRUE) {
c.glGetProgramInfoLog(id, buffer.len - 1, null, &buffer[0]);
std.debug.warn("{}\n", buffer[0..]);
return error.GlProgramError;
}
}
fn getUniform(id: c_uint, name: [*c]const u8) !c_int {
const uniform_id = c.glGetUniformLocation(id, name);
if (uniform_id == -1) {
return error.GlUniformError;
}
return uniform_id;
}
fn getAttribute(id: c_uint, name: [*c]const u8) !c_uint {
const attr_id = c.glGetAttribLocation(id, name);
if (attr_id == -1) {
return error.GlAttribError;
}
return @intCast(c_uint, attr_id);
}
extern fn glDebugCallback(
source: c.GLenum,
ty: c.GLenum,
id: c.GLuint,
severity: c.GLenum,
length: c.GLsizei,
message: [*c]const u8,
user_param: ?*const c_void,
) void {
std.debug.warn("{}: {}: {}\n", source, id, message[0..@intCast(usize, length)]);
}
const offsets = struct {
// Block width is doubled since this viewport is (-1, 1)
const block_width: f32 = 0.02625;
const hold_x: f32 = 0.1;
const hold_y: f32 = 0.1;
const well_x: f32 = hold_x + 5.0 * block_width;
const well_y: f32 = 0.1;
const well_h: f32 = 0.7;
const well_w: f32 = 10.0 * block_width; // TODO: Varies based on engine width
// width of well is dependent on the usual width, but assume 10.
const preview_x: f32 = well_x + well_w + block_width;
const preview_y: f32 = 0.1;
};
// TODO: Need a quad and/or a uniform to disable all lighting. Take
// a bool to the shader?
// Draw an arbitrary quadrilateral aligned to the x, y plane.
const gl_quad = struct {
// Our offsets are percentages so assume a (0, 1) coordinate system.
// Also includes face normals. TODO: orient so front-facing correctly.
// (x, y, z, N_x, N_y, N_z),
const vertices = [_]c.GLfloat{
// Face 1 (Front)
0, 0, 0, 0, 0, -1,
0, 1, 0, 0, 0, -1,
1, 0, 0, 0, 0, -1,
1, 0, 0, 0, 0, -1,
0, 1, 0, 0, 0, -1,
1, 1, 0, 0, 0, -1,
// Face 2 (Rear)
0, 0, 1, 0, 0, 1,
1, 0, 1, 0, 0, 1,
1, 1, 1, 0, 0, 1,
1, 1, 1, 0, 0, 1,
0, 1, 1, 0, 0, 1,
0, 0, 1, 0, 0, 1,
// Face 3 (Left)
0, 0, 0, -1, 0, 0,
0, 1, 0, -1, 0, 0,
0, 0, 1, -1, 0, 0,
0, 0, 1, -1, 0, 0,
0, 1, 0, -1, 0, 0,
0, 1, 1, -1, 0, 0,
// Face 4 (Right)
1, 0, 0, 1, 0, 0,
1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 0,
1, 0, 1, 1, 0, 0,
1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 0,
// Face 5 (Bottom)
0, 0, 0, 0, -1, 0,
0, 0, 1, 0, -1, 0,
1, 0, 0, 0, -1, 0,
1, 0, 0, 0, -1, 0,
0, 0, 1, 0, -1, 0,
1, 0, 1, 0, -1, 0,
// Face 6 (Top)
0, 1, 0, 0, 1, 0,
0, 1, 1, 0, 1, 0,
1, 1, 0, 0, 1, 0,
1, 1, 0, 0, 1, 0,
        0, 1, 1, 0, 1, 0,
1, 1, 1, 0, 1, 0,
};
var vao_id: c_uint = undefined;
var vbo_id: c_uint = undefined;
var vertex_shader_id: c_uint = undefined;
var fragment_shader_id: c_uint = undefined;
var program_id: c_uint = undefined;
// vertex shader
var attr_position: c_uint = undefined;
var attr_face_normal: c_uint = undefined;
var uniform_offset: c_int = undefined;
var uniform_scale: c_int = undefined;
var uniform_viewport: c_int = undefined;
// fragment shader
var attr_normal: c_uint = undefined;
var attr_frag_pos: c_uint = undefined;
var uniform_view_pos: c_int = undefined;
var uniform_surface_color: c_int = undefined;
var uniform_light_color: c_int = undefined;
var uniform_light_pos: c_int = undefined;
var uniform_enable_lighting: c_int = undefined;
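    /// Compile and link the shaders, resolve attribute/uniform locations and upload the cube vertices.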
pub fn init(options: Options) !void {
const vertex_shader_source =
c\\#version 150 core
c\\
c\\in vec3 position;
c\\in vec3 faceNormal;
c\\
c\\out vec3 normal;
c\\out vec3 fragPos;
c\\
c\\uniform vec2 offset;
c\\uniform mat3 scale;
c\\uniform mat4 viewport;
c\\
c\\void main()
c\\{
c\\ //fragPos = vec3(viewport * vec4(position, 1.0));
c\\ //normal = mat3(transpose(inverse(viewport))) * faceNormal;
c\\
c\\ gl_Position = vec4(position, 1.0) * mat4(scale);
c\\ gl_Position.x += offset.x;
c\\ gl_Position.y += offset.y;
c\\ gl_Position = gl_Position * viewport;
c\\
c\\ fragPos = vec3(gl_Position);
c\\ normal = faceNormal;
c\\}
;
vertex_shader_id = c.glCreateShader(c.GL_VERTEX_SHADER);
errdefer c.glDeleteShader(vertex_shader_id);
c.glShaderSource(vertex_shader_id, 1, &vertex_shader_source, null);
c.glCompileShader(vertex_shader_id);
try checkShader(vertex_shader_id, c.GL_COMPILE_STATUS);
const fragment_shader_source =
c\\#version 150 core
c\\
c\\out vec4 outColor;
c\\
c\\in vec3 normal;
c\\in vec3 fragPos;
c\\
c\\uniform vec3 viewPos;
c\\uniform vec3 surfaceColor;
c\\uniform vec3 lightColor;
c\\uniform vec3 lightPos;
c\\uniform bool enableLighting;
c\\
c\\void main()
c\\{
c\\ if (!enableLighting) {
c\\ outColor = vec4(surfaceColor, 1.0);
c\\ } else {
c\\ float ambientStrength = 0.7;
c\\ vec3 ambient = ambientStrength * lightColor;
c\\
c\\ vec3 norm = normalize(normal);
c\\ vec3 lightDir = normalize(lightPos - fragPos);
c\\ float diff = max(dot(norm, lightDir), 0.0);
c\\ vec3 diffuse = diff * lightColor;
c\\
c\\ float specularStrength = 0.5;
c\\ vec3 viewDir = normalize(viewPos - fragPos);
c\\ vec3 reflectDir = reflect(-lightDir, norm);
c\\ float spec = pow(max(dot(viewDir, reflectDir), 0.0), 32);
c\\ vec3 specular = specularStrength * spec * lightColor;
c\\
c\\ vec3 result = (ambient + diffuse + specular) * surfaceColor;
c\\ outColor = vec4(result, 1.0);
c\\ }
c\\}
;
fragment_shader_id = c.glCreateShader(c.GL_FRAGMENT_SHADER);
errdefer c.glDeleteShader(fragment_shader_id);
c.glShaderSource(fragment_shader_id, 1, &fragment_shader_source, null);
c.glCompileShader(fragment_shader_id);
try checkShader(fragment_shader_id, c.GL_COMPILE_STATUS);
program_id = c.glCreateProgram();
errdefer c.glDeleteProgram(program_id);
c.glAttachShader(program_id, vertex_shader_id);
c.glAttachShader(program_id, fragment_shader_id);
c.glLinkProgram(program_id);
try checkProgram(program_id, c.GL_LINK_STATUS);
// Only have one program, don't bother using anywhere else.
c.glUseProgram(program_id);
// vertex shader
attr_position = try getAttribute(program_id, c"position");
attr_face_normal = try getAttribute(program_id, c"faceNormal");
uniform_offset = try getUniform(program_id, c"offset");
uniform_scale = try getUniform(program_id, c"scale");
uniform_viewport = try getUniform(program_id, c"viewport");
// fragment shader
uniform_view_pos = try getUniform(program_id, c"viewPos");
uniform_surface_color = try getUniform(program_id, c"surfaceColor");
uniform_light_color = try getUniform(program_id, c"lightColor");
uniform_light_pos = try getUniform(program_id, c"lightPos");
uniform_enable_lighting = try getUniform(program_id, c"enableLighting");
c.glGenVertexArrays(1, &vao_id);
errdefer c.glDeleteVertexArrays(1, &vao_id);
c.glGenBuffers(1, &vbo_id);
errdefer c.glDeleteBuffers(1, &vbo_id);
c.glBindVertexArray(vao_id);
c.glBindBuffer(c.GL_ARRAY_BUFFER, vbo_id);
c.glBufferData(c.GL_ARRAY_BUFFER, vertices.len * @sizeOf(c.GLfloat), &vertices[0], c.GL_STATIC_DRAW);
c.glVertexAttribPointer(attr_position, 3, c.GL_FLOAT, c.GL_FALSE, 6 * @sizeOf(c.GLfloat), null);
c.glEnableVertexAttribArray(attr_position);
c.glVertexAttribPointer(attr_face_normal, 3, c.GL_FLOAT, c.GL_FALSE, 6 * @sizeOf(c.GLfloat), null);
c.glEnableVertexAttribArray(attr_face_normal);
// Always bound, only use the one vertex array
c.glBindVertexArray(vao_id);
// Fixed viewport for now
if (options.render_3d) {
const viewport = [_]f32{
1, 0, 0.5, 0,
0, 1, 0.3, 0,
0, 0, 1, 0,
0, 0, 0, 1,
};
c.glUniformMatrix4fv(uniform_viewport, 1, c.GL_FALSE, &viewport[0]);
} else {
const viewport = [_]f32{
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
};
c.glUniformMatrix4fv(uniform_viewport, 1, c.GL_FALSE, &viewport[0]);
}
// Fixed light color for now
c.glUniform3f(uniform_light_color, 0.5, 0.5, 0.5);
c.glUniform3f(uniform_light_pos, 0.5, 0.5, 0.5);
// Fixed camera position
c.glUniform3f(uniform_view_pos, 0.5, 0.5, 0.5);
// Enable lighting
c.glUniform1i(uniform_enable_lighting, @boolToInt(options.render_lighting));
}
pub fn deinit() void {
c.glDeleteProgram(program_id);
c.glDeleteShader(fragment_shader_id);
c.glDeleteShader(vertex_shader_id);
c.glDeleteBuffers(1, &vbo_id);
c.glDeleteVertexArrays(1, &vao_id);
}
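    /// Set the quad surface color from 8-bit RGB components (normalized to [0, 1] for the shader).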
pub fn setColor(r: u8, g: u8, b: u8) void {
const x = @intToFloat(c.GLfloat, r) / 255.0;
const y = @intToFloat(c.GLfloat, g) / 255.0;
const z = @intToFloat(c.GLfloat, b) / 255.0;
c.glUniform3f(uniform_surface_color, x, y, z);
}
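    /// Set the quad scale from (0, 1) layout units; the factor of 2 maps them into the (-1, 1) clip-space extent.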
pub fn setScale(x: f32, y: f32, z: f32) void {
std.debug.assert(0.0 <= x and x <= 1.0);
std.debug.assert(0.0 <= y and y <= 1.0);
std.debug.assert(0.0 <= z and z <= 1.0);
var scale_matrix = [_]f32{
2 * x, 0, 0,
0, 2 * y, 0,
0, 0, 2 * z,
};
c.glUniformMatrix3fv(uniform_scale, 1, c.GL_FALSE, &scale_matrix[0]);
}
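    /// Draw the quad at (x, y) in (0, 1) window coordinates, with (0, 0) at the top-left.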
pub fn draw(
x: f32,
y: f32,
// TODO: Fix zig-fmt case here, indent enum content once more (and this doc-comment).
// comptime fill: enum {
// Fill,
// Frame,
//},
) void {
std.debug.assert(0.0 <= x and x <= 1.0);
std.debug.assert(0.0 <= y and y <= 1.0);
// Normalize window (x, y) to (-1, 1) system. We need to invert the
// y-axis since the window starts with (0, 0) at the top-left.
const norm_x = 2 * x - 1;
const norm_y = -(2 * y - 1);
c.glUniform2f(uniform_offset, norm_x, norm_y);
// TODO: Actually, we need a lightsource at a pre-defined location so depth can be
// properly visible.
c.glDrawArrays(c.GL_TRIANGLES, 0, vertices.len / 6);
}
};
pub const Options = struct {
width: c_int = 640,
height: c_int = 480,
render_3d: bool = false,
render_lighting: bool = false,
debug: bool = true,
};
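// Typical usage (hypothetical call site; `keymap` and `engine` come from the zstack engine setup):
//   var window = try Window.init(Options{}, keymap);
//   defer window.deinit();
//   const keys = window.readKeys();
//   try window.render(engine);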
// Need copy-elision to avoid this. Or, pass the window and init that way. Kind of annoying.
var actual_keymap: zs.input.KeyBindings = undefined;
pub const Window = struct {
window: ?*c.GLFWwindow,
font: ?*c.FONScontext,
keymap: zs.input.KeyBindings,
width: c_int,
height: c_int,
debug: bool,
pub fn init(options: Options, keymap: zs.input.KeyBindings) !Window {
var w = Window{
.window = null,
.font = null,
.keymap = keymap,
.width = options.width,
.height = options.height,
.debug = options.debug,
};
// TODO: Pass as glfw user window data, see above.
actual_keymap = keymap;
if (c.glfwInit() == 0) {
return error.GlfwError;
}
errdefer c.glfwTerminate();
// Target OpenGL 3.2 Core Profile.
c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MAJOR, 3);
c.glfwWindowHint(c.GLFW_CONTEXT_VERSION_MINOR, 2);
c.glfwWindowHint(c.GLFW_OPENGL_PROFILE, c.GLFW_OPENGL_CORE_PROFILE);
c.glfwWindowHint(c.GLFW_OPENGL_FORWARD_COMPAT, c.GL_TRUE);
c.glfwWindowHint(c.GLFW_RESIZABLE, c.GL_FALSE);
        // We use OpenGL as a 2-D view, so we don't need any depth.
//c.glfwWindowHint(c.GLFW_DEPTH_BITS, 0);
//c.glfwWindowHint(c.GLFW_STENCIL_BITS, 0);
if (debug_opengl) {
c.glfwWindowHint(c.GLFW_OPENGL_DEBUG_CONTEXT, c.GL_TRUE);
}
w.window = c.glfwCreateWindow(w.width, w.height, c"zstack", null, null);
if (w.window == null) {
return error.GlfwError;
}
errdefer c.glfwDestroyWindow(w.window);
c.glfwMakeContextCurrent(w.window);
_ = c.glfwSetKeyCallback(w.window, keyCallback);
c.glfwSwapInterval(0);
if (debug_opengl) {
c.glDebugMessageCallback(glDebugCallback, null);
c.glDebugMessageControl(c.GL_DONT_CARE, c.GL_DONT_CARE, c.GL_DONT_CARE, 0, null, c.GL_TRUE);
}
c.glClearColor(0, 0, 0, 1);
c.glClear(c.GL_COLOR_BUFFER_BIT);
if (false) {
w.font = c.glfonsCreate(512, 512, @enumToInt(c.FONS_ZERO_TOPLEFT));
if (w.font == null) {
return error.FontStashError;
}
var ttf_font = []const u8{}; //@embedFile("unscii-16.ttf");
const font_id = c.fonsAddFontMem(
w.font,
c"unscii",
&ttf_font[0],
ttf_font.len,
0,
);
if (font_id == c.FONS_INVALID) {
return error.FontStashError;
}
c.fonsSetFont(w.font, font_id);
c.fonsSetSize(w.font, 12);
c.fonsSetColor(w.font, c.glfonsRGBA(255, 255, 255, 255));
}
try gl_quad.init(options);
if (debug_opengl) {
//c.glPolygonMode(c.GL_FRONT_AND_BACK, c.GL_LINE);
}
return w;
}
pub fn deinit(w: Window) void {
gl_quad.deinit();
c.glfwDestroyWindow(w.window);
c.glfwTerminate();
}
fn mapKeyToGlfwKey(key: Key) c_int {
// TODO: This switch doesn't compile correctly in release fast?
return switch (key) {
.space => c_int(c.GLFW_KEY_SPACE),
.enter => c.GLFW_KEY_ENTER,
.tab => c.GLFW_KEY_TAB,
.right => c.GLFW_KEY_RIGHT,
.left => c.GLFW_KEY_LEFT,
.down => c.GLFW_KEY_DOWN,
.up => c.GLFW_KEY_UP,
.rshift => c.GLFW_KEY_RIGHT_SHIFT,
.lshift => c.GLFW_KEY_LEFT_SHIFT,
.capslock => c.GLFW_KEY_CAPS_LOCK,
.comma => c.GLFW_KEY_COMMA,
.period => c.GLFW_KEY_PERIOD,
.slash => c.GLFW_KEY_SLASH,
.semicolon => c.GLFW_KEY_SEMICOLON,
.apostrophe => c.GLFW_KEY_APOSTROPHE,
.lbracket => c.GLFW_KEY_LEFT_BRACKET,
.rbracket => c.GLFW_KEY_RIGHT_BRACKET,
.backslash => c.GLFW_KEY_BACKSLASH,
.a => c.GLFW_KEY_A,
.b => c.GLFW_KEY_B,
.c => c.GLFW_KEY_C,
.d => c.GLFW_KEY_D,
.e => c.GLFW_KEY_E,
.f => c.GLFW_KEY_F,
.g => c.GLFW_KEY_G,
.h => c.GLFW_KEY_H,
.i => c.GLFW_KEY_I,
.j => c.GLFW_KEY_J,
.k => c.GLFW_KEY_K,
.l => c.GLFW_KEY_L,
.m => c.GLFW_KEY_M,
.n => c.GLFW_KEY_N,
.o => c.GLFW_KEY_O,
.p => c.GLFW_KEY_P,
.q => c.GLFW_KEY_Q,
.r => c.GLFW_KEY_R,
.s => c.GLFW_KEY_S,
.t => c.GLFW_KEY_T,
.u => c.GLFW_KEY_U,
.v => c.GLFW_KEY_V,
.w => c.GLFW_KEY_W,
.x => c.GLFW_KEY_X,
.y => c.GLFW_KEY_Y,
.z => c.GLFW_KEY_Z,
};
}
var keys_pressed = BitSet(VirtualKey).init();
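    // GLFW key callback: translates physical keys into virtual keys via the active keymap.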
extern fn keyCallback(window: ?*c.GLFWwindow, key: c_int, scancode: c_int, action: c_int, mods: c_int) void {
if (c.glfwGetWindowAttrib(window, c.GLFW_FOCUSED) == 0) {
return;
}
for (actual_keymap.entries()) |km, i| {
if (key == mapKeyToGlfwKey(km)) {
const to = VirtualKey.fromIndex(i);
switch (action) {
c.GLFW_PRESS => keys_pressed.set(to),
c.GLFW_RELEASE => keys_pressed.clear(to),
else => {},
}
break;
}
}
}
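    /// Poll GLFW events and return the currently pressed virtual keys (plus Quit when the window should close).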
pub fn readKeys(w: *Window) BitSet(VirtualKey) {
c.glfwPollEvents();
var keys = keys_pressed;
if (c.glfwWindowShouldClose(w.window) != 0) {
keys.set(.Quit);
}
return keys;
}
fn renderWell(w: Window, e: zs.Engine) void {
// TODO: Decide on the format to use (0, 1) (-1, 1), (0, w) and use that everywhere
// instead of converting here and there.
//
        // Well border. This is not a one-pixel rectangle but three 3-dimensional
        // planes (think of a box with the back, front, and top missing).
//
// TODO: We can maybe add a back into the well, but adjust the color and check shading.
const well_thickness = offsets.block_width * 0.1;
gl_quad.setColor(well_color.r, well_color.g, well_color.b);
// Sides
gl_quad.setScale(
well_thickness,
offsets.block_width * @intToFloat(f32, e.options.well_height),
offsets.block_width,
);
gl_quad.draw(
offsets.well_x - well_thickness,
offsets.well_y + offsets.block_width * @intToFloat(f32, e.options.well_height - 1),
);
gl_quad.draw(
offsets.well_x + 10 * offsets.block_width,
offsets.well_y + offsets.block_width * @intToFloat(f32, e.options.well_height - 1),
);
// Bottom
gl_quad.setScale(
offsets.well_w,
well_thickness,
offsets.block_width,
);
gl_quad.draw(
offsets.well_x,
offsets.well_y + offsets.block_width * @intToFloat(f32, e.options.well_height - 1) + well_thickness,
);
gl_quad.setColor(stack_color.r, stack_color.g, stack_color.b);
gl_quad.setScale(offsets.block_width, offsets.block_width, offsets.block_width);
var y = e.options.well_hidden;
while (y < e.options.well_height) : (y += 1) {
const block_y = offsets.well_y + offsets.block_width * @intToFloat(f32, y - e.options.well_hidden);
var x: usize = 0;
while (x < e.options.well_width) : (x += 1) {
const block_x = offsets.well_x + offsets.block_width * @intToFloat(f32, x);
if (e.well[y][x] != null) {
gl_quad.draw(block_x, block_y);
}
}
}
}
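    // Draw the held piece in the hold box; a half-block offset keeps the different piece shapes visually centered.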
fn renderHoldPiece(w: Window, e: zs.Engine) void {
const id = e.hold_piece orelse return;
const color = id.color();
gl_quad.setScale(offsets.block_width, offsets.block_width, offsets.block_width);
gl_quad.setColor(color.r, color.g, color.b);
const bx_off = if (id != .O) offsets.block_width / 2.0 else 0.0;
const by_off = if (id == .I) offsets.block_width / 2.0 else 0.0;
const blocks = e.rotation_system.blocks(id, .R0);
for (blocks) |b| {
const block_x = bx_off + offsets.hold_x + offsets.block_width * @intToFloat(f32, b.x);
const block_y = by_off + offsets.hold_y + offsets.block_width * @intToFloat(f32, b.y);
gl_quad.draw(block_x, block_y);
}
}
fn renderCurrentPieceAndShadow(w: Window, e: zs.Engine) void {
const p = e.piece orelse return;
const color = p.id.color();
const blocks = e.rotation_system.blocks(p.id, p.theta);
gl_quad.setScale(offsets.block_width, offsets.block_width, offsets.block_width);
// Dim ghost color
gl_quad.setColor(color.r / 2, color.g / 2, color.b / 2);
// Ghost
if (e.options.show_ghost) {
for (blocks) |b| {
const x = @intCast(u8, p.x + @intCast(i8, b.x));
const y = @intCast(u8, p.y_hard_drop) + b.y - e.options.well_hidden;
// Filter blocks greater than visible field height
if (b.y < 0) {
continue;
}
const block_x = offsets.well_x + offsets.block_width * @intToFloat(f32, x);
const block_y = offsets.well_y + offsets.block_width * @intToFloat(f32, y);
gl_quad.draw(block_x, block_y);
}
}
// Slowly dim block to stack color if locking.
var nc = color;
if (e.options.lock_delay_ms != 0) {
const lock_ratio = @intToFloat(f32, p.lock_timer) / @intToFloat(f32, zs.ticks(e.options.lock_delay_ms));
if (lock_ratio != 0) {
inline for (([_][]const u8{ "r", "g", "b" })[0..]) |entry| {
if (@field(nc, entry) < @field(stack_color, entry)) {
@field(nc, entry) += @floatToInt(u8, @intToFloat(f32, @field(stack_color, entry) - @field(nc, entry)) * lock_ratio);
} else {
@field(nc, entry) -= @floatToInt(u8, @intToFloat(f32, @field(nc, entry) - @field(stack_color, entry)) * lock_ratio);
}
}
}
}
// Slowly dim block if in ARE state.
if (e.state == .Are and e.options.are_delay_ms != 0) {
const lock_ratio = @intToFloat(f32, e.are_counter) / @intToFloat(f32, zs.ticks(e.options.are_delay_ms));
if (lock_ratio != 0) {
inline for (([_][]const u8{ "r", "g", "b" })[0..]) |entry| {
if (@field(nc, entry) < @field(stack_color, entry)) {
@field(nc, entry) += @floatToInt(u8, @intToFloat(f32, @field(stack_color, entry) - @field(nc, entry)) * lock_ratio);
} else {
@field(nc, entry) -= @floatToInt(u8, @intToFloat(f32, @field(nc, entry) - @field(stack_color, entry)) * lock_ratio);
}
}
}
}
gl_quad.setColor(nc.r, nc.g, nc.b);
// Piece
for (blocks) |b| {
const x = @intCast(u8, p.x + @intCast(i8, b.x));
// TODO: fix non-zero well_hidden
const y = p.uy() + b.y - e.options.well_hidden;
if (y < 0) {
continue;
}
const block_x = offsets.well_x + offsets.block_width * @intToFloat(f32, x);
const block_y = offsets.well_y + offsets.block_width * @intToFloat(f32, y);
gl_quad.draw(block_x, block_y);
}
}
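    // Draw the upcoming preview pieces in a vertical column, one every four block-widths.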
fn renderPreviewPieces(w: Window, e: zs.Engine) void {
gl_quad.setScale(offsets.block_width, offsets.block_width, offsets.block_width);
var i: usize = 0;
while (i < e.options.preview_piece_count) : (i += 1) {
const id = e.preview_pieces.peek(i);
const color = id.color();
const blocks = e.rotation_system.blocks(id, .R0);
gl_quad.setColor(color.r, color.g, color.b);
const by = offsets.preview_y + offsets.block_width * @intToFloat(f32, 4 * i);
for (blocks) |b| {
var block_x = offsets.preview_x + offsets.block_width * @intToFloat(f32, b.x);
var block_y = by + offsets.block_width * @intToFloat(f32, b.y);
switch (id) {
.I => block_y -= offsets.block_width / 2.0,
.O => block_x += offsets.block_width / 2.0,
else => {},
}
gl_quad.draw(block_x, block_y);
}
}
}
fn renderString(w: Window, x: usize, y: usize, comptime fmt: []const u8, args: ...) !void {
var buffer: [128]u8 = undefined;
        const line = try std.fmt.bufPrint(buffer[0..], fmt ++ "\x00", args);
        // TODO: Draw `line` via fontstash once text rendering is wired up; it is formatted but unused for now.
        _ = line;
    }
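    // NOTE: Apparently leftover from an earlier SDL_FontCache-based text path: `w.renderer`,
    // the `FC_*` calls and the `len(...)` helpers do not exist in this GL frontend. It is only
    // reachable from commented-out call sites until it is ported to fontstash.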
fn renderFieldString(w: Window, comptime fmt: []const u8, args: ...) void {
var buffer: [128]u8 = undefined;
const line = std.fmt.bufPrint(buffer[0..], fmt ++ "\x00", args);
const width = c.FC_GetWidth(w.font, line);
const x = len(w, e, .WellX) + len(w, e, .WellW) / 2 - width / 2;
const y = len(w, e, .WellY) + len(w, e, .WellH) / 2;
c.FC_Draw(w.font, w.renderer, x, y, line[0..]);
}
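    // NOTE: Debug overlay. Like renderFieldString above, it still assumes the earlier text API
    // (`FC_GetLineHeight`) and is only invoked from the commented-out block in render().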
fn renderDebug(w: Window, e: zs.Engine) void {
const ux = w.width * 0.7;
const uy = 1;
const elapsed_time = 0;
const render_fps = elapsed_time / (1000 * e.total_ticks / zs.ticks_per_draw);
const logic_fps = elapsed_time / (1000 * e.total_ticks);
const line_skip_y = c.FC_GetLineHeight(w.font);
        var pos_y: usize = 0;
        w.renderString(ux, uy + pos_y * line_skip_y, "Render FPS: {.5}", render_fps);
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, "Logic FPS: {.5}", logic_fps);
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, "Block:");
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, " x: {}", e.piece_x);
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, " y: {}", e.piece_y);
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, " theta: {}", e.piece_theta);
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, " low-y: {}", e.y_hard_drop);
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, "Field:");
        pos_y += 1;
        w.renderString(ux, uy + pos_y * line_skip_y, " gravity: {.3}", e.gravity);
        pos_y += 1;
}
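    /// Render one frame: clear, then draw the well, hold piece, active piece with its ghost, and previews.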
pub fn render(w: Window, e: zs.Engine) error{}!void {
c.glClearColor(0, 0, 0, 1);
c.glClear(c.GL_COLOR_BUFFER_BIT);
w.renderWell(e);
w.renderHoldPiece(e);
w.renderCurrentPieceAndShadow(e);
w.renderPreviewPieces(e);
//try w.renderStatistics(e);
if (false) {
const text = "Here is some text";
_ = c.fonsDrawText(w.font, 10, 10, &text[0], text.len);
}
//switch (e.State) {
// .Excellent => w.renderFieldString("EXCELLENT"),
// .Ready => w.renderFieldString("READY"),
// .Go => w.renderFieldString("GO"),
// else => {},
//}
//if (w.debug) {
// w.renderDebug();
//}
c.glfwSwapBuffers(w.window);
}
}; | src/window_gl.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const regex = @import("regex");
const clap = @import("clap");
const walkdir = @import("walkdir");
const filters = @import("filter.zig");
const Filter = filters.Filter;
const TypeFilter = filters.TypeFilter;
const actions = @import("actions.zig");
const Action = actions.Action;
const ColorOption = actions.ColorOption;
const PrintOptions = actions.PrintOptions;
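/// Fully parsed command-line configuration. All contained slices are allocated in the
/// arena owned by this struct and are freed together by `deinit`.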
pub const CliOptions = struct {
arena: *std.heap.ArenaAllocator,
paths: []const []const u8,
walkdir: walkdir.Options,
filter: Filter,
action: Action,
color: ColorOption,
print: PrintOptions,
pub fn deinit(self: *CliOptions) void {
const base_allocator = self.arena.child_allocator;
self.arena.deinit();
base_allocator.destroy(self.arena);
}
};
/// The command-line flags and options
pub const params = [_]clap.Param(u8){
// flags
clap.Param(u8){
.id = 'h',
.names = clap.Names{ .short = 'h', .long = "help" },
},
clap.Param(u8){
.id = 'v',
.names = clap.Names{ .short = 'v', .long = "version" },
},
clap.Param(u8){
.id = 'H',
.names = clap.Names{ .short = 'H', .long = "hidden" },
},
clap.Param(u8){
.id = 'p',
.names = clap.Names{ .short = 'p', .long = "full-path" },
},
clap.Param(u8){
.id = '0',
.names = clap.Names{ .short = '0', .long = "print0" },
},
clap.Param(u8){
.id = 's',
.names = clap.Names{ .long = "show-errors" },
},
// Options
clap.Param(u8){
.id = 'd',
.names = clap.Names{ .short = 'd', .long = "max-depth" },
.takes_value = clap.Values.one,
},
clap.Param(u8){
.id = 't',
.names = clap.Names{ .short = 't', .long = "type" },
.takes_value = clap.Values.many,
},
clap.Param(u8){
.id = 'e',
.names = clap.Names{ .short = 'e', .long = "extension" },
.takes_value = clap.Values.many,
},
clap.Param(u8){
.id = 'c',
.names = clap.Names{ .short = 'c', .long = "color" },
.takes_value = clap.Values.one,
},
clap.Param(u8){
.id = 'x',
.names = clap.Names{ .short = 'x', .long = "exec" },
},
clap.Param(u8){
.id = 'X',
.names = clap.Names{ .short = 'X', .long = "exec-batch" },
},
// Positionals
clap.Param(u8){
.id = '*',
.takes_value = clap.Values.many,
},
};
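/// Help text for each parameter, keyed by its `id` byte.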
pub fn helpText(param: clap.Param(u8)) []const u8 {
return switch (param.id) {
'h' => "Display this help and exit.",
'v' => "Display version info and exit.",
'H' => "Include hidden files and directories",
'p' => "Match the pattern against the full path instead of the file name",
'0' => "Separate search results with a null character",
's' => "Show errors which were encountered during searching",
'd' => "Set a limit for the depth",
't' => "Filter by entry type",
'e' => "Additionally filter by a file extension",
'c' => "Declare when to use colored output",
'x' => "Execute a command for each search result",
'X' => "Execute a command with all search results at once",
'*' => "Pattern or search paths",
else => unreachable,
};
}
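/// Placeholder shown for a parameter's value in the generated help output.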
pub fn valueText(param: clap.Param(u8)) []const u8 {
return switch (param.id) {
'd' => "NUM",
't' => "type",
'e' => "ext",
'c' => "when",
'*' => "pattern/path",
else => unreachable,
};
}
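/// Parse the process arguments into a `CliOptions`. The caller owns the result and must
/// call `deinit` to release the backing arena. Typical usage (hypothetical call site):
///
///     var cli = try parseCliOptions(std.heap.page_allocator);
///     defer cli.deinit();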
pub fn parseCliOptions(base_allocator: Allocator) !CliOptions {
const arena = try base_allocator.create(std.heap.ArenaAllocator);
arena.* = std.heap.ArenaAllocator.init(base_allocator);
errdefer arena.deinit();
const allocator = arena.allocator();
    // Initialize an argument iterator. OsIterator wraps argument iteration in the way
    // that is most efficient on each OS.
var iter = try clap.args.OsIterator.init(allocator);
    // Collects details about the offending argument when parsing fails, for the report below.
    var diag = clap.Diagnostic{};
    // Finally we can parse the arguments.
    var parser = clap.StreamingClap(u8, clap.args.OsIterator){
        .params = &params,
        .iter = &iter,
        // NOTE: assumes this zig-clap version exposes a `diagnostic` field on the streaming
        // parser; without it, `diag` would never be populated before being reported.
        .diagnostic = &diag,
    };
// Walk options
var walk_options = walkdir.Options{};
// Filter
var filter = Filter{};
// Action
var action = Action.default;
// Print options
var color_option = ColorOption.default;
var print_options = PrintOptions.default;
// Search paths
var paths = ArrayList([]const u8).init(allocator);
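    // After -x/-X the parser switches to Command mode and collects the command's argv
    // verbatim until a lone ";" switches it back.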
const ParseState = enum {
Normal,
Command,
};
var state: ParseState = .Normal;
while (true) {
switch (state) {
.Normal => if (parser.next() catch |err| {
// Report useful error and exit
diag.report(std.io.getStdErr().writer(), err) catch {};
return err;
}) |arg| {
// arg.param will point to the parameter which matched the argument.
switch (arg.param.id) {
'h' => {
try clap.helpEx(
std.io.getStdErr().writer(),
u8,
                            &params,
helpText,
valueText,
);
return error.Help;
},
'v' => {
try std.io.getStdErr().writer().print("zigfd version {s}\n", .{"0.0.1"});
return error.Help;
},
'H' => walk_options.include_hidden = true,
'p' => filter.full_path = true,
'0' => print_options.null_sep = true,
's' => print_options.errors = true,
'd' => walk_options.max_depth = try std.fmt.parseInt(usize, arg.value.?, 10),
't' => {
if (filter.types == null) filter.types = TypeFilter{};
if (std.mem.eql(u8, "f", arg.value.?) or std.mem.eql(u8, "file", arg.value.?)) {
filter.types.?.file = true;
} else if (std.mem.eql(u8, "d", arg.value.?) or std.mem.eql(u8, "directory", arg.value.?)) {
filter.types.?.directory = true;
} else if (std.mem.eql(u8, "l", arg.value.?) or std.mem.eql(u8, "link", arg.value.?)) {
filter.types.?.symlink = true;
} else {
std.log.err("'{s}' is not a valid type.", .{arg.value.?});
return error.ParseCliError;
}
},
'e' => {
if (filter.extensions == null) filter.extensions = ArrayList([]const u8).init(allocator);
try filter.extensions.?.append(try allocator.dupe(u8, arg.value.?));
},
'c' => {
if (std.mem.eql(u8, "auto", arg.value.?)) {
color_option = .Auto;
} else if (std.mem.eql(u8, "always", arg.value.?)) {
color_option = .Always;
} else if (std.mem.eql(u8, "never", arg.value.?)) {
color_option = .Never;
} else {
std.log.err("'{s}' is not a valid color argument.", .{arg.value.?});
return error.ParseCliError;
}
},
'x' => {
action.deinit();
action = Action{
.Execute = actions.ExecuteTarget.init(allocator),
};
state = .Command;
},
'X' => {
action.deinit();
action = Action{
.ExecuteBatch = actions.ExecuteBatchTarget.init(allocator),
};
state = .Command;
},
'*' => {
// Positionals
// If a regex is already compiled, we are looking at paths
if (filter.pattern) |_| {
try paths.append(arg.value.?);
} else {
filter.pattern = try regex.Regex.compile(allocator, arg.value.?);
}
},
else => unreachable,
}
} else break,
.Command => if (try iter.next()) |arg| {
if (std.mem.eql(u8, ";", arg)) {
state = .Normal;
} else {
switch (action) {
.Execute => |*x| try x.cmd.append(arg),
.ExecuteBatch => |*x| try x.cmd.append(arg),
else => unreachable, // We can only get to this state by -x or -X
}
}
} else break,
}
}
// Providing an empty command is an error
const no_command = switch (action) {
.Execute => |x| x.cmd.items.len == 0,
.ExecuteBatch => |x| x.cmd.items.len == 0,
else => false,
};
if (no_command) {
std.log.err("Expected a command after -x or -X", .{});
return error.ParseCliError;
}
return CliOptions{
.arena = arena,
.paths = paths.items,
.walkdir = walk_options,
.filter = filter,
.action = action,
.color = color_option,
.print = print_options,
};
} | src/cli.zig |
const builtin = @import("builtin");
comptime {
switch (builtin.arch) {
builtin.Arch.i386 => _ = @import("c/i386.zig"),
builtin.Arch.x86_64 => _ = @import("c/x86_64.zig"),
else => {},
}
}
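// The exports below are empty stubs under glibc symbol names, presumably so that a
// libc-shaped object with the expected symbol table can be produced without real implementations.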
export fn @"error"() void {}
export fn _Exit() void {}
export fn _IO_2_1_stderr_() void {}
export fn _IO_2_1_stdin_() void {}
export fn _IO_2_1_stdout_() void {}
export fn _IO_adjust_column() void {}
export fn _IO_adjust_wcolumn() void {}
export fn _IO_default_doallocate() void {}
export fn _IO_default_finish() void {}
export fn _IO_default_pbackfail() void {}
export fn _IO_default_uflow() void {}
export fn _IO_default_xsgetn() void {}
export fn _IO_default_xsputn() void {}
export fn _IO_do_write() void {}
export fn _IO_doallocbuf() void {}
export fn _IO_enable_locks() void {}
export fn _IO_fclose() void {}
export fn _IO_fdopen() void {}
export fn _IO_feof() void {}
export fn _IO_ferror() void {}
export fn _IO_fflush() void {}
export fn _IO_fgetpos() void {}
export fn _IO_fgetpos64() void {}
export fn _IO_fgets() void {}
export fn _IO_file_attach() void {}
export fn _IO_file_close() void {}
export fn _IO_file_close_it() void {}
export fn _IO_file_doallocate() void {}
export fn _IO_file_finish() void {}
export fn _IO_file_fopen() void {}
export fn _IO_file_init() void {}
export fn _IO_file_jumps() void {}
export fn _IO_file_open() void {}
export fn _IO_file_overflow() void {}
export fn _IO_file_read() void {}
export fn _IO_file_seek() void {}
export fn _IO_file_seekoff() void {}
export fn _IO_file_setbuf() void {}
export fn _IO_file_stat() void {}
export fn _IO_file_sync() void {}
export fn _IO_file_underflow() void {}
export fn _IO_file_write() void {}
export fn _IO_file_xsputn() void {}
export fn _IO_flockfile() void {}
export fn _IO_flush_all() void {}
export fn _IO_flush_all_linebuffered() void {}
export fn _IO_fopen() void {}
export fn _IO_fprintf() void {}
export fn _IO_fputs() void {}
export fn _IO_fread() void {}
export fn _IO_free_backup_area() void {}
export fn _IO_free_wbackup_area() void {}
export fn _IO_fsetpos() void {}
export fn _IO_fsetpos64() void {}
export fn _IO_ftell() void {}
export fn _IO_ftrylockfile() void {}
export fn _IO_funlockfile() void {}
export fn _IO_fwrite() void {}
export fn _IO_getc() void {}
export fn _IO_getline() void {}
export fn _IO_getline_info() void {}
export fn _IO_gets() void {}
export fn _IO_init() void {}
export fn _IO_init_marker() void {}
export fn _IO_init_wmarker() void {}
export fn _IO_iter_begin() void {}
export fn _IO_iter_end() void {}
export fn _IO_iter_file() void {}
export fn _IO_iter_next() void {}
export fn _IO_least_wmarker() void {}
export fn _IO_link_in() void {}
export fn _IO_list_all() void {}
export fn _IO_list_lock() void {}
export fn _IO_list_resetlock() void {}
export fn _IO_list_unlock() void {}
export fn _IO_marker_delta() void {}
export fn _IO_marker_difference() void {}
export fn _IO_padn() void {}
export fn _IO_peekc_locked() void {}
export fn _IO_popen() void {}
export fn _IO_printf() void {}
export fn _IO_proc_close() void {}
export fn _IO_proc_open() void {}
export fn _IO_putc() void {}
export fn _IO_puts() void {}
export fn _IO_remove_marker() void {}
export fn _IO_seekmark() void {}
export fn _IO_seekoff() void {}
export fn _IO_seekpos() void {}
export fn _IO_seekwmark() void {}
export fn _IO_setb() void {}
export fn _IO_setbuffer() void {}
export fn _IO_setvbuf() void {}
export fn _IO_sgetn() void {}
export fn _IO_sprintf() void {}
export fn _IO_sputbackc() void {}
export fn _IO_sputbackwc() void {}
export fn _IO_sscanf() void {}
export fn _IO_str_init_readonly() void {}
export fn _IO_str_init_static() void {}
export fn _IO_str_overflow() void {}
export fn _IO_str_pbackfail() void {}
export fn _IO_str_seekoff() void {}
export fn _IO_str_underflow() void {}
export fn _IO_sungetc() void {}
export fn _IO_sungetwc() void {}
export fn _IO_switch_to_get_mode() void {}
export fn _IO_switch_to_main_wget_area() void {}
export fn _IO_switch_to_wbackup_area() void {}
export fn _IO_switch_to_wget_mode() void {}
export fn _IO_un_link() void {}
export fn _IO_ungetc() void {}
export fn _IO_unsave_markers() void {}
export fn _IO_unsave_wmarkers() void {}
export fn _IO_vfprintf() void {}
export fn _IO_vfscanf() void {}
export fn _IO_vsprintf() void {}
export fn _IO_wdefault_doallocate() void {}
export fn _IO_wdefault_finish() void {}
export fn _IO_wdefault_pbackfail() void {}
export fn _IO_wdefault_uflow() void {}
export fn _IO_wdefault_xsgetn() void {}
export fn _IO_wdefault_xsputn() void {}
export fn _IO_wdo_write() void {}
export fn _IO_wdoallocbuf() void {}
export fn _IO_wfile_jumps() void {}
export fn _IO_wfile_overflow() void {}
export fn _IO_wfile_seekoff() void {}
export fn _IO_wfile_sync() void {}
export fn _IO_wfile_underflow() void {}
export fn _IO_wfile_xsputn() void {}
export fn _IO_wmarker_delta() void {}
export fn _IO_wsetb() void {}
export fn __abort_msg() void {}
export fn __adjtimex() void {}
export fn __after_morecore_hook() void {}
export fn __argz_count() void {}
export fn __argz_next() void {}
export fn __argz_stringify() void {}
export fn __asprintf() void {}
export fn __asprintf_chk() void {}
export fn __assert() void {}
export fn __assert_fail() void {}
export fn __assert_perror_fail() void {}
export fn __backtrace() void {}
export fn __backtrace_symbols() void {}
export fn __backtrace_symbols_fd() void {}
export fn __bsd_getpgrp() void {}
export fn __bzero() void {}
export fn __call_tls_dtors() void {}
export fn __check_rhosts_file() void {}
export fn __chk_fail() void {}
export fn __clock_getcpuclockid() void {}
export fn __clock_getres() void {}
export fn __clock_gettime() void {}
export fn __clock_nanosleep() void {}
export fn __clock_settime() void {}
export fn __clone() void {}
export fn __close() void {}
export fn __close_nocancel() void {}
export fn __cmsg_nxthdr() void {}
export fn __confstr_chk() void {}
export fn __connect() void {}
export fn __copy_grp() void {}
export fn __ctype32_b() void {}
export fn __ctype32_tolower() void {}
export fn __ctype32_toupper() void {}
export fn __ctype_b() void {}
export fn __ctype_b_loc() void {}
export fn __ctype_get_mb_cur_max() void {}
export fn __ctype_init() void {}
export fn __ctype_tolower() void {}
export fn __ctype_tolower_loc() void {}
export fn __ctype_toupper() void {}
export fn __ctype_toupper_loc() void {}
export fn __curbrk() void {}
export fn __cxa_at_quick_exit() void {}
export fn __cxa_atexit() void {}
export fn __cxa_finalize() void {}
export fn __cxa_thread_atexit_impl() void {}
export fn __cyg_profile_func_enter() void {}
export fn __cyg_profile_func_exit() void {}
export fn __daylight() void {}
export fn __dcgettext() void {}
export fn __default_morecore() void {}
export fn __dgettext() void {}
export fn __dprintf_chk() void {}
export fn __dup2() void {}
export fn __duplocale() void {}
export fn __endmntent() void {}
export fn __environ() void {}
export fn __errno_location() void {}
export fn __explicit_bzero_chk() void {}
export fn __fbufsize() void {}
export fn __fcntl() void {}
export fn __fdelt_chk() void {}
export fn __fdelt_warn() void {}
export fn __fentry__() void {}
export fn __ffs() void {}
export fn __fgets_chk() void {}
export fn __fgets_unlocked_chk() void {}
export fn __fgetws_chk() void {}
export fn __fgetws_unlocked_chk() void {}
export fn __finite() void {}
export fn __finitef() void {}
export fn __finitel() void {}
export fn __flbf() void {}
export fn __fork() void {}
export fn __fortify_fail() void {}
export fn __fpending() void {}
export fn __fprintf_chk() void {}
export fn __fpu_control() void {}
export fn __fpurge() void {}
export fn __fread_chk() void {}
export fn __fread_unlocked_chk() void {}
export fn __freadable() void {}
export fn __freading() void {}
export fn __free_hook() void {}
export fn __freelocale() void {}
export fn __fseeko64() void {}
export fn __fsetlocking() void {}
export fn __ftello64() void {}
export fn __fwprintf_chk() void {}
export fn __fwritable() void {}
export fn __fwriting() void {}
export fn __fxstat() void {}
export fn __fxstat64() void {}
export fn __fxstatat() void {}
export fn __fxstatat64() void {}
export fn __gai_sigqueue() void {}
export fn __gconv_get_alias_db() void {}
export fn __gconv_get_cache() void {}
export fn __gconv_get_modules_db() void {}
export fn __gconv_transliterate() void {}
export fn __getauxval() void {}
export fn __getcwd_chk() void {}
export fn __getdelim() void {}
export fn __getdomainname_chk() void {}
export fn __getgroups_chk() void {}
export fn __gethostname_chk() void {}
export fn __getlogin_r_chk() void {}
export fn __getmntent_r() void {}
export fn __getpagesize() void {}
export fn __getpgid() void {}
export fn __getpid() void {}
export fn __getrlimit() void {}
export fn __gets_chk() void {}
export fn __gettimeofday() void {}
export fn __getwd_chk() void {}
export fn __gmtime_r() void {}
export fn __h_errno() void {}
export fn __h_errno_location() void {}
export fn __idna_from_dns_encoding() void {}
export fn __idna_to_dns_encoding() void {}
export fn __inet6_scopeid_pton() void {}
export fn __inet_aton_exact() void {}
export fn __inet_pton_length() void {}
export fn __internal_endnetgrent() void {}
export fn __internal_getnetgrent_r() void {}
export fn __internal_setnetgrent() void {}
export fn __isalnum_l() void {}
export fn __isalpha_l() void {}
export fn __isascii_l() void {}
export fn __isblank_l() void {}
export fn __iscntrl_l() void {}
export fn __isctype() void {}
export fn __isdigit_l() void {}
export fn __isgraph_l() void {}
export fn __isinf() void {}
export fn __isinff() void {}
export fn __isinfl() void {}
export fn __islower_l() void {}
export fn __isnan() void {}
export fn __isnanf() void {}
export fn __isnanl() void {}
export fn __isoc99_fscanf() void {}
export fn __isoc99_fwscanf() void {}
export fn __isoc99_scanf() void {}
export fn __isoc99_sscanf() void {}
export fn __isoc99_swscanf() void {}
export fn __isoc99_vfscanf() void {}
export fn __isoc99_vfwscanf() void {}
export fn __isoc99_vscanf() void {}
export fn __isoc99_vsscanf() void {}
export fn __isoc99_vswscanf() void {}
export fn __isoc99_vwscanf() void {}
export fn __isoc99_wscanf() void {}
export fn __isprint_l() void {}
export fn __ispunct_l() void {}
export fn __isspace_l() void {}
export fn __isupper_l() void {}
export fn __iswalnum_l() void {}
export fn __iswalpha_l() void {}
export fn __iswblank_l() void {}
export fn __iswcntrl_l() void {}
export fn __iswctype() void {}
export fn __iswctype_l() void {}
export fn __iswdigit_l() void {}
export fn __iswgraph_l() void {}
export fn __iswlower_l() void {}
export fn __iswprint_l() void {}
export fn __iswpunct_l() void {}
export fn __iswspace_l() void {}
export fn __iswupper_l() void {}
export fn __iswxdigit_l() void {}
export fn __isxdigit_l() void {}
export fn __ivaliduser() void {}
export fn __key_decryptsession_pk_LOCAL() void {}
export fn __key_encryptsession_pk_LOCAL() void {}
export fn __key_gendes_LOCAL() void {}
export fn __libc_alloc_buffer_alloc_array() void {}
export fn __libc_alloc_buffer_allocate() void {}
export fn __libc_alloc_buffer_copy_bytes() void {}
export fn __libc_alloc_buffer_copy_string() void {}
export fn __libc_alloc_buffer_create_failure() void {}
export fn __libc_alloca_cutoff() void {}
export fn __libc_allocate_once_slow() void {}
export fn __libc_allocate_rtsig() void {}
export fn __libc_allocate_rtsig_private() void {}
export fn __libc_calloc() void {}
export fn __libc_clntudp_bufcreate() void {}
export fn __libc_current_sigrtmax() void {}
export fn __libc_current_sigrtmax_private() void {}
export fn __libc_current_sigrtmin() void {}
export fn __libc_current_sigrtmin_private() void {}
export fn __libc_dlclose() void {}
export fn __libc_dlopen_mode() void {}
export fn __libc_dlsym() void {}
export fn __libc_dlvsym() void {}
export fn __libc_dynarray_at_failure() void {}
export fn __libc_dynarray_emplace_enlarge() void {}
export fn __libc_dynarray_finalize() void {}
export fn __libc_dynarray_resize() void {}
export fn __libc_dynarray_resize_clear() void {}
export fn __libc_fatal() void {}
export fn __libc_fcntl64() void {}
export fn __libc_fork() void {}
export fn __libc_free() void {}
export fn __libc_freeres() void {}
export fn __libc_ifunc_impl_list() void {}
export fn __libc_init_first() void {}
export fn __libc_longjmp() void {}
export fn __libc_mallinfo() void {}
export fn __libc_malloc() void {}
export fn __libc_mallopt() void {}
export fn __libc_memalign() void {}
export fn __libc_msgrcv() void {}
export fn __libc_msgsnd() void {}
export fn __libc_pread() void {}
export fn __libc_pthread_init() void {}
export fn __libc_pvalloc() void {}
export fn __libc_pwrite() void {}
export fn __libc_readline_unlocked() void {}
export fn __libc_realloc() void {}
export fn __libc_reallocarray() void {}
export fn __libc_rpc_getport() void {}
export fn __libc_sa_len() void {}
export fn __libc_scratch_buffer_grow() void {}
export fn __libc_scratch_buffer_grow_preserve() void {}
export fn __libc_scratch_buffer_set_array_size() void {}
export fn __libc_secure_getenv() void {}
export fn __libc_siglongjmp() void {}
export fn __libc_start_main() void {}
export fn __libc_system() void {}
export fn __libc_thread_freeres() void {}
export fn __libc_valloc() void {}
export fn __libc_vfork() void {}
export fn __longjmp_chk() void {}
export fn __lseek() void {}
export fn __lxstat() void {}
export fn __lxstat64() void {}
export fn __madvise() void {}
export fn __malloc_hook() void {}
export fn __malloc_initialize_hook() void {}
export fn __mbrlen() void {}
export fn __mbrtowc() void {}
export fn __mbsnrtowcs_chk() void {}
export fn __mbsrtowcs_chk() void {}
export fn __mbstowcs_chk() void {}
export fn __memalign_hook() void {}
export fn __memcpy_chk() void {}
export fn __memmove_chk() void {}
export fn __mempcpy() void {}
export fn __mempcpy_chk() void {}
export fn __mempcpy_small() void {}
export fn __memset_chk() void {}
export fn __merge_grp() void {}
export fn __mktemp() void {}
export fn __mmap() void {}
export fn __monstartup() void {}
export fn __morecore() void {}
export fn __mprotect() void {}
export fn __munmap() void {}
export fn __nanosleep() void {}
export fn __nanosleep_nocancel() void {}
export fn __netlink_assert_response() void {}
export fn __newlocale() void {}
export fn __nl_langinfo_l() void {}
export fn __nss_configure_lookup() void {}
export fn __nss_database_lookup() void {}
export fn __nss_disable_nscd() void {}
export fn __nss_group_lookup() void {}
export fn __nss_group_lookup2() void {}
export fn __nss_hash() void {}
export fn __nss_hostname_digits_dots() void {}
export fn __nss_hosts_lookup() void {}
export fn __nss_hosts_lookup2() void {}
export fn __nss_lookup() void {}
export fn __nss_lookup_function() void {}
export fn __nss_next() void {}
export fn __nss_next2() void {}
export fn __nss_passwd_lookup() void {}
export fn __nss_passwd_lookup2() void {}
export fn __nss_services_lookup2() void {}
export fn __obstack_printf_chk() void {}
export fn __obstack_vprintf_chk() void {}
export fn __open() void {}
export fn __open64() void {}
export fn __open64_2() void {}
export fn __open64_nocancel() void {}
export fn __open_2() void {}
export fn __open_catalog() void {}
export fn __open_nocancel() void {}
export fn __openat64_2() void {}
export fn __openat_2() void {}
export fn __overflow() void {}
export fn __pause_nocancel() void {}
export fn __pipe() void {}
export fn __poll() void {}
export fn __poll_chk() void {}
export fn __posix_getopt() void {}
export fn __ppoll_chk() void {}
export fn __pread64() void {}
export fn __pread64_chk() void {}
export fn __pread_chk() void {}
export fn __printf_chk() void {}
export fn __printf_fp() void {}
export fn __profile_frequency() void {}
export fn __progname() void {}
export fn __progname_full() void {}
export fn __ptsname_r_chk() void {}
export fn __pwrite64() void {}
export fn __rawmemchr() void {}
export fn __rcmd_errstr() void {}
export fn __read() void {}
export fn __read_chk() void {}
export fn __read_nocancel() void {}
export fn __readlink_chk() void {}
export fn __readlinkat_chk() void {}
export fn __realloc_hook() void {}
export fn __realpath_chk() void {}
export fn __recv() void {}
export fn __recv_chk() void {}
export fn __recvfrom_chk() void {}
export fn __register_atfork() void {}
export fn __res_iclose() void {}
export fn __res_init() void {}
export fn __res_nclose() void {}
export fn __res_ninit() void {}
export fn __res_randomid() void {}
export fn __res_state() void {}
export fn __resolv_context_get() void {}
export fn __resolv_context_get_override() void {}
export fn __resolv_context_get_preinit() void {}
export fn __resolv_context_put() void {}
export fn __resp() void {}
export fn __rpc_thread_createerr() void {}
export fn __rpc_thread_svc_fdset() void {}
export fn __rpc_thread_svc_max_pollfd() void {}
export fn __rpc_thread_svc_pollfd() void {}
export fn __sbrk() void {}
export fn __sched_cpualloc() void {}
export fn __sched_cpucount() void {}
export fn __sched_cpufree() void {}
export fn __sched_get_priority_max() void {}
export fn __sched_get_priority_min() void {}
export fn __sched_getparam() void {}
export fn __sched_getscheduler() void {}
export fn __sched_setscheduler() void {}
export fn __sched_yield() void {}
export fn __secure_getenv() void {}
export fn __select() void {}
export fn __send() void {}
export fn __sendmmsg() void {}
export fn __setmntent() void {}
export fn __setpgid() void {}
export fn __sigaction() void {}
export fn __sigaddset() void {}
export fn __sigdelset() void {}
export fn __sigismember() void {}
export fn __signbit() void {}
export fn __signbitf() void {}
export fn __signbitl() void {}
export fn __sigpause() void {}
export fn __sigsetjmp() void {}
export fn __sigsuspend() void {}
export fn __sigtimedwait() void {}
export fn __snprintf() void {}
export fn __snprintf_chk() void {}
export fn __socket() void {}
export fn __sprintf_chk() void {}
export fn __stack_chk_fail() void {}
export fn __statfs() void {}
export fn __stpcpy() void {}
export fn __stpcpy_chk() void {}
export fn __stpcpy_small() void {}
export fn __stpncpy() void {}
export fn __stpncpy_chk() void {}
export fn __strcasecmp() void {}
export fn __strcasecmp_l() void {}
export fn __strcasestr() void {}
export fn __strcat_chk() void {}
export fn __strcoll_l() void {}
export fn __strcpy_chk() void {}
export fn __strcpy_small() void {}
export fn __strcspn_c1() void {}
export fn __strcspn_c2() void {}
export fn __strcspn_c3() void {}
export fn __strdup() void {}
export fn __strerror_r() void {}
export fn __strfmon_l() void {}
export fn __strftime_l() void {}
export fn __strncasecmp_l() void {}
export fn __strncat_chk() void {}
export fn __strncpy_chk() void {}
export fn __strndup() void {}
export fn __strpbrk_c2() void {}
export fn __strpbrk_c3() void {}
export fn __strsep_1c() void {}
export fn __strsep_2c() void {}
export fn __strsep_3c() void {}
export fn __strsep_g() void {}
export fn __strspn_c1() void {}
export fn __strspn_c2() void {}
export fn __strspn_c3() void {}
export fn __strtod_internal() void {}
export fn __strtod_l() void {}
export fn __strtod_nan() void {}
export fn __strtof128_internal() void {}
export fn __strtof128_nan() void {}
export fn __strtof_internal() void {}
export fn __strtof_l() void {}
export fn __strtof_nan() void {}
export fn __strtok_r() void {}
export fn __strtok_r_1c() void {}
export fn __strtol_internal() void {}
export fn __strtol_l() void {}
export fn __strtold_internal() void {}
export fn __strtold_l() void {}
export fn __strtold_nan() void {}
export fn __strtoll_internal() void {}
export fn __strtoll_l() void {}
export fn __strtoul_internal() void {}
export fn __strtoul_l() void {}
export fn __strtoull_internal() void {}
export fn __strtoull_l() void {}
export fn __strverscmp() void {}
export fn __strxfrm_l() void {}
export fn __swprintf_chk() void {}
export fn __sysconf() void {}
export fn __sysctl() void {}
export fn __syslog_chk() void {}
export fn __sysv_signal() void {}
export fn __tdelete() void {}
export fn __tfind() void {}
export fn __timezone() void {}
export fn __toascii_l() void {}
export fn __tolower_l() void {}
export fn __toupper_l() void {}
export fn __towctrans() void {}
export fn __towctrans_l() void {}
export fn __towlower_l() void {}
export fn __towupper_l() void {}
export fn __tsearch() void {}
export fn __ttyname_r_chk() void {}
export fn __twalk() void {}
export fn __tzname() void {}
export fn __uflow() void {}
export fn __underflow() void {}
export fn __uselocale() void {}
export fn __vasprintf_chk() void {}
export fn __vdprintf_chk() void {}
export fn __vfork() void {}
export fn __vfprintf_chk() void {}
export fn __vfscanf() void {}
export fn __vfwprintf_chk() void {}
export fn __vprintf_chk() void {}
export fn __vsnprintf() void {}
export fn __vsnprintf_chk() void {}
export fn __vsprintf_chk() void {}
export fn __vsscanf() void {}
export fn __vswprintf_chk() void {}
export fn __vsyslog_chk() void {}
export fn __vwprintf_chk() void {}
export fn __wait() void {}
export fn __waitpid() void {}
export fn __wcpcpy_chk() void {}
export fn __wcpncpy_chk() void {}
export fn __wcrtomb_chk() void {}
export fn __wcscasecmp_l() void {}
export fn __wcscat_chk() void {}
export fn __wcscoll_l() void {}
export fn __wcscpy_chk() void {}
export fn __wcsftime_l() void {}
export fn __wcsncasecmp_l() void {}
export fn __wcsncat_chk() void {}
export fn __wcsncpy_chk() void {}
export fn __wcsnrtombs_chk() void {}
export fn __wcsrtombs_chk() void {}
export fn __wcstod_internal() void {}
export fn __wcstod_l() void {}
export fn __wcstof128_internal() void {}
export fn __wcstof_internal() void {}
export fn __wcstof_l() void {}
export fn __wcstol_internal() void {}
export fn __wcstol_l() void {}
export fn __wcstold_internal() void {}
export fn __wcstold_l() void {}
export fn __wcstoll_internal() void {}
export fn __wcstoll_l() void {}
export fn __wcstombs_chk() void {}
export fn __wcstoul_internal() void {}
export fn __wcstoul_l() void {}
export fn __wcstoull_internal() void {}
export fn __wcstoull_l() void {}
export fn __wcsxfrm_l() void {}
export fn __wctomb_chk() void {}
export fn __wctrans_l() void {}
export fn __wctype_l() void {}
export fn __wmemcpy_chk() void {}
export fn __wmemmove_chk() void {}
export fn __wmempcpy_chk() void {}
export fn __wmemset_chk() void {}
export fn __woverflow() void {}
export fn __wprintf_chk() void {}
export fn __write() void {}
export fn __write_nocancel() void {}
export fn __wuflow() void {}
export fn __wunderflow() void {}
export fn __xmknod() void {}
export fn __xmknodat() void {}
export fn __xpg_basename() void {}
export fn __xpg_sigpause() void {}
export fn __xpg_strerror_r() void {}
export fn __xstat() void {}
export fn __xstat64() void {}
export fn _authenticate() void {}
export fn _dl_addr() void {}
export fn _dl_catch_error() void {}
export fn _dl_catch_exception() void {}
export fn _dl_mcount_wrapper() void {}
export fn _dl_mcount_wrapper_check() void {}
export fn _dl_open_hook() void {}
export fn _dl_open_hook2() void {}
export fn _dl_signal_error() void {}
export fn _dl_signal_exception() void {}
export fn _dl_sym() void {}
export fn _dl_vsym() void {}
export fn _environ() void {}
export fn _exit() void {}
export fn _flushlbf() void {}
export fn _itoa_lower_digits() void {}
export fn _libc_intl_domainname() void {}
export fn _longjmp() void {}
export fn _mcleanup() void {}
export fn _mcount() void {}
export fn _nl_default_dirname() void {}
export fn _nl_domain_bindings() void {}
export fn _nl_msg_cat_cntr() void {}
export fn _nss_files_parse_grent() void {}
export fn _nss_files_parse_pwent() void {}
export fn _nss_files_parse_sgent() void {}
export fn _nss_files_parse_spent() void {}
export fn _null_auth() void {}
export fn _obstack() void {}
export fn _obstack_allocated_p() void {}
export fn _obstack_begin() void {}
export fn _obstack_begin_1() void {}
export fn _obstack_free() void {}
export fn _obstack_memory_used() void {}
export fn _obstack_newchunk() void {}
export fn _res() void {}
export fn _res_hconf() void {}
export fn _rpc_dtablesize() void {}
export fn _seterr_reply() void {}
export fn _setjmp() void {}
export fn _sys_errlist() void {}
export fn _sys_nerr() void {}
export fn _sys_siglist() void {}
export fn _tolower() void {}
export fn _toupper() void {}
export fn a64l() void {}
export fn abort() void {}
export fn abs() void {}
export fn accept() void {}
export fn accept4() void {}
export fn access() void {}
export fn acct() void {}
export fn addmntent() void {}
export fn addseverity() void {}
export fn adjtime() void {}
export fn adjtimex() void {}
export fn advance() void {}
export fn alarm() void {}
export fn aligned_alloc() void {}
export fn alphasort() void {}
export fn alphasort64() void {}
export fn argp_err_exit_status() void {}
export fn argp_error() void {}
export fn argp_failure() void {}
export fn argp_help() void {}
export fn argp_parse() void {}
export fn argp_program_bug_address() void {}
export fn argp_program_version() void {}
export fn argp_program_version_hook() void {}
export fn argp_state_help() void {}
export fn argp_usage() void {}
export fn argz_add() void {}
export fn argz_add_sep() void {}
export fn argz_append() void {}
export fn argz_count() void {}
export fn argz_create() void {}
export fn argz_create_sep() void {}
export fn argz_delete() void {}
export fn argz_extract() void {}
export fn argz_insert() void {}
export fn argz_next() void {}
export fn argz_replace() void {}
export fn argz_stringify() void {}
export fn asctime() void {}
export fn asctime_r() void {}
export fn asprintf() void {}
export fn atof() void {}
export fn atoi() void {}
export fn atol() void {}
export fn atoll() void {}
export fn authdes_create() void {}
export fn authdes_getucred() void {}
export fn authdes_pk_create() void {}
export fn authnone_create() void {}
export fn authunix_create() void {}
export fn authunix_create_default() void {}
export fn backtrace() void {}
export fn backtrace_symbols() void {}
export fn backtrace_symbols_fd() void {}
export fn basename() void {}
export fn bcmp() void {}
export fn bcopy() void {}
export fn bdflush() void {}
export fn bind() void {}
export fn bind_textdomain_codeset() void {}
export fn bindresvport() void {}
export fn bindtextdomain() void {}
export fn brk() void {}
export fn bsd_signal() void {}
export fn bsearch() void {}
export fn btowc() void {}
export fn bzero() void {}
export fn c16rtomb() void {}
export fn c32rtomb() void {}
export fn calloc() void {}
export fn callrpc() void {}
export fn canonicalize_file_name() void {}
export fn capget() void {}
export fn capset() void {}
export fn catclose() void {}
export fn catgets() void {}
export fn catopen() void {}
export fn cbc_crypt() void {}
export fn cfgetispeed() void {}
export fn cfgetospeed() void {}
export fn cfmakeraw() void {}
export fn cfree() void {}
export fn cfsetispeed() void {}
export fn cfsetospeed() void {}
export fn cfsetspeed() void {}
export fn chdir() void {}
export fn chflags() void {}
export fn chmod() void {}
export fn chown() void {}
export fn chroot() void {}
export fn clearenv() void {}
export fn clearerr() void {}
export fn clearerr_unlocked() void {}
export fn clnt_broadcast() void {}
export fn clnt_create() void {}
export fn clnt_pcreateerror() void {}
export fn clnt_perrno() void {}
export fn clnt_perror() void {}
export fn clnt_spcreateerror() void {}
export fn clnt_sperrno() void {}
export fn clnt_sperror() void {}
export fn clntraw_create() void {}
export fn clnttcp_create() void {}
export fn clntudp_bufcreate() void {}
export fn clntudp_create() void {}
export fn clntunix_create() void {}
export fn clock() void {}
export fn clock_adjtime() void {}
export fn clock_getcpuclockid() void {}
export fn clock_getres() void {}
export fn clock_gettime() void {}
export fn clock_nanosleep() void {}
export fn clock_settime() void {}
export fn clone() void {}
export fn close() void {}
export fn closedir() void {}
export fn closelog() void {}
export fn confstr() void {}
export fn connect() void {}
export fn copy_file_range() void {}
export fn copysign() void {}
export fn copysignf() void {}
export fn copysignl() void {}
export fn creat() void {}
export fn creat64() void {}
export fn create_module() void {}
export fn ctermid() void {}
export fn ctime() void {}
export fn ctime_r() void {}
export fn cuserid() void {}
export fn daemon() void {}
export fn daylight() void {}
export fn dcgettext() void {}
export fn dcngettext() void {}
export fn delete_module() void {}
export fn des_setparity() void {}
export fn dgettext() void {}
export fn difftime() void {}
export fn dirfd() void {}
export fn dirname() void {}
export fn div() void {}
export fn dl_iterate_phdr() void {}
export fn dngettext() void {}
export fn dprintf() void {}
export fn drand48() void {}
export fn drand48_r() void {}
export fn dup() void {}
export fn dup2() void {}
export fn dup3() void {}
export fn duplocale() void {}
export fn dysize() void {}
export fn eaccess() void {}
export fn ecb_crypt() void {}
export fn ecvt() void {}
export fn ecvt_r() void {}
export fn endaliasent() void {}
export fn endfsent() void {}
export fn endgrent() void {}
export fn endhostent() void {}
export fn endmntent() void {}
export fn endnetent() void {}
export fn endnetgrent() void {}
export fn endprotoent() void {}
export fn endpwent() void {}
export fn endrpcent() void {}
export fn endservent() void {}
export fn endsgent() void {}
export fn endspent() void {}
export fn endttyent() void {}
export fn endusershell() void {}
export fn endutent() void {}
export fn endutxent() void {}
export fn environ() void {}
export fn envz_add() void {}
export fn envz_entry() void {}
export fn envz_get() void {}
export fn envz_merge() void {}
export fn envz_remove() void {}
export fn envz_strip() void {}
export fn epoll_create() void {}
export fn epoll_create1() void {}
export fn epoll_ctl() void {}
export fn epoll_pwait() void {}
export fn epoll_wait() void {}
export fn erand48() void {}
export fn erand48_r() void {}
export fn err() void {}
export fn errno() void {}
export fn error_at_line() void {}
export fn error_message_count() void {}
export fn error_one_per_line() void {}
export fn error_print_progname() void {}
export fn errx() void {}
export fn ether_aton() void {}
export fn ether_aton_r() void {}
export fn ether_hostton() void {}
export fn ether_line() void {}
export fn ether_ntoa() void {}
export fn ether_ntoa_r() void {}
export fn ether_ntohost() void {}
export fn euidaccess() void {}
export fn eventfd() void {}
export fn eventfd_read() void {}
export fn eventfd_write() void {}
export fn execl() void {}
export fn execle() void {}
export fn execlp() void {}
export fn execv() void {}
export fn execve() void {}
export fn execvp() void {}
export fn execvpe() void {}
export fn exit() void {}
export fn explicit_bzero() void {}
export fn faccessat() void {}
export fn fallocate() void {}
export fn fallocate64() void {}
export fn fanotify_init() void {}
export fn fanotify_mark() void {}
export fn fattach() void {}
export fn fchdir() void {}
export fn fchflags() void {}
export fn fchmod() void {}
export fn fchmodat() void {}
export fn fchown() void {}
export fn fchownat() void {}
export fn fclose() void {}
export fn fcloseall() void {}
export fn fcntl() void {}
export fn fcntl64() void {}
export fn fcvt() void {}
export fn fcvt_r() void {}
export fn fdatasync() void {}
export fn fdetach() void {}
export fn fdopen() void {}
export fn fdopendir() void {}
export fn feof() void {}
export fn feof_unlocked() void {}
export fn ferror() void {}
export fn ferror_unlocked() void {}
export fn fexecve() void {}
export fn fflush() void {}
export fn fflush_unlocked() void {}
export fn ffs() void {}
export fn ffsl() void {}
export fn ffsll() void {}
export fn fgetc() void {}
export fn fgetc_unlocked() void {}
export fn fgetgrent() void {}
export fn fgetgrent_r() void {}
export fn fgetpos() void {}
export fn fgetpos64() void {}
export fn fgetpwent() void {}
export fn fgetpwent_r() void {}
export fn fgets() void {}
export fn fgets_unlocked() void {}
export fn fgetsgent() void {}
export fn fgetsgent_r() void {}
export fn fgetspent() void {}
export fn fgetspent_r() void {}
export fn fgetwc() void {}
export fn fgetwc_unlocked() void {}
export fn fgetws() void {}
export fn fgetws_unlocked() void {}
export fn fgetxattr() void {}
export fn fileno() void {}
export fn fileno_unlocked() void {}
export fn finite() void {}
export fn finitef() void {}
export fn finitel() void {}
export fn flistxattr() void {}
export fn flock() void {}
export fn flockfile() void {}
export fn fmemopen() void {}
export fn fmtmsg() void {}
export fn fnmatch() void {}
export fn fopen() void {}
export fn fopen64() void {}
export fn fopencookie() void {}
export fn fork() void {}
export fn fpathconf() void {}
export fn fprintf() void {}
export fn fputc() void {}
export fn fputc_unlocked() void {}
export fn fputs() void {}
export fn fputs_unlocked() void {}
export fn fputwc() void {}
export fn fputwc_unlocked() void {}
export fn fputws() void {}
export fn fputws_unlocked() void {}
export fn fread() void {}
export fn fread_unlocked() void {}
export fn free() void {}
export fn freeaddrinfo() void {}
export fn freeifaddrs() void {}
export fn freelocale() void {}
export fn fremovexattr() void {}
export fn freopen() void {}
export fn freopen64() void {}
export fn frexp() void {}
export fn frexpf() void {}
export fn frexpl() void {}
export fn fscanf() void {}
export fn fseek() void {}
export fn fseeko() void {}
export fn fseeko64() void {}
export fn fsetpos() void {}
export fn fsetpos64() void {}
export fn fsetxattr() void {}
export fn fstatfs() void {}
export fn fstatfs64() void {}
export fn fstatvfs() void {}
export fn fstatvfs64() void {}
export fn fsync() void {}
export fn ftell() void {}
export fn ftello() void {}
export fn ftello64() void {}
export fn ftime() void {}
export fn ftok() void {}
export fn ftruncate() void {}
export fn ftruncate64() void {}
export fn ftrylockfile() void {}
export fn fts64_children() void {}
export fn fts64_close() void {}
export fn fts64_open() void {}
export fn fts64_read() void {}
export fn fts64_set() void {}
export fn fts_children() void {}
export fn fts_close() void {}
export fn fts_open() void {}
export fn fts_read() void {}
export fn fts_set() void {}
export fn ftw() void {}
export fn ftw64() void {}
export fn funlockfile() void {}
export fn futimens() void {}
export fn futimes() void {}
export fn futimesat() void {}
export fn fwide() void {}
export fn fwprintf() void {}
export fn fwrite() void {}
export fn fwrite_unlocked() void {}
export fn fwscanf() void {}
export fn gai_strerror() void {}
export fn gcvt() void {}
export fn get_avphys_pages() void {}
export fn get_current_dir_name() void {}
export fn get_kernel_syms() void {}
export fn get_myaddress() void {}
export fn get_nprocs() void {}
export fn get_nprocs_conf() void {}
export fn get_phys_pages() void {}
export fn getaddrinfo() void {}
export fn getaliasbyname() void {}
export fn getaliasbyname_r() void {}
export fn getaliasent() void {}
export fn getaliasent_r() void {}
export fn getauxval() void {}
export fn getc() void {}
export fn getc_unlocked() void {}
export fn getchar() void {}
export fn getchar_unlocked() void {}
export fn getcontext() void {}
export fn getcpu() void {}
export fn getcwd() void {}
export fn getdate() void {}
export fn getdate_err() void {}
export fn getdate_r() void {}
export fn getdelim() void {}
export fn getdirentries() void {}
export fn getdirentries64() void {}
export fn getdomainname() void {}
export fn getdtablesize() void {}
export fn getegid() void {}
export fn getentropy() void {}
export fn getenv() void {}
export fn geteuid() void {}
export fn getfsent() void {}
export fn getfsfile() void {}
export fn getfsspec() void {}
export fn getgid() void {}
export fn getgrent() void {}
export fn getgrent_r() void {}
export fn getgrgid() void {}
export fn getgrgid_r() void {}
export fn getgrnam() void {}
export fn getgrnam_r() void {}
export fn getgrouplist() void {}
export fn getgroups() void {}
export fn gethostbyaddr() void {}
export fn gethostbyaddr_r() void {}
export fn gethostbyname() void {}
export fn gethostbyname2() void {}
export fn gethostbyname2_r() void {}
export fn gethostbyname_r() void {}
export fn gethostent() void {}
export fn gethostent_r() void {}
export fn gethostid() void {}
export fn gethostname() void {}
export fn getifaddrs() void {}
export fn getipv4sourcefilter() void {}
export fn getitimer() void {}
export fn getline() void {}
export fn getloadavg() void {}
export fn getlogin() void {}
export fn getlogin_r() void {}
export fn getmntent() void {}
export fn getmntent_r() void {}
export fn getmsg() void {}
export fn getnameinfo() void {}
export fn getnetbyaddr() void {}
export fn getnetbyaddr_r() void {}
export fn getnetbyname() void {}
export fn getnetbyname_r() void {}
export fn getnetent() void {}
export fn getnetent_r() void {}
export fn getnetgrent() void {}
export fn getnetgrent_r() void {}
export fn getnetname() void {}
export fn getopt() void {}
export fn getopt_long() void {}
export fn getopt_long_only() void {}
export fn getpagesize() void {}
export fn getpass() void {}
export fn getpeername() void {}
export fn getpgid() void {}
export fn getpgrp() void {}
export fn getpid() void {}
export fn getpmsg() void {}
export fn getppid() void {}
export fn getpriority() void {}
export fn getprotobyname() void {}
export fn getprotobyname_r() void {}
export fn getprotobynumber() void {}
export fn getprotobynumber_r() void {}
export fn getprotoent() void {}
export fn getprotoent_r() void {}
export fn getpt() void {}
export fn getpublickey() void {}
export fn getpw() void {}
export fn getpwent() void {}
export fn getpwent_r() void {}
export fn getpwnam() void {}
export fn getpwnam_r() void {}
export fn getpwuid() void {}
export fn getpwuid_r() void {}
export fn getrandom() void {}
export fn getresgid() void {}
export fn getresuid() void {}
export fn getrlimit() void {}
export fn getrlimit64() void {}
export fn getrpcbyname() void {}
export fn getrpcbyname_r() void {}
export fn getrpcbynumber() void {}
export fn getrpcbynumber_r() void {}
export fn getrpcent() void {}
export fn getrpcent_r() void {}
export fn getrpcport() void {}
export fn getrusage() void {}
export fn gets() void {}
export fn getsecretkey() void {}
export fn getservbyname() void {}
export fn getservbyname_r() void {}
export fn getservbyport() void {}
export fn getservbyport_r() void {}
export fn getservent() void {}
export fn getservent_r() void {}
export fn getsgent() void {}
export fn getsgent_r() void {}
export fn getsgnam() void {}
export fn getsgnam_r() void {}
export fn getsid() void {}
export fn getsockname() void {}
export fn getsockopt() void {}
export fn getsourcefilter() void {}
export fn getspent() void {}
export fn getspent_r() void {}
export fn getspnam() void {}
export fn getspnam_r() void {}
export fn getsubopt() void {}
export fn gettext() void {}
export fn gettimeofday() void {}
export fn getttyent() void {}
export fn getttynam() void {}
export fn getuid() void {}
export fn getusershell() void {}
export fn getutent() void {}
export fn getutent_r() void {}
export fn getutid() void {}
export fn getutid_r() void {}
export fn getutline() void {}
export fn getutline_r() void {}
export fn getutmp() void {}
export fn getutmpx() void {}
export fn getutxent() void {}
export fn getutxid() void {}
export fn getutxline() void {}
export fn getw() void {}
export fn getwc() void {}
export fn getwc_unlocked() void {}
export fn getwchar() void {}
export fn getwchar_unlocked() void {}
export fn getwd() void {}
export fn getxattr() void {}
export fn glob() void {}
export fn glob64() void {}
export fn glob_pattern_p() void {}
export fn globfree() void {}
export fn globfree64() void {}
export fn gmtime() void {}
export fn gmtime_r() void {}
export fn gnu_dev_major() void {}
export fn gnu_dev_makedev() void {}
export fn gnu_dev_minor() void {}
export fn gnu_get_libc_release() void {}
export fn gnu_get_libc_version() void {}
export fn grantpt() void {}
export fn group_member() void {}
export fn gsignal() void {}
export fn gtty() void {}
export fn h_errlist() void {}
export fn h_nerr() void {}
export fn hasmntopt() void {}
export fn hcreate() void {}
export fn hcreate_r() void {}
export fn hdestroy() void {}
export fn hdestroy_r() void {}
export fn herror() void {}
export fn host2netname() void {}
export fn hsearch() void {}
export fn hsearch_r() void {}
export fn hstrerror() void {}
export fn htonl() void {}
export fn htons() void {}
export fn iconv() void {}
export fn iconv_close() void {}
export fn iconv_open() void {}
export fn if_freenameindex() void {}
export fn if_indextoname() void {}
export fn if_nameindex() void {}
export fn if_nametoindex() void {}
export fn imaxabs() void {}
export fn imaxdiv() void {}
export fn in6addr_any() void {}
export fn in6addr_loopback() void {}
export fn index() void {}
export fn inet6_opt_append() void {}
export fn inet6_opt_find() void {}
export fn inet6_opt_finish() void {}
export fn inet6_opt_get_val() void {}
export fn inet6_opt_init() void {}
export fn inet6_opt_next() void {}
export fn inet6_opt_set_val() void {}
export fn inet6_option_alloc() void {}
export fn inet6_option_append() void {}
export fn inet6_option_find() void {}
export fn inet6_option_init() void {}
export fn inet6_option_next() void {}
export fn inet6_option_space() void {}
export fn inet6_rth_add() void {}
export fn inet6_rth_getaddr() void {}
export fn inet6_rth_init() void {}
export fn inet6_rth_reverse() void {}
export fn inet6_rth_segments() void {}
export fn inet6_rth_space() void {}
export fn inet_addr() void {}
export fn inet_aton() void {}
export fn inet_lnaof() void {}
export fn inet_makeaddr() void {}
export fn inet_netof() void {}
export fn inet_network() void {}
export fn inet_nsap_addr() void {}
export fn inet_nsap_ntoa() void {}
export fn inet_ntoa() void {}
export fn inet_ntop() void {}
export fn inet_pton() void {}
export fn init_module() void {}
export fn initgroups() void {}
export fn initstate() void {}
export fn initstate_r() void {}
export fn innetgr() void {}
export fn inotify_add_watch() void {}
export fn inotify_init() void {}
export fn inotify_init1() void {}
export fn inotify_rm_watch() void {}
export fn insque() void {}
export fn ioctl() void {}
export fn ioperm() void {}
export fn iopl() void {}
export fn iruserok() void {}
export fn iruserok_af() void {}
export fn isalnum() void {}
export fn isalnum_l() void {}
export fn isalpha() void {}
export fn isalpha_l() void {}
export fn isascii() void {}
export fn isastream() void {}
export fn isatty() void {}
export fn isblank() void {}
export fn isblank_l() void {}
export fn iscntrl() void {}
export fn iscntrl_l() void {}
export fn isctype() void {}
export fn isdigit() void {}
export fn isdigit_l() void {}
export fn isfdtype() void {}
export fn isgraph() void {}
export fn isgraph_l() void {}
export fn isinf() void {}
export fn isinff() void {}
export fn isinfl() void {}
export fn islower() void {}
export fn islower_l() void {}
export fn isnan() void {}
export fn isnanf() void {}
export fn isnanl() void {}
export fn isprint() void {}
export fn isprint_l() void {}
export fn ispunct() void {}
export fn ispunct_l() void {}
export fn isspace() void {}
export fn isspace_l() void {}
export fn isupper() void {}
export fn isupper_l() void {}
export fn iswalnum() void {}
export fn iswalnum_l() void {}
export fn iswalpha() void {}
export fn iswalpha_l() void {}
export fn iswblank() void {}
export fn iswblank_l() void {}
export fn iswcntrl() void {}
export fn iswcntrl_l() void {}
export fn iswctype() void {}
export fn iswctype_l() void {}
export fn iswdigit() void {}
export fn iswdigit_l() void {}
export fn iswgraph() void {}
export fn iswgraph_l() void {}
export fn iswlower() void {}
export fn iswlower_l() void {}
export fn iswprint() void {}
export fn iswprint_l() void {}
export fn iswpunct() void {}
export fn iswpunct_l() void {}
export fn iswspace() void {}
export fn iswspace_l() void {}
export fn iswupper() void {}
export fn iswupper_l() void {}
export fn iswxdigit() void {}
export fn iswxdigit_l() void {}
export fn isxdigit() void {}
export fn isxdigit_l() void {}
export fn jrand48() void {}
export fn jrand48_r() void {}
export fn key_decryptsession() void {}
export fn key_decryptsession_pk() void {}
export fn key_encryptsession() void {}
export fn key_encryptsession_pk() void {}
export fn key_gendes() void {}
export fn key_get_conv() void {}
export fn key_secretkey_is_set() void {}
export fn key_setnet() void {}
export fn key_setsecret() void {}
export fn kill() void {}
export fn killpg() void {}
export fn klogctl() void {}
export fn l64a() void {}
export fn labs() void {}
export fn lchmod() void {}
export fn lchown() void {}
export fn lckpwdf() void {}
export fn lcong48() void {}
export fn lcong48_r() void {}
export fn ldexp() void {}
export fn ldexpf() void {}
export fn ldexpl() void {}
export fn ldiv() void {}
export fn lfind() void {}
export fn lgetxattr() void {}
export fn link() void {}
export fn linkat() void {}
export fn listen() void {}
export fn listxattr() void {}
export fn llabs() void {}
export fn lldiv() void {}
export fn llistxattr() void {}
export fn llseek() void {}
export fn loc1() void {}
export fn loc2() void {}
export fn localeconv() void {}
export fn localtime() void {}
export fn localtime_r() void {}
export fn lockf() void {}
export fn lockf64() void {}
export fn locs() void {}
export fn longjmp() void {}
export fn lrand48() void {}
export fn lrand48_r() void {}
export fn lremovexattr() void {}
export fn lsearch() void {}
export fn lseek() void {}
export fn lseek64() void {}
export fn lsetxattr() void {}
export fn lutimes() void {}
export fn madvise() void {}
export fn makecontext() void {}
export fn mallinfo() void {}
export fn malloc() void {}
export fn malloc_get_state() void {}
export fn malloc_info() void {}
export fn malloc_set_state() void {}
export fn malloc_stats() void {}
export fn malloc_trim() void {}
export fn malloc_usable_size() void {}
export fn mallopt() void {}
export fn mallwatch() void {}
export fn mblen() void {}
export fn mbrlen() void {}
export fn mbrtoc16() void {}
export fn mbrtoc32() void {}
export fn mbrtowc() void {}
export fn mbsinit() void {}
export fn mbsnrtowcs() void {}
export fn mbsrtowcs() void {}
export fn mbstowcs() void {}
export fn mbtowc() void {}
export fn mcheck() void {}
export fn mcheck_check_all() void {}
export fn mcheck_pedantic() void {}
export fn mcount() void {}
export fn memalign() void {}
export fn memccpy() void {}
export fn memchr() void {}
export fn memcmp() void {}
export fn memcpy() void {}
export fn memfd_create() void {}
export fn memfrob() void {}
export fn memmem() void {}
export fn memmove() void {}
export fn mempcpy() void {}
export fn memrchr() void {}
export fn memset() void {}
export fn mincore() void {}
export fn mkdir() void {}
export fn mkdirat() void {}
export fn mkdtemp() void {}
export fn mkfifo() void {}
export fn mkfifoat() void {}
export fn mkostemp() void {}
export fn mkostemp64() void {}
export fn mkostemps() void {}
export fn mkostemps64() void {}
export fn mkstemp() void {}
export fn mkstemp64() void {}
export fn mkstemps() void {}
export fn mkstemps64() void {}
export fn mktemp() void {}
export fn mktime() void {}
export fn mlock() void {}
export fn mlock2() void {}
export fn mlockall() void {}
export fn mmap() void {}
export fn mmap64() void {}
export fn modf() void {}
export fn modff() void {}
export fn modfl() void {}
export fn modify_ldt() void {}
export fn moncontrol() void {}
export fn monstartup() void {}
export fn mount() void {}
export fn mprobe() void {}
export fn mprotect() void {}
export fn mrand48() void {}
export fn mrand48_r() void {}
export fn mremap() void {}
export fn msgctl() void {}
export fn msgget() void {}
export fn msgrcv() void {}
export fn msgsnd() void {}
export fn msync() void {}
export fn mtrace() void {}
export fn munlock() void {}
export fn munlockall() void {}
export fn munmap() void {}
export fn muntrace() void {}
export fn name_to_handle_at() void {}
export fn nanosleep() void {}
export fn netname2host() void {}
export fn netname2user() void {}
export fn newlocale() void {}
export fn nfsservctl() void {}
export fn nftw() void {}
export fn nftw64() void {}
export fn ngettext() void {}
export fn nice() void {}
export fn nl_langinfo() void {}
export fn nl_langinfo_l() void {}
export fn nrand48() void {}
export fn nrand48_r() void {}
export fn ntohl() void {}
export fn ntohs() void {}
export fn ntp_adjtime() void {}
export fn ntp_gettime() void {}
export fn ntp_gettimex() void {}
export fn obstack_alloc_failed_handler() void {}
export fn obstack_exit_failure() void {}
export fn obstack_free() void {}
export fn obstack_printf() void {}
export fn obstack_vprintf() void {}
export fn on_exit() void {}
export fn open() void {}
export fn open64() void {}
export fn open_by_handle_at() void {}
export fn open_memstream() void {}
export fn open_wmemstream() void {}
export fn openat() void {}
export fn openat64() void {}
export fn opendir() void {}
export fn openlog() void {}
export fn optarg() void {}
export fn opterr() void {}
export fn optind() void {}
export fn optopt() void {}
export fn parse_printf_format() void {}
export fn passwd2des() void {}
export fn pathconf() void {}
export fn pause() void {}
export fn pclose() void {}
export fn perror() void {}
export fn personality() void {}
export fn pipe() void {}
export fn pipe2() void {}
export fn pivot_root() void {}
export fn pkey_alloc() void {}
export fn pkey_free() void {}
export fn pkey_get() void {}
export fn pkey_mprotect() void {}
export fn pkey_set() void {}
export fn pmap_getmaps() void {}
export fn pmap_getport() void {}
export fn pmap_rmtcall() void {}
export fn pmap_set() void {}
export fn pmap_unset() void {}
export fn poll() void {}
export fn popen() void {}
export fn posix_fadvise() void {}
export fn posix_fadvise64() void {}
export fn posix_fallocate() void {}
export fn posix_fallocate64() void {}
export fn posix_madvise() void {}
export fn posix_memalign() void {}
export fn posix_openpt() void {}
export fn posix_spawn() void {}
export fn posix_spawn_file_actions_addchdir_np() void {}
export fn posix_spawn_file_actions_addclose() void {}
export fn posix_spawn_file_actions_adddup2() void {}
export fn posix_spawn_file_actions_addfchdir_np() void {}
export fn posix_spawn_file_actions_addopen() void {}
export fn posix_spawn_file_actions_destroy() void {}
export fn posix_spawn_file_actions_init() void {}
export fn posix_spawnattr_destroy() void {}
export fn posix_spawnattr_getflags() void {}
export fn posix_spawnattr_getpgroup() void {}
export fn posix_spawnattr_getschedparam() void {}
export fn posix_spawnattr_getschedpolicy() void {}
export fn posix_spawnattr_getsigdefault() void {}
export fn posix_spawnattr_getsigmask() void {}
export fn posix_spawnattr_init() void {}
export fn posix_spawnattr_setflags() void {}
export fn posix_spawnattr_setpgroup() void {}
export fn posix_spawnattr_setschedparam() void {}
export fn posix_spawnattr_setschedpolicy() void {}
export fn posix_spawnattr_setsigdefault() void {}
export fn posix_spawnattr_setsigmask() void {}
export fn posix_spawnp() void {}
export fn ppoll() void {}
export fn prctl() void {}
export fn pread() void {}
export fn pread64() void {}
export fn preadv() void {}
export fn preadv2() void {}
export fn preadv64() void {}
export fn preadv64v2() void {}
export fn printf() void {}
export fn printf_size() void {}
export fn printf_size_info() void {}
export fn prlimit() void {}
export fn prlimit64() void {}
export fn process_vm_readv() void {}
export fn process_vm_writev() void {}
export fn profil() void {}
export fn program_invocation_name() void {}
export fn program_invocation_short_name() void {}
export fn pselect() void {}
export fn psiginfo() void {}
export fn psignal() void {}
export fn pthread_attr_destroy() void {}
export fn pthread_attr_getdetachstate() void {}
export fn pthread_attr_getinheritsched() void {}
export fn pthread_attr_getschedparam() void {}
export fn pthread_attr_getschedpolicy() void {}
export fn pthread_attr_getscope() void {}
export fn pthread_attr_init() void {}
export fn pthread_attr_setdetachstate() void {}
export fn pthread_attr_setinheritsched() void {}
export fn pthread_attr_setschedparam() void {}
export fn pthread_attr_setschedpolicy() void {}
export fn pthread_attr_setscope() void {}
export fn pthread_cond_broadcast() void {}
export fn pthread_cond_destroy() void {}
export fn pthread_cond_init() void {}
export fn pthread_cond_signal() void {}
export fn pthread_cond_timedwait() void {}
export fn pthread_cond_wait() void {}
export fn pthread_condattr_destroy() void {}
export fn pthread_condattr_init() void {}
export fn pthread_equal() void {}
export fn pthread_exit() void {}
export fn pthread_getschedparam() void {}
export fn pthread_mutex_destroy() void {}
export fn pthread_mutex_init() void {}
export fn pthread_mutex_lock() void {}
export fn pthread_mutex_unlock() void {}
export fn pthread_self() void {}
export fn pthread_setcancelstate() void {}
export fn pthread_setcanceltype() void {}
export fn pthread_setschedparam() void {}
export fn ptrace() void {}
export fn ptsname() void {}
export fn ptsname_r() void {}
export fn putc() void {}
export fn putc_unlocked() void {}
export fn putchar() void {}
export fn putchar_unlocked() void {}
export fn putenv() void {}
export fn putgrent() void {}
export fn putmsg() void {}
export fn putpmsg() void {}
export fn putpwent() void {}
export fn puts() void {}
export fn putsgent() void {}
export fn putspent() void {}
export fn pututline() void {}
export fn pututxline() void {}
export fn putw() void {}
export fn putwc() void {}
export fn putwc_unlocked() void {}
export fn putwchar() void {}
export fn putwchar_unlocked() void {}
export fn pvalloc() void {}
export fn pwrite() void {}
export fn pwrite64() void {}
export fn pwritev() void {}
export fn pwritev2() void {}
export fn pwritev64() void {}
export fn pwritev64v2() void {}
export fn qecvt() void {}
export fn qecvt_r() void {}
export fn qfcvt() void {}
export fn qfcvt_r() void {}
export fn qgcvt() void {}
export fn qsort() void {}
export fn qsort_r() void {}
export fn query_module() void {}
export fn quick_exit() void {}
export fn quotactl() void {}
export fn raise() void {}
export fn rand() void {}
export fn rand_r() void {}
export fn random() void {}
export fn random_r() void {}
export fn rawmemchr() void {}
export fn rcmd() void {}
export fn rcmd_af() void {}
export fn re_comp() void {}
export fn re_compile_fastmap() void {}
export fn re_compile_pattern() void {}
export fn re_exec() void {}
export fn re_match() void {}
export fn re_match_2() void {}
export fn re_max_failures() void {}
export fn re_search() void {}
export fn re_search_2() void {}
export fn re_set_registers() void {}
export fn re_set_syntax() void {}
export fn re_syntax_options() void {}
export fn read() void {}
export fn readahead() void {}
export fn readdir() void {}
export fn readdir64() void {}
export fn readdir64_r() void {}
export fn readdir_r() void {}
export fn readlink() void {}
export fn readlinkat() void {}
export fn readv() void {}
export fn realloc() void {}
export fn reallocarray() void {}
export fn realpath() void {}
export fn reboot() void {}
export fn recv() void {}
export fn recvfrom() void {}
export fn recvmmsg() void {}
export fn recvmsg() void {}
export fn regcomp() void {}
export fn regerror() void {}
export fn regexec() void {}
export fn regfree() void {}
export fn register_printf_function() void {}
export fn register_printf_modifier() void {}
export fn register_printf_specifier() void {}
export fn register_printf_type() void {}
export fn registerrpc() void {}
export fn remap_file_pages() void {}
export fn remove() void {}
export fn removexattr() void {}
export fn remque() void {}
export fn rename() void {}
export fn renameat() void {}
export fn renameat2() void {}
export fn revoke() void {}
export fn rewind() void {}
export fn rewinddir() void {}
export fn rexec() void {}
export fn rexec_af() void {}
export fn rexecoptions() void {}
export fn rindex() void {}
export fn rmdir() void {}
export fn rpc_createerr() void {}
export fn rpmatch() void {}
export fn rresvport() void {}
export fn rresvport_af() void {}
export fn rtime() void {}
export fn ruserok() void {}
export fn ruserok_af() void {}
export fn ruserpass() void {}
export fn sbrk() void {}
export fn scalbn() void {}
export fn scalbnf() void {}
export fn scalbnl() void {}
export fn scandir() void {}
export fn scandir64() void {}
export fn scandirat() void {}
export fn scandirat64() void {}
export fn scanf() void {}
export fn sched_get_priority_max() void {}
export fn sched_get_priority_min() void {}
export fn sched_getaffinity() void {}
export fn sched_getcpu() void {}
export fn sched_getparam() void {}
export fn sched_getscheduler() void {}
export fn sched_rr_get_interval() void {}
export fn sched_setaffinity() void {}
export fn sched_setparam() void {}
export fn sched_setscheduler() void {}
export fn sched_yield() void {}
export fn secure_getenv() void {}
export fn seed48() void {}
export fn seed48_r() void {}
export fn seekdir() void {}
export fn select() void {}
export fn semctl() void {}
export fn semget() void {}
export fn semop() void {}
export fn semtimedop() void {}
export fn send() void {}
export fn sendfile() void {}
export fn sendfile64() void {}
export fn sendmmsg() void {}
export fn sendmsg() void {}
export fn sendto() void {}
export fn setaliasent() void {}
export fn setbuf() void {}
export fn setbuffer() void {}
export fn setcontext() void {}
export fn setdomainname() void {}
export fn setegid() void {}
export fn setenv() void {}
export fn seteuid() void {}
export fn setfsent() void {}
export fn setfsgid() void {}
export fn setfsuid() void {}
export fn setgid() void {}
export fn setgrent() void {}
export fn setgroups() void {}
export fn sethostent() void {}
export fn sethostid() void {}
export fn sethostname() void {}
export fn setipv4sourcefilter() void {}
export fn setitimer() void {}
export fn setjmp() void {}
export fn setlinebuf() void {}
export fn setlocale() void {}
export fn setlogin() void {}
export fn setlogmask() void {}
export fn setmntent() void {}
export fn setnetent() void {}
export fn setnetgrent() void {}
export fn setns() void {}
export fn setpgid() void {}
export fn setpgrp() void {}
export fn setpriority() void {}
export fn setprotoent() void {}
export fn setpwent() void {}
export fn setregid() void {}
export fn setresgid() void {}
export fn setresuid() void {}
export fn setreuid() void {}
export fn setrlimit() void {}
export fn setrlimit64() void {}
export fn setrpcent() void {}
export fn setservent() void {}
export fn setsgent() void {}
export fn setsid() void {}
export fn setsockopt() void {}
export fn setsourcefilter() void {}
export fn setspent() void {}
export fn setstate() void {}
export fn setstate_r() void {}
export fn settimeofday() void {}
export fn setttyent() void {}
export fn setuid() void {}
export fn setusershell() void {}
export fn setutent() void {}
export fn setutxent() void {}
export fn setvbuf() void {}
export fn setxattr() void {}
export fn sgetsgent() void {}
export fn sgetsgent_r() void {}
export fn sgetspent() void {}
export fn sgetspent_r() void {}
export fn shmat() void {}
export fn shmctl() void {}
export fn shmdt() void {}
export fn shmget() void {}
export fn shutdown() void {}
export fn sigaction() void {}
export fn sigaddset() void {}
export fn sigaltstack() void {}
export fn sigandset() void {}
export fn sigblock() void {}
export fn sigdelset() void {}
export fn sigemptyset() void {}
export fn sigfillset() void {}
export fn siggetmask() void {}
export fn sighold() void {}
export fn sigignore() void {}
export fn siginterrupt() void {}
export fn sigisemptyset() void {}
export fn sigismember() void {}
export fn siglongjmp() void {}
export fn signal() void {}
export fn signalfd() void {}
export fn sigorset() void {}
export fn sigpause() void {}
export fn sigpending() void {}
export fn sigprocmask() void {}
export fn sigqueue() void {}
export fn sigrelse() void {}
export fn sigreturn() void {}
export fn sigset() void {}
export fn sigsetmask() void {}
export fn sigstack() void {}
export fn sigsuspend() void {}
export fn sigtimedwait() void {}
export fn sigvec() void {}
export fn sigwait() void {}
export fn sigwaitinfo() void {}
export fn sleep() void {}
export fn snprintf() void {}
export fn sockatmark() void {}
export fn socket() void {}
export fn socketpair() void {}
export fn splice() void {}
export fn sprintf() void {}
export fn sprofil() void {}
export fn srand() void {}
export fn srand48() void {}
export fn srand48_r() void {}
export fn srandom() void {}
export fn srandom_r() void {}
export fn sscanf() void {}
export fn ssignal() void {}
export fn sstk() void {}
export fn statfs() void {}
export fn statfs64() void {}
export fn statvfs() void {}
export fn statvfs64() void {}
export fn statx() void {}
export fn stderr() void {}
export fn stdin() void {}
export fn stdout() void {}
export fn step() void {}
export fn stime() void {}
export fn stpcpy() void {}
export fn stpncpy() void {}
export fn strcasecmp() void {}
export fn strcasecmp_l() void {}
export fn strcasestr() void {}
export fn strcat() void {}
export fn strchr() void {}
export fn strchrnul() void {}
export fn strcmp() void {}
export fn strcoll() void {}
export fn strcoll_l() void {}
export fn strcpy() void {}
export fn strcspn() void {}
export fn strdup() void {}
export fn strerror() void {}
export fn strerror_l() void {}
export fn strerror_r() void {}
export fn strfmon() void {}
export fn strfmon_l() void {}
export fn strfromd() void {}
export fn strfromf() void {}
export fn strfromf128() void {}
export fn strfromf32() void {}
export fn strfromf32x() void {}
export fn strfromf64() void {}
export fn strfromf64x() void {}
export fn strfroml() void {}
export fn strfry() void {}
export fn strftime() void {}
export fn strftime_l() void {}
export fn strlen() void {}
export fn strncasecmp() void {}
export fn strncasecmp_l() void {}
export fn strncat() void {}
export fn strncmp() void {}
export fn strncpy() void {}
export fn strndup() void {}
export fn strnlen() void {}
export fn strpbrk() void {}
export fn strptime() void {}
export fn strptime_l() void {}
export fn strrchr() void {}
export fn strsep() void {}
export fn strsignal() void {}
export fn strspn() void {}
export fn strstr() void {}
export fn strtod() void {}
export fn strtod_l() void {}
export fn strtof() void {}
export fn strtof128() void {}
export fn strtof128_l() void {}
export fn strtof32() void {}
export fn strtof32_l() void {}
export fn strtof32x() void {}
export fn strtof32x_l() void {}
export fn strtof64() void {}
export fn strtof64_l() void {}
export fn strtof64x() void {}
export fn strtof64x_l() void {}
export fn strtof_l() void {}
export fn strtoimax() void {}
export fn strtok() void {}
export fn strtok_r() void {}
export fn strtol() void {}
export fn strtol_l() void {}
export fn strtold() void {}
export fn strtold_l() void {}
export fn strtoll() void {}
export fn strtoll_l() void {}
export fn strtoq() void {}
export fn strtoul() void {}
export fn strtoul_l() void {}
export fn strtoull() void {}
export fn strtoull_l() void {}
export fn strtoumax() void {}
export fn strtouq() void {}
export fn strverscmp() void {}
export fn strxfrm() void {}
export fn strxfrm_l() void {}
export fn stty() void {}
export fn svc_exit() void {}
export fn svc_fdset() void {}
export fn svc_getreq() void {}
export fn svc_getreq_common() void {}
export fn svc_getreq_poll() void {}
export fn svc_getreqset() void {}
export fn svc_max_pollfd() void {}
export fn svc_pollfd() void {}
export fn svc_register() void {}
export fn svc_run() void {}
export fn svc_sendreply() void {}
export fn svc_unregister() void {}
export fn svcauthdes_stats() void {}
export fn svcerr_auth() void {}
export fn svcerr_decode() void {}
export fn svcerr_noproc() void {}
export fn svcerr_noprog() void {}
export fn svcerr_progvers() void {}
export fn svcerr_systemerr() void {}
export fn svcerr_weakauth() void {}
export fn svcfd_create() void {}
export fn svcraw_create() void {}
export fn svctcp_create() void {}
export fn svcudp_bufcreate() void {}
export fn svcudp_create() void {}
export fn svcudp_enablecache() void {}
export fn svcunix_create() void {}
export fn svcunixfd_create() void {}
export fn swab() void {}
export fn swapcontext() void {}
export fn swapoff() void {}
export fn swapon() void {}
export fn swprintf() void {}
export fn swscanf() void {}
export fn symlink() void {}
export fn symlinkat() void {}
export fn sync() void {}
export fn sync_file_range() void {}
export fn syncfs() void {}
export fn sys_errlist() void {}
export fn sys_nerr() void {}
export fn sys_sigabbrev() void {}
export fn sys_siglist() void {}
export fn syscall() void {}
export fn sysconf() void {}
export fn sysctl() void {}
export fn sysinfo() void {}
export fn syslog() void {}
export fn system() void {}
export fn sysv_signal() void {}
export fn tcdrain() void {}
export fn tcflow() void {}
export fn tcflush() void {}
export fn tcgetattr() void {}
export fn tcgetpgrp() void {}
export fn tcgetsid() void {}
export fn tcsendbreak() void {}
export fn tcsetattr() void {}
export fn tcsetpgrp() void {}
export fn tdelete() void {}
export fn tdestroy() void {}
export fn tee() void {}
export fn telldir() void {}
export fn tempnam() void {}
export fn textdomain() void {}
export fn tfind() void {}
export fn thrd_current() void {}
export fn thrd_equal() void {}
export fn thrd_sleep() void {}
export fn thrd_yield() void {}
export fn time() void {}
export fn timegm() void {}
export fn timelocal() void {}
export fn timerfd_create() void {}
export fn timerfd_gettime() void {}
export fn timerfd_settime() void {}
export fn times() void {}
export fn timespec_get() void {}
export fn timezone() void {}
export fn tmpfile() void {}
export fn tmpfile64() void {}
export fn tmpnam() void {}
export fn tmpnam_r() void {}
export fn toascii() void {}
export fn tolower() void {}
export fn tolower_l() void {}
export fn toupper() void {}
export fn toupper_l() void {}
export fn towctrans() void {}
export fn towctrans_l() void {}
export fn towlower() void {}
export fn towlower_l() void {}
export fn towupper() void {}
export fn towupper_l() void {}
export fn tr_break() void {}
export fn truncate() void {}
export fn truncate64() void {}
export fn tsearch() void {}
export fn ttyname() void {}
export fn ttyname_r() void {}
export fn ttyslot() void {}
export fn twalk() void {}
export fn tzname() void {}
export fn tzset() void {}
export fn ualarm() void {}
export fn ulckpwdf() void {}
export fn ulimit() void {}
export fn umask() void {}
export fn umount() void {}
export fn umount2() void {}
export fn uname() void {}
export fn ungetc() void {}
export fn ungetwc() void {}
export fn unlink() void {}
export fn unlinkat() void {}
export fn unlockpt() void {}
export fn unsetenv() void {}
export fn unshare() void {}
export fn updwtmp() void {}
export fn updwtmpx() void {}
export fn uselib() void {}
export fn uselocale() void {}
export fn user2netname() void {}
export fn usleep() void {}
export fn ustat() void {}
export fn utime() void {}
export fn utimensat() void {}
export fn utimes() void {}
export fn utmpname() void {}
export fn utmpxname() void {}
export fn valloc() void {}
export fn vasprintf() void {}
export fn vdprintf() void {}
export fn verr() void {}
export fn verrx() void {}
export fn versionsort() void {}
export fn versionsort64() void {}
export fn vfork() void {}
export fn vfprintf() void {}
export fn vfscanf() void {}
export fn vfwprintf() void {}
export fn vfwscanf() void {}
export fn vhangup() void {}
export fn vlimit() void {}
export fn vmsplice() void {}
export fn vprintf() void {}
export fn vscanf() void {}
export fn vsnprintf() void {}
export fn vsprintf() void {}
export fn vsscanf() void {}
export fn vswprintf() void {}
export fn vswscanf() void {}
export fn vsyslog() void {}
export fn vtimes() void {}
export fn vwarn() void {}
export fn vwarnx() void {}
export fn vwprintf() void {}
export fn vwscanf() void {}
export fn wait() void {}
export fn wait3() void {}
export fn wait4() void {}
export fn waitid() void {}
export fn waitpid() void {}
export fn warn() void {}
export fn warnx() void {}
export fn wcpcpy() void {}
export fn wcpncpy() void {}
export fn wcrtomb() void {}
export fn wcscasecmp() void {}
export fn wcscasecmp_l() void {}
export fn wcscat() void {}
export fn wcschr() void {}
export fn wcschrnul() void {}
export fn wcscmp() void {}
export fn wcscoll() void {}
export fn wcscoll_l() void {}
export fn wcscpy() void {}
export fn wcscspn() void {}
export fn wcsdup() void {}
export fn wcsftime() void {}
export fn wcsftime_l() void {}
export fn wcslen() void {}
export fn wcsncasecmp() void {}
export fn wcsncasecmp_l() void {}
export fn wcsncat() void {}
export fn wcsncmp() void {}
export fn wcsncpy() void {}
export fn wcsnlen() void {}
export fn wcsnrtombs() void {}
export fn wcspbrk() void {}
export fn wcsrchr() void {}
export fn wcsrtombs() void {}
export fn wcsspn() void {}
export fn wcsstr() void {}
export fn wcstod() void {}
export fn wcstod_l() void {}
export fn wcstof() void {}
export fn wcstof128() void {}
export fn wcstof128_l() void {}
export fn wcstof32() void {}
export fn wcstof32_l() void {}
export fn wcstof32x() void {}
export fn wcstof32x_l() void {}
export fn wcstof64() void {}
export fn wcstof64_l() void {}
export fn wcstof64x() void {}
export fn wcstof64x_l() void {}
export fn wcstof_l() void {}
export fn wcstoimax() void {}
export fn wcstok() void {}
export fn wcstol() void {}
export fn wcstol_l() void {}
export fn wcstold() void {}
export fn wcstold_l() void {}
export fn wcstoll() void {}
export fn wcstoll_l() void {}
export fn wcstombs() void {}
export fn wcstoq() void {}
export fn wcstoul() void {}
export fn wcstoul_l() void {}
export fn wcstoull() void {}
export fn wcstoull_l() void {}
export fn wcstoumax() void {}
export fn wcstouq() void {}
export fn wcswcs() void {}
export fn wcswidth() void {}
export fn wcsxfrm() void {}
export fn wcsxfrm_l() void {}
export fn wctob() void {}
export fn wctomb() void {}
export fn wctrans() void {}
export fn wctrans_l() void {}
export fn wctype() void {}
export fn wctype_l() void {}
export fn wcwidth() void {}
export fn wmemchr() void {}
export fn wmemcmp() void {}
export fn wmemcpy() void {}
export fn wmemmove() void {}
export fn wmempcpy() void {}
export fn wmemset() void {}
export fn wordexp() void {}
export fn wordfree() void {}
export fn wprintf() void {}
export fn write() void {}
export fn writev() void {}
export fn wscanf() void {}
export fn xdecrypt() void {}
export fn xdr_accepted_reply() void {}
export fn xdr_array() void {}
export fn xdr_authdes_cred() void {}
export fn xdr_authdes_verf() void {}
export fn xdr_authunix_parms() void {}
export fn xdr_bool() void {}
export fn xdr_bytes() void {}
export fn xdr_callhdr() void {}
export fn xdr_callmsg() void {}
export fn xdr_char() void {}
export fn xdr_cryptkeyarg() void {}
export fn xdr_cryptkeyarg2() void {}
export fn xdr_cryptkeyres() void {}
export fn xdr_des_block() void {}
export fn xdr_double() void {}
export fn xdr_enum() void {}
export fn xdr_float() void {}
export fn xdr_free() void {}
export fn xdr_getcredres() void {}
export fn xdr_hyper() void {}
export fn xdr_int() void {}
export fn xdr_int16_t() void {}
export fn xdr_int32_t() void {}
export fn xdr_int64_t() void {}
export fn xdr_int8_t() void {}
export fn xdr_key_netstarg() void {}
export fn xdr_key_netstres() void {}
export fn xdr_keybuf() void {}
export fn xdr_keystatus() void {}
export fn xdr_long() void {}
export fn xdr_longlong_t() void {}
export fn xdr_netnamestr() void {}
export fn xdr_netobj() void {}
export fn xdr_opaque() void {}
export fn xdr_opaque_auth() void {}
export fn xdr_pmap() void {}
export fn xdr_pmaplist() void {}
export fn xdr_pointer() void {}
export fn xdr_quad_t() void {}
export fn xdr_reference() void {}
export fn xdr_rejected_reply() void {}
export fn xdr_replymsg() void {}
export fn xdr_rmtcall_args() void {}
export fn xdr_rmtcallres() void {}
export fn xdr_short() void {}
export fn xdr_sizeof() void {}
export fn xdr_string() void {}
export fn xdr_u_char() void {}
export fn xdr_u_hyper() void {}
export fn xdr_u_int() void {}
export fn xdr_u_long() void {}
export fn xdr_u_longlong_t() void {}
export fn xdr_u_quad_t() void {}
export fn xdr_u_short() void {}
export fn xdr_uint16_t() void {}
export fn xdr_uint32_t() void {}
export fn xdr_uint64_t() void {}
export fn xdr_uint8_t() void {}
export fn xdr_union() void {}
export fn xdr_unixcred() void {}
export fn xdr_vector() void {}
export fn xdr_void() void {}
export fn xdr_wrapstring() void {}
export fn xdrmem_create() void {}
export fn xdrrec_create() void {}
export fn xdrrec_endofrecord() void {}
export fn xdrrec_eof() void {}
export fn xdrrec_skiprecord() void {}
export fn xdrstdio_create() void {}
export fn xencrypt() void {}
export fn xprt_register() void {}
export fn xprt_unregister() void {} | libc/dummy/c.zig |
const std = @import("std");
const Mat4 = @import("./util/matrix.zig");
const Ring = @import("./util/ring.zig");
const UI = @import("./util/ui.zig");
const GL = @import("./util/opengl.zig");
const Editor = @import("./editor.zig");
const Self = @This();
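// GPU-side state for the editor's visualization panes (lissajous/heatmap,
// frequency, oscilloscope) plus an offscreen picking pass used to hit-test the UI.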
gl: *GL,
cpu_ring: *Ring,
gpu_ring: GPURing,
vaos: AutoGenerated(GL.uint, .{ "lissajous_2d", "frequency", "picking", "oscilloscope" }),
fbs: AutoGenerated(GL.uint, .{ "heat_sum", "picking_buffer" }),
textures: AutoGenerated(GL.uint, .{ "heat_sum_tex", "scale_tex", "picking_tex" }),
bufs: AutoGenerated(GL.uint, .{ "quad", "signal", "osc_lines" }),
lissajous_program: Program(.{
"num_frames",
"dot_size",
"dot_color",
"graph_scale",
"matrix",
}),
heat_program: Program(.{
"dot_size",
"graph_scale",
}),
blit_heat_program: Program(.{
"heat_tex",
"scale_tex",
"matrix",
}),
frequency_program: Program(.{
"matrix",
"col",
}),
picking_program: Program(.{
"matrix",
"picking_id",
}),
oscilloscope_program: Program(.{
"matrix",
"num_frames",
}),
pub fn init(allocator: *std.mem.Allocator, gl: *GL, ring: *Ring, images: Images) !Self {
var self: Self = undefined;
self.gl = gl;
self.vaos = generate(self.vaos, "glGenVertexArrays", gl);
self.fbs = generate(self.fbs, "glGenFramebuffers", gl);
self.textures = generate(self.textures, "glGenTextures", gl);
self.bufs = generate(self.bufs, "glGenBuffers", gl);
try self.lissajous_program.init(allocator, gl, .{
.{ GL.VERTEX_SHADER, @embedFile("./windows/lissajous.vert") },
.{ GL.FRAGMENT_SHADER, @embedFile("./windows/lissajous.frag") },
});
try self.heat_program.init(allocator, gl, .{
.{ GL.VERTEX_SHADER, @embedFile("./windows/heat_dot.vert") },
.{ GL.FRAGMENT_SHADER, @embedFile("./windows/heat_dot.frag") },
});
try self.blit_heat_program.init(allocator, gl, .{
.{ GL.VERTEX_SHADER, @embedFile("./windows/blit_heat.vert") },
.{ GL.FRAGMENT_SHADER, @embedFile("./windows/blit_heat.frag") },
});
try self.frequency_program.init(allocator, gl, .{
.{ GL.VERTEX_SHADER, @embedFile("./windows/frequency.vert") },
.{ GL.FRAGMENT_SHADER, @embedFile("./windows/frequency.frag") },
});
try self.picking_program.init(allocator, gl, .{
.{ GL.VERTEX_SHADER, @embedFile("./windows/picking.vert") },
.{ GL.FRAGMENT_SHADER, @embedFile("./windows/picking.frag") },
});
try self.oscilloscope_program.init(allocator, gl, .{
.{ GL.VERTEX_SHADER, @embedFile("./windows/oscilloscope.vert") },
.{ GL.FRAGMENT_SHADER, @embedFile("./windows/oscilloscope.frag") },
});
self.gpu_ring = GPURing.init(self.bufs.signal, gl);
self.cpu_ring = ring;
{
gl.glDisable(GL.DEPTH_TEST);
gl.glEnable(GL.CULL_FACE);
gl.glEnable(GL.BLEND);
gl.glCullFace(GL.BACK);
}
// Heat Framebuffer Setup
{
gl.callCheckError("glActiveTexture", .{GL.TEXTURE0});
gl.callCheckError("glBindTexture", .{ GL.TEXTURE_2D, self.textures.heat_sum_tex });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_MIN_FILTER, GL.LINEAR });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_MAG_FILTER, GL.LINEAR });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_WRAP_S, GL.CLAMP_TO_EDGE });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_WRAP_T, GL.CLAMP_TO_EDGE });
gl.callCheckError("glTexImage2D", .{ GL.TEXTURE_2D, 0, GL.RGBA, 512, 512, 0, GL.RGBA, GL.FLOAT, null });
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, self.fbs.heat_sum });
gl.callCheckError("glFramebufferTexture2D", .{ GL.FRAMEBUFFER, GL.COLOR_ATTACHMENT0, GL.TEXTURE_2D, self.textures.heat_sum_tex, 0 });
}
// Heat Scale Texture Setup
{
gl.callCheckError("glActiveTexture", .{GL.TEXTURE1});
gl.callCheckError("glBindTexture", .{ GL.TEXTURE_2D, self.textures.scale_tex });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_MIN_FILTER, GL.LINEAR });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_MAG_FILTER, GL.LINEAR });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_WRAP_S, GL.CLAMP_TO_EDGE });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_WRAP_T, GL.CLAMP_TO_EDGE });
gl.callCheckError("glTexImage2D", .{ GL.TEXTURE_2D, 0, GL.RGBA, 1024, 1, 0, GL.RGBA, GL.UNSIGNED_BYTE, images.scale.ptr });
}
// Picking Framebuffer Setup
{
gl.callCheckError("glActiveTexture", .{GL.TEXTURE2});
gl.callCheckError("glBindTexture", .{ GL.TEXTURE_2D, self.textures.picking_tex });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_MIN_FILTER, GL.NEAREST });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_MAG_FILTER, GL.NEAREST });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_WRAP_S, GL.CLAMP_TO_EDGE });
gl.callCheckError("glTexParameteri", .{ GL.TEXTURE_2D, GL.TEXTURE_WRAP_T, GL.CLAMP_TO_EDGE });
// TODO This needs to be the same size as the backbuffer
gl.callCheckError("glTexImage2D", .{ GL.TEXTURE_2D, 0, GL.RGBA, 500, 500, 0, GL.RGBA, GL.FLOAT, null });
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, self.fbs.picking_buffer });
gl.callCheckError("glFramebufferTexture2D", .{ GL.FRAMEBUFFER, GL.COLOR_ATTACHMENT0, GL.TEXTURE_2D, self.textures.picking_tex, 0 });
}
// Quad Buffer
{
const quad = [_]GL.float{
-1, -1,
-1, 1,
1, 1,
1, -1,
};
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.quad });
gl.callCheckError("glBufferData", .{ GL.ARRAY_BUFFER, quad.len * @sizeOf(GL.float), &quad, GL.STATIC_DRAW });
}
// Icosahedron Model for 3D Lissajous
{
// TODO
}
// Lissajous VAO Setup
{
gl.callCheckError("glBindVertexArray", .{self.vaos.lissajous_2d});
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.quad });
gl.callCheckError("glEnableVertexAttribArray", .{0});
gl.callCheckError("glVertexAttribPointer", .{ 0, 2, GL.FLOAT, GL.FALSE, 0, null });
gl.callCheckError("glVertexAttribDivisor", .{ 0, 0 });
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.signal });
gl.callCheckError("glEnableVertexAttribArray", .{1});
gl.callCheckError("glVertexAttribPointer", .{ 1, 2, GL.FLOAT, GL.FALSE, @sizeOf(f32) * 2, null });
gl.callCheckError("glVertexAttribDivisor", .{ 1, 1 });
}
{
gl.callCheckError("glBindVertexArray", .{self.vaos.frequency});
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.quad });
gl.callCheckError("glEnableVertexAttribArray", .{0});
gl.callCheckError("glVertexAttribPointer", .{ 0, 2, GL.FLOAT, GL.FALSE, 0, null });
}
{
gl.callCheckError("glBindVertexArray", .{self.vaos.picking});
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.quad });
gl.callCheckError("glEnableVertexAttribArray", .{0});
gl.callCheckError("glVertexAttribPointer", .{ 0, 2, GL.FLOAT, GL.FALSE, 0, null });
}
// OSC Lines buffer setup
{
const lines = line_result: {
// 100 ms of frames at 48 kHz; keep the count integral so it can size the allocation
// and drive the loop below.
const time_ms = 100;
const frame_count = 48_000 * time_ms / 1000;
const vertex_count = frame_count * 2;
var vertices = try allocator.alloc(GL.float, vertex_count * 2);
var idx: usize = 0;
var vertex_idx: usize = 0;
while (idx < frame_count) : (idx += 1) {
vertices[vertex_idx] = -1;
vertices[vertex_idx + 1] = @intToFloat(f32, idx);
vertices[vertex_idx + 2] = 1;
vertices[vertex_idx + 3] = @intToFloat(f32, idx);
vertex_idx += 4;
}
break :line_result vertices;
};
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.osc_lines });
gl.callCheckError("glBufferData", .{ GL.ARRAY_BUFFER, @intCast(i32, lines.len) * @sizeOf(GL.float), lines.ptr, GL.STATIC_DRAW });
}
{
gl.callCheckError("glBindVertexArray", .{self.vaos.oscilloscope});
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.osc_lines });
gl.callCheckError("glEnableVertexAttribArray", .{0});
gl.callCheckError("glVertexAttribPointer", .{ 0, 2, GL.FLOAT, GL.FALSE, 0, null });
// gl.callCheckError("glEnableVertexAttribArray", .{2});
// gl.callCheckError("glVertexAttribPointer", .{ 2, 1, GL.FLOAT, GL.FALSE, stride, @intToPtr(*c_void, 12) });
// gl.callCheckError("glEnableVertexAttribArray", .{3});
// gl.callCheckError("glVertexAttribPointer", .{ 3, 1, GL.FLOAT, GL.FALSE, stride, @intToPtr(*c_void, 16) });
gl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.bufs.signal });
gl.callCheckError("glEnableVertexAttribArray", .{1});
gl.callCheckError("glVertexAttribPointer", .{ 1, 2, GL.FLOAT, GL.FALSE, 0, @intToPtr(*c_void, 8) });
gl.callCheckError("glEnableVertexAttribArray", .{2});
gl.callCheckError("glVertexAttribPointer", .{ 2, 2, GL.FLOAT, GL.FALSE, 0, @intToPtr(*c_void, 16) });
gl.callCheckError("glEnableVertexAttribArray", .{3});
gl.callCheckError("glVertexAttribPointer", .{ 3, 2, GL.FLOAT, GL.FALSE, 0, null });
}
gl.callCheckError("glUseProgram", .{self.lissajous_program.id});
gl.callCheckError("glUniform3f", .{ self.lissajous_program.uniforms.dot_color, 0.572, 0.909, 0.266 });
return self;
}
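// Keeps the picking texture the same size as the backbuffer (see the TODO in init()).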
pub fn resize(self: *Self, viewport: Viewport) void {
const gl = self.gl;
gl.callCheckError("glBindTexture", .{ GL.TEXTURE_2D, self.textures.picking_tex });
gl.callCheckError("glTexImage2D", .{ GL.TEXTURE_2D, 0, GL.RGBA, viewport.width, viewport.height, 0, GL.RGBA, GL.FLOAT, null });
}
pub fn deinit(self: *Self) void {
cleanup(self.vaos, "glDeleteVertexArrays", self.gl);
cleanup(self.fbs, "glDeleteFramebuffers", self.gl);
cleanup(self.textures, "glDeleteTextures", self.gl);
cleanup(self.bufs, "glDeleteBuffers", self.gl);
}
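// Uploads the newest samples from the CPU ring to the GPU, draws the lissajous/heatmap,
// frequency and oscilloscope panes, and, when picking_pos is set, runs the picking pass
// and returns the id of the pane under the cursor.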
pub fn render(self: *Self, editor: Editor, viewport: Viewport, picking_pos: ?UI.MousePos) ?u32 {
const gl = self.gl;
const params = editor.params;
const num_frames = self.gpu_ring.max_capacity / 2;
self.gpu_ring.copyFromRing(self.cpu_ring);
const lis_matrix = switch (editor.lissajous_mode) {
.Lissajous3D => Mat4.multiplyMany(&[_]Mat4{
Mat4.rotateY(-0.5),
Mat4.translate(0, 0, -2),
Mat4.perspective(70, 1, 0.01, 100),
}),
else => Mat4.identity,
};
const lis_pane_matrix = Mat4.multiplyMany(&[_]Mat4{
Mat4.scale(std.math.sqrt1_2, std.math.sqrt1_2, 1),
Mat4.scale(0.9, 0.9, 1),
Mat4.rotateZ(-std.math.pi / 4.0),
lis_matrix,
paneMatrix(editor.layout.getPane("lissajous")),
});
gl.callCheckError("glEnable", .{GL.DEPTH_TEST});
gl.callCheckError("glDisable", .{GL.BLEND});
switch (editor.lissajous_mode) {
.Lissajous3D, .Lissajous2D => {
const lp = self.lissajous_program;
gl.callCheckError("glBindVertexArray", .{self.vaos.lissajous_2d});
gl.glUseProgram(lp.id);
gl.glBlendFunc(GL.SRC_ALPHA, GL.ONE_MINUS_SRC_ALPHA);
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, 0 });
gl.glClear(GL.COLOR_BUFFER_BIT | GL.DEPTH_BUFFER_BIT);
gl.callCheckError("glUniformMatrix4fv", .{ lp.uniforms.matrix, 1, GL.FALSE, &lis_pane_matrix.data });
gl.callCheckError("glUniform1f", .{ lp.uniforms.dot_size, params.get("Point Size") });
gl.callCheckError("glUniform1f", .{ lp.uniforms.graph_scale, params.get("Graph Scale") });
gl.callCheckError("glUniform1f", .{ lp.uniforms.num_frames, @intToFloat(f32, num_frames) });
gl.callCheckError("glDrawArraysInstanced", .{ GL.TRIANGLE_FAN, 0, 8, @intCast(GL.sizei, num_frames) });
},
.Heatmap => {
const hp = self.heat_program;
const bhp = self.blit_heat_program;
gl.callCheckError("glDisable", .{GL.DEPTH_TEST});
gl.callCheckError("glEnable", .{GL.BLEND});
gl.callCheckError("glBindVertexArray", .{self.vaos.lissajous_2d});
{
gl.callCheckError("glUseProgram", .{hp.id});
gl.callCheckError("glBlendFunc", .{ GL.SRC_ALPHA, GL.ONE });
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, self.fbs.heat_sum });
gl.glViewport(0, 0, 512, 512);
gl.glClearColor(0, 0, 0, 0);
gl.glClear(GL.COLOR_BUFFER_BIT | GL.DEPTH_BUFFER_BIT);
gl.callCheckError("glUniform1f", .{ hp.uniforms.dot_size, params.get("Point Size") });
gl.callCheckError("glUniform1f", .{ hp.uniforms.graph_scale, params.get("Graph Scale") });
gl.callCheckError("glDrawArraysInstanced", .{ GL.TRIANGLE_FAN, 0, 8, @intCast(GL.sizei, num_frames) });
}
{
gl.callCheckError("glUseProgram", .{bhp.id});
gl.glBlendFunc(GL.SRC_ALPHA, GL.ONE_MINUS_SRC_ALPHA);
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, 0 });
gl.glClearColor(0, 0, 0, 1);
gl.glViewport(0, 0, viewport.width, viewport.height);
gl.glClear(GL.COLOR_BUFFER_BIT | GL.DEPTH_BUFFER_BIT);
gl.callCheckError("glUniformMatrix4fv", .{ bhp.uniforms.matrix, 1, GL.FALSE, &lis_pane_matrix.data });
gl.callCheckError("glUniform1i", .{ bhp.uniforms.heat_tex, 0 });
gl.callCheckError("glUniform1i", .{ bhp.uniforms.scale_tex, 1 });
gl.callCheckError("glDrawArrays", .{ GL.TRIANGLE_FAN, 0, 8 });
}
},
}
const freq_pane_matrix = paneMatrix(editor.layout.getPane("frequency"));
switch (editor.frequency_mode) {
.Flat, .Waterfall => {
const fp = self.frequency_program;
gl.callCheckError("glBindVertexArray", .{self.vaos.frequency});
gl.callCheckError("glUseProgram", .{fp.id});
gl.callCheckError("glBlendFunc", .{ GL.SRC_ALPHA, GL.ONE_MINUS_SRC_ALPHA });
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, 0 });
gl.glViewport(0, 0, viewport.width, viewport.height);
gl.callCheckError("glUniformMatrix4fv", .{ fp.uniforms.matrix, 1, GL.FALSE, &freq_pane_matrix.data });
gl.callCheckError("glUniform3f", .{ fp.uniforms.col, 0.2, 0.2, 0.2 });
gl.callCheckError("glDrawArrays", .{ GL.TRIANGLE_FAN, 0, 8 });
},
}
const osc_pane_matrix = paneMatrix(editor.layout.getPane("oscilloscope"));
switch (editor.oscilloscope_mode) {
.Combined => {
const op = self.oscilloscope_program;
gl.callCheckError("glDisable", .{GL.DEPTH_TEST});
gl.callCheckError("glBindVertexArray", .{self.vaos.oscilloscope});
gl.callCheckError("glDisable", .{GL.BLEND});
gl.callCheckError("glEnable", .{GL.DEPTH_TEST});
gl.callCheckError("glUseProgram", .{op.id});
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, 0 });
gl.glViewport(0, 0, viewport.width, viewport.height);
gl.callCheckError("glUniformMatrix4fv", .{ op.uniforms.matrix, 1, GL.FALSE, &osc_pane_matrix.data });
gl.callCheckError("glUniform1f", .{ op.uniforms.num_frames, @intToFloat(f32, num_frames) });
gl.callCheckError("glDrawArrays", .{ GL.TRIANGLE_STRIP, 0, @intCast(i32, num_frames * 2) });
},
}
var picked_id: ?u32 = null;
// Picking
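// Every pickable pane is drawn into the offscreen picking framebuffer with its 1-based
// index encoded as an RGBA color; the pixel under the cursor is read back and decoded
// into that id (0 means nothing was hit).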
if (picking_pos) |pos| {
const pip = self.picking_program;
gl.callCheckError("glDisable", .{GL.BLEND});
gl.callCheckError("glBindFramebuffer", .{ GL.FRAMEBUFFER, self.fbs.picking_buffer });
gl.callCheckError("glBindVertexArray", .{self.vaos.picking});
gl.callCheckError("glUseProgram", .{pip.id});
gl.callCheckError("glClearColor", .{ 0, 0, 0, 0 });
gl.callCheckError("glClear", .{GL.COLOR_BUFFER_BIT});
inline for ([_][]const u8{
"lissajous",
"frequency",
"oscilloscope",
"graph_scale",
"lissajous_controls",
}) |element, i| {
const matrix = paneMatrix(editor.layout.getPane(element));
const id = 1 + @intCast(u32, i);
gl.callCheckError("glUniformMatrix4fv", .{ pip.uniforms.matrix, 1, GL.FALSE, &matrix.data });
gl.callCheckError("glUniform4f", .{
pip.uniforms.picking_id,
@intToFloat(f32, ((id >> 0) & 0xff)) / 0xff,
@intToFloat(f32, (id >> 8) & 0xff) / 0xff,
@intToFloat(f32, (id >> 16) & 0xff) / 0xff,
@intToFloat(f32, (id >> 24) & 0xff) / 0xff,
});
gl.callCheckError("glDrawArrays", .{ GL.TRIANGLE_FAN, 0, 8 });
}
const h = 1;
const w = 1;
var data: [w * h * 4]u8 = undefined;
gl.callCheckError("glReadPixels", .{ pos.x, viewport.height - pos.y, w, h, GL.RGBA, GL.UNSIGNED_BYTE, &data });
const id = std.mem.readIntLittle(u32, &data);
if (id != 0) picked_id = id;
}
{
// For some reason this absolutely wrecks my CPU, causing the usage to go
// up to 32%, whereas it's at a stable ~3% without these few lines.
// Is this related to VSync? Maybe we have to call it outside of the render
// routine and after the buffers were swapped? If so I should probably
// move it to a separate function and make the wrapper responsible for
// calling it at the appropriate time.
// Even without runtime safety we want to at least know if any call
// in the frame generated an error.
// if (comptime !std.debug.runtime_safety) {
// const err = gl.glGetError();
// if (err != 0) {
// std.log.crit("An error was generated this frame: {}", .{err});
// }
// }
}
return picked_id;
}
pub const Images = struct {
scale: []const u8,
};
pub const Viewport = struct {
width: i32,
height: i32,
};
fn paneMatrix(pane: Editor.Pane) Mat4 {
return Mat4.multiplyMany(&[_]Mat4{
Mat4.translate(1, -1, 0),
Mat4.scale(0.5, 0.5, 1),
Mat4.scale(pane.width, pane.height, 1),
Mat4.translate(pane.x, -pane.y, 0),
Mat4.scale(2, 2, 1),
Mat4.translate(-1, 1, 0),
});
}
fn Program(comptime uniforms: anytype) type {
return struct {
id: GL.uint,
uniforms: AutoGenerated(GL.int, uniforms),
pub fn init(self: *@This(), allocator: *std.mem.Allocator, gl: *GL, comptime shader_source: anytype) !void {
self.id = gl.callCheckError("glCreateProgram", .{});
inline for (shader_source) |pair| {
const shader = try makeShader(allocator, gl, pair.@"0", pair.@"1");
gl.callCheckError("glAttachShader", .{ self.id, shader });
}
gl.callCheckError("glLinkProgram", .{self.id});
var link_status: GL.int = undefined;
gl.callCheckError("glGetProgramiv", .{ self.id, GL.LINK_STATUS, &link_status });
if (link_status != GL.TRUE) {
var log_len: GL.int = undefined;
gl.callCheckError("glGetProgramiv", .{ self.id, GL.INFO_LOG_LENGTH, &log_len });
var log = try allocator.alloc(u8, @intCast(usize, log_len));
defer allocator.free(log);
var out_len: GL.int = undefined;
gl.callCheckError("glGetProgramInfoLog", .{ self.id, log_len, &out_len, log.ptr });
std.log.crit("Failed to link program: {s}", .{log});
return error.FailedToLinkProgram;
}
inline for (uniforms) |name| {
@field(self.uniforms, name) = gl.callCheckError("glGetUniformLocation", .{ self.id, name });
if (@field(self.uniforms, name) == -1) {
std.log.warn("Uniform '{s}' could not be found", .{name});
}
}
}
};
}
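/// Builds a struct type with one field of type `T` for every name in `names`,
/// e.g. AutoGenerated(GL.int, .{ "matrix", "col" }) yields
/// `struct { matrix: GL.int, col: GL.int }`.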
fn AutoGenerated(comptime T: type, comptime names: anytype) type {
const TypeInfo = std.builtin.TypeInfo;
var fields: [names.len]TypeInfo.StructField = undefined;
inline for (names) |name, i| {
fields[i].name = name;
fields[i].field_type = T;
fields[i].default_value = null;
fields[i].is_comptime = false;
fields[i].alignment = @alignOf(T);
}
return @Type(TypeInfo{
.Struct = .{
.layout = .Auto,
.fields = &fields,
.decls = &[_]TypeInfo.Declaration{},
.is_tuple = false,
},
});
}
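/// Calls a bulk generation entry point such as "glGenBuffers" once, requesting one
/// id per field of `any`'s struct type, and returns a value of that type with the
/// generated ids filled in; `cleanup` below is the matching bulk-delete counterpart.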
fn generate(any: anytype, comptime fn_name: []const u8, opengl: *GL) @TypeOf(any) {
const FieldT = @TypeOf(any);
const fields = comptime std.meta.fields(FieldT);
if (comptime fields.len == 0) {
return FieldT{};
} else {
const T = fields[0].field_type;
var ids: [fields.len]T = undefined;
var result: FieldT = undefined;
opengl.callCheckError(fn_name, .{ ids.len, &ids });
inline for (fields) |field, i| {
@field(result, field.name) = ids[i];
}
return result;
}
}
fn cleanup(any: anytype, comptime fn_name: []const u8, opengl: *GL) void {
const FieldT = @TypeOf(any);
const fields = comptime std.meta.fields(FieldT);
if (comptime fields.len > 0) {
const T = fields[0].field_type;
var ids: [fields.len]T = undefined;
inline for (fields) |field, i| {
ids[i] = @field(any, field.name);
}
opengl.callCheckError(fn_name, .{ ids.len, &ids });
}
}
fn makeShader(allocator: *std.mem.Allocator, opengl: *GL, shader_type: GL.Enum, source: []const u8) !GL.uint {
const shader = opengl.glCreateShader(shader_type);
const ptr = @ptrCast([*]const [*]const u8, &source[0..]);
opengl.glShaderSource(shader, 1, ptr, &[_]GL.int{@intCast(GL.int, source.len)});
opengl.glCompileShader(shader);
var compile_status: GL.int = undefined;
opengl.glGetShaderiv(shader, GL.COMPILE_STATUS, &compile_status);
if (compile_status != GL.TRUE) {
var log_len: GL.int = undefined;
opengl.glGetShaderiv(shader, GL.INFO_LOG_LENGTH, &log_len);
var log = try allocator.alloc(u8, @intCast(usize, log_len));
defer allocator.free(log);
var out_len: GL.int = undefined;
opengl.glGetShaderInfoLog(shader, log_len, &out_len, log.ptr);
std.log.crit("Failed to compile shader: {s}", .{log});
return error.FailedToCompileShader;
}
return shader;
}
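/// A ring of f32 samples backed by an OpenGL ARRAY_BUFFER. `resize` grows the GPU
/// allocation when needed and wraps the read/write indices into the new capacity,
/// `update` streams new samples in, and `copyFromRing` drains a CPU-side `Ring`
/// into the GPU buffer.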
pub const GPURing = struct {
buffer: GL.uint,
opengl: *GL,
max_capacity: usize = 0,
allocated: usize = 0,
write_index: usize = 0,
read_index: usize = 0,
pub fn init(buffer: GL.uint, opengl: *GL) GPURing {
return .{
.buffer = buffer,
.opengl = opengl,
};
}
pub fn resize(self: *GPURing, num_samples: usize) void {
if (num_samples > self.allocated) {
const byte_size = @intCast(GL.sizei, @sizeOf(f32) * num_samples);
self.opengl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.buffer });
self.opengl.callCheckError("glBufferData", .{ GL.ARRAY_BUFFER, byte_size, null, GL.DYNAMIC_DRAW });
self.allocated = num_samples;
}
self.max_capacity = num_samples;
self.write_index %= self.max_capacity;
self.read_index %= self.max_capacity;
}
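    /// Uploads `data` starting at the current write head, wrapping at `max_capacity`;
    /// if more samples are supplied than fit, only the newest `max_capacity` samples
    /// are written.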
pub fn update(self: *GPURing, data: []const f32) void {
const writable = if (data.len > self.max_capacity) data[data.len - self.max_capacity ..] else data;
var left = writable;
self.opengl.callCheckError("glBindBuffer", .{ GL.ARRAY_BUFFER, self.buffer });
while (left.len > 0) {
const required = left.len;
const write_head = self.write_index % self.max_capacity;
const space_left = self.max_capacity - write_head;
const actual_length = std.math.min(required, space_left);
const able_to_write = left[0..actual_length];
left = left[actual_length..];
const byte_offset = @intCast(GL.sizei, @sizeOf(f32) * write_head);
const byte_size = @intCast(GL.sizei, @sizeOf(f32) * able_to_write.len);
self.opengl.callCheckError("glBufferSubData", .{ GL.ARRAY_BUFFER, byte_offset, byte_size, &able_to_write[0] });
self.write_index += actual_length;
}
}
pub fn copyFromRing(self: *GPURing, ring: *Ring) void {
const result = ring.readSlice();
self.update(result.first);
if (result.second) |second| self.update(second);
}
}; | src/gl_renderer.zig |
const std = @import("std");
const assert = std.debug.assert;
const zp = @import("../../zplay.zig");
const gl = zp.deps.gl;
const Self = @This();
/// max number of vbos
const MAX_VBO_NUM = 8;
/// buffer data's binding target
const BufferTarget = enum(c_uint) {
array_buffer = gl.GL_ARRAY_BUFFER,
copy_read_buffer = gl.GL_COPY_READ_BUFFER,
copy_write_buffer = gl.GL_COPY_WRITE_BUFFER,
element_array_buffer = gl.GL_ELEMENT_ARRAY_BUFFER,
pixel_pack_buffer = gl.GL_PIXEL_PACK_BUFFER,
pixel_unpack_buffer = gl.GL_PIXEL_UNPACK_BUFFER,
texture_buffer = gl.GL_TEXTURE_BUFFER,
transform_feedback_buffer = gl.GL_TRANSFORM_FEEDBACK_BUFFER,
uniform_buffer = gl.GL_UNIFORM_BUFFER,
};
/// buffer data's usage
const BufferUsage = enum(c_uint) {
stream_draw = gl.GL_STREAM_DRAW,
stream_read = gl.GL_STREAM_READ,
stream_copy = gl.GL_STREAM_COPY,
static_draw = gl.GL_STATIC_DRAW,
static_read = gl.GL_STATIC_READ,
static_copy = gl.GL_STATIC_COPY,
dynamic_draw = gl.GL_DYNAMIC_DRAW,
dynamic_read = gl.GL_DYNAMIC_READ,
dynamic_copy = gl.GL_DYNAMIC_COPY,
};
/// buffer data's access level
const BufferAccess = enum(c_uint) {
read = gl.GL_READ_ONLY,
write = gl.GL_WRITE_ONLY,
read_write = gl.GL_READ_WRITE,
};
/// id of vertex array
id: gl.GLuint = undefined,
/// buffer objects
vbos: [MAX_VBO_NUM]gl.GLuint = undefined,
vbo_num: u32 = undefined,
/// init vertex array
pub fn init(vbo_num: u32) Self {
assert(vbo_num > 0 and vbo_num <= MAX_VBO_NUM);
var va: Self = undefined;
gl.genVertexArrays(1, &va.id);
va.vbo_num = vbo_num;
gl.genBuffers(@intCast(c_int, vbo_num), &va.vbos);
gl.util.checkError();
return va;
}
/// deinitialize vertex array
pub fn deinit(self: Self) void {
gl.deleteVertexArrays(1, &self.id);
gl.deleteBuffers(@intCast(c_int, self.vbo_num), &self.vbos);
gl.util.checkError();
}
/// allocate and initialize buffer data
pub fn bufferData(
self: Self,
vbo_index: u32,
comptime T: type,
data: []const T,
target: BufferTarget,
usage: BufferUsage,
) void {
assert(vbo_index < self.vbo_num);
gl.bindBuffer(@enumToInt(target), self.vbos[vbo_index]);
gl.bufferData(
@enumToInt(target),
@intCast(c_longlong, data.len * @sizeOf(T)),
data.ptr,
@enumToInt(usage),
);
gl.util.checkError();
}
/// only allocate buffer data
pub fn bufferDataAlloc(
self: Self,
vbo_index: u32,
size: u32,
target: BufferTarget,
usage: BufferUsage,
) void {
assert(vbo_index < self.vbo_num);
gl.bindBuffer(@enumToInt(target), self.vbos[vbo_index]);
gl.bufferData(
@enumToInt(target),
@intCast(c_longlong, size),
null,
@enumToInt(usage),
);
gl.util.checkError();
}
/// update buffer data; the user needs to make sure enough memory has been allocated
pub fn bufferSubData(
self: Self,
vbo_index: u32,
offset: u32,
comptime T: type,
data: []const T,
target: BufferTarget,
) void {
assert(vbo_index < self.vbo_num);
gl.bindBuffer(@enumToInt(target), self.vbos[vbo_index]);
gl.bufferSubData(
@enumToInt(target),
@intCast(c_longlong, offset),
@intCast(c_longlong, data.len * @sizeOf(T)),
data.ptr,
);
gl.util.checkError();
}
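// Illustrative streaming pattern (assuming the file is imported as `VertexArray`
// and that `samples` is an existing []const f32): allocate storage once with
// bufferDataAlloc, then push updates into it with bufferSubData.
//
//   var va = VertexArray.init(1);
//   va.bufferDataAlloc(0, 1024 * @sizeOf(f32), .array_buffer, .dynamic_draw);
//   va.bufferSubData(0, 0, f32, samples, .array_buffer);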
/// copy buffer data from gpu
pub fn getBufferData(
self: Self,
vbo_index: u32,
offset: u32,
    data: []u8,
target: BufferTarget,
) void {
assert(vbo_index < self.vbo_num);
gl.bindBuffer(@enumToInt(target), self.vbos[vbo_index]);
gl.getBufferSubData(
@enumToInt(target),
@intCast(c_longlong, offset),
@intCast(c_longlong, data.len),
data.ptr,
);
gl.util.checkError();
}
/// get mapped memory pointer
pub fn mapBuffer(
self: Self,
vbo_index: u32,
target: BufferTarget,
access: BufferAccess,
) ?[*]u8 {
assert(vbo_index < self.vbo_num);
gl.bindBuffer(@enumToInt(target), self.vbos[vbo_index]);
var data = gl.mapBuffer(@enumToInt(target), @enumToInt(access));
if (data) |ptr| {
return @ptrCast([*]u8, ptr);
}
return null;
}
/// unmap the specified buffer object
/// returns true unless the data store became corrupted, in which case the user needs to reinitialize the data.
pub fn unmapBuffer(self: Self, target: BufferTarget) bool {
_ = self;
return gl.util.boolType(gl.unmapBuffer(@enumToInt(target)));
}
/// set vertex attribute (the attribute is enabled afterwards)
pub fn setAttribute(
self: Self,
vbo_index: u32,
loc: gl.GLuint,
size: u32,
comptime T: type,
normalized: bool,
stride: u32,
offset: u32,
) void {
assert(vbo_index < self.vbo_num);
gl.bindBuffer(
@enumToInt(BufferTarget.array_buffer),
self.vbos[vbo_index],
);
gl.util.checkError();
gl.vertexAttribPointer(
loc,
@intCast(c_int, size),
gl.util.dataType(T),
gl.util.boolType(normalized),
@intCast(c_int, stride),
@intToPtr(*allowzero anyopaque, offset),
);
gl.enableVertexAttribArray(loc);
gl.util.checkError();
}
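// Illustrative layout (assuming `va` and a []const f32 `vertices` exist): interleaved
// position (vec3) and texture coordinate (vec2), i.e. [x y z u v] per vertex, bound
// to attribute locations 0 and 1.
//
//   va.bufferData(0, f32, vertices, .array_buffer, .static_draw);
//   va.setAttribute(0, 0, 3, f32, false, 5 * @sizeOf(f32), 0);
//   va.setAttribute(0, 1, 2, f32, false, 5 * @sizeOf(f32), 3 * @sizeOf(f32));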
/// start using vertex array
pub fn use(self: Self) void {
gl.bindVertexArray(self.id);
gl.util.checkError();
}
/// stop using vertex array
pub fn disuse(self: Self) void {
_ = self;
gl.bindVertexArray(0);
gl.util.checkError();
} | src/graphics/common/VertexArray.zig |
const std = @import("std");
const c = @import("c.zig");
const panic = std.debug.panic;
pub const Xlib = struct {
display: *c.Display = undefined,
screen: i32 = 0,
root: c.Window = undefined,
font: *c.XftFont = undefined,
cursor: u64 = undefined,
const Self = *Xlib;
fn init(self: Self, fontname: []const u8) void {
self.display = c.XOpenDisplay(null) orelse @panic("unable to create window");
self.screen = c.XDefaultScreen(self.display);
self.root = c.XRootWindow(self.display, self.screen);
self.font = c.XftFontOpenName(self.display, self.screen, fontname.ptr) orelse @panic("could not load font");
self.cursor = c.XCreateFontCursor(self.display, 2);
var windowAttributes: c.XSetWindowAttributes = undefined;
windowAttributes.event_mask = c.SubstructureNotifyMask | c.SubstructureRedirectMask | c.KeyPressMask | c.EnterWindowMask | c.FocusChangeMask | c.PropertyChangeMask | c.PointerMotionMask | c.NoEventMask;
windowAttributes.cursor = self.cursor;
_ = c.XChangeWindowAttributes(self.display, self.root, c.CWEventMask | c.CWCursor, &windowAttributes);
_ = c.XSelectInput(self.display, self.root, windowAttributes.event_mask);
_ = c.XSync(self.display, 0);
}
fn delete(self: Self) void {
_ = c.XCloseDisplay(self.display);
}
    fn getDisplayWidth(self: Self) u32 {
        return @intCast(u32, c.XDisplayWidth(self.display, self.screen));
    }
    fn getDisplayHeight(self: Self) u32 {
        return @intCast(u32, c.XDisplayHeight(self.display, self.screen));
    }
fn grabKey(self: Self, mask: u32, key: c.KeySym) void {
var code = c.XKeysymToKeycode(self.display, key);
_ = c.XGrabKey(self.display, code, mask, self.root, 1, c.GrabModeAsync, c.GrabModeAsync);
}
fn grabButton(self: Self, window: c.Window) void {
_ = c.XGrabButton(self.display, c.AnyButton, c.AnyModifier, window, 0,
c.ButtonPressMask, c.GrabModeSync, c.GrabModeSync, 0, 0);
}
fn ungrabButton(self: Self, window: c.Window) void {
_ = c.XUngrabButton(self.display, c.AnyButton, c.AnyModifier, window);
}
fn closeWindow(self: Self, window: c.Window) void {
_ = c.XKillClient(self.display, window);
}
fn hideWindow(self: Self, window: c.Window) void {
// TODO: better way to hide
self.move(window, -4000, 0);
}
fn move(self: Self, window: c.Window, x: i32, y: i32) void {
_ = c.XMoveWindow(self.display, window, x, y);
}
fn moveByDelta(self: Self, window: c.Window, x: i32, y: i32) void {
var windowDimension = self.getWindowPos(window);
self.move(window, @intCast(i32, windowDimension[0]) + x, @intCast(i32, windowDimension[1]) + y);
}
fn focusWindow(self: Self, window: c.Window) void {
_ = c.XSetInputFocus(self.display, window, c.PointerRoot, c.CurrentTime);
}
// TODO: API is crap -> string handling?
fn getWindowName(self: Self, window: c.Window, textProperty: *c.XTextProperty) bool {
var name: []const u8 = "_NET_WM_NAME";
var atom = c.XInternAtom(self.display, name.ptr, 0);
var res = c.XGetTextProperty(self.display, window, textProperty, atom);
return res != 0;
}
fn freeWindowName(self: Self, textProperty: *c.XTextProperty) void {
_ = c.XFree(textProperty.value);
//_ = c.XFree(textProperty);
}
fn createWindow(self: Self, x: i32, y: i32, width: u32, height: u32) c.Window {
var attributes: c.XSetWindowAttributes = undefined;
attributes.background_pixel = c.ParentRelative;
attributes.event_mask = c.ButtonPressMask | c.ExposureMask;
var res = c.XCreateWindow(self.display,
self.root,
x, y,
width, height,
0,
c.XDefaultDepth(self.display, self.screen),
c.CopyFromParent,
c.XDefaultVisual(self.display, self.screen),
c.CWEventMask | c.CWBackPixel,
&attributes);
_ = c.XMapWindow(self.display, res);
return res;
}
fn getWindowDimensions(self: Self, window: c.Window) @Vector(2, u32) {
var rootReturn: c.Window = undefined;
var x: c_int = 0;
var y: c_int = 0;
var width: c_uint = 0;
var height: c_uint = 0;
var borderWidth: c_uint = 0;
var depth: c_uint = 0;
_ = c.XGetGeometry(self.display, window, &rootReturn,
&x, &y,
&width, &height,
&borderWidth, &depth);
return [2]u32{width, height};
}
fn getWindowPos(self: Self, window: c.Window) @Vector(2, i32) {
var rootReturn: c.Window = undefined;
var x: c_int = 0;
var y: c_int = 0;
var width: c_uint = 0;
var height: c_uint = 0;
var borderWidth: c_uint = 0;
var depth: c_uint = 0;
_ = c.XGetGeometry(self.display, window, &rootReturn,
&x, &y,
&width, &height,
&borderWidth, &depth);
return [2]i32{x, y};
}
fn getWindowWidth(self: Self, window: c.Window) u32 {
var rootReturn: c.Window = undefined;
var x: c_int = 0;
var y: c_int = 0;
var width: c_uint = 0;
var height: c_uint = 0;
var borderWidth: c_uint = 0;
var depth: c_uint = 0;
_ = c.XGetGeometry(self.display, window, &rootReturn,
&x, &y,
&width, &height,
&borderWidth, &depth);
return width;
}
fn getWindowHeight(self: Self, window: c.Window) u32 {
var rootReturn: c.Window = undefined;
var x: c_int = 0;
var y: c_int = 0;
var width: c_uint = 0;
var height: c_uint = 0;
var borderWidth: c_uint = 0;
var depth: c_uint = 0;
_ = c.XGetGeometry(self.display, window, &rootReturn,
&x, &y,
&width, &height,
&borderWidth, &depth);
return height;
}
fn resize(self: Self, window: c.Window, x: i32, y: i32, width: u32, height: u32) void {
var changes: c.XWindowChanges = undefined;
changes.x = x;
changes.y = y;
changes.width = @intCast(c_int, width);
changes.height = @intCast(c_int, height);
_ = c.XConfigureWindow(self.display, window, c.CWX | c.CWY | c.CWWidth | c.CWHeight, &changes);
}
fn windowSetBorder(self: Self, window: c.Window, color: u64, borderWidth: i32) void {
_ = c.XSetWindowBorder(self.display, window, color);
var changes: c.XWindowChanges = undefined;
changes.border_width = borderWidth;
_ = c.XConfigureWindow(self.display, window, c.CWBorderWidth, &changes);
}
fn setPointer(self: Self, x: i32, y: i32) void {
        _ = c.XWarpPointer(self.display, self.root, self.root, 0, 0, 0, 0, x, y);
_ = c.XFlush(self.display);
_ = c.XSync(self.display, 0);
}
fn getPointerPos(self: Self, window: c.Window) @Vector(2, i32) {
var i: i32 = 0;
var x: i32 = 0;
var y: i32 = 0;
var ui: u32 = 0;
var win: c.Window = 0;
_ = c.XQueryPointer(self.display, window, &win, &win, &i, &i, &x, &y, &ui);
return [2]i32{x, y};
}
fn isFixed(self: Self, window: c.Window) bool {
var res = false;
var hints: c.XSizeHints = undefined;
var tempHints: c.XSizeHints = undefined;
// TODO: remove this
tempHints.min_width = 0;
tempHints.min_height = 0;
tempHints.max_width = -1;
tempHints.max_height = -1;
var foo: i64 = 0;
_ = c.XGetWMNormalHints(self.display, window, &hints, &foo);
if (hints.flags & c.PBaseSize == c.PBaseSize) {
std.debug.warn("PBaseSize {} {}\n", .{hints.base_width, hints.base_height});
}
if (hints.flags & c.PMinSize == c.PMinSize) {
std.debug.warn("PMin {} {}\n", .{hints.min_width, hints.min_height});
tempHints.min_width = hints.min_width;
tempHints.min_height = hints.min_height;
}
if (hints.flags & c.PMaxSize == c.PMaxSize) {
std.debug.warn("PMaxSize {} {}\n", .{hints.max_width, hints.max_height});
tempHints.max_width = hints.max_width;
tempHints.max_height = hints.max_height;
}
res = tempHints.min_width == tempHints.max_width and tempHints.min_height == tempHints.max_height;
return res;
}
}; | src/x.zig |
const builtin = @import("builtin");
const std = @import("../index.zig");
const TypeId = builtin.TypeId;
const assert = std.debug.assert;
pub const e = 2.71828182845904523536028747135266249775724709369995;
pub const pi = 3.14159265358979323846264338327950288419716939937510;
// From a small c++ [program using boost float128](https://github.com/winksaville/cpp_boost_float128)
pub const f128_true_min = @bitCast(f128, u128(0x00000000000000000000000000000001));
pub const f128_min = @bitCast(f128, u128(0x00010000000000000000000000000000));
pub const f128_max = @bitCast(f128, u128(0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF));
pub const f128_epsilon = @bitCast(f128, u128(0x3F8F0000000000000000000000000000));
pub const f128_toint = 1.0 / f128_epsilon;
// float.h details
pub const f64_true_min = 4.94065645841246544177e-324;
pub const f64_min = 2.2250738585072014e-308;
pub const f64_max = 1.79769313486231570815e+308;
pub const f64_epsilon = 2.22044604925031308085e-16;
pub const f64_toint = 1.0 / f64_epsilon;
pub const f32_true_min = 1.40129846432481707092e-45;
pub const f32_min = 1.17549435082228750797e-38;
pub const f32_max = 3.40282346638528859812e+38;
pub const f32_epsilon = 1.1920928955078125e-07;
pub const f32_toint = 1.0 / f32_epsilon;
pub const f16_true_min = 0.000000059604644775390625; // 2**-24
pub const f16_min = 0.00006103515625; // 2**-14
pub const f16_max = 65504;
pub const f16_epsilon = 0.0009765625; // 2**-10
pub const f16_toint = 1.0 / f16_epsilon;
pub const nan_u16 = u16(0x7C01);
pub const nan_f16 = @bitCast(f16, nan_u16);
pub const inf_u16 = u16(0x7C00);
pub const inf_f16 = @bitCast(f16, inf_u16);
pub const nan_u32 = u32(0x7F800001);
pub const nan_f32 = @bitCast(f32, nan_u32);
pub const inf_u32 = u32(0x7F800000);
pub const inf_f32 = @bitCast(f32, inf_u32);
pub const nan_u64 = u64(0x7FF << 52) | 1;
pub const nan_f64 = @bitCast(f64, nan_u64);
pub const inf_u64 = u64(0x7FF << 52);
pub const inf_f64 = @bitCast(f64, inf_u64);
pub const nan = @import("nan.zig").nan;
pub const snan = @import("nan.zig").snan;
pub const inf = @import("inf.zig").inf;
pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool {
assert(@typeId(T) == TypeId.Float);
return fabs(x - y) < epsilon;
}
// TODO: Hide the following in an internal module.
pub fn forceEval(value: var) void {
const T = @typeOf(value);
switch (T) {
f16 => {
var x: f16 = undefined;
const p = @ptrCast(*volatile f16, &x);
p.* = x;
},
f32 => {
var x: f32 = undefined;
const p = @ptrCast(*volatile f32, &x);
p.* = x;
},
f64 => {
var x: f64 = undefined;
const p = @ptrCast(*volatile f64, &x);
p.* = x;
},
else => {
@compileError("forceEval not implemented for " ++ @typeName(T));
},
}
}
pub fn raiseInvalid() void {
// Raise INVALID fpu exception
}
pub fn raiseUnderflow() void {
// Raise UNDERFLOW fpu exception
}
pub fn raiseOverflow() void {
// Raise OVERFLOW fpu exception
}
pub fn raiseInexact() void {
// Raise INEXACT fpu exception
}
pub fn raiseDivByZero() void {
    // Raise DIVBYZERO fpu exception
}
pub const isNan = @import("isnan.zig").isNan;
pub const isSignalNan = @import("isnan.zig").isSignalNan;
pub const fabs = @import("fabs.zig").fabs;
pub const ceil = @import("ceil.zig").ceil;
pub const floor = @import("floor.zig").floor;
pub const trunc = @import("trunc.zig").trunc;
pub const round = @import("round.zig").round;
pub const frexp = @import("frexp.zig").frexp;
pub const frexp32_result = @import("frexp.zig").frexp32_result;
pub const frexp64_result = @import("frexp.zig").frexp64_result;
pub const modf = @import("modf.zig").modf;
pub const modf32_result = @import("modf.zig").modf32_result;
pub const modf64_result = @import("modf.zig").modf64_result;
pub const copysign = @import("copysign.zig").copysign;
pub const isFinite = @import("isfinite.zig").isFinite;
pub const isInf = @import("isinf.zig").isInf;
pub const isPositiveInf = @import("isinf.zig").isPositiveInf;
pub const isNegativeInf = @import("isinf.zig").isNegativeInf;
pub const isNormal = @import("isnormal.zig").isNormal;
pub const signbit = @import("signbit.zig").signbit;
pub const scalbn = @import("scalbn.zig").scalbn;
pub const pow = @import("pow.zig").pow;
pub const powi = @import("powi.zig").powi;
pub const sqrt = @import("sqrt.zig").sqrt;
pub const cbrt = @import("cbrt.zig").cbrt;
pub const acos = @import("acos.zig").acos;
pub const asin = @import("asin.zig").asin;
pub const atan = @import("atan.zig").atan;
pub const atan2 = @import("atan2.zig").atan2;
pub const hypot = @import("hypot.zig").hypot;
pub const exp = @import("exp.zig").exp;
pub const exp2 = @import("exp2.zig").exp2;
pub const expm1 = @import("expm1.zig").expm1;
pub const ilogb = @import("ilogb.zig").ilogb;
pub const ln = @import("ln.zig").ln;
pub const log = @import("log.zig").log;
pub const log2 = @import("log2.zig").log2;
pub const log10 = @import("log10.zig").log10;
pub const log1p = @import("log1p.zig").log1p;
pub const fma = @import("fma.zig").fma;
pub const asinh = @import("asinh.zig").asinh;
pub const acosh = @import("acosh.zig").acosh;
pub const atanh = @import("atanh.zig").atanh;
pub const sinh = @import("sinh.zig").sinh;
pub const cosh = @import("cosh.zig").cosh;
pub const tanh = @import("tanh.zig").tanh;
pub const cos = @import("cos.zig").cos;
pub const sin = @import("sin.zig").sin;
pub const tan = @import("tan.zig").tan;
pub const complex = @import("complex/index.zig");
pub const Complex = complex.Complex;
pub const big = @import("big/index.zig");
test "math" {
_ = @import("nan.zig");
_ = @import("isnan.zig");
_ = @import("fabs.zig");
_ = @import("ceil.zig");
_ = @import("floor.zig");
_ = @import("trunc.zig");
_ = @import("round.zig");
_ = @import("frexp.zig");
_ = @import("modf.zig");
_ = @import("copysign.zig");
_ = @import("isfinite.zig");
_ = @import("isinf.zig");
_ = @import("isnormal.zig");
_ = @import("signbit.zig");
_ = @import("scalbn.zig");
_ = @import("pow.zig");
_ = @import("powi.zig");
_ = @import("sqrt.zig");
_ = @import("cbrt.zig");
_ = @import("acos.zig");
_ = @import("asin.zig");
_ = @import("atan.zig");
_ = @import("atan2.zig");
_ = @import("hypot.zig");
_ = @import("exp.zig");
_ = @import("exp2.zig");
_ = @import("expm1.zig");
_ = @import("ilogb.zig");
_ = @import("ln.zig");
_ = @import("log.zig");
_ = @import("log2.zig");
_ = @import("log10.zig");
_ = @import("log1p.zig");
_ = @import("fma.zig");
_ = @import("asinh.zig");
_ = @import("acosh.zig");
_ = @import("atanh.zig");
_ = @import("sinh.zig");
_ = @import("cosh.zig");
_ = @import("tanh.zig");
_ = @import("sin.zig");
_ = @import("cos.zig");
_ = @import("tan.zig");
_ = @import("complex/index.zig");
_ = @import("big/index.zig");
}
pub fn floatMantissaBits(comptime T: type) comptime_int {
assert(@typeId(T) == builtin.TypeId.Float);
return switch (T.bit_count) {
16 => 10,
32 => 23,
64 => 52,
80 => 64,
128 => 112,
else => @compileError("unknown floating point type " ++ @typeName(T)),
};
}
pub fn floatExponentBits(comptime T: type) comptime_int {
assert(@typeId(T) == builtin.TypeId.Float);
return switch (T.bit_count) {
16 => 5,
32 => 8,
64 => 11,
80 => 15,
128 => 15,
else => @compileError("unknown floating point type " ++ @typeName(T)),
};
}
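// Sanity checks for the IEEE-754 layouts encoded above (binary32 and binary64).
test "math.floatMantissaBits, math.floatExponentBits" {
    assert(floatMantissaBits(f32) == 23);
    assert(floatExponentBits(f32) == 8);
    assert(floatMantissaBits(f64) == 52);
    assert(floatExponentBits(f64) == 11);
}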
pub fn min(x: var, y: var) @typeOf(x + y) {
return if (x < y) x else y;
}
test "math.min" {
assert(min(i32(-1), i32(2)) == -1);
}
pub fn max(x: var, y: var) @typeOf(x + y) {
return if (x > y) x else y;
}
test "math.max" {
assert(max(i32(-1), i32(2)) == 2);
}
pub fn mul(comptime T: type, a: T, b: T) (error{Overflow}!T) {
var answer: T = undefined;
return if (@mulWithOverflow(T, a, b, &answer)) error.Overflow else answer;
}
pub fn add(comptime T: type, a: T, b: T) (error{Overflow}!T) {
var answer: T = undefined;
return if (@addWithOverflow(T, a, b, &answer)) error.Overflow else answer;
}
pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) {
var answer: T = undefined;
return if (@subWithOverflow(T, a, b, &answer)) error.Overflow else answer;
}
pub fn negate(x: var) !@typeOf(x) {
return sub(@typeOf(x), 0, x);
}
pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
var answer: T = undefined;
return if (@shlWithOverflow(T, a, shift_amt, &answer)) error.Overflow else answer;
}
/// Shifts left. Overflowed bits are truncated.
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: var) T {
const abs_shift_amt = absCast(shift_amt);
const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
if (@typeOf(shift_amt).is_signed) {
if (shift_amt >= 0) {
return a << casted_shift_amt;
} else {
return a >> casted_shift_amt;
}
}
return a << casted_shift_amt;
}
test "math.shl" {
assert(shl(u8, 0b11111111, usize(3)) == 0b11111000);
assert(shl(u8, 0b11111111, usize(8)) == 0);
assert(shl(u8, 0b11111111, usize(9)) == 0);
assert(shl(u8, 0b11111111, isize(-2)) == 0b00111111);
}
/// Shifts right. Overflowed bits are truncated.
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: var) T {
const abs_shift_amt = absCast(shift_amt);
const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
if (@typeOf(shift_amt).is_signed) {
if (shift_amt >= 0) {
return a >> casted_shift_amt;
} else {
return a << casted_shift_amt;
}
}
return a >> casted_shift_amt;
}
test "math.shr" {
assert(shr(u8, 0b11111111, usize(3)) == 0b00011111);
assert(shr(u8, 0b11111111, usize(8)) == 0);
assert(shr(u8, 0b11111111, usize(9)) == 0);
assert(shr(u8, 0b11111111, isize(-2)) == 0b11111100);
}
/// Rotates right. Only unsigned values can be rotated.
/// A negative shift amount is taken modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: var) T {
if (T.is_signed) {
@compileError("cannot rotate signed integer");
} else {
const ar = @mod(r, T.bit_count);
return shr(T, x, ar) | shl(T, x, T.bit_count - ar);
}
}
test "math.rotr" {
assert(rotr(u8, 0b00000001, usize(0)) == 0b00000001);
assert(rotr(u8, 0b00000001, usize(9)) == 0b10000000);
assert(rotr(u8, 0b00000001, usize(8)) == 0b00000001);
assert(rotr(u8, 0b00000001, usize(4)) == 0b00010000);
assert(rotr(u8, 0b00000001, isize(-1)) == 0b00000010);
}
/// Rotates left. Only unsigned values can be rotated.
/// A negative shift amount is taken modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: var) T {
if (T.is_signed) {
@compileError("cannot rotate signed integer");
} else {
const ar = @mod(r, T.bit_count);
return shl(T, x, ar) | shr(T, x, T.bit_count - ar);
}
}
test "math.rotl" {
assert(rotl(u8, 0b00000001, usize(0)) == 0b00000001);
assert(rotl(u8, 0b00000001, usize(9)) == 0b00000010);
assert(rotl(u8, 0b00000001, usize(8)) == 0b00000001);
assert(rotl(u8, 0b00000001, usize(4)) == 0b00010000);
assert(rotl(u8, 0b00000001, isize(-1)) == 0b10000000);
}
pub fn Log2Int(comptime T: type) type {
// comptime ceil log2
comptime var count = 0;
comptime var s = T.bit_count - 1;
inline while (s != 0) : (s >>= 1) {
count += 1;
}
return @IntType(false, count);
}
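// Log2Int(T) is the smallest unsigned integer type that can index every bit of T.
test "math.Log2Int" {
    assert(Log2Int(u8) == u3);
    assert(Log2Int(u16) == u4);
    assert(Log2Int(u32) == u5);
    assert(Log2Int(u64) == u6);
}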
pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) type {
assert(from <= to);
if (from == 0 and to == 0) {
return u0;
}
const is_signed = from < 0;
    const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement
const base = log2(largest_positive_integer);
const upper = (1 << base) - 1;
var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1;
if (is_signed) {
magnitude_bits += 1;
}
return @IntType(is_signed, magnitude_bits);
}
test "math.IntFittingRange" {
assert(IntFittingRange(0, 0) == u0);
assert(IntFittingRange(0, 1) == u1);
assert(IntFittingRange(0, 2) == u2);
assert(IntFittingRange(0, 3) == u2);
assert(IntFittingRange(0, 4) == u3);
assert(IntFittingRange(0, 7) == u3);
assert(IntFittingRange(0, 8) == u4);
assert(IntFittingRange(0, 9) == u4);
assert(IntFittingRange(0, 15) == u4);
assert(IntFittingRange(0, 16) == u5);
assert(IntFittingRange(0, 17) == u5);
assert(IntFittingRange(0, 4095) == u12);
assert(IntFittingRange(2000, 4095) == u12);
assert(IntFittingRange(0, 4096) == u13);
assert(IntFittingRange(2000, 4096) == u13);
assert(IntFittingRange(0, 4097) == u13);
assert(IntFittingRange(2000, 4097) == u13);
assert(IntFittingRange(0, 123456789123456798123456789) == u87);
assert(IntFittingRange(0, 123456789123456798123456789123456789123456798123456789) == u177);
assert(IntFittingRange(-1, -1) == i1);
assert(IntFittingRange(-1, 0) == i1);
assert(IntFittingRange(-1, 1) == i2);
assert(IntFittingRange(-2, -2) == i2);
assert(IntFittingRange(-2, -1) == i2);
assert(IntFittingRange(-2, 0) == i2);
assert(IntFittingRange(-2, 1) == i2);
assert(IntFittingRange(-2, 2) == i3);
assert(IntFittingRange(-1, 2) == i3);
assert(IntFittingRange(-1, 3) == i3);
assert(IntFittingRange(-1, 4) == i4);
assert(IntFittingRange(-1, 7) == i4);
assert(IntFittingRange(-1, 8) == i5);
assert(IntFittingRange(-1, 9) == i5);
assert(IntFittingRange(-1, 15) == i5);
assert(IntFittingRange(-1, 16) == i6);
assert(IntFittingRange(-1, 17) == i6);
assert(IntFittingRange(-1, 4095) == i13);
assert(IntFittingRange(-4096, 4095) == i13);
assert(IntFittingRange(-1, 4096) == i14);
assert(IntFittingRange(-4097, 4095) == i14);
assert(IntFittingRange(-1, 4097) == i14);
assert(IntFittingRange(-1, 123456789123456798123456789) == i88);
assert(IntFittingRange(-1, 123456789123456798123456789123456789123456798123456789) == i178);
}
test "math overflow functions" {
testOverflow();
comptime testOverflow();
}
fn testOverflow() void {
assert((mul(i32, 3, 4) catch unreachable) == 12);
assert((add(i32, 3, 4) catch unreachable) == 7);
assert((sub(i32, 3, 4) catch unreachable) == -1);
assert((shlExact(i32, 0b11, 4) catch unreachable) == 0b110000);
}
pub fn absInt(x: var) !@typeOf(x) {
const T = @typeOf(x);
comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer to absInt
comptime assert(T.is_signed); // must pass a signed integer to absInt
if (x == minInt(@typeOf(x))) {
return error.Overflow;
} else {
@setRuntimeSafety(false);
return if (x < 0) -x else x;
}
}
test "math.absInt" {
testAbsInt();
comptime testAbsInt();
}
fn testAbsInt() void {
assert((absInt(i32(-10)) catch unreachable) == 10);
assert((absInt(i32(10)) catch unreachable) == 10);
}
pub const absFloat = @import("fabs.zig").fabs;
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divTrunc(numerator, denominator);
}
test "math.divTrunc" {
testDivTrunc();
comptime testDivTrunc();
}
fn testDivTrunc() void {
assert((divTrunc(i32, 5, 3) catch unreachable) == 1);
assert((divTrunc(i32, -5, 3) catch unreachable) == -1);
if (divTrunc(i8, -5, 0)) |_| unreachable else |err| assert(err == error.DivisionByZero);
if (divTrunc(i8, -128, -1)) |_| unreachable else |err| assert(err == error.Overflow);
assert((divTrunc(f32, 5.0, 3.0) catch unreachable) == 1.0);
assert((divTrunc(f32, -5.0, 3.0) catch unreachable) == -1.0);
}
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divFloor(numerator, denominator);
}
test "math.divFloor" {
testDivFloor();
comptime testDivFloor();
}
fn testDivFloor() void {
assert((divFloor(i32, 5, 3) catch unreachable) == 1);
assert((divFloor(i32, -5, 3) catch unreachable) == -2);
if (divFloor(i8, -5, 0)) |_| unreachable else |err| assert(err == error.DivisionByZero);
if (divFloor(i8, -128, -1)) |_| unreachable else |err| assert(err == error.Overflow);
assert((divFloor(f32, 5.0, 3.0) catch unreachable) == 1.0);
assert((divFloor(f32, -5.0, 3.0) catch unreachable) == -2.0);
}
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
const result = @divTrunc(numerator, denominator);
if (result * denominator != numerator) return error.UnexpectedRemainder;
return result;
}
test "math.divExact" {
testDivExact();
comptime testDivExact();
}
fn testDivExact() void {
assert((divExact(i32, 10, 5) catch unreachable) == 2);
assert((divExact(i32, -10, 5) catch unreachable) == -2);
if (divExact(i8, -5, 0)) |_| unreachable else |err| assert(err == error.DivisionByZero);
if (divExact(i8, -128, -1)) |_| unreachable else |err| assert(err == error.Overflow);
if (divExact(i32, 5, 2)) |_| unreachable else |err| assert(err == error.UnexpectedRemainder);
assert((divExact(f32, 10.0, 5.0) catch unreachable) == 2.0);
assert((divExact(f32, -10.0, 5.0) catch unreachable) == -2.0);
if (divExact(f32, 5.0, 2.0)) |_| unreachable else |err| assert(err == error.UnexpectedRemainder);
}
pub fn mod(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (denominator < 0) return error.NegativeDenominator;
return @mod(numerator, denominator);
}
test "math.mod" {
testMod();
comptime testMod();
}
fn testMod() void {
assert((mod(i32, -5, 3) catch unreachable) == 1);
assert((mod(i32, 5, 3) catch unreachable) == 2);
if (mod(i32, 10, -1)) |_| unreachable else |err| assert(err == error.NegativeDenominator);
if (mod(i32, 10, 0)) |_| unreachable else |err| assert(err == error.DivisionByZero);
assert((mod(f32, -5, 3) catch unreachable) == 1);
assert((mod(f32, 5, 3) catch unreachable) == 2);
if (mod(f32, 10, -1)) |_| unreachable else |err| assert(err == error.NegativeDenominator);
if (mod(f32, 10, 0)) |_| unreachable else |err| assert(err == error.DivisionByZero);
}
pub fn rem(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (denominator < 0) return error.NegativeDenominator;
return @rem(numerator, denominator);
}
test "math.rem" {
testRem();
comptime testRem();
}
fn testRem() void {
assert((rem(i32, -5, 3) catch unreachable) == -2);
assert((rem(i32, 5, 3) catch unreachable) == 2);
if (rem(i32, 10, -1)) |_| unreachable else |err| assert(err == error.NegativeDenominator);
if (rem(i32, 10, 0)) |_| unreachable else |err| assert(err == error.DivisionByZero);
assert((rem(f32, -5, 3) catch unreachable) == -2);
assert((rem(f32, 5, 3) catch unreachable) == 2);
if (rem(f32, 10, -1)) |_| unreachable else |err| assert(err == error.NegativeDenominator);
if (rem(f32, 10, 0)) |_| unreachable else |err| assert(err == error.DivisionByZero);
}
/// Returns the absolute value of the integer parameter.
/// Result is an unsigned integer.
pub fn absCast(x: var) @IntType(false, @typeOf(x).bit_count) {
const uint = @IntType(false, @typeOf(x).bit_count);
if (x >= 0) return @intCast(uint, x);
return @intCast(uint, -(x + 1)) + 1;
}
test "math.absCast" {
assert(absCast(i32(-999)) == 999);
assert(@typeOf(absCast(i32(-999))) == u32);
assert(absCast(i32(999)) == 999);
assert(@typeOf(absCast(i32(999))) == u32);
assert(absCast(i32(minInt(i32))) == -minInt(i32));
assert(@typeOf(absCast(i32(minInt(i32)))) == u32);
}
/// Returns the negation of the integer parameter.
/// Result is a signed integer.
pub fn negateCast(x: var) !@IntType(true, @typeOf(x).bit_count) {
if (@typeOf(x).is_signed) return negate(x);
const int = @IntType(true, @typeOf(x).bit_count);
if (x > -minInt(int)) return error.Overflow;
if (x == -minInt(int)) return minInt(int);
return -@intCast(int, x);
}
test "math.negateCast" {
assert((negateCast(u32(999)) catch unreachable) == -999);
assert(@typeOf(negateCast(u32(999)) catch unreachable) == i32);
assert((negateCast(u32(-minInt(i32))) catch unreachable) == minInt(i32));
assert(@typeOf(negateCast(u32(-minInt(i32))) catch unreachable) == i32);
if (negateCast(u32(maxInt(i32) + 10))) |_| unreachable else |err| assert(err == error.Overflow);
}
/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
pub fn cast(comptime T: type, x: var) (error{Overflow}!T) {
comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer
comptime assert(@typeId(@typeOf(x)) == builtin.TypeId.Int); // must pass an integer
if (maxInt(@typeOf(x)) > maxInt(T) and x > maxInt(T)) {
return error.Overflow;
} else if (minInt(@typeOf(x)) < minInt(T) and x < minInt(T)) {
return error.Overflow;
} else {
return @intCast(T, x);
}
}
test "math.cast" {
if (cast(u8, u32(300))) |_| @panic("fail") else |err| assert(err == error.Overflow);
if (cast(i8, i32(-200))) |_| @panic("fail") else |err| assert(err == error.Overflow);
if (cast(u8, i8(-1))) |_| @panic("fail") else |err| assert(err == error.Overflow);
if (cast(u64, i8(-1))) |_| @panic("fail") else |err| assert(err == error.Overflow);
assert((try cast(u8, u32(255))) == u8(255));
assert(@typeOf(try cast(u8, u32(255))) == u8);
}
pub const AlignCastError = error{UnalignedMemory};
/// Align cast a pointer but return an error if it's the wrong alignment
pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@typeOf(@alignCast(alignment, ptr)) {
const addr = @ptrToInt(ptr);
if (addr % alignment != 0) {
return error.UnalignedMemory;
}
return @alignCast(alignment, ptr);
}
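// Minimal example: a naturally aligned pointer already satisfies the requested
// alignment, so the cast succeeds; a misaligned address would return UnalignedMemory.
test "math.alignCast" {
    var x: u32 = 123;
    const p = alignCast(@alignOf(u32), &x) catch unreachable;
    assert(p.* == 123);
}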
pub fn floorPowerOfTwo(comptime T: type, value: T) T {
var x = value;
comptime var i = 1;
inline while (T.bit_count > i) : (i *= 2) {
x |= (x >> i);
}
return x - (x >> 1);
}
test "math.floorPowerOfTwo" {
testFloorPowerOfTwo();
comptime testFloorPowerOfTwo();
}
pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
assert(x != 0);
return @intCast(Log2Int(T), T.bit_count - 1 - @clz(x));
}
pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
assert(x != 0);
const log2_val = log2_int(T, x);
if (T(1) << log2_val == x)
return log2_val;
return log2_val + 1;
}
test "std.math.log2_int_ceil" {
assert(log2_int_ceil(u32, 1) == 0);
assert(log2_int_ceil(u32, 2) == 1);
assert(log2_int_ceil(u32, 3) == 2);
assert(log2_int_ceil(u32, 4) == 2);
assert(log2_int_ceil(u32, 5) == 3);
assert(log2_int_ceil(u32, 6) == 3);
assert(log2_int_ceil(u32, 7) == 3);
assert(log2_int_ceil(u32, 8) == 3);
assert(log2_int_ceil(u32, 9) == 4);
assert(log2_int_ceil(u32, 10) == 4);
}
fn testFloorPowerOfTwo() void {
assert(floorPowerOfTwo(u32, 63) == 32);
assert(floorPowerOfTwo(u32, 64) == 64);
assert(floorPowerOfTwo(u32, 65) == 64);
assert(floorPowerOfTwo(u4, 7) == 4);
assert(floorPowerOfTwo(u4, 8) == 8);
assert(floorPowerOfTwo(u4, 9) == 8);
}
pub fn lossyCast(comptime T: type, value: var) T {
switch (@typeInfo(@typeOf(value))) {
builtin.TypeId.Int => return @intToFloat(T, value),
builtin.TypeId.Float => return @floatCast(T, value),
builtin.TypeId.ComptimeInt => return T(value),
builtin.TypeId.ComptimeFloat => return T(value),
else => @compileError("bad type"),
}
}
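// lossyCast always produces a value of the requested type, dropping precision or
// range when the source value cannot be represented exactly.
test "math.lossyCast" {
    assert(lossyCast(f32, i32(10)) == 10.0);
    assert(lossyCast(f32, f64(1.5)) == 1.5);
    assert(lossyCast(f32, 3) == 3.0);
}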
test "math.f64_min" {
const f64_min_u64 = 0x0010000000000000;
const fmin: f64 = f64_min;
assert(@bitCast(u64, fmin) == f64_min_u64);
}
pub fn maxInt(comptime T: type) comptime_int {
const info = @typeInfo(T);
const bit_count = comptime_int(info.Int.bits); // TODO #1683
if (bit_count == 0) return 0;
return (1 << (bit_count - @boolToInt(info.Int.is_signed))) - 1;
}
pub fn minInt(comptime T: type) comptime_int {
const info = @typeInfo(T);
const bit_count = comptime_int(info.Int.bits); // TODO #1683
if (!info.Int.is_signed) return 0;
if (bit_count == 0) return 0;
return -(1 << (bit_count - 1));
}
test "minInt and maxInt" {
assert(maxInt(u0) == 0);
assert(maxInt(u1) == 1);
assert(maxInt(u8) == 255);
assert(maxInt(u16) == 65535);
assert(maxInt(u32) == 4294967295);
assert(maxInt(u64) == 18446744073709551615);
assert(maxInt(i0) == 0);
assert(maxInt(i1) == 0);
assert(maxInt(i8) == 127);
assert(maxInt(i16) == 32767);
assert(maxInt(i32) == 2147483647);
assert(maxInt(i63) == 4611686018427387903);
assert(maxInt(i64) == 9223372036854775807);
assert(minInt(u0) == 0);
assert(minInt(u1) == 0);
assert(minInt(u8) == 0);
assert(minInt(u16) == 0);
assert(minInt(u32) == 0);
assert(minInt(u63) == 0);
assert(minInt(u64) == 0);
assert(minInt(i0) == 0);
assert(minInt(i1) == -1);
assert(minInt(i8) == -128);
assert(minInt(i16) == -32768);
assert(minInt(i32) == -2147483648);
assert(minInt(i63) == -4611686018427387904);
assert(minInt(i64) == -9223372036854775808);
}
test "max value type" {
// If the type of maxInt(i32) was i32 then this implicit cast to
// u32 would not work. But since the value is a number literal,
// it works fine.
const x: u32 = maxInt(i32);
assert(x == 2147483647);
} | std/math/index.zig |
const std = @import("std");
const Type = @import("../../type.zig").Type;
const Target = std.Target;
const assert = std.debug.assert;
pub const Class = enum { integer, sse, sseup, x87, x87up, complex_x87, memory, none };
pub fn classifyWindows(ty: Type, target: Target) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
// bytes, or isn't 1, 2, 4, or 8 bytes, must be passed by reference. A single argument
// is never spread across multiple registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
switch (ty.abiSize(target)) {
1, 2, 4, 8 => {},
else => return .memory,
}
return switch (ty.zigTypeTag()) {
.Int, .Bool, .Enum, .Void, .NoReturn, .ErrorSet, .Struct, .Union => .integer,
.Optional => if (ty.isPtrLikeOptional()) return .integer else return .memory,
.Float, .Vector => .sse,
else => unreachable,
};
}
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, target: Target) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag()) {
.Int, .Enum, .ErrorSet => {
const bits = ty.intInfo(target).bits;
if (bits <= 64) {
result[0] = .integer;
return result;
}
if (bits <= 128) {
result[0] = .integer;
result[1] = .integer;
return result;
}
if (bits <= 192) {
result[0] = .integer;
result[1] = .integer;
result[2] = .integer;
return result;
}
if (bits <= 256) {
result[0] = .integer;
result[1] = .integer;
result[2] = .integer;
result[3] = .integer;
return result;
}
return memory_class;
},
.Bool, .Void, .NoReturn => {
result[0] = .integer;
return result;
},
.Float => switch (ty.floatBits(target)) {
16, 32, 64 => {
result[0] = .sse;
return result;
},
128 => {
                // "Arguments of types __float128, _Decimal128 and __m128 are
                // split into two halves. The least significant ones belong
                // to class SSE, the most significant one to class SSEUP."
result[0] = .sse;
result[1] = .sseup;
return result;
},
else => {
                // "The 64-bit mantissa of arguments of type long double
                // belongs to class X87, the 16-bit exponent plus 6 bytes
                // of padding belongs to class X87UP."
result[0] = .x87;
result[1] = .x87up;
return result;
},
},
.Vector => {
const elem_ty = ty.childType();
const bits = elem_ty.bitSize(target) * ty.arrayLen();
if (bits <= 64) return .{
.sse, .none, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 128) return .{
.sse, .sseup, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 192) return .{
.sse, .sseup, .sseup, .none,
.none, .none, .none, .none,
};
if (bits <= 256) return .{
.sse, .sseup, .sseup, .sseup,
.none, .none, .none, .none,
};
if (bits <= 320) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .none, .none, .none,
};
if (bits <= 384) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .sseup, .none, .none,
};
if (bits <= 448) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .sseup, .sseup, .none,
};
if (bits <= 512) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .sseup, .sseup, .sseup,
};
return memory_class;
},
.Optional => {
if (ty.isPtrLikeOptional()) {
result[0] = .integer;
return result;
}
return memory_class;
},
.Struct => {
// "If the size of an object is larger than eight eightbytes, or
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const ty_size = ty.abiSize(target);
if (ty_size > 64)
return memory_class;
var result_i: usize = 0; // out of 8
var byte_i: usize = 0; // out of 8
const fields = ty.structFields();
for (fields.values()) |field| {
if (field.abi_align.tag() != .abi_align_default) {
const field_alignment = field.abi_align.toUnsignedInt();
if (field_alignment < field.ty.abiAlignment(target)) {
return memory_class;
}
}
const field_size = field.ty.abiSize(target);
const field_class_array = classifySystemV(field.ty, target);
const field_class = std.mem.sliceTo(&field_class_array, .none);
if (byte_i + field_size <= 8) {
// Combine this field with the previous one.
combine: {
// "If both classes are equal, this is the resulting class."
if (result[result_i] == field_class[0]) {
break :combine;
}
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (result[result_i] == .none) {
result[result_i] = field_class[0];
break :combine;
}
assert(field_class[0] != .none);
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (result[result_i] == .memory or field_class[0] == .memory) {
result[result_i] = .memory;
break :combine;
}
// "If one of the classes is INTEGER, the result is the INTEGER."
if (result[result_i] == .integer or field_class[0] == .integer) {
result[result_i] = .integer;
break :combine;
}
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (result[result_i] == .x87 or
result[result_i] == .x87up or
result[result_i] == .complex_x87 or
field_class[0] == .x87 or
field_class[0] == .x87up or
field_class[0] == .complex_x87)
{
result[result_i] = .memory;
break :combine;
}
// "Otherwise class SSE is used."
result[result_i] = .sse;
}
byte_i += field_size;
if (byte_i == 8) {
byte_i = 0;
result_i += 1;
}
} else {
// Cannot combine this field with the previous one.
if (byte_i != 0) {
byte_i = 0;
result_i += 1;
}
std.mem.copy(Class, result[result_i..], field_class);
result_i += field_class.len;
// If there are any bytes leftover, we have to try to combine
// the next field with them.
byte_i = field_size % 8;
if (byte_i != 0) result_i -= 1;
}
}
// Post-merger cleanup
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
for (result) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
else => continue,
};
// "If the size of the aggregate exceeds two eightbytes and the first eight-
// byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole argument
// is passed in memory."
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
for (result) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
};
}
return result;
},
.Union => {
// "If the size of an object is larger than eight eightbytes, or
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const ty_size = ty.abiSize(target);
if (ty_size > 64)
return memory_class;
const fields = ty.unionFields();
for (fields.values()) |field| {
if (field.abi_align.tag() != .abi_align_default) {
const field_alignment = field.abi_align.toUnsignedInt();
if (field_alignment < field.ty.abiAlignment(target)) {
return memory_class;
}
}
// Combine this field with the previous one.
const field_class = classifySystemV(field.ty, target);
for (result) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
if (result_item.* == field_item) {
continue;
}
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (result_item.* == .none) {
result_item.* = field_item;
continue;
}
if (field_item == .none) {
continue;
}
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (result_item.* == .memory or field_item == .memory) {
result_item.* = .memory;
continue;
}
// "If one of the classes is INTEGER, the result is the INTEGER."
if (result_item.* == .integer or field_item == .integer) {
result_item.* = .integer;
continue;
}
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (result_item.* == .x87 or
result_item.* == .x87up or
result_item.* == .complex_x87 or
field_item == .x87 or
field_item == .x87up or
field_item == .complex_x87)
{
result_item.* = .memory;
continue;
}
// "Otherwise class SSE is used."
result_item.* = .sse;
}
}
// Post-merger cleanup
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
for (result) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
else => continue,
};
// "If the size of the aggregate exceeds two eightbytes and the first eight-
// byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole argument
// is passed in memory."
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
for (result) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
};
}
return result;
},
else => unreachable,
}
} | src/arch/x86_64/abi.zig |
//! These are MIPS ABI compatible.
pub const E = enum(i32) {
/// No error occurred.
SUCCESS = 0,
PERM = 1,
NOENT = 2,
SRCH = 3,
INTR = 4,
IO = 5,
NXIO = 6,
@"2BIG" = 7,
NOEXEC = 8,
BADF = 9,
CHILD = 10,
/// Also used for WOULDBLOCK.
AGAIN = 11,
NOMEM = 12,
ACCES = 13,
FAULT = 14,
NOTBLK = 15,
BUSY = 16,
EXIST = 17,
XDEV = 18,
NODEV = 19,
NOTDIR = 20,
ISDIR = 21,
INVAL = 22,
NFILE = 23,
MFILE = 24,
NOTTY = 25,
TXTBSY = 26,
FBIG = 27,
NOSPC = 28,
SPIPE = 29,
ROFS = 30,
MLINK = 31,
PIPE = 32,
DOM = 33,
RANGE = 34,
NOMSG = 35,
IDRM = 36,
CHRNG = 37,
L2NSYNC = 38,
L3HLT = 39,
L3RST = 40,
LNRNG = 41,
UNATCH = 42,
NOCSI = 43,
L2HLT = 44,
DEADLK = 45,
NOLCK = 46,
BADE = 50,
BADR = 51,
XFULL = 52,
NOANO = 53,
BADRQC = 54,
BADSLT = 55,
DEADLOCK = 56,
BFONT = 59,
NOSTR = 60,
NODATA = 61,
TIME = 62,
NOSR = 63,
NONET = 64,
NOPKG = 65,
REMOTE = 66,
NOLINK = 67,
ADV = 68,
SRMNT = 69,
COMM = 70,
PROTO = 71,
DOTDOT = 73,
MULTIHOP = 74,
BADMSG = 77,
NAMETOOLONG = 78,
OVERFLOW = 79,
NOTUNIQ = 80,
BADFD = 81,
REMCHG = 82,
LIBACC = 83,
LIBBAD = 84,
LIBSCN = 85,
LIBMAX = 86,
LIBEXEC = 87,
ILSEQ = 88,
NOSYS = 89,
LOOP = 90,
RESTART = 91,
STRPIPE = 92,
NOTEMPTY = 93,
USERS = 94,
NOTSOCK = 95,
DESTADDRREQ = 96,
MSGSIZE = 97,
PROTOTYPE = 98,
NOPROTOOPT = 99,
PROTONOSUPPORT = 120,
SOCKTNOSUPPORT = 121,
OPNOTSUPP = 122,
PFNOSUPPORT = 123,
AFNOSUPPORT = 124,
ADDRINUSE = 125,
ADDRNOTAVAIL = 126,
NETDOWN = 127,
NETUNREACH = 128,
NETRESET = 129,
CONNABORTED = 130,
CONNRESET = 131,
NOBUFS = 132,
ISCONN = 133,
NOTCONN = 134,
UCLEAN = 135,
NOTNAM = 137,
NAVAIL = 138,
ISNAM = 139,
REMOTEIO = 140,
SHUTDOWN = 143,
TOOMANYREFS = 144,
TIMEDOUT = 145,
CONNREFUSED = 146,
HOSTDOWN = 147,
HOSTUNREACH = 148,
ALREADY = 149,
INPROGRESS = 150,
STALE = 151,
CANCELED = 158,
NOMEDIUM = 159,
MEDIUMTYPE = 160,
NOKEY = 161,
KEYEXPIRED = 162,
KEYREVOKED = 163,
KEYREJECTED = 164,
OWNERDEAD = 165,
NOTRECOVERABLE = 166,
RFKILL = 167,
HWPOISON = 168,
DQUOT = 1133,
_,
}; | lib/std/os/bits/linux/errno/mips.zig |
const std = @import("std");
const backend = @import("backend.zig");
const Size = @import("data.zig").Size;
const DataWrapper = @import("data.zig").DataWrapper;
const Widget = @import("widget.zig").Widget;
pub const Tabs_Impl = struct {
pub usingnamespace @import("internal.zig").All(Tabs_Impl);
peer: ?backend.TabContainer = null,
handlers: Tabs_Impl.Handlers = undefined,
dataWrappers: Tabs_Impl.DataWrappers = .{},
childrens: std.ArrayList(Widget),
    /// The widget associated with this Tabs_Impl
widget: ?*Widget = null,
    pub fn init(widget: Widget) Tabs_Impl {
        // assumption: the child list is backed by the page allocator since no allocator is passed in
        var childrens = std.ArrayList(Widget).init(std.heap.page_allocator);
        childrens.append(widget) catch unreachable;
        return Tabs_Impl.init_events(Tabs_Impl{ .childrens = childrens });
    }
pub fn show(self: *Tabs_Impl) !void {
if (self.peer == null) {
var peer = try backend.TabContainer.create();
for (self.childrens.items) |*widget| {
try widget.show();
peer.add(widget.peer.?);
}
self.peer = peer;
try self.show_events();
}
}
    pub fn getPreferredSize(self: *Tabs_Impl, available: Size) Size {
        _ = self;
        _ = available;
        return Size.init(0, 0); // TODO
    }
pub fn add(self: *Tabs_Impl, widget: anytype) !void {
const ComponentType = @import("internal.zig").DereferencedType(@TypeOf(widget));
var genericWidget = try @import("internal.zig").genericWidgetFrom(widget);
if (self.widget) |parent| {
genericWidget.parent = parent;
}
const slot = try self.childrens.addOne();
slot.* = genericWidget;
if (@hasField(ComponentType, "dataWrappers")) {
genericWidget.as(ComponentType).dataWrappers.widget = slot;
}
if (self.peer) |*peer| {
try slot.show();
peer.insert(peer.getTabsNumber(), slot.peer.?);
}
}
pub fn _deinit(self: *Tabs_Impl, _: *Widget) void {
for (self.childrens.items) |*child| {
child.deinit();
}
self.childrens.deinit();
}
};
fn isErrorUnion(comptime T: type) bool {
return switch (@typeInfo(T)) {
.ErrorUnion => true,
else => false,
};
}
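// Both operands are comptime-known, so this exercises the helper at compile time.
test "isErrorUnion" {
    comptime {
        std.debug.assert(isErrorUnion(anyerror!u8));
        std.debug.assert(!isErrorUnion(u8));
    }
}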
pub fn Tabs(element: anytype) anyerror!Tabs_Impl {
const child =
if (comptime isErrorUnion(@TypeOf(element)))
try element
else
element;
const widget = try @import("internal.zig").genericWidgetFrom(child);
return Tabs_Impl.init(widget);
} | src/tabs.zig |
const std = @import("std");
const debug = std.debug;
const heap = std.heap;
const math = std.math;
const mem = std.mem;
const testing = std.testing;
const CustomList = @import("list.zig").CustomList;
const List = @import("list.zig").List;
test "List.foreach" {
var buf: [1024 * 1024 * 5]u8 = undefined;
const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
const l = try List(u8).fromSlice(allocator, "a" ** (1024 * 4));
var i: usize = 0;
while (i < l.len()) : (i += 1) {
var j: usize = i;
l.foreach(i, &j, struct {
fn each(expect_i: *usize, actual_i: usize, c: u8) error{}!void {
testing.expectEqual(expect_i.*, actual_i);
testing.expectEqual(@as(u8, 'a'), c);
expect_i.* += 1;
}
}.each) catch {};
testing.expectEqual(l.len(), j);
}
}
test "fuzz case 3-128-2-580" {
var buf: [1024 * 1024 * 5]u8 = undefined;
const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
var l = CustomList(u8, 3){ .allocator = allocator };
var cmp = std.ArrayList(u8).init(allocator);
l = try CustomList(u8, 3).fromSlice(allocator, "dvtusjjmqsiojhglereivjnhkdvyeqdjtcutufsezllzjrmupifivylniljdjyfioyboualnynwiygddjgtfpkod");
try cmp.resize(0);
try cmp.appendSlice("dvtusjjmqsiojhglereivjnhkdvyeqdjtcutufsezllzjrmupifivylniljdjyfioyboualnynwiygddjgtfpkod");
try testList(l, cmp.items);
l = try l.insertSlice(17, "wvcgqeluuybbenuiunnnrcdyvoqrmdinfwffgyryebafzauyzpwwlzuoirkxlyjqboyvtkbehondfnqhzdrsrhqfexyindwoop");
try cmp.insertSlice(17, "wvcgqeluuybbenuiunnnrcdyvoqrmdinfwffgyryebafzauyzpwwlzuoirkxlyjqboyvtkbehondfnqhzdrsrhqfexyindwoop");
try testList(l, cmp.items);
}
test "fuzz case 4-128-2-6313103345818793189" {
var buf: [1024 * 1024 * 5]u8 = undefined;
const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
var l = CustomList(u8, 4){ .allocator = allocator };
var cmp = std.ArrayList(u8).init(allocator);
l = try CustomList(u8, 4).fromSlice(allocator, "<KEY>");
try cmp.resize(0);
try cmp.appendSlice("<KEY>");
try testList(l, cmp.items);
l = try l.slice(101, 116);
try cmp.appendSlice(cmp.toOwnedSlice()[101..116]);
try testList(l, cmp.items);
}
test "fuzz case 32-512-2-16721983880728474569" {
var buf: [1024 * 1024 * 5]u8 = undefined;
const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
var l = CustomList(u8, 32){ .allocator = allocator };
var cmp = std.ArrayList(u8).init(allocator);
l = try CustomList(u8, 32).fromSlice(allocator, "<KEY>");
try cmp.resize(0);
try cmp.appendSlice("<KEY>");
try testList(l, cmp.items);
l = try l.insertSlice(319, "<KEY>");
try cmp.insertSlice(319, "<KEY>");
try testList(l, cmp.items);
}
pub fn testList(list: anytype, expect: []const u8) !void {
const other = @TypeOf(list).fromSlice(list.allocator, expect) catch unreachable;
var it = list.iterator(0);
try list.foreach(0, expect, struct {
fn t(e: []const u8, i: usize, item: u8) !void {
if (e[i] != item)
return error.TestFailed;
}
}.t);
for (expect) |c, i| {
if (list.at(i) != c)
return error.TestFailed;
const item = it.next() orelse return error.TestFailed;
if (item != c)
return error.TestFailed;
}
if (it.next() != null)
return error.TestFailed;
if (!list.equal(other))
return error.TestFailed;
} | src/core/list_tests.zig |
const std = @import("std");
const testing = std.testing;
const allocator = std.heap.page_allocator;
pub const Evaluator = struct {
pub const Precedence = enum {
None,
MulBeforeAdd,
AddBeforeMul,
};
precedence: Precedence,
nums: [128]usize,
oprs: [128]u8,
pn: usize,
po: usize,
pub fn init(precedence: Precedence) Evaluator {
return Evaluator{
.precedence = precedence,
.nums = undefined,
.oprs = undefined,
.pn = 0,
.po = 0,
};
}
pub fn deinit(self: *Evaluator) void {
_ = self;
}
pub fn reset(self: *Evaluator) void {
self.pn = 0;
self.po = 0;
}
pub fn push_num(self: *Evaluator, num: usize) void {
// std.debug.warn("NUM {} {}\n", .{ self.pn, num });
self.nums[self.pn] = num;
self.pn += 1;
}
pub fn push_op(self: *Evaluator, op: u8) void {
// std.debug.warn("OP {} {c}\n", .{ self.po, op });
self.oprs[self.po] = op;
self.po += 1;
}
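// pop the two topmost numbers and the topmost operator, apply it,
// and push the result back onto the number stack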
fn reduce_one(self: *Evaluator) bool {
if (self.pn < 2 or self.po < 1) return false;
const l = self.nums[self.pn - 2];
const r = self.nums[self.pn - 1];
const o = self.oprs[self.po - 1];
const a = switch (o) {
'+' => l + r,
'*' => l * r,
else => @panic("REDUCE"),
};
// std.debug.warn("REDUCE {}({}) {c}({}) {}({}) = {}\n", .{ l, self.pn - 2, o, self.po - 1, r, self.pn - 1, a });
self.nums[self.pn - 2] = a;
self.pn -= 1;
self.po -= 1;
return true;
}
// reduce while operator found
fn reduce_eq(self: *Evaluator, needed: usize, op: u8) void {
while (self.pn >= needed and self.po >= 1 and self.oprs[self.po - 1] == op) {
if (!self.reduce_one()) break;
}
}
// reduce while operator not found
fn reduce_ne(self: *Evaluator, needed: usize, op: u8) void {
while (self.pn >= needed and self.po >= 1 and self.oprs[self.po - 1] != op) {
if (!self.reduce_one()) break;
}
}
// reduce high precedence while possible
fn reduce_greedy(self: *Evaluator) void {
switch (self.precedence) {
.AddBeforeMul => self.reduce_eq(2, '+'),
.MulBeforeAdd => self.reduce_eq(2, '*'),
.None => self.reduce_ne(2, '('),
}
}
// reduce inside parenthesis and then greedily
fn reduce_parens(self: *Evaluator) void {
self.reduce_ne(1, '(');
self.po -= 1;
self.reduce_greedy();
}
pub fn eval(self: *Evaluator, str: []const u8) usize {
// std.debug.warn("\nEVAL {}\n", .{str});
self.reset();
for (str) |c| {
switch (c) {
' ', '\t' => {},
'0'...'9' => {
const n = c - '0';
self.push_num(n);
self.reduce_greedy();
},
'+' => {
self.push_op(c);
},
'*' => {
self.push_op(c);
},
'(' => {
self.push_op(c);
},
')' => {
self.reduce_parens();
},
else => {
@panic("CHAR");
},
}
}
self.reduce_ne(2, 0);
return self.nums[0];
}
};
test "samples part a" {
var evaluator = Evaluator.init(Evaluator.Precedence.None);
defer evaluator.deinit();
try testing.expect(evaluator.eval("1 + 2 * 3 + 4 * 5 + 6") == 71);
try testing.expect(evaluator.eval("1 + (2 * 3) + (4 * (5 + 6))") == 51);
try testing.expect(evaluator.eval("2 * 3 + (4 * 5)") == 26);
try testing.expect(evaluator.eval("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 437);
try testing.expect(evaluator.eval("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))") == 12240);
try testing.expect(evaluator.eval("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2") == 13632);
}
test "samples normal precedence" {
var evaluator = Evaluator.init(Evaluator.Precedence.MulBeforeAdd);
defer evaluator.deinit();
try testing.expect(evaluator.eval("1 + 2 * 3 + 4 * 5 + 6") == 33);
try testing.expect(evaluator.eval("1 + (2 * 3) + (4 * (5 + 6))") == 51);
try testing.expect(evaluator.eval("2 * 3 + (4 * 5)") == 26);
try testing.expect(evaluator.eval("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 74);
try testing.expect(evaluator.eval("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))") == 5490);
try testing.expect(evaluator.eval("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2") == 3208);
try testing.expect(evaluator.eval("(6 + 6 * 8) + 4 * 3 + (8 * 2 + 2 * 3) * (7 * (8 * 4)) * 9") == 44418);
}
test "samples part b" {
var evaluator = Evaluator.init(Evaluator.Precedence.AddBeforeMul);
defer evaluator.deinit();
try testing.expect(evaluator.eval("1 + 2 * 3 + 4 * 5 + 6") == 231);
try testing.expect(evaluator.eval("1 + (2 * 3) + (4 * (5 + 6))") == 51);
try testing.expect(evaluator.eval("2 * 3 + (4 * 5)") == 46);
try testing.expect(evaluator.eval("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 1445);
try testing.expect(evaluator.eval("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))") == 669060);
try testing.expect(evaluator.eval("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2") == 23340);
// this is how I found out a bug
try testing.expect(evaluator.eval("(6 + 6 * 8) + 4 * 3 + (8 * 2 + 2 * 3) * (7 * (8 * 4)) * 9") == 19958400);
} | 2020/p18/evaluator.zig |
const std = @import("std");
const log = std.log;
const utils = @import("utils.zig");
const ast = @import("ast.zig");
const NodeKind = ast.NodeKind;
const Node = ast.Node;
const TokenKind = @import("tokenizer.zig").TokenKind;
const csl = @import("csl_json.zig");
const CitationItem = csl.CitationItem;
const bib_to_csl_json = @import("bib_to_csl.zig").bib_to_csl_json;
const bibtex = @import("bibtex.zig");
const builtin = @import("builtin.zig");
const BuiltinCall = builtin.BuiltinCall;
// pub const CiteprocResult = struct {
// citations: [][]FormattedOrLiteral,
// // array of [id, []FormattedOrLiteral]
// bibliography: [][2][]FormattedOrLiteral,
// warnings: []const u8,
// };
// pub const FormattedOrLiteral = union(enum) {
// literal: []const u8,
// formatted: FormattedString,
// };
// pub const FormattedString = struct {
// format: Format,
// contents: []const []const u8,
// };
pub const Format = enum {
// outerquotes, // only present in rendercontext but not where json output is emitted
italics,
@"no-italics",
bold,
@"no-bold",
underline,
@"no-decoration",
@"small-caps",
@"no-small-caps",
superscript,
subscript,
baseline,
div,
};
/// will use json's memory for strings
/// caller takes ownership
pub fn nodes_from_citeproc_json(
allocator: *std.mem.Allocator,
json: []const u8,
cite_nodes: []*Node, // TODO NodeKind.Citation nodes in the same order as citations were passed to citeproc
) ![]*Node {
// NOTE: either use the Parser and keep the ValueTree and generate formatted
// strings from that directly or use json.TokenStream to generate
// CiteprocResult from that 'manually'
var stream = std.json.Parser.init(allocator, false);
defer stream.deinit(); // deallocates node/values stack
var json_tree = try stream.parse(json);
// json_tree.arena holds allocated Arrays/ObjectMaps/Strings
// no Strings allocated since we passed false as copy_strings
// otherwise we could not free at the end of this proc
defer json_tree.deinit();
var bib_nodes = std.ArrayList(*Node).init(allocator);
const citations = &json_tree.root.Object.getEntry("citations").?.value_ptr.*;
std.debug.assert(citations.Array.items.len == cite_nodes.len);
for (citations.Array.items) |citation, i| {
try nodes_from_formatted(allocator, citation.Array.items, cite_nodes[i]);
}
const bibliography = &json_tree.root.Object.getEntry("bibliography").?.value_ptr.*;
for (bibliography.Array.items) |bib_entry| {
const entry_node = try Node.create(allocator);
// NOTE: only passing string ids to citeproc so we can only expect string ids back
const entry_id = try allocator.dupe(u8, bib_entry.Array.items[0].String);
entry_node.data = .{
.BibEntry = .{ .id = entry_id },
};
try bib_nodes.append(entry_node);
try nodes_from_formatted(allocator, bib_entry.Array.items[1].Array.items, entry_node);
}
return bib_nodes.toOwnedSlice();
}
/// adds formatted ast.Nodes to first_parent from a Citeproc formatted string in json
fn nodes_from_formatted(
allocator: *std.mem.Allocator,
formatted_items: []const std.json.Value,
first_parent: *Node
) !void {
// TODO instead of changing the BuiltinCall node to Citation
// use Citation node for a single "CitationItem" in the sense of csl/citeproc
// and store the id so we can later generate a link to the corresponding bibentry
for (formatted_items) |formatted| {
switch (formatted) {
.String => |str| {
const txt_node = try Node.create(allocator);
// NOTE: json.Parser re-allocates strings if there is an escape token (\)
// inside them -> dupe them (otherwise we can't free the ValueTree
// returned from the parser)
// (unfortunately no S.escapes is available like in the json.Parser itself
// to check if there are escapes)
txt_node.data = .{
.Text = .{ .text = try allocator.dupe(u8, str) },
};
first_parent.append_child(txt_node);
},
.Object => |obj| {
const format = std.meta.stringToEnum(Format, obj.get("format").?.String).?;
var parent: *Node = undefined;
switch (format) {
.italics => {
parent = try Node.create(allocator);
parent.data = .{
.Emphasis = .{ .opener_token_kind = TokenKind.Asterisk },
};
first_parent.append_child(parent);
},
.bold => {
parent = try Node.create(allocator);
parent.data = .{
.StrongEmphasis = .{ .opener_token_kind = TokenKind.Asterisk_double },
};
first_parent.append_child(parent);
},
.@"small-caps" => {
parent = try Node.create(allocator);
parent.data = .SmallCaps;
first_parent.append_child(parent);
},
.superscript => {
parent = try Node.create(allocator);
parent.data = .Superscript;
first_parent.append_child(parent);
},
.subscript => {
parent = try Node.create(allocator);
parent.data = .Subscript;
first_parent.append_child(parent);
},
.underline => {
parent = try Node.create(allocator);
parent.data = .Underline;
first_parent.append_child(parent);
},
.baseline, // TODO what is this?
.@"no-italics",
.@"no-bold",
.@"no-decoration",
.@"no-small-caps",
=> parent = first_parent,
.div => unreachable,
}
const contents = obj.get("contents").?;
for (contents.Array.items) |str| {
var txt_node = try Node.create(allocator);
txt_node.data = .{
.Text = .{ .text = try allocator.dupe(u8, str.String) },
};
parent.append_child(txt_node);
}
},
else => unreachable,
}
}
}
/// HAS to be called with parser's node_arena.allocator (or another ArenaAllocator)
/// potential @MemoryLeak if no ArenaAllocator or sth similar is used
/// since the caller takes ownership of stdout and stderr that are
/// currently not passed TODO
pub fn run_citeproc(
allocator: *std.mem.Allocator,
cite_nodes: []*Node,
references: []csl.Item,
csl_file: []const u8,
locale: []const u8,
) ![]*Node {
// NOTE: Try spawning the process before doing most of the work, so we can at
// least abort on Windows, should the executable not be available
// (on POSIX that info is delayed till we .wait() on the process, at least
// in zig's implementation)
// NOTE: excecutable has to be specified without extension otherwise it tries to
// find it as executable.exe.exe *.exe.bat etc.
// see: https://github.com/ziglang/zig/pull/2705 and https://github.com/ziglang/zig/pull/2770
const cmd = &[_][]const u8{
"citeproc", "--format=json",
"--style", csl_file,
};
log.debug("Cite commands:", .{});
for (cmd) |c| {
log.debug("{s} ", .{ c });
}
var runner = try std.ChildProcess.init(cmd, allocator);
defer runner.deinit();
runner.stdin_behavior = .Pipe;
runner.stdout_behavior = .Pipe;
runner.stderr_behavior = .Pipe;
// order important otherwise stdin etc. not initialized
try runner.spawn();
// jgm/citeproc states that it takes either an array of Citation{} objects (json)
// or an array of CitationItem arrays
// but if the first option is passed it errors:
// Error in $.citations[0]: parsing [] failed, expected Array, but encountered Object
// .citations = &[_]Citation {
// .{
// .schema = "https://resource.citationstyles.org/schema/latest/input/json/csl-citation.json",
// .citationID = .{ .number = 1, },
// .citationItems = cites[0..],
// },
// },
var citations = std.ArrayList([]const CitationItem).init(allocator);
defer citations.deinit();
// TODO take ArenaAllocator as param and use child allocator here?
var ids = std.BufSet.init(allocator);
defer ids.deinit();
for (cite_nodes) |cite| {
if (cite.data.BuiltinCall.result) |result| {
switch (result.*) {
// same problem as below: &[1]CitationItem { single_cite }
// for casting a single item ptr x to a slice:
// ([]T)(*[1]T)(&x) (using old casting syntax)
// I guess the first cast is now implicit
.cite => |*single_cite| try citations.append(@as(*[1]CitationItem, single_cite)),
// NOTE: this just overwrites the previous one that was appended
// since |two_cites| will be the values copied on the stack (which is [2]CitationItem)
// .textcite => |two_cites| try citations.append(two_cites[0..]),
// whereas |*two_cites| will be [2]*CitationItem
// @Compiler TODO the documentation mention that |value| will actually
// copy value onto the stack (and not use *const value as I assumed)
// even though it does say that |*value| makes it a ptr
.textcite => |*two_cites| try citations.append(two_cites[0..]),
.cites => |cites| try citations.append(cites),
else => unreachable,
}
// add used citation ids for gathering references next
for (citations.items[citations.items.len - 1]) |citation| {
switch (citation.id) {
.string => |str| try ids.insert(str),
.number => |num| {
// enough chars for a 64-bit number
var buf: [20]u8 = undefined;
const str = try std.fmt.bufPrint(buf[0..], "{}", .{ num });
// BufSet copies the str so this is fine
try ids.insert(str);
},
}
}
}
}
var used_refs = std.ArrayList(csl.Item).init(allocator);
defer used_refs.deinit();
for (references) |ref| {
switch (ref.id) {
.string => |str| {
if (ids.contains(str))
try used_refs.append(ref);
},
.number => |num| {
// enough chars for a 64-bit number
var buf: [20]u8 = undefined;
const str = try std.fmt.bufPrint(buf[0..], "{}", .{ num });
// BufSet copies the str so this is fine
if (ids.contains(str))
try used_refs.append(ref);
},
}
}
var to_citeproc = .{
// citations = [[CitationItem, ..], [CitationItem, ..], ..]
.citations = citations.items,
.references = used_refs.items,
.lang = locale,
};
// write program code to stdin
// debug try std.json.stringify(to_citeproc, .{}, std.io.getStdOut().writer());
try std.json.stringify(to_citeproc, .{}, runner.stdin.?.writer());
runner.stdin.?.close();
// has to be set to null otherwise the ChildProcess tries to close it again
// and hits unreachable code
runner.stdin = null;
log.debug("Done writing to stdin!\n", .{});
// might deadlock due to https://github.com/ziglang/zig/issues/6343
const stdout = try runner.stdout.?.reader().readAllAlloc(allocator, 10 * 1024 * 1024);
defer allocator.free(stdout);
log.debug("Done reading from citeproc stdout!\n", .{});
// log.debug("OUT:\n{s}\n", .{ stdout });
const stderr = try runner.stderr.?.reader().readAllAlloc(allocator, 10 * 1024 * 1024);
defer allocator.free(stderr);
log.debug("Done reading from citeproc stderr!\nERR:\n{s}\n", .{stderr});
_ = try runner.wait();
var res = try nodes_from_citeproc_json(allocator, stdout, cite_nodes);
return res;
}
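// Call-pattern sketch (added for illustration, not part of the original API):
// per the doc comment above, run_citeproc should be handed an arena allocator
// so all returned Node allocations share one lifetime. The style file name and
// locale below are placeholder values.
fn example_run_citeproc(
    arena: *std.heap.ArenaAllocator,
    cite_nodes: []*Node,
    references: []csl.Item,
) ![]*Node {
    return run_citeproc(&arena.allocator, cite_nodes, references, "apa.csl", "en-US");
}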
pub fn csl_items_from_file(
allocator: *std.mem.Allocator,
filename: []const u8,
write_conversion: bool
) !csl.CSLJsonParser.Result {
var ref_file_type = enum { bib, json, unsupported }.unsupported;
if (std.mem.endsWith(u8, filename, ".bib")) {
ref_file_type = .bib;
} else if (std.mem.endsWith(u8, filename, ".json")) {
ref_file_type = .json;
} else {
log.err("Only CSL-JSON and bib(la)tex are supported as bibliography file formats!", .{});
return error.UnsupportedRefFile;
}
const file = blk: {
if (std.fs.path.isAbsolute(filename)) {
break :blk try std.fs.openFileAbsolute(filename, .{ .read = true, .write = false });
} else {
break :blk try std.fs.cwd().openFile(filename, .{ .read = true, .write = false });
}
};
defer file.close();
// 20 MiB max
const ref_file_bytes = try file.readToEndAlloc(allocator, 20 * 1024 * 1024);
defer allocator.free(ref_file_bytes);
var csl_json_result: csl.CSLJsonParser.Result = undefined;
switch (ref_file_type) {
.bib => {
var bibparser = bibtex.BibParser.init(allocator, filename, ref_file_bytes);
var bib = try bibparser.parse();
defer bib.deinit();
var arena = std.heap.ArenaAllocator.init(allocator);
// NOTE: we copy the strings from the Bibliography values so we can free it
const items = try bib_to_csl_json(&arena.allocator, bib, true);
csl_json_result = .{ .arena = arena, .items = items };
// write file
if (write_conversion) {
// swap .bib with .json extension
var converted_fn = try allocator.alloc(u8, filename.len + 1);
@memcpy(converted_fn.ptr, filename.ptr, filename.len);
@memcpy(converted_fn[filename.len - 3..].ptr, "json", 4);
const write_file = blk: {
if (std.fs.path.isAbsolute(converted_fn)) {
// truncate: reduce file to length 0 if it exists
break :blk try std.fs.createFileAbsolute(
converted_fn, .{ .read = true, .truncate = true });
} else {
break :blk try std.fs.cwd().createFile(
converted_fn, .{ .read = true, .truncate = true });
}
};
defer write_file.close();
try csl.write_items_json(items, write_file.writer());
}
},
.json => {
csl_json_result = try csl.read_items_json(allocator, ref_file_bytes);
},
else => unreachable,
}
return csl_json_result;
} | src/cite.zig |
const os = @import("root").os;
const platform = os.platform;
const log = os.log;
const libalign = os.lib.libalign;
const range = os.lib.range.range;
const range_reverse = os.lib.range.range_reverse;
const pmm = os.memory.pmm;
const Context = *platform.paging.PagingContext;
pub var kernel_context: os.platform.paging.PagingContext = undefined;
pub fn init() void {
os.platform.paging.PagingContext.read_current();
}
pub fn map(args: struct {
virt: usize,
size: usize,
perm: Perms,
memtype: platform.paging.MemoryType,
context: Context = &kernel_context,
}) !void {
var argc = args;
return map_impl_with_rollback(.{
.virt = &argc.virt,
.phys = null,
.size = &argc.size,
.context = argc.context,
.perm = argc.perm,
.memtype = argc.memtype
});
}
pub fn map_phys(args: struct {
virt: usize,
phys: usize,
size: usize,
perm: Perms,
memtype: platform.paging.MemoryType,
context: Context = &kernel_context,
}) !void {
var argc = args;
return map_impl_with_rollback(.{
.virt = &argc.virt,
.phys = &argc.phys,
.size = &argc.size,
.context = argc.context,
.perm = argc.perm,
.memtype = argc.memtype
});
}
pub fn unmap(args: struct {
virt: usize,
size: usize,
reclaim_pages: bool,
context: Context = &kernel_context,
}) void {
var argc = args;
unmap_loop(&argc.virt, &argc.size, argc.reclaim_pages, argc.context);
}
pub const Perms = struct {
writable: bool,
executable: bool,
userspace: bool = false,
pub fn allows(self: @This(), other: @This()) bool {
if(!self.writable and other.writable)
return false;
if(!self.executable and other.executable)
return false;
if(!self.userspace and other.userspace)
return false;
return true;
}
pub fn add_perms(self: @This(), other: @This()) @This() {
return .{
.writable = self.writable or other.writable,
.executable = self.executable or other.executable,
.userspace = self.userspace or other.userspace,
};
}
};
pub fn rx() Perms {
return .{
.writable = false,
.executable = true,
};
}
pub fn ro() Perms {
return .{
.writable = false,
.executable = false,
};
}
pub fn rw() Perms {
return .{
.writable = true,
.executable = false,
};
}
pub fn rwx() Perms {
return .{
.writable = true,
.executable = true,
};
}
pub fn user(p: Perms) Perms {
var ret = p;
ret.userspace = true;
return ret;
}
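// Illustrative sketch (added, not part of the original file): the permission
// helpers above are meant to be composed and handed to `map`; the virtual
// address and size below are arbitrary placeholder values.
fn example_map_user_rw() !void {
    try map(.{
        .virt = 0x40000000,
        .size = 0x1000,
        .perm = user(rw()),
        .memtype = .MemoryWriteBack,
    });
}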
pub fn get_current_paging_root() platform.paging_root {
return platform.current_paging_root();
}
pub fn set_context(new_context: Context) !void {
new_context.apply();
kernel_context = new_context.*;
}
extern var __kernel_text_begin: u8;
extern var __kernel_text_end: u8;
extern var __kernel_data_begin: u8;
extern var __kernel_data_end: u8;
extern var __kernel_rodata_begin: u8;
extern var __kernel_rodata_end: u8;
extern var __bootstrap_stack_bottom: u8;
extern var __bootstrap_stack_top: u8;
pub fn bootstrap_kernel_paging() !platform.paging.PagingContext {
// Setup some paging
var new_context = try platform.paging.PagingContext.make_default();
try map_kernel_section(&new_context, &__kernel_text_begin, &__kernel_text_end, rx());
try map_kernel_section(&new_context, &__kernel_data_begin, &__kernel_data_end, rw());
try map_kernel_section(&new_context, &__kernel_rodata_begin, &__kernel_rodata_end, ro());
try map_kernel_section(&new_context, &__bootstrap_stack_bottom, &__bootstrap_stack_top, rw());
return new_context;
}
fn map_kernel_section(new_paging_context: Context, start: *u8, end: *u8, perm: Perms) !void {
const virt = @ptrToInt(start);
const phys = os.vital(translate_virt(.{.virt = virt}), "Translating kaddr");
const region_size = @ptrToInt(end) - virt;
os.vital(map_phys(.{
.virt = virt,
.phys = phys,
.size = region_size,
.perm = perm,
.memtype = .MemoryWriteBack,
.context = new_paging_context,
}), "Mapping kernel section");
}
pub fn map_physmem(args: struct {
context: Context,
map_limit: usize,
}) !void {
// Map once with each memory type
try map_phys(.{
.virt = args.context.phys_to_write_back_virt(0),
.phys = 0,
.size = args.map_limit,
.perm = rw(),
.context = args.context,
.memtype = .MemoryWriteBack,
});
try map_phys(.{
.virt = args.context.phys_to_write_combining_virt(0),
.phys = 0,
.size = args.map_limit,
.perm = rw(),
.context = args.context,
.memtype = .DeviceWriteCombining,
});
try map_phys(.{
.virt = args.context.phys_to_uncached_virt(0),
.phys = 0,
.size = args.map_limit,
.perm = rw(),
.context = args.context,
.memtype = .DeviceUncacheable,
});
}
/// Tries to map the range. If the mapping fails,
/// it unmaps any memory it has touched.
fn map_impl_with_rollback(args: struct {
virt: *usize,
phys: ?*usize,
size: *usize,
perm: Perms,
memtype: platform.paging.MemoryType,
context: Context,
}) !void {
const start_virt = args.virt.*;
if(!is_aligned(args.virt.*, args.phys, 0, args.context) or
!libalign.is_aligned(usize, args.context.page_size(0, args.virt.*), args.size.*)) {
// virt, phys and size all need to be aligned
return error.BadAlignment;
}
errdefer {
// Roll it back
if(start_virt != args.virt.*) {
unmap(.{
.virt = start_virt,
.size = args.virt.* - start_virt,
.reclaim_pages = args.phys == null,
.context = args.context,
});
}
}
const root = args.context.root_table(args.virt.*);
try map_impl(args.virt, args.phys, args.size, root, args.perm, args.memtype, args.context);
if(args.size.* != 0)
return error.IncompleteMapping;
}
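// Returns true if virt (and phys, if supplied) are aligned to the page size
// used at `level` in this paging context.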
fn is_aligned(virt: usize, phys: ?*usize, level: anytype, context: Context) bool {
if(!libalign.is_aligned(usize, context.page_size(level, virt), virt))
return false;
if(phys) |p|
return libalign.is_aligned(usize, context.page_size(level, virt), p.*);
return true;
}
const MapError = error{
AlreadyPresent,
OutOfMemory,
PhysAllocTooSmall,
};
fn map_impl(
virt: *usize,
phys: ?*usize,
size: *usize,
table: anytype,
perm: Perms,
memtype: platform.paging.MemoryType,
context: Context,
) MapError!void {
const children = table.skip_to(virt.*);
for(children) |*child| {
switch(table.decode_child(child)) {
.Mapping => return error.AlreadyPresent,
.Table => |*tbl| {
try map_impl(virt, phys, size, tbl.*, perm, memtype, context);
if(!tbl.perms.allows(perm)) {
tbl.add_perms(perm);
context.invalidate(virt.*);
}
},
.Empty => {
const dom = table.child_domain(virt.*);
// Should we map at the current level?
if(dom.ptr == virt.* and dom.len <= size.*
and context.can_map_at_level(table.level() - 1)
and is_aligned(virt.*, phys, table.level() - 1, context)) {
_ = try table.make_child_mapping(child, if(phys) |p| p.* else null, perm, memtype);
const step = dom.len;
if(step >= size.*) {
size.* = 0;
return;
}
else {
size.* -= step;
virt.* += step;
if(phys) |p|
p.* += step;
}
} else {
const tbl = try table.make_child_table(child, perm);
try map_impl(virt, phys, size, tbl, perm, memtype, context);
}
},
}
if(size.* == 0)
return;
}
}
fn translate_virt_impl(
virt: usize,
table: anytype,
context: Context,
) error{NotPresent}!usize {
const child = table.decode_child(&table.skip_to(virt)[0]);
const dom = table.child_domain(virt);
switch(child) {
.Empty => return error.NotPresent,
.Mapping => |m| return m.mapped_bytes().ptr + virt - dom.ptr,
.Table => |t| return translate_virt_impl(virt, t, context),
}
}
pub fn translate_virt(args: struct {
virt: usize,
context: Context = &kernel_context,
}) !usize {
const root = args.context.root_table(args.virt);
return translate_virt_impl(args.virt, root, args.context);
}
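// Walks the page tables starting at the root for `virt`, clearing every
// mapping in the range and optionally returning the backing physical pages
// to the PMM.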
fn unmap_loop(
virt: *usize,
size: *usize,
reclaim_pages: bool,
context: Context,
) void {
const root = context.root_table(virt.*);
while(size.* != 0)
unmap_iter(virt, size, reclaim_pages, root, context);
}
fn unmap_iter(
virt: *usize,
size: *usize,
reclaim_pages: bool,
table: anytype,
context: Context,
) void {
for(table.skip_to(virt.*)) |*child| {
const dom = table.child_domain(virt.*);
switch(table.decode_child(child)) {
.Empty => {
if(dom.len >= size.*){
size.* = 0;
return;
}
virt.* += dom.len;
size.* -= dom.len;
},
.Table => |tbl| unmap_iter(virt, size, reclaim_pages, tbl, context),
.Mapping => |mapping| {
if(dom.len > size.* or dom.ptr != virt.*)
@panic("No partial unmapping");
if(reclaim_pages)
pmm.free_phys(mapping.phys, dom.len);
child.* = context.encode_empty(mapping.level);
context.invalidate(virt.*);
virt.* += dom.len;
size.* -= dom.len;
},
}
if(size.* == 0)
return;
}
}
pub fn print_paging(root: *platform.PagingRoot) void {
log("Paging: {x}\n", .{root});
for(platform.root_tables(root)) |table| {
log("Dumping page tables from root {x}\n", .{table});
print_impl(table, paging_levels - 1);
}
}
fn print_impl(root: *page_table, comptime level: usize) void {
var offset: u32 = 0;
var had_any: bool = false;
while(offset < platform.paging.page_sizes[0]): (offset += 8) {
const ent = @intToPtr(*page_table_entry, @ptrToInt(root) + offset);
if(ent.is_present(level)) {
had_any = true;
var cnt = paging_levels - level - 1;
while(cnt != 0) {
log(" ", .{});
cnt -= 1;
}
log("Index {x:0>3}: {}\n", .{offset/8, ent});
if(level != 0) {
if(ent.is_table(level))
print_impl(ent.get_table(level) catch unreachable, level - 1);
}
}
}
if(!had_any) {
var cnt = paging_levels - level - 1;
while(cnt != 0) {
log(" ", .{});
cnt -= 1;
}
log("Empty table\n", .{});
}
}
pub fn switch_to_context(context: Context) void {
const state = os.platform.get_and_disable_interrupts();
context.apply();
os.platform.get_current_task().paging_context = context;
os.platform.set_interrupts(state);
} | src/memory/paging.zig |
const assert = @import("std").debug.assert;
const client = @import("wayland.zig").client;
const common = @import("common.zig");
pub const Object = common.Object;
pub const Message = common.Message;
pub const Interface = common.Interface;
pub const Array = common.Array;
pub const Fixed = common.Fixed;
pub const Argument = common.Argument;
pub const Proxy = opaque {
extern fn wl_proxy_create(factory: *Proxy, interface: *const Interface) ?*Proxy;
pub fn create(factory: *Proxy, interface: *const Interface) error{OutOfMemory}!*Proxy {
return wl_proxy_create(factory, interface) orelse error.OutOfMemory;
}
extern fn wl_proxy_destroy(proxy: *Proxy) void;
pub fn destroy(proxy: *Proxy) void {
wl_proxy_destroy(proxy);
}
extern fn wl_proxy_marshal_array(proxy: *Proxy, opcode: u32, args: ?[*]Argument) void;
pub const marshal = wl_proxy_marshal_array;
extern fn wl_proxy_marshal_array_constructor(
proxy: *Proxy,
opcode: u32,
args: [*]Argument,
interface: *const Interface,
) ?*Proxy;
pub fn marshalConstructor(
proxy: *Proxy,
opcode: u32,
args: [*]Argument,
interface: *const Interface,
) error{OutOfMemory}!*Proxy {
return wl_proxy_marshal_array_constructor(proxy, opcode, args, interface) orelse
error.OutOfMemory;
}
extern fn wl_proxy_marshal_array_constructor_versioned(
proxy: *Proxy,
opcode: u32,
args: [*]Argument,
interface: *const Interface,
version: u32,
) ?*Proxy;
pub fn marshalConstructorVersioned(
proxy: *Proxy,
opcode: u32,
args: [*]Argument,
interface: *const Interface,
version: u32,
) error{OutOfMemory}!*Proxy {
return wl_proxy_marshal_array_constructor_versioned(proxy, opcode, args, interface, version) orelse
error.OutOfMemory;
}
const DispatcherFn = fn (
implementation: ?*const c_void,
proxy: *Proxy,
opcode: u32,
message: *const Message,
args: [*]Argument,
) callconv(.C) c_int;
extern fn wl_proxy_add_dispatcher(
proxy: *Proxy,
dispatcher: DispatcherFn,
implementation: ?*const c_void,
data: ?*c_void,
) c_int;
pub fn addDispatcher(
proxy: *Proxy,
dispatcher: DispatcherFn,
implementation: ?*const c_void,
data: ?*c_void,
) void {
const ret = wl_proxy_add_dispatcher(proxy, dispatcher, implementation, data);
// Since there is no way to remove listeners, adding a listener to
// the same proxy twice is always a bug, so assert instead of returning
// an error.
assert(ret != -1); // If this fails, a listener was already added
}
extern fn wl_proxy_get_user_data(proxy: *Proxy) ?*c_void;
pub fn getUserData(proxy: *Proxy) ?*c_void {
return wl_proxy_get_user_data(proxy);
}
extern fn wl_proxy_get_version(proxy: *Proxy) u32;
pub fn getVersion(proxy: *Proxy) u32 {
return wl_proxy_get_version(proxy);
}
extern fn wl_proxy_get_id(proxy: *Proxy) u32;
pub fn getId(proxy: *Proxy) u32 {
return wl_proxy_get_id(proxy);
}
};
pub const EventQueue = opaque {
extern fn wl_event_queue_destroy(queue: *EventQueue) void;
pub fn destroy(event_queue: *EventQueue) void {
wl_event_queue_destroy(event_queue);
}
};
pub const EglWindow = opaque {
extern fn wl_egl_window_create(surface: *client.wl.Surface, width: c_int, height: c_int) ?*EglWindow;
pub fn create(surface: *client.wl.Surface, width: c_int, height: c_int) !*EglWindow {
// Why do people use int when they require a positive number?
assert(width > 0 and height > 0);
return wl_egl_window_create(surface, width, height) orelse error.OutOfMemory;
}
extern fn wl_egl_window_destroy(egl_window: *EglWindow) void;
pub const destroy = wl_egl_window_destroy;
extern fn wl_egl_window_resize(egl_window: *EglWindow, width: c_int, height: c_int, dx: c_int, dy: c_int) void;
pub const resize = wl_egl_window_resize;
extern fn wl_egl_window_get_attached_size(egl_window: *EglWindow, width: *c_int, height: *c_int) void;
pub const getAttachedSize = wl_egl_window_get_attached_size;
};
pub const CursorTheme = opaque {
extern fn wl_cursor_theme_load(name: ?[*:0]const u8, size: c_int, shm: *client.wl.Shm) ?*CursorTheme;
pub fn load(name: ?[*:0]const u8, size: i32, shm: *client.wl.Shm) error{LoadThemeFailed}!*CursorTheme {
return wl_cursor_theme_load(name, @intCast(c_int, size), shm) orelse error.LoadThemeFailed;
}
extern fn wl_cursor_theme_destroy(wl_cursor_theme: *CursorTheme) void;
pub const destroy = wl_cursor_theme_destroy;
extern fn wl_cursor_theme_get_cursor(theme: *CursorTheme, name: [*:0]const u8) ?*Cursor;
pub const getCursor = wl_cursor_theme_get_cursor;
};
pub const Cursor = extern struct {
image_count: c_uint,
images: [*]*CursorImage,
name: [*:0]u8,
extern fn wl_cursor_frame(cursor: *Cursor, time: u32) c_int;
pub const frame = wl_cursor_frame;
extern fn wl_cursor_frame_and_duration(cursor: *Cursor, time: u32, duration: *u32) c_int;
pub const frameAndDuration = wl_cursor_frame_and_duration;
};
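// Usage sketch (added for illustration): a typical consumer loads a cursor
// theme, looks up a named cursor, and fetches the buffer of one of its images.
// The theme size (24) and cursor name ("left_ptr") are example values.
fn exampleCursorBuffer(shm: *client.wl.Shm) !*client.wl.Buffer {
    const theme = try CursorTheme.load(null, 24, shm);
    defer theme.destroy();
    const cursor = theme.getCursor("left_ptr") orelse return error.CursorNotFound;
    return cursor.images[0].getBuffer();
}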
pub const CursorImage = extern struct {
width: u32,
height: u32,
hotspot_x: u32,
hotspot_y: u32,
delay: u32,
extern fn wl_cursor_image_get_buffer(image: *CursorImage) ?*client.wl.Buffer;
pub fn getBuffer(image: *CursorImage) error{OutOfMemory}!*client.wl.Buffer {
return wl_cursor_image_get_buffer(image) orelse error.OutOfMemory;
}
}; | src/wayland_client_core.zig |
const std = @import("std");
const kernel = @import("root");
const x86 = @import("../x86.zig");
const elf = std.elf;
var logger = @TypeOf(x86.logger).childOf(@typeName(@This())){};
const trampoline_elf = @embedFile("../../../build/x86_64/trampolines.o");
pub fn getTrampolineELF() []const u8 {
return trampoline_elf;
}
pub fn get_nth_section(header: *const elf.Header, file: anytype, idx: u16) ?elf.Elf64_Shdr {
var sec_it = header.section_header_iterator(file);
var cnt: u16 = 0;
while (true) : (cnt += 1) {
var value = sec_it.next() catch @panic("failed to iterate ELF section headers");
if (value == null) break;
if (cnt == idx) {
return value.?;
}
}
return null;
}
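// Returns the NUL-terminated string at offset `idx` within the trampoline
// ELF's .strtab section, or null if no terminator is found.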
pub fn getString(idx: u32) ?[]const u8 {
var fbs = std.io.fixedBufferStream(trampoline_elf);
const header = elf.Header.read(&fbs) catch {
@panic("Invalid ELF header");
};
const section = find_section_by_name(&header, &fbs, ".strtab").?;
const start = section.sh_offset;
const end = start + section.sh_size;
const section_data = trampoline_elf[start..end];
const data = section_data[idx..];
const null_pos = std.mem.indexOfScalar(u8, data, 0);
if (null_pos) |finish| {
return data[0..finish];
}
return null;
}
pub fn getSymbol(idx: u32) ?elf.Elf64_Sym {
var fbs = std.io.fixedBufferStream(trampoline_elf);
const header = elf.Header.read(&fbs) catch {
@panic("Invalid ELF header");
};
const section = find_section_by_name(&header, &fbs, ".symtab").?;
const section_data = init: {
const start = section.sh_offset;
const end = start + section.sh_size;
break :init trampoline_elf[start..end];
};
const data = init: {
const start = @sizeOf(elf.Elf64_Sym) * idx;
const end = start + @sizeOf(elf.Elf64_Sym);
break :init section_data[start..end];
};
var result: elf.Elf64_Sym = undefined;
std.debug.assert(@sizeOf(elf.Elf64_Sym) == data.len);
std.mem.copy(u8, std.mem.asBytes(&result), data);
return result;
}
pub fn find_section_by_name(header: *const elf.Header, file: anytype, name: []const u8) ?elf.Elf64_Shdr {
const section = get_nth_section(header, file, header.shstrndx).?;
var buffer: [0x100]u8 = undefined;
file.seekableStream().seekTo(section.sh_offset) catch unreachable;
file.reader().readNoEof(&buffer) catch unreachable;
var name_it = std.mem.split(u8, buffer[0..section.sh_size], "\x00");
var i: u16 = 0;
while (true) : (i += 1) {
const v = name_it.next();
if (v) |sname| {
if (std.mem.eql(u8, sname, name)) {
return get_nth_section(header, file, i);
}
} else {
break;
}
}
return null;
}
pub fn getSectionByName(name: []const u8) ?elf.Elf64_Shdr {
var fbs = std.io.fixedBufferStream(trampoline_elf);
const header = elf.Header.read(&fbs) catch {
@panic("Invalid ELF header");
};
return find_section_by_name(&header, &fbs, name);
}
pub fn getSectionData(name: []const u8) ?[]const u8 {
const section = getSectionByName(name);
if (section == null) return null;
const start = section.?.sh_offset;
const end = start + section.?.sh_size;
return getTrampolineELF()[start..end];
}
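// Illustrative sketch (added): callers typically grab a named section's bytes
// from the embedded trampoline object; ".text" here is just an example name.
fn exampleTextSectionSize() usize {
    const data = getSectionData(".text") orelse return 0;
    return data.len;
}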
pub fn init() void {
logger.log("Initializing trampolines\n", .{});
} | kernel/arch/x86/trampoline.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const math = std.math;
const log = @import("./log.zig").log;
const math_utils = @import("./math_utils.zig");
const Point = math_utils.Point;
const Line = math_utils.Line;
const star_math = @import("./star_math.zig");
const SkyCoord = star_math.SkyCoord;
const ObserverPosition = star_math.ObserverPosition;
pub const Pixel = packed struct {
r: u8 = 0,
g: u8 = 0,
b: u8 = 0,
a: u8 = 0,
pub fn rgb(r: u8, g: u8, b: u8) Pixel {
return Pixel{ .r = r, .g = g, .b = b, .a = 255 };
}
pub fn rgba(r: u8, g: u8, b: u8, a: u8) Pixel {
return Pixel{ .r = r, .g = g, .b = b, .a = a };
}
};
pub const Canvas = struct {
pub const Settings = struct {
width: u32,
height: u32,
background_radius: f32,
zoom_factor: f32,
drag_speed: f32,
draw_north_up: bool,
draw_constellation_grid: bool,
draw_asterisms: bool,
zodiac_only: bool,
};
data: []Pixel,
settings: Settings,
pub fn init(allocator: *Allocator, settings: Settings) !Canvas {
var canvas: Canvas = undefined;
canvas.settings = settings;
canvas.data = try allocator.alloc(Pixel, canvas.settings.width * canvas.settings.height);
for (canvas.data) |*p| {
p.* = Pixel{};
}
return canvas;
}
pub fn setPixelAt(self: *Canvas, point: Point, new_pixel: Pixel) void {
if (std.math.isNan(point.x) or std.math.isNan(point.y)) {
return;
}
if (point.x < 0 or point.y < 0) return;
if (point.x > @intToFloat(f32, self.settings.width) or point.y > @intToFloat(f32, self.settings.height)) return;
const x = @floatToInt(usize, point.x);
const y = @floatToInt(usize, point.y);
const p_index: usize = (y * @intCast(usize, self.settings.width)) + x;
if (p_index >= self.data.len) return;
self.data[p_index] = new_pixel;
}
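// Projects a sky coordinate onto the canvas for the given observer position.
// Returns null if the coordinate cannot be projected or, when
// filter_below_horizon is set, if it lies below the horizon.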
pub fn coordToPoint(canvas: Canvas, sky_coord: SkyCoord, observer_pos: ObserverPosition, filter_below_horizon: bool) ?Point {
const two_pi = comptime math.pi * 2.0;
const local_sidereal_time = observer_pos.localSiderealTime();
const hour_angle = local_sidereal_time - @as(f64, sky_coord.right_ascension);
const hour_angle_rad = math_utils.floatMod(math_utils.degToRad(hour_angle), two_pi);
const declination_rad = @floatCast(f64, math_utils.degToRad(sky_coord.declination));
const sin_dec = math.sin(declination_rad);
const lat_rad = math_utils.degToRad(observer_pos.latitude);
const sin_lat = math.sin(lat_rad);
const cos_lat = math.cos(lat_rad);
const sin_alt = sin_dec * sin_lat + math.cos(declination_rad) * cos_lat * math.cos(hour_angle_rad);
const altitude = math_utils.boundedASin(sin_alt) catch return null;
if (filter_below_horizon and altitude < 0) {
return null;
}
const cos_azi = (sin_dec - math.sin(altitude) * sin_lat) / (math.cos(altitude) * cos_lat);
const azi = math.acos(cos_azi);
const azimuth = if (math.sin(hour_angle_rad) < 0) azi else two_pi - azi;
const canvas_point = blk: {
const radius = comptime 2.0 / math.pi;
// s is the distance from the center of the projection circle to the point
// aka 1 - the angular distance along the surface of the sky sphere
const s = 1.0 - (radius * altitude);
// Convert from polar to cartesian coordinates
break :blk Point{
// @note without negating x here, the whole chart is rendered backwards. Not sure if this is where the negation
// is SUPPOSED to go, or if I messed up a negation somewhere else and this is just a hack that makes it work
.x = -@floatCast(f32, s * math.sin(azimuth)),
.y = @floatCast(f32, s * math.cos(azimuth))
};
};
return canvas.translatePoint(canvas_point);
}
pub fn pointToCoord(canvas: Canvas, point: Point, observer_pos: ObserverPosition) ?SkyCoord {
if (!canvas.isInsideCircle(point)) return null;
const raw_point = canvas.untranslatePoint(point);
// Distance from raw_point to the center of the sky circle
const s = math.sqrt(math.pow(f32, raw_point.x, 2.0) + math.pow(f32, raw_point.y, 2.0));
const altitude = (math.pi * (1 - s)) / 2;
const observer_lat_rad = math_utils.degToRad(observer_pos.latitude);
const sin_lat = math.sin(observer_lat_rad);
const cos_lat = math.cos(observer_lat_rad);
const declination = math_utils.boundedASin(((raw_point.y / s) * math.cos(altitude) * cos_lat) + (math.sin(altitude) * sin_lat)) catch {
log(.Error, "Error computing declination", .{});
return null;
};
var hour_angle_rad = math_utils.boundedACos((math.sin(altitude) - (math.sin(declination) * sin_lat)) / (math.cos(declination) * cos_lat)) catch {
log(.Error, "Error computing hour angle. Declination was {d:.3}", .{declination});
return null;
};
hour_angle_rad = if (raw_point.x < 0) -hour_angle_rad else hour_angle_rad;
const hour_angle = math_utils.radToDeg(hour_angle_rad);
const lst = observer_pos.localSiderealTime();
const right_ascension = math_utils.floatMod(lst - hour_angle, 360);
return SkyCoord{
.right_ascension = @floatCast(f32, right_ascension),
.declination = math_utils.radToDeg(declination)
};
}
pub fn translatePoint(self: Canvas, pt: Point) Point {
const center = Point{
.x = @intToFloat(f32, self.settings.width) / 2.0,
.y = @intToFloat(f32, self.settings.height) / 2.0,
};
// A multiplier used to convert a coordinate in [-1, 1] to a coordinate on the actual canvas, taking into
// account the rendering modifiers that can change based on the user zooming in/out or travelling across the poles
const direction_modifier: f32 = if (self.settings.draw_north_up) 1.0 else -1.0;
const translate_factor: f32 = direction_modifier * self.settings.background_radius * self.settings.zoom_factor;
return Point{
.x = center.x + (translate_factor * pt.x),
.y = center.y - (translate_factor * pt.y)
};
}
pub fn untranslatePoint(self: Canvas, pt: Point) Point {
const center = Point{
.x = @intToFloat(f32, self.settings.width) / 2.0,
.y = @intToFloat(f32, self.settings.height) / 2.0
};
const direction_modifier: f32 = if (self.settings.draw_north_up) 1.0 else -1.0;
const translate_factor: f32 = direction_modifier * self.settings.background_radius * self.settings.zoom_factor;
return Point{
.x = (pt.x - center.x) / translate_factor,
.y = (pt.y - center.y) / -translate_factor
};
}
pub fn isInsideCircle(self: Canvas, point: Point) bool {
const center = Point{
.x = @intToFloat(f32, self.settings.width) / 2.0,
.y = @intToFloat(f32, self.settings.height) / 2.0,
};
return point.getDist(center) <= self.settings.background_radius;
}
pub fn drawLine(self: *Canvas, line: Line, color: Pixel, thickness: u32) void {
const num_points = @floatToInt(u32, 75 * self.settings.zoom_factor);
const is_a_inside_circle = self.isInsideCircle(line.a);
const start = if (is_a_inside_circle) line.a else line.b;
const end = if (is_a_inside_circle) line.b else line.a;
const expand_x = line.getSlope() > 1.5;
const total_dist = start.getDist(end);
var point_index: u32 = 0;
while (point_index < num_points) : (point_index += 1) {
const point_dist = (total_dist / @intToFloat(f32, num_points)) * @intToFloat(f32, point_index);
var next_point = Point{
.x = start.x + (point_dist / total_dist) * (end.x - start.x),
.y = start.y + (point_dist / total_dist) * (end.y - start.y)
};
var width_index: u32 = 0;
while (width_index < thickness) : (width_index += 1) {
if (expand_x) {
next_point.x += 1;
} else {
next_point.y += 1;
}
if (self.isInsideCircle(next_point)) {
self.setPixelAt(next_point, color);
} else break;
}
}
}
};
test "translate point" {
const canvas_settings = Canvas.Settings{
.width = 700,
.height = 700,
.draw_north_up = true,
.background_radius = 0.45 * 700.0,
.zoom_factor = 1.0,
.draw_asterisms = false,
.draw_constellation_grid = false,
};
var canvas = try Canvas.init(std.testing.allocator, canvas_settings);
defer std.testing.allocator.free(canvas.data);
const point = Point{
.x = 0.5,
.y = -0.3
};
const translated_point = canvas.translatePoint(point);
const untranslated_point = canvas.untranslatePoint(translated_point);
std.testing.expectWithinMargin(untranslated_point.x, point.x, 0.005);
std.testing.expectWithinMargin(untranslated_point.y, point.y, 0.005);
} | night-math/src/render.zig |
const std = @import("std");
const snow = @import("snow.zig");
const sync = @import("sync.zig");
const pike = @import("pike");
const net = std.net;
const mem = std.mem;
const testing = std.testing;
test "client / server" {
const Protocol = struct {
const Self = @This();
event: sync.Event = .{},
pub fn handshake(self: *Self, comptime side: snow.Side, socket: anytype) !void {
return {};
}
pub fn close(self: *Self, comptime side: snow.Side, socket: anytype) void {
return {};
}
pub fn purge(self: *Self, comptime side: snow.Side, socket: anytype, items: []const []const u8) void {
return {};
}
pub fn read(self: *Self, comptime side: snow.Side, socket: anytype, reader: anytype) !void {
while (true) {
const line = try reader.readLine();
defer reader.shift(line.len);
self.event.notify();
}
}
pub fn write(self: *Self, comptime side: snow.Side, socket: anytype, writer: anytype, items: [][]const u8) !void {
for (items) |message| {
if (mem.indexOfScalar(u8, message, '\n') != null) {
return error.UnexpectedDelimiter;
}
const frame = try writer.peek(message.len + 1);
mem.copy(u8, frame[0..message.len], message);
frame[message.len..][0] = '\n';
}
try writer.flush();
}
};
const opts: snow.Options = .{ .protocol_type = *Protocol };
const Test = struct {
fn run(notifier: *const pike.Notifier, protocol: *Protocol, stopped: *bool) !void {
defer stopped.* = true;
var server = try snow.Server(opts).init(
protocol,
testing.allocator,
notifier,
net.Address.initIp4(.{ 0, 0, 0, 0 }, 0),
);
defer server.deinit();
try server.serve();
var client = snow.Client(opts).init(
protocol,
testing.allocator,
notifier,
try server.socket.getBindAddress(),
);
defer client.deinit();
inline for (.{ "A", "B", "C", "D" }) |message| {
try client.write(message);
protocol.event.wait();
}
}
};
const notifier = try pike.Notifier.init();
defer notifier.deinit();
var protocol: Protocol = .{};
var stopped = false;
var frame = async Test.run(¬ifier, &protocol, &stopped);
while (!stopped) {
try notifier.poll(10_000);
}
try nosuspend await frame;
} | test.zig |
const std = @import("std");
const warn = std.debug.warn;
const math = std.math;
const gl = @import("opengl.zig");
usingnamespace @import("util.zig");
pub const window_name = "genexp004";
pub const window_width = 2 * 1024;
pub const window_height = 2 * 1024;
const bounds: f64 = 3.0;
pub const GenerativeExperimentState = struct {
prng: std.rand.DefaultPrng,
pass: u32 = 0,
y: f64 = -bounds,
fs_postprocess: u32 = 0,
tex_fp32: u32 = 0,
fbo_fp32: u32 = 0,
fbo_srgb: u32 = 0,
pub fn init() GenerativeExperimentState {
return GenerativeExperimentState{
.prng = std.rand.DefaultPrng.init(123),
};
}
pub fn deinit(self: GenerativeExperimentState) void {
gl.deleteTextures(1, &self.tex_fp32);
gl.deleteProgram(self.fs_postprocess);
}
};
pub fn setup(genexp: *GenerativeExperimentState) !void {
gl.getIntegerv(gl.DRAW_FRAMEBUFFER_BINDING, @ptrCast([*c]c_int, &genexp.fbo_srgb));
gl.clearBufferfv(gl.COLOR, 0, &[_]f32{ 1.0, 1.0, 1.0, 1.0 });
gl.pointSize(3.0);
gl.blendFunc(gl.ONE, gl.ONE);
gl.blendEquation(gl.FUNC_ADD);
gl.matrixLoadIdentityEXT(gl.PROJECTION);
gl.matrixOrthoEXT(gl.PROJECTION, -3.0, 3.0, -3.0, 3.0, -1.0, 1.0);
gl.createTextures(gl.TEXTURE_RECTANGLE, 1, &genexp.tex_fp32);
gl.textureStorage2D(genexp.tex_fp32, 1, gl.RGBA32F, window_width, window_height);
gl.clearTexImage(genexp.tex_fp32, 0, gl.RGBA, gl.FLOAT, null);
gl.createFramebuffers(1, &genexp.fbo_fp32);
gl.namedFramebufferTexture(genexp.fbo_fp32, gl.COLOR_ATTACHMENT0, genexp.tex_fp32, 0);
genexp.fs_postprocess = gl.createShaderProgramv(gl.FRAGMENT_SHADER, 1, &@as([*c]const u8,
\\ #version 460 compatibility
\\ layout(binding = 0) uniform sampler2DRect tex_fp32;
\\
\\ void main() {
\\ vec3 color = texture(tex_fp32, gl_FragCoord.xy).rgb;
\\ color = color / (color + 1.0);
\\ color = 1.0 - color;
\\ color = pow(color, vec3(2.2));
\\ gl_FragColor = vec4(color, 1.0);
\\ }
));
}
pub fn update(genexp: *GenerativeExperimentState, time: f64, dt: f32) void {
gl.enable(gl.BLEND);
gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, genexp.fbo_fp32);
gl.useProgram(0);
if (genexp.y <= bounds and genexp.pass == 0) {
gl.begin(gl.POINTS);
const step: f64 = 0.001;
var row: u32 = 0;
while (row < 16) : (row += 1) {
var x: f64 = -bounds;
while (x <= bounds) : (x += step) {
const xoff = genexp.prng.random.floatNorm(f64) * 0.002;
const yoff = genexp.prng.random.floatNorm(f64) * 0.002;
gl.color4f(0.002, 0.002, 0.002, 1.0);
gl.vertex2d(x + xoff, genexp.y + yoff);
}
genexp.y += step;
}
gl.end();
} else if (genexp.y <= bounds and genexp.pass == 1) {
gl.begin(gl.POINTS);
const step: f64 = 0.001;
var row: u32 = 0;
while (row < 4) : (row += 1) {
var x: f64 = -bounds;
while (x <= bounds) : (x += step) {
var v = Vec2d{ .x = x, .y = genexp.y };
var i: u32 = 0;
while (i < 4) : (i += 1) {
const xoff = genexp.prng.random.floatNorm(f64) * 0.01;
const yoff = genexp.prng.random.floatNorm(f64) * 0.01;
v = pdj(v, 1.0);
v = julia(v, 1.5, genexp.prng.random.float(f64));
v = hyperbolic(v, 1.0);
v = sinusoidal(v, 2.0);
gl.color4f(0.001, 0.001, 0.001, 1.0);
gl.vertex2d(v.x + xoff, v.y + yoff);
}
}
genexp.y += step;
}
gl.end();
}
if (genexp.y >= bounds) {
genexp.y = -bounds;
genexp.pass += 1;
}
gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, genexp.fbo_srgb);
gl.bindTextureUnit(0, genexp.tex_fp32);
gl.disable(gl.BLEND);
gl.useProgram(genexp.fs_postprocess);
gl.begin(gl.QUADS);
gl.vertex2f(-3.0, -3.0);
gl.vertex2f(3.0, -3.0);
gl.vertex2f(3.0, 3.0);
gl.vertex2f(-3.0, 3.0);
gl.end();
}
fn sinusoidal(v: Vec2d, scale: f64) Vec2d {
return Vec2d{ .x = scale * math.sin(v.x), .y = scale * math.sin(v.y) };
}
fn hyperbolic(v: Vec2d, scale: f64) Vec2d {
const r = v.length() + 0.0001;
const theta = math.atan2(f64, v.x, v.y);
const x = scale * math.sin(theta) / r;
const y = scale * math.cos(theta) * r;
return Vec2d{ .x = x, .y = y };
}
fn pdj(v: Vec2d, scale: f64) Vec2d {
const pdj_a = 0.1;
const pdj_b = 1.9;
const pdj_c = -0.8;
const pdj_d = -1.2;
//const pdj_a = 1.0111;
//const pdj_b = -1.011;
//const pdj_c = 2.08;
//const pdj_d = 10.2;
return Vec2d{
.x = scale * (math.sin(pdj_a * v.y) - math.cos(pdj_b * v.x)),
.y = scale * (math.sin(pdj_c * v.x) - math.cos(pdj_d * v.y)),
};
}
fn julia(v: Vec2d, scale: f64, rand01: f64) Vec2d {
const r = scale * math.sqrt(v.length());
const theta = 0.5 * math.atan2(f64, v.x, v.y) +
math.pi * @intToFloat(f64, @floatToInt(i32, 2.0 * rand01));
const x = r * math.cos(theta);
const y = r * math.sin(theta);
return Vec2d{ .x = x, .y = y };
} | src/genexp004.zig |
usingnamespace @import("psptypes.zig");
pub const RegKeyTypes = extern enum(c_int) {
REG_TYPE_DIR = 1,
REG_TYPE_INT = 2,
REG_TYPE_STR = 3,
REG_TYPE_BIN = 4,
_,
};
pub const RegHandle = c_uint;
pub const RegParam = extern struct {
regtype: c_uint,
name: [256]u8,
namelen: c_uint,
unk2: c_uint,
unk3: c_uint,
};
// Open the registry
//
// @param reg - A filled in ::RegParam structure
// @param mode - Open mode (set to 1)
// @param h - Pointer to a REGHANDLE to receive the registry handle
//
// @return 0 on success, < 0 on error
pub extern fn sceRegOpenRegistry(reg: *RegParam, mode: c_int, h: *RegHandle) c_int;
pub fn regOpenRegistry(reg: *RegParam, mode: c_int, h: *RegHandle) !void {
var res = sceRegOpenRegistry(reg, mode, h);
if (res < 0) {
return error.Unexpected;
}
}
// Flush the registry to disk
//
// @param h - The open registry handle
//
// @return 0 on success, < 0 on error
pub extern fn sceRegFlushRegistry(h: RegHandle) c_int;
pub fn regFlushRegistry(h: *RegHandle) !void {
var res = sceRegFlushRegistry(h);
if (res < 0) {
return error.Unexpected;
}
}
// Close the registry
//
// @param h - The open registry handle
//
// @return 0 on success, < 0 on error
pub extern fn sceRegCloseRegistry(h: RegHandle) c_int;
pub fn regCloseRegistry(h: RegHandle) !void {
var res = sceRegCloseRegistry(h);
if (res < 0) {
return error.Unexpected;
}
}
// Open a registry directory
//
// @param h - The open registry handle
// @param name - The path to the dir to open (e.g. /CONFIG/SYSTEM)
// @param mode - Open mode (can be 1 or 2, probably read or read/write
// @param hd - Pointer to a REGHANDLE to receive the registry dir handle
//
// @return 0 on success, < 0 on error
pub extern fn sceRegOpenCategory(h: RegHandle, name: []const u8, mode: c_int, hd: *RegHandle) c_int;
pub fn regOpenCategory(h: RegHandle, name: []const u8, mode: c_int, hd: *RegHandle) !void {
var res = sceRegOpenCategory(h, name, mode, hd);
if (res < 0) {
return error.Unexpected;
}
}
// Remove a registry dir
//
// @param h - The open registry dir handle
// @param name - The name of the key
//
// @return 0 on success, < 0 on error
pub extern fn sceRegRemoveCategory(h: RegHandle, name: []const u8) c_int;
pub fn regRemoveCategory(h: RegHandle, name: []const u8) !void {
var res = sceRegRemoveCategory(h, name);
if (res < 0) {
return error.Unexpected;
}
}
// Close the registry directory
//
// @param hd - The open registry dir handle
//
// @return 0 on success, < 0 on error
pub extern fn sceRegCloseCategory(hd: RegHandle) c_int;
pub fn regCloseCategory(hd: RegHandle) !void {
var res = sceRegCloseCategory(hd);
if (res < 0) {
return error.Unexpected;
}
}
// Flush the registry directory to disk
//
// @param hd - The open registry dir handle
//
// @return 0 on success, < 0 on error
pub extern fn sceRegFlushCategory(hd: RegHandle) c_int;
pub fn regFlushCategory(hd: RegHandle) !void {
var res = sceRegFlushCategory(hd);
if (res < 0) {
return error.Unexpected;
}
}
// Get a key's information
//
// @param hd - The open registry dir handle
// @param name - Name of the key
// @param hk - Pointer to a REGHANDLE to get registry key handle
// @param type - Type of the key, one of ::RegKeyTypes
// @param size - The size of the key's value in bytes
//
// @return 0 on success, < 0 on error
pub extern fn sceRegGetKeyInfo(hd: RegHandle, name: []const u8, hk: *RegHandle, typec: *c_uint, size: *SceSize) c_int;
pub fn regGetKeyInfo(hd: RegHandle, name: []const u8, hk: *RegHandle, typec: *c_uint, size: *SceSize) !void {
var res = sceRegGetKeyInfo(hd, name, hk, typec, size);
if (res < 0) {
return error.Unexpected;
}
}
// Get a key's information by name
//
// @param hd - The open registry dir handle
// @param name - Name of the key
// @param type - Type of the key, one of ::RegKeyTypes
// @param size - The size of the key's value in bytes
//
// @return 0 on success, < 0 on error
pub extern fn sceRegGetKeyInfoByName(hd: RegHandle, name: []const u8, typec: *c_uint, size: *SceSize) c_int;
pub fn regGetKeyInfoByName(hd: RegHandle, name: []const u8, typec: *c_uint, size: *SceSize) !void {
var res = sceRegGetKeyInfoByName(hd, name, typec, size);
if (res < 0) {
return error.Unexpected;
}
}
// Get a key's value
//
// @param hd - The open registry dir handle
// @param hk - The open registry key handler (from ::sceRegGetKeyInfo)
// @param buf - Buffer to hold the value
// @param size - The size of the buffer
//
// @return 0 on success, < 0 on error
pub extern fn sceRegGetKeyValue(hd: RegHandle, hk: RegHandle, buf: ?*c_void, size: SceSize) c_int;
pub fn regGetKeyValue(hd: RegHandle, hk: RegHandle, buf: ?*c_void, size: SceSize) !void {
var res = sceRegGetKeyValue(hd, hk, buf, size);
if (res < 0) {
return error.Unexpected;
}
}
// Get a key's value by name
//
// @param hd - The open registry dir handle
// @param name - The key name
// @param buf - Buffer to hold the value
// @param size - The size of the buffer
//
// @return 0 on success, < 0 on error
pub extern fn sceRegGetKeyValueByName(hd: RegHandle, name: []const u8, buf: ?*c_void, size: SceSize) c_int;
pub fn regGetKeyValueByName(hd: RegHandle, name: []const u8, buf: ?*c_void, size: SceSize) !void {
var res = sceRegGetKeyValueByName(hd, name, buf, size);
if (res < 0) {
return error.Unexpected;
}
}
// Set a key's value
//
// @param hd - The open registry dir handle
// @param name - The key name
// @param buf - Buffer to hold the value
// @param size - The size of the buffer
//
// @return 0 on success, < 0 on error
pub extern fn sceRegSetKeyValue(hd: RegHandle, name: []const u8, buf: ?*const c_void, size: SceSize) c_int;
pub fn regSetKeyValue(hd: RegHandle, name: []const u8, buf: ?*const c_void, size: SceSize) !void {
var res = sceRegSetKeyValue(hd, name, buf, size);
if (res < 0) {
return error.Unexpected;
}
}
// Get number of subkeys in the current dir
//
// @param hd - The open registry dir handle
// @param num - Pointer to an integer to receive the number
//
// @return 0 on success, < 0 on error
pub extern fn sceRegGetKeysNum(hd: RegHandle, num: *c_int) c_int;
pub fn regGetKeysNum(hd: RegHandle, num: *c_int) !void {
var res = sceRegGetKeysNum(hd, num);
if (res < 0) {
return error.Unexpected;
}
}
// Get the key names in the current directory
//
// @param hd - The open registry dir handle
// @param buf - Buffer to hold the NUL-terminated key names; should be num*REG_KEYNAME_SIZE bytes
// @param num - Number of elements in buf
//
// @return 0 on success, < 0 on error
pub extern fn sceRegGetKeys(hd: RegHandle, buf: [*]u8, num: c_int) c_int;
pub fn regGetKeys(hd: RegHandle, buf: [*]u8, num: c_int) !void {
var res = sceRegGetKeys(hd, buf, num);
if (res < 0) {
return error.Unexpected;
}
}
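// Example sketch (not part of the original SDK; `allocator` and the
// REG_KEYNAME_SIZE constant referenced in the comment above are assumed to be
// available): enumerating the keys of an open directory means asking for the count
// first and sizing the name buffer accordingly:
//
//     var num: c_int = 0;
//     try regGetKeysNum(hd, &num);
//     const buf = try allocator.alloc(u8, @intCast(usize, num) * REG_KEYNAME_SIZE);
//     defer allocator.free(buf);
//     try regGetKeys(hd, buf.ptr, num);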
// Create a key
//
// @param hd - The open registry dir handle
// @param name - Name of the key to create
// @param type - Type of key (note cannot be a directory type)
// @param size - Size of the allocated value space
//
// @return 0 on success, < 0 on error
pub extern fn sceRegCreateKey(hd: RegHandle, name: []const u8, typec: c_int, size: SceSize) c_int;
pub fn regCreateKey(hd: RegHandle, name: []const u8, typec: c_int, size: SceSize) !void {
var res = sceRegCreateKey(hd, name, typec, size);
if (res < 0) {
return error.Unexpected;
}
}
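// Example sketch (not part of the original SDK; the key name and the REG_TYPE_INT
// type tag are illustrative assumptions): a freshly created key is normally
// populated right away via regSetKeyValue above:
//
//     const value: c_int = 1;
//     try regCreateKey(hd, "flag", REG_TYPE_INT, @sizeOf(c_int));
//     try regSetKeyValue(hd, "flag", @ptrCast(*const c_void, &value), @sizeOf(c_int));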
// Remove a registry (HONESTLY, DO NOT USE)
//
// @param reg - Filled out registry parameter
//
// @return 0 on success, < 0 on error
pub extern fn sceRegRemoveRegistry(reg: *RegParam) c_int;
pub fn regRemoveRegistry(reg: *RegParam) !void {
var res = sceRegRemoveRegistry(reg);
if (res < 0) {
return error.Unexpected;
}
} | src/psp/sdk/pspreg.zig |
pub const RADIUS_EXTENSION_VERSION = @as(u32, 1);
//--------------------------------------------------------------------------------
// Section: Types (66)
//--------------------------------------------------------------------------------
const CLSID_SdoMachine_Value = @import("../zig.zig").Guid.initString("e9218ae7-9e91-11d1-bf60-0080c7846bc0");
pub const CLSID_SdoMachine = &CLSID_SdoMachine_Value;
pub const ATTRIBUTEID = enum(u32) {
ATTRIBUTE_UNDEFINED = 0,
ATTRIBUTE_MIN_VALUE = 1,
// RADIUS_ATTRIBUTE_USER_NAME = 1, this enum value conflicts with ATTRIBUTE_MIN_VALUE
RADIUS_ATTRIBUTE_USER_PASSWORD = 2,
RADIUS_ATTRIBUTE_CHAP_PASSWORD = 3,
RADIUS_ATTRIBUTE_NAS_IP_ADDRESS = 4,
RADIUS_ATTRIBUTE_NAS_PORT = 5,
RADIUS_ATTRIBUTE_SERVICE_TYPE = 6,
RADIUS_ATTRIBUTE_FRAMED_PROTOCOL = 7,
RADIUS_ATTRIBUTE_FRAMED_IP_ADDRESS = 8,
RADIUS_ATTRIBUTE_FRAMED_IP_NETMASK = 9,
RADIUS_ATTRIBUTE_FRAMED_ROUTING = 10,
RADIUS_ATTRIBUTE_FILTER_ID = 11,
RADIUS_ATTRIBUTE_FRAMED_MTU = 12,
RADIUS_ATTRIBUTE_FRAMED_COMPRESSION = 13,
RADIUS_ATTRIBUTE_LOGIN_IP_HOST = 14,
RADIUS_ATTRIBUTE_LOGIN_SERVICE = 15,
RADIUS_ATTRIBUTE_LOGIN_TCP_PORT = 16,
RADIUS_ATTRIBUTE_UNASSIGNED1 = 17,
RADIUS_ATTRIBUTE_REPLY_MESSAGE = 18,
RADIUS_ATTRIBUTE_CALLBACK_NUMBER = 19,
RADIUS_ATTRIBUTE_CALLBACK_ID = 20,
RADIUS_ATTRIBUTE_UNASSIGNED2 = 21,
RADIUS_ATTRIBUTE_FRAMED_ROUTE = 22,
RADIUS_ATTRIBUTE_FRAMED_IPX_NETWORK = 23,
RADIUS_ATTRIBUTE_STATE = 24,
RADIUS_ATTRIBUTE_CLASS = 25,
RADIUS_ATTRIBUTE_VENDOR_SPECIFIC = 26,
RADIUS_ATTRIBUTE_SESSION_TIMEOUT = 27,
RADIUS_ATTRIBUTE_IDLE_TIMEOUT = 28,
RADIUS_ATTRIBUTE_TERMINATION_ACTION = 29,
RADIUS_ATTRIBUTE_CALLED_STATION_ID = 30,
RADIUS_ATTRIBUTE_CALLING_STATION_ID = 31,
RADIUS_ATTRIBUTE_NAS_IDENTIFIER = 32,
RADIUS_ATTRIBUTE_PROXY_STATE = 33,
RADIUS_ATTRIBUTE_LOGIN_LAT_SERVICE = 34,
RADIUS_ATTRIBUTE_LOGIN_LAT_NODE = 35,
RADIUS_ATTRIBUTE_LOGIN_LAT_GROUP = 36,
RADIUS_ATTRIBUTE_FRAMED_APPLETALK_LINK = 37,
RADIUS_ATTRIBUTE_FRAMED_APPLETALK_NET = 38,
RADIUS_ATTRIBUTE_FRAMED_APPLETALK_ZONE = 39,
RADIUS_ATTRIBUTE_ACCT_STATUS_TYPE = 40,
RADIUS_ATTRIBUTE_ACCT_DELAY_TIME = 41,
RADIUS_ATTRIBUTE_ACCT_INPUT_OCTETS = 42,
RADIUS_ATTRIBUTE_ACCT_OUTPUT_OCTETS = 43,
RADIUS_ATTRIBUTE_ACCT_SESSION_ID = 44,
RADIUS_ATTRIBUTE_ACCT_AUTHENTIC = 45,
RADIUS_ATTRIBUTE_ACCT_SESSION_TIME = 46,
RADIUS_ATTRIBUTE_ACCT_INPUT_PACKETS = 47,
RADIUS_ATTRIBUTE_ACCT_OUTPUT_PACKETS = 48,
RADIUS_ATTRIBUTE_ACCT_TERMINATE_CAUSE = 49,
RADIUS_ATTRIBUTE_ACCT_MULTI_SSN_ID = 50,
RADIUS_ATTRIBUTE_ACCT_LINK_COUNT = 51,
RADIUS_ATTRIBUTE_CHAP_CHALLENGE = 60,
RADIUS_ATTRIBUTE_NAS_PORT_TYPE = 61,
RADIUS_ATTRIBUTE_PORT_LIMIT = 62,
RADIUS_ATTRIBUTE_LOGIN_LAT_PORT = 63,
RADIUS_ATTRIBUTE_TUNNEL_TYPE = 64,
RADIUS_ATTRIBUTE_TUNNEL_MEDIUM_TYPE = 65,
RADIUS_ATTRIBUTE_TUNNEL_CLIENT_ENDPT = 66,
RADIUS_ATTRIBUTE_TUNNEL_SERVER_ENDPT = 67,
RADIUS_ATTRIBUTE_ACCT_TUNNEL_CONN = 68,
RADIUS_ATTRIBUTE_TUNNEL_PASSWORD = 69,
RADIUS_ATTRIBUTE_ARAP_PASSWORD = 70,
RADIUS_ATTRIBUTE_ARAP_FEATURES = 71,
RADIUS_ATTRIBUTE_ARAP_ZONE_ACCESS = 72,
RADIUS_ATTRIBUTE_ARAP_SECURITY = 73,
RADIUS_ATTRIBUTE_ARAP_SECURITY_DATA = 74,
RADIUS_ATTRIBUTE_PASSWORD_RETRY = 75,
RADIUS_ATTRIBUTE_PROMPT = 76,
RADIUS_ATTRIBUTE_CONNECT_INFO = 77,
RADIUS_ATTRIBUTE_CONFIGURATION_TOKEN = 78,
RADIUS_ATTRIBUTE_EAP_MESSAGE = 79,
RADIUS_ATTRIBUTE_SIGNATURE = 80,
RADIUS_ATTRIBUTE_TUNNEL_PVT_GROUP_ID = 81,
RADIUS_ATTRIBUTE_TUNNEL_ASSIGNMENT_ID = 82,
RADIUS_ATTRIBUTE_TUNNEL_PREFERENCE = 83,
RADIUS_ATTRIBUTE_ARAP_CHALLENGE_RESPONSE = 84,
RADIUS_ATTRIBUTE_ACCT_INTERIM_INTERVAL = 85,
RADIUS_ATTRIBUTE_NAS_IPv6_ADDRESS = 95,
RADIUS_ATTRIBUTE_FRAMED_INTERFACE_ID = 96,
RADIUS_ATTRIBUTE_FRAMED_IPv6_PREFIX = 97,
RADIUS_ATTRIBUTE_LOGIN_IPv6_HOST = 98,
RADIUS_ATTRIBUTE_FRAMED_IPv6_ROUTE = 99,
RADIUS_ATTRIBUTE_FRAMED_IPv6_POOL = 100,
IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IP_ADDRESS = 4096,
IAS_ATTRIBUTE_SAVED_RADIUS_CALLBACK_NUMBER = 4097,
IAS_ATTRIBUTE_NP_CALLING_STATION_ID = 4098,
IAS_ATTRIBUTE_SAVED_NP_CALLING_STATION_ID = 4099,
IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_ROUTE = 4100,
IAS_ATTRIBUTE_IGNORE_USER_DIALIN_PROPERTIES = 4101,
IAS_ATTRIBUTE_NP_TIME_OF_DAY = 4102,
IAS_ATTRIBUTE_NP_CALLED_STATION_ID = 4103,
IAS_ATTRIBUTE_NP_ALLOWED_PORT_TYPES = 4104,
IAS_ATTRIBUTE_NP_AUTHENTICATION_TYPE = 4105,
IAS_ATTRIBUTE_NP_ALLOWED_EAP_TYPE = 4106,
IAS_ATTRIBUTE_SHARED_SECRET = 4107,
IAS_ATTRIBUTE_CLIENT_IP_ADDRESS = 4108,
IAS_ATTRIBUTE_CLIENT_PACKET_HEADER = 4109,
IAS_ATTRIBUTE_TOKEN_GROUPS = 4110,
IAS_ATTRIBUTE_ALLOW_DIALIN = 4111,
IAS_ATTRIBUTE_REQUEST_ID = 4112,
IAS_ATTRIBUTE_MANIPULATION_TARGET = 4113,
IAS_ATTRIBUTE_MANIPULATION_RULE = 4114,
IAS_ATTRIBUTE_ORIGINAL_USER_NAME = 4115,
IAS_ATTRIBUTE_CLIENT_VENDOR_TYPE = 4116,
IAS_ATTRIBUTE_CLIENT_UDP_PORT = 4117,
MS_ATTRIBUTE_CHAP_CHALLENGE = 4118,
MS_ATTRIBUTE_CHAP_RESPONSE = 4119,
MS_ATTRIBUTE_CHAP_DOMAIN = 4120,
MS_ATTRIBUTE_CHAP_ERROR = 4121,
MS_ATTRIBUTE_CHAP_CPW1 = 4122,
MS_ATTRIBUTE_CHAP_CPW2 = 4123,
MS_ATTRIBUTE_CHAP_LM_ENC_PW = 4124,
MS_ATTRIBUTE_CHAP_NT_ENC_PW = 4125,
MS_ATTRIBUTE_CHAP_MPPE_KEYS = 4126,
IAS_ATTRIBUTE_AUTHENTICATION_TYPE = 4127,
IAS_ATTRIBUTE_CLIENT_NAME = 4128,
IAS_ATTRIBUTE_NT4_ACCOUNT_NAME = 4129,
IAS_ATTRIBUTE_FULLY_QUALIFIED_USER_NAME = 4130,
IAS_ATTRIBUTE_NTGROUPS = 4131,
IAS_ATTRIBUTE_EAP_FRIENDLY_NAME = 4132,
IAS_ATTRIBUTE_AUTH_PROVIDER_TYPE = 4133,
MS_ATTRIBUTE_ACCT_AUTH_TYPE = 4134,
MS_ATTRIBUTE_ACCT_EAP_TYPE = 4135,
IAS_ATTRIBUTE_PACKET_TYPE = 4136,
IAS_ATTRIBUTE_AUTH_PROVIDER_NAME = 4137,
IAS_ATTRIBUTE_ACCT_PROVIDER_TYPE = 4138,
IAS_ATTRIBUTE_ACCT_PROVIDER_NAME = 4139,
MS_ATTRIBUTE_MPPE_SEND_KEY = 4140,
MS_ATTRIBUTE_MPPE_RECV_KEY = 4141,
IAS_ATTRIBUTE_REASON_CODE = 4142,
MS_ATTRIBUTE_FILTER = 4143,
MS_ATTRIBUTE_CHAP2_RESPONSE = 4144,
MS_ATTRIBUTE_CHAP2_SUCCESS = 4145,
MS_ATTRIBUTE_CHAP2_CPW = 4146,
MS_ATTRIBUTE_RAS_VENDOR = 4147,
MS_ATTRIBUTE_RAS_VERSION = 4148,
IAS_ATTRIBUTE_NP_NAME = 4149,
MS_ATTRIBUTE_PRIMARY_DNS_SERVER = 4150,
MS_ATTRIBUTE_SECONDARY_DNS_SERVER = 4151,
MS_ATTRIBUTE_PRIMARY_NBNS_SERVER = 4152,
MS_ATTRIBUTE_SECONDARY_NBNS_SERVER = 4153,
IAS_ATTRIBUTE_PROXY_POLICY_NAME = 4154,
IAS_ATTRIBUTE_PROVIDER_TYPE = 4155,
IAS_ATTRIBUTE_PROVIDER_NAME = 4156,
IAS_ATTRIBUTE_REMOTE_SERVER_ADDRESS = 4157,
IAS_ATTRIBUTE_GENERATE_CLASS_ATTRIBUTE = 4158,
MS_ATTRIBUTE_RAS_CLIENT_NAME = 4159,
MS_ATTRIBUTE_RAS_CLIENT_VERSION = 4160,
IAS_ATTRIBUTE_ALLOWED_CERTIFICATE_EKU = 4161,
IAS_ATTRIBUTE_EXTENSION_STATE = 4162,
IAS_ATTRIBUTE_GENERATE_SESSION_TIMEOUT = 4163,
IAS_ATTRIBUTE_SESSION_TIMEOUT = 4164,
MS_ATTRIBUTE_QUARANTINE_IPFILTER = 4165,
MS_ATTRIBUTE_QUARANTINE_SESSION_TIMEOUT = 4166,
MS_ATTRIBUTE_USER_SECURITY_IDENTITY = 4167,
IAS_ATTRIBUTE_REMOTE_RADIUS_TO_WINDOWS_USER_MAPPING = 4168,
IAS_ATTRIBUTE_PASSPORT_USER_MAPPING_UPN_SUFFIX = 4169,
IAS_ATTRIBUTE_TUNNEL_TAG = 4170,
IAS_ATTRIBUTE_NP_PEAPUPFRONT_ENABLED = 4171,
IAS_ATTRIBUTE_CERTIFICATE_EKU = 8097,
IAS_ATTRIBUTE_EAP_CONFIG = 8098,
IAS_ATTRIBUTE_PEAP_EMBEDDED_EAP_TYPEID = 8099,
IAS_ATTRIBUTE_PEAP_FAST_ROAMED_SESSION = 8100,
IAS_ATTRIBUTE_EAP_TYPEID = 8101,
MS_ATTRIBUTE_EAP_TLV = 8102,
IAS_ATTRIBUTE_REJECT_REASON_CODE = 8103,
IAS_ATTRIBUTE_PROXY_EAP_CONFIG = 8104,
IAS_ATTRIBUTE_EAP_SESSION = 8105,
IAS_ATTRIBUTE_IS_REPLAY = 8106,
    IAS_ATTRIBUTE_CLEAR_TEXT_PASSWORD = 8107,
MS_ATTRIBUTE_IDENTITY_TYPE = 8108,
MS_ATTRIBUTE_SERVICE_CLASS = 8109,
MS_ATTRIBUTE_QUARANTINE_USER_CLASS = 8110,
MS_ATTRIBUTE_QUARANTINE_STATE = 8111,
IAS_ATTRIBUTE_OVERRIDE_RAP_AUTH = 8112,
IAS_ATTRIBUTE_PEAP_CHANNEL_UP = 8113,
IAS_ATTRIBUTE_NAME_MAPPED = 8114,
IAS_ATTRIBUTE_POLICY_ENFORCED = 8115,
IAS_ATTRIBUTE_MACHINE_NTGROUPS = 8116,
IAS_ATTRIBUTE_USER_NTGROUPS = 8117,
IAS_ATTRIBUTE_MACHINE_TOKEN_GROUPS = 8118,
IAS_ATTRIBUTE_USER_TOKEN_GROUPS = 8119,
MS_ATTRIBUTE_QUARANTINE_GRACE_TIME = 8120,
IAS_ATTRIBUTE_QUARANTINE_URL = 8121,
IAS_ATTRIBUTE_QUARANTINE_FIXUP_SERVERS = 8122,
MS_ATTRIBUTE_NOT_QUARANTINE_CAPABLE = 8123,
IAS_ATTRIBUTE_QUARANTINE_SYSTEM_HEALTH_RESULT = 8124,
IAS_ATTRIBUTE_QUARANTINE_SYSTEM_HEALTH_VALIDATORS = 8125,
IAS_ATTRIBUTE_MACHINE_NAME = 8126,
IAS_ATTRIBUTE_NT4_MACHINE_NAME = 8127,
IAS_ATTRIBUTE_QUARANTINE_SESSION_HANDLE = 8128,
IAS_ATTRIBUTE_FULLY_QUALIFIED_MACHINE_NAME = 8129,
IAS_ATTRIBUTE_QUARANTINE_FIXUP_SERVERS_CONFIGURATION = 8130,
IAS_ATTRIBUTE_CLIENT_QUARANTINE_COMPATIBLE = 8131,
MS_ATTRIBUTE_NETWORK_ACCESS_SERVER_TYPE = 8132,
IAS_ATTRIBUTE_QUARANTINE_SESSION_ID = 8133,
MS_ATTRIBUTE_AFW_QUARANTINE_ZONE = 8134,
MS_ATTRIBUTE_AFW_PROTECTION_LEVEL = 8135,
IAS_ATTRIBUTE_QUARANTINE_UPDATE_NON_COMPLIANT = 8136,
IAS_ATTRIBUTE_REQUEST_START_TIME = 8137,
MS_ATTRIBUTE_MACHINE_NAME = 8138,
IAS_ATTRIBUTE_CLIENT_IPv6_ADDRESS = 8139,
IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_INTERFACE_ID = 8140,
IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IPv6_PREFIX = 8141,
IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IPv6_ROUTE = 8142,
MS_ATTRIBUTE_QUARANTINE_GRACE_TIME_CONFIGURATION = 8143,
MS_ATTRIBUTE_IPv6_FILTER = 8144,
MS_ATTRIBUTE_IPV4_REMEDIATION_SERVERS = 8145,
MS_ATTRIBUTE_IPV6_REMEDIATION_SERVERS = 8146,
IAS_ATTRIBUTE_PROXY_RETRY_COUNT = 8147,
IAS_ATTRIBUTE_MACHINE_INVENTORY = 8148,
IAS_ATTRIBUTE_ABSOLUTE_TIME = 8149,
MS_ATTRIBUTE_QUARANTINE_SOH = 8150,
IAS_ATTRIBUTE_EAP_TYPES_CONFIGURED_IN_PROXYPOLICY = 8151,
MS_ATTRIBUTE_HCAP_LOCATION_GROUP_NAME = 8152,
MS_ATTRIBUTE_EXTENDED_QUARANTINE_STATE = 8153,
IAS_ATTRIBUTE_SOH_CARRIER_EAPTLV = 8154,
MS_ATTRIBUTE_HCAP_USER_GROUPS = 8155,
IAS_ATTRIBUTE_SAVED_MACHINE_HEALTHCHECK_ONLY = 8156,
IAS_ATTRIBUTE_POLICY_EVALUATED_SHV = 8157,
MS_ATTRIBUTE_RAS_CORRELATION_ID = 8158,
MS_ATTRIBUTE_HCAP_USER_NAME = 8159,
IAS_ATTRIBUTE_NT4_HCAP_ACCOUNT_NAME = 8160,
IAS_ATTRIBUTE_USER_TOKEN_SID = 8161,
IAS_ATTRIBUTE_MACHINE_TOKEN_SID = 8162,
IAS_ATTRIBUTE_MACHINE_VALIDATED = 8163,
MS_ATTRIBUTE_USER_IPv4_ADDRESS = 8164,
MS_ATTRIBUTE_USER_IPv6_ADDRESS = 8165,
MS_ATTRIBUTE_TSG_DEVICE_REDIRECTION = 8166,
IAS_ATTRIBUTE_ACCEPT_REASON_CODE = 8167,
IAS_ATTRIBUTE_LOGGING_RESULT = 8168,
IAS_ATTRIBUTE_SERVER_IP_ADDRESS = 8169,
IAS_ATTRIBUTE_SERVER_IPv6_ADDRESS = 8170,
IAS_ATTRIBUTE_RADIUS_USERNAME_ENCODING_ASCII = 8171,
MS_ATTRIBUTE_RAS_ROUTING_DOMAIN_ID = 8172,
IAS_ATTRIBUTE_CERTIFICATE_THUMBPRINT = 8250,
RAS_ATTRIBUTE_ENCRYPTION_TYPE = 4294967206,
RAS_ATTRIBUTE_ENCRYPTION_POLICY = 4294967207,
RAS_ATTRIBUTE_BAP_REQUIRED = 4294967208,
RAS_ATTRIBUTE_BAP_LINE_DOWN_TIME = 4294967209,
RAS_ATTRIBUTE_BAP_LINE_DOWN_LIMIT = 4294967210,
};
pub const ATTRIBUTE_UNDEFINED = ATTRIBUTEID.ATTRIBUTE_UNDEFINED;
pub const ATTRIBUTE_MIN_VALUE = ATTRIBUTEID.ATTRIBUTE_MIN_VALUE;
pub const RADIUS_ATTRIBUTE_USER_NAME = ATTRIBUTEID.ATTRIBUTE_MIN_VALUE;
pub const RADIUS_ATTRIBUTE_USER_PASSWORD = ATTRIBUTEID.RADIUS_ATTRIBUTE_USER_PASSWORD;
pub const RADIUS_ATTRIBUTE_CHAP_PASSWORD = ATTRIBUTEID.RADIUS_ATTRIBUTE_CHAP_PASSWORD;
pub const RADIUS_ATTRIBUTE_NAS_IP_ADDRESS = ATTRIBUTEID.RADIUS_ATTRIBUTE_NAS_IP_ADDRESS;
pub const RADIUS_ATTRIBUTE_NAS_PORT = ATTRIBUTEID.RADIUS_ATTRIBUTE_NAS_PORT;
pub const RADIUS_ATTRIBUTE_SERVICE_TYPE = ATTRIBUTEID.RADIUS_ATTRIBUTE_SERVICE_TYPE;
pub const RADIUS_ATTRIBUTE_FRAMED_PROTOCOL = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_PROTOCOL;
pub const RADIUS_ATTRIBUTE_FRAMED_IP_ADDRESS = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_IP_ADDRESS;
pub const RADIUS_ATTRIBUTE_FRAMED_IP_NETMASK = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_IP_NETMASK;
pub const RADIUS_ATTRIBUTE_FRAMED_ROUTING = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_ROUTING;
pub const RADIUS_ATTRIBUTE_FILTER_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_FILTER_ID;
pub const RADIUS_ATTRIBUTE_FRAMED_MTU = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_MTU;
pub const RADIUS_ATTRIBUTE_FRAMED_COMPRESSION = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_COMPRESSION;
pub const RADIUS_ATTRIBUTE_LOGIN_IP_HOST = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_IP_HOST;
pub const RADIUS_ATTRIBUTE_LOGIN_SERVICE = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_SERVICE;
pub const RADIUS_ATTRIBUTE_LOGIN_TCP_PORT = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_TCP_PORT;
pub const RADIUS_ATTRIBUTE_UNASSIGNED1 = ATTRIBUTEID.RADIUS_ATTRIBUTE_UNASSIGNED1;
pub const RADIUS_ATTRIBUTE_REPLY_MESSAGE = ATTRIBUTEID.RADIUS_ATTRIBUTE_REPLY_MESSAGE;
pub const RADIUS_ATTRIBUTE_CALLBACK_NUMBER = ATTRIBUTEID.RADIUS_ATTRIBUTE_CALLBACK_NUMBER;
pub const RADIUS_ATTRIBUTE_CALLBACK_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_CALLBACK_ID;
pub const RADIUS_ATTRIBUTE_UNASSIGNED2 = ATTRIBUTEID.RADIUS_ATTRIBUTE_UNASSIGNED2;
pub const RADIUS_ATTRIBUTE_FRAMED_ROUTE = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_ROUTE;
pub const RADIUS_ATTRIBUTE_FRAMED_IPX_NETWORK = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_IPX_NETWORK;
pub const RADIUS_ATTRIBUTE_STATE = ATTRIBUTEID.RADIUS_ATTRIBUTE_STATE;
pub const RADIUS_ATTRIBUTE_CLASS = ATTRIBUTEID.RADIUS_ATTRIBUTE_CLASS;
pub const RADIUS_ATTRIBUTE_VENDOR_SPECIFIC = ATTRIBUTEID.RADIUS_ATTRIBUTE_VENDOR_SPECIFIC;
pub const RADIUS_ATTRIBUTE_SESSION_TIMEOUT = ATTRIBUTEID.RADIUS_ATTRIBUTE_SESSION_TIMEOUT;
pub const RADIUS_ATTRIBUTE_IDLE_TIMEOUT = ATTRIBUTEID.RADIUS_ATTRIBUTE_IDLE_TIMEOUT;
pub const RADIUS_ATTRIBUTE_TERMINATION_ACTION = ATTRIBUTEID.RADIUS_ATTRIBUTE_TERMINATION_ACTION;
pub const RADIUS_ATTRIBUTE_CALLED_STATION_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_CALLED_STATION_ID;
pub const RADIUS_ATTRIBUTE_CALLING_STATION_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_CALLING_STATION_ID;
pub const RADIUS_ATTRIBUTE_NAS_IDENTIFIER = ATTRIBUTEID.RADIUS_ATTRIBUTE_NAS_IDENTIFIER;
pub const RADIUS_ATTRIBUTE_PROXY_STATE = ATTRIBUTEID.RADIUS_ATTRIBUTE_PROXY_STATE;
pub const RADIUS_ATTRIBUTE_LOGIN_LAT_SERVICE = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_LAT_SERVICE;
pub const RADIUS_ATTRIBUTE_LOGIN_LAT_NODE = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_LAT_NODE;
pub const RADIUS_ATTRIBUTE_LOGIN_LAT_GROUP = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_LAT_GROUP;
pub const RADIUS_ATTRIBUTE_FRAMED_APPLETALK_LINK = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_APPLETALK_LINK;
pub const RADIUS_ATTRIBUTE_FRAMED_APPLETALK_NET = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_APPLETALK_NET;
pub const RADIUS_ATTRIBUTE_FRAMED_APPLETALK_ZONE = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_APPLETALK_ZONE;
pub const RADIUS_ATTRIBUTE_ACCT_STATUS_TYPE = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_STATUS_TYPE;
pub const RADIUS_ATTRIBUTE_ACCT_DELAY_TIME = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_DELAY_TIME;
pub const RADIUS_ATTRIBUTE_ACCT_INPUT_OCTETS = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_INPUT_OCTETS;
pub const RADIUS_ATTRIBUTE_ACCT_OUTPUT_OCTETS = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_OUTPUT_OCTETS;
pub const RADIUS_ATTRIBUTE_ACCT_SESSION_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_SESSION_ID;
pub const RADIUS_ATTRIBUTE_ACCT_AUTHENTIC = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_AUTHENTIC;
pub const RADIUS_ATTRIBUTE_ACCT_SESSION_TIME = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_SESSION_TIME;
pub const RADIUS_ATTRIBUTE_ACCT_INPUT_PACKETS = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_INPUT_PACKETS;
pub const RADIUS_ATTRIBUTE_ACCT_OUTPUT_PACKETS = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_OUTPUT_PACKETS;
pub const RADIUS_ATTRIBUTE_ACCT_TERMINATE_CAUSE = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_TERMINATE_CAUSE;
pub const RADIUS_ATTRIBUTE_ACCT_MULTI_SSN_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_MULTI_SSN_ID;
pub const RADIUS_ATTRIBUTE_ACCT_LINK_COUNT = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_LINK_COUNT;
pub const RADIUS_ATTRIBUTE_CHAP_CHALLENGE = ATTRIBUTEID.RADIUS_ATTRIBUTE_CHAP_CHALLENGE;
pub const RADIUS_ATTRIBUTE_NAS_PORT_TYPE = ATTRIBUTEID.RADIUS_ATTRIBUTE_NAS_PORT_TYPE;
pub const RADIUS_ATTRIBUTE_PORT_LIMIT = ATTRIBUTEID.RADIUS_ATTRIBUTE_PORT_LIMIT;
pub const RADIUS_ATTRIBUTE_LOGIN_LAT_PORT = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_LAT_PORT;
pub const RADIUS_ATTRIBUTE_TUNNEL_TYPE = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_TYPE;
pub const RADIUS_ATTRIBUTE_TUNNEL_MEDIUM_TYPE = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_MEDIUM_TYPE;
pub const RADIUS_ATTRIBUTE_TUNNEL_CLIENT_ENDPT = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_CLIENT_ENDPT;
pub const RADIUS_ATTRIBUTE_TUNNEL_SERVER_ENDPT = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_SERVER_ENDPT;
pub const RADIUS_ATTRIBUTE_ACCT_TUNNEL_CONN = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_TUNNEL_CONN;
pub const RADIUS_ATTRIBUTE_TUNNEL_PASSWORD = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_PASSWORD;
pub const RADIUS_ATTRIBUTE_ARAP_PASSWORD = ATTRIBUTEID.RADIUS_ATTRIBUTE_ARAP_PASSWORD;
pub const RADIUS_ATTRIBUTE_ARAP_FEATURES = ATTRIBUTEID.RADIUS_ATTRIBUTE_ARAP_FEATURES;
pub const RADIUS_ATTRIBUTE_ARAP_ZONE_ACCESS = ATTRIBUTEID.RADIUS_ATTRIBUTE_ARAP_ZONE_ACCESS;
pub const RADIUS_ATTRIBUTE_ARAP_SECURITY = ATTRIBUTEID.RADIUS_ATTRIBUTE_ARAP_SECURITY;
pub const RADIUS_ATTRIBUTE_ARAP_SECURITY_DATA = ATTRIBUTEID.RADIUS_ATTRIBUTE_ARAP_SECURITY_DATA;
pub const RADIUS_ATTRIBUTE_PASSWORD_RETRY = ATTRIBUTEID.RADIUS_ATTRIBUTE_PASSWORD_RETRY;
pub const RADIUS_ATTRIBUTE_PROMPT = ATTRIBUTEID.RADIUS_ATTRIBUTE_PROMPT;
pub const RADIUS_ATTRIBUTE_CONNECT_INFO = ATTRIBUTEID.RADIUS_ATTRIBUTE_CONNECT_INFO;
pub const RADIUS_ATTRIBUTE_CONFIGURATION_TOKEN = ATTRIBUTEID.RADIUS_ATTRIBUTE_CONFIGURATION_TOKEN;
pub const RADIUS_ATTRIBUTE_EAP_MESSAGE = ATTRIBUTEID.RADIUS_ATTRIBUTE_EAP_MESSAGE;
pub const RADIUS_ATTRIBUTE_SIGNATURE = ATTRIBUTEID.RADIUS_ATTRIBUTE_SIGNATURE;
pub const RADIUS_ATTRIBUTE_TUNNEL_PVT_GROUP_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_PVT_GROUP_ID;
pub const RADIUS_ATTRIBUTE_TUNNEL_ASSIGNMENT_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_ASSIGNMENT_ID;
pub const RADIUS_ATTRIBUTE_TUNNEL_PREFERENCE = ATTRIBUTEID.RADIUS_ATTRIBUTE_TUNNEL_PREFERENCE;
pub const RADIUS_ATTRIBUTE_ARAP_CHALLENGE_RESPONSE = ATTRIBUTEID.RADIUS_ATTRIBUTE_ARAP_CHALLENGE_RESPONSE;
pub const RADIUS_ATTRIBUTE_ACCT_INTERIM_INTERVAL = ATTRIBUTEID.RADIUS_ATTRIBUTE_ACCT_INTERIM_INTERVAL;
pub const RADIUS_ATTRIBUTE_NAS_IPv6_ADDRESS = ATTRIBUTEID.RADIUS_ATTRIBUTE_NAS_IPv6_ADDRESS;
pub const RADIUS_ATTRIBUTE_FRAMED_INTERFACE_ID = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_INTERFACE_ID;
pub const RADIUS_ATTRIBUTE_FRAMED_IPv6_PREFIX = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_IPv6_PREFIX;
pub const RADIUS_ATTRIBUTE_LOGIN_IPv6_HOST = ATTRIBUTEID.RADIUS_ATTRIBUTE_LOGIN_IPv6_HOST;
pub const RADIUS_ATTRIBUTE_FRAMED_IPv6_ROUTE = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_IPv6_ROUTE;
pub const RADIUS_ATTRIBUTE_FRAMED_IPv6_POOL = ATTRIBUTEID.RADIUS_ATTRIBUTE_FRAMED_IPv6_POOL;
pub const IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IP_ADDRESS = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IP_ADDRESS;
pub const IAS_ATTRIBUTE_SAVED_RADIUS_CALLBACK_NUMBER = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_RADIUS_CALLBACK_NUMBER;
pub const IAS_ATTRIBUTE_NP_CALLING_STATION_ID = ATTRIBUTEID.IAS_ATTRIBUTE_NP_CALLING_STATION_ID;
pub const IAS_ATTRIBUTE_SAVED_NP_CALLING_STATION_ID = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_NP_CALLING_STATION_ID;
pub const IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_ROUTE = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_ROUTE;
pub const IAS_ATTRIBUTE_IGNORE_USER_DIALIN_PROPERTIES = ATTRIBUTEID.IAS_ATTRIBUTE_IGNORE_USER_DIALIN_PROPERTIES;
pub const IAS_ATTRIBUTE_NP_TIME_OF_DAY = ATTRIBUTEID.IAS_ATTRIBUTE_NP_TIME_OF_DAY;
pub const IAS_ATTRIBUTE_NP_CALLED_STATION_ID = ATTRIBUTEID.IAS_ATTRIBUTE_NP_CALLED_STATION_ID;
pub const IAS_ATTRIBUTE_NP_ALLOWED_PORT_TYPES = ATTRIBUTEID.IAS_ATTRIBUTE_NP_ALLOWED_PORT_TYPES;
pub const IAS_ATTRIBUTE_NP_AUTHENTICATION_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_NP_AUTHENTICATION_TYPE;
pub const IAS_ATTRIBUTE_NP_ALLOWED_EAP_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_NP_ALLOWED_EAP_TYPE;
pub const IAS_ATTRIBUTE_SHARED_SECRET = ATTRIBUTEID.IAS_ATTRIBUTE_SHARED_SECRET;
pub const IAS_ATTRIBUTE_CLIENT_IP_ADDRESS = ATTRIBUTEID.IAS_ATTRIBUTE_CLIENT_IP_ADDRESS;
pub const IAS_ATTRIBUTE_CLIENT_PACKET_HEADER = ATTRIBUTEID.IAS_ATTRIBUTE_CLIENT_PACKET_HEADER;
pub const IAS_ATTRIBUTE_TOKEN_GROUPS = ATTRIBUTEID.IAS_ATTRIBUTE_TOKEN_GROUPS;
pub const IAS_ATTRIBUTE_ALLOW_DIALIN = ATTRIBUTEID.IAS_ATTRIBUTE_ALLOW_DIALIN;
pub const IAS_ATTRIBUTE_REQUEST_ID = ATTRIBUTEID.IAS_ATTRIBUTE_REQUEST_ID;
pub const IAS_ATTRIBUTE_MANIPULATION_TARGET = ATTRIBUTEID.IAS_ATTRIBUTE_MANIPULATION_TARGET;
pub const IAS_ATTRIBUTE_MANIPULATION_RULE = ATTRIBUTEID.IAS_ATTRIBUTE_MANIPULATION_RULE;
pub const IAS_ATTRIBUTE_ORIGINAL_USER_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_ORIGINAL_USER_NAME;
pub const IAS_ATTRIBUTE_CLIENT_VENDOR_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_CLIENT_VENDOR_TYPE;
pub const IAS_ATTRIBUTE_CLIENT_UDP_PORT = ATTRIBUTEID.IAS_ATTRIBUTE_CLIENT_UDP_PORT;
pub const MS_ATTRIBUTE_CHAP_CHALLENGE = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_CHALLENGE;
pub const MS_ATTRIBUTE_CHAP_RESPONSE = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_RESPONSE;
pub const MS_ATTRIBUTE_CHAP_DOMAIN = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_DOMAIN;
pub const MS_ATTRIBUTE_CHAP_ERROR = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_ERROR;
pub const MS_ATTRIBUTE_CHAP_CPW1 = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_CPW1;
pub const MS_ATTRIBUTE_CHAP_CPW2 = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_CPW2;
pub const MS_ATTRIBUTE_CHAP_LM_ENC_PW = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_LM_ENC_PW;
pub const MS_ATTRIBUTE_CHAP_NT_ENC_PW = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_NT_ENC_PW;
pub const MS_ATTRIBUTE_CHAP_MPPE_KEYS = ATTRIBUTEID.MS_ATTRIBUTE_CHAP_MPPE_KEYS;
pub const IAS_ATTRIBUTE_AUTHENTICATION_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_AUTHENTICATION_TYPE;
pub const IAS_ATTRIBUTE_CLIENT_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_CLIENT_NAME;
pub const IAS_ATTRIBUTE_NT4_ACCOUNT_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_NT4_ACCOUNT_NAME;
pub const IAS_ATTRIBUTE_FULLY_QUALIFIED_USER_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_FULLY_QUALIFIED_USER_NAME;
pub const IAS_ATTRIBUTE_NTGROUPS = ATTRIBUTEID.IAS_ATTRIBUTE_NTGROUPS;
pub const IAS_ATTRIBUTE_EAP_FRIENDLY_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_EAP_FRIENDLY_NAME;
pub const IAS_ATTRIBUTE_AUTH_PROVIDER_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_AUTH_PROVIDER_TYPE;
pub const MS_ATTRIBUTE_ACCT_AUTH_TYPE = ATTRIBUTEID.MS_ATTRIBUTE_ACCT_AUTH_TYPE;
pub const MS_ATTRIBUTE_ACCT_EAP_TYPE = ATTRIBUTEID.MS_ATTRIBUTE_ACCT_EAP_TYPE;
pub const IAS_ATTRIBUTE_PACKET_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_PACKET_TYPE;
pub const IAS_ATTRIBUTE_AUTH_PROVIDER_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_AUTH_PROVIDER_NAME;
pub const IAS_ATTRIBUTE_ACCT_PROVIDER_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_ACCT_PROVIDER_TYPE;
pub const IAS_ATTRIBUTE_ACCT_PROVIDER_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_ACCT_PROVIDER_NAME;
pub const MS_ATTRIBUTE_MPPE_SEND_KEY = ATTRIBUTEID.MS_ATTRIBUTE_MPPE_SEND_KEY;
pub const MS_ATTRIBUTE_MPPE_RECV_KEY = ATTRIBUTEID.MS_ATTRIBUTE_MPPE_RECV_KEY;
pub const IAS_ATTRIBUTE_REASON_CODE = ATTRIBUTEID.IAS_ATTRIBUTE_REASON_CODE;
pub const MS_ATTRIBUTE_FILTER = ATTRIBUTEID.MS_ATTRIBUTE_FILTER;
pub const MS_ATTRIBUTE_CHAP2_RESPONSE = ATTRIBUTEID.MS_ATTRIBUTE_CHAP2_RESPONSE;
pub const MS_ATTRIBUTE_CHAP2_SUCCESS = ATTRIBUTEID.MS_ATTRIBUTE_CHAP2_SUCCESS;
pub const MS_ATTRIBUTE_CHAP2_CPW = ATTRIBUTEID.MS_ATTRIBUTE_CHAP2_CPW;
pub const MS_ATTRIBUTE_RAS_VENDOR = ATTRIBUTEID.MS_ATTRIBUTE_RAS_VENDOR;
pub const MS_ATTRIBUTE_RAS_VERSION = ATTRIBUTEID.MS_ATTRIBUTE_RAS_VERSION;
pub const IAS_ATTRIBUTE_NP_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_NP_NAME;
pub const MS_ATTRIBUTE_PRIMARY_DNS_SERVER = ATTRIBUTEID.MS_ATTRIBUTE_PRIMARY_DNS_SERVER;
pub const MS_ATTRIBUTE_SECONDARY_DNS_SERVER = ATTRIBUTEID.MS_ATTRIBUTE_SECONDARY_DNS_SERVER;
pub const MS_ATTRIBUTE_PRIMARY_NBNS_SERVER = ATTRIBUTEID.MS_ATTRIBUTE_PRIMARY_NBNS_SERVER;
pub const MS_ATTRIBUTE_SECONDARY_NBNS_SERVER = ATTRIBUTEID.MS_ATTRIBUTE_SECONDARY_NBNS_SERVER;
pub const IAS_ATTRIBUTE_PROXY_POLICY_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_PROXY_POLICY_NAME;
pub const IAS_ATTRIBUTE_PROVIDER_TYPE = ATTRIBUTEID.IAS_ATTRIBUTE_PROVIDER_TYPE;
pub const IAS_ATTRIBUTE_PROVIDER_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_PROVIDER_NAME;
pub const IAS_ATTRIBUTE_REMOTE_SERVER_ADDRESS = ATTRIBUTEID.IAS_ATTRIBUTE_REMOTE_SERVER_ADDRESS;
pub const IAS_ATTRIBUTE_GENERATE_CLASS_ATTRIBUTE = ATTRIBUTEID.IAS_ATTRIBUTE_GENERATE_CLASS_ATTRIBUTE;
pub const MS_ATTRIBUTE_RAS_CLIENT_NAME = ATTRIBUTEID.MS_ATTRIBUTE_RAS_CLIENT_NAME;
pub const MS_ATTRIBUTE_RAS_CLIENT_VERSION = ATTRIBUTEID.MS_ATTRIBUTE_RAS_CLIENT_VERSION;
pub const IAS_ATTRIBUTE_ALLOWED_CERTIFICATE_EKU = ATTRIBUTEID.IAS_ATTRIBUTE_ALLOWED_CERTIFICATE_EKU;
pub const IAS_ATTRIBUTE_EXTENSION_STATE = ATTRIBUTEID.IAS_ATTRIBUTE_EXTENSION_STATE;
pub const IAS_ATTRIBUTE_GENERATE_SESSION_TIMEOUT = ATTRIBUTEID.IAS_ATTRIBUTE_GENERATE_SESSION_TIMEOUT;
pub const IAS_ATTRIBUTE_SESSION_TIMEOUT = ATTRIBUTEID.IAS_ATTRIBUTE_SESSION_TIMEOUT;
pub const MS_ATTRIBUTE_QUARANTINE_IPFILTER = ATTRIBUTEID.MS_ATTRIBUTE_QUARANTINE_IPFILTER;
pub const MS_ATTRIBUTE_QUARANTINE_SESSION_TIMEOUT = ATTRIBUTEID.MS_ATTRIBUTE_QUARANTINE_SESSION_TIMEOUT;
pub const MS_ATTRIBUTE_USER_SECURITY_IDENTITY = ATTRIBUTEID.MS_ATTRIBUTE_USER_SECURITY_IDENTITY;
pub const IAS_ATTRIBUTE_REMOTE_RADIUS_TO_WINDOWS_USER_MAPPING = ATTRIBUTEID.IAS_ATTRIBUTE_REMOTE_RADIUS_TO_WINDOWS_USER_MAPPING;
pub const IAS_ATTRIBUTE_PASSPORT_USER_MAPPING_UPN_SUFFIX = ATTRIBUTEID.IAS_ATTRIBUTE_PASSPORT_USER_MAPPING_UPN_SUFFIX;
pub const IAS_ATTRIBUTE_TUNNEL_TAG = ATTRIBUTEID.IAS_ATTRIBUTE_TUNNEL_TAG;
pub const IAS_ATTRIBUTE_NP_PEAPUPFRONT_ENABLED = ATTRIBUTEID.IAS_ATTRIBUTE_NP_PEAPUPFRONT_ENABLED;
pub const IAS_ATTRIBUTE_CERTIFICATE_EKU = ATTRIBUTEID.IAS_ATTRIBUTE_CERTIFICATE_EKU;
pub const IAS_ATTRIBUTE_EAP_CONFIG = ATTRIBUTEID.IAS_ATTRIBUTE_EAP_CONFIG;
pub const IAS_ATTRIBUTE_PEAP_EMBEDDED_EAP_TYPEID = ATTRIBUTEID.IAS_ATTRIBUTE_PEAP_EMBEDDED_EAP_TYPEID;
pub const IAS_ATTRIBUTE_PEAP_FAST_ROAMED_SESSION = ATTRIBUTEID.IAS_ATTRIBUTE_PEAP_FAST_ROAMED_SESSION;
pub const IAS_ATTRIBUTE_EAP_TYPEID = ATTRIBUTEID.IAS_ATTRIBUTE_EAP_TYPEID;
pub const MS_ATTRIBUTE_EAP_TLV = ATTRIBUTEID.MS_ATTRIBUTE_EAP_TLV;
pub const IAS_ATTRIBUTE_REJECT_REASON_CODE = ATTRIBUTEID.IAS_ATTRIBUTE_REJECT_REASON_CODE;
pub const IAS_ATTRIBUTE_PROXY_EAP_CONFIG = ATTRIBUTEID.IAS_ATTRIBUTE_PROXY_EAP_CONFIG;
pub const IAS_ATTRIBUTE_EAP_SESSION = ATTRIBUTEID.IAS_ATTRIBUTE_EAP_SESSION;
pub const IAS_ATTRIBUTE_IS_REPLAY = ATTRIBUTEID.IAS_ATTRIBUTE_IS_REPLAY;
pub const IAS_ATTRIBUTE_CLEAR_TEXT_PASSWORD = ATTRIBUTEID.IAS_ATTRIBUTE_CLEAR_TEXT_PASSWORD;
pub const MS_ATTRIBUTE_IDENTITY_TYPE = ATTRIBUTEID.MS_ATTRIBUTE_IDENTITY_TYPE;
pub const MS_ATTRIBUTE_SERVICE_CLASS = ATTRIBUTEID.MS_ATTRIBUTE_SERVICE_CLASS;
pub const MS_ATTRIBUTE_QUARANTINE_USER_CLASS = ATTRIBUTEID.MS_ATTRIBUTE_QUARANTINE_USER_CLASS;
pub const MS_ATTRIBUTE_QUARANTINE_STATE = ATTRIBUTEID.MS_ATTRIBUTE_QUARANTINE_STATE;
pub const IAS_ATTRIBUTE_OVERRIDE_RAP_AUTH = ATTRIBUTEID.IAS_ATTRIBUTE_OVERRIDE_RAP_AUTH;
pub const IAS_ATTRIBUTE_PEAP_CHANNEL_UP = ATTRIBUTEID.IAS_ATTRIBUTE_PEAP_CHANNEL_UP;
pub const IAS_ATTRIBUTE_NAME_MAPPED = ATTRIBUTEID.IAS_ATTRIBUTE_NAME_MAPPED;
pub const IAS_ATTRIBUTE_POLICY_ENFORCED = ATTRIBUTEID.IAS_ATTRIBUTE_POLICY_ENFORCED;
pub const IAS_ATTRIBUTE_MACHINE_NTGROUPS = ATTRIBUTEID.IAS_ATTRIBUTE_MACHINE_NTGROUPS;
pub const IAS_ATTRIBUTE_USER_NTGROUPS = ATTRIBUTEID.IAS_ATTRIBUTE_USER_NTGROUPS;
pub const IAS_ATTRIBUTE_MACHINE_TOKEN_GROUPS = ATTRIBUTEID.IAS_ATTRIBUTE_MACHINE_TOKEN_GROUPS;
pub const IAS_ATTRIBUTE_USER_TOKEN_GROUPS = ATTRIBUTEID.IAS_ATTRIBUTE_USER_TOKEN_GROUPS;
pub const MS_ATTRIBUTE_QUARANTINE_GRACE_TIME = ATTRIBUTEID.MS_ATTRIBUTE_QUARANTINE_GRACE_TIME;
pub const IAS_ATTRIBUTE_QUARANTINE_URL = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_URL;
pub const IAS_ATTRIBUTE_QUARANTINE_FIXUP_SERVERS = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_FIXUP_SERVERS;
pub const MS_ATTRIBUTE_NOT_QUARANTINE_CAPABLE = ATTRIBUTEID.MS_ATTRIBUTE_NOT_QUARANTINE_CAPABLE;
pub const IAS_ATTRIBUTE_QUARANTINE_SYSTEM_HEALTH_RESULT = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_SYSTEM_HEALTH_RESULT;
pub const IAS_ATTRIBUTE_QUARANTINE_SYSTEM_HEALTH_VALIDATORS = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_SYSTEM_HEALTH_VALIDATORS;
pub const IAS_ATTRIBUTE_MACHINE_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_MACHINE_NAME;
pub const IAS_ATTRIBUTE_NT4_MACHINE_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_NT4_MACHINE_NAME;
pub const IAS_ATTRIBUTE_QUARANTINE_SESSION_HANDLE = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_SESSION_HANDLE;
pub const IAS_ATTRIBUTE_FULLY_QUALIFIED_MACHINE_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_FULLY_QUALIFIED_MACHINE_NAME;
pub const IAS_ATTRIBUTE_QUARANTINE_FIXUP_SERVERS_CONFIGURATION = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_FIXUP_SERVERS_CONFIGURATION;
pub const IAS_ATTRIBUTE_CLIENT_QUARANTINE_COMPATIBLE = ATTRIBUTEID.IAS_ATTRIBUTE_CLIENT_QUARANTINE_COMPATIBLE;
pub const MS_ATTRIBUTE_NETWORK_ACCESS_SERVER_TYPE = ATTRIBUTEID.MS_ATTRIBUTE_NETWORK_ACCESS_SERVER_TYPE;
pub const IAS_ATTRIBUTE_QUARANTINE_SESSION_ID = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_SESSION_ID;
pub const MS_ATTRIBUTE_AFW_QUARANTINE_ZONE = ATTRIBUTEID.MS_ATTRIBUTE_AFW_QUARANTINE_ZONE;
pub const MS_ATTRIBUTE_AFW_PROTECTION_LEVEL = ATTRIBUTEID.MS_ATTRIBUTE_AFW_PROTECTION_LEVEL;
pub const IAS_ATTRIBUTE_QUARANTINE_UPDATE_NON_COMPLIANT = ATTRIBUTEID.IAS_ATTRIBUTE_QUARANTINE_UPDATE_NON_COMPLIANT;
pub const IAS_ATTRIBUTE_REQUEST_START_TIME = ATTRIBUTEID.IAS_ATTRIBUTE_REQUEST_START_TIME;
pub const MS_ATTRIBUTE_MACHINE_NAME = ATTRIBUTEID.MS_ATTRIBUTE_MACHINE_NAME;
pub const IAS_ATTRIBUTE_CLIENT_IPv6_ADDRESS = ATTRIBUTEID.IAS_ATTRIBUTE_CLIENT_IPv6_ADDRESS;
pub const IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_INTERFACE_ID = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_INTERFACE_ID;
pub const IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IPv6_PREFIX = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IPv6_PREFIX;
pub const IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IPv6_ROUTE = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_RADIUS_FRAMED_IPv6_ROUTE;
pub const MS_ATTRIBUTE_QUARANTINE_GRACE_TIME_CONFIGURATION = ATTRIBUTEID.MS_ATTRIBUTE_QUARANTINE_GRACE_TIME_CONFIGURATION;
pub const MS_ATTRIBUTE_IPv6_FILTER = ATTRIBUTEID.MS_ATTRIBUTE_IPv6_FILTER;
pub const MS_ATTRIBUTE_IPV4_REMEDIATION_SERVERS = ATTRIBUTEID.MS_ATTRIBUTE_IPV4_REMEDIATION_SERVERS;
pub const MS_ATTRIBUTE_IPV6_REMEDIATION_SERVERS = ATTRIBUTEID.MS_ATTRIBUTE_IPV6_REMEDIATION_SERVERS;
pub const IAS_ATTRIBUTE_PROXY_RETRY_COUNT = ATTRIBUTEID.IAS_ATTRIBUTE_PROXY_RETRY_COUNT;
pub const IAS_ATTRIBUTE_MACHINE_INVENTORY = ATTRIBUTEID.IAS_ATTRIBUTE_MACHINE_INVENTORY;
pub const IAS_ATTRIBUTE_ABSOLUTE_TIME = ATTRIBUTEID.IAS_ATTRIBUTE_ABSOLUTE_TIME;
pub const MS_ATTRIBUTE_QUARANTINE_SOH = ATTRIBUTEID.MS_ATTRIBUTE_QUARANTINE_SOH;
pub const IAS_ATTRIBUTE_EAP_TYPES_CONFIGURED_IN_PROXYPOLICY = ATTRIBUTEID.IAS_ATTRIBUTE_EAP_TYPES_CONFIGURED_IN_PROXYPOLICY;
pub const MS_ATTRIBUTE_HCAP_LOCATION_GROUP_NAME = ATTRIBUTEID.MS_ATTRIBUTE_HCAP_LOCATION_GROUP_NAME;
pub const MS_ATTRIBUTE_EXTENDED_QUARANTINE_STATE = ATTRIBUTEID.MS_ATTRIBUTE_EXTENDED_QUARANTINE_STATE;
pub const IAS_ATTRIBUTE_SOH_CARRIER_EAPTLV = ATTRIBUTEID.IAS_ATTRIBUTE_SOH_CARRIER_EAPTLV;
pub const MS_ATTRIBUTE_HCAP_USER_GROUPS = ATTRIBUTEID.MS_ATTRIBUTE_HCAP_USER_GROUPS;
pub const IAS_ATTRIBUTE_SAVED_MACHINE_HEALTHCHECK_ONLY = ATTRIBUTEID.IAS_ATTRIBUTE_SAVED_MACHINE_HEALTHCHECK_ONLY;
pub const IAS_ATTRIBUTE_POLICY_EVALUATED_SHV = ATTRIBUTEID.IAS_ATTRIBUTE_POLICY_EVALUATED_SHV;
pub const MS_ATTRIBUTE_RAS_CORRELATION_ID = ATTRIBUTEID.MS_ATTRIBUTE_RAS_CORRELATION_ID;
pub const MS_ATTRIBUTE_HCAP_USER_NAME = ATTRIBUTEID.MS_ATTRIBUTE_HCAP_USER_NAME;
pub const IAS_ATTRIBUTE_NT4_HCAP_ACCOUNT_NAME = ATTRIBUTEID.IAS_ATTRIBUTE_NT4_HCAP_ACCOUNT_NAME;
pub const IAS_ATTRIBUTE_USER_TOKEN_SID = ATTRIBUTEID.IAS_ATTRIBUTE_USER_TOKEN_SID;
pub const IAS_ATTRIBUTE_MACHINE_TOKEN_SID = ATTRIBUTEID.IAS_ATTRIBUTE_MACHINE_TOKEN_SID;
pub const IAS_ATTRIBUTE_MACHINE_VALIDATED = ATTRIBUTEID.IAS_ATTRIBUTE_MACHINE_VALIDATED;
pub const MS_ATTRIBUTE_USER_IPv4_ADDRESS = ATTRIBUTEID.MS_ATTRIBUTE_USER_IPv4_ADDRESS;
pub const MS_ATTRIBUTE_USER_IPv6_ADDRESS = ATTRIBUTEID.MS_ATTRIBUTE_USER_IPv6_ADDRESS;
pub const MS_ATTRIBUTE_TSG_DEVICE_REDIRECTION = ATTRIBUTEID.MS_ATTRIBUTE_TSG_DEVICE_REDIRECTION;
pub const IAS_ATTRIBUTE_ACCEPT_REASON_CODE = ATTRIBUTEID.IAS_ATTRIBUTE_ACCEPT_REASON_CODE;
pub const IAS_ATTRIBUTE_LOGGING_RESULT = ATTRIBUTEID.IAS_ATTRIBUTE_LOGGING_RESULT;
pub const IAS_ATTRIBUTE_SERVER_IP_ADDRESS = ATTRIBUTEID.IAS_ATTRIBUTE_SERVER_IP_ADDRESS;
pub const IAS_ATTRIBUTE_SERVER_IPv6_ADDRESS = ATTRIBUTEID.IAS_ATTRIBUTE_SERVER_IPv6_ADDRESS;
pub const IAS_ATTRIBUTE_RADIUS_USERNAME_ENCODING_ASCII = ATTRIBUTEID.IAS_ATTRIBUTE_RADIUS_USERNAME_ENCODING_ASCII;
pub const MS_ATTRIBUTE_RAS_ROUTING_DOMAIN_ID = ATTRIBUTEID.MS_ATTRIBUTE_RAS_ROUTING_DOMAIN_ID;
pub const IAS_ATTRIBUTE_CERTIFICATE_THUMBPRINT = ATTRIBUTEID.IAS_ATTRIBUTE_CERTIFICATE_THUMBPRINT;
pub const RAS_ATTRIBUTE_ENCRYPTION_TYPE = ATTRIBUTEID.RAS_ATTRIBUTE_ENCRYPTION_TYPE;
pub const RAS_ATTRIBUTE_ENCRYPTION_POLICY = ATTRIBUTEID.RAS_ATTRIBUTE_ENCRYPTION_POLICY;
pub const RAS_ATTRIBUTE_BAP_REQUIRED = ATTRIBUTEID.RAS_ATTRIBUTE_BAP_REQUIRED;
pub const RAS_ATTRIBUTE_BAP_LINE_DOWN_TIME = ATTRIBUTEID.RAS_ATTRIBUTE_BAP_LINE_DOWN_TIME;
pub const RAS_ATTRIBUTE_BAP_LINE_DOWN_LIMIT = ATTRIBUTEID.RAS_ATTRIBUTE_BAP_LINE_DOWN_LIMIT;
pub const NEW_LOG_FILE_FREQUENCY = enum(i32) {
UNLIMITED_SIZE = 0,
DAILY = 1,
WEEKLY = 2,
MONTHLY = 3,
WHEN_FILE_SIZE_REACHES = 4,
};
pub const IAS_LOGGING_UNLIMITED_SIZE = NEW_LOG_FILE_FREQUENCY.UNLIMITED_SIZE;
pub const IAS_LOGGING_DAILY = NEW_LOG_FILE_FREQUENCY.DAILY;
pub const IAS_LOGGING_WEEKLY = NEW_LOG_FILE_FREQUENCY.WEEKLY;
pub const IAS_LOGGING_MONTHLY = NEW_LOG_FILE_FREQUENCY.MONTHLY;
pub const IAS_LOGGING_WHEN_FILE_SIZE_REACHES = NEW_LOG_FILE_FREQUENCY.WHEN_FILE_SIZE_REACHES;
pub const AUTHENTICATION_TYPE = enum(i32) {
INVALID = 0,
PAP = 1,
MD5CHAP = 2,
MSCHAP = 3,
MSCHAP2 = 4,
EAP = 5,
ARAP = 6,
NONE = 7,
CUSTOM = 8,
MSCHAP_CPW = 9,
MSCHAP2_CPW = 10,
PEAP = 11,
};
pub const IAS_AUTH_INVALID = AUTHENTICATION_TYPE.INVALID;
pub const IAS_AUTH_PAP = AUTHENTICATION_TYPE.PAP;
pub const IAS_AUTH_MD5CHAP = AUTHENTICATION_TYPE.MD5CHAP;
pub const IAS_AUTH_MSCHAP = AUTHENTICATION_TYPE.MSCHAP;
pub const IAS_AUTH_MSCHAP2 = AUTHENTICATION_TYPE.MSCHAP2;
pub const IAS_AUTH_EAP = AUTHENTICATION_TYPE.EAP;
pub const IAS_AUTH_ARAP = AUTHENTICATION_TYPE.ARAP;
pub const IAS_AUTH_NONE = AUTHENTICATION_TYPE.NONE;
pub const IAS_AUTH_CUSTOM = AUTHENTICATION_TYPE.CUSTOM;
pub const IAS_AUTH_MSCHAP_CPW = AUTHENTICATION_TYPE.MSCHAP_CPW;
pub const IAS_AUTH_MSCHAP2_CPW = AUTHENTICATION_TYPE.MSCHAP2_CPW;
pub const IAS_AUTH_PEAP = AUTHENTICATION_TYPE.PEAP;
pub const IDENTITY_TYPE = enum(i32) {
T = 1,
};
pub const IAS_IDENTITY_NO_DEFAULT = IDENTITY_TYPE.T;
pub const ATTRIBUTESYNTAX = enum(i32) {
BOOLEAN = 1,
INTEGER = 2,
ENUMERATOR = 3,
INETADDR = 4,
STRING = 5,
OCTETSTRING = 6,
UTCTIME = 7,
PROVIDERSPECIFIC = 8,
UNSIGNEDINTEGER = 9,
INETADDR6 = 10,
};
pub const IAS_SYNTAX_BOOLEAN = ATTRIBUTESYNTAX.BOOLEAN;
pub const IAS_SYNTAX_INTEGER = ATTRIBUTESYNTAX.INTEGER;
pub const IAS_SYNTAX_ENUMERATOR = ATTRIBUTESYNTAX.ENUMERATOR;
pub const IAS_SYNTAX_INETADDR = ATTRIBUTESYNTAX.INETADDR;
pub const IAS_SYNTAX_STRING = ATTRIBUTESYNTAX.STRING;
pub const IAS_SYNTAX_OCTETSTRING = ATTRIBUTESYNTAX.OCTETSTRING;
pub const IAS_SYNTAX_UTCTIME = ATTRIBUTESYNTAX.UTCTIME;
pub const IAS_SYNTAX_PROVIDERSPECIFIC = ATTRIBUTESYNTAX.PROVIDERSPECIFIC;
pub const IAS_SYNTAX_UNSIGNEDINTEGER = ATTRIBUTESYNTAX.UNSIGNEDINTEGER;
pub const IAS_SYNTAX_INETADDR6 = ATTRIBUTESYNTAX.INETADDR6;
pub const ATTRIBUTERESTRICTIONS = enum(i32) {
MULTIVALUED = 1,
ALLOWEDINPROFILE = 2,
ALLOWEDINCONDITION = 4,
ALLOWEDINPROXYPROFILE = 8,
ALLOWEDINPROXYCONDITION = 16,
ALLOWEDINVPNDIALUP = 32,
ALLOWEDIN8021X = 64,
};
pub const MULTIVALUED = ATTRIBUTERESTRICTIONS.MULTIVALUED;
pub const ALLOWEDINPROFILE = ATTRIBUTERESTRICTIONS.ALLOWEDINPROFILE;
pub const ALLOWEDINCONDITION = ATTRIBUTERESTRICTIONS.ALLOWEDINCONDITION;
pub const ALLOWEDINPROXYPROFILE = ATTRIBUTERESTRICTIONS.ALLOWEDINPROXYPROFILE;
pub const ALLOWEDINPROXYCONDITION = ATTRIBUTERESTRICTIONS.ALLOWEDINPROXYCONDITION;
pub const ALLOWEDINVPNDIALUP = ATTRIBUTERESTRICTIONS.ALLOWEDINVPNDIALUP;
pub const ALLOWEDIN8021X = ATTRIBUTERESTRICTIONS.ALLOWEDIN8021X;
pub const ATTRIBUTEFILTER = enum(i32) {
NONE = 0,
VPN_DIALUP = 1,
IEEE_802_1x = 2,
};
pub const ATTRIBUTE_FILTER_NONE = ATTRIBUTEFILTER.NONE;
pub const ATTRIBUTE_FILTER_VPN_DIALUP = ATTRIBUTEFILTER.VPN_DIALUP;
pub const ATTRIBUTE_FILTER_IEEE_802_1x = ATTRIBUTEFILTER.IEEE_802_1x;
pub const ATTRIBUTEINFO = enum(i32) {
NAME = 1,
SYNTAX = 2,
RESTRICTIONS = 3,
DESCRIPTION = 4,
VENDORID = 5,
LDAPNAME = 6,
VENDORTYPE = 7,
};
pub const NAME = ATTRIBUTEINFO.NAME;
pub const SYNTAX = ATTRIBUTEINFO.SYNTAX;
pub const RESTRICTIONS = ATTRIBUTEINFO.RESTRICTIONS;
pub const DESCRIPTION = ATTRIBUTEINFO.DESCRIPTION;
pub const VENDORID = ATTRIBUTEINFO.VENDORID;
pub const LDAPNAME = ATTRIBUTEINFO.LDAPNAME;
pub const VENDORTYPE = ATTRIBUTEINFO.VENDORTYPE;
pub const IASCOMMONPROPERTIES = enum(i32) {
RESERVED = 0,
CLASS = 1,
NAME = 2,
DESCRIPTION = 3,
ID = 4,
DATASTORE_NAME = 5,
TEMPLATE_GUID = 6,
OPAQUE = 7,
START = 1024,
};
pub const PROPERTY_SDO_RESERVED = IASCOMMONPROPERTIES.RESERVED;
pub const PROPERTY_SDO_CLASS = IASCOMMONPROPERTIES.CLASS;
pub const PROPERTY_SDO_NAME = IASCOMMONPROPERTIES.NAME;
pub const PROPERTY_SDO_DESCRIPTION = IASCOMMONPROPERTIES.DESCRIPTION;
pub const PROPERTY_SDO_ID = IASCOMMONPROPERTIES.ID;
pub const PROPERTY_SDO_DATASTORE_NAME = IASCOMMONPROPERTIES.DATASTORE_NAME;
pub const PROPERTY_SDO_TEMPLATE_GUID = IASCOMMONPROPERTIES.TEMPLATE_GUID;
pub const PROPERTY_SDO_OPAQUE = IASCOMMONPROPERTIES.OPAQUE;
pub const PROPERTY_SDO_START = IASCOMMONPROPERTIES.START;
pub const USERPROPERTIES = enum(i32) {
CALLING_STATION_ID = 1024,
SAVED_CALLING_STATION_ID = 1025,
RADIUS_CALLBACK_NUMBER = 1026,
RADIUS_FRAMED_ROUTE = 1027,
RADIUS_FRAMED_IP_ADDRESS = 1028,
SAVED_RADIUS_CALLBACK_NUMBER = 1029,
SAVED_RADIUS_FRAMED_ROUTE = 1030,
SAVED_RADIUS_FRAMED_IP_ADDRESS = 1031,
ALLOW_DIALIN = 1032,
SERVICE_TYPE = 1033,
RADIUS_FRAMED_IPV6_ROUTE = 1034,
SAVED_RADIUS_FRAMED_IPV6_ROUTE = 1035,
RADIUS_FRAMED_INTERFACE_ID = 1036,
SAVED_RADIUS_FRAMED_INTERFACE_ID = 1037,
RADIUS_FRAMED_IPV6_PREFIX = 1038,
SAVED_RADIUS_FRAMED_IPV6_PREFIX = 1039,
};
pub const PROPERTY_USER_CALLING_STATION_ID = USERPROPERTIES.CALLING_STATION_ID;
pub const PROPERTY_USER_SAVED_CALLING_STATION_ID = USERPROPERTIES.SAVED_CALLING_STATION_ID;
pub const PROPERTY_USER_RADIUS_CALLBACK_NUMBER = USERPROPERTIES.RADIUS_CALLBACK_NUMBER;
pub const PROPERTY_USER_RADIUS_FRAMED_ROUTE = USERPROPERTIES.RADIUS_FRAMED_ROUTE;
pub const PROPERTY_USER_RADIUS_FRAMED_IP_ADDRESS = USERPROPERTIES.RADIUS_FRAMED_IP_ADDRESS;
pub const PROPERTY_USER_SAVED_RADIUS_CALLBACK_NUMBER = USERPROPERTIES.SAVED_RADIUS_CALLBACK_NUMBER;
pub const PROPERTY_USER_SAVED_RADIUS_FRAMED_ROUTE = USERPROPERTIES.SAVED_RADIUS_FRAMED_ROUTE;
pub const PROPERTY_USER_SAVED_RADIUS_FRAMED_IP_ADDRESS = USERPROPERTIES.SAVED_RADIUS_FRAMED_IP_ADDRESS;
pub const PROPERTY_USER_ALLOW_DIALIN = USERPROPERTIES.ALLOW_DIALIN;
pub const PROPERTY_USER_SERVICE_TYPE = USERPROPERTIES.SERVICE_TYPE;
pub const PROPERTY_USER_RADIUS_FRAMED_IPV6_ROUTE = USERPROPERTIES.RADIUS_FRAMED_IPV6_ROUTE;
pub const PROPERTY_USER_SAVED_RADIUS_FRAMED_IPV6_ROUTE = USERPROPERTIES.SAVED_RADIUS_FRAMED_IPV6_ROUTE;
pub const PROPERTY_USER_RADIUS_FRAMED_INTERFACE_ID = USERPROPERTIES.RADIUS_FRAMED_INTERFACE_ID;
pub const PROPERTY_USER_SAVED_RADIUS_FRAMED_INTERFACE_ID = USERPROPERTIES.SAVED_RADIUS_FRAMED_INTERFACE_ID;
pub const PROPERTY_USER_RADIUS_FRAMED_IPV6_PREFIX = USERPROPERTIES.RADIUS_FRAMED_IPV6_PREFIX;
pub const PROPERTY_USER_SAVED_RADIUS_FRAMED_IPV6_PREFIX = USERPROPERTIES.SAVED_RADIUS_FRAMED_IPV6_PREFIX;
pub const DICTIONARYPROPERTIES = enum(i32) {
ATTRIBUTES_COLLECTION = 1024,
LOCATION = 1025,
};
pub const PROPERTY_DICTIONARY_ATTRIBUTES_COLLECTION = DICTIONARYPROPERTIES.ATTRIBUTES_COLLECTION;
pub const PROPERTY_DICTIONARY_LOCATION = DICTIONARYPROPERTIES.LOCATION;
pub const ATTRIBUTEPROPERTIES = enum(i32) {
ID = 1024,
VENDOR_ID = 1025,
VENDOR_TYPE_ID = 1026,
IS_ENUMERABLE = 1027,
ENUM_NAMES = 1028,
ENUM_VALUES = 1029,
SYNTAX = 1030,
ALLOW_MULTIPLE = 1031,
ALLOW_LOG_ORDINAL = 1032,
ALLOW_IN_PROFILE = 1033,
ALLOW_IN_CONDITION = 1034,
DISPLAY_NAME = 1035,
VALUE = 1036,
ALLOW_IN_PROXY_PROFILE = 1037,
ALLOW_IN_PROXY_CONDITION = 1038,
ALLOW_IN_VPNDIALUP = 1039,
ALLOW_IN_8021X = 1040,
ENUM_FILTERS = 1041,
};
pub const PROPERTY_ATTRIBUTE_ID = ATTRIBUTEPROPERTIES.ID;
pub const PROPERTY_ATTRIBUTE_VENDOR_ID = ATTRIBUTEPROPERTIES.VENDOR_ID;
pub const PROPERTY_ATTRIBUTE_VENDOR_TYPE_ID = ATTRIBUTEPROPERTIES.VENDOR_TYPE_ID;
pub const PROPERTY_ATTRIBUTE_IS_ENUMERABLE = ATTRIBUTEPROPERTIES.IS_ENUMERABLE;
pub const PROPERTY_ATTRIBUTE_ENUM_NAMES = ATTRIBUTEPROPERTIES.ENUM_NAMES;
pub const PROPERTY_ATTRIBUTE_ENUM_VALUES = ATTRIBUTEPROPERTIES.ENUM_VALUES;
pub const PROPERTY_ATTRIBUTE_SYNTAX = ATTRIBUTEPROPERTIES.SYNTAX;
pub const PROPERTY_ATTRIBUTE_ALLOW_MULTIPLE = ATTRIBUTEPROPERTIES.ALLOW_MULTIPLE;
pub const PROPERTY_ATTRIBUTE_ALLOW_LOG_ORDINAL = ATTRIBUTEPROPERTIES.ALLOW_LOG_ORDINAL;
pub const PROPERTY_ATTRIBUTE_ALLOW_IN_PROFILE = ATTRIBUTEPROPERTIES.ALLOW_IN_PROFILE;
pub const PROPERTY_ATTRIBUTE_ALLOW_IN_CONDITION = ATTRIBUTEPROPERTIES.ALLOW_IN_CONDITION;
pub const PROPERTY_ATTRIBUTE_DISPLAY_NAME = ATTRIBUTEPROPERTIES.DISPLAY_NAME;
pub const PROPERTY_ATTRIBUTE_VALUE = ATTRIBUTEPROPERTIES.VALUE;
pub const PROPERTY_ATTRIBUTE_ALLOW_IN_PROXY_PROFILE = ATTRIBUTEPROPERTIES.ALLOW_IN_PROXY_PROFILE;
pub const PROPERTY_ATTRIBUTE_ALLOW_IN_PROXY_CONDITION = ATTRIBUTEPROPERTIES.ALLOW_IN_PROXY_CONDITION;
pub const PROPERTY_ATTRIBUTE_ALLOW_IN_VPNDIALUP = ATTRIBUTEPROPERTIES.ALLOW_IN_VPNDIALUP;
pub const PROPERTY_ATTRIBUTE_ALLOW_IN_8021X = ATTRIBUTEPROPERTIES.ALLOW_IN_8021X;
pub const PROPERTY_ATTRIBUTE_ENUM_FILTERS = ATTRIBUTEPROPERTIES.ENUM_FILTERS;
pub const IASPROPERTIES = enum(i32) {
RADIUSSERVERGROUPS_COLLECTION = 1024,
POLICIES_COLLECTION = 1025,
PROFILES_COLLECTION = 1026,
PROTOCOLS_COLLECTION = 1027,
AUDITORS_COLLECTION = 1028,
REQUESTHANDLERS_COLLECTION = 1029,
PROXYPOLICIES_COLLECTION = 1030,
PROXYPROFILES_COLLECTION = 1031,
REMEDIATIONSERVERGROUPS_COLLECTION = 1032,
SHVTEMPLATES_COLLECTION = 1033,
};
pub const PROPERTY_IAS_RADIUSSERVERGROUPS_COLLECTION = IASPROPERTIES.RADIUSSERVERGROUPS_COLLECTION;
pub const PROPERTY_IAS_POLICIES_COLLECTION = IASPROPERTIES.POLICIES_COLLECTION;
pub const PROPERTY_IAS_PROFILES_COLLECTION = IASPROPERTIES.PROFILES_COLLECTION;
pub const PROPERTY_IAS_PROTOCOLS_COLLECTION = IASPROPERTIES.PROTOCOLS_COLLECTION;
pub const PROPERTY_IAS_AUDITORS_COLLECTION = IASPROPERTIES.AUDITORS_COLLECTION;
pub const PROPERTY_IAS_REQUESTHANDLERS_COLLECTION = IASPROPERTIES.REQUESTHANDLERS_COLLECTION;
pub const PROPERTY_IAS_PROXYPOLICIES_COLLECTION = IASPROPERTIES.PROXYPOLICIES_COLLECTION;
pub const PROPERTY_IAS_PROXYPROFILES_COLLECTION = IASPROPERTIES.PROXYPROFILES_COLLECTION;
pub const PROPERTY_IAS_REMEDIATIONSERVERGROUPS_COLLECTION = IASPROPERTIES.REMEDIATIONSERVERGROUPS_COLLECTION;
pub const PROPERTY_IAS_SHVTEMPLATES_COLLECTION = IASPROPERTIES.SHVTEMPLATES_COLLECTION;
pub const TEMPLATESPROPERTIES = enum(i32) {
POLICIES_TEMPLATES = 1024,
PROFILES_TEMPLATES = 1025,
PROFILES_COLLECTION = 1026,
PROXYPOLICIES_TEMPLATES = 1027,
PROXYPROFILES_TEMPLATES = 1028,
PROXYPROFILES_COLLECTION = 1029,
REMEDIATIONSERVERGROUPS_TEMPLATES = 1030,
SHVTEMPLATES_TEMPLATES = 1031,
CLIENTS_TEMPLATES = 1032,
RADIUSSERVERS_TEMPLATES = 1033,
SHAREDSECRETS_TEMPLATES = 1034,
IPFILTERS_TEMPLATES = 1035,
};
pub const PROPERTY_TEMPLATES_POLICIES_TEMPLATES = TEMPLATESPROPERTIES.POLICIES_TEMPLATES;
pub const PROPERTY_TEMPLATES_PROFILES_TEMPLATES = TEMPLATESPROPERTIES.PROFILES_TEMPLATES;
pub const PROPERTY_TEMPLATES_PROFILES_COLLECTION = TEMPLATESPROPERTIES.PROFILES_COLLECTION;
pub const PROPERTY_TEMPLATES_PROXYPOLICIES_TEMPLATES = TEMPLATESPROPERTIES.PROXYPOLICIES_TEMPLATES;
pub const PROPERTY_TEMPLATES_PROXYPROFILES_TEMPLATES = TEMPLATESPROPERTIES.PROXYPROFILES_TEMPLATES;
pub const PROPERTY_TEMPLATES_PROXYPROFILES_COLLECTION = TEMPLATESPROPERTIES.PROXYPROFILES_COLLECTION;
pub const PROPERTY_TEMPLATES_REMEDIATIONSERVERGROUPS_TEMPLATES = TEMPLATESPROPERTIES.REMEDIATIONSERVERGROUPS_TEMPLATES;
pub const PROPERTY_TEMPLATES_SHVTEMPLATES_TEMPLATES = TEMPLATESPROPERTIES.SHVTEMPLATES_TEMPLATES;
pub const PROPERTY_TEMPLATES_CLIENTS_TEMPLATES = TEMPLATESPROPERTIES.CLIENTS_TEMPLATES;
pub const PROPERTY_TEMPLATES_RADIUSSERVERS_TEMPLATES = TEMPLATESPROPERTIES.RADIUSSERVERS_TEMPLATES;
pub const PROPERTY_TEMPLATES_SHAREDSECRETS_TEMPLATES = TEMPLATESPROPERTIES.SHAREDSECRETS_TEMPLATES;
pub const PROPERTY_TEMPLATES_IPFILTERS_TEMPLATES = TEMPLATESPROPERTIES.IPFILTERS_TEMPLATES;
pub const CLIENTPROPERTIES = enum(i32) {
REQUIRE_SIGNATURE = 1024,
UNUSED = 1025,
SHARED_SECRET = 1026,
NAS_MANUFACTURER = 1027,
ADDRESS = 1028,
QUARANTINE_COMPATIBLE = 1029,
ENABLED = 1030,
SECRET_TEMPLATE_GUID = 1031,
};
pub const PROPERTY_CLIENT_REQUIRE_SIGNATURE = CLIENTPROPERTIES.REQUIRE_SIGNATURE;
pub const PROPERTY_CLIENT_UNUSED = CLIENTPROPERTIES.UNUSED;
pub const PROPERTY_CLIENT_SHARED_SECRET = CLIENTPROPERTIES.SHARED_SECRET;
pub const PROPERTY_CLIENT_NAS_MANUFACTURER = CLIENTPROPERTIES.NAS_MANUFACTURER;
pub const PROPERTY_CLIENT_ADDRESS = CLIENTPROPERTIES.ADDRESS;
pub const PROPERTY_CLIENT_QUARANTINE_COMPATIBLE = CLIENTPROPERTIES.QUARANTINE_COMPATIBLE;
pub const PROPERTY_CLIENT_ENABLED = CLIENTPROPERTIES.ENABLED;
pub const PROPERTY_CLIENT_SECRET_TEMPLATE_GUID = CLIENTPROPERTIES.SECRET_TEMPLATE_GUID;
pub const VENDORPROPERTIES = enum(i32) {
D = 1024,
};
pub const PROPERTY_NAS_VENDOR_ID = VENDORPROPERTIES.D;
pub const PROFILEPROPERTIES = enum(i32) {
ATTRIBUTES_COLLECTION = 1024,
IPFILTER_TEMPLATE_GUID = 1025,
};
pub const PROPERTY_PROFILE_ATTRIBUTES_COLLECTION = PROFILEPROPERTIES.ATTRIBUTES_COLLECTION;
pub const PROPERTY_PROFILE_IPFILTER_TEMPLATE_GUID = PROFILEPROPERTIES.IPFILTER_TEMPLATE_GUID;
pub const POLICYPROPERTIES = enum(i32) {
CONSTRAINT = 1024,
MERIT = 1025,
UNUSED0 = 1026,
UNUSED1 = 1027,
PROFILE_NAME = 1028,
ACTION = 1029,
CONDITIONS_COLLECTION = 1030,
ENABLED = 1031,
SOURCETAG = 1032,
};
pub const PROPERTY_POLICY_CONSTRAINT = POLICYPROPERTIES.CONSTRAINT;
pub const PROPERTY_POLICY_MERIT = POLICYPROPERTIES.MERIT;
pub const PROPERTY_POLICY_UNUSED0 = POLICYPROPERTIES.UNUSED0;
pub const PROPERTY_POLICY_UNUSED1 = POLICYPROPERTIES.UNUSED1;
pub const PROPERTY_POLICY_PROFILE_NAME = POLICYPROPERTIES.PROFILE_NAME;
pub const PROPERTY_POLICY_ACTION = POLICYPROPERTIES.ACTION;
pub const PROPERTY_POLICY_CONDITIONS_COLLECTION = POLICYPROPERTIES.CONDITIONS_COLLECTION;
pub const PROPERTY_POLICY_ENABLED = POLICYPROPERTIES.ENABLED;
pub const PROPERTY_POLICY_SOURCETAG = POLICYPROPERTIES.SOURCETAG;
pub const CONDITIONPROPERTIES = enum(i32) {
T = 1024,
};
pub const PROPERTY_CONDITION_TEXT = CONDITIONPROPERTIES.T;
pub const RADIUSSERVERGROUPPROPERTIES = enum(i32) {
N = 1024,
};
pub const PROPERTY_RADIUSSERVERGROUP_SERVERS_COLLECTION = RADIUSSERVERGROUPPROPERTIES.N;
pub const RADIUSSERVERPROPERTIES = enum(i32) {
AUTH_PORT = 1024,
AUTH_SECRET = 1025,
ACCT_PORT = 1026,
ACCT_SECRET = 1027,
ADDRESS = 1028,
FORWARD_ACCT_ONOFF = 1029,
PRIORITY = 1030,
WEIGHT = 1031,
TIMEOUT = 1032,
MAX_LOST = 1033,
BLACKOUT = 1034,
SEND_SIGNATURE = 1035,
AUTH_SECRET_TEMPLATE_GUID = 1036,
ACCT_SECRET_TEMPLATE_GUID = 1037,
};
pub const PROPERTY_RADIUSSERVER_AUTH_PORT = RADIUSSERVERPROPERTIES.AUTH_PORT;
pub const PROPERTY_RADIUSSERVER_AUTH_SECRET = RADIUSSERVERPROPERTIES.AUTH_SECRET;
pub const PROPERTY_RADIUSSERVER_ACCT_PORT = RADIUSSERVERPROPERTIES.ACCT_PORT;
pub const PROPERTY_RADIUSSERVER_ACCT_SECRET = RADIUSSERVERPROPERTIES.ACCT_SECRET;
pub const PROPERTY_RADIUSSERVER_ADDRESS = RADIUSSERVERPROPERTIES.ADDRESS;
pub const PROPERTY_RADIUSSERVER_FORWARD_ACCT_ONOFF = RADIUSSERVERPROPERTIES.FORWARD_ACCT_ONOFF;
pub const PROPERTY_RADIUSSERVER_PRIORITY = RADIUSSERVERPROPERTIES.PRIORITY;
pub const PROPERTY_RADIUSSERVER_WEIGHT = RADIUSSERVERPROPERTIES.WEIGHT;
pub const PROPERTY_RADIUSSERVER_TIMEOUT = RADIUSSERVERPROPERTIES.TIMEOUT;
pub const PROPERTY_RADIUSSERVER_MAX_LOST = RADIUSSERVERPROPERTIES.MAX_LOST;
pub const PROPERTY_RADIUSSERVER_BLACKOUT = RADIUSSERVERPROPERTIES.BLACKOUT;
pub const PROPERTY_RADIUSSERVER_SEND_SIGNATURE = RADIUSSERVERPROPERTIES.SEND_SIGNATURE;
pub const PROPERTY_RADIUSSERVER_AUTH_SECRET_TEMPLATE_GUID = RADIUSSERVERPROPERTIES.AUTH_SECRET_TEMPLATE_GUID;
pub const PROPERTY_RADIUSSERVER_ACCT_SECRET_TEMPLATE_GUID = RADIUSSERVERPROPERTIES.ACCT_SECRET_TEMPLATE_GUID;
pub const REMEDIATIONSERVERGROUPPROPERTIES = enum(i32) {
N = 1024,
};
pub const PROPERTY_REMEDIATIONSERVERGROUP_SERVERS_COLLECTION = REMEDIATIONSERVERGROUPPROPERTIES.N;
pub const REMEDIATIONSERVERPROPERTIES = enum(i32) {
ADDRESS = 1024,
FRIENDLY_NAME = 1025,
};
pub const PROPERTY_REMEDIATIONSERVER_ADDRESS = REMEDIATIONSERVERPROPERTIES.ADDRESS;
pub const PROPERTY_REMEDIATIONSERVER_FRIENDLY_NAME = REMEDIATIONSERVERPROPERTIES.FRIENDLY_NAME;
pub const SHVTEMPLATEPROPERTIES = enum(i32) {
_COMBINATION_TYPE = 1024,
_LIST = 1025,
CONFIG_LIST = 1026,
};
pub const PROPERTY_SHV_COMBINATION_TYPE = SHVTEMPLATEPROPERTIES._COMBINATION_TYPE;
pub const PROPERTY_SHV_LIST = SHVTEMPLATEPROPERTIES._LIST;
pub const PROPERTY_SHVCONFIG_LIST = SHVTEMPLATEPROPERTIES.CONFIG_LIST;
pub const IPFILTERPROPERTIES = enum(i32) {
N = 1024,
};
pub const PROPERTY_IPFILTER_ATTRIBUTES_COLLECTION = IPFILTERPROPERTIES.N;
pub const SHAREDSECRETPROPERTIES = enum(i32) {
G = 1024,
};
pub const PROPERTY_SHAREDSECRET_STRING = SHAREDSECRETPROPERTIES.G;
pub const IASCOMPONENTPROPERTIES = enum(i32) {
ID = 1024,
PROG_ID = 1025,
START = 1026,
};
pub const PROPERTY_COMPONENT_ID = IASCOMPONENTPROPERTIES.ID;
pub const PROPERTY_COMPONENT_PROG_ID = IASCOMPONENTPROPERTIES.PROG_ID;
pub const PROPERTY_COMPONENT_START = IASCOMPONENTPROPERTIES.START;
pub const PROTOCOLPROPERTIES = enum(i32) {
REQUEST_HANDLER = 1026,
START = 1027,
};
pub const PROPERTY_PROTOCOL_REQUEST_HANDLER = PROTOCOLPROPERTIES.REQUEST_HANDLER;
pub const PROPERTY_PROTOCOL_START = PROTOCOLPROPERTIES.START;
pub const RADIUSPROPERTIES = enum(i32) {
ACCOUNTING_PORT = 1027,
AUTHENTICATION_PORT = 1028,
CLIENTS_COLLECTION = 1029,
VENDORS_COLLECTION = 1030,
};
pub const PROPERTY_RADIUS_ACCOUNTING_PORT = RADIUSPROPERTIES.ACCOUNTING_PORT;
pub const PROPERTY_RADIUS_AUTHENTICATION_PORT = RADIUSPROPERTIES.AUTHENTICATION_PORT;
pub const PROPERTY_RADIUS_CLIENTS_COLLECTION = RADIUSPROPERTIES.CLIENTS_COLLECTION;
pub const PROPERTY_RADIUS_VENDORS_COLLECTION = RADIUSPROPERTIES.VENDORS_COLLECTION;
pub const NTEVENTLOGPROPERTIES = enum(i32) {
APPLICATION_EVENTS = 1026,
MALFORMED = 1027,
DEBUG = 1028,
};
pub const PROPERTY_EVENTLOG_LOG_APPLICATION_EVENTS = NTEVENTLOGPROPERTIES.APPLICATION_EVENTS;
pub const PROPERTY_EVENTLOG_LOG_MALFORMED = NTEVENTLOGPROPERTIES.MALFORMED;
pub const PROPERTY_EVENTLOG_LOG_DEBUG = NTEVENTLOGPROPERTIES.DEBUG;
pub const NAMESPROPERTIES = enum(i32) {
S = 1026,
};
pub const PROPERTY_NAMES_REALMS = NAMESPROPERTIES.S;
pub const NTSAMPROPERTIES = enum(i32) {
N = 1026,
};
pub const PROPERTY_NTSAM_ALLOW_LM_AUTHENTICATION = NTSAMPROPERTIES.N;
pub const ACCOUNTINGPROPERTIES = enum(i32) {
LOG_ACCOUNTING = 1026,
LOG_ACCOUNTING_INTERIM = 1027,
LOG_AUTHENTICATION = 1028,
LOG_OPEN_NEW_FREQUENCY = 1029,
LOG_OPEN_NEW_SIZE = 1030,
LOG_FILE_DIRECTORY = 1031,
LOG_IAS1_FORMAT = 1032,
LOG_ENABLE_LOGGING = 1033,
LOG_DELETE_IF_FULL = 1034,
SQL_MAX_SESSIONS = 1035,
LOG_AUTHENTICATION_INTERIM = 1036,
LOG_FILE_IS_BACKUP = 1037,
DISCARD_REQUEST_ON_FAILURE = 1038,
};
pub const PROPERTY_ACCOUNTING_LOG_ACCOUNTING = ACCOUNTINGPROPERTIES.LOG_ACCOUNTING;
pub const PROPERTY_ACCOUNTING_LOG_ACCOUNTING_INTERIM = ACCOUNTINGPROPERTIES.LOG_ACCOUNTING_INTERIM;
pub const PROPERTY_ACCOUNTING_LOG_AUTHENTICATION = ACCOUNTINGPROPERTIES.LOG_AUTHENTICATION;
pub const PROPERTY_ACCOUNTING_LOG_OPEN_NEW_FREQUENCY = ACCOUNTINGPROPERTIES.LOG_OPEN_NEW_FREQUENCY;
pub const PROPERTY_ACCOUNTING_LOG_OPEN_NEW_SIZE = ACCOUNTINGPROPERTIES.LOG_OPEN_NEW_SIZE;
pub const PROPERTY_ACCOUNTING_LOG_FILE_DIRECTORY = ACCOUNTINGPROPERTIES.LOG_FILE_DIRECTORY;
pub const PROPERTY_ACCOUNTING_LOG_IAS1_FORMAT = ACCOUNTINGPROPERTIES.LOG_IAS1_FORMAT;
pub const PROPERTY_ACCOUNTING_LOG_ENABLE_LOGGING = ACCOUNTINGPROPERTIES.LOG_ENABLE_LOGGING;
pub const PROPERTY_ACCOUNTING_LOG_DELETE_IF_FULL = ACCOUNTINGPROPERTIES.LOG_DELETE_IF_FULL;
pub const PROPERTY_ACCOUNTING_SQL_MAX_SESSIONS = ACCOUNTINGPROPERTIES.SQL_MAX_SESSIONS;
pub const PROPERTY_ACCOUNTING_LOG_AUTHENTICATION_INTERIM = ACCOUNTINGPROPERTIES.LOG_AUTHENTICATION_INTERIM;
pub const PROPERTY_ACCOUNTING_LOG_FILE_IS_BACKUP = ACCOUNTINGPROPERTIES.LOG_FILE_IS_BACKUP;
pub const PROPERTY_ACCOUNTING_DISCARD_REQUEST_ON_FAILURE = ACCOUNTINGPROPERTIES.DISCARD_REQUEST_ON_FAILURE;
pub const NAPPROPERTIES = enum(i32) {
NAP_POLICIES_COLLECTION = 1026,
SHV_TEMPLATES_COLLECTION = 1027,
};
pub const PROPERTY_NAP_POLICIES_COLLECTION = NAPPROPERTIES.NAP_POLICIES_COLLECTION;
pub const PROPERTY_SHV_TEMPLATES_COLLECTION = NAPPROPERTIES.SHV_TEMPLATES_COLLECTION;
pub const RADIUSPROXYPROPERTIES = enum(i32) {
S = 1026,
};
pub const PROPERTY_RADIUSPROXY_SERVERGROUPS = RADIUSPROXYPROPERTIES.S;
pub const REMEDIATIONSERVERSPROPERTIES = enum(i32) {
S = 1026,
};
pub const PROPERTY_REMEDIATIONSERVERS_SERVERGROUPS = REMEDIATIONSERVERSPROPERTIES.S;
pub const SHV_COMBINATION_TYPE = enum(i32) {
ALL_PASS = 0,
ALL_FAIL = 1,
ONE_OR_MORE_PASS = 2,
ONE_OR_MORE_FAIL = 3,
ONE_OR_MORE_INFECTED = 4,
ONE_OR_MORE_TRANSITIONAL = 5,
ONE_OR_MORE_UNKNOWN = 6,
MAX = 7,
};
pub const SHV_COMBINATION_TYPE_ALL_PASS = SHV_COMBINATION_TYPE.ALL_PASS;
pub const SHV_COMBINATION_TYPE_ALL_FAIL = SHV_COMBINATION_TYPE.ALL_FAIL;
pub const SHV_COMBINATION_TYPE_ONE_OR_MORE_PASS = SHV_COMBINATION_TYPE.ONE_OR_MORE_PASS;
pub const SHV_COMBINATION_TYPE_ONE_OR_MORE_FAIL = SHV_COMBINATION_TYPE.ONE_OR_MORE_FAIL;
pub const SHV_COMBINATION_TYPE_ONE_OR_MORE_INFECTED = SHV_COMBINATION_TYPE.ONE_OR_MORE_INFECTED;
pub const SHV_COMBINATION_TYPE_ONE_OR_MORE_TRANSITIONAL = SHV_COMBINATION_TYPE.ONE_OR_MORE_TRANSITIONAL;
pub const SHV_COMBINATION_TYPE_ONE_OR_MORE_UNKNOWN = SHV_COMBINATION_TYPE.ONE_OR_MORE_UNKNOWN;
pub const SHV_COMBINATION_TYPE_MAX = SHV_COMBINATION_TYPE.MAX;
pub const SERVICE_TYPE = enum(i32) {
IAS = 0,
RAS = 1,
RAMGMTSVC = 2,
MAX = 3,
};
pub const SERVICE_TYPE_IAS = SERVICE_TYPE.IAS;
pub const SERVICE_TYPE_RAS = SERVICE_TYPE.RAS;
pub const SERVICE_TYPE_RAMGMTSVC = SERVICE_TYPE.RAMGMTSVC;
pub const SERVICE_TYPE_MAX = SERVICE_TYPE.MAX;
pub const IASOSTYPE = enum(i32) {
@"4_WORKSTATION" = 0,
@"5_WORKSTATION" = 1,
@"6_WORKSTATION" = 2,
@"6_1_WORKSTATION" = 3,
@"6_2_WORKSTATION" = 4,
@"6_3_WORKSTATION" = 5,
@"10_0_WORKSTATION" = 6,
@"4_SERVER" = 7,
@"5_SERVER" = 8,
@"6_SERVER" = 9,
@"6_1_SERVER" = 10,
@"6_2_SERVER" = 11,
@"6_3_SERVER" = 12,
@"10_0_SERVER" = 13,
};
pub const SYSTEM_TYPE_NT4_WORKSTATION = IASOSTYPE.@"4_WORKSTATION";
pub const SYSTEM_TYPE_NT5_WORKSTATION = IASOSTYPE.@"5_WORKSTATION";
pub const SYSTEM_TYPE_NT6_WORKSTATION = IASOSTYPE.@"6_WORKSTATION";
pub const SYSTEM_TYPE_NT6_1_WORKSTATION = IASOSTYPE.@"6_1_WORKSTATION";
pub const SYSTEM_TYPE_NT6_2_WORKSTATION = IASOSTYPE.@"6_2_WORKSTATION";
pub const SYSTEM_TYPE_NT6_3_WORKSTATION = IASOSTYPE.@"6_3_WORKSTATION";
pub const SYSTEM_TYPE_NT10_0_WORKSTATION = IASOSTYPE.@"10_0_WORKSTATION";
pub const SYSTEM_TYPE_NT4_SERVER = IASOSTYPE.@"4_SERVER";
pub const SYSTEM_TYPE_NT5_SERVER = IASOSTYPE.@"5_SERVER";
pub const SYSTEM_TYPE_NT6_SERVER = IASOSTYPE.@"6_SERVER";
pub const SYSTEM_TYPE_NT6_1_SERVER = IASOSTYPE.@"6_1_SERVER";
pub const SYSTEM_TYPE_NT6_2_SERVER = IASOSTYPE.@"6_2_SERVER";
pub const SYSTEM_TYPE_NT6_3_SERVER = IASOSTYPE.@"6_3_SERVER";
pub const SYSTEM_TYPE_NT10_0_SERVER = IASOSTYPE.@"10_0_SERVER";
pub const IASDOMAINTYPE = enum(i32) {
NONE = 0,
NT4 = 1,
NT5 = 2,
MIXED = 3,
};
pub const DOMAIN_TYPE_NONE = IASDOMAINTYPE.NONE;
pub const DOMAIN_TYPE_NT4 = IASDOMAINTYPE.NT4;
pub const DOMAIN_TYPE_NT5 = IASDOMAINTYPE.NT5;
pub const DOMAIN_TYPE_MIXED = IASDOMAINTYPE.MIXED;
pub const IASDATASTORE = enum(i32) {
LOCAL = 0,
DIRECTORY = 1,
};
pub const DATA_STORE_LOCAL = IASDATASTORE.LOCAL;
pub const DATA_STORE_DIRECTORY = IASDATASTORE.DIRECTORY;
// TODO: this type is limited to platform 'windowsServer2008'
const IID_ISdoMachine_Value = @import("../zig.zig").Guid.initString("479f6e75-49a2-11d2-8eca-00c04fc2f519");
pub const IID_ISdoMachine = &IID_ISdoMachine_Value;
pub const ISdoMachine = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
Attach: fn(
self: *const ISdoMachine,
bstrComputerName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDictionarySDO: fn(
self: *const ISdoMachine,
ppDictionarySDO: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetServiceSDO: fn(
self: *const ISdoMachine,
eDataStore: IASDATASTORE,
bstrServiceName: ?BSTR,
ppServiceSDO: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetUserSDO: fn(
self: *const ISdoMachine,
eDataStore: IASDATASTORE,
bstrUserName: ?BSTR,
ppUserSDO: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOSType: fn(
self: *const ISdoMachine,
eOSType: ?*IASOSTYPE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDomainType: fn(
self: *const ISdoMachine,
eDomainType: ?*IASDOMAINTYPE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsDirectoryAvailable: fn(
self: *const ISdoMachine,
boolDirectoryAvailable: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetAttachedComputer: fn(
self: *const ISdoMachine,
bstrComputerName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetSDOSchema: fn(
self: *const ISdoMachine,
ppSDOSchema: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_Attach(self: *const T, bstrComputerName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).Attach(@ptrCast(*const ISdoMachine, self), bstrComputerName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_GetDictionarySDO(self: *const T, ppDictionarySDO: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).GetDictionarySDO(@ptrCast(*const ISdoMachine, self), ppDictionarySDO);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_GetServiceSDO(self: *const T, eDataStore: IASDATASTORE, bstrServiceName: ?BSTR, ppServiceSDO: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).GetServiceSDO(@ptrCast(*const ISdoMachine, self), eDataStore, bstrServiceName, ppServiceSDO);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_GetUserSDO(self: *const T, eDataStore: IASDATASTORE, bstrUserName: ?BSTR, ppUserSDO: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).GetUserSDO(@ptrCast(*const ISdoMachine, self), eDataStore, bstrUserName, ppUserSDO);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_GetOSType(self: *const T, eOSType: ?*IASOSTYPE) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).GetOSType(@ptrCast(*const ISdoMachine, self), eOSType);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_GetDomainType(self: *const T, eDomainType: ?*IASDOMAINTYPE) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).GetDomainType(@ptrCast(*const ISdoMachine, self), eDomainType);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_IsDirectoryAvailable(self: *const T, boolDirectoryAvailable: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).IsDirectoryAvailable(@ptrCast(*const ISdoMachine, self), boolDirectoryAvailable);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_GetAttachedComputer(self: *const T, bstrComputerName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).GetAttachedComputer(@ptrCast(*const ISdoMachine, self), bstrComputerName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine_GetSDOSchema(self: *const T, ppSDOSchema: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine.VTable, self.vtable).GetSDOSchema(@ptrCast(*const ISdoMachine, self), ppSDOSchema);
}
};}
pub usingnamespace MethodMixin(@This());
};
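// Added sketch (not part of the generated bindings): the MethodMixin pattern above
// namespaces every vtable call with its interface name, so a caller that already holds
// an `*const ISdoMachine` (e.g. obtained via COM activation elsewhere) can query the OS
// type as shown here. `exampleGetOsType` is a hypothetical helper, not a Windows API.
pub fn exampleGetOsType(machine: *const ISdoMachine, out_os_type: *IASOSTYPE) HRESULT {
    // The mixin-generated wrapper casts `self` back to the concrete interface and
    // forwards to the vtable slot.
    return machine.ISdoMachine_GetOSType(out_os_type);
}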
const IID_ISdoMachine2_Value = @import("../zig.zig").Guid.initString("518e5ffe-d8ce-4f7e-a5db-b40a35419d3b");
pub const IID_ISdoMachine2 = &IID_ISdoMachine2_Value;
pub const ISdoMachine2 = extern struct {
pub const VTable = extern struct {
base: ISdoMachine.VTable,
GetTemplatesSDO: fn(
self: *const ISdoMachine2,
bstrServiceName: ?BSTR,
ppTemplatesSDO: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnableTemplates: fn(
self: *const ISdoMachine2,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SyncConfigAgainstTemplates: fn(
self: *const ISdoMachine2,
bstrServiceName: ?BSTR,
ppConfigRoot: ?*?*IUnknown,
ppTemplatesRoot: ?*?*IUnknown,
bForcedSync: i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ImportRemoteTemplates: fn(
self: *const ISdoMachine2,
pLocalTemplatesRoot: ?*IUnknown,
bstrRemoteMachineName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reload: fn(
self: *const ISdoMachine2,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace ISdoMachine.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine2_GetTemplatesSDO(self: *const T, bstrServiceName: ?BSTR, ppTemplatesSDO: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine2.VTable, self.vtable).GetTemplatesSDO(@ptrCast(*const ISdoMachine2, self), bstrServiceName, ppTemplatesSDO);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine2_EnableTemplates(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine2.VTable, self.vtable).EnableTemplates(@ptrCast(*const ISdoMachine2, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine2_SyncConfigAgainstTemplates(self: *const T, bstrServiceName: ?BSTR, ppConfigRoot: ?*?*IUnknown, ppTemplatesRoot: ?*?*IUnknown, bForcedSync: i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine2.VTable, self.vtable).SyncConfigAgainstTemplates(@ptrCast(*const ISdoMachine2, self), bstrServiceName, ppConfigRoot, ppTemplatesRoot, bForcedSync);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine2_ImportRemoteTemplates(self: *const T, pLocalTemplatesRoot: ?*IUnknown, bstrRemoteMachineName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine2.VTable, self.vtable).ImportRemoteTemplates(@ptrCast(*const ISdoMachine2, self), pLocalTemplatesRoot, bstrRemoteMachineName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoMachine2_Reload(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoMachine2.VTable, self.vtable).Reload(@ptrCast(*const ISdoMachine2, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windowsServer2008'
const IID_ISdoServiceControl_Value = @import("../zig.zig").Guid.initString("479f6e74-49a2-11d2-8eca-00c04fc2f519");
pub const IID_ISdoServiceControl = &IID_ISdoServiceControl_Value;
pub const ISdoServiceControl = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
StartService: fn(
self: *const ISdoServiceControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
StopService: fn(
self: *const ISdoServiceControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetServiceStatus: fn(
self: *const ISdoServiceControl,
status: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ResetService: fn(
self: *const ISdoServiceControl,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoServiceControl_StartService(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoServiceControl.VTable, self.vtable).StartService(@ptrCast(*const ISdoServiceControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoServiceControl_StopService(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoServiceControl.VTable, self.vtable).StopService(@ptrCast(*const ISdoServiceControl, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoServiceControl_GetServiceStatus(self: *const T, status: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoServiceControl.VTable, self.vtable).GetServiceStatus(@ptrCast(*const ISdoServiceControl, self), status);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoServiceControl_ResetService(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoServiceControl.VTable, self.vtable).ResetService(@ptrCast(*const ISdoServiceControl, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windowsServer2008'
const IID_ISdo_Value = @import("../zig.zig").Guid.initString("56bc53de-96db-11d1-bf3f-000000000000");
pub const IID_ISdo = &IID_ISdo_Value;
pub const ISdo = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
GetPropertyInfo: fn(
self: *const ISdo,
Id: i32,
ppPropertyInfo: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProperty: fn(
self: *const ISdo,
Id: i32,
pValue: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PutProperty: fn(
self: *const ISdo,
Id: i32,
pValue: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ResetProperty: fn(
self: *const ISdo,
Id: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Apply: fn(
self: *const ISdo,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Restore: fn(
self: *const ISdo,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ISdo,
ppEnumVARIANT: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdo_GetPropertyInfo(self: *const T, Id: i32, ppPropertyInfo: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdo.VTable, self.vtable).GetPropertyInfo(@ptrCast(*const ISdo, self), Id, ppPropertyInfo);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdo_GetProperty(self: *const T, Id: i32, pValue: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdo.VTable, self.vtable).GetProperty(@ptrCast(*const ISdo, self), Id, pValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdo_PutProperty(self: *const T, Id: i32, pValue: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdo.VTable, self.vtable).PutProperty(@ptrCast(*const ISdo, self), Id, pValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdo_ResetProperty(self: *const T, Id: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdo.VTable, self.vtable).ResetProperty(@ptrCast(*const ISdo, self), Id);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdo_Apply(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdo.VTable, self.vtable).Apply(@ptrCast(*const ISdo, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdo_Restore(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdo.VTable, self.vtable).Restore(@ptrCast(*const ISdo, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdo_get__NewEnum(self: *const T, ppEnumVARIANT: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdo.VTable, self.vtable).get__NewEnum(@ptrCast(*const ISdo, self), ppEnumVARIANT);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windowsServer2008'
const IID_ISdoCollection_Value = @import("../zig.zig").Guid.initString("56bc53e2-96db-11d1-bf3f-000000000000");
pub const IID_ISdoCollection = &IID_ISdoCollection_Value;
pub const ISdoCollection = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Count: fn(
self: *const ISdoCollection,
pCount: ?*i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Add: fn(
self: *const ISdoCollection,
bstrName: ?BSTR,
ppItem: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Remove: fn(
self: *const ISdoCollection,
pItem: ?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RemoveAll: fn(
self: *const ISdoCollection,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reload: fn(
self: *const ISdoCollection,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsNameUnique: fn(
self: *const ISdoCollection,
bstrName: ?BSTR,
pBool: ?*i16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Item: fn(
self: *const ISdoCollection,
Name: ?*VARIANT,
pItem: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get__NewEnum: fn(
self: *const ISdoCollection,
ppEnumVARIANT: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_get_Count(self: *const T, pCount: ?*i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).get_Count(@ptrCast(*const ISdoCollection, self), pCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_Add(self: *const T, bstrName: ?BSTR, ppItem: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).Add(@ptrCast(*const ISdoCollection, self), bstrName, ppItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_Remove(self: *const T, pItem: ?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).Remove(@ptrCast(*const ISdoCollection, self), pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_RemoveAll(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).RemoveAll(@ptrCast(*const ISdoCollection, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_Reload(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).Reload(@ptrCast(*const ISdoCollection, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_IsNameUnique(self: *const T, bstrName: ?BSTR, pBool: ?*i16) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).IsNameUnique(@ptrCast(*const ISdoCollection, self), bstrName, pBool);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_Item(self: *const T, Name: ?*VARIANT, pItem: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).Item(@ptrCast(*const ISdoCollection, self), Name, pItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoCollection_get__NewEnum(self: *const T, ppEnumVARIANT: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoCollection.VTable, self.vtable).get__NewEnum(@ptrCast(*const ISdoCollection, self), ppEnumVARIANT);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_ITemplateSdo_Value = @import("../zig.zig").Guid.initString("8aa85302-d2e2-4e20-8b1f-a571e437d6c9");
pub const IID_ITemplateSdo = &IID_ITemplateSdo_Value;
pub const ITemplateSdo = extern struct {
pub const VTable = extern struct {
base: ISdo.VTable,
AddToCollection: fn(
self: *const ITemplateSdo,
bstrName: ?BSTR,
pCollection: ?*IDispatch,
ppItem: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AddToSdo: fn(
self: *const ITemplateSdo,
bstrName: ?BSTR,
pSdoTarget: ?*IDispatch,
ppItem: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AddToSdoAsProperty: fn(
self: *const ITemplateSdo,
pSdoTarget: ?*IDispatch,
id: i32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace ISdo.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITemplateSdo_AddToCollection(self: *const T, bstrName: ?BSTR, pCollection: ?*IDispatch, ppItem: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ITemplateSdo.VTable, self.vtable).AddToCollection(@ptrCast(*const ITemplateSdo, self), bstrName, pCollection, ppItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITemplateSdo_AddToSdo(self: *const T, bstrName: ?BSTR, pSdoTarget: ?*IDispatch, ppItem: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ITemplateSdo.VTable, self.vtable).AddToSdo(@ptrCast(*const ITemplateSdo, self), bstrName, pSdoTarget, ppItem);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ITemplateSdo_AddToSdoAsProperty(self: *const T, pSdoTarget: ?*IDispatch, id: i32) callconv(.Inline) HRESULT {
return @ptrCast(*const ITemplateSdo.VTable, self.vtable).AddToSdoAsProperty(@ptrCast(*const ITemplateSdo, self), pSdoTarget, id);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windowsServer2008'
const IID_ISdoDictionaryOld_Value = @import("../zig.zig").Guid.initString("d432e5f4-53d8-11d2-9a3a-00c04fb998ac");
pub const IID_ISdoDictionaryOld = &IID_ISdoDictionaryOld_Value;
pub const ISdoDictionaryOld = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
EnumAttributes: fn(
self: *const ISdoDictionaryOld,
Id: ?*VARIANT,
pValues: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetAttributeInfo: fn(
self: *const ISdoDictionaryOld,
Id: ATTRIBUTEID,
pInfoIDs: ?*VARIANT,
pInfoValues: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumAttributeValues: fn(
self: *const ISdoDictionaryOld,
Id: ATTRIBUTEID,
pValueIds: ?*VARIANT,
pValuesDesc: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateAttribute: fn(
self: *const ISdoDictionaryOld,
Id: ATTRIBUTEID,
ppAttributeObject: ?*?*IDispatch,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetAttributeID: fn(
self: *const ISdoDictionaryOld,
bstrAttributeName: ?BSTR,
pId: ?*ATTRIBUTEID,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoDictionaryOld_EnumAttributes(self: *const T, Id: ?*VARIANT, pValues: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoDictionaryOld.VTable, self.vtable).EnumAttributes(@ptrCast(*const ISdoDictionaryOld, self), Id, pValues);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoDictionaryOld_GetAttributeInfo(self: *const T, Id: ATTRIBUTEID, pInfoIDs: ?*VARIANT, pInfoValues: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoDictionaryOld.VTable, self.vtable).GetAttributeInfo(@ptrCast(*const ISdoDictionaryOld, self), Id, pInfoIDs, pInfoValues);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoDictionaryOld_EnumAttributeValues(self: *const T, Id: ATTRIBUTEID, pValueIds: ?*VARIANT, pValuesDesc: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoDictionaryOld.VTable, self.vtable).EnumAttributeValues(@ptrCast(*const ISdoDictionaryOld, self), Id, pValueIds, pValuesDesc);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoDictionaryOld_CreateAttribute(self: *const T, Id: ATTRIBUTEID, ppAttributeObject: ?*?*IDispatch) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoDictionaryOld.VTable, self.vtable).CreateAttribute(@ptrCast(*const ISdoDictionaryOld, self), Id, ppAttributeObject);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn ISdoDictionaryOld_GetAttributeID(self: *const T, bstrAttributeName: ?BSTR, pId: ?*ATTRIBUTEID) callconv(.Inline) HRESULT {
return @ptrCast(*const ISdoDictionaryOld.VTable, self.vtable).GetAttributeID(@ptrCast(*const ISdoDictionaryOld, self), bstrAttributeName, pId);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const RADIUS_ATTRIBUTE_TYPE = enum(i32) {
Minimum = 0,
UserName = 1,
UserPassword = 2,
CHAPPassword = 3,
NASIPAddress = 4,
NASPort = 5,
ServiceType = 6,
FramedProtocol = 7,
FramedIPAddress = 8,
FramedIPNetmask = 9,
FramedRouting = 10,
FilterId = 11,
FramedMTU = 12,
FramedCompression = 13,
LoginIPHost = 14,
LoginService = 15,
LoginPort = 16,
ReplyMessage = 18,
CallbackNumber = 19,
CallbackId = 20,
FramedRoute = 22,
FramedIPXNetwork = 23,
State = 24,
Class = 25,
VendorSpecific = 26,
SessionTimeout = 27,
IdleTimeout = 28,
TerminationAction = 29,
CalledStationId = 30,
CallingStationId = 31,
NASIdentifier = 32,
ProxyState = 33,
LoginLATService = 34,
LoginLATNode = 35,
LoginLATGroup = 36,
FramedAppleTalkLink = 37,
FramedAppleTalkNetwork = 38,
FramedAppleTalkZone = 39,
AcctStatusType = 40,
AcctDelayTime = 41,
AcctInputOctets = 42,
AcctOutputOctets = 43,
AcctSessionId = 44,
AcctAuthentic = 45,
AcctSessionTime = 46,
AcctInputPackets = 47,
AcctOutputPackets = 48,
AcctTerminationCause = 49,
CHAPChallenge = 60,
NASPortType = 61,
PortLimit = 62,
TunnelType = 64,
MediumType = 65,
TunnelPassword = 69,
TunnelPrivateGroupID = 81,
NASIPv6Address = 95,
FramedInterfaceId = 96,
FramedIPv6Prefix = 97,
LoginIPv6Host = 98,
FramedIPv6Route = 99,
FramedIPv6Pool = 100,
Code = 262,
Identifier = 263,
Authenticator = 264,
SrcIPAddress = 265,
SrcPort = 266,
Provider = 267,
StrippedUserName = 268,
FQUserName = 269,
PolicyName = 270,
UniqueId = 271,
ExtensionState = 272,
EAPTLV = 273,
RejectReasonCode = 274,
CRPPolicyName = 275,
ProviderName = 276,
ClearTextPassword = 277,
SrcIPv6Address = 278,
CertificateThumbprint = 279,
};
pub const ratMinimum = RADIUS_ATTRIBUTE_TYPE.Minimum;
pub const ratUserName = RADIUS_ATTRIBUTE_TYPE.UserName;
pub const ratUserPassword = RADIUS_ATTRIBUTE_TYPE.UserPassword;
pub const ratCHAPPassword = RADIUS_ATTRIBUTE_TYPE.CHAPPassword;
pub const ratNASIPAddress = RADIUS_ATTRIBUTE_TYPE.NASIPAddress;
pub const ratNASPort = RADIUS_ATTRIBUTE_TYPE.NASPort;
pub const ratServiceType = RADIUS_ATTRIBUTE_TYPE.ServiceType;
pub const ratFramedProtocol = RADIUS_ATTRIBUTE_TYPE.FramedProtocol;
pub const ratFramedIPAddress = RADIUS_ATTRIBUTE_TYPE.FramedIPAddress;
pub const ratFramedIPNetmask = RADIUS_ATTRIBUTE_TYPE.FramedIPNetmask;
pub const ratFramedRouting = RADIUS_ATTRIBUTE_TYPE.FramedRouting;
pub const ratFilterId = RADIUS_ATTRIBUTE_TYPE.FilterId;
pub const ratFramedMTU = RADIUS_ATTRIBUTE_TYPE.FramedMTU;
pub const ratFramedCompression = RADIUS_ATTRIBUTE_TYPE.FramedCompression;
pub const ratLoginIPHost = RADIUS_ATTRIBUTE_TYPE.LoginIPHost;
pub const ratLoginService = RADIUS_ATTRIBUTE_TYPE.LoginService;
pub const ratLoginPort = RADIUS_ATTRIBUTE_TYPE.LoginPort;
pub const ratReplyMessage = RADIUS_ATTRIBUTE_TYPE.ReplyMessage;
pub const ratCallbackNumber = RADIUS_ATTRIBUTE_TYPE.CallbackNumber;
pub const ratCallbackId = RADIUS_ATTRIBUTE_TYPE.CallbackId;
pub const ratFramedRoute = RADIUS_ATTRIBUTE_TYPE.FramedRoute;
pub const ratFramedIPXNetwork = RADIUS_ATTRIBUTE_TYPE.FramedIPXNetwork;
pub const ratState = RADIUS_ATTRIBUTE_TYPE.State;
pub const ratClass = RADIUS_ATTRIBUTE_TYPE.Class;
pub const ratVendorSpecific = RADIUS_ATTRIBUTE_TYPE.VendorSpecific;
pub const ratSessionTimeout = RADIUS_ATTRIBUTE_TYPE.SessionTimeout;
pub const ratIdleTimeout = RADIUS_ATTRIBUTE_TYPE.IdleTimeout;
pub const ratTerminationAction = RADIUS_ATTRIBUTE_TYPE.TerminationAction;
pub const ratCalledStationId = RADIUS_ATTRIBUTE_TYPE.CalledStationId;
pub const ratCallingStationId = RADIUS_ATTRIBUTE_TYPE.CallingStationId;
pub const ratNASIdentifier = RADIUS_ATTRIBUTE_TYPE.NASIdentifier;
pub const ratProxyState = RADIUS_ATTRIBUTE_TYPE.ProxyState;
pub const ratLoginLATService = RADIUS_ATTRIBUTE_TYPE.LoginLATService;
pub const ratLoginLATNode = RADIUS_ATTRIBUTE_TYPE.LoginLATNode;
pub const ratLoginLATGroup = RADIUS_ATTRIBUTE_TYPE.LoginLATGroup;
pub const ratFramedAppleTalkLink = RADIUS_ATTRIBUTE_TYPE.FramedAppleTalkLink;
pub const ratFramedAppleTalkNetwork = RADIUS_ATTRIBUTE_TYPE.FramedAppleTalkNetwork;
pub const ratFramedAppleTalkZone = RADIUS_ATTRIBUTE_TYPE.FramedAppleTalkZone;
pub const ratAcctStatusType = RADIUS_ATTRIBUTE_TYPE.AcctStatusType;
pub const ratAcctDelayTime = RADIUS_ATTRIBUTE_TYPE.AcctDelayTime;
pub const ratAcctInputOctets = RADIUS_ATTRIBUTE_TYPE.AcctInputOctets;
pub const ratAcctOutputOctets = RADIUS_ATTRIBUTE_TYPE.AcctOutputOctets;
pub const ratAcctSessionId = RADIUS_ATTRIBUTE_TYPE.AcctSessionId;
pub const ratAcctAuthentic = RADIUS_ATTRIBUTE_TYPE.AcctAuthentic;
pub const ratAcctSessionTime = RADIUS_ATTRIBUTE_TYPE.AcctSessionTime;
pub const ratAcctInputPackets = RADIUS_ATTRIBUTE_TYPE.AcctInputPackets;
pub const ratAcctOutputPackets = RADIUS_ATTRIBUTE_TYPE.AcctOutputPackets;
pub const ratAcctTerminationCause = RADIUS_ATTRIBUTE_TYPE.AcctTerminationCause;
pub const ratCHAPChallenge = RADIUS_ATTRIBUTE_TYPE.CHAPChallenge;
pub const ratNASPortType = RADIUS_ATTRIBUTE_TYPE.NASPortType;
pub const ratPortLimit = RADIUS_ATTRIBUTE_TYPE.PortLimit;
pub const ratTunnelType = RADIUS_ATTRIBUTE_TYPE.TunnelType;
pub const ratMediumType = RADIUS_ATTRIBUTE_TYPE.MediumType;
pub const ratTunnelPassword = RADIUS_ATTRIBUTE_TYPE.TunnelPassword;
pub const ratTunnelPrivateGroupID = RADIUS_ATTRIBUTE_TYPE.TunnelPrivateGroupID;
pub const ratNASIPv6Address = RADIUS_ATTRIBUTE_TYPE.NASIPv6Address;
pub const ratFramedInterfaceId = RADIUS_ATTRIBUTE_TYPE.FramedInterfaceId;
pub const ratFramedIPv6Prefix = RADIUS_ATTRIBUTE_TYPE.FramedIPv6Prefix;
pub const ratLoginIPv6Host = RADIUS_ATTRIBUTE_TYPE.LoginIPv6Host;
pub const ratFramedIPv6Route = RADIUS_ATTRIBUTE_TYPE.FramedIPv6Route;
pub const ratFramedIPv6Pool = RADIUS_ATTRIBUTE_TYPE.FramedIPv6Pool;
pub const ratCode = RADIUS_ATTRIBUTE_TYPE.Code;
pub const ratIdentifier = RADIUS_ATTRIBUTE_TYPE.Identifier;
pub const ratAuthenticator = RADIUS_ATTRIBUTE_TYPE.Authenticator;
pub const ratSrcIPAddress = RADIUS_ATTRIBUTE_TYPE.SrcIPAddress;
pub const ratSrcPort = RADIUS_ATTRIBUTE_TYPE.SrcPort;
pub const ratProvider = RADIUS_ATTRIBUTE_TYPE.Provider;
pub const ratStrippedUserName = RADIUS_ATTRIBUTE_TYPE.StrippedUserName;
pub const ratFQUserName = RADIUS_ATTRIBUTE_TYPE.FQUserName;
pub const ratPolicyName = RADIUS_ATTRIBUTE_TYPE.PolicyName;
pub const ratUniqueId = RADIUS_ATTRIBUTE_TYPE.UniqueId;
pub const ratExtensionState = RADIUS_ATTRIBUTE_TYPE.ExtensionState;
pub const ratEAPTLV = RADIUS_ATTRIBUTE_TYPE.EAPTLV;
pub const ratRejectReasonCode = RADIUS_ATTRIBUTE_TYPE.RejectReasonCode;
pub const ratCRPPolicyName = RADIUS_ATTRIBUTE_TYPE.CRPPolicyName;
pub const ratProviderName = RADIUS_ATTRIBUTE_TYPE.ProviderName;
pub const ratClearTextPassword = RADIUS_ATTRIBUTE_TYPE.ClearTextPassword;
pub const ratSrcIPv6Address = RADIUS_ATTRIBUTE_TYPE.SrcIPv6Address;
pub const ratCertificateThumbprint = RADIUS_ATTRIBUTE_TYPE.CertificateThumbprint;
pub const RADIUS_CODE = enum(i32) {
Unknown = 0,
AccessRequest = 1,
AccessAccept = 2,
AccessReject = 3,
AccountingRequest = 4,
AccountingResponse = 5,
AccessChallenge = 11,
Discard = 256,
};
pub const rcUnknown = RADIUS_CODE.Unknown;
pub const rcAccessRequest = RADIUS_CODE.AccessRequest;
pub const rcAccessAccept = RADIUS_CODE.AccessAccept;
pub const rcAccessReject = RADIUS_CODE.AccessReject;
pub const rcAccountingRequest = RADIUS_CODE.AccountingRequest;
pub const rcAccountingResponse = RADIUS_CODE.AccountingResponse;
pub const rcAccessChallenge = RADIUS_CODE.AccessChallenge;
pub const rcDiscard = RADIUS_CODE.Discard;
pub const RADIUS_AUTHENTICATION_PROVIDER = enum(i32) {
Unknown = 0,
UsersFile = 1,
Proxy = 2,
WindowsNT = 3,
MCIS = 4,
ODBC = 5,
None = 6,
};
pub const rapUnknown = RADIUS_AUTHENTICATION_PROVIDER.Unknown;
pub const rapUsersFile = RADIUS_AUTHENTICATION_PROVIDER.UsersFile;
pub const rapProxy = RADIUS_AUTHENTICATION_PROVIDER.Proxy;
pub const rapWindowsNT = RADIUS_AUTHENTICATION_PROVIDER.WindowsNT;
pub const rapMCIS = RADIUS_AUTHENTICATION_PROVIDER.MCIS;
pub const rapODBC = RADIUS_AUTHENTICATION_PROVIDER.ODBC;
pub const rapNone = RADIUS_AUTHENTICATION_PROVIDER.None;
pub const RADIUS_REJECT_REASON_CODE = enum(i32) {
Undefined = 0,
AccountUnknown = 1,
AccountDisabled = 2,
AccountExpired = 3,
AuthenticationFailure = 4,
};
pub const rrrcUndefined = RADIUS_REJECT_REASON_CODE.Undefined;
pub const rrrcAccountUnknown = RADIUS_REJECT_REASON_CODE.AccountUnknown;
pub const rrrcAccountDisabled = RADIUS_REJECT_REASON_CODE.AccountDisabled;
pub const rrrcAccountExpired = RADIUS_REJECT_REASON_CODE.AccountExpired;
pub const rrrcAuthenticationFailure = RADIUS_REJECT_REASON_CODE.AuthenticationFailure;
pub const RADIUS_DATA_TYPE = enum(i32) {
Unknown = 0,
String = 1,
Address = 2,
Integer = 3,
Time = 4,
Ipv6Address = 5,
};
pub const rdtUnknown = RADIUS_DATA_TYPE.Unknown;
pub const rdtString = RADIUS_DATA_TYPE.String;
pub const rdtAddress = RADIUS_DATA_TYPE.Address;
pub const rdtInteger = RADIUS_DATA_TYPE.Integer;
pub const rdtTime = RADIUS_DATA_TYPE.Time;
pub const rdtIpv6Address = RADIUS_DATA_TYPE.Ipv6Address;
pub const RADIUS_ATTRIBUTE = extern struct {
dwAttrType: u32,
fDataType: RADIUS_DATA_TYPE,
cbDataLength: u32,
Anonymous: extern union {
dwValue: u32,
lpValue: ?*const u8,
},
};
pub const RADIUS_VSA_FORMAT = extern struct {
VendorId: [4]u8,
VendorType: u8,
VendorLength: u8,
AttributeSpecific: [1]u8,
};
pub const RADIUS_ACTION = enum(i32) {
Continue = 0,
Reject = 1,
Accept = 2,
};
pub const raContinue = RADIUS_ACTION.Continue;
pub const raReject = RADIUS_ACTION.Reject;
pub const raAccept = RADIUS_ACTION.Accept;
pub const PRADIUS_EXTENSION_INIT = fn(
) callconv(@import("std").os.windows.WINAPI) u32;
pub const PRADIUS_EXTENSION_TERM = fn(
) callconv(@import("std").os.windows.WINAPI) void;
pub const PRADIUS_EXTENSION_PROCESS = fn(
pAttrs: ?*const RADIUS_ATTRIBUTE,
pfAction: ?*RADIUS_ACTION,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const PRADIUS_EXTENSION_PROCESS_EX = fn(
pInAttrs: ?*const RADIUS_ATTRIBUTE,
pOutAttrs: ?*?*RADIUS_ATTRIBUTE,
pfAction: ?*RADIUS_ACTION,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const PRADIUS_EXTENSION_FREE_ATTRIBUTES = fn(
pAttrs: ?*RADIUS_ATTRIBUTE,
) callconv(@import("std").os.windows.WINAPI) void;
pub const RADIUS_EXTENSION_POINT = enum(i32) {
entication = 0,
orization = 1,
};
pub const repAuthentication = RADIUS_EXTENSION_POINT.entication;
pub const repAuthorization = RADIUS_EXTENSION_POINT.orization;
pub const RADIUS_ATTRIBUTE_ARRAY = extern struct {
cbSize: u32,
Add: isize,
AttributeAt: ?*const ?*?*?*?*?*?*?*?*?*RADIUS_ATTRIBUTE,
GetSize: isize,
InsertAt: isize,
RemoveAt: isize,
SetAt: isize,
};
pub const RADIUS_EXTENSION_CONTROL_BLOCK = extern struct {
cbSize: u32,
dwVersion: u32,
repPoint: RADIUS_EXTENSION_POINT,
rcRequestType: RADIUS_CODE,
rcResponseType: RADIUS_CODE,
GetRequest: isize,
GetResponse: isize,
SetResponseType: isize,
};
pub const PRADIUS_EXTENSION_PROCESS_2 = fn(
pECB: ?*RADIUS_EXTENSION_CONTROL_BLOCK,
) callconv(@import("std").os.windows.WINAPI) u32;
//--------------------------------------------------------------------------------
// Section: Functions (0)
//--------------------------------------------------------------------------------
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (5)
//--------------------------------------------------------------------------------
const BSTR = @import("../foundation.zig").BSTR;
const HRESULT = @import("../foundation.zig").HRESULT;
const IDispatch = @import("../system/ole_automation.zig").IDispatch;
const IUnknown = @import("../system/com.zig").IUnknown;
const VARIANT = @import("../system/ole_automation.zig").VARIANT;
test {
// The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476
if (@hasDecl(@This(), "PRADIUS_EXTENSION_INIT")) { _ = PRADIUS_EXTENSION_INIT; }
if (@hasDecl(@This(), "PRADIUS_EXTENSION_TERM")) { _ = PRADIUS_EXTENSION_TERM; }
if (@hasDecl(@This(), "PRADIUS_EXTENSION_PROCESS")) { _ = PRADIUS_EXTENSION_PROCESS; }
if (@hasDecl(@This(), "PRADIUS_EXTENSION_PROCESS_EX")) { _ = PRADIUS_EXTENSION_PROCESS_EX; }
if (@hasDecl(@This(), "PRADIUS_EXTENSION_FREE_ATTRIBUTES")) { _ = PRADIUS_EXTENSION_FREE_ATTRIBUTES; }
if (@hasDecl(@This(), "PRADIUS_EXTENSION_PROCESS_2")) { _ = PRADIUS_EXTENSION_PROCESS_2; }
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | deps/zigwin32/win32/network_management/network_policy_server.zig |
const std = @import("std");
const mem = std.mem;
const WireBuffer = @import("wire.zig").WireBuffer;
pub const Table = struct {
// a slice of our rx_buffer (with its own head and end)
buf: WireBuffer = undefined,
len: usize = 0,
const Self = @This();
pub fn init(buffer: []u8) Table {
var t = Table{
.buf = WireBuffer.init(buffer),
.len = 0,
};
t.buf.writeU32(0);
return t;
}
// Lookup a value in the table. Note we need to know the type
// we expect at compile time. We might not know this at which
// point I guess I need a union. By the time we call lookup we
// should already have validated the frame, so I think we maybe
// can't error here.
pub fn lookup(self: *Self, comptime T: type, key: []const u8) ?T {
defer self.buf.reset();
const length = self.buf.readU32();
while (self.buf.isMoreData()) {
const current_key = self.buf.readShortString();
const correct_key = std.mem.eql(u8, key, current_key);
const t = self.buf.readU8();
switch (t) {
'F' => {
var table = self.buf.readTable();
if (@TypeOf(table) == T and correct_key) return table;
},
't' => {
const b = self.buf.readBool();
if (@TypeOf(b) == T and correct_key) return b;
},
's' => {
const s = self.buf.readShortString();
if (@TypeOf(s) == T and correct_key) return s;
},
'S' => {
const s = self.buf.readLongString();
if (@TypeOf(s) == T and correct_key) return s;
},
else => {
// TODO: support all types as continue will return garbage
continue;
},
}
}
return null;
}
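// A minimal usage sketch (added, not from the original file; assumes the WireBuffer
// read/write calls round-trip as the comment above describes):
//
//     var storage: [1024]u8 = undefined;
//     var table = Table.init(storage[0..]);
//     table.insertBool("durable", true);
//     const durable = table.lookup(bool, "durable"); // => true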
pub fn insertTable(self: *Self, key: []const u8, table: *Table) void {
self.buf.writeShortString(key);
self.buf.writeU8('F');
self.buf.writeTable(table);
self.updateLength();
}
pub fn insertBool(self: *Self, key: []const u8, boolean: bool) void {
self.buf.writeShortString(key);
self.buf.writeU8('t');
self.buf.writeBool(boolean);
self.updateLength();
}
// Apparently actual implementations don't use 's' for short strings,
// so I assume they don't use short strings in tables at all.
// pub fn insertShortString(self: *Self, key: []u8, string: []u8) void {
// self.buf.writeShortString(key);
// self.buf.writeU8('s');
// self.buf.writeShortString(string);
// self.updateLength();
// }
pub fn insertLongString(self: *Self, key: []const u8, string: []const u8) void {
self.buf.writeShortString(key);
self.buf.writeU8('S');
self.buf.writeLongString(string);
self.updateLength();
}
fn updateLength(self: *Self) void {
mem.writeInt(u32, @ptrCast(*[@sizeOf(u32)]u8, &self.buf.mem[0]), @intCast(u32, self.buf.head - @sizeOf(u32)), .Big);
}
pub fn print(self: *Self) void {
for (self.buf.mem[0..self.buf.head]) |x| {
std.debug.warn("0x{x:0>2} ", .{x});
}
std.debug.warn("\n", .{});
}
}; | src/table.zig |
const std = @import("std");
const assert = std.debug.assert;
const fmt = std.fmt;
const mem = std.mem;
const os = std.os;
const log = std.log;
const config = @import("config.zig");
pub const log_level: std.log.Level = @intToEnum(std.log.Level, config.log_level);
const cli = @import("cli.zig");
const IO = @import("io.zig").IO;
const Time = @import("time.zig").Time;
const Storage = @import("storage.zig").Storage;
const MessageBus = @import("message_bus.zig").MessageBusReplica;
const StateMachine = @import("state_machine.zig").StateMachine;
const vsr = @import("vsr.zig");
const Replica = vsr.Replica(StateMachine, MessageBus, Storage, Time);
pub fn main() !void {
var io = try IO.init(128, 0);
defer io.deinit();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
switch (try cli.parse_args(allocator)) {
.init => |args| try init(&io, args.cluster, args.replica, args.dir_fd),
.start => |args| try start(
&io,
allocator,
args.cluster,
args.replica,
args.addresses,
args.dir_fd,
),
}
}
// Pad the cluster id number and the replica index with 0s
const filename_fmt = "cluster_{d:0>10}_replica_{d:0>3}.tigerbeetle";
const filename_len = fmt.count(filename_fmt, .{ 0, 0 });
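// For example, cluster 1 and replica 0 produce "cluster_0000000001_replica_000.tigerbeetle".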
/// Create a .tigerbeetle data file for the given args and exit
fn init(io: *IO, cluster: u32, replica: u8, dir_fd: os.fd_t) !void {
// Add 1 for the terminating null byte
var buffer: [filename_len + 1]u8 = undefined;
const filename = fmt.bufPrintZ(&buffer, filename_fmt, .{ cluster, replica }) catch unreachable;
assert(filename.len == filename_len);
// TODO Expose data file size on the CLI.
_ = try io.open_file(
dir_fd,
filename,
config.journal_size_max, // TODO Double-check that we have space for redundant headers.
true,
);
log.info("initialized data file", .{});
}
/// Run as a replica server defined by the given args
fn start(
io: *IO,
allocator: mem.Allocator,
cluster: u32,
replica_index: u8,
addresses: []std.net.Address,
dir_fd: os.fd_t,
) !void {
// Add 1 for the terminating null byte
var buffer: [filename_len + 1]u8 = undefined;
const filename = fmt.bufPrintZ(&buffer, filename_fmt, .{ cluster, replica_index }) catch unreachable;
assert(filename.len == filename_len);
// TODO Expose data file size on the CLI.
const storage_fd = try io.open_file(
dir_fd,
filename,
config.journal_size_max, // TODO Double-check that we have space for redundant headers.
false,
);
var state_machine = try StateMachine.init(
allocator,
config.accounts_max,
config.transfers_max,
config.commits_max,
);
var storage = try Storage.init(config.journal_size_max, storage_fd, io);
var message_bus = try MessageBus.init(
allocator,
cluster,
addresses,
replica_index,
io,
);
var time: Time = .{};
var replica = try Replica.init(
allocator,
cluster,
@intCast(u8, addresses.len),
replica_index,
&time,
&storage,
&message_bus,
&state_machine,
);
message_bus.set_on_message(*Replica, &replica, Replica.on_message);
log.info("cluster={x} replica={}: listening on {}", .{
cluster,
replica_index,
addresses[replica_index],
});
while (true) {
replica.tick();
message_bus.tick();
try io.run_for_ns(config.tick_ms * std.time.ns_per_ms);
}
} | src/main.zig |
const MachO = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
const macho = std.macho;
const codegen = @import("../codegen.zig");
const math = std.math;
const mem = std.mem;
const trace = @import("../tracy.zig").trace;
const Type = @import("../type.zig").Type;
const Module = @import("../Module.zig");
const link = @import("../link.zig");
const File = link.File;
pub const base_tag: File.Tag = File.Tag.macho;
base: File,
/// List of all load command headers that are in the file.
/// We use it to track number and size of all commands needed by the header.
commands: std.ArrayListUnmanaged(macho.load_command) = std.ArrayListUnmanaged(macho.load_command){},
command_file_offset: ?u64 = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
segments: std.ArrayListUnmanaged(macho.segment_command_64) = std.ArrayListUnmanaged(macho.segment_command_64){},
/// Section (headers) *always* follow segment (load commands) directly!
sections: std.ArrayListUnmanaged(macho.section_64) = std.ArrayListUnmanaged(macho.section_64){},
/// Offset (index) into __TEXT segment load command.
text_segment_offset: ?u64 = null,
/// Offset (index) into __LINKEDIT segment load command.
linkedit_segment_offset: ?u64 = null,
/// Entry point load command
entry_point_cmd: ?macho.entry_point_command = null,
entry_addr: ?u64 = null,
/// The first 4GB of process' memory is reserved for the null (__PAGEZERO) segment.
/// This is also the start address for our binary.
vm_start_address: u64 = 0x100000000,
seg_table_dirty: bool = false,
error_flags: File.ErrorFlags = File.ErrorFlags{},
/// `alloc_num / alloc_den` is the factor of padding when allocating.
const alloc_num = 4;
const alloc_den = 3;
/// Default path to dyld
/// TODO instead of hardcoding it, we should probably look through some env vars and search paths
/// instead but this will do for now.
const DEFAULT_DYLD_PATH: [*:0]const u8 = "/usr/lib/dyld";
/// Default lib search path
/// TODO instead of hardcoding it, we should probably look through some env vars and search paths
/// instead but this will do for now.
const DEFAULT_LIB_SEARCH_PATH: []const u8 = "/usr/lib";
const LIB_SYSTEM_NAME: [*:0]const u8 = "System";
/// TODO we should search for libSystem and fail if it doesn't exist, instead of hardcoding it
const LIB_SYSTEM_PATH: [*:0]const u8 = DEFAULT_LIB_SEARCH_PATH ++ "/libSystem.B.dylib";
pub const TextBlock = struct {
pub const empty = TextBlock{};
};
pub const SrcFn = struct {
pub const empty = SrcFn{};
};
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
assert(options.object_format == .macho);
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
var macho_file = try allocator.create(MachO);
errdefer allocator.destroy(macho_file);
macho_file.* = openFile(allocator, file, options) catch |err| switch (err) {
error.IncrFailed => try createFile(allocator, file, options),
else => |e| return e,
};
return &macho_file.base;
}
/// Returns error.IncrFailed if incremental update could not be performed.
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
switch (options.output_mode) {
.Exe => {},
.Obj => {},
.Lib => return error.IncrFailed,
}
var self: MachO = .{
.base = .{
.file = file,
.tag = .macho,
.options = options,
.allocator = allocator,
},
};
errdefer self.deinit();
// TODO implement reading the macho file
return error.IncrFailed;
//try self.populateMissingMetadata();
//return self;
}
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
var self: MachO = .{
.base = .{
.file = file,
.tag = .macho,
.options = options,
.allocator = allocator,
},
};
errdefer self.deinit();
switch (options.output_mode) {
.Exe => {
// The first segment command for executables is always a __PAGEZERO segment.
const pagezero = .{
.cmd = macho.LC_SEGMENT_64,
.cmdsize = commandSize(@sizeOf(macho.segment_command_64)),
.segname = makeString("__PAGEZERO"),
.vmaddr = 0,
.vmsize = self.vm_start_address,
.fileoff = 0,
.filesize = 0,
.maxprot = macho.VM_PROT_NONE,
.initprot = macho.VM_PROT_NONE,
.nsects = 0,
.flags = 0,
};
try self.commands.append(allocator, .{
.cmd = pagezero.cmd,
.cmdsize = pagezero.cmdsize,
});
try self.segments.append(allocator, pagezero);
},
.Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
try self.populateMissingMetadata();
return self;
}
fn writeMachOHeader(self: *MachO) !void {
var hdr: macho.mach_header_64 = undefined;
hdr.magic = macho.MH_MAGIC_64;
const CpuInfo = struct {
cpu_type: macho.cpu_type_t,
cpu_subtype: macho.cpu_subtype_t,
};
const cpu_info: CpuInfo = switch (self.base.options.target.cpu.arch) {
.aarch64 => .{
.cpu_type = macho.CPU_TYPE_ARM64,
.cpu_subtype = macho.CPU_SUBTYPE_ARM_ALL,
},
.x86_64 => .{
.cpu_type = macho.CPU_TYPE_X86_64,
.cpu_subtype = macho.CPU_SUBTYPE_X86_64_ALL,
},
else => return error.UnsupportedMachOArchitecture,
};
hdr.cputype = cpu_info.cpu_type;
hdr.cpusubtype = cpu_info.cpu_subtype;
const filetype: u32 = switch (self.base.options.output_mode) {
.Exe => macho.MH_EXECUTE,
.Obj => macho.MH_OBJECT,
.Lib => switch (self.base.options.link_mode) {
.Static => return error.TODOStaticLibMachOType,
.Dynamic => macho.MH_DYLIB,
},
};
hdr.filetype = filetype;
const ncmds = try math.cast(u32, self.commands.items.len);
hdr.ncmds = ncmds;
var sizeof_cmds: u32 = 0;
for (self.commands.items) |cmd| {
sizeof_cmds += cmd.cmdsize;
}
hdr.sizeofcmds = sizeof_cmds;
// TODO should these be set to something else?
hdr.flags = 0;
hdr.reserved = 0;
try self.base.file.?.pwriteAll(@ptrCast([*]const u8, &hdr)[0..@sizeOf(macho.mach_header_64)], 0);
}
pub fn flush(self: *MachO, module: *Module) !void {
// Save segments first
{
const buf = try self.base.allocator.alloc(macho.segment_command_64, self.segments.items.len);
defer self.base.allocator.free(buf);
self.command_file_offset = @sizeOf(macho.mach_header_64);
for (buf) |*seg, i| {
seg.* = self.segments.items[i];
self.command_file_offset.? += self.segments.items[i].cmdsize;
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), @sizeOf(macho.mach_header_64));
}
switch (self.base.options.output_mode) {
.Exe => {
{
// Specify path to dynamic linker dyld
const cmdsize = commandSize(@sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH));
const load_dylinker = [1]macho.dylinker_command{
.{
.cmd = macho.LC_LOAD_DYLINKER,
.cmdsize = cmdsize,
.name = @sizeOf(macho.dylinker_command),
},
};
try self.commands.append(self.base.allocator, .{
.cmd = macho.LC_LOAD_DYLINKER,
.cmdsize = cmdsize,
});
try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), self.command_file_offset.?);
const file_offset = self.command_file_offset.? + @sizeOf(macho.dylinker_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylinker_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(DEFAULT_DYLD_PATH), file_offset);
self.command_file_offset.? += cmdsize;
}
{
// Link against libSystem
const cmdsize = commandSize(@sizeOf(macho.dylib_command) + mem.lenZ(LIB_SYSTEM_PATH));
// TODO Find a way to work out runtime version from the OS version triple stored in std.Target.
// In the meantime, we're gonna hardcode to the minimum compatibility version of 1.0.0.
const min_version = 0x10000;
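// Added note: Mach-O packs versions as 0xXXXXYYZZ (major.minor.patch), so 0x10000 is 1.0.0.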
const dylib = .{
.name = @sizeOf(macho.dylib_command),
.timestamp = 2, // not sure why not simply 0; this is reverse engineered from Mach-O files
.current_version = min_version,
.compatibility_version = min_version,
};
const load_dylib = [1]macho.dylib_command{
.{
.cmd = macho.LC_LOAD_DYLIB,
.cmdsize = cmdsize,
.dylib = dylib,
},
};
try self.commands.append(self.base.allocator, .{
.cmd = macho.LC_LOAD_DYLIB,
.cmdsize = cmdsize,
});
try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), self.command_file_offset.?);
const file_offset = self.command_file_offset.? + @sizeOf(macho.dylib_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylib_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), file_offset);
self.command_file_offset.? += cmdsize;
}
},
.Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
try self.writeMachOHeader();
}
}
pub fn deinit(self: *MachO) void {
self.commands.deinit(self.base.allocator);
self.segments.deinit(self.base.allocator);
self.sections.deinit(self.base.allocator);
}
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {}
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {}
pub fn updateDeclExports(
self: *MachO,
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {}
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
@panic("TODO implement getDeclVAddr for MachO");
}
pub fn populateMissingMetadata(self: *MachO) !void {
if (self.text_segment_offset == null) {
self.text_segment_offset = @intCast(u64, self.segments.items.len);
const file_size = alignSize(u64, self.base.options.program_code_size_hint, 0x1000);
log.debug("vmsize/filesize = {}", .{file_size});
const file_offset = 0;
const vm_address = self.vm_start_address; // the end of __PAGEZERO segment in VM
const protection = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE;
const cmdsize = commandSize(@sizeOf(macho.segment_command_64));
const text_segment = .{
.cmd = macho.LC_SEGMENT_64,
.cmdsize = cmdsize,
.segname = makeString("__TEXT"),
.vmaddr = vm_address,
.vmsize = file_size,
.fileoff = 0, // __TEXT segment *always* starts at 0 file offset
.filesize = 0, //file_size,
.maxprot = protection,
.initprot = protection,
.nsects = 0,
.flags = 0,
};
try self.commands.append(self.base.allocator, .{
.cmd = macho.LC_SEGMENT_64,
.cmdsize = cmdsize,
});
try self.segments.append(self.base.allocator, text_segment);
}
}
fn makeString(comptime bytes: []const u8) [16]u8 {
var buf = [_]u8{0} ** 16;
if (bytes.len > buf.len) @compileError("MachO segment/section name too long");
mem.copy(u8, buf[0..], bytes);
return buf;
}
fn alignSize(comptime Int: type, min_size: anytype, alignment: Int) Int {
const size = @intCast(Int, min_size);
if (size % alignment == 0) return size;
const div = size / alignment;
return (div + 1) * alignment;
}
fn commandSize(min_size: anytype) u32 {
return alignSize(u32, min_size, @sizeOf(u64));
}
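// Worked example (added note): alignSize(u32, 24, 8) == 24 and alignSize(u32, 25, 8) == 32,
// so commandSize rounds a load command's byte size up to the next multiple of @sizeOf(u64).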
fn addPadding(self: *MachO, size: u32, file_offset: u64) !void {
if (size == 0) return;
const buf = try self.base.allocator.alloc(u8, size);
defer self.base.allocator.free(buf);
mem.set(u8, buf[0..], 0);
try self.base.file.?.pwriteAll(buf, file_offset);
} | src-self-hosted/link/MachO.zig |
const std = @import("std");
const testing = std.testing;
pub const Crypto = struct {
const Credentials = struct {
public_key: usize,
private_key: usize,
loop_size: usize,
pub fn init(public_key: usize) Credentials {
var self = Credentials{
.public_key = public_key,
.private_key = 0,
.loop_size = 0,
};
return self;
}
pub fn guess_loop_size(self: *Credentials) void {
const INITIAL_VALUE: usize = 7;
const GUESSES: usize = 10_000_000;
var result: usize = 1;
var loop_size: usize = 1;
while (loop_size <= GUESSES) : (loop_size += 1) {
result = step(result, INITIAL_VALUE);
if (result == self.public_key) {
self.loop_size = loop_size;
// std.debug.warn("Guessed loop size for public key {} => {}\n", .{ self.public_key, self.loop_size });
return;
}
}
@panic("TOO MANY GUESSES");
}
pub fn operate(subject_number: usize, loop_size: usize) usize {
var result: usize = 1;
var c: usize = 0;
while (c < loop_size) : (c += 1) {
result = step(result, subject_number);
}
// std.debug.warn("Operated {} times on subject number {} => {}\n", .{ loop_size, subject_number, result });
return result;
}
fn step(curr: usize, subject_number: usize) usize {
const CRYPTO_DIVISOR: usize = 20201227;
var next = curr;
next *= subject_number;
next %= CRYPTO_DIVISOR;
return next;
}
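// Added note: step is one round of value <- (value * subject_number) % 20201227, so
// operate(7, n) computes 7^n mod 20201227. With the sample keys in the test below,
// loop sizes 8 and 11 reproduce the public keys: 7^8 = 5764801 and
// 7^11 mod 20201227 = 17807724.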
};
door: Credentials,
card: Credentials,
count: usize,
pub fn init() Crypto {
var self = Crypto{
.door = undefined,
.card = undefined,
.count = 0,
};
return self;
}
pub fn deinit(self: *Crypto) void {
_ = self;
}
pub fn add_public_key(self: *Crypto, line: []const u8) void {
const public_key = std.fmt.parseInt(usize, line, 10) catch unreachable;
switch (self.count) {
0 => self.door = Credentials.init(public_key),
1 => self.card = Credentials.init(public_key),
else => @panic("TOO MANY CREDENTIALS"),
}
self.count += 1;
}
pub fn guess_encryption_key(self: *Crypto) usize {
self.door.guess_loop_size();
self.card.guess_loop_size();
self.door.private_key = Credentials.operate(self.card.public_key, self.door.loop_size);
self.card.private_key = Credentials.operate(self.door.public_key, self.card.loop_size);
// std.debug.warn("Door private key = {}, Card private key = {}, OK = {}\n", .{
// self.door.private_key,
// self.card.private_key,
// self.door.private_key == self.card.private_key,
// });
return self.door.private_key;
}
};
test "sample part a" {
const data: []const u8 =
\\5764801
\\17807724
;
var crypto = Crypto.init();
defer crypto.deinit();
var it = std.mem.split(u8, data, "\n");
while (it.next()) |line| {
crypto.add_public_key(line);
}
const encryption_key = crypto.guess_encryption_key();
try testing.expect(encryption_key == 14897079);
} | 2020/p25/crypto.zig |
/// The function fiat25519AddcarryxU51 is an addition with carry.
/// Postconditions:
/// out1 = (arg1 + arg2 + arg3) mod 2^51
/// out2 = ⌊(arg1 + arg2 + arg3) / 2^51⌋
///
/// Input Bounds:
/// arg1: [0x0 ~> 0x1]
/// arg2: [0x0 ~> 0x7ffffffffffff]
/// arg3: [0x0 ~> 0x7ffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0x7ffffffffffff]
/// out2: [0x0 ~> 0x1]
fn fiat25519AddcarryxU51(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) callconv(.Inline) void {
const x1: u64 = ((@intCast(u64, arg1) + arg2) + arg3);
const x2: u64 = (x1 & 0x7ffffffffffff);
const x3: u1 = @intCast(u1, (x1 >> 51));
out1.* = x2;
out2.* = x3;
}
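// Worked example (added note): with arg1 = 1, arg2 = 2^51 - 1, arg3 = 0 the sum is exactly
// 2^51, so out1 = 0 and out2 = 1 (the carry into the next 51-bit limb).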
/// The function fiat25519SubborrowxU51 is a subtraction with borrow.
/// Postconditions:
/// out1 = (-arg1 + arg2 + -arg3) mod 2^51
/// out2 = -⌊(-arg1 + arg2 + -arg3) / 2^51⌋
///
/// Input Bounds:
/// arg1: [0x0 ~> 0x1]
/// arg2: [0x0 ~> 0x7ffffffffffff]
/// arg3: [0x0 ~> 0x7ffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0x7ffffffffffff]
/// out2: [0x0 ~> 0x1]
fn fiat25519SubborrowxU51(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) callconv(.Inline) void {
const x1: i64 = @intCast(i64, (@intCast(i128, @intCast(i64, (@intCast(i128, arg2) - @intCast(i128, arg1)))) - @intCast(i128, arg3)));
const x2: i1 = @intCast(i1, (x1 >> 51));
const x3: u64 = @intCast(u64, (@intCast(i128, x1) & @intCast(i128, 0x7ffffffffffff)));
out1.* = x3;
out2.* = @intCast(u1, (@intCast(i2, 0x0) - @intCast(i2, x2)));
}
/// The function fiat25519CmovznzU64 is a single-word conditional move.
/// Postconditions:
/// out1 = (if arg1 = 0 then arg2 else arg3)
///
/// Input Bounds:
/// arg1: [0x0 ~> 0x1]
/// arg2: [0x0 ~> 0xffffffffffffffff]
/// arg3: [0x0 ~> 0xffffffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
fn fiat25519CmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) callconv(.Inline) void {
const x1: u1 = (~(~arg1));
const x2: u64 = @intCast(u64, (@intCast(i128, @intCast(i1, (@intCast(i2, 0x0) - @intCast(i2, x1)))) & @intCast(i128, 0xffffffffffffffff)));
const x3: u64 = ((x2 & arg3) | ((~x2) & arg2));
out1.* = x3;
}
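// Illustrative sketch, not part of the generated fiat-crypto output: per the
// postcondition above, the selector returns arg2 when arg1 is 0 and arg3 when it is 1.
test "fiat25519CmovznzU64 selection sketch" {
    const assert = @import("std").debug.assert;
    var out: u64 = undefined;
    fiat25519CmovznzU64(&out, 0, 11, 22);
    assert(out == 11);
    fiat25519CmovznzU64(&out, 1, 11, 22);
    assert(out == 22);
}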
/// The function fiat25519CarryMul multiplies two field elements and reduces the result.
/// Postconditions:
/// eval out1 mod m = (eval arg1 * eval arg2) mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
/// arg2: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
pub fn fiat25519CarryMul(out1: *[5]u64, arg1: [5]u64, arg2: [5]u64) void {
const x1: u128 = (@intCast(u128, (arg1[4])) * @intCast(u128, ((arg2[4]) * 0x13)));
const x2: u128 = (@intCast(u128, (arg1[4])) * @intCast(u128, ((arg2[3]) * 0x13)));
const x3: u128 = (@intCast(u128, (arg1[4])) * @intCast(u128, ((arg2[2]) * 0x13)));
const x4: u128 = (@intCast(u128, (arg1[4])) * @intCast(u128, ((arg2[1]) * 0x13)));
const x5: u128 = (@intCast(u128, (arg1[3])) * @intCast(u128, ((arg2[4]) * 0x13)));
const x6: u128 = (@intCast(u128, (arg1[3])) * @intCast(u128, ((arg2[3]) * 0x13)));
const x7: u128 = (@intCast(u128, (arg1[3])) * @intCast(u128, ((arg2[2]) * 0x13)));
const x8: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, ((arg2[4]) * 0x13)));
const x9: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, ((arg2[3]) * 0x13)));
const x10: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, ((arg2[4]) * 0x13)));
const x11: u128 = (@intCast(u128, (arg1[4])) * @intCast(u128, (arg2[0])));
const x12: u128 = (@intCast(u128, (arg1[3])) * @intCast(u128, (arg2[1])));
const x13: u128 = (@intCast(u128, (arg1[3])) * @intCast(u128, (arg2[0])));
const x14: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, (arg2[2])));
const x15: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, (arg2[1])));
const x16: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, (arg2[0])));
const x17: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, (arg2[3])));
const x18: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, (arg2[2])));
const x19: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, (arg2[1])));
const x20: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, (arg2[0])));
const x21: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, (arg2[4])));
const x22: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, (arg2[3])));
const x23: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, (arg2[2])));
const x24: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, (arg2[1])));
const x25: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, (arg2[0])));
const x26: u128 = (x25 + (x10 + (x9 + (x7 + x4))));
const x27: u64 = @intCast(u64, (x26 >> 51));
const x28: u64 = @intCast(u64, (x26 & @intCast(u128, 0x7ffffffffffff)));
const x29: u128 = (x21 + (x17 + (x14 + (x12 + x11))));
const x30: u128 = (x22 + (x18 + (x15 + (x13 + x1))));
const x31: u128 = (x23 + (x19 + (x16 + (x5 + x2))));
const x32: u128 = (x24 + (x20 + (x8 + (x6 + x3))));
const x33: u128 = (@intCast(u128, x27) + x32);
const x34: u64 = @intCast(u64, (x33 >> 51));
const x35: u64 = @intCast(u64, (x33 & @intCast(u128, 0x7ffffffffffff)));
const x36: u128 = (@intCast(u128, x34) + x31);
const x37: u64 = @intCast(u64, (x36 >> 51));
const x38: u64 = @intCast(u64, (x36 & @intCast(u128, 0x7ffffffffffff)));
const x39: u128 = (@intCast(u128, x37) + x30);
const x40: u64 = @intCast(u64, (x39 >> 51));
const x41: u64 = @intCast(u64, (x39 & @intCast(u128, 0x7ffffffffffff)));
const x42: u128 = (@intCast(u128, x40) + x29);
const x43: u64 = @intCast(u64, (x42 >> 51));
const x44: u64 = @intCast(u64, (x42 & @intCast(u128, 0x7ffffffffffff)));
const x45: u64 = (x43 * 0x13);
const x46: u64 = (x28 + x45);
const x47: u64 = (x46 >> 51);
const x48: u64 = (x46 & 0x7ffffffffffff);
const x49: u64 = (x47 + x35);
const x50: u1 = @intCast(u1, (x49 >> 51));
const x51: u64 = (x49 & 0x7ffffffffffff);
const x52: u64 = (@intCast(u64, x50) + x38);
out1[0] = x48;
out1[1] = x51;
out1[2] = x52;
out1[3] = x41;
out1[4] = x44;
}
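// Illustrative sketch, not part of the generated fiat-crypto output: multiplying the
// small field elements 2 and 3 (each held entirely in the low 51-bit limb) yields 6
// with no carries into the higher limbs.
test "fiat25519CarryMul small-value sketch" {
    const assert = @import("std").debug.assert;
    var out: [5]u64 = undefined;
    fiat25519CarryMul(&out, [5]u64{ 2, 0, 0, 0, 0 }, [5]u64{ 3, 0, 0, 0, 0 });
    assert(out[0] == 6 and out[1] == 0 and out[2] == 0 and out[3] == 0 and out[4] == 0);
}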
/// The function fiat25519CarrySquare squares a field element and reduces the result.
/// Postconditions:
/// eval out1 mod m = (eval arg1 * eval arg1) mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
pub fn fiat25519CarrySquare(out1: *[5]u64, arg1: [5]u64) void {
const x1: u64 = ((arg1[4]) * 0x13);
const x2: u64 = (x1 * 0x2);
const x3: u64 = ((arg1[4]) * 0x2);
const x4: u64 = ((arg1[3]) * 0x13);
const x5: u64 = (x4 * 0x2);
const x6: u64 = ((arg1[3]) * 0x2);
const x7: u64 = ((arg1[2]) * 0x2);
const x8: u64 = ((arg1[1]) * 0x2);
const x9: u128 = (@intCast(u128, (arg1[4])) * @intCast(u128, x1));
const x10: u128 = (@intCast(u128, (arg1[3])) * @intCast(u128, x2));
const x11: u128 = (@intCast(u128, (arg1[3])) * @intCast(u128, x4));
const x12: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, x2));
const x13: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, x5));
const x14: u128 = (@intCast(u128, (arg1[2])) * @intCast(u128, (arg1[2])));
const x15: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, x2));
const x16: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, x6));
const x17: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, x7));
const x18: u128 = (@intCast(u128, (arg1[1])) * @intCast(u128, (arg1[1])));
const x19: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, x3));
const x20: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, x6));
const x21: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, x7));
const x22: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, x8));
const x23: u128 = (@intCast(u128, (arg1[0])) * @intCast(u128, (arg1[0])));
const x24: u128 = (x23 + (x15 + x13));
const x25: u64 = @intCast(u64, (x24 >> 51));
const x26: u64 = @intCast(u64, (x24 & @intCast(u128, 0x7ffffffffffff)));
const x27: u128 = (x19 + (x16 + x14));
const x28: u128 = (x20 + (x17 + x9));
const x29: u128 = (x21 + (x18 + x10));
const x30: u128 = (x22 + (x12 + x11));
const x31: u128 = (@intCast(u128, x25) + x30);
const x32: u64 = @intCast(u64, (x31 >> 51));
const x33: u64 = @intCast(u64, (x31 & @intCast(u128, 0x7ffffffffffff)));
const x34: u128 = (@intCast(u128, x32) + x29);
const x35: u64 = @intCast(u64, (x34 >> 51));
const x36: u64 = @intCast(u64, (x34 & @intCast(u128, 0x7ffffffffffff)));
const x37: u128 = (@intCast(u128, x35) + x28);
const x38: u64 = @intCast(u64, (x37 >> 51));
const x39: u64 = @intCast(u64, (x37 & @intCast(u128, 0x7ffffffffffff)));
const x40: u128 = (@intCast(u128, x38) + x27);
const x41: u64 = @intCast(u64, (x40 >> 51));
const x42: u64 = @intCast(u64, (x40 & @intCast(u128, 0x7ffffffffffff)));
const x43: u64 = (x41 * 0x13);
const x44: u64 = (x26 + x43);
const x45: u64 = (x44 >> 51);
const x46: u64 = (x44 & 0x7ffffffffffff);
const x47: u64 = (x45 + x33);
const x48: u1 = @intCast(u1, (x47 >> 51));
const x49: u64 = (x47 & 0x7ffffffffffff);
const x50: u64 = (@intCast(u64, x48) + x36);
out1[0] = x46;
out1[1] = x49;
out1[2] = x50;
out1[3] = x39;
out1[4] = x42;
}
/// The function fiat25519Carry reduces a field element.
/// Postconditions:
/// eval out1 mod m = eval arg1 mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
pub fn fiat25519Carry(out1: *[5]u64, arg1: [5]u64) void {
const x1: u64 = (arg1[0]);
const x2: u64 = ((x1 >> 51) + (arg1[1]));
const x3: u64 = ((x2 >> 51) + (arg1[2]));
const x4: u64 = ((x3 >> 51) + (arg1[3]));
const x5: u64 = ((x4 >> 51) + (arg1[4]));
const x6: u64 = ((x1 & 0x7ffffffffffff) + ((x5 >> 51) * 0x13));
const x7: u64 = (@intCast(u64, @intCast(u1, (x6 >> 51))) + (x2 & 0x7ffffffffffff));
const x8: u64 = (x6 & 0x7ffffffffffff);
const x9: u64 = (x7 & 0x7ffffffffffff);
const x10: u64 = (@intCast(u64, @intCast(u1, (x7 >> 51))) + (x3 & 0x7ffffffffffff));
const x11: u64 = (x4 & 0x7ffffffffffff);
const x12: u64 = (x5 & 0x7ffffffffffff);
out1[0] = x8;
out1[1] = x9;
out1[2] = x10;
out1[3] = x11;
out1[4] = x12;
}
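// Illustrative sketch, not part of the generated fiat-crypto output: a loose value of
// 2^51 in the lowest limb carries into the second limb, producing the packed
// representation [0, 1, 0, 0, 0].
test "fiat25519Carry propagation sketch" {
    const assert = @import("std").debug.assert;
    var out: [5]u64 = undefined;
    fiat25519Carry(&out, [5]u64{ 0x8000000000000, 0, 0, 0, 0 });
    assert(out[0] == 0 and out[1] == 1 and out[2] == 0 and out[3] == 0 and out[4] == 0);
}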
/// The function fiat25519Add adds two field elements.
/// Postconditions:
/// eval out1 mod m = (eval arg1 + eval arg2) mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
/// arg2: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
pub fn fiat25519Add(out1: *[5]u64, arg1: [5]u64, arg2: [5]u64) void {
const x1: u64 = ((arg1[0]) + (arg2[0]));
const x2: u64 = ((arg1[1]) + (arg2[1]));
const x3: u64 = ((arg1[2]) + (arg2[2]));
const x4: u64 = ((arg1[3]) + (arg2[3]));
const x5: u64 = ((arg1[4]) + (arg2[4]));
out1[0] = x1;
out1[1] = x2;
out1[2] = x3;
out1[3] = x4;
out1[4] = x5;
}
/// The function fiat25519Sub subtracts two field elements.
/// Postconditions:
/// eval out1 mod m = (eval arg1 - eval arg2) mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
/// arg2: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
pub fn fiat25519Sub(out1: *[5]u64, arg1: [5]u64, arg2: [5]u64) void {
const x1: u64 = ((0xfffffffffffda + (arg1[0])) - (arg2[0]));
const x2: u64 = ((0xffffffffffffe + (arg1[1])) - (arg2[1]));
const x3: u64 = ((0xffffffffffffe + (arg1[2])) - (arg2[2]));
const x4: u64 = ((0xffffffffffffe + (arg1[3])) - (arg2[3]));
const x5: u64 = ((0xffffffffffffe + (arg1[4])) - (arg2[4]));
out1[0] = x1;
out1[1] = x2;
out1[2] = x3;
out1[3] = x4;
out1[4] = x5;
}
/// The function fiat25519Opp negates a field element.
/// Postconditions:
/// eval out1 mod m = -eval arg1 mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
pub fn fiat25519Opp(out1: *[5]u64, arg1: [5]u64) void {
const x1: u64 = (0xfffffffffffda - (arg1[0]));
const x2: u64 = (0xffffffffffffe - (arg1[1]));
const x3: u64 = (0xffffffffffffe - (arg1[2]));
const x4: u64 = (0xffffffffffffe - (arg1[3]));
const x5: u64 = (0xffffffffffffe - (arg1[4]));
out1[0] = x1;
out1[1] = x2;
out1[2] = x3;
out1[3] = x4;
out1[4] = x5;
}
/// The function fiat25519Selectznz is a multi-limb conditional select.
/// Postconditions:
/// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
///
/// Input Bounds:
/// arg1: [0x0 ~> 0x1]
/// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
/// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
/// Output Bounds:
/// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
pub fn fiat25519Selectznz(out1: *[5]u64, arg1: u1, arg2: [5]u64, arg3: [5]u64) void {
var x1: u64 = undefined;
fiat25519CmovznzU64(&x1, arg1, (arg2[0]), (arg3[0]));
var x2: u64 = undefined;
fiat25519CmovznzU64(&x2, arg1, (arg2[1]), (arg3[1]));
var x3: u64 = undefined;
fiat25519CmovznzU64(&x3, arg1, (arg2[2]), (arg3[2]));
var x4: u64 = undefined;
fiat25519CmovznzU64(&x4, arg1, (arg2[3]), (arg3[3]));
var x5: u64 = undefined;
fiat25519CmovznzU64(&x5, arg1, (arg2[4]), (arg3[4]));
out1[0] = x1;
out1[1] = x2;
out1[2] = x3;
out1[3] = x4;
out1[4] = x5;
}
/// The function fiat25519ToBytes serializes a field element to bytes in little-endian order.
/// Postconditions:
/// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x7f]]
pub fn fiat25519ToBytes(out1: *[32]u8, arg1: [5]u64) void {
var x1: u64 = undefined;
var x2: u1 = undefined;
fiat25519SubborrowxU51(&x1, &x2, 0x0, (arg1[0]), 0x7ffffffffffed);
var x3: u64 = undefined;
var x4: u1 = undefined;
fiat25519SubborrowxU51(&x3, &x4, x2, (arg1[1]), 0x7ffffffffffff);
var x5: u64 = undefined;
var x6: u1 = undefined;
fiat25519SubborrowxU51(&x5, &x6, x4, (arg1[2]), 0x7ffffffffffff);
var x7: u64 = undefined;
var x8: u1 = undefined;
fiat25519SubborrowxU51(&x7, &x8, x6, (arg1[3]), 0x7ffffffffffff);
var x9: u64 = undefined;
var x10: u1 = undefined;
fiat25519SubborrowxU51(&x9, &x10, x8, (arg1[4]), 0x7ffffffffffff);
var x11: u64 = undefined;
fiat25519CmovznzU64(&x11, x10, @intCast(u64, 0x0), 0xffffffffffffffff);
var x12: u64 = undefined;
var x13: u1 = undefined;
fiat25519AddcarryxU51(&x12, &x13, 0x0, x1, (x11 & 0x7ffffffffffed));
var x14: u64 = undefined;
var x15: u1 = undefined;
fiat25519AddcarryxU51(&x14, &x15, x13, x3, (x11 & 0x7ffffffffffff));
var x16: u64 = undefined;
var x17: u1 = undefined;
fiat25519AddcarryxU51(&x16, &x17, x15, x5, (x11 & 0x7ffffffffffff));
var x18: u64 = undefined;
var x19: u1 = undefined;
fiat25519AddcarryxU51(&x18, &x19, x17, x7, (x11 & 0x7ffffffffffff));
var x20: u64 = undefined;
var x21: u1 = undefined;
fiat25519AddcarryxU51(&x20, &x21, x19, x9, (x11 & 0x7ffffffffffff));
const x22: u64 = (x20 << 4);
const x23: u64 = (x18 * @intCast(u64, 0x2));
const x24: u64 = (x16 << 6);
const x25: u64 = (x14 << 3);
const x26: u8 = @intCast(u8, (x12 & @intCast(u64, 0xff)));
const x27: u64 = (x12 >> 8);
const x28: u8 = @intCast(u8, (x27 & @intCast(u64, 0xff)));
const x29: u64 = (x27 >> 8);
const x30: u8 = @intCast(u8, (x29 & @intCast(u64, 0xff)));
const x31: u64 = (x29 >> 8);
const x32: u8 = @intCast(u8, (x31 & @intCast(u64, 0xff)));
const x33: u64 = (x31 >> 8);
const x34: u8 = @intCast(u8, (x33 & @intCast(u64, 0xff)));
const x35: u64 = (x33 >> 8);
const x36: u8 = @intCast(u8, (x35 & @intCast(u64, 0xff)));
const x37: u8 = @intCast(u8, (x35 >> 8));
const x38: u64 = (x25 + @intCast(u64, x37));
const x39: u8 = @intCast(u8, (x38 & @intCast(u64, 0xff)));
const x40: u64 = (x38 >> 8);
const x41: u8 = @intCast(u8, (x40 & @intCast(u64, 0xff)));
const x42: u64 = (x40 >> 8);
const x43: u8 = @intCast(u8, (x42 & @intCast(u64, 0xff)));
const x44: u64 = (x42 >> 8);
const x45: u8 = @intCast(u8, (x44 & @intCast(u64, 0xff)));
const x46: u64 = (x44 >> 8);
const x47: u8 = @intCast(u8, (x46 & @intCast(u64, 0xff)));
const x48: u64 = (x46 >> 8);
const x49: u8 = @intCast(u8, (x48 & @intCast(u64, 0xff)));
const x50: u8 = @intCast(u8, (x48 >> 8));
const x51: u64 = (x24 + @intCast(u64, x50));
const x52: u8 = @intCast(u8, (x51 & @intCast(u64, 0xff)));
const x53: u64 = (x51 >> 8);
const x54: u8 = @intCast(u8, (x53 & @intCast(u64, 0xff)));
const x55: u64 = (x53 >> 8);
const x56: u8 = @intCast(u8, (x55 & @intCast(u64, 0xff)));
const x57: u64 = (x55 >> 8);
const x58: u8 = @intCast(u8, (x57 & @intCast(u64, 0xff)));
const x59: u64 = (x57 >> 8);
const x60: u8 = @intCast(u8, (x59 & @intCast(u64, 0xff)));
const x61: u64 = (x59 >> 8);
const x62: u8 = @intCast(u8, (x61 & @intCast(u64, 0xff)));
const x63: u64 = (x61 >> 8);
const x64: u8 = @intCast(u8, (x63 & @intCast(u64, 0xff)));
const x65: u1 = @intCast(u1, (x63 >> 8));
const x66: u64 = (x23 + @intCast(u64, x65));
const x67: u8 = @intCast(u8, (x66 & @intCast(u64, 0xff)));
const x68: u64 = (x66 >> 8);
const x69: u8 = @intCast(u8, (x68 & @intCast(u64, 0xff)));
const x70: u64 = (x68 >> 8);
const x71: u8 = @intCast(u8, (x70 & @intCast(u64, 0xff)));
const x72: u64 = (x70 >> 8);
const x73: u8 = @intCast(u8, (x72 & @intCast(u64, 0xff)));
const x74: u64 = (x72 >> 8);
const x75: u8 = @intCast(u8, (x74 & @intCast(u64, 0xff)));
const x76: u64 = (x74 >> 8);
const x77: u8 = @intCast(u8, (x76 & @intCast(u64, 0xff)));
const x78: u8 = @intCast(u8, (x76 >> 8));
const x79: u64 = (x22 + @intCast(u64, x78));
const x80: u8 = @intCast(u8, (x79 & @intCast(u64, 0xff)));
const x81: u64 = (x79 >> 8);
const x82: u8 = @intCast(u8, (x81 & @intCast(u64, 0xff)));
const x83: u64 = (x81 >> 8);
const x84: u8 = @intCast(u8, (x83 & @intCast(u64, 0xff)));
const x85: u64 = (x83 >> 8);
const x86: u8 = @intCast(u8, (x85 & @intCast(u64, 0xff)));
const x87: u64 = (x85 >> 8);
const x88: u8 = @intCast(u8, (x87 & @intCast(u64, 0xff)));
const x89: u64 = (x87 >> 8);
const x90: u8 = @intCast(u8, (x89 & @intCast(u64, 0xff)));
const x91: u8 = @intCast(u8, (x89 >> 8));
out1[0] = x26;
out1[1] = x28;
out1[2] = x30;
out1[3] = x32;
out1[4] = x34;
out1[5] = x36;
out1[6] = x39;
out1[7] = x41;
out1[8] = x43;
out1[9] = x45;
out1[10] = x47;
out1[11] = x49;
out1[12] = x52;
out1[13] = x54;
out1[14] = x56;
out1[15] = x58;
out1[16] = x60;
out1[17] = x62;
out1[18] = x64;
out1[19] = x67;
out1[20] = x69;
out1[21] = x71;
out1[22] = x73;
out1[23] = x75;
out1[24] = x77;
out1[25] = x80;
out1[26] = x82;
out1[27] = x84;
out1[28] = x86;
out1[29] = x88;
out1[30] = x90;
out1[31] = x91;
}
/// The function fiat25519FromBytes deserializes a field element from bytes in little-endian order.
/// Postconditions:
/// eval out1 mod m = bytes_eval arg1 mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x7f]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
pub fn fiat25519FromBytes(out1: *[5]u64, arg1: [32]u8) void {
const x1: u64 = (@intCast(u64, (arg1[31])) << 44);
const x2: u64 = (@intCast(u64, (arg1[30])) << 36);
const x3: u64 = (@intCast(u64, (arg1[29])) << 28);
const x4: u64 = (@intCast(u64, (arg1[28])) << 20);
const x5: u64 = (@intCast(u64, (arg1[27])) << 12);
const x6: u64 = (@intCast(u64, (arg1[26])) << 4);
const x7: u64 = (@intCast(u64, (arg1[25])) << 47);
const x8: u64 = (@intCast(u64, (arg1[24])) << 39);
const x9: u64 = (@intCast(u64, (arg1[23])) << 31);
const x10: u64 = (@intCast(u64, (arg1[22])) << 23);
const x11: u64 = (@intCast(u64, (arg1[21])) << 15);
const x12: u64 = (@intCast(u64, (arg1[20])) << 7);
const x13: u64 = (@intCast(u64, (arg1[19])) << 50);
const x14: u64 = (@intCast(u64, (arg1[18])) << 42);
const x15: u64 = (@intCast(u64, (arg1[17])) << 34);
const x16: u64 = (@intCast(u64, (arg1[16])) << 26);
const x17: u64 = (@intCast(u64, (arg1[15])) << 18);
const x18: u64 = (@intCast(u64, (arg1[14])) << 10);
const x19: u64 = (@intCast(u64, (arg1[13])) << 2);
const x20: u64 = (@intCast(u64, (arg1[12])) << 45);
const x21: u64 = (@intCast(u64, (arg1[11])) << 37);
const x22: u64 = (@intCast(u64, (arg1[10])) << 29);
const x23: u64 = (@intCast(u64, (arg1[9])) << 21);
const x24: u64 = (@intCast(u64, (arg1[8])) << 13);
const x25: u64 = (@intCast(u64, (arg1[7])) << 5);
const x26: u64 = (@intCast(u64, (arg1[6])) << 48);
const x27: u64 = (@intCast(u64, (arg1[5])) << 40);
const x28: u64 = (@intCast(u64, (arg1[4])) << 32);
const x29: u64 = (@intCast(u64, (arg1[3])) << 24);
const x30: u64 = (@intCast(u64, (arg1[2])) << 16);
const x31: u64 = (@intCast(u64, (arg1[1])) << 8);
const x32: u8 = (arg1[0]);
const x33: u64 = (x31 + @intCast(u64, x32));
const x34: u64 = (x30 + x33);
const x35: u64 = (x29 + x34);
const x36: u64 = (x28 + x35);
const x37: u64 = (x27 + x36);
const x38: u64 = (x26 + x37);
const x39: u64 = (x38 & 0x7ffffffffffff);
const x40: u8 = @intCast(u8, (x38 >> 51));
const x41: u64 = (x25 + @intCast(u64, x40));
const x42: u64 = (x24 + x41);
const x43: u64 = (x23 + x42);
const x44: u64 = (x22 + x43);
const x45: u64 = (x21 + x44);
const x46: u64 = (x20 + x45);
const x47: u64 = (x46 & 0x7ffffffffffff);
const x48: u8 = @intCast(u8, (x46 >> 51));
const x49: u64 = (x19 + @intCast(u64, x48));
const x50: u64 = (x18 + x49);
const x51: u64 = (x17 + x50);
const x52: u64 = (x16 + x51);
const x53: u64 = (x15 + x52);
const x54: u64 = (x14 + x53);
const x55: u64 = (x13 + x54);
const x56: u64 = (x55 & 0x7ffffffffffff);
const x57: u8 = @intCast(u8, (x55 >> 51));
const x58: u64 = (x12 + @intCast(u64, x57));
const x59: u64 = (x11 + x58);
const x60: u64 = (x10 + x59);
const x61: u64 = (x9 + x60);
const x62: u64 = (x8 + x61);
const x63: u64 = (x7 + x62);
const x64: u64 = (x63 & 0x7ffffffffffff);
const x65: u8 = @intCast(u8, (x63 >> 51));
const x66: u64 = (x6 + @intCast(u64, x65));
const x67: u64 = (x5 + x66);
const x68: u64 = (x4 + x67);
const x69: u64 = (x3 + x68);
const x70: u64 = (x2 + x69);
const x71: u64 = (x1 + x70);
out1[0] = x39;
out1[1] = x47;
out1[2] = x56;
out1[3] = x64;
out1[4] = x71;
}
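// Illustrative sketch, not part of the generated fiat-crypto output: serializing the
// canonical element 5 and deserializing the bytes again reproduces the limbs
// [5, 0, 0, 0, 0].
test "fiat25519ToBytes / fiat25519FromBytes round-trip sketch" {
    const assert = @import("std").debug.assert;
    var bytes: [32]u8 = undefined;
    fiat25519ToBytes(&bytes, [5]u64{ 5, 0, 0, 0, 0 });
    assert(bytes[0] == 5 and bytes[31] == 0);
    var limbs: [5]u64 = undefined;
    fiat25519FromBytes(&limbs, bytes);
    assert(limbs[0] == 5 and limbs[1] == 0 and limbs[4] == 0);
}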
/// The function fiat25519CarryScmul121666 multiplies a field element by 121666 and reduces the result.
/// Postconditions:
/// eval out1 mod m = (121666 * eval arg1) mod m
///
/// Input Bounds:
/// arg1: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]]
/// Output Bounds:
/// out1: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]]
pub fn fiat25519CarryScmul121666(out1: *[5]u64, arg1: [5]u64) void {
const x1: u128 = (@intCast(u128, 0x1db42) * @intCast(u128, (arg1[4])));
const x2: u128 = (@intCast(u128, 0x1db42) * @intCast(u128, (arg1[3])));
const x3: u128 = (@intCast(u128, 0x1db42) * @intCast(u128, (arg1[2])));
const x4: u128 = (@intCast(u128, 0x1db42) * @intCast(u128, (arg1[1])));
const x5: u128 = (@intCast(u128, 0x1db42) * @intCast(u128, (arg1[0])));
const x6: u64 = @intCast(u64, (x5 >> 51));
const x7: u64 = @intCast(u64, (x5 & @intCast(u128, 0x7ffffffffffff)));
const x8: u128 = (@intCast(u128, x6) + x4);
const x9: u64 = @intCast(u64, (x8 >> 51));
const x10: u64 = @intCast(u64, (x8 & @intCast(u128, 0x7ffffffffffff)));
const x11: u128 = (@intCast(u128, x9) + x3);
const x12: u64 = @intCast(u64, (x11 >> 51));
const x13: u64 = @intCast(u64, (x11 & @intCast(u128, 0x7ffffffffffff)));
const x14: u128 = (@intCast(u128, x12) + x2);
const x15: u64 = @intCast(u64, (x14 >> 51));
const x16: u64 = @intCast(u64, (x14 & @intCast(u128, 0x7ffffffffffff)));
const x17: u128 = (@intCast(u128, x15) + x1);
const x18: u64 = @intCast(u64, (x17 >> 51));
const x19: u64 = @intCast(u64, (x17 & @intCast(u128, 0x7ffffffffffff)));
const x20: u64 = (x18 * 0x13);
const x21: u64 = (x7 + x20);
const x22: u1 = @intCast(u1, (x21 >> 51));
const x23: u64 = (x21 & 0x7ffffffffffff);
const x24: u64 = (@intCast(u64, x22) + x10);
const x25: u1 = @intCast(u1, (x24 >> 51));
const x26: u64 = (x24 & 0x7ffffffffffff);
const x27: u64 = (@intCast(u64, x25) + x13);
out1[0] = x23;
out1[1] = x26;
out1[2] = x27;
out1[3] = x16;
out1[4] = x19;
} | fiat-zig/src/curve25519_64.zig |
const c = @import("c.zig");
const std = @import("std");
const Renderer = @import("gfx/renderer.zig").Renderer;
const Logger = @import("logger.zig").Logger;
const Game = @import("game/game.zig").Game;
const MovementDirection = @import("game/player.zig").MovementDirection;
const Scene = @import("gfx/view/scene.zig").Scene;
const ns_per_s = comptime std.time.ns_per_ms * std.time.ms_per_s;
var allocator = comptime std.heap.page_allocator;
pub const App = struct {
running: bool,
logger: Logger,
renderer: *Renderer,
game: *Game,
scene: *Scene,
frameTimer: std.time.Timer,
pub fn init() App {
var logger = Logger.init();
var renderer: *Renderer = Renderer.init(allocator, logger) catch unreachable;
var scene: *Scene = Scene.init(allocator);
var app = .{
.running = true,
.logger = logger,
.renderer = renderer,
.game = Game.init(allocator, scene),
.scene = scene,
.frameTimer = std.time.Timer.start() catch unreachable,
};
renderer.addScene(app.scene);
return app;
}
pub fn deinit(self: *App) void {
self.scene.deinit();
self.renderer.deinit();
}
pub fn run(self: *App) void {
while(self.running) {
const dt: f32 = @intToFloat(f32, self.frameTimer.lap()) / @intToFloat(f32, ns_per_s);
self.handleEvents();
self.game.update(dt);
self.renderer.draw();
c.SDL_Delay(17);
}
}
fn handleEvents(self: *App) void {
var event: c.SDL_Event = undefined;
while(c.SDL_PollEvent(&event) != 0) {
switch(event.@"type") {
c.SDL_QUIT => {
self.running = false;
},
c.SDL_KEYDOWN => {
self.handleKeyDown(event.@"key");
},
c.SDL_KEYUP => {
self.handleKeyUp(event.@"key");
},
else => {},
}
}
}
fn handleKeyDown(self: *App, keyEvent: c.SDL_KeyboardEvent) void {
switch(keyEvent.keysym.sym) {
c.SDLK_w => self.game.player.onMovement(MovementDirection.up),
c.SDLK_a => self.game.player.onMovement(MovementDirection.left),
c.SDLK_d => self.game.player.onMovement(MovementDirection.right),
else => {}
}
}
fn handleKeyUp(self: *App, keyEvent: c.SDL_KeyboardEvent) void {
switch(keyEvent.keysym.sym) {
c.SDLK_w => self.game.player.onStopMovement(MovementDirection.up),
c.SDLK_a => self.game.player.onStopMovement(MovementDirection.left),
c.SDLK_d => self.game.player.onStopMovement(MovementDirection.right),
else => {}
}
}
}; | src/app.zig |
const std = @import("std");
const assert = std.debug.assert;
const tools = @import("tools");
const Policy = struct { letter: u8, min: u8, max: u8 };
fn check_password1(pol: Policy, password: []const u8) bool {
var count: u8 = 0;
for (password) |it| {
if (it == pol.letter) count += 1;
}
return count >= pol.min and count <= pol.max;
}
fn check_password2(pol: Policy, password: []const u8) bool {
if (password.len < pol.max)
return false;
return ((password[pol.min - 1] == pol.letter and password[pol.max - 1] != pol.letter) or (password[pol.min - 1] != pol.letter and password[pol.max - 1] == pol.letter));
}
pub fn run(input: []const u8, allocator: std.mem.Allocator) ![2][]const u8 {
// part1
const ans1 = ans: {
var valid: usize = 0;
var it = std.mem.tokenize(u8, input, "\n\r");
while (it.next()) |line| {
const fields = tools.match_pattern("{}-{} {}: {}", line) orelse unreachable;
const policy = Policy{
.min = @intCast(u8, fields[0].imm),
.max = @intCast(u8, fields[1].imm),
.letter = fields[2].lit[0],
};
const password = fields[3].lit;
if (check_password1(policy, password))
valid += 1;
}
break :ans valid;
};
    // part2
const ans2 = ans: {
var valid: usize = 0;
var it = std.mem.tokenize(u8, input, "\n\r");
while (it.next()) |line| {
const fields = tools.match_pattern("{}-{} {}: {}", line) orelse unreachable;
const policy = Policy{
.min = @intCast(u8, fields[0].imm),
.max = @intCast(u8, fields[1].imm),
.letter = fields[2].lit[0],
};
const password = fields[3].lit;
if (check_password2(policy, password))
valid += 1;
}
break :ans valid;
};
return [_][]const u8{
try std.fmt.allocPrint(allocator, "{}", .{ans1}),
try std.fmt.allocPrint(allocator, "{}", .{ans2}),
};
}
pub const main = tools.defaultMain("2020/input_day02.txt", run); | 2020/day02.zig |
const is_test = @import("builtin").is_test;
const std = @import("std");
const math = std.math;
const testing = std.testing;
const fixint = @import("fixint.zig").fixint;
fn test__fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t, expected: fixint_t) !void {
const x = fixint(fp_t, fixint_t, a);
try testing.expect(x == expected);
}
test "fixint.i1" {
try test__fixint(f32, i1, -math.inf(f32), -1);
try test__fixint(f32, i1, -math.floatMax(f32), -1);
try test__fixint(f32, i1, -2.0, -1);
try test__fixint(f32, i1, -1.1, -1);
try test__fixint(f32, i1, -1.0, -1);
try test__fixint(f32, i1, -0.9, 0);
try test__fixint(f32, i1, -0.1, 0);
try test__fixint(f32, i1, -math.floatMin(f32), 0);
try test__fixint(f32, i1, -0.0, 0);
try test__fixint(f32, i1, 0.0, 0);
try test__fixint(f32, i1, math.floatMin(f32), 0);
try test__fixint(f32, i1, 0.1, 0);
try test__fixint(f32, i1, 0.9, 0);
try test__fixint(f32, i1, 1.0, 0);
try test__fixint(f32, i1, 2.0, 0);
try test__fixint(f32, i1, math.floatMax(f32), 0);
try test__fixint(f32, i1, math.inf(f32), 0);
}
test "fixint.i2" {
try test__fixint(f32, i2, -math.inf(f32), -2);
try test__fixint(f32, i2, -math.floatMax(f32), -2);
try test__fixint(f32, i2, -2.0, -2);
try test__fixint(f32, i2, -1.9, -1);
try test__fixint(f32, i2, -1.1, -1);
try test__fixint(f32, i2, -1.0, -1);
try test__fixint(f32, i2, -0.9, 0);
try test__fixint(f32, i2, -0.1, 0);
try test__fixint(f32, i2, -math.floatMin(f32), 0);
try test__fixint(f32, i2, -0.0, 0);
try test__fixint(f32, i2, 0.0, 0);
try test__fixint(f32, i2, math.floatMin(f32), 0);
try test__fixint(f32, i2, 0.1, 0);
try test__fixint(f32, i2, 0.9, 0);
try test__fixint(f32, i2, 1.0, 1);
try test__fixint(f32, i2, 2.0, 1);
try test__fixint(f32, i2, math.floatMax(f32), 1);
try test__fixint(f32, i2, math.inf(f32), 1);
}
test "fixint.i3" {
try test__fixint(f32, i3, -math.inf(f32), -4);
try test__fixint(f32, i3, -math.floatMax(f32), -4);
try test__fixint(f32, i3, -4.0, -4);
try test__fixint(f32, i3, -3.0, -3);
try test__fixint(f32, i3, -2.0, -2);
try test__fixint(f32, i3, -1.9, -1);
try test__fixint(f32, i3, -1.1, -1);
try test__fixint(f32, i3, -1.0, -1);
try test__fixint(f32, i3, -0.9, 0);
try test__fixint(f32, i3, -0.1, 0);
try test__fixint(f32, i3, -math.floatMin(f32), 0);
try test__fixint(f32, i3, -0.0, 0);
try test__fixint(f32, i3, 0.0, 0);
try test__fixint(f32, i3, math.floatMin(f32), 0);
try test__fixint(f32, i3, 0.1, 0);
try test__fixint(f32, i3, 0.9, 0);
try test__fixint(f32, i3, 1.0, 1);
try test__fixint(f32, i3, 2.0, 2);
try test__fixint(f32, i3, 3.0, 3);
try test__fixint(f32, i3, 4.0, 3);
try test__fixint(f32, i3, math.floatMax(f32), 3);
try test__fixint(f32, i3, math.inf(f32), 3);
}
test "fixint.i32" {
try test__fixint(f64, i32, -math.inf(f64), math.minInt(i32));
try test__fixint(f64, i32, -math.floatMax(f64), math.minInt(i32));
try test__fixint(f64, i32, @as(f64, math.minInt(i32)), math.minInt(i32));
try test__fixint(f64, i32, @as(f64, math.minInt(i32)) + 1, math.minInt(i32) + 1);
try test__fixint(f64, i32, -2.0, -2);
try test__fixint(f64, i32, -1.9, -1);
try test__fixint(f64, i32, -1.1, -1);
try test__fixint(f64, i32, -1.0, -1);
try test__fixint(f64, i32, -0.9, 0);
try test__fixint(f64, i32, -0.1, 0);
try test__fixint(f64, i32, -@as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i32, -0.0, 0);
try test__fixint(f64, i32, 0.0, 0);
try test__fixint(f64, i32, @as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i32, 0.1, 0);
try test__fixint(f64, i32, 0.9, 0);
try test__fixint(f64, i32, 1.0, 1);
try test__fixint(f64, i32, @as(f64, math.maxInt(i32)) - 1, math.maxInt(i32) - 1);
try test__fixint(f64, i32, @as(f64, math.maxInt(i32)), math.maxInt(i32));
try test__fixint(f64, i32, math.floatMax(f64), math.maxInt(i32));
try test__fixint(f64, i32, math.inf(f64), math.maxInt(i32));
}
test "fixint.i64" {
try test__fixint(f64, i64, -math.inf(f64), math.minInt(i64));
try test__fixint(f64, i64, -math.floatMax(f64), math.minInt(i64));
try test__fixint(f64, i64, @as(f64, math.minInt(i64)), math.minInt(i64));
try test__fixint(f64, i64, @as(f64, math.minInt(i64)) + 1, math.minInt(i64));
try test__fixint(f64, i64, @as(f64, math.minInt(i64) / 2), math.minInt(i64) / 2);
try test__fixint(f64, i64, -2.0, -2);
try test__fixint(f64, i64, -1.9, -1);
try test__fixint(f64, i64, -1.1, -1);
try test__fixint(f64, i64, -1.0, -1);
try test__fixint(f64, i64, -0.9, 0);
try test__fixint(f64, i64, -0.1, 0);
try test__fixint(f64, i64, -@as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i64, -0.0, 0);
try test__fixint(f64, i64, 0.0, 0);
try test__fixint(f64, i64, @as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i64, 0.1, 0);
try test__fixint(f64, i64, 0.9, 0);
try test__fixint(f64, i64, 1.0, 1);
try test__fixint(f64, i64, @as(f64, math.maxInt(i64)) - 1, math.maxInt(i64));
try test__fixint(f64, i64, @as(f64, math.maxInt(i64)), math.maxInt(i64));
try test__fixint(f64, i64, math.floatMax(f64), math.maxInt(i64));
try test__fixint(f64, i64, math.inf(f64), math.maxInt(i64));
}
test "fixint.i128" {
try test__fixint(f64, i128, -math.inf(f64), math.minInt(i128));
try test__fixint(f64, i128, -math.floatMax(f64), math.minInt(i128));
try test__fixint(f64, i128, @as(f64, math.minInt(i128)), math.minInt(i128));
try test__fixint(f64, i128, @as(f64, math.minInt(i128)) + 1, math.minInt(i128));
try test__fixint(f64, i128, -2.0, -2);
try test__fixint(f64, i128, -1.9, -1);
try test__fixint(f64, i128, -1.1, -1);
try test__fixint(f64, i128, -1.0, -1);
try test__fixint(f64, i128, -0.9, 0);
try test__fixint(f64, i128, -0.1, 0);
try test__fixint(f64, i128, -@as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i128, -0.0, 0);
try test__fixint(f64, i128, 0.0, 0);
try test__fixint(f64, i128, @as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i128, 0.1, 0);
try test__fixint(f64, i128, 0.9, 0);
try test__fixint(f64, i128, 1.0, 1);
try test__fixint(f64, i128, @as(f64, math.maxInt(i128)) - 1, math.maxInt(i128));
try test__fixint(f64, i128, @as(f64, math.maxInt(i128)), math.maxInt(i128));
try test__fixint(f64, i128, math.floatMax(f64), math.maxInt(i128));
try test__fixint(f64, i128, math.inf(f64), math.maxInt(i128));
} | lib/std/special/compiler_rt/fixint_test.zig |
const std = @import("std");
const debug = std.debug;
const mem = std.mem;
const heap = std.heap;
const json = std.json;
const testing = std.testing;
const meta = std.meta;
pub fn MaybeDefined(comptime T: type) type {
return union(enum) {
NotDefined,
Defined: T,
const __maybe_defined = void;
};
}
fn getMaybeDefinedChildType(comptime T: type) ?type {
if (T == json.Value) return null; // workaround, json.Value.dump would get referenced but it doesn't compile
switch (@typeInfo(T)) {
.Union => |unionInfo| {
if (unionInfo.decls.len == 1 and comptime mem.eql(u8, unionInfo.decls[0].name, "__maybe_defined")) {
return unionInfo.fields[1].field_type;
} else {
return null;
}
},
else => return null,
}
}
pub const Structured = union(enum) {
Array: json.Array,
Object: json.ObjectMap,
};
pub const Primitive = union(enum) {
Bool: bool,
Integer: i64,
Float: f64,
String: []const u8,
};
pub const Number = union(enum) {
Integer: i64,
Float: f64,
};
/// jsonStream must be a pointer to std.json.WriteStream
pub fn serialize(value: var, jsonStream: var) !void {
comptime const T = @TypeOf(value);
comptime const info = @typeInfo(T);
if (T == json.Value) {
try jsonStream.emitJson(value);
return;
}
switch (info) {
.Null => {
try jsonStream.emitNull();
},
.Int, .ComptimeInt, .Float, .ComptimeFloat => {
try jsonStream.emitNumber(value);
},
.Bool => {
try jsonStream.emitBool(value);
},
.Pointer => |ptrInfo| {
if (ptrInfo.child == u8) {
try jsonStream.emitString(value);
} else {
try jsonStream.beginArray();
for (value) |item, index| {
try jsonStream.arrayElem();
try serialize(item, jsonStream);
}
try jsonStream.endArray();
}
},
.Struct => |structInfo| {
try jsonStream.beginObject();
inline for (structInfo.fields) |field, index| {
if (getMaybeDefinedChildType(field.field_type) != null) {
if (@field(value, field.name) == .Defined) {
try jsonStream.objectField(field.name);
try serialize(@field(value, field.name).Defined, jsonStream);
}
} else {
try jsonStream.objectField(field.name);
try serialize(@field(value, field.name), jsonStream);
}
}
try jsonStream.endObject();
},
.Optional => {
if (value) |notNull| {
try serialize(notNull, jsonStream);
} else {
try jsonStream.emitNull();
}
},
.Union => |unionInfo| {
if (unionInfo.tag_type) |UnionTagType| {
inline for (unionInfo.fields) |u_field| {
if (@enumToInt(@as(UnionTagType, value)) == u_field.enum_field.?.value) {
return try serialize(@field(value, u_field.name), jsonStream);
}
}
} else {
@compileError("Unable to stringify untagged union '" ++ @typeName(T) ++ "'");
}
},
.Enum => {
try jsonStream.emitNumber(@enumToInt(value));
},
else => {
@compileError("JSON serialize: Unsupported type: " ++ @typeName(T));
},
}
}
pub fn serialize2(value: var, alloc: *mem.Allocator) mem.Allocator.Error!json.ValueTree {
var arena = heap.ArenaAllocator.init(alloc);
errdefer arena.deinit();
return json.ValueTree{
.root = try serialize2Impl(value, &arena.allocator),
.arena = arena,
};
}
fn serialize2Impl(value: var, alloc: *mem.Allocator) mem.Allocator.Error!json.Value {
const T = @TypeOf(value);
const info = @typeInfo(T);
if (T == json.Value) {
return value;
}
switch (info) {
.Null => {
return json.Value{ .Null = {} };
},
.Int, .ComptimeInt => {
return json.Value{ .Integer = value };
},
.Float, .ComptimeFloat => {
return json.Value{ .Float = value };
},
.Bool => {
return json.Value{ .Bool = value };
},
.Pointer => {
comptime if (!meta.trait.isSlice(T) and !meta.trait.isPtrTo(.Array)(T)) {
@compileError("JSON deserialize: Unsupported pointer type: " ++ @typeName(T));
};
if (meta.Elem(T) == u8) {
return json.Value{ .String = value };
} else {
var arr = json.Value{ .Array = json.Array.init(alloc) };
for (value) |item, index| {
try arr.Array.append(try serialize2Impl(item, alloc));
}
return arr;
}
},
.Struct => |structInfo| {
var obj = json.Value{ .Object = json.ObjectMap.init(alloc) };
inline for (structInfo.fields) |field, index| {
if (getMaybeDefinedChildType(field.field_type) != null) {
if (@field(value, field.name) == .Defined) {
try obj.Object.putNoClobber(field.name, try serialize2Impl(@field(value, field.name).Defined, alloc));
}
} else {
try obj.Object.putNoClobber(field.name, try serialize2Impl(@field(value, field.name), alloc));
}
}
return obj;
},
.Optional => {
if (value) |notNull| {
return try serialize2Impl(notNull, alloc);
} else {
return json.Value{ .Null = {} };
}
},
.Union => |unionInfo| {
if (unionInfo.tag_type) |tagType| {
inline for (unionInfo.fields) |field| {
if (@enumToInt(@as(tagType, value)) == field.enum_field.?.value) {
return try serialize2Impl(@field(value, field.name), alloc);
}
}
unreachable;
} else {
@compileError("JSON serialize: Unsupported untagged union type: " ++ @typeName(T));
}
},
.Enum => {
return json.Value{ .Integer = @enumToInt(value) };
},
else => {
@compileError("JSON serialize: Unsupported type: " ++ @typeName(T));
},
}
}
pub const DeserializeError = error{
InvalidType,
MissingField,
} || mem.Allocator.Error;
pub const DeserializeOptions = struct {
copyStrings: bool,
undefinedToNull: bool,
allowExtraFields: bool,
};
pub fn DeserializeResult(comptime T: type) type {
return struct {
result: T,
arena: heap.ArenaAllocator,
pub fn deinit(self: *@This()) void {
self.arena.deinit();
}
};
}
/// Tagged unions are filled with the first member whose type matches the incoming json.Value tag
pub fn deserialize(comptime T: type, value: json.Value, alloc: *mem.Allocator) DeserializeError!DeserializeResult(T) {
var arena = heap.ArenaAllocator.init(alloc);
errdefer arena.deinit();
return DeserializeResult(T){
.result = try deserializeImpl(T, value, &arena.allocator),
.arena = arena,
};
}
fn deserializeImpl(comptime T: type, value: json.Value, alloc: *mem.Allocator) DeserializeError!T {
comptime const info = @typeInfo(T);
if (T == json.Value) {
return value;
}
switch (info) {
.Int => {
if (value != .Integer) {
return error.InvalidType;
}
return value.Integer;
},
.Float => {
if (value != .Float) {
return error.InvalidType;
}
return value.Float;
},
.Bool => {
if (value != .Bool) {
return error.InvalidType;
}
return value.Bool;
},
.Pointer => |ptrInfo| {
if (ptrInfo.size != .Slice) {
@compileError("JSON deserialize: Unsupported pointer type: " ++ @typeName(T));
}
if (ptrInfo.child == u8 and ptrInfo.is_const) {
if (value != .String) {
return error.InvalidType;
}
return value.String;
} else {
if (value != .Array) {
return error.InvalidType;
}
var arr: T = try alloc.alloc(ptrInfo.child, value.Array.items.len);
for (value.Array.items) |item, index| {
arr[index] = try deserializeImpl(ptrInfo.child, item, alloc);
}
return arr;
}
},
.Struct => |structInfo| {
if (value != .Object) {
return error.InvalidType;
}
var obj: T = undefined;
inline for (structInfo.fields) |field, index| {
if (getMaybeDefinedChildType(field.field_type)) |childType| {
if (value.Object.getValue(field.name)) |fieldVal| {
@field(obj, field.name) = MaybeDefined(childType){ .Defined = try deserializeImpl(childType, fieldVal, alloc) };
} else {
@field(obj, field.name) = MaybeDefined(childType).NotDefined;
}
} else {
@field(obj, field.name) = try deserializeImpl(field.field_type, value.Object.getValue(field.name) orelse return error.MissingField, alloc);
}
}
return obj;
},
.Optional => |optionalInfo| {
if (value == .Null) {
return null;
} else {
return try deserializeImpl(optionalInfo.child, value, alloc);
}
},
.Union => |unionInfo| {
if (unionInfo.tag_type) |_| {
inline for (unionInfo.fields) |field| {
if (typesMatch(field.field_type, value)) {
const successOrError = deserializeImpl(field.field_type, value, alloc);
if (successOrError) |success| {
return @unionInit(T, field.enum_field.?.name, success);
} else |err| {
// if it's just a type error try the next type in the union
if (err != error.InvalidType and err != error.MissingField) {
return err;
}
}
}
}
return error.InvalidType;
} else {
@compileError("JSON deserialize: Unsupported untagged union type: " ++ @typeName(T));
}
},
.Enum => {
if (value != .Integer) {
return error.InvalidType;
}
return @intToEnum(T, value.Integer);
},
else => {
@compileError("JSON deserialize: Unsupported type: " ++ @typeName(T));
},
}
}
fn typesMatch(comptime fieldType: type, jsonTag: @TagType(json.Value)) bool {
const info = @typeInfo(fieldType);
switch (jsonTag) {
.Null => unreachable, // null is handled by optionals
.Bool => {
return info == .Bool;
},
.Integer => {
return info == .Int;
},
.Float => {
return info == .Float;
},
.String => {
return isStringType(fieldType);
},
.Array => {
return info == .Pointer and info.Pointer.size == .Slice and !isStringType(fieldType);
},
.Object => {
return info == .Struct;
},
}
}
fn isStringType(comptime fieldType: type) bool {
return switch (@typeInfo(fieldType)) {
.Pointer => |ptr| ptr.size == .Slice and ptr.child == u8 and ptr.is_const,
else => false,
};
}
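// Illustrative sketch, not part of the original module: demonstrates the union
// behaviour of `deserialize` documented above -- the first union member whose type
// matches the json.Value tag is chosen. The wrapper and union types here are
// hypothetical, purely for illustration.
test "deserialize picks first matching union member" {
    const NumberOrString = union(enum) {
        Num: i64,
        Str: []const u8,
    };
    const Wrapper = struct {
        val: NumberOrString,
    };
    var parser = json.Parser.init(testing.allocator, false);
    defer parser.deinit();
    var tree = try parser.parse("{\"val\": 42}");
    defer tree.deinit();
    var out = try deserialize(Wrapper, tree.root, testing.allocator);
    defer out.deinit();
    testing.expect(out.result.val.Num == 42);
}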
test "deserialize" {
const Test = struct {
int: i64,
arr: []?bool,
str: []const u8,
maybe1: MaybeDefined(i64),
maybe2: MaybeDefined(i64),
maybeNull1: MaybeDefined(?i64),
maybeNull2: MaybeDefined(?i64),
maybeNull3: MaybeDefined(?i64),
};
const in =
\\{
\\ "int": 8,
\\ "somethingUnexpected": 12,
\\ "arr": [null, true, false],
\\ "str": "str",
\\ "maybe1": 42,
\\ "maybeNull1": 42,
\\ "maybeNull2": null
\\}
;
var parser = json.Parser.init(testing.allocator, false);
defer parser.deinit();
var tree = try parser.parse(in);
defer tree.deinit();
var testOut = try deserialize(Test, tree.root, testing.allocator);
defer testOut.deinit();
const result = testOut.result;
testing.expect(result.maybeNull1.Defined.? == 42);
testing.expect(result.maybeNull2.Defined == null);
testing.expect(result.maybeNull3 == .NotDefined);
var valTree = try serialize2(result, testing.allocator);
defer valTree.deinit();
} | src/json_serialize.zig |
const std = @import("std");
const Builder = std.build.Builder;
const LibExeObjStep = std.build.LibExeObjStep;
fn join(builder: *Builder, a: []const u8, b: []const u8) []const u8 {
return std.fs.path.join(builder.allocator, &[_][]const u8{ a, b }) catch unreachable;
}
pub fn build(b: *Builder) void {
const target = b.standardTargetOptions(.{});
const mode = b.standardReleaseOptions();
const exe = b.addExecutable("adf-box", "src/main.zig");
exe.setTarget(target);
exe.setBuildMode(mode);
exe.addPackagePath("zalgebra", "zalgebra/src/main.zig");
var d = std.fs.cwd().openDir("src/shaders", .{ .access_sub_paths = false, .iterate = true }) catch unreachable;
std.fs.cwd().makeDir("src/shaders/bin") catch |err| switch(err) {
error.PathAlreadyExists => {},
else => unreachable,
};
defer d.close();
var it = d.iterate();
while (it.next() catch unreachable) |entry| {
if (entry.kind == .File) {
var source = join(b, "src/shaders", entry.name);
var compiled = join(b, "src/shaders/bin", entry.name);
var dest = join(b, "shaders", entry.name);
var installShader = b.addInstallFileWithDir(compiled, .Bin, dest);
exe.step.dependOn(&installShader.step);
if (needs_update(source, compiled)) {
var compileShader = b.addSystemCommand(&[_][]const u8{ "glslangValidator", "-V", source, "-o", compiled }); //"-g", "-Od",
installShader.step.dependOn(&compileShader.step);
}
}
}
exe.linkSystemLibrary("c");
exe.addLibPath("V-EZ/Bin/x86_64/");
exe.linkSystemLibrary("VEZ");
if (target.getOsTag() == .windows) {
exe.linkSystemLibrary("user32");
exe.linkSystemLibrary("gdi32");
exe.linkSystemLibrary("shell32");
// standard installation path
exe.addLibPath("C:/Program Files (x86)/GLFW/lib-vc2019");
exe.linkSystemLibrary("glfw3");
var vkbase = std.fs.cwd().openDir("C:/VulkanSDK", .{ .access_sub_paths = false, .iterate = true }) catch unreachable;
defer vkbase.close();
// TODO: find newest version
const first = vkbase.iterate().next() catch unreachable orelse unreachable;
const vkpath = join(b, join(b, "C:/VulkanSDK", first.name), "Lib");
exe.addLibPath(vkpath);
exe.linkSystemLibrary("vulkan-1");
const dll = b.addInstallBinFile("./V-EZ/Bin/x86_64/VEZ.dll", "VEZ.dll");
exe.step.dependOn(&dll.step);
} else {
exe.linkSystemLibrary("glfw");
exe.linkSystemLibrary("vulkan");
}
exe.install();
const run_cmd = exe.run();
run_cmd.step.dependOn(b.getInstallStep());
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
}
// ideally the Zig build system would provide this kind of generalized build caching itself
fn needs_update(a: []const u8, b: []const u8) bool {
var a_time = get_mtime(a) orelse return true;
var b_time = get_mtime(b) orelse return true;
return a_time > b_time;
}
fn get_mtime(path: []const u8) ?i128 {
const f = std.fs.cwd().openFile(path, .{ .read = false, .write = false}) catch |err|
switch (err) {
error.FileNotFound => return null,
else => unreachable
};
defer f.close();
const stats = f.stat() catch return null;
return stats.mtime;
} | build.zig |
const std = @import("std");
const assert = std.debug.assert;
const zp = @import("../../zplay.zig");
const drawcall = zp.graphics.common.drawcall;
const alg = zp.deps.alg;
const Vec2 = alg.Vec2;
const Vec3 = alg.Vec3;
const Vec4 = alg.Vec4;
pub const c = @import("c.zig");
pub const Data = c.cgltf_data;
pub const Scene = c.cgltf_scene;
pub const Node = c.cgltf_node;
pub const Mesh = c.cgltf_mesh;
pub const Primitive = c.cgltf_primitive;
pub const Material = c.cgltf_material;
pub const Image = c.cgltf_image;
pub const Error = error{
DataTooShort,
UnknownFormat,
InvalidJson,
InvalidGLTF,
InvalidOptions,
FileNotFound,
IoError,
OutOfMemory,
LegacyGLTF,
InvalidParams,
};
fn resultToError(result: c.cgltf_result) Error {
return switch (result) {
c.cgltf_result_data_too_short => error.DataTooShort,
c.cgltf_result_unknown_format => error.UnknownFormat,
c.cgltf_result_invalid_json => error.InvalidJson,
c.cgltf_result_invalid_gltf => error.InvalidGLTF,
c.cgltf_result_invalid_options => error.InvalidOptions,
c.cgltf_result_file_not_found => error.FileNotFound,
c.cgltf_result_io_error => error.IoError,
c.cgltf_result_out_of_memory => error.OutOfMemory,
c.cgltf_result_legacy_gltf => error.LegacyGLTF,
else => {
std.debug.panic("unknown error!", .{});
},
};
}
/// parse gltf from in-memory bytes; also loads external buffers when gltf_path is non-null
pub fn loadBuffer(data: []const u8, gltf_path: ?[]const u8, options: ?c.cgltf_options) Error!*Data {
const parse_option = options orelse std.mem.zeroes(c.cgltf_options);
var out: *Data = undefined;
var result = c.cgltf_parse(
&parse_option,
data.ptr,
data.len,
@ptrCast([*c][*c]Data, &out),
);
if (result != c.cgltf_result_success) {
return resultToError(result);
}
errdefer free(out);
if (gltf_path) |path| {
result = c.cgltf_load_buffers(&parse_option, out, path.ptr);
if (result != c.cgltf_result_success) {
return resultToError(result);
}
}
return out;
}
/// parse gltf from file, and load buffers (assuming assets are in the same directory)
pub fn loadFile(filename: [:0]const u8, options: ?c.cgltf_options) Error!*Data {
const parse_option = options orelse std.mem.zeroes(c.cgltf_options);
var out: *Data = undefined;
var result = c.cgltf_parse_file(
&parse_option,
filename.ptr,
@ptrCast([*c][*c]Data, &out),
);
if (result != c.cgltf_result_success) {
return resultToError(result);
}
errdefer free(out);
result = c.cgltf_load_buffers(&parse_option, out, filename.ptr);
if (result != c.cgltf_result_success) {
return resultToError(result);
}
return out;
}
/// read data from accessor
pub fn readFromAccessor(accessor: *c.cgltf_accessor, index: ?u32, T: type, out: []T) Error!void {
const success = switch (T) {
f32 => c.cgltf_accessor_read_float(
accessor,
index,
out.ptr,
out.len,
),
c_uint, u32, i32 => c.cgltf_accessor_read_uint(
accessor,
index,
@ptrCast(*c_uint, out.ptr),
out.len,
),
else => {
std.debug.panic("invalid element type", .{});
},
};
if (!success) {
return error.InvalidParams;
}
}
pub fn free(data: *Data) void {
c.cgltf_free(data);
}
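// Illustrative sketch, not part of the original module: shows the intended call
// pattern for loadFile/free/getPrimitiveType. The path "assets/model.gltf" and the
// mesh/primitive indices are hypothetical placeholders; the function is never called
// and exists purely as usage documentation.
fn exampleLoadSketch() Error!void {
    const data = try loadFile("assets/model.gltf", null);
    defer free(data);
    // query the draw-call primitive type of the first primitive of the first mesh
    const prim_type = getPrimitiveType(&data.meshes[0].primitives[0]);
    _ = prim_type;
}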
pub fn appendMeshPrimitiveByIndex(
data: *Data,
mesh_index: u32,
prim_index: u32,
indices: *std.ArrayList(u32),
positions: *std.ArrayList(Vec3),
normals: ?*std.ArrayList(Vec3),
texcoords0: ?*std.ArrayList(Vec2),
tangents: ?*std.ArrayList(Vec4),
) void {
assert(mesh_index < data.meshes_count);
assert(prim_index < data.meshes[mesh_index].primitives_count);
appendMeshPrimitive(
&data.meshes[mesh_index].primitives[prim_index],
indices,
positions,
normals,
texcoords0,
tangents,
);
}
pub fn appendMeshPrimitive(
primitive: *const Primitive,
indices: *std.ArrayList(u32),
positions: *std.ArrayList(Vec3),
normals: ?*std.ArrayList(Vec3),
texcoords0: ?*std.ArrayList(Vec2),
tangents: ?*std.ArrayList(Vec4),
) void {
const num_vertices: u32 = @intCast(u32, primitive.attributes[0].data.*.count);
const num_indices: u32 = @intCast(u32, primitive.indices.*.count);
// Indices.
{
indices.ensureTotalCapacity(indices.items.len + num_indices) catch unreachable;
const accessor = primitive.indices;
assert(accessor.*.buffer_view != null);
assert(accessor.*.stride == accessor.*.buffer_view.*.stride or accessor.*.buffer_view.*.stride == 0);
assert((accessor.*.stride * accessor.*.count) == accessor.*.buffer_view.*.size);
assert(accessor.*.buffer_view.*.buffer.*.data != null);
const data_addr = @alignCast(4, @ptrCast([*]const u8, accessor.*.buffer_view.*.buffer.*.data) +
accessor.*.offset + accessor.*.buffer_view.*.offset);
if (accessor.*.stride == 1) {
assert(accessor.*.component_type == c.cgltf_component_type_r_8u);
const src = @ptrCast([*]const u8, data_addr);
const offset = @intCast(u8, positions.items.len);
var i: u32 = 0;
while (i < num_indices) : (i += 1) {
indices.appendAssumeCapacity(src[i] + offset);
}
} else if (accessor.*.stride == 2) {
assert(accessor.*.component_type == c.cgltf_component_type_r_16u);
const src = @ptrCast([*]const u16, data_addr);
const offset = @intCast(u16, positions.items.len);
var i: u32 = 0;
while (i < num_indices) : (i += 1) {
indices.appendAssumeCapacity(src[i] + offset);
}
} else if (accessor.*.stride == 4) {
assert(accessor.*.component_type == c.cgltf_component_type_r_32u);
const src = @ptrCast([*]const u32, data_addr);
const offset = @intCast(u32, positions.items.len);
var i: u32 = 0;
while (i < num_indices) : (i += 1) {
indices.appendAssumeCapacity(src[i] + offset);
}
} else {
unreachable;
}
}
// Attributes.
{
positions.resize(positions.items.len + num_vertices) catch unreachable;
if (normals != null) normals.?.resize(normals.?.items.len + num_vertices) catch unreachable;
if (texcoords0 != null) texcoords0.?.resize(texcoords0.?.items.len + num_vertices) catch unreachable;
if (tangents != null) tangents.?.resize(tangents.?.items.len + num_vertices) catch unreachable;
const num_attribs: u32 = @intCast(u32, primitive.attributes_count);
var attrib_index: u32 = 0;
while (attrib_index < num_attribs) : (attrib_index += 1) {
const attrib = &primitive.attributes[attrib_index];
const accessor = attrib.data;
assert(accessor.*.buffer_view != null);
assert(accessor.*.stride == accessor.*.buffer_view.*.stride or accessor.*.buffer_view.*.stride == 0);
assert((accessor.*.stride * accessor.*.count) == accessor.*.buffer_view.*.size);
assert(accessor.*.buffer_view.*.buffer.*.data != null);
const data_addr = @ptrCast([*]const u8, accessor.*.buffer_view.*.buffer.*.data) +
accessor.*.offset + accessor.*.buffer_view.*.offset;
if (attrib.*.type == c.cgltf_attribute_type_position) {
assert(accessor.*.type == c.cgltf_type_vec3);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &positions.items[positions.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
} else if (attrib.*.type == c.cgltf_attribute_type_normal and normals != null) {
assert(accessor.*.type == c.cgltf_type_vec3);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &normals.?.items[normals.?.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
} else if (attrib.*.type == c.cgltf_attribute_type_texcoord and texcoords0 != null) {
assert(accessor.*.type == c.cgltf_type_vec2);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &texcoords0.?.items[texcoords0.?.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
} else if (attrib.*.type == c.cgltf_attribute_type_tangent and tangents != null) {
assert(accessor.*.type == c.cgltf_type_vec4);
assert(accessor.*.component_type == c.cgltf_component_type_r_32f);
@memcpy(
@ptrCast([*]u8, &tangents.?.items[tangents.?.items.len - num_vertices]),
data_addr,
accessor.*.count * accessor.*.stride,
);
}
}
}
}
pub fn getPrimitiveType(primitive: *Primitive) drawcall.PrimitiveType {
return switch (primitive.type) {
c.cgltf_primitive_type_points => .points,
c.cgltf_primitive_type_lines => .lines,
c.cgltf_primitive_type_line_loop => .line_loop,
c.cgltf_primitive_type_line_strip => .line_strip,
c.cgltf_primitive_type_triangles => .triangles,
c.cgltf_primitive_type_triangle_strip => .triangle_strip,
c.cgltf_primitive_type_triangle_fan => .triangle_fan,
else => unreachable,
};
} | src/deps/gltf/gltf.zig |
const std = @import("std");
const koino = @import("koino");
const libpcre = @import("libpcre");
const StringList = std.ArrayList(u8);
const BuildFile = @import("build_file.zig").BuildFile;
const PageBuildStatus = enum {
Unbuilt,
Built,
Error,
};
const Page = struct {
filesystem_path: []const u8,
title: []const u8,
status: PageBuildStatus = .Unbuilt,
html_path: ?[]const u8 = null,
web_path: ?[]const u8 = null,
errors: ?[]const u8 = null,
};
const PageMap = std.StringHashMap(Page);
// an article at path a/b/c/d/e.md is keyed by its basename without extension ("e") in this title map.
const TitleMap = std.StringHashMap([]const u8);
const PageFile = union(enum) {
dir: PageFolder,
file: []const u8,
};
const PageFolder = std.StringHashMap(PageFile);
/// recursively deinitialize a PageFolder
fn deinitPageFolder(folder: *PageFolder) void {
var folder_it = folder.iterator();
while (folder_it.next()) |entry| {
var child = entry.value_ptr;
switch (child.*) {
.dir => |*child_folder| deinitPageFolder(child_folder),
.file => {},
}
}
folder.deinit();
}
const sepstr = &[_]u8{std.fs.path.sep};
const PageTree = struct {
allocator: std.mem.Allocator,
root: PageFolder,
const Self = @This();
pub fn init(allocator: std.mem.Allocator) Self {
return Self{
.allocator = allocator,
.root = PageFolder.init(allocator),
};
}
pub fn deinit(self: *Self) void {
deinitPageFolder(&self.root);
}
pub fn addPage(self: *Self, fspath: []const u8) !void {
const total_seps = std.mem.count(u8, fspath, sepstr);
var path_it = std.mem.split(u8, fspath, sepstr);
var current_page: ?*PageFolder = &self.root;
var idx: usize = 0;
while (true) : (idx += 1) {
const maybe_path_component = path_it.next();
if (maybe_path_component == null) break;
const path_component = maybe_path_component.?;
if (current_page.?.getPtr(path_component)) |child_page| {
current_page = &child_page.dir;
} else {
// if this is the last path component, create a file entry; otherwise create a folder and descend into it
if (idx == total_seps) {
try current_page.?.put(path_component, .{ .file = fspath });
} else {
try current_page.?.put(path_component, .{ .dir = PageFolder.init(self.allocator) });
current_page = ¤t_page.?.getPtr(path_component).?.dir;
}
}
}
}
};
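/// Register a markdown file: record it in the PageMap and PageTree and map its
/// title (file name without the ".md" extension) in the TitleMap.
/// Non-markdown files are ignored.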
fn addFilePage(
pages: *PageMap,
titles: *TitleMap,
tree: *PageTree,
local_path: []const u8,
fspath: []const u8,
) !void {
if (!std.mem.endsWith(u8, local_path, ".md")) return;
std.log.info("new page: local='{s}' fs='{s}'", .{ local_path, fspath });
const title_raw = std.fs.path.basename(local_path);
const title = title_raw[0 .. title_raw.len - 3];
std.log.info(" title='{s}'", .{title});
try titles.put(title, local_path);
try pages.put(local_path, Page{ .filesystem_path = fspath, .title = title });
try tree.addPage(local_path);
}
const StringBuffer = std.ArrayList(u8);
const ProcessorContext = struct {
build_file: *const BuildFile,
titles: *TitleMap,
pages: *PageMap,
captures: []?libpcre.Capture,
file_contents: []const u8,
current_html_path: []const u8,
};
const CheckmarkProcessor = struct {
regex: libpcre.Regex,
const Self = @This();
pub fn deinit(self: *Self) void {
self.regex.deinit();
}
pub fn handle(self: *Self, ctx: ProcessorContext, result: *StringBuffer) !void {
_ = self;
const match = ctx.captures[0].?;
const check = ctx.file_contents[match.start..match.end];
try result.writer().print("<code>{s}</code>", .{check});
}
};
const LinkProcessor = struct {
regex: libpcre.Regex,
const Self = @This();
pub fn deinit(self: *Self) void {
self.regex.deinit();
}
pub fn handle(self: *Self, ctx: ProcessorContext, result: *StringBuffer) !void {
_ = self;
const match = ctx.captures[0].?;
std.log.info("match {} {}", .{ match.start, match.end });
const referenced_title = ctx.file_contents[match.start + 2 .. match.end - 2];
std.log.info("link to '{s}'", .{referenced_title});
// TODO strict_links support goes here
var maybe_page_local_path = ctx.titles.get(referenced_title);
if (maybe_page_local_path) |page_local_path| {
var page = ctx.pages.get(page_local_path).?;
try result.writer().print("<a href=\"{s}/{s}\">{s}</a>", .{ ctx.build_file.config.webroot, page.web_path, referenced_title });
} else {
if (ctx.build_file.config.strict_links) {
std.log.err(
"file '{s}' has link to file '{s}' which is not included!",
.{ ctx.current_html_path, referenced_title },
);
return error.InvalidLinksFound;
} else {
try result.writer().print("[[{s}]]", .{referenced_title});
}
}
}
};
const Paths = struct {
/// Path to given page in the web browser
web_path: []const u8,
/// Path to given page in the public/ folder
html_path: []const u8,
};
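/// Derive the output paths for a page: the on-disk HTML path ("public/" plus the
/// local path with ".md" replaced by ".html") and the percent-encoded,
/// '/'-separated path used in links. `string_buffer` backs all temporary allocations.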
pub fn parsePaths(local_path: []const u8, string_buffer: []u8) !Paths {
var fba = std.heap.FixedBufferAllocator.init(string_buffer);
var alloc = fba.allocator();
// local_path contains path to markdown file relative to vault_dir
// (so if you want to access it, concatenate vault_dir with local_path)
//
// to generate html path, take public/ + local_path, and replace
// ".md" with ".html"
const html_path_raw = try std.fs.path.join(alloc, &[_][]const u8{ "public", local_path });
const offset = std.mem.replacementSize(u8, html_path_raw, ".md", ".html");
var html_path_buffer = try alloc.alloc(u8, offset);
_ = std.mem.replace(u8, html_path_raw, ".md", ".html", html_path_buffer);
const html_path = html_path_buffer[0..offset];
// to generate web path, we need to:
// - take html_path
// - remove public/
// - replace std.fs.path.sep to '/'
// - done!
const web_path_r1_size = std.mem.replacementSize(u8, html_path, "public" ++ sepstr, "");
var web_path_r1_buffer = try alloc.alloc(u8, web_path_r1_size);
_ = std.mem.replace(u8, html_path, "public" ++ sepstr, "", web_path_r1_buffer);
const web_path_r1 = web_path_r1_buffer[0..web_path_r1_size];
const web_path_r2_size = std.mem.replacementSize(u8, web_path_r1, sepstr, "/");
var web_path_r2_buffer = try alloc.alloc(u8, web_path_r2_size);
_ = std.mem.replace(u8, web_path_r1, sepstr, "/", web_path_r2_buffer);
const web_path_raw = web_path_r2_buffer[0..web_path_r2_size];
var result = StringList.init(alloc);
defer result.deinit();
for (web_path_raw) |char| {
switch (char) {
'$' => try result.appendSlice("%24"),
'%' => try result.appendSlice("%25"),
'&' => try result.appendSlice("%26"),
'+' => try result.appendSlice("%2B"),
',' => try result.appendSlice("%2C"),
':' => try result.appendSlice("%3A"),
';' => try result.appendSlice("%3B"),
'=' => try result.appendSlice("%3D"),
'?' => try result.appendSlice("%3F"),
'@' => try result.appendSlice("%40"),
else => try result.append(char),
}
}
// full web_path does not contain the dot .
const web_path = std.mem.trimLeft(u8, result.toOwnedSlice(), ".");
return Paths{
.web_path = web_path,
.html_path = html_path,
};
}
const SliceList = std.ArrayList([]const u8);
const lexicographicalCompare = struct {
pub fn inner(innerCtx: void, a: []const u8, b: []const u8) bool {
_ = innerCtx;
var i: usize = 0;
if (a.len == 0 or b.len == 0) return false;
// advance while both slices still have bytes and they match; a slice that is
// a prefix of the other compares as not-less-than
while (i < a.len and i < b.len and a[i] == b[i]) : (i += 1) {}
if (i == a.len or i == b.len) return false;
return a[i] < b[i];
}
}.inner;
const TocContext = struct {
current_relative_path: ?[]const u8 = null,
ident: usize = 0,
};
/// Generate Table of Contents given the root folder.
///
/// Operates recursively.
pub fn generateToc(
result: *StringList,
build_file: *const BuildFile,
pages: *const PageMap,
folder: *const PageFolder,
context: *TocContext,
) error{OutOfMemory}!void {
var folder_iterator = folder.iterator();
// step 1: find all the folders at this level.
var folders = SliceList.init(result.allocator);
defer folders.deinit();
var files = SliceList.init(result.allocator);
defer files.deinit();
while (folder_iterator.next()) |entry| {
switch (entry.value_ptr.*) {
.dir => try folders.append(entry.key_ptr.*),
.file => try files.append(entry.key_ptr.*),
}
}
std.sort.sort([]const u8, folders.items, {}, lexicographicalCompare);
std.sort.sort([]const u8, files.items, {}, lexicographicalCompare);
// draw folders first (by recursing), then draw files second!
if (context.ident > 0)
try result.writer().print("<ul class=\"nested\">", .{});
for (folders.items) |folder_name| {
const child_folder_entry = folder.getEntry(folder_name).?;
try result.writer().print("<li><span class=\"caret\">{s}</span>", .{folder_name});
context.ident += 1;
defer context.ident -= 1;
try generateToc(result, build_file, pages, &child_folder_entry.value_ptr.*.dir, context);
}
for (files.items) |file_name| {
const local_path = folder.get(file_name).?.file;
var toc_path_buffer: [2048]u8 = undefined;
const toc_paths = try parsePaths(local_path, &toc_path_buffer);
const title = std.fs.path.basename(toc_paths.html_path);
try result.writer().print(
"<li><a class=\"toc-link\" href=\"{s}{s}\">{s}</a></li>",
.{ build_file.config.webroot, toc_paths.web_path, title },
);
}
if (context.ident > 0)
try result.writer().print("</ul>", .{});
}
pub const MatchList = std.ArrayList([]?libpcre.Capture);
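/// Run the regex repeatedly over `full_string`, converting each capture's offsets
/// from match-relative to absolute file offsets, and collect every match.
/// The caller frees each capture slice and the list itself.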
pub fn captureAll(
self: libpcre.Regex,
allocator: std.mem.Allocator,
full_string: []const u8,
options: libpcre.Options,
) (libpcre.Regex.ExecError || std.mem.Allocator.Error)!MatchList {
var offset: usize = 0;
var match_list = MatchList.init(allocator);
errdefer match_list.deinit();
while (true) {
var maybe_single_capture = try self.captures(allocator, full_string[offset..], options);
if (maybe_single_capture) |single_capture| {
const first_group = single_capture[0].?;
for (single_capture) |maybe_group, idx| {
if (maybe_group != null) {
// convert from relative offsets to absolute file offsets
single_capture[idx].?.start += offset;
single_capture[idx].?.end += offset;
}
}
try match_list.append(single_capture);
offset += first_group.end;
} else {
break;
}
}
return match_list;
}
const FOOTER =
\\ <footer>
\\ made with love using <a href="https://github.com/lun-4/obsidian2web">obsidian2web!</a>
\\ </footer>
;
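// High-level flow: parse the build file, gather pages from its include list,
// copy the static resources, build the table of contents, render every markdown
// page to HTML (first pass), run the regex post-processors over the generated
// HTML (second pass), and finally write public/index.html.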
pub fn main() anyerror!void {
var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = allocator_instance.deinit();
var alloc = allocator_instance.allocator();
var args_it = std.process.args();
_ = args_it.skip();
const build_file_path = args_it.next() orelse @panic("want build file path");
defer args_it.deinit();
const build_file_fd = try std.fs.cwd().openFile(build_file_path, .{ .mode = .read_only });
defer build_file_fd.close();
var buffer: [8192]u8 = undefined;
const build_file_data_count = try build_file_fd.read(&buffer);
const build_file_data = buffer[0..build_file_data_count];
var build_file = try BuildFile.parse(alloc, build_file_data);
defer build_file.deinit();
var vault_dir = try std.fs.cwd().openDir(build_file.vault_path, .{ .iterate = true });
defer vault_dir.close();
var pages = PageMap.init(alloc);
defer pages.deinit();
var titles = TitleMap.init(alloc);
defer titles.deinit();
var arena = std.heap.ArenaAllocator.init(alloc);
defer arena.deinit();
var string_arena = arena.allocator();
var tree = PageTree.init(alloc);
defer tree.deinit();
for (build_file.includes.items) |include_path| {
const joined_path = try std.fs.path.resolve(alloc, &[_][]const u8{ build_file.vault_path, include_path });
defer alloc.free(joined_path);
std.log.info("include path: {s}", .{joined_path});
// attempt to openDir first, if it fails assume file
var included_dir = std.fs.cwd().openDir(joined_path, .{ .iterate = true }) catch |err| switch (err) {
error.NotDir => {
const owned_path = try string_arena.dupe(u8, joined_path);
try addFilePage(&pages, &titles, &tree, include_path, owned_path);
continue;
},
else => return err,
};
defer included_dir.close();
var walker = try included_dir.walk(alloc);
defer walker.deinit();
while (try walker.next()) |entry| {
switch (entry.kind) {
.File => {
const joined_inner_path = try std.fs.path.join(string_arena, &[_][]const u8{ joined_path, entry.path });
const joined_local_inner_path = try std.fs.path.join(string_arena, &[_][]const u8{ include_path, entry.path });
// we own joined_inner_path's memory, so we can use it
try addFilePage(&pages, &titles, &tree, joined_local_inner_path, joined_inner_path);
},
else => {},
}
}
}
const resources = .{ .{ "resources/styles.css", "styles.css" }, .{ "resources/main.js", "main.js" } };
inline for (resources) |resource| {
const resource_text = @embedFile(resource.@"0");
const resource_fspath = "public/" ++ resource.@"1";
const leading_path_to_file = std.fs.path.dirname(resource_fspath).?;
try std.fs.cwd().makePath(leading_path_to_file);
var resource_fd = try std.fs.cwd().createFile(resource_fspath, .{ .truncate = true });
defer resource_fd.close();
_ = try resource_fd.write(resource_text);
}
var pages_it = pages.iterator();
var toc_result = StringList.init(alloc);
defer toc_result.deinit();
try toc_result.writer().print("<ul id=\"tree-of-contents\">", .{});
var toc_ctx: TocContext = .{};
try generateToc(&toc_result, &build_file, &pages, &tree.root.getPtr(".").?.dir, &toc_ctx);
try toc_result.writer().print("</ul>", .{});
const toc = toc_result.toOwnedSlice();
defer alloc.free(toc);
const webroot = build_file.config.webroot;
// first pass: use koino to parse all that markdown into html
while (pages_it.next()) |entry| {
const local_path = entry.key_ptr.*;
const page = entry.value_ptr.*;
const fspath = entry.value_ptr.*.filesystem_path;
std.log.info("processing '{s}'", .{fspath});
var page_fd = try std.fs.cwd().openFile(fspath, .{ .mode = .read_only });
defer page_fd.close();
const file_contents = try page_fd.reader().readAllAlloc(alloc, std.math.maxInt(usize));
defer alloc.free(file_contents);
var p = try koino.parser.Parser.init(alloc, .{});
defer p.deinit();
// the parser does not cope well with being fed small chunks,
// so feed it the whole file at once.
try p.feed(file_contents);
var doc = try p.finish();
defer doc.deinit();
var result = StringList.init(alloc);
defer result.deinit();
var path_buffer: [2048]u8 = undefined;
const paths = try parsePaths(local_path, &path_buffer);
try result.writer().print(
\\<!DOCTYPE html>
\\<html lang="en">
\\ <head>
\\ <meta charset="UTF-8">
\\ <meta name="viewport" content="width=device-width, initial-scale=1.0">
\\ <title>{s}</title>
\\ <script src="{s}/main.js"></script>
\\ <link rel="stylesheet" href="{s}/styles.css">
\\ </head>
\\ <body>
\\ <div class="toc">
, .{ page.title, webroot, webroot });
try result.appendSlice(toc);
try result.appendSlice(
\\ </div>
\\ <div class="text">
);
try result.writer().print(
\\ <h2>{s}</h2><p>
, .{page.title});
try koino.html.print(result.writer(), alloc, .{ .render = .{ .hard_breaks = true } }, doc);
try result.appendSlice(
\\ </p></div>
);
if (build_file.config.project_footer) {
try result.appendSlice(FOOTER);
}
try result.appendSlice(
\\ </body>
\\</html>
);
const leading_path_to_file = std.fs.path.dirname(paths.html_path).?;
try std.fs.cwd().makePath(leading_path_to_file);
var output_fd = try std.fs.cwd().createFile(paths.html_path, .{ .read = false, .truncate = true });
defer output_fd.close();
_ = try output_fd.write(result.items);
entry.value_ptr.*.html_path = try string_arena.dupe(u8, paths.html_path);
entry.value_ptr.*.web_path = try string_arena.dupe(u8, paths.web_path);
entry.value_ptr.*.status = .Built;
}
const link_processor = LinkProcessor{
.regex = try libpcre.Regex.compile("\\[\\[.+\\]\\]", .{}),
};
const check_processor = CheckmarkProcessor{
.regex = try libpcre.Regex.compile("\\[.\\]", .{}),
};
const processors = .{ link_processor, check_processor };
comptime var i = 0;
inline while (i < processors.len) : (i += 1) {
var processor = processors[i];
defer processor.deinit();
var link_pages_it = pages.iterator();
while (link_pages_it.next()) |entry| {
const page = entry.value_ptr.*;
try std.testing.expectEqual(PageBuildStatus.Built, page.status);
const html_path = entry.value_ptr.html_path.?;
std.log.info("running {s} for file '{s}'", .{ @typeName(@TypeOf(processor)), html_path });
var file_contents_mut: []const u8 = undefined;
{
var page_fd = try std.fs.cwd().openFile(html_path, .{ .mode = .read_only });
defer page_fd.close();
file_contents_mut = try page_fd.reader().readAllAlloc(alloc, std.math.maxInt(usize));
}
const file_contents = file_contents_mut;
defer alloc.free(file_contents);
const matches = try captureAll(processor.regex, alloc, file_contents, .{});
defer {
for (matches.items) |match| alloc.free(match);
matches.deinit();
}
var result = StringBuffer.init(alloc);
defer result.deinit();
// our replacing algorithm works by copying from 0 to match.start
// then printing the wanted text
// our replacing algorithm works by copying from match.end to another_match.start
// then printing the wanted text
// etc...
// note: [[x]] will become <a href="/x">x</a>
var last_match: ?libpcre.Capture = null;
for (matches.items) |captures| {
const match = captures[0].?;
_ = if (last_match == null)
try result.writer().write(file_contents[0..match.start])
else
try result.writer().write(file_contents[last_match.?.end..match.start]);
var ctx = ProcessorContext{
.build_file = &build_file,
.titles = &titles,
.pages = &pages,
.captures = captures,
.file_contents = file_contents,
.current_html_path = html_path,
};
try processor.handle(ctx, &result);
last_match = match;
}
// last_match.?.end to end of file
_ = if (last_match == null)
try result.writer().write(file_contents[0..file_contents.len])
else
try result.writer().write(file_contents[last_match.?.end..file_contents.len]);
{
var page_fd = try std.fs.cwd().openFile(
entry.value_ptr.html_path.?,
.{ .mode = .write_only },
);
defer page_fd.close();
_ = try page_fd.write(result.items);
}
}
}
{
const index_out_fd = try std.fs.cwd().createFile("public/index.html", .{ .truncate = true });
defer index_out_fd.close();
if (build_file.config.index) |path_to_index_file| {
// just copy the html into index.html LOL
var path_buffer: [2048]u8 = undefined;
const paths = try parsePaths(path_to_index_file, &path_buffer);
std.log.info("copying '{s}' to index.html", .{paths.html_path});
const index_fd = try std.fs.cwd().openFile(paths.html_path, .{ .mode = .read_only });
defer index_fd.close();
const written_bytes =
try index_fd.copyRangeAll(0, index_out_fd, 0, std.math.maxInt(u64));
try std.testing.expect(written_bytes > 0);
} else {
// generate our own empty file that contains the table of contents
const writer = index_out_fd.writer();
try writer.print(
\\<!DOCTYPE html>
\\<html lang="en">
\\ <head>
\\ <meta charset="UTF-8">
\\ <meta name="viewport" content="width=device-width, initial-scale=1.0">
\\ <title>{s}</title>
\\ <script src="/main.js"></script>
\\ <link rel="stylesheet" href="/styles.css">
\\ </head>
\\ <body>
\\ <div class="toc">
, .{"Index Page"});
_ = try writer.write(toc);
_ = try writer.write(
\\ </div>
\\ <div class="text">
\\ </div>
);
if (build_file.config.project_footer) {
_ = try writer.write(FOOTER);
}
_ = try writer.write(
\\ </body>
\\</html>
);
}
}
}
test "basic test" {
_ = std.testing.refAllDecls(@This());
} | src/main.zig |
const std = @import("std");
pub const descriptors = @import("descriptors.zig");
pub const Reflector = @import("Reflector.zig");
pub usingnamespace @import("types.zig");
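/// Export `function` under the JNI naming convention: a "Java_" prefix with the
/// dots in `name` replaced by underscores.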
pub fn exportAs(comptime name: []const u8, function: anytype) void {
var z: [name.len]u8 = undefined;
for (name) |v, i| z[i] = switch (v) {
'.' => '_',
else => v,
};
@export(function, .{ .name = "Java_" ++ &z, .linkage = .Strong });
}
pub fn exportUnder(comptime class_name: []const u8, functions: anytype) void {
inline for (std.meta.fields(@TypeOf(functions))) |field| {
const z = @field(functions, field.name);
if (std.mem.eql(u8, field.name, "onLoad"))
@export(z, .{ .name = "JNI_OnLoad", .linkage = .Strong })
else if (std.mem.eql(u8, field.name, "onUnload"))
@export(z, .{ .name = "JNI_OnUnload", .linkage = .Strong })
else
exportAs(class_name ++ "." ++ field.name, z);
}
}
// --- Code ~~stolen~~ adapted from debug.zig starts here ---
// Copyright (c) 2015-2021 Zig Contributors
// This code is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
/// This is required because SymbolInfo doesn't expose its deinit for some weird reason
fn deinitSymbolInfo(self: std.debug.SymbolInfo) void {
if (self.line_info) |li| {
deinitLineInfo(li);
}
}
/// This is required because LineInfo doesn't expose its deinit for some weird reason
fn deinitLineInfo(self: std.debug.LineInfo) void {
const allocator = self.allocator orelse return;
allocator.free(self.file_name);
}
fn printSourceAtAddressJava(debug_info: *std.debug.DebugInfo, writer: anytype, address: usize) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
return try writer.writeAll((" " ** 8) ++ "at unknown (missing/invalid debug info)");
},
else => return err,
};
const symbol_info = try module.getSymbolAtAddress(address);
defer deinitSymbolInfo(symbol_info);
if (symbol_info.line_info) |li| {
try writer.print((" " ** 8) ++ "at {s}({s}:{d}:{d})", .{ symbol_info.symbol_name, li.file_name, li.line, li.column });
} else {
try writer.print((" " ** 8) ++ "at {s}({s}:unknown)", .{ symbol_info.symbol_name, symbol_info.compile_unit_name });
}
}
fn writeStackTraceJava(
stack_trace: std.builtin.StackTrace,
writer: anytype,
debug_info: *std.debug.DebugInfo,
) !void {
if (std.builtin.strip_debug_info) return error.MissingDebugInfo;
var frame_index: usize = 0;
var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len);
while (frames_left != 0) : ({
frames_left -= 1;
frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len;
}) {
const return_address = stack_trace.instruction_addresses[frame_index];
try printSourceAtAddressJava(debug_info, writer, return_address - 1);
if (frames_left != 1) try writer.writeByte('\n');
}
}
fn formatStackTraceJava(writer: anytype, trace: std.builtin.StackTrace) !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const debug_info = std.debug.getSelfDebugInfo() catch return;
try writer.writeAll("\n");
writeStackTraceJava(trace, writer, debug_info) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
};
}
// --- Code ~~stolen~~ adapted from debug.zig ends here ---
fn splitError(comptime T: type) struct { error_set: ?type = null, payload: type } {
return switch (@typeInfo(T)) {
.ErrorUnion => |u| .{ .error_set = u.error_set, .payload = u.payload },
else => .{ .payload = T },
};
}
/// NOTE: This is sadly required as @Type for Fn is not implemented so we cannot autowrap functions
pub fn wrapErrors(function: anytype, args: anytype) splitError(@typeInfo(@TypeOf(function)).Fn.return_type.?).payload {
const se = splitError(@typeInfo(@TypeOf(function)).Fn.return_type.?);
var env: *JNIEnv = undefined;
switch (@TypeOf(args[0])) {
*JNIEnv => env = args[0],
*JavaVM => env = args[0].getEnv(JNIVersion{ .major = 10, .minor = 0 }) catch unreachable,
else => unreachable,
}
if (se.error_set) |_| {
return @call(.{}, function, args) catch |err| {
var maybe_ert = @errorReturnTrace();
if (maybe_ert) |ert| {
var err_buf = std.ArrayList(u8).init(std.heap.page_allocator);
defer err_buf.deinit();
err_buf.writer().writeAll(@errorName(err)) catch unreachable;
formatStackTraceJava(err_buf.writer(), ert.*) catch unreachable;
err_buf.writer().writeByte(0) catch unreachable;
env.throwGeneric(@ptrCast([*c]const u8, err_buf.items)) catch unreachable;
// Even though an exception technically kills execution we
// must still return something; just return a zeroed payload
return std.mem.zeroes(se.payload);
} else {
var buf: [512]u8 = undefined;
var msg = std.fmt.bufPrintZ(&buf, "{s}", .{err}) catch unreachable;
env.throwGeneric(msg) catch unreachable;
return std.mem.zeroes(se.payload);
}
};
} else {
return @call(.{}, function, args);
}
} | src/jui.zig |
const std = @import("std");
const lola = @import("lola");
const argsParser = @import("args");
const build_options = @import("build_options");
var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
const gpa = &gpa_state.allocator;
// This is our global object pool that is back-referenced
// by the runtime library.
pub const ObjectPool = lola.runtime.ObjectPool([_]type{
lola.libs.runtime.LoLaList,
lola.libs.runtime.LoLaDictionary,
});
pub fn main() !u8 {
defer _ = gpa_state.deinit();
var args = std.process.args();
var argsAllocator = gpa;
const exeName = try (args.next(argsAllocator) orelse {
try std.io.getStdErr().writer().writeAll("Failed to get executable name from the argument list!\n");
return 1;
});
defer argsAllocator.free(exeName);
const module = try (args.next(argsAllocator) orelse {
try print_usage();
return 1;
});
defer argsAllocator.free(module);
if (std.mem.eql(u8, module, "compile")) {
const options = try argsParser.parse(CompileCLI, &args, argsAllocator);
defer options.deinit();
return try compile(options.options, options.positionals);
} else if (std.mem.eql(u8, module, "dump")) {
const options = try argsParser.parse(DisassemblerCLI, &args, argsAllocator);
defer options.deinit();
return try disassemble(options.options, options.positionals);
} else if (std.mem.eql(u8, module, "run")) {
const options = try argsParser.parse(RunCLI, &args, argsAllocator);
defer options.deinit();
return try run(options.options, options.positionals);
} else if (std.mem.eql(u8, module, "help")) {
try print_usage();
return 0;
} else if (std.mem.eql(u8, module, "version")) {
try std.io.getStdOut().writeAll(build_options.version ++ "\n");
return 0;
} else {
try std.io.getStdErr().writer().print(
"Unrecognized command: {}\nSee `lola help` for detailed usage information.\n",
.{
module,
},
);
return 1;
}
return 0;
}
pub fn print_usage() !void {
const usage_msg =
\\Usage: lola [command] [options]
\\
\\Commands:
\\ compile [source] Compiles the given source file into a module.
\\ dump [module] Disassembles the given module.
\\ run [file] Runs the given file. Both modules and source files are allowed.
\\ version Prints version number and exits.
\\
\\General Options:
\\ -o [output file] Defines the output file for the action.
\\
\\Compile Options:
\\ --verify, -v Does not emit the output file, but only runs in-memory checks.
\\ This can be used to do syntax checks of the code.
\\
\\Disassemble Options:
\\ --with-offset, -O Adds offsets to the disassembly.
\\ --with-hexdump, -b Adds the hex dump in the disassembly.
\\ --metadata Dumps information about the module itself.
\\
\\Run Options:
\\ --limit [n] Limits execution to [n] instructions, then halts.
\\ --mode [autodetect|source|module] Determines if run should interpret the file as a source file,
\\ a precompiled module or if it should autodetect the file type.
\\ --no-stdlib Removes the standard library from the environment.
\\ --no-runtime Removes the system runtime from the environment.
\\ --benchmark Runs the script 100 times, measuring the duration of each run and
\\ will print a benchmark result in the end.
\\
;
// \\ -S Intermixes the disassembly with the original source code if possible.
try std.io.getStdErr().writer().writeAll(usage_msg);
}
const DisassemblerCLI = struct {
@"output": ?[]const u8 = null,
@"metadata": bool = false,
@"with-offset": bool = false,
@"with-hexdump": bool = false,
// @"intermix-source": bool = false,
pub const shorthands = .{
// .S = "intermix-source",
.b = "with-hexdump",
.O = "with-offset",
.o = "output",
.m = "metadata",
};
};
fn disassemble(options: DisassemblerCLI, files: []const []const u8) !u8 {
var stream = std.io.getStdOut().writer();
if (files.len == 0) {
try print_usage();
return 1;
}
var logfile: ?std.fs.File = null;
defer if (logfile) |f|
f.close();
if (options.output) |outfile| {
logfile = try std.fs.cwd().createFile(outfile, .{
.read = false,
.truncate = true,
.exclusive = false,
});
stream = logfile.?.writer();
}
for (files) |arg| {
if (files.len != 1) {
try stream.print("Disassembly for {}:\n", .{arg});
}
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
const allocator = &arena.allocator;
var cu = blk: {
var file = try std.fs.cwd().openFile(arg, .{ .read = true, .write = false });
defer file.close();
break :blk try lola.CompileUnit.loadFromStream(allocator, file.reader());
};
defer cu.deinit();
if (options.metadata) {
try stream.writeAll("metadata:\n");
try stream.print("\tcomment: {}\n", .{cu.comment});
try stream.print("\tcode size: {} bytes\n", .{cu.code.len});
try stream.print("\tnum globals: {}\n", .{cu.globalCount});
try stream.print("\tnum temporaries: {}\n", .{cu.temporaryCount});
try stream.print("\tnum functions: {}\n", .{cu.functions.len});
for (cu.functions) |fun| {
try stream.print("\t\tep={X:0>4} lc={: >3} {}\n", .{
fun.entryPoint,
fun.localCount,
fun.name,
});
}
try stream.print("\tnum debug syms: {}\n", .{cu.debugSymbols.len});
try stream.writeAll("disassembly:\n");
}
try lola.disassemble(stream, cu, lola.DisassemblerOptions{
.addressPrefix = options.@"with-offset",
.hexwidth = if (options.@"with-hexdump") 8 else null,
.labelOutput = true,
.instructionOutput = true,
});
}
return 0;
}
const CompileCLI = struct {
@"output": ?[]const u8 = null,
verify: bool = false,
pub const shorthands = .{
.o = "output",
.v = "verify",
};
};
const ModuleBuffer = extern struct {
data: [*]u8,
length: usize,
};
fn compile(options: CompileCLI, files: []const []const u8) !u8 {
if (files.len != 1) {
try print_usage();
return 1;
}
const allocator = gpa;
const inname = files[0];
const outname = if (options.output) |name|
name
else blk: {
var name = try allocator.alloc(u8, inname.len + 3);
std.mem.copy(u8, name[0..inname.len], inname);
std.mem.copy(u8, name[inname.len..], ".lm");
break :blk name;
};
defer if (options.output == null)
allocator.free(outname);
const cu = compileFileToUnit(allocator, inname) catch |err| switch (err) {
error.CompileError => return 1,
else => |e| return e,
};
defer cu.deinit();
if (!options.verify) {
var file = try std.fs.cwd().createFile(outname, .{ .truncate = true, .read = false, .exclusive = false });
defer file.close();
try cu.saveToStream(file.writer());
}
return 0;
}
const RunCLI = struct {
limit: ?u32 = null,
mode: enum { autodetect, source, module } = .autodetect,
@"no-stdlib": bool = false,
@"no-runtime": bool = false,
benchmark: bool = false,
};
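/// Load a compile unit according to `--mode`: in autodetect mode, try to load a
/// precompiled module first and fall back to compiling the file as source when
/// it is not a valid module.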
fn autoLoadModule(allocator: *std.mem.Allocator, options: RunCLI, file: []const u8) !lola.CompileUnit {
return switch (options.mode) {
.autodetect => loadModuleFromFile(allocator, file) catch |err| if (err == error.InvalidFormat)
try compileFileToUnit(allocator, file)
else
return err,
.module => try loadModuleFromFile(allocator, file),
.source => try compileFileToUnit(allocator, file),
};
}
fn run(options: RunCLI, files: []const []const u8) !u8 {
if (files.len != 1) {
try print_usage();
return 1;
}
const allocator = gpa;
var cu = autoLoadModule(allocator, options, files[0]) catch |err| {
const stderr = std.io.getStdErr().writer();
if (err == error.FileNotFound) {
try stderr.print("Could not find '{}'. Are you sure you passed the right file?\n", .{files[0]});
return 1;
}
try stderr.writeAll(switch (options.mode) {
.autodetect => "Failed to run file: File seems not to be a compiled module or source file!\n",
.module => "Failed to run file: File seems not to be a compiled module.\n",
.source => return 1, // We already have the diagnostic output of the compiler anyways
});
if (err != error.InvalidFormat and err != error.CompileError) {
try stderr.print("The following error happened: {}\n", .{
@errorName(err),
});
}
return 1;
};
defer cu.deinit();
var pool = ObjectPool.init(allocator);
defer pool.deinit();
var env = try lola.runtime.Environment.init(allocator, &cu, pool.interface());
defer env.deinit();
if (!options.@"no-stdlib") {
try lola.libs.std.install(&env, allocator);
}
if (!options.@"no-runtime") {
try lola.libs.runtime.install(&env, allocator);
// Move these two to a test runner
try env.installFunction("Expect", lola.runtime.Function.initSimpleUser(struct {
fn call(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value {
if (args.len != 1)
return error.InvalidArgs;
const assertion = try args[0].toBoolean();
if (!assertion)
return error.AssertionFailed;
return .void;
}
}.call));
try env.installFunction("ExpectEqual", lola.runtime.Function.initSimpleUser(struct {
fn call(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value {
if (args.len != 2)
return error.InvalidArgs;
if (!args[0].eql(args[1])) {
std.log.err("Expected {}, got {}\n", .{ args[1], args[0] });
return error.AssertionFailed;
}
return .void;
}
}.call));
}
if (options.benchmark == false) {
var vm = try lola.runtime.VM.init(allocator, &env);
defer vm.deinit();
while (true) {
var result = vm.execute(options.limit) catch |err| {
var stderr = std.io.getStdErr().writer();
if (std.builtin.mode == .Debug) {
if (@errorReturnTrace()) |err_trace| {
std.debug.dumpStackTrace(err_trace.*);
} else {
try stderr.print("Panic during execution: {}\n", .{@errorName(err)});
}
} else {
try stderr.print("Panic during execution: {}\n", .{@errorName(err)});
}
try stderr.print("Call stack:\n", .{});
try vm.printStackTrace(stderr);
return 1;
};
pool.clearUsageCounters();
try pool.walkEnvironment(env);
try pool.walkVM(vm);
pool.collectGarbage();
switch (result) {
.completed => return 0,
.exhausted => {
try std.io.getStdErr().writer().print("Execution exhausted after {} instructions!\n", .{
options.limit,
});
return 1;
},
.paused => {
// continue execution here
std.time.sleep(100); // sleep at least 100 ns and return control to scheduler
},
}
}
} else {
var cycle: usize = 0;
var stats = lola.runtime.VM.Statistics{};
var total_time: u64 = 0;
var total_timer = try std.time.Timer.start();
// Run at least 100 iterations and for at least one second
while ((cycle < 100) or (total_timer.read() < std.time.ns_per_s)) : (cycle += 1) {
var vm = try lola.runtime.VM.init(allocator, &env);
defer vm.deinit();
var timer = try std.time.Timer.start();
emulation: while (true) {
var result = vm.execute(options.limit) catch |err| {
var stderr = std.io.getStdErr().writer();
try stderr.print("Panic during execution: {}\n", .{@errorName(err)});
try stderr.print("Call stack:\n", .{});
try vm.printStackTrace(stderr);
return 1;
};
pool.clearUsageCounters();
try pool.walkEnvironment(env);
try pool.walkVM(vm);
pool.collectGarbage();
switch (result) {
.completed => break :emulation,
.exhausted => {
try std.io.getStdErr().writer().print("Execution exhausted after {} instructions!\n", .{
options.limit,
});
return 1;
},
.paused => {},
}
}
total_time += timer.lap();
stats.instructions += vm.stats.instructions;
stats.stalls += vm.stats.stalls;
}
try std.io.getStdErr().writer().print(
\\Benchmark result:
\\ Number of runs: {}
\\ Mean time: {d} µs
\\ Mean #instructions: {d}
\\ Mean #stalls: {d}
\\ Mean instruction/s: {d}
\\
, .{
cycle,
(@intToFloat(f64, total_time) / @intToFloat(f64, cycle)) / std.time.ns_per_us,
@intToFloat(f64, stats.instructions) / @intToFloat(f64, cycle),
@intToFloat(f64, stats.stalls) / @intToFloat(f64, cycle),
std.time.ns_per_s * @intToFloat(f64, stats.instructions) / @intToFloat(f64, total_time),
});
}
return 0;
}
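/// Compile a LoLa source file into a CompileUnit, printing any diagnostics that
/// were collected along the way.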
fn compileFileToUnit(allocator: *std.mem.Allocator, fileName: []const u8) !lola.CompileUnit {
const maxLength = 1 << 20; // 1 MB
var source = blk: {
var file = try std.fs.cwd().openFile(fileName, .{ .read = true, .write = false });
defer file.close();
break :blk try file.reader().readAllAlloc(gpa, maxLength);
};
defer gpa.free(source);
var diag = lola.compiler.Diagnostics.init(allocator);
defer {
for (diag.messages.items) |msg| {
std.debug.print("{}\n", .{msg});
}
diag.deinit();
}
const seq = try lola.compiler.tokenizer.tokenize(allocator, &diag, fileName, source);
defer allocator.free(seq);
var pgm = try lola.compiler.parser.parse(allocator, &diag, seq);
defer pgm.deinit();
const successful = try lola.compiler.validate(allocator, &diag, pgm);
if (!successful)
return error.CompileError;
var compile_unit = try lola.compiler.generateIR(allocator, pgm, fileName);
errdefer compile_unit.deinit();
return compile_unit;
}
fn loadModuleFromFile(allocator: *std.mem.Allocator, fileName: []const u8) !lola.CompileUnit {
var file = try std.fs.cwd().openFile(fileName, .{ .read = true, .write = false });
defer file.close();
return try lola.CompileUnit.loadFromStream(allocator, file.reader());
} | src/frontend/main.zig |
const std = @import("std");
const mem = std.mem;
const meta = std.meta;
const testing = std.testing;
const builtin = std.builtin;
const debug = std.debug;
const math = std.math;
const sort = std.sort.sort;
const TypeInfo = builtin.TypeInfo;
const ComptimeStringMap = std.ComptimeStringMap;
const RecursiveField = @import("recursive_field.zig").RecursiveField;
pub const DynRecFieldValue = @import("dyn_rec_field_value.zig").DynRecFieldValue;
/// A runtime keypath that can address one of the fields or (recursive)
/// subfields of a type at runtime.
pub fn RecursiveKeypath(comptime T: type) type {
const keypath_data = buildKeypathData(T);
const field_data = keypath_data.field_data;
var _enum_fields: [field_data.len]TypeInfo.EnumField = undefined;
for (field_data) |field, i| {
_enum_fields[i] = TypeInfo.EnumField{
.name = mangle(field.path),
.value = field.value,
};
}
comptime var _names: [field_data.len - 1][]const u8 = undefined;
for (field_data[1..field_data.len]) |field, i| {
_names[i] = field.path[field.path.len - 1];
}
sort([]const u8, &_names, {}, lexicalSort);
comptime var _name_count = 0;
for (_names) |name, i| {
if (i == 0 or !mem.eql(u8, name, _names[i - 1]))
_name_count += 1;
}
comptime var _names_dedup: [_name_count][]const u8 = undefined;
comptime var _names_j = 0;
for (_names) |name, i| {
if (i == 0 or !mem.eql(u8, name, _names[i - 1])) {
_names_dedup[_names_j] = name;
_names_j += 1;
}
}
const names = _names_dedup;
const Inner = @Type(.{ .Enum = TypeInfo.Enum{
.layout = .Auto,
.tag_type = keypath_data.stats.Tag(),
.fields = &_enum_fields,
.decls = &[0]TypeInfo.Declaration{},
.is_exhaustive = true,
} });
return struct {
_inner: Inner,
const Self = @This();
/// The keypath to the root of the struct, the struct itself.
pub fn root() Self {
return .{ ._inner = @field(Inner, "__keypath") }; // "__keypath" == mangle(&[0][]const u8{})
}
/// Retrieves a keypath to a subfield, based on a runtime known name, or
/// null if there is no such keypath.
///
/// `name` must contain the utf-8 encoded name of the field.
pub fn key(self: Self, name: []const u8) ?Self {
if (name_id_map.get(name)) |name_id| {
return subkey_table[self.toInt()][name_id];
} else {
return null;
}
}
/// Retrieves the parent keypath of the keypath, or `null` if it is the
/// root keypath.
pub fn up(self: Self) ?Self {
if (self.eq(root())) {
return null;
} else {
return up_table[self.toInt()];
}
}
/// Gets the value of the field this keypath represents in an object,
/// returning it as a `DynRecFieldValue`.
pub fn get(self: Self, obj: *const T) DynRecFieldValue(T) {
if (self.isZst()) {
return DynRecFieldValue(T).fromZst(self.dynType());
} else {
return DynRecFieldValue(T).fromRaw(
self.dynType(),
@ptrCast([*]const u8, &@ptrCast([*]const u8, obj)[self.byteOffset().?]),
@intCast(u3, self.bitOffset().? - self.byteOffset().? * 8),
);
}
}
/// Sets the field represented by the keypath on an object of the
/// appropriate type. Passing a value of the wrong dynamic type is
/// safety-check illegal behavior.
pub fn set(self: Self, obj: *T, value: DynRecFieldValue(T)) void {
debug.assert(self.dynType() == value.tag);
if (self.isZst()) return;
value.writeRaw(
@ptrCast([*]u8, @ptrCast([*]u8, obj) + self.byteOffset().?),
@intCast(u3, self.bitOffset().? - self.byteOffset().? * 8),
);
}
/// Converts the keypath to a keypath for another type, by matching the
/// names of the fields, or `null` if the target type does not have a
/// field with the same path.
pub fn duck(self: Self, comptime Duck: type) ?RecursiveKeypath(Duck) {
var path: [max_depth + 1][]const u8 = undefined;
var idx = max_depth;
var kp: ?Self = self;
while (kp != null) {
if (kp.?.fieldName()) |field_name| {
path[idx] = field_name;
} else {
break; // root
}
idx -= 1;
kp = kp.?.up();
}
return RecursiveKeypath(Duck).fromPath(path[idx + 1 .. max_depth + 1]);
}
/// Gets the keypath for the recursive path indicated by `path`.
pub fn fromPath(path: []const []const u8) ?Self {
var kp: ?Self = root();
for (path) |comp| {
if (kp != null) {
kp = kp.?.key(comp);
}
}
return kp;
}
fn fromPathComptime(comptime path: []const []const u8) ?Self {
for (field_data) |field| {
if (eqlStringSlices(field.path, path)) {
return Self{ ._inner = @intToEnum(Inner, field.value) };
}
}
return null;
}
/// Returns the name of the field represented by the keypath
/// (non-recursivly), or `null` if it is the root.
pub fn fieldName(self: Self) ?[]const u8 {
if (self.eq(root())) {
return null;
} else {
return name_table[self.toInt()];
}
}
/// Compares two keypaths for equality.
pub fn eq(self: Self, other: Self) bool {
return self._inner == other._inner;
}
const Int = meta.TagType(Inner);
fn toInt(self: Self) Int {
return @enumToInt(self._inner);
}
const DynType = DynRecFieldValue(T).DynType;
const table_size = math.maxInt(Int) + 1;
const type_table = tbl: {
var table: [table_size]DynType = undefined;
for (field_data) |field| {
table[field.value] = DynRecFieldValue(T).dynType(field.field_type).?;
}
break :tbl table;
};
const NameId = math.IntFittingRange(0, names.len);
const name_id_map: type = ComptimeStringMap(NameId, kvs: {
comptime var _kvs: [names.len]struct { @"0": []const u8, @"1": NameId } = undefined;
for (names) |name, i| {
_kvs[i] = .{ .@"0" = name, .@"1" = i };
}
break :kvs _kvs;
});
const max_depth: usize = max: {
var max: usize = 0;
for (field_data) |field| {
if (field.path.len > max) max = field.path.len;
}
break :max max;
};
const name_table: [table_size][]const u8 = tbl: {
var table: [table_size][]const u8 = undefined;
for (field_data) |field| {
if (field.path.len == 0) continue; // leave root undefined
table[field.value] = field.path[field.path.len - 1];
}
break :tbl table;
};
const up_table: [table_size]Self = tbl: {
var table: [table_size]Self = undefined;
for (field_data) |field| {
if (field.path.len == 0) continue;
table[field.value] = Self.fromPathComptime(field.path[0 .. field.path.len - 1]).?;
}
break :tbl table;
};
const subkey_table: [table_size][math.maxInt(NameId) + 1]?Self = tbl: {
var table: [table_size][math.maxInt(NameId) + 1]?Self = undefined;
for (field_data) |field| {
for (table[field.value]) |*k| {
k.* = null;
}
switch (meta.activeTag(@typeInfo(field.field_type))) {
// Note that non-extern unions cannot be included, because we won't set the tag right.
.Struct => {},
.Union => if (@typeInfo(field.field_type).Union.layout != .Extern) continue,
else => continue,
}
//@compileLog("filling", field.path);
for (meta.fields(field.field_type)) |subfield| {
var name_id = name_id_map.get(subfield.name).?;
var subfield_path = field.path ++ [1][]const u8{subfield.name};
table[field.value][@as(usize, name_id)] = Self.fromPathComptime(subfield_path).?;
//@compileLog(" ", table[field.value][@as(usize, name_id)].?._inner);
}
}
//@compileLog(table);
break :tbl table;
};
/// Returns at runtime the type of the field represented by the keypath.
pub fn dynType(self: Self) DynType {
return type_table[@enumToInt(self._inner)];
}
fn isZst(self: Self) bool {
return self.bitOffset() == null;
}
fn bitOffset(self: Self) ?usize {
return TagComponents.from(keypath_data.stats, self.toInt()).offset;
}
fn byteOffset(self: Self) ?usize {
if (self.bitOffset()) |bit_offset| {
// No div by zero or overflow possible.
return math.divFloor(usize, bit_offset, 8) catch unreachable;
} else {
return null;
}
}
};
}
const FieldEntry = struct {
path: []const []const u8,
field_type: type,
value: comptime_int,
};
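/// The two pieces packed into a keypath tag: the field's bit offset within the
/// root type (null for zero-sized fields) and an id that disambiguates fields
/// sharing the same offset.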
const TagComponents = struct {
offset: ?usize,
id: usize,
fn toTag(comptime self: TagComponents, comptime stats: TagStats) stats.Tag() {
if (self.offset) |offset| {
return @as(stats.Tag(), offset >> stats.aligned_bits | self.id << stats.offsetBits());
} else {
return @as(stats.Tag(), stats.max_sized_value + self.id);
}
}
fn from(comptime stats: TagStats, tag: stats.Tag()) TagComponents {
const offset_mask: usize = ((1 << stats.offsetBits()) - 1);
return .{
.offset = @as(usize, (tag & offset_mask)) << stats.aligned_bits,
.id = tag & ~offset_mask >> stats.aligned_bits,
};
}
};
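/// Aggregate statistics over all recursive fields, used to size the packed tag
/// integer (offset bits plus id bits).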
const TagStats = struct {
aligned_bits: comptime_int,
max_offset: comptime_int,
max_sized_id: comptime_int,
num_zst: comptime_int,
fn offsetBits(comptime self: TagStats) comptime_int {
return math.log2_int_ceil(usize, self.max_offset + 1) - self.aligned_bits;
}
fn idBits(comptime self: TagStats) comptime_int {
return math.log2_int_ceil(usize, self.max_sized_id + self.num_zst + 1);
}
fn Tag(comptime self: TagStats) type {
return meta.Int(.unsigned, self.offsetBits() + self.idBits());
}
};
fn KeypathData(comptime T: type) type {
return struct {
field_data: [RecursiveField.count(T)]FieldEntry,
stats: TagStats,
};
}
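/// Walk every recursive field of T, assign each one a packed tag made of its bit
/// offset and a per-offset id, and compute the stats needed to size the tag type.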
fn buildKeypathData(comptime T: type) KeypathData(T) {
const fields = RecursiveField.of(T);
var field_data: [fields.len]FieldEntry = undefined;
var tag_components: [fields.len]TagComponents = undefined;
var next_sized_id: [@bitSizeOf(T)]comptime_int = [_]comptime_int{0} ** @bitSizeOf(T);
var stats = TagStats{
.aligned_bits = math.maxInt(usize),
.max_offset = 0,
.max_sized_id = 0,
.num_zst = 0,
};
for (fields) |field, i| {
if (@bitSizeOf(field.field_type) == 0) {
tag_components[i] = .{ .offset = null, .id = stats.num_zst };
stats.num_zst += 1;
} else {
const offset = recursiveBitOffset(T, field.path);
tag_components[i] = .{ .offset = offset, .id = next_sized_id[offset] };
if (next_sized_id[offset] > stats.max_sized_id) stats.max_sized_id = next_sized_id[offset];
next_sized_id[offset] += 1;
if (offset > stats.max_offset) stats.max_offset = offset;
const leading = leadingBits(0, offset) orelse math.maxInt(usize);
if (leading < stats.aligned_bits) stats.aligned_bits = leading;
}
}
for (fields) |field, i| {
field_data[i] = .{
.path = field.path,
.field_type = field.field_type,
.value = tag_components[i].toTag(stats),
};
}
return .{ .field_data = field_data, .stats = stats };
}
fn lexicalSort(_: void, lhs: []const u8, rhs: []const u8) bool {
return mem.lessThan(u8, lhs, rhs);
}
fn recursiveBitOffset(comptime T: type, path: []const []const u8) comptime_int {
if (path.len == 0) return 0;
return @bitOffsetOf(T, path[0]) + recursiveBitOffset(
meta.fieldInfo(T, meta.stringToEnum(meta.FieldEnum(T), path[0]).?).field_type,
path[1..],
);
}
fn LeadingBits(comptime T: type) type {
if (T == comptime_int) {
return ?comptime_int;
} else {
return ?math.IntFittingRange(0, meta.bitCount(T));
}
}
fn leadingBits(bit: u1, val: anytype) LeadingBits(@TypeOf(val)) {
if (val == 0) return null;
var leading: LeadingBits(@TypeOf(val)) = 0;
var _val = val;
while (_val & 0b1 == bit) {
leading.? += 1;
_val >>= 1;
}
return leading;
}
fn mangle(comptime path: []const []const u8) []const u8 {
// TODO: escape dots so that struct {@"a.__kp_b": usize, a: struct {b: usize}}
// doesn't create an ambiguous name.
const sep = ".__kp_";
comptime var name: []const u8 = "__keypath";
comptime {
for (path) |component| {
name = name ++ sep ++ component;
}
}
return name;
}
fn eqlStringSlices(lhs: []const []const u8, rhs: []const []const u8) bool {
if (lhs.len != rhs.len) return false;
for (lhs) |lhs_str, idx| {
const rhs_str = rhs[idx];
if (lhs_str.len != rhs_str.len) return false;
for (lhs_str) |lhs_byte, str_idx| {
const rhs_byte = rhs_str[str_idx];
if (lhs_byte != rhs_byte) return false;
}
}
return true;
}
test "mangling" {
try testing.expectEqual(mangle(&.{ "a", "b", "c" }), "__keypath.__kp_a.__kp_b.__kp_c");
}
test "keypaths" {
const B = struct {
j: usize,
};
const Z = struct {
a: usize,
b: B,
c: usize,
};
const T = struct {
x: usize,
y: usize,
z: Z,
zz: Z,
};
var t = T{
.x = 1,
.y = 2,
.z = Z{
.a = 3,
.b = B{ .j = 4 },
.c = 5,
},
.zz = Z{
.a = 10,
.b = B{ .j = 40 },
.c = 42,
},
};
var kp = RecursiveKeypath(T).root();
var x_kp = kp.key("x").?;
var z_b_j_kp = kp.key("z").?.key("b").?.key("j").?;
var path_kp = RecursiveKeypath(T).fromPath(&.{ "z", "b", "j" });
try testing.expect(path_kp != null);
try testing.expect(z_b_j_kp.eq(path_kp.?));
try testing.expect(x_kp.get(&t).eq(DynRecFieldValue(T).of(usize, 1)));
x_kp.set(&t, DynRecFieldValue(T).of(usize, 32));
try testing.expect(t.x == 32);
try testing.expect(x_kp.up().?.eq(kp));
}
test "duck" {
const Looks = struct {
feathers: usize,
bill: usize,
};
const A = struct {
quack: usize,
looks: Looks,
};
const B = struct {
looks: Looks,
flap: usize,
};
var a_kp = RecursiveKeypath(A).root().key("looks").?.key("feathers").?;
var b_kp = RecursiveKeypath(B).root().key("looks").?.key("feathers").?;
var a_duck = b_kp.duck(A).?;
try testing.expect(a_kp.eq(a_duck));
} | src/main.zig |
usingnamespace @import("opcode.zig");
const std = @import("std");
const Register = u8;
pub const Emulator = struct {
const ProgramStartLocation = 0x200;
const FontsetStartLocation = 0x050;
memory: [4096]u8,
registers: [16]Register,
index_register: u16,
program_counter: u16,
gfx: [64 * 32]u1,
delay_timer: u8,
sound_timer: u8,
stack: [16]u16,
stack_pointer: u16,
keys: [16]bool,
draw_flag: bool,
rng: std.rand.DefaultPrng,
pub fn initialize() Emulator {
var result = Emulator {
.memory = [_]u8{0} ** 4096,
.registers = [_]Register{0} ** 16,
.index_register = 0,
.program_counter = ProgramStartLocation,
.gfx = [_]u1{0} ** (64 * 32),
.delay_timer = 0,
.sound_timer = 0,
.stack = [_]u16{0} ** 16,
.stack_pointer = 0,
.keys = [_]bool{false} ** 16,
.draw_flag = false,
.rng = std.rand.DefaultPrng.init(0xDEADBEEF)
};
// Load the fontset
comptime std.debug.assert(chip8_fontset.len == 80);
for (chip8_fontset) |value, i| {
result.memory[FontsetStartLocation + i] = value;
}
return result;
}
pub fn load_program(self: *Emulator, filename: []const u8, allocator: *std.mem.Allocator) !void {
const file = try std.fs.cwd().openFile(filename, .{ .read = true });
defer file.close();
const file_stats = try file.stat();
const memory = try allocator.alloc(u8, file_stats.size);
defer allocator.free(memory);
const read_bytes = try file.read(memory);
std.debug.assert(read_bytes == memory.len);
// Guard against ROMs that would overflow the memory region above ProgramStartLocation.
std.debug.assert(memory.len <= self.memory.len - ProgramStartLocation);
for (memory) |byte, i| {
self.memory[i + ProgramStartLocation] = byte;
}
}
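/// Fetch the two bytes at the program counter, advance it, and decode them into
/// an OpCode; returns null for unknown opcodes or when the program counter would
/// run past memory (the caller substitutes a Nop).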
fn fetch_opcode(self: *Emulator) ?OpCode {
if (self.program_counter + 2 >= 4096) {
return null;
}
// Fetch
const opcode: u16 = @intCast(u16, self.memory[self.program_counter]) << 8 | @intCast(u16, self.memory[self.program_counter + 1]);
self.program_counter += 2;
// Decode
switch (opcode & 0xF000) {
0x0000 => {
switch (opcode & 0x00FF) {
0x00EE => return OpCode{ .Return = .{} },
0x00E0 => return OpCode{ .ClearScreen = .{} },
else => std.debug.warn("Unknown opcode = {x}\n", .{opcode}),
}
},
0x1000 => return OpCode{ .Jump = opcode & 0x0FFF },
0xA000 => return OpCode{ .SetIndex = opcode & 0x0FFF },
0x6000 => return OpCode{ .SetRegister = .{
.register_name = @intCast(u8, (opcode & 0x0F00) >> 8),
.value = @intCast(u8, opcode & 0x00FF)
}},
0x7000 => return OpCode{ .Add = .{
.register_name = @intCast(u8, (opcode & 0x0F00) >> 8),
.value = @intCast(u8, opcode & 0x00FF)
}},
0x8000 => {
switch (opcode & 0x000F) {
0x0000 => return OpCode{ .Assign = .{
.register_destination = @intCast(u8, (opcode & 0x0F00) >> 8),
.register_source = @intCast(u8, (opcode & 0x00F0) >> 4),
}},
else => std.debug.warn("Unknown opcode = {x}\n", .{opcode}),
}
},
0xD000 => return OpCode{ .DrawSprite = .{
.x = @intCast(u8, (opcode & 0x0F00) >> 8),
.y = @intCast(u8, (opcode & 0x00F0) >> 4),
.height = @intCast(u8, opcode & 0x000F)
}},
0x2000 => {
return OpCode{ .CallFunction = opcode & 0x0FFF };
},
0x3000 => return OpCode{ .Equal = .{
.register_name = @intCast(u8, (opcode & 0x0F00) >> 8),
.value = @intCast(u8, opcode & 0x00FF)
}},
0x4000 => return OpCode{ .NotEqual = .{
.register_name = @intCast(u8, (opcode & 0x0F00) >> 8),
.value = @intCast(u8, opcode & 0x00FF)
}},
0xC000 => return OpCode{ .Random = .{
.register_name = @intCast(u8, (opcode & 0x0F00) >> 8),
.bitwise_and_value = @intCast(u8, opcode & 0x00FF)
}},
0xE000 => {
const second_byte: u8 = @intCast(u8, (opcode & 0x0F00) >> 8);
switch (opcode & 0x00FF) {
0x009E => return OpCode{ .IsKeyDown = second_byte },
0x00A1 => return OpCode{ .IsKeyUp = second_byte },
else => std.debug.warn("Unknown opcode = {x}\n", .{opcode}),
}
},
0xF000 => {
const second_byte: u8 = @intCast(u8, (opcode & 0x0F00) >> 8);
switch (opcode & 0x00FF) {
0x0007 => return OpCode{ .GetDelay = second_byte },
0x0015 => return OpCode{ .SetDelayTimer = second_byte },
0x001E => return OpCode{ .AddToIndex = second_byte },
0x0033 => return OpCode{ .StoreBCD = second_byte },
0x0065 => return OpCode{ .LoadIntoRegisters = second_byte },
0x0029 => return OpCode{ .SetIndexToSprite = second_byte },
else => std.debug.warn("Unknown opcode = {x}\n", .{opcode}),
}
},
else => std.debug.warn("Unknown opcode = {x}\n", .{opcode}),
}
return null;
}
fn execute_opcode(self: *Emulator, opcode: OpCode) void {
switch(opcode) {
OpCodeName.SetIndex => |value| {
self.index_register = value;
},
OpCodeName.SetIndexToSprite => |register| {
const character = self.registers[register];
self.index_register = FontsetStartLocation + (5 * character);
},
OpCodeName.SetRegister => |value| {
self.registers[value.register_name] = value.value;
},
OpCodeName.Add => |value| {
self.registers[value.register_name] +%= value.value;
},
OpCodeName.AddToIndex => |register_name| {
self.index_register += self.registers[register_name];
},
OpCodeName.Assign => |value| {
self.registers[value.register_destination] = self.registers[value.register_source];
},
OpCodeName.DrawSprite => |value| {
self.draw_sprite(self.registers[value.x], self.registers[value.y], value.height);
},
OpCodeName.Jump => |address| {
self.program_counter = address;
},
OpCodeName.CallFunction => |address| {
self.stack[self.stack_pointer] = self.program_counter;
self.stack_pointer += 1;
self.program_counter = address;
},
OpCodeName.Return => |_| {
std.debug.assert(self.stack_pointer > 0);
self.stack_pointer -= 1;
self.program_counter = self.stack[self.stack_pointer];
},
OpCodeName.SetDelayTimer => |register| {
self.delay_timer = self.registers[register];
},
OpCodeName.StoreBCD => |register| {
const value = self.registers[register];
self.memory[self.index_register + 0] = value / 100;
self.memory[self.index_register + 1] = (value / 10) % 10;
self.memory[self.index_register + 2] = (value % 100) % 10;
},
OpCodeName.LoadIntoRegisters => |until_register| {
var i: u8 = 0;
while (i <= until_register) : (i += 1) {
self.registers[i] = self.memory[self.index_register + i];
}
},
OpCodeName.Equal => |value| {
if (self.registers[value.register_name] == value.value) {
self.program_counter += 2;
}
},
OpCodeName.NotEqual => |value| {
if (self.registers[value.register_name] != value.value) {
self.program_counter += 2;
}
},
OpCodeName.Random => |value| {
self.registers[value.register_name] = self.rng.random.int(u8) & value.bitwise_and_value;
},
OpCodeName.IsKeyDown => |register_name| {
if (self.keys[self.registers[register_name]]) {
self.program_counter += 2;
}
},
OpCodeName.IsKeyUp => |register_name| {
if (!self.keys[self.registers[register_name]]) {
self.program_counter += 2;
}
},
OpCodeName.GetDelay => |register_name| {
self.registers[register_name] = self.delay_timer;
},
OpCodeName.ClearScreen => |_| {
for (self.gfx) |*value| {
value.* = 0;
}
},
OpCodeName.Nop => {},
OpCodeName.Count => unreachable,
}
}
fn draw_sprite(self: *Emulator, x: Register, y: Register, height: u8) void {
// VF is special register used for collision detection
self.registers[0xF] = 0;
var yline: u8 = 0;
while (yline < height) : (yline += 1) {
const pixel = self.memory[self.index_register + yline];
var xline: u8 = 0;
while (xline < 8) : (xline += 1) {
const sprite_pixel = pixel & (@intCast(u8, 0x80) >> @intCast(u3, xline));
if (sprite_pixel != 0) {
// Wrap around the 64x32 screen (standard CHIP-8 behaviour) so sprites drawn at
// the edges cannot index out of bounds.
const px = (@as(u16, x) + xline) % 64;
const py = (@as(u16, y) + yline) % 32;
const screen_pixel: *u1 = &self.gfx[px + py * 64];
if (screen_pixel.* != 0) {
// Collision
self.registers[0xF] = 1;
}
screen_pixel.* ^= 1;
}
}
}
self.draw_flag = true;
}
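/// Run a single fetch/decode/execute cycle and tick the delay and sound timers.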
pub fn emulate_cycle(self: *Emulator) void {
const opcode = self.fetch_opcode() orelse OpCode{ .Nop = {} };
self.execute_opcode(opcode);
if (self.delay_timer > 0) {
self.delay_timer -= 1;
}
if (self.sound_timer > 0) {
if (self.sound_timer == 1) {
std.log.info("BEEP!", .{});
}
self.sound_timer -= 1;
}
}
};
const chip8_fontset = [_]u8 {
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
}; | src/emulator.zig |
const std = @import("std");
// Generic arg parser
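// Supports bool flags, integers, floats, fixed-size u8 arrays, and enums, matched
// by `--field-name` (dashes in the flag map to underscores in the field name).
//
// Hypothetical usage sketch (the Options struct below is illustrative, not part
// of this code base):
//   const Options = struct { verbose: bool = false, count: u32 = 1 };
//   var opts = Options{};
//   try ArgParser(Options).parse(allocator, &opts); // e.g. `prog --verbose --count 3`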
pub fn ArgParser(comptime T: type) type {
return struct {
pub fn parse(allocator: std.mem.Allocator, values: *T) !void {
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
inline for (@typeInfo(T).Struct.fields) |field| {
const info = @typeInfo(field.field_type);
for (args) |arg, index| {
if (!std.mem.startsWith(u8, arg, "--"))
continue;
const arg_field_name = try allocator.dupe(u8, arg[2..]);
defer allocator.free(arg_field_name);
// flag-name => flag_name
std.mem.replaceScalar(u8, arg_field_name, '-', '_');
if (!std.mem.eql(u8, field.name, arg_field_name))
continue;
if (index + 1 >= args.len and info != .Bool) // make sure we have an actual value (if not bool)
continue;
switch (info) {
.Bool => @field(values, field.name) = true,
.Int => @field(values, field.name) = try std.fmt.parseInt(field.field_type, args[index + 1], 10),
.Float => @field(values, field.name) = try std.fmt.parseFloat(field.field_type, args[index + 1]),
.Array => std.mem.copy(u8, &@field(values, field.name), args[index + 1]),
.Enum => {
inline for (info.Enum.fields) |enum_field| {
if (std.mem.eql(u8, enum_field.name, args[index + 1])) {
@field(values, field.name) = @intToEnum(field.field_type, enum_field.value);
}
}
},
else => @compileError("Argparse of type " ++ @typeName(
field.field_type,
) ++ " not supported"),
}
}
}
}
};
} | src/arg_parser.zig |
const std = @import("std");
const generator = @import("./generator.zig");
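// Wraps a single sub-generator with the bookkeeping Join needs: a small state
// machine (Next/Awaiting/Returned/Done) plus a `next` wrapper that records its
// completion in the shared counter and resumes the joined frame when it is the
// first completion since the counter was last reset.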
// pending resolution of https://github.com/ziglang/zig/issues/10442,
// this has to be a function separate from `Join`
fn joinedGenerator(comptime g: type, comptime T: type, comptime allocating: bool) type {
return struct {
generator: g,
state: enum { Next, Awaiting, Returned, Done } = .Next,
frame: if (allocating) *@Frame(next) else @Frame(next) = undefined,
fn next(self: *@This(), counter: *std.atomic.Atomic(usize), frame: anyframe) !?T {
defer {
self.state = .Returned;
if (counter.fetchAdd(1, .SeqCst) == 0) {
resume frame;
}
}
return self.generator.next();
}
};
}
fn initializer(
comptime Self: type,
comptime generators: []const type,
generator_fields: []const std.builtin.TypeInfo.StructField,
comptime allocating: bool,
) type {
return if (allocating) struct {
pub fn init(g: std.meta.Tuple(generators), allocator: std.mem.Allocator) Self {
var s = Self{ .allocator = allocator };
inline for (generator_fields) |_, i| {
s.generators[i] = .{ .generator = g[i] };
}
return s;
}
} else struct {
pub fn init(g: std.meta.Tuple(generators)) Self {
var s = Self{};
inline for (generator_fields) |_, i| {
s.generators[i] = .{ .generator = g[i] };
}
return s;
}
};
}
/// Joins multiple generators into one and yields values as they come from
/// either generator
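/// If `allocating` is true, each sub-generator's `next` frame is allocated
/// through the allocator passed to `init` instead of being embedded in the
/// joined generator itself.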
pub fn Join(comptime generators: []const type, comptime T: type, comptime allocating: bool) type {
var generator_fields: [generators.len]std.builtin.TypeInfo.StructField = undefined;
inline for (generators) |g, field_index| {
const G = joinedGenerator(g, T, allocating);
generator_fields[field_index] = .{
.name = std.fmt.comptimePrint("{d}", .{field_index}),
.field_type = G,
.default_value = @as(?G, null),
.is_comptime = false,
.alignment = @alignOf(G),
};
}
const generators_struct = std.builtin.TypeInfo{
.Struct = .{
.layout = .Auto,
.fields = &generator_fields,
.decls = &[0]std.builtin.TypeInfo.Declaration{},
.is_tuple = true,
},
};
const generator_fields_const = generator_fields;
return generator.Generator(struct {
const Self = @This();
generators: @Type(generators_struct) = undefined,
frame: *@Frame(generate) = undefined,
allocator: if (allocating) std.mem.Allocator else void = undefined,
pub usingnamespace initializer(Self, generators, &generator_fields_const, allocating);
pub fn generate(self: *Self, handle: *generator.Handle(T)) !void {
if (allocating) {
inline for (generator_fields_const) |_, i| {
var g = &self.generators[i];
g.frame = self.allocator.create(@Frame(@TypeOf(g.*).next)) catch |e| {
@setEvalBranchQuota(generators.len * 1000);
inline for (generator_fields_const) |_, i_| {
if (i_ == i) return e;
var g_ = &self.generators[i_];
self.allocator.destroy(g_.frame);
}
};
}
}
defer {
if (allocating) {
inline for (generator_fields_const) |_, i| {
var g = &self.generators[i];
if (g.state != .Done)
self.allocator.destroy(g.frame);
}
}
}
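            // `counter` counts sub-generator next() completions since it was last
            // swapped back to zero; `reported` is how many completions had been
            // observed when this frame last woke up. The suspend block compares
            // the two to decide whether it is safe to stay suspended or whether a
            // completion may have been missed and another pass is needed.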
var counter = std.atomic.Atomic(usize).init(0);
var active: usize = self.generators.len;
var reported: usize = 0;
while (true) {
// If there are no new reports, suspend until resumed by one
suspend {
if (counter.swap(0, .SeqCst) == reported) {
// run next() where needed
inline for (generator_fields_const) |_, i| {
var g = &self.generators[i];
if (g.state == .Next) {
g.state = .Awaiting;
if (allocating)
g.frame.* = async g.next(&counter, @frame())
else
g.frame = async g.next(&counter, @frame());
}
}
} else {
reported = 0;
resume @frame();
}
}
reported = counter.load(.SeqCst);
while (true) {
// check for returns
var yielded: usize = 0;
inline for (generator_fields_const) |_, i| {
var g = &self.generators[i];
if (g.state == .Returned) {
yielded += 1;
const value = try await g.frame;
if (value) |v| {
try handle.yield(v);
g.state = .Next;
} else {
if (allocating)
self.allocator.destroy(g.frame);
g.state = .Done;
active -= 1;
}
}
}
// ...until we run out of reports
if (yielded == 0) break;
}
if (active == 0) break;
}
}
}, T);
}
test "basic" {
const expect = std.testing.expect;
const ty = struct {
pub fn generate(_: *@This(), handle: *generator.Handle(u8)) !void {
try handle.yield(1);
try handle.yield(2);
try handle.yield(3);
}
};
const ty1 = struct {
pub fn generate(_: *@This(), handle: *generator.Handle(u8)) !void {
try handle.yield(10);
try handle.yield(20);
try handle.yield(30);
}
};
const G0 = generator.Generator(ty, u8);
const G1 = generator.Generator(ty1, u8);
const G = Join(&[_]type{ G0, G1 }, u8, false);
var g = G.init(G.Context.init(.{ G0.init(ty{}), G1.init(ty1{}) }));
var sum: usize = 0;
while (try g.next()) |v| {
sum += v;
}
try expect(sum == 66);
}
test "with async i/o" {
// determine file size
const test_file = try std.fs.cwd()
.openFile("README.md", std.fs.File.OpenFlags{ .read = true, .write = false });
const test_reader = test_file.reader();
var file_size: usize = 0;
while (true) {
_ = test_reader.readByte() catch break;
file_size += 1;
}
const expect = std.testing.expect;
// prepare reader type
const ty = struct {
pub fn generate(_: *@This(), handle: *generator.Handle(u8)) !void {
const file = try std.fs.cwd()
.openFile("README.md", std.fs.File.OpenFlags{ .read = true, .write = false });
const reader = file.reader();
while (true) {
const byte = reader.readByte() catch return;
try handle.yield(byte);
}
}
};
const G0 = generator.Generator(ty, u8);
const G = Join(&[_]type{ G0, G0 }, u8, false);
var g = G.init(G.Context.init(.{ G0.init(ty{}), G0.init(ty{}) }));
// test
var size: usize = 0;
while (try g.next()) |_| {
size += 1;
}
try expect(size == file_size * 2);
}
test "memory impact of not allocating vs allocating frames" {
const ty = struct {
pub fn generate(_: *@This(), handle: *generator.Handle(u8)) !void {
try handle.yield(1);
try handle.yield(2);
try handle.yield(3);
}
};
const G0 = generator.Generator(ty, u8);
const GAllocating = Join(&[_]type{G0} ** 50, u8, true);
const GNonAllocating = Join(&[_]type{G0} ** 50, u8, false);
_ = GNonAllocating.init(GNonAllocating.Context.init(.{G0.init(ty{})}));
_ = GAllocating.init(GAllocating.Context.init(.{G0.init(ty{})}, std.testing.allocator));
    // The assertion below doesn't hold true for all numbers of joined generators,
    // as the frame of the allocating Join generator can get larger than that of the non-allocating one.
// Could be related to this:
// https://zigforum.org/t/unacceptable-memory-overhead-with-nested-async-function-call/407/5
// try std.testing.expect(@sizeOf(GAllocating) < @sizeOf(GNonAllocating));
}
test "allocating join" {
const expect = std.testing.expect;
const ty = struct {
pub fn generate(_: *@This(), handle: *generator.Handle(u8)) !void {
try handle.yield(1);
try handle.yield(2);
try handle.yield(3);
}
};
const G0 = generator.Generator(ty, u8);
const G = Join(&[_]type{G0}, u8, true);
var g = G.init(G.Context.init(.{G0.init(ty{})}, std.testing.allocator));
try expect((try g.next()).? == 1);
try expect((try g.next()).? == 2);
try expect((try g.next()).? == 3);
try expect((try g.next()) == null);
} | src/join.zig |
const std = @import("std");
const os = std.os;
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.StackTracesContext) void {
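    // Each case lists the expected stack trace per build mode; "[address]" and
    // "[function]" are placeholders for values that vary between runs and
    // optimization modes, and `exclude_os` skips the listed targets for a case.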
cases.addCase(.{
.name = "return",
.source =
\\pub fn main() !void {
\\ return error.TheSkyIsFalling;
\\}
,
.Debug = .{
.expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
},
.ReleaseSafe = .{
.exclude_os = .{
.windows, // segfault
},
.expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
},
.ReleaseFast = .{
.expect =
\\error: TheSkyIsFalling
\\
,
},
.ReleaseSmall = .{
.expect =
\\error: TheSkyIsFalling
\\
,
},
});
cases.addCase(.{
.name = "try return",
.source =
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
\\
\\pub fn main() !void {
\\ try foo();
\\}
,
.Debug = .{
.expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:6:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
},
.ReleaseSafe = .{
.exclude_os = .{
.windows, // segfault
},
.expect =
\\error: TheSkyIsFalling
\\source.zig:2:5: [address] in [function]
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:6:5: [address] in [function]
\\ try foo();
\\ ^
\\
,
},
.ReleaseFast = .{
.expect =
\\error: TheSkyIsFalling
\\
,
},
.ReleaseSmall = .{
.expect =
\\error: TheSkyIsFalling
\\
,
},
});
cases.addCase(.{
.name = "try try return return",
.source =
\\fn foo() !void {
\\ try bar();
\\}
\\
\\fn bar() !void {
\\ return make_error();
\\}
\\
\\fn make_error() !void {
\\ return error.TheSkyIsFalling;
\\}
\\
\\pub fn main() !void {
\\ try foo();
\\}
,
.Debug = .{
.expect =
\\error: TheSkyIsFalling
\\source.zig:10:5: [address] in make_error (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:6:5: [address] in bar (test)
\\ return make_error();
\\ ^
\\source.zig:2:5: [address] in foo (test)
\\ try bar();
\\ ^
\\source.zig:14:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
},
.ReleaseSafe = .{
.exclude_os = .{
.windows, // segfault
},
.expect =
\\error: TheSkyIsFalling
\\source.zig:10:5: [address] in [function]
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:6:5: [address] in [function]
\\ return make_error();
\\ ^
\\source.zig:2:5: [address] in [function]
\\ try bar();
\\ ^
\\source.zig:14:5: [address] in [function]
\\ try foo();
\\ ^
\\
,
},
.ReleaseFast = .{
.expect =
\\error: TheSkyIsFalling
\\
,
},
.ReleaseSmall = .{
.expect =
\\error: TheSkyIsFalling
\\
,
},
});
cases.addCase(.{
.exclude_os = .{
.windows,
},
.name = "dumpCurrentStackTrace",
.source =
\\const std = @import("std");
\\
\\fn bar() void {
\\ std.debug.dumpCurrentStackTrace(@returnAddress());
\\}
\\fn foo() void {
\\ bar();
\\}
\\pub fn main() u8 {
\\ foo();
\\ return 1;
\\}
,
.Debug = .{
.expect =
\\source.zig:7:8: [address] in foo (test)
\\ bar();
\\ ^
\\source.zig:10:8: [address] in main (test)
\\ foo();
\\ ^
\\
,
},
});
} | test/stack_traces.zig |
const std = @import("../std.zig");
const builtin = std.builtin;
usingnamespace std.c;
extern "c" fn __errno() *c_int;
pub const _errno = __errno;
pub const dl_iterate_phdr_callback = extern fn (info: *dl_phdr_info, size: usize, data: ?*c_void) c_int;
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
pub extern "c" fn __fstat50(fd: fd_t, buf: *Stat) c_int;
pub extern "c" fn __stat50(path: [*:0]const u8, buf: *Stat) c_int;
pub extern "c" fn __clock_gettime50(clk_id: c_int, tp: *timespec) c_int;
pub extern "c" fn __clock_getres50(clk_id: c_int, tp: *timespec) c_int;
pub extern "c" fn __getdents30(fd: c_int, buf_ptr: [*]u8, nbytes: usize) c_int;
pub extern "c" fn __sigaltstack14(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
pub extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
pub extern "c" fn __sigaction14(sig: c_int, noalias act: *const Sigaction, noalias oact: ?*Sigaction) c_int;
pub extern "c" fn __sigprocmask14(how: c_int, noalias set: ?*const sigset_t, noalias oset: ?*sigset_t) c_int;
pub extern "c" fn __socket30(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
pub extern "c" fn __gettimeofday50(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
pub extern "c" fn __getrusage50(who: c_int, usage: *rusage) c_int;
// libc aliases this as sched_yield
pub extern "c" fn __libc_thr_yield() c_int;
pub const pthread_mutex_t = extern struct {
ptm_magic: u32 = 0x33330003,
ptm_errorcheck: padded_pthread_spin_t = 0,
ptm_ceiling: padded_pthread_spin_t = 0,
ptm_owner: usize = 0,
ptm_waiters: ?*u8 = null,
ptm_recursed: u32 = 0,
ptm_spare2: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
ptc_magic: u32 = 0x55550005,
ptc_lock: pthread_spin_t = 0,
ptc_waiters_first: ?*u8 = null,
ptc_waiters_last: ?*u8 = null,
ptc_mutex: ?*pthread_mutex_t = null,
ptc_private: ?*c_void = null,
};
const pthread_spin_t = switch (builtin.arch) {
.aarch64, .aarch64_be, .aarch64_32 => u8,
.mips, .mipsel, .mips64, .mips64el => u32,
.powerpc, .powerpc64, .powerpc64le => i32,
.i386, .x86_64 => u8,
.arm, .armeb, .thumb, .thumbeb => i32,
.sparc, .sparcel, .sparcv9 => u8,
.riscv32, .riscv64 => u32,
else => @compileError("undefined pthread_spin_t for this arch"),
};
const padded_pthread_spin_t = switch (builtin.arch) {
.i386, .x86_64 => u32,
.sparc, .sparcel, .sparcv9 => u32,
else => pthread_spin_t,
};
pub const pthread_attr_t = extern struct {
pta_magic: u32,
pta_flags: i32,
pta_private: ?*c_void,
}; | lib/std/c/netbsd.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const mem = std.mem;
const path = std.fs.path;
const assert = std.debug.assert;
const log = std.log.scoped(.mingw);
const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const Cache = @import("Cache.zig");
pub const CRTFile = enum {
crt2_o,
dllcrt2_o,
mingw32_lib,
msvcrt_os_lib,
mingwex_lib,
uuid_lib,
};
pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
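    // Each CRTFile variant compiles a set of mingw-w64 sources (the library
    // variants draw on the file lists at the bottom of this file) with the
    // flags assembled below, then hands them to comp.build_crt_file.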
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
switch (crt_file) {
.crt2_o => {
var args = std.ArrayList([]const u8).init(arena);
try add_cc_args(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_SYSCRT=1",
"-DCRTDLL=1",
"-U__CRTDLL__",
"-D__MSVCRT__",
// Uncomment these 3 things for crtu
//"-DUNICODE",
//"-D_UNICODE",
//"-DWPRFLAG=1",
});
return comp.build_crt_file("crt2", .Obj, &[1]Compilation.CSourceFile{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "crt", "crtexe.c",
}),
.extra_flags = args.items,
},
});
},
.dllcrt2_o => {
var args = std.ArrayList([]const u8).init(arena);
try add_cc_args(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_SYSCRT=1",
"-DCRTDLL=1",
"-U__CRTDLL__",
"-D__MSVCRT__",
});
return comp.build_crt_file("dllcrt2", .Obj, &[1]Compilation.CSourceFile{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "crt", "crtdll.c",
}),
.extra_flags = args.items,
},
});
},
.mingw32_lib => {
var c_source_files: [mingw32_lib_deps.len]Compilation.CSourceFile = undefined;
for (mingw32_lib_deps) |dep, i| {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-DHAVE_CONFIG_H",
"-D_SYSCRT=1",
"-DCRTDLL=1",
"-isystem",
try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "include", "any-windows-any",
}),
"-isystem",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "include" }),
"-std=gnu99",
"-D_CRTBLD",
"-D_WIN32_WINNT=0x0f00",
"-D__MSVCRT_VERSION__=0x700",
});
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "crt", dep,
}),
.extra_flags = args.items,
};
}
return comp.build_crt_file("mingw32", .Lib, &c_source_files);
},
.msvcrt_os_lib => {
const extra_flags = try arena.dupe([]const u8, &[_][]const u8{
"-DHAVE_CONFIG_H",
"-D__LIBMSVCRT__",
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "include" }),
"-std=gnu99",
"-D_CRTBLD",
"-D_WIN32_WINNT=0x0f00",
"-D__MSVCRT_VERSION__=0x700",
"-isystem",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", "any-windows-any" }),
});
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
for (msvcrt_common_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", dep }),
.extra_flags = extra_flags,
};
}
if (comp.getTarget().cpu.arch == .i386) {
for (msvcrt_i386_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", dep,
}),
.extra_flags = extra_flags,
};
}
} else {
for (msvcrt_other_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", dep,
}),
.extra_flags = extra_flags,
};
}
}
return comp.build_crt_file("msvcrt-os", .Lib, c_source_files.items);
},
.mingwex_lib => {
const extra_flags = try arena.dupe([]const u8, &[_][]const u8{
"-DHAVE_CONFIG_H",
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw" }),
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "include" }),
"-std=gnu99",
"-D_CRTBLD",
"-D_WIN32_WINNT=0x0f00",
"-D__MSVCRT_VERSION__=0x700",
"-isystem",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", "any-windows-any" }),
});
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
for (mingwex_generic_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", dep,
}),
.extra_flags = extra_flags,
};
}
const target = comp.getTarget();
if (target.cpu.arch == .i386 or target.cpu.arch == .x86_64) {
for (mingwex_x86_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", dep,
}),
.extra_flags = extra_flags,
};
}
} else if (target.cpu.arch.isARM()) {
if (target.cpu.arch.ptrBitWidth() == 32) {
for (mingwex_arm32_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", dep,
}),
.extra_flags = extra_flags,
};
}
} else {
for (mingwex_arm64_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", dep,
}),
.extra_flags = extra_flags,
};
}
}
} else {
unreachable;
}
return comp.build_crt_file("mingwex", .Lib, c_source_files.items);
},
.uuid_lib => {
const extra_flags = try arena.dupe([]const u8, &[_][]const u8{
"-DHAVE_CONFIG_H",
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw" }),
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "include" }),
"-std=gnu99",
"-D_CRTBLD",
"-D_WIN32_WINNT=0x0f00",
"-D__MSVCRT_VERSION__=0x700",
"-isystem",
try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "include", "any-windows-any",
}),
});
var c_source_files: [uuid_src.len]Compilation.CSourceFile = undefined;
for (uuid_src) |dep, i| {
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "libsrc", dep,
}),
.extra_flags = extra_flags,
};
}
return comp.build_crt_file("uuid", .Lib, &c_source_files);
},
}
}
fn add_cc_args(
comp: *Compilation,
arena: *Allocator,
args: *std.ArrayList([]const u8),
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-DHAVE_CONFIG_H",
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "include" }),
"-isystem",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", "any-windows-any" }),
});
const target = comp.getTarget();
if (target.cpu.arch.isARM() and target.cpu.arch.ptrBitWidth() == 32) {
try args.append("-mfpu=vfp");
}
try args.appendSlice(&[_][]const u8{
"-std=gnu11",
"-D_CRTBLD",
"-D_WIN32_WINNT=0x0f00",
"-D__MSVCRT_VERSION__=0x700",
});
}
pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
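    // Overall flow: locate the matching .def (or .def.in) file, run it through
    // `zig clang -E` into the global cache, then have LLVM write the import
    // library; the resulting .lib is cached by content hash and registered in
    // comp.crt_files.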
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const def_file_path = findDef(comp, arena, lib_name) catch |err| switch (err) {
error.FileNotFound => {
log.debug("no {s}.def file available to make a DLL import {s}.lib", .{ lib_name, lib_name });
// In this case we will end up putting foo.lib onto the linker line and letting the linker
// use its library paths to look for libraries and report any problems.
return;
},
else => |e| return e,
};
// We need to invoke `zig clang` to use the preprocessor.
if (!build_options.have_llvm) return error.ZigCompilerNotBuiltWithLLVMExtensions;
const self_exe_path = comp.self_exe_path orelse return error.PreprocessorDisabled;
const target = comp.getTarget();
var cache: Cache = .{
.gpa = comp.gpa,
.manifest_dir = comp.cache_parent.manifest_dir,
};
cache.hash.addBytes(build_options.version);
cache.hash.addOptionalBytes(comp.zig_lib_directory.path);
cache.hash.add(target.cpu.arch);
var man = cache.obtain();
defer man.deinit();
_ = try man.addFile(def_file_path, null);
const final_lib_basename = try std.fmt.allocPrint(comp.gpa, "{s}.lib", .{lib_name});
errdefer comp.gpa.free(final_lib_basename);
if (try man.hit()) {
const digest = man.final();
try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
comp.crt_files.putAssumeCapacityNoClobber(final_lib_basename, .{
.full_object_path = try comp.global_cache_directory.join(comp.gpa, &[_][]const u8{
"o", &digest, final_lib_basename,
}),
.lock = man.toOwnedLock(),
});
return;
}
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
var o_dir = try comp.global_cache_directory.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
const final_def_basename = try std.fmt.allocPrint(arena, "{s}.def", .{lib_name});
const def_final_path = try comp.global_cache_directory.join(arena, &[_][]const u8{
"o", &digest, final_def_basename,
});
const target_def_arg = switch (target.cpu.arch) {
.i386 => "-DDEF_I386",
.x86_64 => "-DDEF_X64",
.arm, .armeb, .thumb, .thumbeb, .aarch64_32 => "-DDEF_ARM32",
.aarch64, .aarch64_be => "-DDEF_ARM64",
else => unreachable,
};
const args = [_][]const u8{
self_exe_path,
"clang",
"-x",
"c",
def_file_path,
"-Wp,-w",
"-undef",
"-P",
"-I",
try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "def-include" }),
target_def_arg,
"-E",
"-o",
def_final_path,
};
if (comp.verbose_cc) {
Compilation.dump_argv(&args);
}
const child = try std.ChildProcess.init(&args, arena);
defer child.deinit();
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
try child.spawn();
const stdout_reader = child.stdout.?.reader();
const stderr_reader = child.stderr.?.reader();
// TODO https://github.com/ziglang/zig/issues/6343
const stdout = try stdout_reader.readAllAlloc(arena, std.math.maxInt(u32));
const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
const term = child.wait() catch |err| {
// TODO surface a proper error here
log.err("unable to spawn {}: {}", .{ args[0], @errorName(err) });
return error.ClangPreprocessorFailed;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
// TODO surface a proper error here
log.err("clang exited with code {d} and stderr: {s}", .{ code, stderr });
return error.ClangPreprocessorFailed;
}
},
else => {
// TODO surface a proper error here
log.err("clang terminated unexpectedly with stderr: {}", .{stderr});
return error.ClangPreprocessorFailed;
},
}
const lib_final_path = try comp.global_cache_directory.join(comp.gpa, &[_][]const u8{
"o", &digest, final_lib_basename,
});
errdefer comp.gpa.free(lib_final_path);
const llvm = @import("llvm_bindings.zig");
const arch_type = @import("target.zig").archToLLVM(target.cpu.arch);
const def_final_path_z = try arena.dupeZ(u8, def_final_path);
const lib_final_path_z = try arena.dupeZ(u8, lib_final_path);
if (llvm.WriteImportLibrary(def_final_path_z.ptr, arch_type, lib_final_path_z.ptr, true)) {
// TODO surface a proper error here
log.err("unable to turn {s}.def into {s}.lib", .{ lib_name, lib_name });
return error.WritingImportLibFailed;
}
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest for DLL import {s}.lib: {s}", .{ lib_name, @errorName(err) });
};
try comp.crt_files.putNoClobber(comp.gpa, final_lib_basename, .{
.full_object_path = lib_final_path,
.lock = man.toOwnedLock(),
});
}
/// This function body is verbose but all it does is test 3 different paths and see if a .def file exists.
fn findDef(comp: *Compilation, allocator: *Allocator, lib_name: []const u8) ![]u8 {
const target = comp.getTarget();
const lib_path = switch (target.cpu.arch) {
.i386 => "lib32",
.x86_64 => "lib64",
.arm, .armeb, .thumb, .thumbeb, .aarch64_32 => "libarm32",
.aarch64, .aarch64_be => "libarm64",
else => unreachable,
};
var override_path = std.ArrayList(u8).init(allocator);
defer override_path.deinit();
const s = path.sep_str;
{
        // Try the architecture-specific path first.
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "{s}" ++ s ++ "{s}.def";
if (comp.zig_lib_directory.path) |p| {
try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
} else {
try override_path.writer().print(fmt_path, .{ lib_path, lib_name });
}
if (std.fs.cwd().access(override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
}
}
{
// Try the generic version.
override_path.shrinkRetainingCapacity(0);
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def";
if (comp.zig_lib_directory.path) |p| {
try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
} else {
try override_path.writer().print(fmt_path, .{lib_name});
}
if (std.fs.cwd().access(override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
}
}
{
// Try the generic version and preprocess it.
override_path.shrinkRetainingCapacity(0);
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def.in";
if (comp.zig_lib_directory.path) |p| {
try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
} else {
try override_path.writer().print(fmt_path, .{lib_name});
}
if (std.fs.cwd().access(override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
}
}
return error.FileNotFound;
}
const mingw32_lib_deps = [_][]const u8{
"crt0_c.c",
"dll_argv.c",
"gccmain.c",
"natstart.c",
"pseudo-reloc-list.c",
"wildcard.c",
"charmax.c",
"crt0_w.c",
"dllargv.c",
"_newmode.c",
"tlssup.c",
"xncommod.c",
"cinitexe.c",
"merr.c",
"usermatherr.c",
"pesect.c",
"udllargc.c",
"xthdloc.c",
"CRT_fp10.c",
"mingw_helpers.c",
"pseudo-reloc.c",
"udll_argv.c",
"xtxtmode.c",
"crt_handler.c",
"tlsthrd.c",
"tlsmthread.c",
"tlsmcrt.c",
"cxa_atexit.c",
"cxa_thread_atexit.c",
"tls_atexit.c",
};
const msvcrt_common_src = [_][]const u8{
"misc" ++ path.sep_str ++ "_create_locale.c",
"misc" ++ path.sep_str ++ "_free_locale.c",
"misc" ++ path.sep_str ++ "onexit_table.c",
"misc" ++ path.sep_str ++ "register_tls_atexit.c",
"stdio" ++ path.sep_str ++ "acrt_iob_func.c",
"stdio" ++ path.sep_str ++ "snprintf_alias.c",
"stdio" ++ path.sep_str ++ "vsnprintf_alias.c",
"misc" ++ path.sep_str ++ "_configthreadlocale.c",
"misc" ++ path.sep_str ++ "_get_current_locale.c",
"misc" ++ path.sep_str ++ "invalid_parameter_handler.c",
"misc" ++ path.sep_str ++ "output_format.c",
"misc" ++ path.sep_str ++ "purecall.c",
"secapi" ++ path.sep_str ++ "_access_s.c",
"secapi" ++ path.sep_str ++ "_cgets_s.c",
"secapi" ++ path.sep_str ++ "_cgetws_s.c",
"secapi" ++ path.sep_str ++ "_chsize_s.c",
"secapi" ++ path.sep_str ++ "_controlfp_s.c",
"secapi" ++ path.sep_str ++ "_cprintf_s.c",
"secapi" ++ path.sep_str ++ "_cprintf_s_l.c",
"secapi" ++ path.sep_str ++ "_ctime32_s.c",
"secapi" ++ path.sep_str ++ "_ctime64_s.c",
"secapi" ++ path.sep_str ++ "_cwprintf_s.c",
"secapi" ++ path.sep_str ++ "_cwprintf_s_l.c",
"secapi" ++ path.sep_str ++ "_gmtime32_s.c",
"secapi" ++ path.sep_str ++ "_gmtime64_s.c",
"secapi" ++ path.sep_str ++ "_localtime32_s.c",
"secapi" ++ path.sep_str ++ "_localtime64_s.c",
"secapi" ++ path.sep_str ++ "_mktemp_s.c",
"secapi" ++ path.sep_str ++ "_sopen_s.c",
"secapi" ++ path.sep_str ++ "_strdate_s.c",
"secapi" ++ path.sep_str ++ "_strtime_s.c",
"secapi" ++ path.sep_str ++ "_umask_s.c",
"secapi" ++ path.sep_str ++ "_vcprintf_s.c",
"secapi" ++ path.sep_str ++ "_vcprintf_s_l.c",
"secapi" ++ path.sep_str ++ "_vcwprintf_s.c",
"secapi" ++ path.sep_str ++ "_vcwprintf_s_l.c",
"secapi" ++ path.sep_str ++ "_vscprintf_p.c",
"secapi" ++ path.sep_str ++ "_vscwprintf_p.c",
"secapi" ++ path.sep_str ++ "_vswprintf_p.c",
"secapi" ++ path.sep_str ++ "_waccess_s.c",
"secapi" ++ path.sep_str ++ "_wasctime_s.c",
"secapi" ++ path.sep_str ++ "_wctime32_s.c",
"secapi" ++ path.sep_str ++ "_wctime64_s.c",
"secapi" ++ path.sep_str ++ "_wstrtime_s.c",
"secapi" ++ path.sep_str ++ "_wmktemp_s.c",
"secapi" ++ path.sep_str ++ "_wstrdate_s.c",
"secapi" ++ path.sep_str ++ "asctime_s.c",
"secapi" ++ path.sep_str ++ "memcpy_s.c",
"secapi" ++ path.sep_str ++ "memmove_s.c",
"secapi" ++ path.sep_str ++ "rand_s.c",
"secapi" ++ path.sep_str ++ "sprintf_s.c",
"secapi" ++ path.sep_str ++ "strerror_s.c",
"secapi" ++ path.sep_str ++ "vsprintf_s.c",
"secapi" ++ path.sep_str ++ "wmemcpy_s.c",
"secapi" ++ path.sep_str ++ "wmemmove_s.c",
"stdio" ++ path.sep_str ++ "mingw_lock.c",
};
const msvcrt_i386_src = [_][]const u8{
"misc" ++ path.sep_str ++ "lc_locale_func.c",
"misc" ++ path.sep_str ++ "___mb_cur_max_func.c",
"misc" ++ path.sep_str ++ "wassert.c",
};
const msvcrt_other_src = [_][]const u8{
"misc" ++ path.sep_str ++ "__p___argv.c",
"misc" ++ path.sep_str ++ "__p__acmdln.c",
"misc" ++ path.sep_str ++ "__p__commode.c",
"misc" ++ path.sep_str ++ "__p__fmode.c",
"misc" ++ path.sep_str ++ "__p__wcmdln.c",
};
const mingwex_generic_src = [_][]const u8{
"complex" ++ path.sep_str ++ "_cabs.c",
"complex" ++ path.sep_str ++ "cabs.c",
"complex" ++ path.sep_str ++ "cabsf.c",
"complex" ++ path.sep_str ++ "cabsl.c",
"complex" ++ path.sep_str ++ "cacos.c",
"complex" ++ path.sep_str ++ "cacosf.c",
"complex" ++ path.sep_str ++ "cacosl.c",
"complex" ++ path.sep_str ++ "carg.c",
"complex" ++ path.sep_str ++ "cargf.c",
"complex" ++ path.sep_str ++ "cargl.c",
"complex" ++ path.sep_str ++ "casin.c",
"complex" ++ path.sep_str ++ "casinf.c",
"complex" ++ path.sep_str ++ "casinl.c",
"complex" ++ path.sep_str ++ "catan.c",
"complex" ++ path.sep_str ++ "catanf.c",
"complex" ++ path.sep_str ++ "catanl.c",
"complex" ++ path.sep_str ++ "ccos.c",
"complex" ++ path.sep_str ++ "ccosf.c",
"complex" ++ path.sep_str ++ "ccosl.c",
"complex" ++ path.sep_str ++ "cexp.c",
"complex" ++ path.sep_str ++ "cexpf.c",
"complex" ++ path.sep_str ++ "cexpl.c",
"complex" ++ path.sep_str ++ "cimag.c",
"complex" ++ path.sep_str ++ "cimagf.c",
"complex" ++ path.sep_str ++ "cimagl.c",
"complex" ++ path.sep_str ++ "clog.c",
"complex" ++ path.sep_str ++ "clog10.c",
"complex" ++ path.sep_str ++ "clog10f.c",
"complex" ++ path.sep_str ++ "clog10l.c",
"complex" ++ path.sep_str ++ "clogf.c",
"complex" ++ path.sep_str ++ "clogl.c",
"complex" ++ path.sep_str ++ "conj.c",
"complex" ++ path.sep_str ++ "conjf.c",
"complex" ++ path.sep_str ++ "conjl.c",
"complex" ++ path.sep_str ++ "cpow.c",
"complex" ++ path.sep_str ++ "cpowf.c",
"complex" ++ path.sep_str ++ "cpowl.c",
"complex" ++ path.sep_str ++ "cproj.c",
"complex" ++ path.sep_str ++ "cprojf.c",
"complex" ++ path.sep_str ++ "cprojl.c",
"complex" ++ path.sep_str ++ "creal.c",
"complex" ++ path.sep_str ++ "crealf.c",
"complex" ++ path.sep_str ++ "creall.c",
"complex" ++ path.sep_str ++ "csin.c",
"complex" ++ path.sep_str ++ "csinf.c",
"complex" ++ path.sep_str ++ "csinl.c",
"complex" ++ path.sep_str ++ "csqrt.c",
"complex" ++ path.sep_str ++ "csqrtf.c",
"complex" ++ path.sep_str ++ "csqrtl.c",
"complex" ++ path.sep_str ++ "ctan.c",
"complex" ++ path.sep_str ++ "ctanf.c",
"complex" ++ path.sep_str ++ "ctanl.c",
"crt" ++ path.sep_str ++ "dllentry.c",
"crt" ++ path.sep_str ++ "dllmain.c",
"gdtoa" ++ path.sep_str ++ "arithchk.c",
"gdtoa" ++ path.sep_str ++ "dmisc.c",
"gdtoa" ++ path.sep_str ++ "dtoa.c",
"gdtoa" ++ path.sep_str ++ "g__fmt.c",
"gdtoa" ++ path.sep_str ++ "g_dfmt.c",
"gdtoa" ++ path.sep_str ++ "g_ffmt.c",
"gdtoa" ++ path.sep_str ++ "g_xfmt.c",
"gdtoa" ++ path.sep_str ++ "gdtoa.c",
"gdtoa" ++ path.sep_str ++ "gethex.c",
"gdtoa" ++ path.sep_str ++ "gmisc.c",
"gdtoa" ++ path.sep_str ++ "hd_init.c",
"gdtoa" ++ path.sep_str ++ "hexnan.c",
"gdtoa" ++ path.sep_str ++ "misc.c",
"gdtoa" ++ path.sep_str ++ "qnan.c",
"gdtoa" ++ path.sep_str ++ "smisc.c",
"gdtoa" ++ path.sep_str ++ "strtodg.c",
"gdtoa" ++ path.sep_str ++ "strtodnrp.c",
"gdtoa" ++ path.sep_str ++ "strtof.c",
"gdtoa" ++ path.sep_str ++ "strtopx.c",
"gdtoa" ++ path.sep_str ++ "sum.c",
"gdtoa" ++ path.sep_str ++ "ulp.c",
"math" ++ path.sep_str ++ "abs64.c",
"math" ++ path.sep_str ++ "cbrt.c",
"math" ++ path.sep_str ++ "cbrtf.c",
"math" ++ path.sep_str ++ "cbrtl.c",
"math" ++ path.sep_str ++ "cephes_emath.c",
"math" ++ path.sep_str ++ "copysign.c",
"math" ++ path.sep_str ++ "copysignf.c",
"math" ++ path.sep_str ++ "coshf.c",
"math" ++ path.sep_str ++ "coshl.c",
"math" ++ path.sep_str ++ "erfl.c",
"math" ++ path.sep_str ++ "expf.c",
"math" ++ path.sep_str ++ "fabs.c",
"math" ++ path.sep_str ++ "fabsf.c",
"math" ++ path.sep_str ++ "fabsl.c",
"math" ++ path.sep_str ++ "fdim.c",
"math" ++ path.sep_str ++ "fdimf.c",
"math" ++ path.sep_str ++ "fdiml.c",
"math" ++ path.sep_str ++ "fma.c",
"math" ++ path.sep_str ++ "fmaf.c",
"math" ++ path.sep_str ++ "fmal.c",
"math" ++ path.sep_str ++ "fmax.c",
"math" ++ path.sep_str ++ "fmaxf.c",
"math" ++ path.sep_str ++ "fmaxl.c",
"math" ++ path.sep_str ++ "fmin.c",
"math" ++ path.sep_str ++ "fminf.c",
"math" ++ path.sep_str ++ "fminl.c",
"math" ++ path.sep_str ++ "fp_consts.c",
"math" ++ path.sep_str ++ "fp_constsf.c",
"math" ++ path.sep_str ++ "fp_constsl.c",
"math" ++ path.sep_str ++ "fpclassify.c",
"math" ++ path.sep_str ++ "fpclassifyf.c",
"math" ++ path.sep_str ++ "fpclassifyl.c",
"math" ++ path.sep_str ++ "frexpf.c",
"math" ++ path.sep_str ++ "hypot.c",
"math" ++ path.sep_str ++ "hypotf.c",
"math" ++ path.sep_str ++ "hypotl.c",
"math" ++ path.sep_str ++ "isnan.c",
"math" ++ path.sep_str ++ "isnanf.c",
"math" ++ path.sep_str ++ "isnanl.c",
"math" ++ path.sep_str ++ "ldexpf.c",
"math" ++ path.sep_str ++ "lgamma.c",
"math" ++ path.sep_str ++ "lgammaf.c",
"math" ++ path.sep_str ++ "lgammal.c",
"math" ++ path.sep_str ++ "llrint.c",
"math" ++ path.sep_str ++ "llrintf.c",
"math" ++ path.sep_str ++ "llrintl.c",
"math" ++ path.sep_str ++ "llround.c",
"math" ++ path.sep_str ++ "llroundf.c",
"math" ++ path.sep_str ++ "llroundl.c",
"math" ++ path.sep_str ++ "log10f.c",
"math" ++ path.sep_str ++ "logf.c",
"math" ++ path.sep_str ++ "lrint.c",
"math" ++ path.sep_str ++ "lrintf.c",
"math" ++ path.sep_str ++ "lrintl.c",
"math" ++ path.sep_str ++ "lround.c",
"math" ++ path.sep_str ++ "lroundf.c",
"math" ++ path.sep_str ++ "lroundl.c",
"math" ++ path.sep_str ++ "modf.c",
"math" ++ path.sep_str ++ "modff.c",
"math" ++ path.sep_str ++ "modfl.c",
"math" ++ path.sep_str ++ "nextafterf.c",
"math" ++ path.sep_str ++ "nextafterl.c",
"math" ++ path.sep_str ++ "nexttoward.c",
"math" ++ path.sep_str ++ "nexttowardf.c",
"math" ++ path.sep_str ++ "powf.c",
"math" ++ path.sep_str ++ "powi.c",
"math" ++ path.sep_str ++ "powif.c",
"math" ++ path.sep_str ++ "powil.c",
"math" ++ path.sep_str ++ "round.c",
"math" ++ path.sep_str ++ "roundf.c",
"math" ++ path.sep_str ++ "roundl.c",
"math" ++ path.sep_str ++ "s_erf.c",
"math" ++ path.sep_str ++ "sf_erf.c",
"math" ++ path.sep_str ++ "signbit.c",
"math" ++ path.sep_str ++ "signbitf.c",
"math" ++ path.sep_str ++ "signbitl.c",
"math" ++ path.sep_str ++ "signgam.c",
"math" ++ path.sep_str ++ "sinhf.c",
"math" ++ path.sep_str ++ "sinhl.c",
"math" ++ path.sep_str ++ "sqrt.c",
"math" ++ path.sep_str ++ "sqrtf.c",
"math" ++ path.sep_str ++ "sqrtl.c",
"math" ++ path.sep_str ++ "tanhf.c",
"math" ++ path.sep_str ++ "tanhl.c",
"math" ++ path.sep_str ++ "tgamma.c",
"math" ++ path.sep_str ++ "tgammaf.c",
"math" ++ path.sep_str ++ "tgammal.c",
"math" ++ path.sep_str ++ "truncl.c",
"misc" ++ path.sep_str ++ "alarm.c",
"misc" ++ path.sep_str ++ "basename.c",
"misc" ++ path.sep_str ++ "btowc.c",
"misc" ++ path.sep_str ++ "delay-f.c",
"misc" ++ path.sep_str ++ "delay-n.c",
"misc" ++ path.sep_str ++ "delayimp.c",
"misc" ++ path.sep_str ++ "dirent.c",
"misc" ++ path.sep_str ++ "dirname.c",
"misc" ++ path.sep_str ++ "feclearexcept.c",
"misc" ++ path.sep_str ++ "fegetenv.c",
"misc" ++ path.sep_str ++ "fegetexceptflag.c",
"misc" ++ path.sep_str ++ "fegetround.c",
"misc" ++ path.sep_str ++ "feholdexcept.c",
"misc" ++ path.sep_str ++ "feraiseexcept.c",
"misc" ++ path.sep_str ++ "fesetenv.c",
"misc" ++ path.sep_str ++ "fesetexceptflag.c",
"misc" ++ path.sep_str ++ "fesetround.c",
"misc" ++ path.sep_str ++ "fetestexcept.c",
"misc" ++ path.sep_str ++ "feupdateenv.c",
"misc" ++ path.sep_str ++ "ftruncate.c",
"misc" ++ path.sep_str ++ "ftw.c",
"misc" ++ path.sep_str ++ "ftw64.c",
"misc" ++ path.sep_str ++ "fwide.c",
"misc" ++ path.sep_str ++ "getlogin.c",
"misc" ++ path.sep_str ++ "getopt.c",
"misc" ++ path.sep_str ++ "gettimeofday.c",
"misc" ++ path.sep_str ++ "imaxabs.c",
"misc" ++ path.sep_str ++ "imaxdiv.c",
"misc" ++ path.sep_str ++ "isblank.c",
"misc" ++ path.sep_str ++ "iswblank.c",
"misc" ++ path.sep_str ++ "mbrtowc.c",
"misc" ++ path.sep_str ++ "mbsinit.c",
"misc" ++ path.sep_str ++ "mempcpy.c",
"misc" ++ path.sep_str ++ "mingw-aligned-malloc.c",
"misc" ++ path.sep_str ++ "mingw_getsp.S",
"misc" ++ path.sep_str ++ "mingw_matherr.c",
"misc" ++ path.sep_str ++ "mingw_mbwc_convert.c",
"misc" ++ path.sep_str ++ "mingw_usleep.c",
"misc" ++ path.sep_str ++ "mingw_wcstod.c",
"misc" ++ path.sep_str ++ "mingw_wcstof.c",
"misc" ++ path.sep_str ++ "mingw_wcstold.c",
"misc" ++ path.sep_str ++ "mkstemp.c",
"misc" ++ path.sep_str ++ "seterrno.c",
"misc" ++ path.sep_str ++ "sleep.c",
"misc" ++ path.sep_str ++ "strnlen.c",
"misc" ++ path.sep_str ++ "strsafe.c",
"misc" ++ path.sep_str ++ "strtoimax.c",
"misc" ++ path.sep_str ++ "strtold.c",
"misc" ++ path.sep_str ++ "strtoumax.c",
"misc" ++ path.sep_str ++ "tdelete.c",
"misc" ++ path.sep_str ++ "tfind.c",
"misc" ++ path.sep_str ++ "tsearch.c",
"misc" ++ path.sep_str ++ "twalk.c",
"misc" ++ path.sep_str ++ "uchar_c16rtomb.c",
"misc" ++ path.sep_str ++ "uchar_c32rtomb.c",
"misc" ++ path.sep_str ++ "uchar_mbrtoc16.c",
"misc" ++ path.sep_str ++ "uchar_mbrtoc32.c",
"misc" ++ path.sep_str ++ "wcrtomb.c",
"misc" ++ path.sep_str ++ "wcsnlen.c",
"misc" ++ path.sep_str ++ "wcstof.c",
"misc" ++ path.sep_str ++ "wcstoimax.c",
"misc" ++ path.sep_str ++ "wcstold.c",
"misc" ++ path.sep_str ++ "wcstoumax.c",
"misc" ++ path.sep_str ++ "wctob.c",
"misc" ++ path.sep_str ++ "wctrans.c",
"misc" ++ path.sep_str ++ "wctype.c",
"misc" ++ path.sep_str ++ "wdirent.c",
"misc" ++ path.sep_str ++ "winbs_uint64.c",
"misc" ++ path.sep_str ++ "winbs_ulong.c",
"misc" ++ path.sep_str ++ "winbs_ushort.c",
"misc" ++ path.sep_str ++ "wmemchr.c",
"misc" ++ path.sep_str ++ "wmemcmp.c",
"misc" ++ path.sep_str ++ "wmemcpy.c",
"misc" ++ path.sep_str ++ "wmemmove.c",
"misc" ++ path.sep_str ++ "wmempcpy.c",
"misc" ++ path.sep_str ++ "wmemset.c",
"stdio" ++ path.sep_str ++ "_Exit.c",
"stdio" ++ path.sep_str ++ "_findfirst64i32.c",
"stdio" ++ path.sep_str ++ "_findnext64i32.c",
"stdio" ++ path.sep_str ++ "_fstat.c",
"stdio" ++ path.sep_str ++ "_fstat64i32.c",
"stdio" ++ path.sep_str ++ "_ftime.c",
"stdio" ++ path.sep_str ++ "_getc_nolock.c",
"stdio" ++ path.sep_str ++ "_getwc_nolock.c",
"stdio" ++ path.sep_str ++ "_putc_nolock.c",
"stdio" ++ path.sep_str ++ "_putwc_nolock.c",
"stdio" ++ path.sep_str ++ "_stat.c",
"stdio" ++ path.sep_str ++ "_stat64i32.c",
"stdio" ++ path.sep_str ++ "_wfindfirst64i32.c",
"stdio" ++ path.sep_str ++ "_wfindnext64i32.c",
"stdio" ++ path.sep_str ++ "_wstat.c",
"stdio" ++ path.sep_str ++ "_wstat64i32.c",
"stdio" ++ path.sep_str ++ "asprintf.c",
"stdio" ++ path.sep_str ++ "atoll.c",
"stdio" ++ path.sep_str ++ "fgetpos64.c",
"stdio" ++ path.sep_str ++ "fopen64.c",
"stdio" ++ path.sep_str ++ "fseeko32.c",
"stdio" ++ path.sep_str ++ "fseeko64.c",
"stdio" ++ path.sep_str ++ "fsetpos64.c",
"stdio" ++ path.sep_str ++ "ftello.c",
"stdio" ++ path.sep_str ++ "ftello64.c",
"stdio" ++ path.sep_str ++ "ftruncate64.c",
"stdio" ++ path.sep_str ++ "lltoa.c",
"stdio" ++ path.sep_str ++ "lltow.c",
"stdio" ++ path.sep_str ++ "lseek64.c",
"stdio" ++ path.sep_str ++ "mingw_asprintf.c",
"stdio" ++ path.sep_str ++ "mingw_fprintf.c",
"stdio" ++ path.sep_str ++ "mingw_fprintfw.c",
"stdio" ++ path.sep_str ++ "mingw_fscanf.c",
"stdio" ++ path.sep_str ++ "mingw_fwscanf.c",
"stdio" ++ path.sep_str ++ "mingw_pformat.c",
"stdio" ++ path.sep_str ++ "mingw_pformatw.c",
"stdio" ++ path.sep_str ++ "mingw_printf.c",
"stdio" ++ path.sep_str ++ "mingw_printfw.c",
"stdio" ++ path.sep_str ++ "mingw_scanf.c",
"stdio" ++ path.sep_str ++ "mingw_snprintf.c",
"stdio" ++ path.sep_str ++ "mingw_snprintfw.c",
"stdio" ++ path.sep_str ++ "mingw_sprintf.c",
"stdio" ++ path.sep_str ++ "mingw_sprintfw.c",
"stdio" ++ path.sep_str ++ "mingw_sscanf.c",
"stdio" ++ path.sep_str ++ "mingw_swscanf.c",
"stdio" ++ path.sep_str ++ "mingw_vasprintf.c",
"stdio" ++ path.sep_str ++ "mingw_vfprintf.c",
"stdio" ++ path.sep_str ++ "mingw_vfprintfw.c",
"stdio" ++ path.sep_str ++ "mingw_vfscanf.c",
"stdio" ++ path.sep_str ++ "mingw_vprintf.c",
"stdio" ++ path.sep_str ++ "mingw_vprintfw.c",
"stdio" ++ path.sep_str ++ "mingw_vsnprintf.c",
"stdio" ++ path.sep_str ++ "mingw_vsnprintfw.c",
"stdio" ++ path.sep_str ++ "mingw_vsprintf.c",
"stdio" ++ path.sep_str ++ "mingw_vsprintfw.c",
"stdio" ++ path.sep_str ++ "mingw_wscanf.c",
"stdio" ++ path.sep_str ++ "mingw_wvfscanf.c",
"stdio" ++ path.sep_str ++ "scanf.S",
"stdio" ++ path.sep_str ++ "snprintf.c",
"stdio" ++ path.sep_str ++ "snwprintf.c",
"stdio" ++ path.sep_str ++ "strtof.c",
"stdio" ++ path.sep_str ++ "strtok_r.c",
"stdio" ++ path.sep_str ++ "truncate.c",
"stdio" ++ path.sep_str ++ "ulltoa.c",
"stdio" ++ path.sep_str ++ "ulltow.c",
"stdio" ++ path.sep_str ++ "vasprintf.c",
"stdio" ++ path.sep_str ++ "vfscanf.c",
"stdio" ++ path.sep_str ++ "vfscanf2.S",
"stdio" ++ path.sep_str ++ "vfwscanf.c",
"stdio" ++ path.sep_str ++ "vfwscanf2.S",
"stdio" ++ path.sep_str ++ "vscanf.c",
"stdio" ++ path.sep_str ++ "vscanf2.S",
"stdio" ++ path.sep_str ++ "vsnprintf.c",
"stdio" ++ path.sep_str ++ "vsnwprintf.c",
"stdio" ++ path.sep_str ++ "vsscanf.c",
"stdio" ++ path.sep_str ++ "vsscanf2.S",
"stdio" ++ path.sep_str ++ "vswscanf.c",
"stdio" ++ path.sep_str ++ "vswscanf2.S",
"stdio" ++ path.sep_str ++ "vwscanf.c",
"stdio" ++ path.sep_str ++ "vwscanf2.S",
"stdio" ++ path.sep_str ++ "wtoll.c",
};
const mingwex_x86_src = [_][]const u8{
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "acosf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "acosh.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "acoshf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "acoshl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "acosl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "asinf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "asinh.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "asinhf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "asinhl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "asinl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atan2.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atan2f.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atan2l.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atanf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atanh.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atanhf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atanhl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "atanl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ceilf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ceill.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ceil.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "_chgsignl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "copysignl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "cos.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "cosf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "cosl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "cosl_internal.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "cossin.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "exp2f.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "exp2l.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "exp2.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "exp.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "expl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "expm1.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "expm1f.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "expm1l.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "floorf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "floorl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "floor.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "fmod.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "fmodf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "fmodl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "fucom.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ilogbf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ilogbl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ilogb.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "internal_logl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ldexp.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "ldexpl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log10l.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log1pf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log1pl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log1p.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log2f.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log2l.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log2.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "logb.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "logbf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "logbl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "log.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "logl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "nearbyintf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "nearbyintl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "nearbyint.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "pow.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "powl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "remainderf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "remainderl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "remainder.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "remquof.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "remquol.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "remquo.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "rint.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "rintf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "scalbnf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "scalbnl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "scalbn.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "sin.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "sinf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "sinl.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "sinl_internal.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "tanf.c",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "tanl.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "truncf.S",
"math" ++ path.sep_str ++ "x86" ++ path.sep_str ++ "trunc.S",
};
const mingwex_arm32_src = [_][]const u8{
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "_chgsignl.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "s_rint.c",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "s_rintf.c",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "exp2.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "exp2f.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "nearbyint.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "nearbyintf.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "nearbyintl.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "sincos.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "sincosf.S",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "s_trunc.c",
"math" ++ path.sep_str ++ "arm" ++ path.sep_str ++ "s_truncf.c",
};
const mingwex_arm64_src = [_][]const u8{
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "_chgsignl.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "rint.c",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "rintf.c",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "sincos.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "sincosf.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "exp2f.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "exp2.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "nearbyintf.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "nearbyintl.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "nearbyint.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "truncf.S",
"math" ++ path.sep_str ++ "arm64" ++ path.sep_str ++ "trunc.S",
};
const uuid_src = [_][]const u8{
"ativscp-uuid.c",
"atsmedia-uuid.c",
"bth-uuid.c",
"cguid-uuid.c",
"comcat-uuid.c",
"devguid.c",
"docobj-uuid.c",
"dxva-uuid.c",
"exdisp-uuid.c",
"extras-uuid.c",
"fwp-uuid.c",
"guid_nul.c",
"hlguids-uuid.c",
"hlink-uuid.c",
"mlang-uuid.c",
"msctf-uuid.c",
"mshtmhst-uuid.c",
"mshtml-uuid.c",
"msxml-uuid.c",
"netcfg-uuid.c",
"netcon-uuid.c",
"ntddkbd-uuid.c",
"ntddmou-uuid.c",
"ntddpar-uuid.c",
"ntddscsi-uuid.c",
"ntddser-uuid.c",
"ntddstor-uuid.c",
"ntddvdeo-uuid.c",
"oaidl-uuid.c",
"objidl-uuid.c",
"objsafe-uuid.c",
"ocidl-uuid.c",
"oleacc-uuid.c",
"olectlid-uuid.c",
"oleidl-uuid.c",
"power-uuid.c",
"powrprof-uuid.c",
"uianimation-uuid.c",
"usbcamdi-uuid.c",
"usbiodef-uuid.c",
"uuid.c",
"vds-uuid.c",
"virtdisk-uuid.c",
"wia-uuid.c",
};
pub const always_link_libs = [_][]const u8{
"advapi32",
"kernel32",
"msvcrt",
"ntdll",
"shell32",
"user32",
}; | src/mingw.zig |
const std = @import("../std.zig");
const builtin = @import("builtin");
const os = std.os;
const io = std.io;
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const posix = os.posix;
const windows = os.windows;
const Os = builtin.Os;
const windows_util = @import("windows/util.zig");
const maxInt = std.math.maxInt;
const is_posix = builtin.os != builtin.Os.windows;
const is_windows = builtin.os == builtin.Os.windows;
pub const File = struct {
/// The OS-specific file descriptor or file handle.
handle: os.FileHandle,
pub const Mode = switch (builtin.os) {
Os.windows => void,
else => u32,
};
pub const default_mode = switch (builtin.os) {
Os.windows => {},
else => 0o666,
};
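    // Usage sketch (illustrative; the file name is an assumption, not part of
    // this file):
    //
    //   var file = try os.File.openRead("example.txt");
    //   defer file.close();
    //   const len = try file.getEndPos();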
pub const OpenError = os.WindowsOpenError || os.PosixOpenError;
/// `openRead` except with a null terminated path
pub fn openReadC(path: [*]const u8) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_RDONLY;
const fd = try os.posixOpenC(path, flags, 0);
return openHandle(fd);
}
if (is_windows) {
return openRead(mem.toSliceConst(u8, path));
}
@compileError("Unsupported OS");
}
/// Call close to clean up.
pub fn openRead(path: []const u8) OpenError!File {
if (is_posix) {
const path_c = try os.toPosixPath(path);
return openReadC(&path_c);
}
if (is_windows) {
const path_w = try windows_util.sliceToPrefixedFileW(path);
return openReadW(&path_w);
}
@compileError("Unsupported OS");
}
pub fn openReadW(path_w: [*]const u16) OpenError!File {
const handle = try os.windowsOpenW(
path_w,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
windows.OPEN_EXISTING,
windows.FILE_ATTRIBUTE_NORMAL,
);
return openHandle(handle);
}
/// Calls `openWriteMode` with os.File.default_mode for the mode.
pub fn openWrite(path: []const u8) OpenError!File {
return openWriteMode(path, os.File.default_mode);
}
/// If the path does not exist it will be created.
/// If a file already exists in the destination it will be truncated.
/// Call close to clean up.
pub fn openWriteMode(path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
const fd = try os.posixOpen(path, flags, file_mode);
return openHandle(fd);
} else if (is_windows) {
const path_w = try windows_util.sliceToPrefixedFileW(path);
return openWriteModeW(&path_w, file_mode);
} else {
@compileError("TODO implement openWriteMode for this OS");
}
}
pub fn openWriteModeW(path_w: [*]const u16, file_mode: Mode) OpenError!File {
const handle = try os.windowsOpenW(
path_w,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
windows.CREATE_ALWAYS,
windows.FILE_ATTRIBUTE_NORMAL,
);
return openHandle(handle);
}
/// If the path does not exist it will be created.
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// Call close to clean up.
pub fn openWriteNoClobber(path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) {
const path_c = try os.toPosixPath(path);
return openWriteNoClobberC(&path_c, file_mode);
} else if (is_windows) {
const path_w = try windows_util.sliceToPrefixedFileW(path);
return openWriteNoClobberW(&path_w, file_mode);
} else {
@compileError("TODO implement openWriteMode for this OS");
}
}
pub fn openWriteNoClobberC(path: [*]const u8, file_mode: Mode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
const fd = try os.posixOpenC(path, flags, file_mode);
return openHandle(fd);
} else if (is_windows) {
const path_w = try windows_util.cStrToPrefixedFileW(path);
return openWriteNoClobberW(&path_w, file_mode);
} else {
@compileError("TODO implement openWriteMode for this OS");
}
}
pub fn openWriteNoClobberW(path_w: [*]const u16, file_mode: Mode) OpenError!File {
const handle = try os.windowsOpenW(
path_w,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
windows.CREATE_NEW,
windows.FILE_ATTRIBUTE_NORMAL,
);
return openHandle(handle);
}
pub fn openHandle(handle: os.FileHandle) File {
return File{ .handle = handle };
}
pub const AccessError = error{
PermissionDenied,
FileNotFound,
NameTooLong,
InputOutput,
SystemResources,
BadPathName,
/// On Windows, file paths must be valid Unicode.
InvalidUtf8,
Unexpected,
};
/// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string.
/// Otherwise use `access` or `accessC`.
pub fn accessW(path: [*]const u16) AccessError!void {
if (os.windows.GetFileAttributesW(path) != os.windows.INVALID_FILE_ATTRIBUTES) {
return;
}
const err = windows.GetLastError();
switch (err) {
windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
windows.ERROR.ACCESS_DENIED => return error.PermissionDenied,
else => return os.unexpectedErrorWindows(err),
}
}
/// Call if you have a UTF-8 encoded, null-terminated string.
/// Otherwise use `access` or `accessW`.
pub fn accessC(path: [*]const u8) AccessError!void {
if (is_windows) {
const path_w = try windows_util.cStrToPrefixedFileW(path);
return accessW(&path_w);
}
if (is_posix) {
const result = posix.access(path, posix.F_OK);
const err = posix.getErrno(result);
switch (err) {
0 => return,
posix.EACCES => return error.PermissionDenied,
posix.EROFS => return error.PermissionDenied,
posix.ELOOP => return error.PermissionDenied,
posix.ETXTBSY => return error.PermissionDenied,
posix.ENOTDIR => return error.FileNotFound,
posix.ENOENT => return error.FileNotFound,
posix.ENAMETOOLONG => return error.NameTooLong,
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
posix.EIO => return error.InputOutput,
posix.ENOMEM => return error.SystemResources,
else => return os.unexpectedErrorPosix(err),
}
}
@compileError("Unsupported OS");
}
pub fn access(path: []const u8) AccessError!void {
if (is_windows) {
const path_w = try windows_util.sliceToPrefixedFileW(path);
return accessW(&path_w);
}
if (is_posix) {
var path_with_null: [posix.PATH_MAX]u8 = undefined;
if (path.len >= posix.PATH_MAX) return error.NameTooLong;
mem.copy(u8, path_with_null[0..], path);
path_with_null[path.len] = 0;
return accessC(&path_with_null);
}
@compileError("Unsupported OS");
}
    /// After closing, the file handle is no longer valid. To continue using the
    /// file, reopen it with one of the open functions.
pub fn close(self: File) void {
os.close(self.handle);
}
/// Calls `os.isTty` on `self.handle`.
pub fn isTty(self: File) bool {
return os.isTty(self.handle);
}
pub const SeekError = error{
/// TODO make this error impossible to get
Overflow,
Unseekable,
Unexpected,
};
pub fn seekForward(self: File, amount: isize) SeekError!void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios, Os.freebsd, Os.netbsd => {
const result = posix.lseek(self.handle, amount, posix.SEEK_CUR);
const err = posix.getErrno(result);
if (err > 0) {
return switch (err) {
// We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable,
posix.ENXIO => error.Unseekable,
else => os.unexpectedErrorPosix(err),
};
}
},
Os.windows => {
if (windows.SetFilePointerEx(self.handle, amount, null, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err),
};
}
},
else => @compileError("unsupported OS"),
}
}
pub fn seekTo(self: File, pos: usize) SeekError!void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios, Os.freebsd, Os.netbsd => {
const ipos = try math.cast(isize, pos);
const result = posix.lseek(self.handle, ipos, posix.SEEK_SET);
const err = posix.getErrno(result);
if (err > 0) {
return switch (err) {
// We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable,
posix.ENXIO => error.Unseekable,
else => os.unexpectedErrorPosix(err),
};
}
},
Os.windows => {
const ipos = try math.cast(isize, pos);
if (windows.SetFilePointerEx(self.handle, ipos, null, windows.FILE_BEGIN) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_PARAMETER => unreachable,
windows.ERROR.INVALID_HANDLE => unreachable,
else => os.unexpectedErrorWindows(err),
};
}
},
else => @compileError("unsupported OS: " ++ @tagName(builtin.os)),
}
}
pub const GetSeekPosError = error{
Overflow,
SystemResources,
Unseekable,
Unexpected,
};
pub fn getPos(self: File) GetSeekPosError!usize {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios, Os.freebsd, Os.netbsd => {
const result = posix.lseek(self.handle, 0, posix.SEEK_CUR);
const err = posix.getErrno(result);
if (err > 0) {
return switch (err) {
// We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable,
posix.ENXIO => error.Unseekable,
else => os.unexpectedErrorPosix(err),
};
}
return result;
},
Os.windows => {
var pos: windows.LARGE_INTEGER = undefined;
if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err),
};
}
assert(pos >= 0);
return math.cast(usize, pos);
},
else => @compileError("unsupported OS"),
}
}
pub fn getEndPos(self: File) GetSeekPosError!usize {
if (is_posix) {
const stat = try os.posixFStat(self.handle);
return @intCast(usize, stat.size);
} else if (is_windows) {
var file_size: windows.LARGE_INTEGER = undefined;
if (windows.GetFileSizeEx(self.handle, &file_size) == 0) {
const err = windows.GetLastError();
return switch (err) {
else => os.unexpectedErrorWindows(err),
};
}
if (file_size < 0)
return error.Overflow;
return math.cast(usize, @intCast(u64, file_size));
} else {
@compileError("TODO support getEndPos on this OS");
}
}
pub const ModeError = error{
SystemResources,
Unexpected,
};
pub fn mode(self: File) ModeError!Mode {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
if (err > 0) {
return switch (err) {
// We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.ENOMEM => error.SystemResources,
else => os.unexpectedErrorPosix(err),
};
}
// TODO: we should be able to cast u16 to ModeError!u32, making this
// explicit cast not necessary
return Mode(stat.mode);
} else if (is_windows) {
return {};
} else {
@compileError("TODO support file mode on this OS");
}
}
pub const ReadError = os.WindowsReadError || os.PosixReadError;
pub fn read(self: File, buffer: []u8) ReadError!usize {
if (is_posix) {
return os.posixRead(self.handle, buffer);
} else if (is_windows) {
var index: usize = 0;
while (index < buffer.len) {
const want_read_count = @intCast(windows.DWORD, math.min(windows.DWORD(maxInt(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
if (windows.ReadFile(self.handle, buffer.ptr + index, want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
windows.ERROR.BROKEN_PIPE => return index,
else => os.unexpectedErrorWindows(err),
};
}
if (amt_read == 0) return index;
index += amt_read;
}
return index;
} else {
@compileError("Unsupported OS");
}
}
pub const WriteError = os.WindowsWriteError || os.PosixWriteError;
pub fn write(self: File, bytes: []const u8) WriteError!void {
if (is_posix) {
try os.posixWrite(self.handle, bytes);
} else if (is_windows) {
try os.windowsWrite(self.handle, bytes);
} else {
@compileError("Unsupported OS");
}
}
pub fn inStream(file: File) InStream {
return InStream{
.file = file,
.stream = InStream.Stream{ .readFn = InStream.readFn },
};
}
pub fn outStream(file: File) OutStream {
return OutStream{
.file = file,
.stream = OutStream.Stream{ .writeFn = OutStream.writeFn },
};
}
pub fn seekableStream(file: File) SeekableStream {
return SeekableStream{
.file = file,
.stream = SeekableStream.Stream{
.seekToFn = SeekableStream.seekToFn,
.seekForwardFn = SeekableStream.seekForwardFn,
.getPosFn = SeekableStream.getPosFn,
.getEndPosFn = SeekableStream.getEndPosFn,
},
};
}
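    // Illustrative sketch (hedged, not exhaustive): the wrappers below adapt a File to the
    // generic io interfaces via their embedded `stream` field, e.g.
    //     var fin = file.inStream();
    //     const byte = try fin.stream.readByte();
    // where `file` is assumed to come from one of the open functions above.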
/// Implementation of io.InStream trait for File
pub const InStream = struct {
file: File,
stream: Stream,
pub const Error = ReadError;
pub const Stream = io.InStream(Error);
fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
const self = @fieldParentPtr(InStream, "stream", in_stream);
return self.file.read(buffer);
}
};
/// Implementation of io.OutStream trait for File
pub const OutStream = struct {
file: File,
stream: Stream,
pub const Error = WriteError;
pub const Stream = io.OutStream(Error);
fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
const self = @fieldParentPtr(OutStream, "stream", out_stream);
return self.file.write(bytes);
}
};
/// Implementation of io.SeekableStream trait for File
pub const SeekableStream = struct {
file: File,
stream: Stream,
pub const Stream = io.SeekableStream(SeekError, GetSeekPosError);
pub fn seekToFn(seekable_stream: *Stream, pos: usize) SeekError!void {
const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
return self.file.seekTo(pos);
}
pub fn seekForwardFn(seekable_stream: *Stream, amt: isize) SeekError!void {
const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
return self.file.seekForward(amt);
}
pub fn getEndPosFn(seekable_stream: *Stream) GetSeekPosError!usize {
const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
return self.file.getEndPos();
}
pub fn getPosFn(seekable_stream: *Stream) GetSeekPosError!usize {
const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
return self.file.getPos();
}
};
}; | std/os/file.zig |
const std = @import("../std.zig");
const testing = std.testing;
const target = std.Target.current;
const Ordering = std.atomic.Ordering;
pub fn Atomic(comptime T: type) type {
return extern struct {
value: T,
const Self = @This();
pub fn init(value: T) Self {
return .{ .value = value };
}
/// Non-atomically load from the atomic value without synchronization.
/// Care must be taken to avoid data-races when interacting with other atomic operations.
pub fn loadUnchecked(self: Self) T {
return self.value;
}
/// Non-atomically store to the atomic value without synchronization.
/// Care must be taken to avoid data-races when interacting with other atomic operations.
pub fn storeUnchecked(self: *Self, value: T) void {
self.value = value;
}
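        // Illustrative sketch: the unchecked variants are intended for set-up or tear-down
        // phases where the value is not yet (or no longer) shared between threads, e.g.
        //     var flag = Atomic(bool).init(false);
        //     flag.storeUnchecked(true);  // no other thread can observe `flag` yet
        //     _ = flag.load(.Acquire);    // once shared, go through the atomic API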
pub fn load(self: *const Self, comptime ordering: Ordering) T {
return switch (ordering) {
.AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on atomic stores"),
.Release => @compileError(@tagName(ordering) ++ " is only allowed on atomic stores"),
else => @atomicLoad(T, &self.value, ordering),
};
}
pub fn store(self: *Self, value: T, comptime ordering: Ordering) void {
return switch (ordering) {
.AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Acquire) ++ " which is only allowed on atomic loads"),
.Acquire => @compileError(@tagName(ordering) ++ " is only allowed on atomic loads"),
else => @atomicStore(T, &self.value, value, ordering),
};
}
pub fn swap(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Xchg, value, ordering);
}
pub fn compareAndSwap(
self: *Self,
compare: T,
exchange: T,
comptime success: Ordering,
comptime failure: Ordering,
) callconv(.Inline) ?T {
return self.cmpxchg(true, compare, exchange, success, failure);
}
pub fn tryCompareAndSwap(
self: *Self,
compare: T,
exchange: T,
comptime success: Ordering,
comptime failure: Ordering,
) callconv(.Inline) ?T {
return self.cmpxchg(false, compare, exchange, success, failure);
}
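        // Illustrative sketch: `tryCompareAndSwap` lowers to a weak compare-exchange and may
        // fail spuriously, so it is normally driven in a loop that feeds back the observed
        // value; for a hypothetical `counter: Atomic(usize)` an atomic increment looks like:
        //     var current = counter.load(.Monotonic);
        //     while (counter.tryCompareAndSwap(current, current + 1, .Monotonic, .Monotonic)) |actual| {
        //         current = actual;
        //     }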
fn cmpxchg(
self: *Self,
comptime is_strong: bool,
compare: T,
exchange: T,
comptime success: Ordering,
comptime failure: Ordering,
) callconv(.Inline) ?T {
if (success == .Unordered or failure == .Unordered) {
@compileError(@tagName(Ordering.Unordered) ++ " is only allowed on atomic loads and stores");
}
comptime var success_is_stronger = switch (failure) {
.SeqCst => success == .SeqCst,
.AcqRel => @compileError(@tagName(failure) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on success"),
.Acquire => success == .SeqCst or success == .AcqRel or success == .Acquire,
.Release => @compileError(@tagName(failure) ++ " is only allowed on success"),
.Monotonic => true,
.Unordered => unreachable,
};
if (!success_is_stronger) {
@compileError(@tagName(success) ++ " must be stronger than " ++ @tagName(failure));
}
return switch (is_strong) {
true => @cmpxchgStrong(T, &self.value, compare, exchange, success, failure),
false => @cmpxchgWeak(T, &self.value, compare, exchange, success, failure),
};
}
fn rmw(
self: *Self,
comptime op: std.builtin.AtomicRmwOp,
value: T,
comptime ordering: Ordering,
) callconv(.Inline) T {
return @atomicRmw(T, &self.value, op, value, ordering);
}
fn exportWhen(comptime condition: bool, comptime functions: type) type {
return if (condition) functions else struct {};
}
pub usingnamespace exportWhen(std.meta.trait.isNumber(T), struct {
pub fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Add, value, ordering);
}
pub fn fetchSub(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Sub, value, ordering);
}
pub fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Min, value, ordering);
}
pub fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Max, value, ordering);
}
});
pub usingnamespace exportWhen(std.meta.trait.isIntegral(T), struct {
pub fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.And, value, ordering);
}
pub fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Nand, value, ordering);
}
pub fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Or, value, ordering);
}
pub fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
return self.rmw(.Xor, value, ordering);
}
const Bit = std.math.Log2Int(T);
const BitRmwOp = enum {
Set,
Reset,
Toggle,
};
pub fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
return bitRmw(self, .Set, bit, ordering);
}
pub fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
return bitRmw(self, .Reset, bit, ordering);
}
pub fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
return bitRmw(self, .Toggle, bit, ordering);
}
fn bitRmw(
self: *Self,
comptime op: BitRmwOp,
bit: Bit,
comptime ordering: Ordering,
) callconv(.Inline) u1 {
// x86 supports dedicated bitwise instructions
if (comptime target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) {
const instruction = switch (op) {
.Set => "lock bts",
.Reset => "lock btr",
.Toggle => "lock btc",
};
const suffix = switch (@sizeOf(T)) {
2 => "w",
4 => "l",
8 => "q",
else => @compileError("Invalid atomic type " ++ @typeName(T)),
};
const old_bit = asm volatile (instruction ++ suffix ++ " %[bit], %[ptr]"
: [result] "={@ccc}" (-> u8) // LLVM doesn't support u1 flag register return values
: [ptr] "*p" (&self.value),
[bit] "X" (@as(T, bit))
: "cc", "memory"
);
return @intCast(u1, old_bit);
}
const mask = @as(T, 1) << bit;
const value = switch (op) {
.Set => self.fetchOr(mask, ordering),
.Reset => self.fetchAnd(~mask, ordering),
.Toggle => self.fetchXor(mask, ordering),
};
return @boolToInt(value & mask != 0);
}
});
};
}
fn atomicIntTypes() []const type {
comptime var bytes = 1;
comptime var types: []const type = &[_]type{};
inline while (bytes <= @sizeOf(usize)) : (bytes *= 2) {
types = types ++ &[_]type{std.meta.Int(.unsigned, bytes * 8)};
}
return types;
}
test "Atomic.loadUnchecked" {
inline for (atomicIntTypes()) |Int| {
var x = Atomic(Int).init(5);
try testing.expectEqual(x.loadUnchecked(), 5);
}
}
test "Atomic.storeUnchecked" {
inline for (atomicIntTypes()) |Int| {
        var x = Atomic(Int).init(5);
x.storeUnchecked(10);
try testing.expectEqual(x.loadUnchecked(), 10);
}
}
test "Atomic.load" {
inline for (atomicIntTypes()) |Int| {
inline for (.{ .Unordered, .Monotonic, .Acquire, .SeqCst }) |ordering| {
var x = Atomic(Int).init(5);
try testing.expectEqual(x.load(ordering), 5);
}
}
}
test "Atomic.store" {
inline for (atomicIntTypes()) |Int| {
inline for (.{ .Unordered, .Monotonic, .Release, .SeqCst }) |ordering| {
            var x = Atomic(Int).init(5);
x.store(10, ordering);
try testing.expectEqual(x.load(.SeqCst), 10);
}
}
}
const atomic_rmw_orderings = [_]Ordering{
.Monotonic,
.Acquire,
.Release,
.AcqRel,
.SeqCst,
};
test "Atomic.swap" {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(usize).init(5);
try testing.expectEqual(x.swap(10, ordering), 5);
try testing.expectEqual(x.load(.SeqCst), 10);
var y = Atomic(enum(usize) { a, b, c }).init(.c);
try testing.expectEqual(y.swap(.a, ordering), .c);
try testing.expectEqual(y.load(.SeqCst), .a);
var z = Atomic(f32).init(5.0);
try testing.expectEqual(z.swap(10.0, ordering), 5.0);
try testing.expectEqual(z.load(.SeqCst), 10.0);
var a = Atomic(bool).init(false);
try testing.expectEqual(a.swap(true, ordering), false);
try testing.expectEqual(a.load(.SeqCst), true);
var b = Atomic(?*u8).init(null);
try testing.expectEqual(b.swap(@intToPtr(?*u8, @alignOf(u8)), ordering), null);
try testing.expectEqual(b.load(.SeqCst), @intToPtr(?*u8, @alignOf(u8)));
}
}
const atomic_cmpxchg_orderings = [_][2]Ordering{
.{ .Monotonic, .Monotonic },
.{ .Acquire, .Monotonic },
.{ .Acquire, .Acquire },
.{ .Release, .Monotonic },
// Although accepted by LLVM, acquire failure implies AcqRel success
// .{ .Release, .Acquire },
.{ .AcqRel, .Monotonic },
.{ .AcqRel, .Acquire },
.{ .SeqCst, .Monotonic },
.{ .SeqCst, .Acquire },
.{ .SeqCst, .SeqCst },
};
test "Atomic.compareAndSwap" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_cmpxchg_orderings) |ordering| {
var x = Atomic(Int).init(0);
try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), 0);
try testing.expectEqual(x.load(.SeqCst), 0);
try testing.expectEqual(x.compareAndSwap(0, 1, ordering[0], ordering[1]), null);
try testing.expectEqual(x.load(.SeqCst), 1);
try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), null);
try testing.expectEqual(x.load(.SeqCst), 0);
}
}
}
test "Atomic.tryCompareAndSwap" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_cmpxchg_orderings) |ordering| {
var x = Atomic(Int).init(0);
try testing.expectEqual(x.tryCompareAndSwap(1, 0, ordering[0], ordering[1]), 0);
try testing.expectEqual(x.load(.SeqCst), 0);
while (x.tryCompareAndSwap(0, 1, ordering[0], ordering[1])) |_| {}
try testing.expectEqual(x.load(.SeqCst), 1);
while (x.tryCompareAndSwap(1, 0, ordering[0], ordering[1])) |_| {}
try testing.expectEqual(x.load(.SeqCst), 0);
}
}
}
test "Atomic.fetchAdd" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(5);
try testing.expectEqual(x.fetchAdd(5, ordering), 5);
try testing.expectEqual(x.load(.SeqCst), 10);
try testing.expectEqual(x.fetchAdd(std.math.maxInt(Int), ordering), 10);
try testing.expectEqual(x.load(.SeqCst), 9);
}
}
}
test "Atomic.fetchSub" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(5);
try testing.expectEqual(x.fetchSub(5, ordering), 5);
try testing.expectEqual(x.load(.SeqCst), 0);
try testing.expectEqual(x.fetchSub(1, ordering), 0);
try testing.expectEqual(x.load(.SeqCst), std.math.maxInt(Int));
}
}
}
test "Atomic.fetchMin" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(5);
try testing.expectEqual(x.fetchMin(0, ordering), 5);
try testing.expectEqual(x.load(.SeqCst), 0);
try testing.expectEqual(x.fetchMin(10, ordering), 0);
try testing.expectEqual(x.load(.SeqCst), 0);
}
}
}
test "Atomic.fetchMax" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(5);
try testing.expectEqual(x.fetchMax(10, ordering), 5);
try testing.expectEqual(x.load(.SeqCst), 10);
try testing.expectEqual(x.fetchMax(5, ordering), 10);
try testing.expectEqual(x.load(.SeqCst), 10);
}
}
}
test "Atomic.fetchAnd" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0b11);
try testing.expectEqual(x.fetchAnd(0b10, ordering), 0b11);
try testing.expectEqual(x.load(.SeqCst), 0b10);
try testing.expectEqual(x.fetchAnd(0b00, ordering), 0b10);
try testing.expectEqual(x.load(.SeqCst), 0b00);
}
}
}
test "Atomic.fetchNand" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0b11);
try testing.expectEqual(x.fetchNand(0b10, ordering), 0b11);
try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b10));
try testing.expectEqual(x.fetchNand(0b00, ordering), ~@as(Int, 0b10));
try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b00));
}
}
}
test "Atomic.fetchOr" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0b11);
try testing.expectEqual(x.fetchOr(0b100, ordering), 0b11);
try testing.expectEqual(x.load(.SeqCst), 0b111);
try testing.expectEqual(x.fetchOr(0b010, ordering), 0b111);
try testing.expectEqual(x.load(.SeqCst), 0b111);
}
}
}
test "Atomic.fetchXor" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0b11);
try testing.expectEqual(x.fetchXor(0b10, ordering), 0b11);
try testing.expectEqual(x.load(.SeqCst), 0b01);
try testing.expectEqual(x.fetchXor(0b01, ordering), 0b01);
try testing.expectEqual(x.load(.SeqCst), 0b00);
}
}
}
test "Atomic.bitSet" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0);
const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
for (bit_array) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
// setting the bit should change the bit
try testing.expect(x.load(.SeqCst) & mask == 0);
try testing.expectEqual(x.bitSet(bit, ordering), 0);
try testing.expect(x.load(.SeqCst) & mask != 0);
// setting it again shouldn't change the bit
try testing.expectEqual(x.bitSet(bit, ordering), 1);
try testing.expect(x.load(.SeqCst) & mask != 0);
                // all the previous bits should not have changed (still be set)
for (bit_array[0..bit_index]) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask != 0);
}
}
}
}
}
test "Atomic.bitReset" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0);
const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
for (bit_array) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
x.storeUnchecked(x.loadUnchecked() | mask);
// unsetting the bit should change the bit
try testing.expect(x.load(.SeqCst) & mask != 0);
try testing.expectEqual(x.bitReset(bit, ordering), 1);
try testing.expect(x.load(.SeqCst) & mask == 0);
// unsetting it again shouldn't change the bit
try testing.expectEqual(x.bitReset(bit, ordering), 0);
try testing.expect(x.load(.SeqCst) & mask == 0);
                // all the previous bits should not have changed (still be reset)
for (bit_array[0..bit_index]) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
}
}
}
}
}
test "Atomic.bitToggle" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0);
const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
for (bit_array) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
// toggling the bit should change the bit
try testing.expect(x.load(.SeqCst) & mask == 0);
try testing.expectEqual(x.bitToggle(bit, ordering), 0);
try testing.expect(x.load(.SeqCst) & mask != 0);
// toggling it again *should* change the bit
try testing.expectEqual(x.bitToggle(bit, ordering), 1);
try testing.expect(x.load(.SeqCst) & mask == 0);
                // all the previous bits should not have changed (still be toggled back)
for (bit_array[0..bit_index]) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
}
}
}
}
} | lib/std/atomic/Atomic.zig |
const std = @import("std");
const math = std.math;
pub fn __sqrth(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation
return @floatCast(f16, sqrtf(x));
}
pub fn sqrtf(x: f32) callconv(.C) f32 {
const tiny: f32 = 1.0e-30;
const sign: i32 = @bitCast(i32, @as(u32, 0x80000000));
var ix: i32 = @bitCast(i32, x);
if ((ix & 0x7F800000) == 0x7F800000) {
return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
}
// zero
if (ix <= 0) {
if (ix & ~sign == 0) {
return x; // sqrt (+-0) = +-0
}
if (ix < 0) {
return math.snan(f32);
}
}
// normalize
var m = ix >> 23;
if (m == 0) {
// subnormal
var i: i32 = 0;
while (ix & 0x00800000 == 0) : (i += 1) {
ix <<= 1;
}
m -= i - 1;
}
m -= 127; // unbias exponent
ix = (ix & 0x007FFFFF) | 0x00800000;
if (m & 1 != 0) { // odd m, double x to even
ix += ix;
}
m >>= 1; // m = [m / 2]
// sqrt(x) bit by bit
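    // Descriptive note only: this is the classic restoring ("digit-by-digit") square root.
    // `q` accumulates the result bits found so far, `s` tracks roughly 2*q aligned to the
    // probe bit `r`, and `ix` carries the remainder (shifted left each step); a trial
    // subtraction decides whether the probe bit belongs in the result.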
ix += ix;
var q: i32 = 0; // q = sqrt(x)
var s: i32 = 0;
var r: i32 = 0x01000000; // r = moving bit right -> left
while (r != 0) {
const t = s + r;
if (t <= ix) {
s = t + r;
ix -= t;
q += r;
}
ix += ix;
r >>= 1;
}
// floating add to find rounding direction
if (ix != 0) {
var z = 1.0 - tiny; // inexact
if (z >= 1.0) {
z = 1.0 + tiny;
if (z > 1.0) {
q += 2;
} else {
if (q & 1 != 0) {
q += 1;
}
}
}
}
ix = (q >> 1) + 0x3f000000;
ix += m << 23;
return @bitCast(f32, ix);
}
/// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
/// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
/// potentially some edge cases remaining that are not handled in the same way.
pub fn sqrt(x: f64) callconv(.C) f64 {
const tiny: f64 = 1.0e-300;
const sign: u32 = 0x80000000;
const u = @bitCast(u64, x);
var ix0 = @intCast(u32, u >> 32);
var ix1 = @intCast(u32, u & 0xFFFFFFFF);
// sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
if (ix0 & 0x7FF00000 == 0x7FF00000) {
return x * x + x;
}
// sqrt(+-0) = +-0
if (x == 0.0) {
return x;
}
// sqrt(-ve) = snan
if (ix0 & sign != 0) {
return math.snan(f64);
}
// normalize x
var m = @intCast(i32, ix0 >> 20);
if (m == 0) {
// subnormal
while (ix0 == 0) {
m -= 21;
ix0 |= ix1 >> 11;
ix1 <<= 21;
}
// subnormal
var i: u32 = 0;
while (ix0 & 0x00100000 == 0) : (i += 1) {
ix0 <<= 1;
}
m -= @intCast(i32, i) - 1;
ix0 |= ix1 >> @intCast(u5, 32 - i);
ix1 <<= @intCast(u5, i);
}
// unbias exponent
m -= 1023;
ix0 = (ix0 & 0x000FFFFF) | 0x00100000;
if (m & 1 != 0) {
ix0 += ix0 + (ix1 >> 31);
ix1 = ix1 +% ix1;
}
m >>= 1;
// sqrt(x) bit by bit
ix0 += ix0 + (ix1 >> 31);
ix1 = ix1 +% ix1;
var q: u32 = 0;
var q1: u32 = 0;
var s0: u32 = 0;
var s1: u32 = 0;
var r: u32 = 0x00200000;
var t: u32 = undefined;
var t1: u32 = undefined;
while (r != 0) {
t = s0 +% r;
if (t <= ix0) {
s0 = t + r;
ix0 -= t;
q += r;
}
ix0 = ix0 +% ix0 +% (ix1 >> 31);
ix1 = ix1 +% ix1;
r >>= 1;
}
r = sign;
while (r != 0) {
t1 = s1 +% r;
t = s0;
if (t < ix0 or (t == ix0 and t1 <= ix1)) {
s1 = t1 +% r;
if (t1 & sign == sign and s1 & sign == 0) {
s0 += 1;
}
ix0 -= t;
if (ix1 < t1) {
ix0 -= 1;
}
ix1 = ix1 -% t1;
q1 += r;
}
ix0 = ix0 +% ix0 +% (ix1 >> 31);
ix1 = ix1 +% ix1;
r >>= 1;
}
// rounding direction
if (ix0 | ix1 != 0) {
var z = 1.0 - tiny; // raise inexact
if (z >= 1.0) {
z = 1.0 + tiny;
if (q1 == 0xFFFFFFFF) {
q1 = 0;
q += 1;
} else if (z > 1.0) {
if (q1 == 0xFFFFFFFE) {
q += 1;
}
q1 += 2;
} else {
q1 += q1 & 1;
}
}
}
ix0 = (q >> 1) + 0x3FE00000;
ix1 = q1 >> 1;
if (q & 1 != 0) {
ix1 |= 0x80000000;
}
    // NOTE: musl here appears to rely on signed two's-complement wraparound. +% has the same
    // behaviour at least.
var iix0 = @intCast(i32, ix0);
iix0 = iix0 +% (m << 20);
const uz = (@intCast(u64, iix0) << 32) | ix1;
return @bitCast(f64, uz);
}
pub fn __sqrtx(x: f80) callconv(.C) f80 {
// TODO: more efficient implementation
return @floatCast(f80, sqrtq(x));
}
pub fn sqrtq(x: f128) callconv(.C) f128 {
// TODO: more correct implementation
return sqrt(@floatCast(f64, x));
}
test "sqrtf" {
const V = [_]f32{
0.0,
4.089288054930154,
7.538757127071935,
8.97780793672623,
5.304443821913729,
5.682408965311888,
0.5846878579110049,
3.650338664297043,
0.3178091951800732,
7.1505232436382835,
3.6589165881946464,
};
// Note that @sqrt will either generate the sqrt opcode (if supported by the
// target ISA) or a call to `sqrtf` otherwise.
for (V) |val|
try std.testing.expectEqual(@sqrt(val), sqrtf(val));
}
test "sqrtf special" {
try std.testing.expect(math.isPositiveInf(sqrtf(math.inf(f32))));
try std.testing.expect(sqrtf(0.0) == 0.0);
try std.testing.expect(sqrtf(-0.0) == -0.0);
try std.testing.expect(math.isNan(sqrtf(-1.0)));
try std.testing.expect(math.isNan(sqrtf(math.nan(f32))));
}
test "sqrt" {
const V = [_]f64{
0.0,
4.089288054930154,
7.538757127071935,
8.97780793672623,
5.304443821913729,
5.682408965311888,
0.5846878579110049,
3.650338664297043,
0.3178091951800732,
7.1505232436382835,
3.6589165881946464,
};
    // Note that @sqrt will either generate the sqrt opcode (if supported by the
    // target ISA) or a call to `sqrt` otherwise.
for (V) |val|
try std.testing.expectEqual(@sqrt(val), sqrt(val));
}
test "sqrt special" {
try std.testing.expect(math.isPositiveInf(sqrt(math.inf(f64))));
try std.testing.expect(sqrt(0.0) == 0.0);
try std.testing.expect(sqrt(-0.0) == -0.0);
try std.testing.expect(math.isNan(sqrt(-1.0)));
try std.testing.expect(math.isNan(sqrt(math.nan(f64))));
} | lib/std/special/compiler_rt/sqrt.zig |
const Address = std.net.Address;
const Allocator = std.mem.Allocator;
const LinearFifo = std.fifo.LinearFifo;
const network = @import("network");
const std = @import("std");
const Uri = @import("http").Uri;
pub const Socket = struct {
target: network.Socket,
pub fn connect(allocator: *Allocator, uri: Uri) !Socket {
const port = uri.port orelse 80;
        var socket = switch (uri.host) {
            .name => |host| try Socket.connectToHost(allocator, host, port),
            .ip => |address| try Socket.connectToAddress(allocator, address),
        };
        return Socket{ .target = socket };
}
fn connectToHost(allocator: *Allocator, host: []const u8, port: u16) !network.Socket {
return try network.connectToHost(allocator, host, port, .tcp);
}
fn connectToAddress(allocator: *Allocator, address: Address) !network.Socket {
        switch (address.any.family) {
std.os.AF_INET => {
var socket = try network.Socket.create(.ipv4, .tcp);
const bytes = @ptrCast(*const [4]u8, &address.in.sa.addr);
try socket.connect(.{
.address = .{ .ipv4 = network.Address.IPv4.init(bytes[0], bytes[1], bytes[2], bytes[3]) },
.port = address.getPort(),
});
return socket;
},
else => unreachable,
}
}
pub fn receive(self: Socket, buffer: []u8) !usize {
return try self.target.receive(buffer);
}
pub fn write(self: Socket, buffer: []const u8) !void {
try self.target.writer().writeAll(buffer);
}
pub fn close(self: *Socket) void {
self.target.close();
}
};
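// Illustrative call pattern (sketch only; `allocator` and the already-parsed `uri` are
// assumed to be provided by the caller, they are not defined in this file):
//     var socket = try Socket.connect(allocator, uri);
//     defer socket.close();
//     try socket.write("GET / HTTP/1.1\r\n\r\n");
//     var buf: [512]u8 = undefined;
//     const n = try socket.receive(buf[0..]);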
pub const SocketMock = struct {
allocator: *Allocator,
receive_buffer: LinearFifo([]u8, .Dynamic),
write_buffer: std.ArrayList(u8),
pub fn connect(allocator: *Allocator, uri: Uri) !SocketMock {
        return SocketMock{
.allocator = allocator,
.receive_buffer = LinearFifo([]u8, .Dynamic).init(allocator),
.write_buffer = std.ArrayList(u8).init(allocator),
};
}
pub fn receive(self: *SocketMock, buffer: []u8) !usize {
var result = self.receive_buffer.readItem() orelse unreachable;
defer self.allocator.free(result);
std.mem.copy(u8, buffer, result);
return result.len;
}
pub fn write(self: *SocketMock, buffer: []const u8) !void {
try self.write_buffer.appendSlice(buffer);
}
pub fn close(self: *SocketMock) void {
self.receive_buffer.deinit();
self.write_buffer.deinit();
}
pub fn have_received(self: *SocketMock, data: []const u8) !void {
var copy = try std.mem.dupe(self.allocator, u8, data);
try self.receive_buffer.writeItem(copy);
}
pub fn have_sent(self: *SocketMock, data: []const u8) bool {
return std.mem.eql(u8, self.write_buffer.items, data);
}
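    // Illustrative test-double flow (sketch only; `allocator` and `uri` are assumptions):
    //     var mock = try SocketMock.connect(allocator, uri);
    //     defer mock.close();
    //     try mock.have_received("pong");
    //     var buf: [16]u8 = undefined;
    //     const n = try mock.receive(buf[0..]);
    //     try mock.write("ping");
    //     const sent_ok = mock.have_sent("ping");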
}; | src/socket.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
const fs = std.fs;
const path = fs.path;
const assert = std.debug.assert;
const Version = std.builtin.Version;
const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
const Cache = @import("Cache.zig");
const Package = @import("Package.zig");
pub const Lib = struct {
name: []const u8,
sover: u8,
};
pub const ABI = struct {
all_versions: []const Version,
all_targets: []const target_util.ArchOsAbi,
/// The bytes from the file verbatim, starting from the u16 number
/// of function inclusions.
inclusions: []const u8,
arena_state: std.heap.ArenaAllocator.State,
pub fn destroy(abi: *ABI, gpa: Allocator) void {
abi.arena_state.promote(gpa).deinit();
}
};
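// Sketch of the `abilists` layout as inferred from the parsing code below (descriptive
// only, not a normative spec):
//   u8 library count, followed by that many 0-terminated library names
//   u8 version count, followed by major/minor/patch as one byte each
//   u8 target count,  followed by that many 0-terminated arch-os-abi triples
//   remaining bytes: the function/object inclusion records kept verbatim in `ABI.inclusions`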
// The order of the elements in this array defines the linking order.
pub const libs = [_]Lib{
.{ .name = "m", .sover = 6 },
.{ .name = "pthread", .sover = 0 },
.{ .name = "c", .sover = 6 },
.{ .name = "dl", .sover = 2 },
.{ .name = "rt", .sover = 1 },
.{ .name = "ld", .sover = 2 },
.{ .name = "util", .sover = 1 },
};
pub const LoadMetaDataError = error{
/// The files that ship with the Zig compiler were unable to be read, or otherwise had malformed data.
ZigInstallationCorrupt,
OutOfMemory,
};
/// This function will emit a log error when there is a problem with the zig
/// installation and then return `error.ZigInstallationCorrupt`.
pub fn loadMetaData(gpa: Allocator, zig_lib_dir: fs.Dir) LoadMetaDataError!*ABI {
const tracy = trace(@src());
defer tracy.end();
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
const arena = arena_allocator.allocator();
var glibc_dir = zig_lib_dir.openDir("libc" ++ path.sep_str ++ "glibc", .{}) catch |err| {
log.err("unable to open glibc dir: {s}", .{@errorName(err)});
return error.ZigInstallationCorrupt;
};
defer glibc_dir.close();
const max_size = 500 * 1024; // Bigger than this and something is definitely borked.
const contents = glibc_dir.readFileAlloc(arena, "abilists", max_size) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
log.err("unable to read libc" ++ path.sep_str ++ "glibc" ++ path.sep_str ++
"abilists: {s}", .{@errorName(err)});
return error.ZigInstallationCorrupt;
},
};
var index: usize = 0;
{
const libs_len = contents[index];
index += 1;
var i: u8 = 0;
while (i < libs_len) : (i += 1) {
const lib_name = mem.sliceTo(contents[index..], 0);
index += lib_name.len + 1;
if (i >= libs.len or !mem.eql(u8, libs[i].name, lib_name)) {
log.err("libc" ++ path.sep_str ++ "glibc" ++ path.sep_str ++
"abilists: invalid library name or index ({d}): '{s}'", .{ i, lib_name });
return error.ZigInstallationCorrupt;
}
}
}
const versions = b: {
const versions_len = contents[index];
index += 1;
const versions = try arena.alloc(Version, versions_len);
var i: u8 = 0;
while (i < versions.len) : (i += 1) {
versions[i] = .{
.major = contents[index + 0],
.minor = contents[index + 1],
.patch = contents[index + 2],
};
index += 3;
}
break :b versions;
};
const targets = b: {
const targets_len = contents[index];
index += 1;
const targets = try arena.alloc(target_util.ArchOsAbi, targets_len);
var i: u8 = 0;
while (i < targets.len) : (i += 1) {
const target_name = mem.sliceTo(contents[index..], 0);
index += target_name.len + 1;
var component_it = mem.tokenize(u8, target_name, "-");
const arch_name = component_it.next() orelse {
log.err("abilists: expected arch name", .{});
return error.ZigInstallationCorrupt;
};
const os_name = component_it.next() orelse {
log.err("abilists: expected OS name", .{});
return error.ZigInstallationCorrupt;
};
const abi_name = component_it.next() orelse {
log.err("abilists: expected ABI name", .{});
return error.ZigInstallationCorrupt;
};
const arch_tag = std.meta.stringToEnum(std.Target.Cpu.Arch, arch_name) orelse {
log.err("abilists: unrecognized arch: '{s}'", .{arch_name});
return error.ZigInstallationCorrupt;
};
if (!mem.eql(u8, os_name, "linux")) {
log.err("abilists: expected OS 'linux', found '{s}'", .{os_name});
return error.ZigInstallationCorrupt;
}
const abi_tag = std.meta.stringToEnum(std.Target.Abi, abi_name) orelse {
log.err("abilists: unrecognized ABI: '{s}'", .{abi_name});
return error.ZigInstallationCorrupt;
};
targets[i] = .{
.arch = arch_tag,
.os = .linux,
.abi = abi_tag,
};
}
break :b targets;
};
const abi = try arena.create(ABI);
abi.* = .{
.all_versions = versions,
.all_targets = targets,
.inclusions = contents[index..],
.arena_state = arena_allocator.state,
};
return abi;
}
pub const CRTFile = enum {
crti_o,
crtn_o,
scrt1_o,
libc_nonshared_a,
};
pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
switch (crt_file) {
.crti_o => {
var args = std.ArrayList([]const u8).init(arena);
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
"-DMODULE_NAME=libc",
"-Wno-nonportable-include-path",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-Wa,--noexecstack",
});
return comp.build_crt_file("crti", .Obj, &[1]Compilation.CSourceFile{
.{
.src_path = try start_asm_path(comp, arena, "crti.S"),
.extra_flags = args.items,
},
});
},
.crtn_o => {
var args = std.ArrayList([]const u8).init(arena);
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-DMODULE_NAME=libc",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-Wa,--noexecstack",
});
return comp.build_crt_file("crtn", .Obj, &[1]Compilation.CSourceFile{
.{
.src_path = try start_asm_path(comp, arena, "crtn.S"),
.extra_flags = args.items,
},
});
},
.scrt1_o => {
const start_os: Compilation.CSourceFile = blk: {
var args = std.ArrayList([]const u8).init(arena);
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
"-DMODULE_NAME=libc",
"-Wno-nonportable-include-path",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DPIC",
"-DSHARED",
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-Wa,--noexecstack",
});
break :blk .{
.src_path = try start_asm_path(comp, arena, "start.S"),
.extra_flags = args.items,
};
};
const abi_note_o: Compilation.CSourceFile = blk: {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-I",
try lib_path(comp, arena, lib_libc_glibc ++ "csu"),
});
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-DMODULE_NAME=libc",
"-DTOP_NAMESPACE=glibc",
"-DASSEMBLER",
"-Wa,--noexecstack",
});
break :blk .{
.src_path = try lib_path(comp, arena, lib_libc_glibc ++ "csu" ++ path.sep_str ++ "abi-note.S"),
.extra_flags = args.items,
};
};
return comp.build_crt_file("Scrt1", .Obj, &[_]Compilation.CSourceFile{ start_os, abi_note_o });
},
.libc_nonshared_a => {
const target = comp.getTarget();
const s = path.sep_str;
const linux_prefix = lib_libc_glibc ++
"sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux" ++ s;
const Flavor = enum { nonshared, shared };
const Dep = struct {
path: []const u8,
flavor: Flavor = .shared,
};
const deps = [_]Dep{
.{
.path = lib_libc_glibc ++ "stdlib" ++ s ++ "atexit.c",
.flavor = .nonshared,
},
.{
.path = lib_libc_glibc ++ "stdlib" ++ s ++ "at_quick_exit.c",
.flavor = .nonshared,
},
.{
.path = lib_libc_glibc ++ "sysdeps" ++ s ++ "pthread" ++ s ++ "pthread_atfork.c",
.flavor = .nonshared,
},
.{
.path = lib_libc_glibc ++ "debug" ++ s ++ "stack_chk_fail_local.c",
.flavor = .nonshared,
},
.{ .path = lib_libc_glibc ++ "csu" ++ s ++ "errno.c" },
.{ .path = linux_prefix ++ "stat.c" },
.{ .path = linux_prefix ++ "fstat.c" },
.{ .path = linux_prefix ++ "lstat.c" },
.{ .path = linux_prefix ++ "stat64.c" },
.{ .path = linux_prefix ++ "fstat64.c" },
.{ .path = linux_prefix ++ "lstat64.c" },
.{ .path = linux_prefix ++ "fstatat.c" },
.{ .path = linux_prefix ++ "fstatat64.c" },
.{ .path = linux_prefix ++ "mknodat.c" },
.{ .path = lib_libc_glibc ++ "io" ++ s ++ "mknod.c" },
.{ .path = linux_prefix ++ "stat_t64_cp.c" },
};
var c_source_files: [deps.len]Compilation.CSourceFile = undefined;
for (deps) |dep, i| {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-std=gnu11",
"-fgnu89-inline",
"-fmerge-all-constants",
// glibc sets this flag but clang does not support it.
// "-frounding-math",
"-fno-stack-protector",
"-fno-common",
"-fmath-errno",
"-ftls-model=initial-exec",
"-Wno-ignored-attributes",
});
try add_include_dirs(comp, arena, &args);
if (target.cpu.arch == .i386) {
// This prevents i386/sysdep.h from trying to do some
// silly and unnecessary inline asm hack that uses weird
// syntax that clang does not support.
try args.append("-DCAN_USE_REGISTER_ASM_EBP");
}
const shared_def = switch (dep.flavor) {
.nonshared => "-DLIBC_NONSHARED=1",
// glibc passes `-DSHARED` for these. However, empirically if
// we do that here we will see undefined symbols such as `__GI_memcpy`.
// So we pass the same thing as for nonshared.
.shared => "-DLIBC_NONSHARED=1",
};
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-modules.h"),
"-DMODULE_NAME=libc",
"-Wno-nonportable-include-path",
"-include",
try lib_path(comp, arena, lib_libc_glibc ++ "include" ++ path.sep_str ++ "libc-symbols.h"),
"-DPIC",
shared_def,
"-DTOP_NAMESPACE=glibc",
});
c_source_files[i] = .{
.src_path = try lib_path(comp, arena, dep.path),
.extra_flags = args.items,
};
}
return comp.build_crt_file("c_nonshared", .Lib, &c_source_files);
},
}
}
fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
const arch = comp.getTarget().cpu.arch;
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparcv9;
const is_64 = arch.ptrBitWidth() == 64;
const s = path.sep_str;
var result = std.ArrayList(u8).init(arena);
try result.appendSlice(comp.zig_lib_directory.path.?);
try result.appendSlice(s ++ "libc" ++ s ++ "glibc" ++ s ++ "sysdeps" ++ s);
if (is_sparc) {
if (mem.eql(u8, basename, "crti.S") or mem.eql(u8, basename, "crtn.S")) {
try result.appendSlice("sparc");
} else {
if (is_64) {
try result.appendSlice("sparc" ++ s ++ "sparc64");
} else {
try result.appendSlice("sparc" ++ s ++ "sparc32");
}
}
} else if (arch.isARM()) {
try result.appendSlice("arm");
} else if (arch.isMIPS()) {
if (!mem.eql(u8, basename, "crti.S") and !mem.eql(u8, basename, "crtn.S")) {
try result.appendSlice("mips");
} else {
if (is_64) {
const abi_dir = if (comp.getTarget().abi == .gnuabin32)
"n32"
else
"n64";
try result.appendSlice("mips" ++ s ++ "mips64" ++ s);
try result.appendSlice(abi_dir);
} else {
try result.appendSlice("mips" ++ s ++ "mips32");
}
}
} else if (arch == .x86_64) {
try result.appendSlice("x86_64");
} else if (arch == .i386) {
try result.appendSlice("i386");
} else if (is_aarch64) {
try result.appendSlice("aarch64");
} else if (arch.isRISCV()) {
try result.appendSlice("riscv");
} else if (is_ppc) {
if (is_64) {
try result.appendSlice("powerpc" ++ s ++ "powerpc64");
} else {
try result.appendSlice("powerpc" ++ s ++ "powerpc32");
}
}
try result.appendSlice(s);
try result.appendSlice(basename);
return result.items;
}
fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
const target = comp.getTarget();
const arch = target.cpu.arch;
const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
const s = path.sep_str;
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "include"));
if (target.os.tag == .linux) {
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux"));
}
if (opt_nptl) |nptl| {
try add_include_dirs_arch(arena, args, arch, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
}
if (target.os.tag == .linux) {
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
"unix" ++ s ++ "sysv" ++ s ++ "linux" ++ s ++ "generic"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
"unix" ++ s ++ "sysv" ++ s ++ "linux" ++ s ++ "include"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++
"unix" ++ s ++ "sysv" ++ s ++ "linux"));
}
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc_glibc ++ "sysdeps", nptl }));
}
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "pthread"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv"));
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix"));
try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps"));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "generic"));
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc ++ "glibc" }));
try args.append("-I");
try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", .{
comp.zig_lib_directory.path.?, @tagName(arch), @tagName(target.os.tag), @tagName(target.abi),
}));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc ++ "include" ++ s ++ "generic-glibc"));
const arch_name = target_util.osArchName(target);
try args.append("-I");
try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-linux-any", .{
comp.zig_lib_directory.path.?, arch_name,
}));
try args.append("-I");
try args.append(try lib_path(comp, arena, lib_libc ++ "include" ++ s ++ "any-linux-any"));
}
fn add_include_dirs_arch(
arena: Allocator,
args: *std.ArrayList([]const u8),
arch: std.Target.Cpu.Arch,
opt_nptl: ?[]const u8,
dir: []const u8,
) error{OutOfMemory}!void {
const is_x86 = arch == .i386 or arch == .x86_64;
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparcv9;
const is_64 = arch.ptrBitWidth() == 64;
const s = path.sep_str;
if (is_x86) {
if (arch == .x86_64) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64" }));
}
} else if (arch == .i386) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "i386", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "i386" }));
}
}
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86" }));
}
} else if (arch.isARM()) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "arm", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "arm" }));
}
} else if (arch.isMIPS()) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips", nptl }));
} else {
if (is_64) {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" ++ s ++ "mips64" }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" ++ s ++ "mips32" }));
}
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "mips" }));
}
} else if (is_sparc) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc", nptl }));
} else {
if (is_64) {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" ++ s ++ "sparc64" }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" ++ s ++ "sparc32" }));
}
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "sparc" }));
}
} else if (is_aarch64) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "aarch64", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "aarch64" }));
}
} else if (is_ppc) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc", nptl }));
} else {
if (is_64) {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" ++ s ++ "powerpc64" }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" ++ s ++ "powerpc32" }));
}
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "powerpc" }));
}
} else if (arch.isRISCV()) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "riscv", nptl }));
} else {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "riscv" }));
}
}
}
fn path_from_lib(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
}
const lib_libc = "libc" ++ path.sep_str;
const lib_libc_glibc = lib_libc ++ "glibc" ++ path.sep_str;
fn lib_path(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
}
pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: []u8,
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
self.lock.release();
gpa.free(self.dir_path);
self.* = undefined;
}
};
const all_map_basename = "all.map";
pub fn buildSharedObjects(comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const target = comp.getTarget();
const target_version = target.os.version_range.linux.glibc;
// Use the global cache directory.
var cache_parent: Cache = .{
.gpa = comp.gpa,
.manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
};
defer cache_parent.manifest_dir.close();
var cache = cache_parent.obtain();
defer cache.deinit();
cache.hash.addBytes(build_options.version);
cache.hash.addBytes(comp.zig_lib_directory.path orelse ".");
cache.hash.add(target.cpu.arch);
cache.hash.add(target.abi);
cache.hash.add(target_version);
const hit = try cache.hit();
const digest = cache.final();
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
// Even if we get a hit, it doesn't guarantee that we finished the job last time.
// We use the presence of an "ok" file to determine if it is a true hit.
var o_directory: Compilation.Directory = .{
.handle = try comp.global_cache_directory.handle.makeOpenPath(o_sub_path, .{}),
.path = try path.join(arena, &[_][]const u8{ comp.global_cache_directory.path.?, o_sub_path }),
};
defer o_directory.handle.close();
const ok_basename = "ok";
const actual_hit = if (hit) blk: {
o_directory.handle.access(ok_basename, .{}) catch |err| switch (err) {
error.FileNotFound => break :blk false,
else => |e| return e,
};
break :blk true;
} else false;
if (!actual_hit) {
const metadata = try loadMetaData(comp.gpa, comp.zig_lib_directory.handle);
defer metadata.destroy(comp.gpa);
const target_targ_index = for (metadata.all_targets) |targ, i| {
if (targ.arch == target.cpu.arch and
targ.os == target.os.tag and
targ.abi == target.abi)
{
break i;
}
} else {
unreachable; // target_util.available_libcs prevents us from getting here
};
const target_ver_index = for (metadata.all_versions) |ver, i| {
switch (ver.order(target_version)) {
.eq => break i,
.lt => continue,
.gt => {
// TODO Expose via compile error mechanism instead of log.
log.err("invalid target glibc version: {}", .{target_version});
return error.InvalidTargetGLibCVersion;
},
}
} else {
const latest_index = metadata.all_versions.len - 1;
// TODO Expose via compile error mechanism instead of log.
log.err("zig does not yet provide glibc version {}, the max provided version is {}", .{
target_version, metadata.all_versions[latest_index],
});
return error.InvalidTargetGLibCVersion;
};
{
var map_contents = std.ArrayList(u8).init(arena);
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
if (ver.patch == 0) {
try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
} else {
try map_contents.writer().print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
}
}
try o_directory.handle.writeFile(all_map_basename, map_contents.items);
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
}
var stubs_asm = std.ArrayList(u8).init(comp.gpa);
defer stubs_asm.deinit();
for (libs) |lib, lib_i| {
stubs_asm.shrinkRetainingCapacity(0);
try stubs_asm.appendSlice(".text\n");
var inc_i: usize = 0;
const fn_inclusions_len = mem.readIntLittle(u16, metadata.inclusions[inc_i..][0..2]);
inc_i += 2;
var sym_i: usize = 0;
var opt_symbol_name: ?[]const u8 = null;
var versions_buffer: [32]u8 = undefined;
var versions_len: usize = undefined;
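            // Per the reads below, each function inclusion record consists of: a 0-terminated
            // symbol name (emitted once and shared by the following records until a terminal
            // record is seen), a little-endian u32 target bitset whose top bit marks the
            // terminal record, a u8 library index, and one byte per version with the top bit
            // marking the last version byte.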
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
const name = mem.sliceTo(metadata.inclusions[inc_i..], 0);
inc_i += name.len + 1;
opt_symbol_name = name;
versions_buffer = undefined;
versions_len = 0;
break :n name;
};
const targets = mem.readIntLittle(u32, metadata.inclusions[inc_i..][0..4]);
inc_i += 4;
const lib_index = metadata.inclusions[inc_i];
inc_i += 1;
const is_terminal = (targets & (1 << 31)) != 0;
if (is_terminal) opt_symbol_name = null;
// Test whether the inclusion applies to our current library and target.
const ok_lib_and_target =
(lib_index == lib_i) and
((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0);
while (true) {
const byte = metadata.inclusions[inc_i];
inc_i += 1;
const last = (byte & 0b1000_0000) != 0;
const ver_i = @truncate(u7, byte);
if (ok_lib_and_target and ver_i <= target_ver_index) {
versions_buffer[versions_len] = ver_i;
versions_len += 1;
}
if (last) break;
}
if (!is_terminal) continue;
                // Pick the default symbol version:
                // - If there are no versions, don't emit it
                // - Take the greatest one that is <= the target version
                // - If none of them is <= the specified one,
                //   don't pick any default version
if (versions_len == 0) continue;
var chosen_def_ver_index: u8 = 255;
{
var ver_buf_i: u8 = 0;
while (ver_buf_i < versions_len) : (ver_buf_i += 1) {
const ver_index = versions_buffer[ver_buf_i];
if (chosen_def_ver_index == 255 or ver_index > chosen_def_ver_index) {
chosen_def_ver_index = ver_index;
}
}
}
{
var ver_buf_i: u8 = 0;
while (ver_buf_i < versions_len) : (ver_buf_i += 1) {
// Example:
// .globl _Exit_2_2_5
// .type _Exit_2_2_5, %function;
// .symver _Exit_2_2_5, _Exit@@GLIBC_2.2.5
// _Exit_2_2_5:
const ver_index = versions_buffer[ver_buf_i];
const ver = metadata.all_versions[ver_index];
// Default symbol version definition vs normal symbol version definition
const want_default = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index;
const at_sign_str: []const u8 = if (want_default) "@@" else "@";
if (ver.patch == 0) {
const sym_plus_ver = if (want_default)
sym_name
else
try std.fmt.allocPrint(
arena,
"{s}_GLIBC_{d}_{d}",
.{ sym_name, ver.major, ver.minor },
);
try stubs_asm.writer().print(
\\.globl {s}
\\.type {s}, %function;
\\.symver {s}, {s}{s}GLIBC_{d}.{d}
\\{s}:
\\
, .{
sym_plus_ver,
sym_plus_ver,
sym_plus_ver,
sym_name,
at_sign_str,
ver.major,
ver.minor,
sym_plus_ver,
});
} else {
const sym_plus_ver = if (want_default)
sym_name
else
try std.fmt.allocPrint(
arena,
"{s}_GLIBC_{d}_{d}_{d}",
.{ sym_name, ver.major, ver.minor, ver.patch },
);
try stubs_asm.writer().print(
\\.globl {s}
\\.type {s}, %function;
\\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d}
\\{s}:
\\
, .{
sym_plus_ver,
sym_plus_ver,
sym_plus_ver,
sym_name,
at_sign_str,
ver.major,
ver.minor,
ver.patch,
sym_plus_ver,
});
}
}
}
}
try stubs_asm.appendSlice(".data\n");
const obj_inclusions_len = mem.readIntLittle(u16, metadata.inclusions[inc_i..][0..2]);
inc_i += 2;
sym_i = 0;
opt_symbol_name = null;
versions_buffer = undefined;
versions_len = undefined;
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
const name = mem.sliceTo(metadata.inclusions[inc_i..], 0);
inc_i += name.len + 1;
opt_symbol_name = name;
versions_buffer = undefined;
versions_len = 0;
break :n name;
};
const targets = mem.readIntLittle(u32, metadata.inclusions[inc_i..][0..4]);
inc_i += 4;
const size = mem.readIntLittle(u16, metadata.inclusions[inc_i..][0..2]);
inc_i += 2;
const lib_index = metadata.inclusions[inc_i];
inc_i += 1;
const is_terminal = (targets & (1 << 31)) != 0;
if (is_terminal) opt_symbol_name = null;
// Test whether the inclusion applies to our current library and target.
const ok_lib_and_target =
(lib_index == lib_i) and
((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0);
while (true) {
const byte = metadata.inclusions[inc_i];
inc_i += 1;
const last = (byte & 0b1000_0000) != 0;
const ver_i = @truncate(u7, byte);
if (ok_lib_and_target and ver_i <= target_ver_index) {
versions_buffer[versions_len] = ver_i;
versions_len += 1;
}
if (last) break;
}
if (!is_terminal) continue;
                // Pick the default symbol version:
                // - If there are no versions, don't emit the symbol at all.
                // - Otherwise take the greatest version that is less than or equal to the target version.
                // - If none of the versions is less than or equal to the target version,
                //   don't pick any default version.
if (versions_len == 0) continue;
var chosen_def_ver_index: u8 = 255;
{
var ver_buf_i: u8 = 0;
while (ver_buf_i < versions_len) : (ver_buf_i += 1) {
const ver_index = versions_buffer[ver_buf_i];
if (chosen_def_ver_index == 255 or ver_index > chosen_def_ver_index) {
chosen_def_ver_index = ver_index;
}
}
}
{
var ver_buf_i: u8 = 0;
while (ver_buf_i < versions_len) : (ver_buf_i += 1) {
// Example:
// .globl environ_2_2_5
// .type environ_2_2_5, %object;
// .size environ_2_2_5, 4;
// .symver environ_2_2_5, environ@@GLIBC_2.2.5
// environ_2_2_5:
const ver_index = versions_buffer[ver_buf_i];
const ver = metadata.all_versions[ver_index];
// Default symbol version definition vs normal symbol version definition
const want_default = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index;
const at_sign_str: []const u8 = if (want_default) "@@" else "@";
if (ver.patch == 0) {
const sym_plus_ver = if (want_default)
sym_name
else
try std.fmt.allocPrint(
arena,
"{s}_GLIBC_{d}_{d}",
.{ sym_name, ver.major, ver.minor },
);
try stubs_asm.writer().print(
\\.globl {s}
\\.type {s}, %object;
\\.size {s}, {d};
\\.symver {s}, {s}{s}GLIBC_{d}.{d}
\\{s}:
\\
, .{
sym_plus_ver,
sym_plus_ver,
sym_plus_ver,
size,
sym_plus_ver,
sym_name,
at_sign_str,
ver.major,
ver.minor,
sym_plus_ver,
});
} else {
const sym_plus_ver = if (want_default)
sym_name
else
try std.fmt.allocPrint(
arena,
"{s}_GLIBC_{d}_{d}_{d}",
.{ sym_name, ver.major, ver.minor, ver.patch },
);
try stubs_asm.writer().print(
\\.globl {s}
\\.type {s}, %object;
\\.size {s}, {d};
\\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d}
\\{s}:
\\
, .{
sym_plus_ver,
sym_plus_ver,
sym_plus_ver,
size,
sym_plus_ver,
sym_name,
at_sign_str,
ver.major,
ver.minor,
ver.patch,
sym_plus_ver,
});
}
}
}
}
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
try o_directory.handle.writeFile(asm_file_basename, stubs_asm.items);
try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib);
}
// No need to write the manifest because there are no file inputs associated with this cache hash.
// However we do need to write the ok file now.
if (o_directory.handle.createFile(ok_basename, .{})) |file| {
file.close();
} else |err| {
log.warn("glibc shared objects: failed to mark completion: {s}", .{@errorName(err)});
}
}
assert(comp.glibc_so_files == null);
comp.glibc_so_files = BuiltSharedObjects{
.lock = cache.toOwnedLock(),
.dir_path = try path.join(comp.gpa, &[_][]const u8{ comp.global_cache_directory.path.?, o_sub_path }),
};
}
// zig fmt: on
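/// Assembles one generated stub `.s` file into a shared object (`lib<name>.so.<sover>`) via a
/// sub-compilation, passing the shared map file (`all_map_basename`) as the linker version script.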
fn buildSharedLib(
comp: *Compilation,
arena: Allocator,
zig_cache_directory: Compilation.Directory,
bin_directory: Compilation.Directory,
asm_file_basename: []const u8,
lib: Lib,
) !void {
const tracy = trace(@src());
defer tracy.end();
const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
const emit_bin = Compilation.EmitLoc{
.directory = bin_directory,
.basename = basename,
};
const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
const soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else basename;
const map_file_path = try path.join(arena, &[_][]const u8{ bin_directory.path.?, all_map_basename });
const c_source_files = [1]Compilation.CSourceFile{
.{
.src_path = try path.join(arena, &[_][]const u8{ bin_directory.path.?, asm_file_basename }),
},
};
const sub_compilation = try Compilation.create(comp.gpa, .{
.local_cache_directory = zig_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = comp.getTarget(),
.root_name = lib.name,
.main_pkg = null,
.output_mode = .Lib,
.link_mode = .Dynamic,
.thread_pool = comp.thread_pool,
.libc_installation = comp.bin_file.options.libc_installation,
.emit_bin = emit_bin,
.optimize_mode = comp.compilerRtOptMode(),
.want_sanitize_c = false,
.want_stack_check = false,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
.want_tsan = false,
.emit_h = null,
.strip = comp.compilerRtStrip(),
.is_native_os = false,
.is_native_abi = false,
.self_exe_path = comp.self_exe_path,
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.bin_file.options.verbose_link,
.verbose_air = comp.verbose_air,
.verbose_llvm_ir = comp.verbose_llvm_ir,
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.version = version,
.version_script = map_file_path,
.soname = soname,
.c_source_files = &c_source_files,
.skip_linker_dependencies = true,
});
defer sub_compilation.destroy();
try sub_compilation.updateSubCompilation();
}
/// Returns true if glibc has crti/crtn sources for that architecture.
pub fn needsCrtiCrtn(target: std.Target) bool {
return switch (target.cpu.arch) {
.riscv32, .riscv64 => false,
else => true,
};
} | src/glibc.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const panic = std.debug.panic;
const vk = @import("vulkan");
usingnamespace @import("instance.zig");
usingnamespace @import("device.zig");
pub const SwapchainInfo = struct {
image_count: u32,
format: vk.SurfaceFormatKHR,
extent: vk.Extent2D,
usage: vk.ImageUsageFlags,
mode: vk.PresentModeKHR,
};
pub const Swapchain = struct {
const Self = @This();
allocator: *Allocator,
instance_dispatch: InstanceDispatch,
device: Device,
pdevice: vk.PhysicalDevice,
surface: vk.SurfaceKHR,
invalid: bool,
extent: vk.Extent2D,
handle: vk.SwapchainKHR,
images: std.ArrayList(vk.Image),
image_views: std.ArrayList(vk.ImageView),
render_pass: vk.RenderPass,
framebuffers: std.ArrayList(vk.Framebuffer),
pub fn init(allocator: *Allocator, instance_dispatch: InstanceDispatch, device: Device, pdevice: vk.PhysicalDevice, surface: vk.SurfaceKHR) !Self {
var self = Self{
.allocator = allocator,
.instance_dispatch = instance_dispatch,
.device = device,
.pdevice = pdevice,
.surface = surface,
.invalid = false,
.extent = vk.Extent2D{ .width = 0, .height = 0 },
.handle = .null_handle,
.images = std.ArrayList(vk.Image).init(allocator),
.image_views = std.ArrayList(vk.ImageView).init(allocator),
.render_pass = .null_handle,
.framebuffers = std.ArrayList(vk.Framebuffer).init(allocator),
};
try self.rebuild();
return self;
}
pub fn deinit(self: Self) void {
//Wait for all frames to finish before deinitializing swapchain
self.device.dispatch.deviceWaitIdle(self.device.handle) catch {};
self.device.dispatch.destroySwapchainKHR(self.device.handle, self.handle, null);
self.images.deinit();
for (self.image_views.items) |view| {
self.device.dispatch.destroyImageView(self.device.handle, view, null);
}
self.image_views.deinit();
for (self.framebuffers.items) |framebuffer| {
self.device.dispatch.destroyFramebuffer(self.device.handle, framebuffer, null);
}
self.framebuffers.deinit();
self.device.dispatch.destroyRenderPass(self.device.handle, self.render_pass, null);
}
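    /// Acquires the index of the next swapchain image, signalling `image_ready` once it is usable.
    /// Returns null if the swapchain is invalid (a rebuild is attempted at most once per frame)
    /// or if acquisition reports it as out of date.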
pub fn getNextImage(self: *Self, image_ready: vk.Semaphore) ?u32 {
//Try rebuilding once a frame when invalid
if (self.invalid) {
self.rebuild() catch |err| panic("Swapchain Rebuild Failed: {}", .{err});
if (self.invalid) {
return null;
}
}
const result_error = self.device.dispatch.acquireNextImageKHR(
self.device.handle,
self.handle,
std.math.maxInt(u64),
image_ready,
.null_handle,
);
if (result_error) |result| {
return result.image_index;
} else |err| switch (err) {
error.OutOfDateKHR => {
self.invalid = true;
return null;
},
else => panic("Swapchain Next Image Failed: {}", .{err}),
}
}
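    /// Recreates the swapchain and its dependent resources (images, image views, render pass,
    /// framebuffers), passing the previous handle as `old_swapchain`, then destroys the old resources.
    /// If the surface extent is currently zero (e.g. a minimized window), the swapchain is only
    /// marked invalid and nothing is rebuilt.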
pub fn rebuild(self: *Self) !void {
const caps = try self.instance_dispatch.getPhysicalDeviceSurfaceCapabilitiesKHR(self.pdevice, self.surface);
        //Invalid if either extent is 0
if (caps.current_extent.width == 0 or caps.current_extent.height == 0) {
self.invalid = true;
return;
}
        //Hardcoded for now, TODO: query the actual graphics/present queue family index
        const queue_family_index = [_]u32{0};
        const image_usage = vk.ImageUsageFlags{ .color_attachment_bit = true, .transfer_dst_bit = true };
        //A max_image_count of 0 means there is no upper limit, so avoid clamping to 0 in that case
        const image_count = if (caps.max_image_count == 0) caps.min_image_count + 1 else std.math.min(caps.min_image_count + 1, caps.max_image_count);
const image_extent = getImageExtent(caps.current_extent, caps.min_image_extent, caps.max_image_extent);
const surface_format = try getSurfaceFormat(self.allocator, self.instance_dispatch, self.pdevice, self.surface);
const present_mode = try getPresentMode(self.allocator, self.instance_dispatch, self.pdevice, self.surface);
var create_info = vk.SwapchainCreateInfoKHR{
.surface = self.surface,
.min_image_count = image_count,
.image_format = surface_format.format,
.image_color_space = surface_format.color_space,
.image_extent = image_extent,
.image_array_layers = 1,
            .image_usage = image_usage,
.image_sharing_mode = .exclusive,
.queue_family_index_count = queue_family_index.len,
.p_queue_family_indices = &queue_family_index,
.pre_transform = caps.current_transform,
.composite_alpha = .{ .opaque_bit_khr = true },
.present_mode = present_mode,
.clipped = vk.TRUE,
.old_swapchain = self.handle,
.flags = .{},
};
var swapchain = try self.device.dispatch.createSwapchainKHR(self.device.handle, create_info, null);
var count: u32 = undefined;
_ = try self.device.dispatch.getSwapchainImagesKHR(self.device.handle, swapchain, &count, null);
var images = try std.ArrayList(vk.Image).initCapacity(self.allocator, count);
var i: usize = 0;
while (i < count) : (i += 1) {
try images.append(.null_handle);
}
        _ = try self.device.dispatch.getSwapchainImagesKHR(self.device.handle, swapchain, &count, images.items.ptr);
var image_views = try createImageViews(self.allocator, self.device, surface_format.format, &images);
var render_pass = try createRenderPass(self.device, surface_format.format);
var framebuffers = try createFramebuffers(self.allocator, self.device, render_pass, surface_format.format, image_extent, &image_views);
//Destroy old
self.deinit();
//Update Object
self.invalid = false;
self.extent = image_extent;
self.handle = swapchain;
self.images = images;
self.image_views = image_views;
self.render_pass = render_pass;
self.framebuffers = framebuffers;
}
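    /// Prefers `b8g8r8a8_unorm` with the sRGB non-linear color space, falling back to the first
    /// format reported for the surface.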
fn getSurfaceFormat(allocator: *Allocator, instance_dispatch: InstanceDispatch, pdevice: vk.PhysicalDevice, surface: vk.SurfaceKHR) !vk.SurfaceFormatKHR {
const preferred = vk.SurfaceFormatKHR{
.format = .b8g8r8a8_unorm,
.color_space = .srgb_nonlinear_khr,
};
var count: u32 = undefined;
_ = try instance_dispatch.getPhysicalDeviceSurfaceFormatsKHR(pdevice, surface, &count, null);
const surface_formats = try allocator.alloc(vk.SurfaceFormatKHR, count);
defer allocator.free(surface_formats);
_ = try instance_dispatch.getPhysicalDeviceSurfaceFormatsKHR(pdevice, surface, &count, surface_formats.ptr);
for (surface_formats) |surface_format| {
if (preferred.format == surface_format.format and preferred.color_space == surface_format.color_space) {
return preferred;
}
}
// There must always be at least one supported surface format
return surface_formats[0];
}
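    /// Returns the first supported mode in preference order (mailbox, fifo, immediate),
    /// defaulting to fifo, which the Vulkan spec guarantees to be available.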
fn getPresentMode(allocator: *Allocator, instance_dispatch: InstanceDispatch, pdevice: vk.PhysicalDevice, surface: vk.SurfaceKHR) !vk.PresentModeKHR {
var count: u32 = undefined;
_ = try instance_dispatch.getPhysicalDeviceSurfacePresentModesKHR(pdevice, surface, &count, null);
const present_modes = try allocator.alloc(vk.PresentModeKHR, count);
defer allocator.free(present_modes);
_ = try instance_dispatch.getPhysicalDeviceSurfacePresentModesKHR(pdevice, surface, &count, present_modes.ptr);
const preferred = [_]vk.PresentModeKHR{
.mailbox_khr,
.fifo_khr,
.immediate_khr,
};
for (preferred) |mode| {
if (std.mem.indexOfScalar(vk.PresentModeKHR, present_modes, mode) != null) {
return mode;
}
}
return .fifo_khr;
}
fn getImageExtent(current: vk.Extent2D, min: vk.Extent2D, max: vk.Extent2D) vk.Extent2D {
return vk.Extent2D{
.width = std.math.clamp(current.width, min.width, max.width),
.height = std.math.clamp(current.height, min.height, max.height),
};
}
fn createImageViews(allocator: *Allocator, device: Device, format: vk.Format, images: *std.ArrayList(vk.Image)) !std.ArrayList(vk.ImageView) {
var image_views = try std.ArrayList(vk.ImageView).initCapacity(allocator, images.items.len);
for (images.items) |image| {
try image_views.append(try device.dispatch.createImageView(device.handle, .{
.flags = .{},
.image = image,
.view_type = .@"2d",
.format = format,
.components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity },
.subresource_range = .{
.aspect_mask = .{ .color_bit = true },
.base_mip_level = 0,
.level_count = 1,
.base_array_layer = 0,
.layer_count = 1,
},
}, null));
}
return image_views;
}
fn createRenderPass(device: Device, format: vk.Format) !vk.RenderPass {
const color_attachment = vk.AttachmentDescription{
.flags = .{},
.format = format,
.samples = .{ .@"1_bit" = true },
.load_op = .clear,
.store_op = .store,
.stencil_load_op = .dont_care,
.stencil_store_op = .dont_care,
.initial_layout = .@"undefined",
.final_layout = .present_src_khr,
};
const color_attachment_ref = vk.AttachmentReference{
.attachment = 0,
.layout = .color_attachment_optimal,
};
const subpass = vk.SubpassDescription{
.flags = .{},
.pipeline_bind_point = .graphics,
.input_attachment_count = 0,
.p_input_attachments = undefined,
.color_attachment_count = 1,
.p_color_attachments = @ptrCast([*]const vk.AttachmentReference, &color_attachment_ref),
.p_resolve_attachments = null,
.p_depth_stencil_attachment = null,
.preserve_attachment_count = 0,
.p_preserve_attachments = undefined,
};
return try device.dispatch.createRenderPass(device.handle, .{
.flags = .{},
.attachment_count = 1,
.p_attachments = @ptrCast([*]const vk.AttachmentDescription, &color_attachment),
.subpass_count = 1,
.p_subpasses = @ptrCast([*]const vk.SubpassDescription, &subpass),
.dependency_count = 0,
.p_dependencies = undefined,
}, null);
}
fn createFramebuffers(allocator: *Allocator, device: Device, render_pass: vk.RenderPass, format: vk.Format, extent: vk.Extent2D, image_views: *std.ArrayList(vk.ImageView)) !std.ArrayList(vk.Framebuffer) {
var framebuffers = try std.ArrayList(vk.Framebuffer).initCapacity(allocator, image_views.items.len);
for (image_views.items) |image_view| {
try framebuffers.append(try device.dispatch.createFramebuffer(device.handle, .{
.flags = .{},
.render_pass = render_pass,
.attachment_count = 1,
.p_attachments = @ptrCast([*]const vk.ImageView, &image_view),
.width = extent.width,
.height = extent.height,
.layers = 1,
}, null));
}
return framebuffers;
}
}; | src/vulkan/swapchain.zig |
const std = @import("std");
const builtin = @import("builtin");
// bswap - byteswap
// - bswapXi2_generic for unoptimized big and little endian
// i.e. for u32:
// DE AD BE EF <- little|big endian
// EF BE AD DE <- big|little endian
// i.e. for u32 the masks and shifts are:
// ff 00 00 00 >> 3*8 (leftmost byte)
// 00 ff 00 00 >> 1*8 (2nd left byte)
// 00 00 ff 00 << 1*8 (2nd right byte)
// 00 00 00 ff << 3*8 (rightmost byte)
fn bswapXi2_generic(comptime T: type) fn (a: T) callconv(.C) T {
return struct {
fn f(a: T) callconv(.C) T {
@setRuntimeSafety(builtin.is_test);
switch (@bitSizeOf(T)) {
32 => {
// zig fmt: off
return (((a & 0xff000000) >> 24)
| ((a & 0x00ff0000) >> 8 )
| ((a & 0x0000ff00) << 8 )
| ((a & 0x000000ff) << 24));
// zig fmt: on
},
64 => {
// zig fmt: off
return (((a & 0xff00000000000000) >> 56)
| ((a & 0x00ff000000000000) >> 40 )
| ((a & 0x0000ff0000000000) >> 24 )
| ((a & 0x000000ff00000000) >> 8 )
| ((a & 0x00000000ff000000) << 8 )
| ((a & 0x0000000000ff0000) << 24 )
| ((a & 0x000000000000ff00) << 40 )
| ((a & 0x00000000000000ff) << 56));
// zig fmt: on
},
128 => {
// zig fmt: off
return (((a & 0xff000000000000000000000000000000) >> 120)
| ((a & 0x00ff0000000000000000000000000000) >> 104)
| ((a & 0x0000ff00000000000000000000000000) >> 88 )
| ((a & 0x000000ff000000000000000000000000) >> 72 )
| ((a & 0x00000000ff0000000000000000000000) >> 56 )
| ((a & 0x0000000000ff00000000000000000000) >> 40 )
| ((a & 0x000000000000ff000000000000000000) >> 24 )
| ((a & 0x00000000000000ff0000000000000000) >> 8 )
| ((a & 0x0000000000000000ff00000000000000) << 8 )
| ((a & 0x000000000000000000ff000000000000) << 24 )
| ((a & 0x00000000000000000000ff0000000000) << 40 )
| ((a & 0x0000000000000000000000ff00000000) << 56 )
| ((a & 0x000000000000000000000000ff000000) << 72 )
| ((a & 0x00000000000000000000000000ff0000) << 88 )
| ((a & 0x0000000000000000000000000000ff00) << 104)
| ((a & 0x000000000000000000000000000000ff) << 120));
// zig fmt: on
},
else => {
unreachable;
},
}
}
}.f;
}
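// Exported under the standard compiler-rt names: the si/di/ti suffixes correspond to the
// GCC SImode/DImode/TImode widths (32/64/128 bits).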
pub const __bswapsi2 = bswapXi2_generic(u32);
pub const __bswapdi2 = bswapXi2_generic(u64);
pub const __bswapti2 = bswapXi2_generic(u128);
test {
_ = @import("bswapsi2_test.zig");
_ = @import("bswapdi2_test.zig");
_ = @import("bswapti2_test.zig");
} | lib/std/special/compiler_rt/bswap.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.vr);
const config = @import("../config.zig");
const Time = @import("../time.zig").Time;
const MessageBus = @import("../message_bus.zig").MessageBusReplica;
const Message = @import("../message_bus.zig").Message;
const StateMachine = @import("../state_machine.zig").StateMachine;
const vr = @import("../vr.zig");
const Header = vr.Header;
const Clock = vr.Clock;
const Journal = vr.Journal;
const Timeout = vr.Timeout;
const Command = vr.Command;
pub const Status = enum {
normal,
view_change,
recovering,
};
pub const Replica = struct {
allocator: *Allocator,
/// The id of the cluster to which this replica belongs:
cluster: u128,
/// The number of replicas in the cluster:
replica_count: u16,
/// The index of this replica's address in the configuration array held by the MessageBus:
replica: u16,
/// The maximum number of replicas that may be faulty:
f: u16,
/// A distributed fault-tolerant clock to provide lower/upper bounds on the leader's wall clock:
clock: Clock,
/// The persistent log of hash-chained journal entries:
journal: *Journal,
/// An abstraction to send messages from the replica to itself or another replica or client.
/// The recipient replica or client may be a local in-memory pointer or network-addressable.
/// The message bus will also deliver messages to this replica by calling Replica.on_message().
message_bus: *MessageBus,
/// For executing service up-calls after an operation has been committed:
state_machine: *StateMachine,
/// The current view, initially 0:
view: u64,
/// Whether we have experienced a view jump:
/// If this is true then we must request a start_view message from the leader before committing.
/// This prevents us from committing ops that may have been reordered through a view change.
view_jump_barrier: bool = false,
/// The current status, either normal, view_change, or recovering:
/// TODO Don't default to normal, set the starting status according to the journal's health.
status: Status = .normal,
/// The op number assigned to the most recently prepared operation:
op: u64,
/// The op number of the latest committed and executed operation (according to the replica):
/// The replica may have to wait for repairs to complete before commit_min reaches commit_max.
commit_min: u64,
/// The op number of the latest committed operation (according to the cluster):
/// This is the commit number in terms of the VRR paper.
commit_max: u64,
/// The current request's checksum (used for now to enforce one-at-a-time request processing):
request_checksum: ?u128 = null,
/// The current prepare message (used to cross-check prepare_ok messages, and for resending):
prepare_message: ?*Message = null,
prepare_attempt: u64 = 0,
appending: bool = false,
appending_frame: @Frame(write_to_journal) = undefined,
repairing: bool = false,
repairing_frame: @Frame(write_to_journal) = undefined,
committing: bool = false,
sending_prepare: bool = false,
sending_prepare_frame: @Frame(send_prepare_to_replica) = undefined,
/// TODO Size repair_queue_max according to a reasonable bandwidth-delay product:
repair_queue: ?*Message = null,
repair_queue_len: usize = 0,
repair_queue_max: usize = 3,
/// Unique prepare_ok messages for the same view, op number and checksum from ALL replicas:
prepare_ok_from_all_replicas: []?*Message,
/// Unique start_view_change messages for the same view from OTHER replicas (excluding ourself):
start_view_change_from_other_replicas: []?*Message,
/// Unique do_view_change messages for the same view from ALL replicas (including ourself):
do_view_change_from_all_replicas: []?*Message,
/// Unique nack_prepare messages for the same view from OTHER replicas (excluding ourself):
nack_prepare_from_other_replicas: []?*Message,
/// Whether a replica has received a quorum of start_view_change messages for the view change:
start_view_change_quorum: bool = false,
/// Whether the leader has received a quorum of do_view_change messages for the view change:
/// Determines whether the leader may effect repairs according to the CTRL protocol.
do_view_change_quorum: bool = false,
/// Whether the leader is expecting to receive a nack_prepare and for which op:
nack_prepare_op: ?u64 = null,
/// The number of ticks before a leader or follower broadcasts a ping to the other replicas:
/// TODO Explain why we need this (MessageBus handshaking, leapfrogging faulty replicas,
/// deciding whether starting a view change would be detrimental under some network partitions).
ping_timeout: Timeout,
/// The number of ticks without enough prepare_ok's before the leader resends a prepare:
/// TODO Adjust this dynamically to match sliding window EWMA of recent network latencies.
prepare_timeout: Timeout,
/// The number of ticks before the leader sends a commit heartbeat:
/// The leader always sends a commit heartbeat irrespective of when it last sent a prepare.
/// This improves liveness when prepare messages cannot be replicated fully due to partitions.
commit_timeout: Timeout,
/// The number of ticks without hearing from the leader before a follower starts a view change:
/// This transitions from .normal status to .view_change status.
normal_timeout: Timeout,
/// The number of ticks before a view change is timed out:
/// This transitions from `view_change` status to `view_change` status but for a newer view.
view_change_timeout: Timeout,
/// The number of ticks before resending a `start_view_change` or `do_view_change` message:
view_change_message_timeout: Timeout,
/// The number of ticks before repairing missing/disconnected headers and/or dirty entries:
repair_timeout: Timeout,
/// Used to provide deterministic entropy to `choose_any_other_replica()`.
/// Incremented whenever `choose_any_other_replica()` is called.
choose_any_other_replica_ticks: u64 = 0,
pub fn init(
allocator: *Allocator,
cluster: u128,
replica_count: u16,
replica: u16,
time: *Time,
// TODO We should actually provide Storage here, a Replica will always use the same Journal:
journal: *Journal,
message_bus: *MessageBus,
state_machine: *StateMachine,
) !Replica {
        // The largest f such that 2f + 1 is less than or equal to the number of replicas.
const f = (replica_count - 1) / 2;
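        // e.g. replica_count=3 gives f=1, replica_count=5 gives f=2, replica_count=7 gives f=3.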
assert(cluster > 0);
assert(replica_count > 0);
assert(replica_count > f);
assert(replica < replica_count);
assert(f > 0 or replica_count <= 2);
var prepare_ok = try allocator.alloc(?*Message, replica_count);
errdefer allocator.free(prepare_ok);
std.mem.set(?*Message, prepare_ok, null);
var start_view_change = try allocator.alloc(?*Message, replica_count);
errdefer allocator.free(start_view_change);
std.mem.set(?*Message, start_view_change, null);
var do_view_change = try allocator.alloc(?*Message, replica_count);
errdefer allocator.free(do_view_change);
std.mem.set(?*Message, do_view_change, null);
var nack_prepare = try allocator.alloc(?*Message, replica_count);
errdefer allocator.free(nack_prepare);
std.mem.set(?*Message, nack_prepare, null);
// TODO Initialize the journal when initializing the cluster:
var init_prepare = Header{
.nonce = 0,
.client = 0,
.cluster = cluster,
.view = 0,
.op = 0,
.commit = 0,
.offset = 0,
.size = @sizeOf(Header),
.epoch = 0,
.request = 0,
.replica = 0,
.command = .prepare,
.operation = .init,
};
init_prepare.set_checksum_body(&[0]u8{});
init_prepare.set_checksum();
assert(init_prepare.valid_checksum());
assert(init_prepare.invalid() == null);
journal.headers[0] = init_prepare;
journal.assert_headers_reserved_from(init_prepare.op + 1);
var self = Replica{
.allocator = allocator,
.cluster = cluster,
.replica_count = replica_count,
.replica = replica,
.f = f,
// TODO Drop these @intCasts when the client table branch lands:
.clock = try Clock.init(
allocator,
@intCast(u8, replica_count),
@intCast(u8, replica),
time,
),
.journal = journal,
.message_bus = message_bus,
.state_machine = state_machine,
.view = init_prepare.view,
.op = init_prepare.op,
.commit_min = init_prepare.commit,
.commit_max = init_prepare.commit,
.prepare_ok_from_all_replicas = prepare_ok,
.start_view_change_from_other_replicas = start_view_change,
.do_view_change_from_all_replicas = do_view_change,
.nack_prepare_from_other_replicas = nack_prepare,
.ping_timeout = Timeout{
.name = "ping_timeout",
.replica = replica,
.after = 100,
},
.prepare_timeout = Timeout{
.name = "prepare_timeout",
.replica = replica,
.after = 50,
},
.commit_timeout = Timeout{
.name = "commit_timeout",
.replica = replica,
.after = 100,
},
.normal_timeout = Timeout{
.name = "normal_timeout",
.replica = replica,
.after = 500,
},
.view_change_timeout = Timeout{
.name = "view_change_timeout",
.replica = replica,
.after = 500,
},
.view_change_message_timeout = Timeout{
.name = "view_change_message_timeout",
.replica = replica,
.after = 50,
},
.repair_timeout = Timeout{
.name = "repair_timeout",
.replica = replica,
.after = 50,
},
};
// We must initialize timeouts here, not in tick() on the first tick, because on_message()
// can race with tick()... before timeouts have been initialized:
assert(self.status == .normal);
if (self.leader()) {
log.debug("{}: init: leader", .{self.replica});
self.ping_timeout.start();
self.commit_timeout.start();
self.repair_timeout.start();
} else {
log.debug("{}: init: follower", .{self.replica});
self.ping_timeout.start();
self.normal_timeout.start();
self.repair_timeout.start();
}
return self;
}
pub fn deinit(self: *Replica) void {
self.allocator.free(self.prepare_ok_from_all_replicas);
self.allocator.free(self.start_view_change_from_other_replicas);
self.allocator.free(self.do_view_change_from_all_replicas);
self.allocator.free(self.nack_prepare_from_other_replicas);
}
/// Returns whether the replica is a follower for the current view.
/// This may be used only when the replica status is normal.
pub fn follower(self: *Replica) bool {
return !self.leader();
}
/// Returns whether the replica is the leader for the current view.
/// This may be used only when the replica status is normal.
pub fn leader(self: *Replica) bool {
assert(self.status == .normal);
return self.leader_index(self.view) == self.replica;
}
/// Returns the index into the configuration of the leader for a given view.
pub fn leader_index(self: *Replica, view: u64) u16 {
return @intCast(u16, @mod(view, self.replica_count));
}
/// Time is measured in logical ticks that are incremented on every call to tick().
/// This eliminates a dependency on the system time and enables deterministic testing.
pub fn tick(self: *Replica) void {
self.clock.tick();
self.ping_timeout.tick();
self.prepare_timeout.tick();
self.commit_timeout.tick();
self.normal_timeout.tick();
self.view_change_timeout.tick();
self.view_change_message_timeout.tick();
self.repair_timeout.tick();
if (self.ping_timeout.fired()) self.on_ping_timeout();
if (self.prepare_timeout.fired()) self.on_prepare_timeout();
if (self.commit_timeout.fired()) self.on_commit_timeout();
if (self.normal_timeout.fired()) self.on_normal_timeout();
if (self.view_change_timeout.fired()) self.on_view_change_timeout();
if (self.view_change_message_timeout.fired()) self.on_view_change_message_timeout();
if (self.repair_timeout.fired()) self.on_repair_timeout();
self.repair_last_queued_message_if_any();
}
/// Called by the MessageBus to deliver a message to the replica.
pub fn on_message(self: *Replica, message: *Message) void {
log.debug("{}:", .{self.replica});
log.debug("{}: on_message: view={} status={s} {}", .{
self.replica,
self.view,
@tagName(self.status),
message.header,
});
if (message.header.invalid()) |reason| {
log.debug("{}: on_message: invalid ({s})", .{ self.replica, reason });
return;
}
if (message.header.cluster != self.cluster) {
log.warn("{}: on_message: wrong cluster (message.header.cluster={} instead of {})", .{
self.replica,
message.header.cluster,
self.cluster,
});
return;
}
assert(message.header.replica < self.replica_count);
switch (message.header.command) {
.ping => self.on_ping(message),
.pong => self.on_pong(message),
.request => self.on_request(message),
.prepare => self.on_prepare(message),
.prepare_ok => self.on_prepare_ok(message),
.commit => self.on_commit(message),
.start_view_change => self.on_start_view_change(message),
.do_view_change => self.on_do_view_change(message),
.start_view => self.on_start_view(message),
.request_start_view => self.on_request_start_view(message),
.request_prepare => self.on_request_prepare(message),
.request_headers => self.on_request_headers(message),
.headers => self.on_headers(message),
.nack_prepare => self.on_nack_prepare(message),
else => unreachable,
}
}
fn on_ping(self: *Replica, message: *const Message) void {
if (self.status != .normal and self.status != .view_change) return;
assert(self.status == .normal or self.status == .view_change);
// TODO Drop pings that were not addressed to us.
var pong = Header{
.command = .pong,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
};
if (message.header.client > 0) {
assert(message.header.replica == 0);
// 4.5 Client Recovery
// If a client crashes and recovers it must start up with a request number larger than
// what it had before it failed. It fetches its latest number from the replicas and adds
// 2 to this value to be sure the new request number is big enough. Adding 2 ensures
// that its next request will have a unique number even in the odd case where the latest
// request it sent before it failed is still in transit (since that request will have as
// its request number the number the client learns plus 1).
//
// TODO Lookup latest request number from client table:
pong.request = 0;
self.message_bus.send_header_to_client(message.header.client, pong);
} else if (message.header.replica == self.replica) {
log.warn("{}: on_ping: ignoring (self)", .{self.replica});
} else {
// Copy the ping's monotonic timestamp across to our pong and add our wall clock sample:
pong.op = message.header.op;
pong.offset = @bitCast(u64, self.clock.realtime());
self.message_bus.send_header_to_replica(message.header.replica, pong);
}
}
fn on_pong(self: *Replica, message: *const Message) void {
if (message.header.client > 0) return;
if (message.header.replica == self.replica) return;
const m0 = message.header.op;
const t1 = @bitCast(i64, message.header.offset);
const m2 = self.clock.monotonic();
// TODO Drop the @intCast when the client table branch lands.
self.clock.learn(@intCast(u8, message.header.replica), m0, t1, m2);
}
/// The primary advances op-number, adds the request to the end of the log, and updates the
/// information for this client in the client-table to contain the new request number, s.
/// Then it sends a ⟨PREPARE v, m, n, k⟩ message to the other replicas, where v is the current
/// view-number, m is the message it received from the client, n is the op-number it assigned to
/// the request, and k is the commit-number.
fn on_request(self: *Replica, message: *Message) void {
if (self.status != .normal) {
log.debug("{}: on_request: ignoring ({})", .{ self.replica, self.status });
return;
}
if (message.header.view > self.view) {
log.debug("{}: on_request: ignoring (newer view)", .{self.replica});
return;
}
if (self.follower()) {
// TODO Re-enable this dead branch when the Client starts pinging the cluster.
// Otherwise, we will trip our one-request-at-a-time limit.
if (message.header.view < self.view and false) {
log.debug("{}: on_request: forwarding (follower)", .{self.replica});
self.send_message_to_replica(self.leader_index(self.view), message);
} else {
// The message has the same view, but was routed to the wrong replica.
// Don't amplify traffic, let the client retry to another replica.
log.warn("{}: on_request: ignoring (follower)", .{self.replica});
}
return;
}
assert(self.status == .normal);
assert(self.leader());
// TODO Check the client table to see if this is a duplicate request and reply if so.
// TODO If request is pending then this will also reflect in client table and we can ignore.
// TODO Add client information to client table.
if (self.request_checksum) |request_checksum| {
assert(message.header.command == .request);
if (message.header.checksum == request_checksum) {
log.debug("{}: on_request: ignoring (already preparing)", .{self.replica});
return;
}
}
// TODO Queue (or drop client requests after a limit) to handle one request at a time:
// TODO Clear this queue if we lose our leadership (critical for correctness).
assert(self.commit_min == self.commit_max and self.commit_max == self.op);
assert(self.request_checksum == null);
self.request_checksum = message.header.checksum;
log.debug("{}: on_request: request {}", .{ self.replica, message.header.checksum });
var body = message.buffer[@sizeOf(Header)..message.header.size];
self.state_machine.prepare(message.header.operation.to_state_machine_op(StateMachine), body);
var latest_entry = self.journal.entry_for_op_exact(self.op).?;
message.header.nonce = latest_entry.checksum;
message.header.view = self.view;
message.header.op = self.op + 1;
message.header.commit = self.commit_max;
message.header.offset = self.journal.next_offset(latest_entry);
message.header.replica = self.replica;
message.header.command = .prepare;
message.header.set_checksum_body(body);
message.header.set_checksum();
assert(message.header.checksum != self.request_checksum.?);
log.debug("{}: on_request: prepare {}", .{ self.replica, message.header.checksum });
assert(self.prepare_message == null);
assert(self.prepare_attempt == 0);
for (self.prepare_ok_from_all_replicas) |received| assert(received == null);
assert(self.prepare_timeout.ticking == false);
self.prepare_message = message.ref();
self.prepare_attempt = 0;
self.prepare_timeout.start();
// Use the same replication code path for the leader and followers:
self.send_message_to_replica(self.replica, message);
}
/// Replication is simple, with a single code path for the leader and followers:
///
/// The leader starts by sending a prepare message to itself.
///
/// Each replica (including the leader) then forwards this prepare message to the next replica
/// in the configuration, in parallel to writing to its own journal, closing the circle until
/// the next replica is back to the leader, in which case the replica does not forward.
///
/// This keeps the leader's outgoing bandwidth limited (one-for-one) to incoming bandwidth,
/// since the leader need only replicate to the next replica. Otherwise, the leader would need
/// to replicate to multiple followers, dividing available bandwidth.
///
/// This does not impact latency, since with Flexible Paxos we need only one remote prepare_ok.
/// It is ideal if this synchronous replication to one remote replica is to the next replica,
/// since that is the replica next in line to be leader, which will need to be up-to-date before
/// it can start the next view.
///
/// At the same time, asynchronous replication keeps going, so that if our local disk is slow
/// then any latency spike will be masked by more remote prepare_ok messages as they come in.
/// This gives automatic tail latency tolerance for storage latency spikes.
///
/// The remaining problem then is tail latency tolerance for network latency spikes.
/// If the next replica is down or partitioned, then the leader's prepare timeout will fire,
/// and the leader will resend but to another replica, until it receives enough prepare_ok's.
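    ///
    /// For example, with three replicas and replica 0 as leader: 0 appends and forwards to 1,
    /// 1 appends and forwards to 2, and 2 appends without forwarding, since the next replica in
    /// the configuration is the leader itself.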
fn on_prepare(self: *Replica, message: *Message) void {
self.view_jump(message.header);
if (self.is_repair(message)) {
log.debug("{}: on_prepare: ignoring (repair)", .{self.replica});
self.on_repair(message);
return;
}
if (self.status != .normal) {
log.debug("{}: on_prepare: ignoring ({})", .{ self.replica, self.status });
return;
}
assert(self.status == .normal);
assert(message.header.view == self.view);
assert(self.leader() or self.follower());
assert(message.header.replica == self.leader_index(message.header.view));
assert(message.header.op > self.op);
assert(message.header.op > self.commit_min);
if (self.follower()) self.normal_timeout.reset();
if (message.header.op > self.op + 1) {
log.debug("{}: on_prepare: newer op", .{self.replica});
self.jump_to_newer_op_in_normal_status(message.header);
}
if (self.journal.previous_entry(message.header)) |previous| {
// Any previous entry may be a whole journal's worth of ops behind due to wrapping.
// We therefore do not do any further op, offset or checksum assertions beyond this:
self.panic_if_hash_chain_would_break_in_the_same_view(previous, message.header);
}
// We must advance our op and set the header as dirty before replicating and journalling.
// The leader needs this before its journal is outrun by any prepare_ok quorum:
log.debug("{}: on_prepare: advancing: op={}..{} checksum={}..{}", .{
self.replica,
self.op,
message.header.op,
message.header.nonce,
message.header.checksum,
});
assert(message.header.op == self.op + 1);
self.op = message.header.op;
self.journal.set_entry_as_dirty(message.header);
// We have the latest op from the leader and have therefore cleared the view jump barrier:
if (self.view_jump_barrier) {
self.view_jump_barrier = false;
log.notice("{}: on_prepare: cleared view jump barrier", .{self.replica});
}
// TODO Update client's information in the client table.
self.replicate(message);
self.append(message);
if (self.follower()) {
// A prepare may already be committed if requested by repair() so take the max:
self.commit_ops_through(std.math.max(message.header.commit, self.commit_max));
}
}
fn on_prepare_ok(self: *Replica, message: *Message) void {
if (self.status != .normal) {
log.warn("{}: on_prepare_ok: ignoring ({})", .{ self.replica, self.status });
return;
}
if (message.header.view < self.view) {
log.debug("{}: on_prepare_ok: ignoring (older view)", .{self.replica});
return;
}
if (message.header.view > self.view) {
// Another replica is treating us as the leader for a view we do not know about.
// This may be caused by a fault in the network topology.
log.warn("{}: on_prepare_ok: ignoring (newer view)", .{self.replica});
return;
}
if (self.follower()) {
// This may be caused by a fault in the network topology.
log.warn("{}: on_prepare_ok: ignoring (follower)", .{self.replica});
return;
}
if (self.prepare_message) |prepare_message| {
if (message.header.nonce != prepare_message.header.checksum) {
log.debug("{}: on_prepare_ok: ignoring (different nonce)", .{self.replica});
return;
}
} else {
log.debug("{}: on_prepare_ok: ignoring (not preparing)", .{self.replica});
return;
}
assert(self.status == .normal);
assert(message.header.view == self.view);
assert(self.leader());
assert(message.header.command == .prepare_ok);
assert(message.header.nonce == self.prepare_message.?.header.checksum);
assert(message.header.client == self.prepare_message.?.header.client);
assert(message.header.cluster == self.prepare_message.?.header.cluster);
assert(message.header.view == self.prepare_message.?.header.view);
assert(message.header.op == self.prepare_message.?.header.op);
assert(message.header.commit == self.prepare_message.?.header.commit);
assert(message.header.offset == self.prepare_message.?.header.offset);
assert(message.header.epoch == self.prepare_message.?.header.epoch);
assert(message.header.request == self.prepare_message.?.header.request);
assert(message.header.operation == self.prepare_message.?.header.operation);
assert(message.header.op == self.op);
assert(message.header.op == self.commit_min + 1);
assert(message.header.op == self.commit_max + 1);
// Wait until we have `f + 1` messages (including ourself) for quorum:
const threshold = self.f + 1;
const count = self.add_message_and_receive_quorum_exactly_once(
self.prepare_ok_from_all_replicas,
message,
threshold,
) orelse return;
assert(count == threshold);
log.debug("{}: on_prepare_ok: quorum received", .{self.replica});
self.commit_op(self.prepare_message.?);
assert(self.commit_min == self.op);
assert(self.commit_max == self.op);
self.reset_quorum_prepare();
}
fn on_commit(self: *Replica, message: *const Message) void {
self.view_jump(message.header);
if (self.status != .normal) {
log.debug("{}: on_commit: ignoring ({})", .{ self.replica, self.status });
return;
}
if (message.header.view < self.view) {
log.debug("{}: on_commit: ignoring (older view)", .{self.replica});
return;
}
if (self.leader()) {
log.warn("{}: on_commit: ignoring (leader)", .{self.replica});
return;
}
assert(self.status == .normal);
assert(message.header.view == self.view);
assert(self.follower());
assert(message.header.replica == self.leader_index(message.header.view));
// We may not always have the latest commit entry but if we do these checksums must match:
if (self.journal.entry_for_op_exact(message.header.commit)) |commit_entry| {
if (commit_entry.checksum == message.header.nonce) {
log.debug("{}: on_commit: verified commit checksum", .{self.replica});
} else {
@panic("commit checksum verification failed");
}
}
self.normal_timeout.reset();
self.commit_ops_through(message.header.commit);
}
fn on_repair(self: *Replica, message: *Message) void {
assert(message.header.command == .prepare);
if (self.status != .normal and self.status != .view_change) {
log.debug("{}: on_repair: ignoring ({})", .{ self.replica, self.status });
return;
}
if (message.header.view > self.view) {
log.debug("{}: on_repair: ignoring (newer view)", .{self.replica});
return;
}
if (self.status == .view_change and message.header.view == self.view) {
log.debug("{}: on_repair: ignoring (view started)", .{self.replica});
return;
}
if (self.status == .view_change and self.leader_index(self.view) != self.replica) {
log.debug("{}: on_repair: ignoring (view change, follower)", .{self.replica});
return;
}
if (self.status == .view_change and !self.do_view_change_quorum) {
log.debug("{}: on_repair: ignoring (view change, waiting for quorum)", .{self.replica});
return;
}
if (message.header.op > self.op) {
assert(message.header.view < self.view);
log.debug("{}: on_repair: ignoring (would advance self.op)", .{self.replica});
return;
}
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(message.header.view <= self.view);
assert(message.header.op <= self.op); // Repairs may never advance `self.op`.
if (self.journal.has_clean(message.header)) {
log.debug("{}: on_repair: duplicate", .{self.replica});
self.send_prepare_ok(message.header);
return;
}
if (self.repair_header(message.header)) {
assert(self.journal.has_dirty(message.header));
if (self.nack_prepare_op) |nack_prepare_op| {
if (nack_prepare_op == message.header.op) {
log.debug("{}: on_repair: repairing uncommitted op={}", .{
self.replica,
message.header.op,
});
self.reset_quorum_nack_prepare();
}
}
if (self.repairing) return self.repair_later(message);
log.debug("{}: on_repair: repairing journal", .{self.replica});
self.repairing_frame = async self.write_to_journal(message, &self.repairing);
}
}
fn on_start_view_change(self: *Replica, message: *Message) void {
if (self.ignore_view_change_message(message)) return;
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view >= self.view);
assert(message.header.replica != self.replica);
self.view_jump(message.header);
assert(!self.view_jump_barrier);
assert(self.status == .view_change);
assert(message.header.view == self.view);
// Wait until we have `f` messages (excluding ourself) for quorum:
assert(self.replica_count > 1);
assert(self.f > 0 or self.replica_count == 2);
const threshold = std.math.max(1, self.f);
const count = self.add_message_and_receive_quorum_exactly_once(
self.start_view_change_from_other_replicas,
message,
threshold,
) orelse return;
assert(count == threshold);
assert(self.start_view_change_from_other_replicas[self.replica] == null);
log.debug("{}: on_start_view_change: quorum received", .{self.replica});
assert(!self.start_view_change_quorum);
assert(!self.do_view_change_quorum);
self.start_view_change_quorum = true;
// When replica i receives start_view_change messages for its view from f other replicas,
// it sends a ⟨do_view_change v, l, v’, n, k, i⟩ message to the node that will be the
// primary in the new view. Here v is its view, l is its log, v′ is the view number of the
// latest view in which its status was normal, n is the op number, and k is the commit
// number.
self.send_do_view_change();
}
/// When the new primary receives f + 1 do_view_change messages from different replicas
/// (including itself), it sets its view number to that in the messages and selects as the
/// new log the one contained in the message with the largest v′; if several messages have
/// the same v′ it selects the one among them with the largest n. It sets its op number to
/// that of the topmost entry in the new log, sets its commit number to the largest such
/// number it received in the do_view_change messages, changes its status to normal, and
/// informs the other replicas of the completion of the view change by sending
/// ⟨start_view v, l, n, k⟩ messages to the other replicas, where l is the new log, n is the
/// op number, and k is the commit number.
fn on_do_view_change(self: *Replica, message: *Message) void {
if (self.ignore_view_change_message(message)) return;
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view >= self.view);
assert(self.leader_index(message.header.view) == self.replica);
self.view_jump(message.header);
assert(!self.view_jump_barrier);
assert(self.status == .view_change);
assert(message.header.view == self.view);
// We may receive a `do_view_change` quorum from other replicas, which already have a
// `start_view_change_quorum`, before we receive a `start_view_change_quorum`:
if (!self.start_view_change_quorum) {
log.notice("{}: on_do_view_change: waiting for start_view_change quorum", .{
self.replica,
});
return;
}
// Wait until we have `f + 1` messages (including ourself) for quorum:
const threshold = self.f + 1;
const count = self.add_message_and_receive_quorum_exactly_once(
self.do_view_change_from_all_replicas,
message,
threshold,
) orelse return;
assert(count == threshold);
assert(self.do_view_change_from_all_replicas[self.replica] != null);
log.debug("{}: on_do_view_change: quorum received", .{self.replica});
var latest = Header.reserved();
var k: ?u64 = null;
for (self.do_view_change_from_all_replicas) |received, replica| {
if (received) |m| {
assert(m.header.command == .do_view_change);
assert(m.header.cluster == self.cluster);
assert(m.header.replica == replica);
assert(m.header.view == self.view);
if (k == null or m.header.commit > k.?) k = m.header.commit;
self.set_latest_header(self.message_body_as_headers(m), &latest);
}
}
self.set_latest_op_and_k(&latest, k.?, "on_do_view_change");
// Now that we have the latest op in place, repair any other headers:
for (self.do_view_change_from_all_replicas) |received| {
if (received) |m| {
for (self.message_body_as_headers(m)) |*h| {
_ = self.repair_header(h);
}
}
}
// Verify that the repairs above have not replaced or advanced the latest op:
assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);
assert(self.start_view_change_quorum);
assert(!self.do_view_change_quorum);
self.do_view_change_quorum = true;
// Start repairs according to the CTRL protocol:
assert(!self.repair_timeout.ticking);
self.repair_timeout.start();
self.repair();
}
/// When other replicas receive the start_view message, they replace their log with the one
/// in the message, set their op number to that of the latest entry in the log, set their
/// view number to the view number in the message, change their status to normal, and update
/// the information in their client table. If there are non-committed operations in the log,
/// they send a ⟨prepare_ok v, n, i⟩ message to the primary; here n is the op-number. Then
/// they execute all operations known to be committed that they haven’t executed previously,
/// advance their commit number, and update the information in their client table.
fn on_start_view(self: *Replica, message: *const Message) void {
if (self.ignore_view_change_message(message)) return;
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view >= self.view);
assert(message.header.replica != self.replica);
assert(message.header.replica == self.leader_index(message.header.view));
self.view_jump(message.header);
assert(!self.view_jump_barrier or self.status == .normal);
assert(self.status == .view_change or self.view_jump_barrier);
assert(message.header.view == self.view);
var latest = Header.reserved();
self.set_latest_header(self.message_body_as_headers(message), &latest);
assert(latest.op == message.header.op);
self.set_latest_op_and_k(&latest, message.header.commit, "on_start_view");
// Now that we have the latest op in place, repair any other headers:
for (self.message_body_as_headers(message)) |*h| {
_ = self.repair_header(h);
}
// Verify that the repairs above have not replaced or advanced the latest op:
assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);
if (self.view_jump_barrier) {
assert(self.status == .normal);
self.view_jump_barrier = false;
log.notice("{}: on_start_view: resolved view jump barrier", .{self.replica});
} else {
assert(self.status == .view_change);
self.transition_to_normal_status(message.header.view);
}
assert(!self.view_jump_barrier);
assert(self.status == .normal);
assert(message.header.view == self.view);
assert(self.follower());
// TODO Send prepare_ok messages for uncommitted ops.
self.commit_ops_through(self.commit_max);
self.repair();
}
fn on_request_start_view(self: *Replica, message: *const Message) void {
if (self.ignore_repair_message(message)) return;
assert(self.status == .normal);
assert(message.header.view == self.view);
assert(message.header.replica != self.replica);
assert(self.leader());
const start_view = self.create_do_view_change_or_start_view_message(.start_view) orelse {
log.debug("{}: on_request_start_view: dropping start_view, no message available", .{
self.replica,
});
return;
};
defer self.message_bus.unref(start_view);
assert(start_view.references == 1);
assert(start_view.header.command == .start_view);
assert(start_view.header.view == self.view);
assert(start_view.header.op == self.op);
assert(start_view.header.commit == self.commit_max);
self.send_message_to_replica(message.header.replica, start_view);
}
fn on_request_prepare(self: *Replica, message: *const Message) void {
if (self.ignore_repair_message(message)) return;
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view == self.view);
assert(message.header.replica != self.replica);
const op = message.header.op;
var checksum: ?u128 = message.header.nonce;
if (self.leader_index(self.view) == self.replica and message.header.nonce == 0) {
checksum = null;
}
if (self.journal.entry_for_op_exact_with_checksum(op, checksum)) |entry| {
assert(entry.op == op);
if (!self.journal.dirty.bit(op)) {
assert(!self.journal.faulty.bit(op));
if (self.sending_prepare) return;
self.sending_prepare_frame = async self.send_prepare_to_replica(
message.header.replica,
op,
checksum,
);
// We have guaranteed the prepare and our copy is clean (not safe to nack).
return;
} else if (self.journal.faulty.bit(op)) {
                // We have guaranteed the prepare but our copy is faulty (not safe to nack).
return;
}
// We know of the prepare but we have yet to write or guarantee it (safe to nack).
// Continue through below...
}
if (self.status == .view_change) {
assert(message.header.replica == self.leader_index(self.view));
assert(checksum != null);
if (self.journal.entry_for_op_exact_with_checksum(op, checksum) != null) {
assert(self.journal.dirty.bit(op) and !self.journal.faulty.bit(op));
}
self.send_header_to_replica(message.header.replica, .{
.command = .nack_prepare,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = op,
.nonce = checksum.?,
});
}
}
fn on_request_headers(self: *Replica, message: *const Message) void {
if (self.ignore_repair_message(message)) return;
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view == self.view);
assert(message.header.replica != self.replica);
const op_min = message.header.commit;
const op_max = message.header.op;
assert(op_max >= op_min);
// We must add 1 because op_max and op_min are both inclusive:
const count_max = @intCast(u32, std.math.min(64, op_max - op_min + 1));
assert(count_max > 0);
const size_max = @sizeOf(Header) + @sizeOf(Header) * count_max;
const response = self.message_bus.get_message() orelse {
log.debug("{}: on_request_headers: dropping response, no message available", .{
self.replica,
});
return;
};
defer self.message_bus.unref(response);
response.header.* = .{
.command = .headers,
// We echo the nonce back to the replica so that they can match up our response:
.nonce = message.header.nonce,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
};
const count = self.journal.copy_latest_headers_between(
op_min,
op_max,
std.mem.bytesAsSlice(Header, response.buffer[@sizeOf(Header)..size_max]),
);
response.header.size = @intCast(u32, @sizeOf(Header) + @sizeOf(Header) * count);
const body = response.buffer[@sizeOf(Header)..response.header.size];
response.header.set_checksum_body(body);
response.header.set_checksum();
self.send_message_to_replica(message.header.replica, response);
}
fn on_nack_prepare(self: *Replica, message: *Message) void {
if (self.ignore_repair_message(message)) return;
assert(self.status == .view_change);
assert(message.header.view == self.view);
assert(message.header.replica != self.replica);
assert(self.leader_index(self.view) == self.replica);
assert(self.do_view_change_quorum);
assert(self.repairs_allowed());
if (self.nack_prepare_op == null) {
log.debug("{}: on_nack_prepare: ignoring (no longer expected)", .{self.replica});
return;
}
const op = self.nack_prepare_op.?;
const checksum = self.journal.entry_for_op_exact(op).?.checksum;
if (message.header.op != op) {
log.debug("{}: on_nack_prepare: ignoring (repairing another op)", .{self.replica});
return;
}
// Followers may not send a `nack_prepare` for a different checksum:
assert(message.header.nonce == checksum);
// We require a `nack_prepare` from a majority of followers if our op is faulty:
// Otherwise, we know we do not have the op and need only `f` other nacks.
assert(self.replica_count > 1);
assert(self.f > 0 or self.replica_count == 2);
assert(self.f + 1 == (self.replica_count - 1) / 2 + 1);
const threshold = if (self.journal.faulty.bit(op)) self.f + 1 else std.math.max(1, self.f);
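        // e.g. with 5 replicas (f=2): 3 nacks are required when our own copy is faulty, otherwise 2.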
// Wait until we have `threshold` messages for quorum:
const count = self.add_message_and_receive_quorum_exactly_once(
self.nack_prepare_from_other_replicas,
message,
threshold,
) orelse return;
assert(count == threshold);
assert(self.nack_prepare_from_other_replicas[self.replica] == null);
log.debug("{}: on_nack_prepare: quorum received", .{self.replica});
assert(self.valid_hash_chain("on_nack_prepare"));
assert(op > self.commit_max);
assert(op <= self.op);
assert(self.journal.entry_for_op_exact_with_checksum(op, checksum) != null);
assert(self.journal.dirty.bit(op));
log.debug("{}: on_nack_prepare: discarding uncommitted ops={}..{}", .{
self.replica,
op,
self.op,
});
self.journal.remove_entries_from(op);
self.op = op - 1;
assert(self.journal.entry_for_op(op) == null);
assert(!self.journal.dirty.bit(op));
assert(!self.journal.faulty.bit(op));
// We require that `self.op` always exists. Rewinding `self.op` could change that.
// However, we do this only as the leader within a view change, with all headers intact.
assert(self.journal.entry_for_op_exact(self.op) != null);
self.reset_quorum_nack_prepare();
self.repair();
}
fn on_headers(self: *Replica, message: *const Message) void {
if (self.ignore_repair_message(message)) return;
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view == self.view);
assert(message.header.replica != self.replica);
var op_min: ?u64 = null;
var op_max: ?u64 = null;
for (self.message_body_as_headers(message)) |*h| {
if (op_min == null or h.op < op_min.?) op_min = h.op;
if (op_max == null or h.op > op_max.?) op_max = h.op;
_ = self.repair_header(h);
}
assert(op_max.? >= op_min.?);
self.repair();
}
fn on_ping_timeout(self: *Replica) void {
self.ping_timeout.reset();
// TODO We may want to ping for connectivity during a view change.
assert(self.status == .normal);
assert(self.leader() or self.follower());
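// Note that the ping reuses the op field to carry our monotonic clock reading: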
var ping = Header{
.command = .ping,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = self.clock.monotonic(),
};
self.send_header_to_other_replicas(ping);
}
fn on_prepare_timeout(self: *Replica) void {
// TODO Exponential backoff.
// TODO Prevent flooding the network due to multiple concurrent rounds of replication.
self.prepare_timeout.reset();
self.prepare_attempt += 1;
assert(self.status == .normal);
assert(self.leader());
assert(self.request_checksum != null);
assert(self.prepare_message != null);
var message = self.prepare_message.?;
assert(message.header.view == self.view);
// The list of remote replicas yet to send a prepare_ok:
var waiting: [32]u16 = undefined;
var waiting_len: usize = 0;
for (self.prepare_ok_from_all_replicas) |received, replica| {
if (received == null and replica != self.replica) {
waiting[waiting_len] = @intCast(u16, replica);
waiting_len += 1;
if (waiting_len == waiting.len) break;
}
}
if (waiting_len == 0) {
log.debug("{}: on_prepare_timeout: waiting for journal", .{self.replica});
assert(self.prepare_ok_from_all_replicas[self.replica] == null);
return;
}
for (waiting[0..waiting_len]) |replica| {
log.debug("{}: on_prepare_timeout: waiting for replica {}", .{ self.replica, replica });
}
// Cycle through the list for each attempt to reach live replicas and get around partitions:
// If only the first replica in the list was chosen... liveness would suffer if it was down!
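// For example, if waiting=[2, 4] then successive attempts alternate between replica 4 and
// replica 2, since `prepare_attempt % waiting_len` indexes the list.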
var replica = waiting[@mod(self.prepare_attempt, waiting_len)];
assert(replica != self.replica);
log.debug("{}: on_prepare_timeout: replicating to replica {}", .{ self.replica, replica });
self.send_message_to_replica(replica, message);
}
fn on_commit_timeout(self: *Replica) void {
self.commit_timeout.reset();
assert(self.status == .normal);
assert(self.leader());
assert(self.commit_min == self.commit_max);
// TODO Snapshots: Use snapshot checksum if commit is no longer in journal.
const latest_committed_entry = self.journal.entry_for_op_exact(self.commit_max).?;
self.send_header_to_other_replicas(.{
.command = .commit,
.nonce = latest_committed_entry.checksum,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.commit = self.commit_max,
});
}
fn on_normal_timeout(self: *Replica) void {
assert(self.status == .normal);
assert(self.follower());
self.transition_to_view_change_status(self.view + 1);
}
fn on_view_change_timeout(self: *Replica) void {
assert(self.status == .view_change);
self.transition_to_view_change_status(self.view + 1);
}
fn on_view_change_message_timeout(self: *Replica) void {
self.view_change_message_timeout.reset();
assert(self.status == .view_change);
// Keep sending `start_view_change` messages:
// We may have a `start_view_change_quorum` but other replicas may not.
// However, the leader may stop sending once it has a `do_view_change_quorum`.
if (!self.do_view_change_quorum) self.send_start_view_change();
// It is critical that a `do_view_change` message implies a `start_view_change_quorum`:
if (self.start_view_change_quorum) {
// The leader need not retry to send a `do_view_change` message to itself:
// We assume the MessageBus will not drop messages sent by a replica to itself.
if (self.leader_index(self.view) != self.replica) self.send_do_view_change();
}
}
fn on_repair_timeout(self: *Replica) void {
assert(self.status == .normal or self.status == .view_change);
self.repair();
}
fn add_message_and_receive_quorum_exactly_once(
self: *Replica,
messages: []?*Message,
message: *Message,
threshold: u32,
) ?usize {
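// Returns the count only on the exact transition to quorum (count == threshold).
// Returns null both before quorum is reached and for any further message thereafter,
// so that the caller's state transition runs exactly once.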
assert(messages.len == self.replica_count);
assert(message.header.cluster == self.cluster);
assert(message.header.view == self.view);
switch (message.header.command) {
.prepare_ok => {
assert(self.status == .normal);
assert(self.leader());
assert(message.header.nonce == self.prepare_message.?.header.checksum);
},
.start_view_change => assert(self.status == .view_change),
.do_view_change, .nack_prepare => {
assert(self.status == .view_change);
assert(self.leader_index(self.view) == self.replica);
},
else => unreachable,
}
assert(threshold >= 1);
assert(threshold <= self.replica_count);
const command: []const u8 = @tagName(message.header.command);
// Do not allow duplicate messages to trigger multiple passes through a state transition:
if (messages[message.header.replica]) |m| {
// Assert that this truly is a duplicate message and not a different message:
assert(m.header.command == message.header.command);
assert(m.header.replica == message.header.replica);
assert(m.header.view == message.header.view);
assert(m.header.op == message.header.op);
assert(m.header.commit == message.header.commit);
assert(m.header.checksum_body == message.header.checksum_body);
assert(m.header.checksum == message.header.checksum);
log.debug("{}: on_{s}: ignoring (duplicate message)", .{ self.replica, command });
return null;
}
// Record the first receipt of this message:
assert(messages[message.header.replica] == null);
messages[message.header.replica] = message.ref();
// Count the number of unique messages now received:
const count = self.count_quorum(messages, message.header.command, message.header.nonce);
log.debug("{}: on_{s}: {} message(s)", .{ self.replica, command, count });
// Wait until we have exactly `threshold` messages for quorum:
if (count < threshold) {
log.debug("{}: on_{s}: waiting for quorum", .{ self.replica, command });
return null;
}
// This is not the first time we have had quorum, the state transition has already happened:
if (count > threshold) {
log.debug("{}: on_{s}: ignoring (quorum received already)", .{ self.replica, command });
return null;
}
assert(count == threshold);
return count;
}
fn append(self: *Replica, message: *Message) void {
assert(self.status == .normal);
assert(message.header.command == .prepare);
assert(message.header.view == self.view);
assert(message.header.op == self.op);
if (self.appending) {
log.debug("{}: append: skipping (slow journal outrun by quorum)", .{self.replica});
self.repair_later(message);
return;
}
log.debug("{}: append: appending to journal", .{self.replica});
self.appending_frame = async self.write_to_journal(message, &self.appending);
}
/// Returns whether `b` succeeds `a` by having a newer view or same view and newer op.
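/// For example, (view=1, op=5) is succeeded by (view=2, op=3), since ops may be reordered
/// across a view change.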
fn ascending_viewstamps(
self: *Replica,
a: *const Header,
b: *const Header,
) bool {
assert(a.command == .prepare);
assert(b.command == .prepare);
if (a.view < b.view) {
// We do not assert b.op >= a.op, ops may be reordered during a view change.
return true;
} else if (a.view > b.view) {
// We do not assert b.op <= a.op, ops may be reordered during a view change.
return false;
} else if (a.op < b.op) {
assert(a.view == b.view);
return true;
} else if (a.op > b.op) {
assert(a.view == b.view);
return false;
} else {
unreachable;
}
}
/// Choose a different replica each time if possible (excluding ourself).
/// The choice of replica is a deterministic function of:
/// 1. `choose_any_other_replica_ticks`, and
/// 2. whether the replica is connected and ready for sending in the MessageBus.
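/// For example, with replica=0 and replica_count=3, successive calls return 1, 2, 1, 2, ...
/// (always skipping ourself).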
fn choose_any_other_replica(self: *Replica) ?u16 {
var count: usize = 0;
while (count < self.replica_count) : (count += 1) {
self.choose_any_other_replica_ticks += 1;
const replica = @mod(
self.replica + self.choose_any_other_replica_ticks,
self.replica_count,
);
if (replica == self.replica) continue;
// TODO if (!MessageBus.can_send_to_replica(replica)) continue;
return @intCast(u16, replica);
}
return null;
}
fn commit_ops_through(self: *Replica, commit: u64) void {
// TODO Restrict `view_change` status only to the leader purely as defense-in-depth.
// Be careful of concurrency when doing this, as successive view changes can happen quickly.
assert(self.status == .normal or self.status == .view_change);
assert(self.commit_min <= self.commit_max);
assert(self.commit_min <= self.op);
assert(self.commit_max <= self.op or self.commit_max > self.op);
assert(commit <= self.op or commit > self.op);
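// The two asserts above are tautologies that document both possibilities: `commit_max` (and
// the incoming `commit`) may lie on either side of `self.op`, e.g. when we lag behind the
// cluster (see the comment on state transfer below).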
// We have already committed this far:
if (commit <= self.commit_min) return;
// Guard against multiple concurrent invocations of commit_ops_through():
if (self.committing) {
log.debug("{}: commit_ops_through: already committing...", .{self.replica});
return;
}
self.committing = true;
defer self.committing = false;
if (commit > self.commit_max) {
log.debug("{}: commit_ops_through: advancing commit_max={}..{}", .{
self.replica,
self.commit_max,
commit,
});
self.commit_max = commit;
}
if (!self.valid_hash_chain("commit_ops_through")) return;
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
// We may receive commit numbers for ops we do not yet have (`commit_max > self.op`):
// Even a naive state transfer may fail to correct for this.
while (self.commit_min < self.commit_max and self.commit_min < self.op) {
const op = self.commit_min + 1;
const checksum = self.journal.entry_for_op_exact(op).?.checksum;
if (self.create_prepare_message(op, checksum)) |prepare| {
defer self.message_bus.unref(prepare);
// Things change quickly when we're reading from disk:
if (self.status != .normal and self.status != .view_change) return;
// Guard against any re-entrancy concurrent to reading this prepare from disk:
assert(op == self.commit_min + 1);
assert(prepare.header.op == op);
assert(prepare.header.checksum == checksum);
const commit_min = self.commit_min;
self.commit_op(prepare);
assert(self.commit_min == commit_min + 1);
assert(self.commit_min <= self.op);
} else {
return;
}
}
// This is an optimization to expedite the view change without waiting for `repair_timeout`:
if (self.status == .view_change and self.repairs_allowed()) self.repair();
}
fn commit_op(self: *Replica, prepare: *const Message) void {
assert(self.status == .normal or self.status == .view_change);
assert(prepare.header.command == .prepare);
assert(prepare.header.op == self.commit_min + 1);
assert(prepare.header.op <= self.op);
if (!self.valid_hash_chain("commit_op")) return;
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
const reply = self.message_bus.get_message() orelse {
log.debug("{}: commit_op: waiting for a message", .{self.replica});
return;
};
defer self.message_bus.unref(reply);
const reply_body_size = @intCast(u32, self.state_machine.commit(
prepare.header.operation.to_state_machine_op(StateMachine),
prepare.buffer[@sizeOf(Header)..prepare.header.size],
reply.buffer[@sizeOf(Header)..],
));
log.debug("{}: commit_op: executing op={} checksum={} ({s})", .{
self.replica,
prepare.header.op,
prepare.header.checksum,
@tagName(prepare.header.operation.to_state_machine_op(StateMachine)),
});
self.commit_min += 1;
assert(self.commit_min == prepare.header.op);
if (self.commit_min > self.commit_max) self.commit_max = self.commit_min;
reply.header.* = .{
.command = .reply,
.operation = prepare.header.operation,
.nonce = prepare.header.checksum,
.client = prepare.header.client,
.request = prepare.header.request,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = prepare.header.op,
.commit = prepare.header.op,
.size = @sizeOf(Header) + reply_body_size,
};
assert(reply.header.offset == 0);
assert(reply.header.epoch == 0);
reply.header.set_checksum_body(reply.buffer[@sizeOf(Header)..reply.header.size]);
reply.header.set_checksum();
// TODO Add reply to the client table to answer future duplicate requests idempotently.
// Lookup client table entry using client id.
// If client's last request id is <= this request id, then update client table entry.
// Otherwise the client is already ahead of us, and we don't need to update the entry.
if (self.leader_index(self.view) == self.replica) {
log.debug("{}: commit_op: replying to client: {}", .{ self.replica, reply.header });
self.message_bus.send_message_to_client(reply.header.client, reply);
}
}
fn count_quorum(self: *Replica, messages: []?*Message, command: Command, nonce: u128) usize {
assert(messages.len == self.replica_count);
var count: usize = 0;
for (messages) |received, replica| {
if (received) |m| {
assert(m.header.command == command);
assert(m.header.nonce == nonce);
assert(m.header.cluster == self.cluster);
assert(m.header.replica == replica);
assert(m.header.view == self.view);
switch (command) {
.prepare_ok => {},
.start_view_change => assert(m.header.replica != self.replica),
.do_view_change => {},
.nack_prepare => {
assert(m.header.replica != self.replica);
assert(m.header.op == self.nack_prepare_op.?);
},
else => unreachable,
}
count += 1;
}
}
return count;
}
/// The caller owns the returned message, if any, which has exactly 1 reference.
fn create_do_view_change_or_start_view_message(self: *Replica, command: Command) ?*Message {
assert(command == .do_view_change or command == .start_view);
// We may also send a start_view message in normal status to resolve a follower's view jump:
assert(self.status == .normal or self.status == .view_change);
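// Enough space for the message header plus up to 7 of our latest journal headers: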
const size_max = @sizeOf(Header) * 8;
const message = self.message_bus.get_message() orelse return null;
defer self.message_bus.unref(message);
message.header.* = .{
.command = command,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = self.op,
.commit = self.commit_max,
};
var dest = std.mem.bytesAsSlice(Header, message.buffer[@sizeOf(Header)..size_max]);
const count = self.journal.copy_latest_headers_between(0, self.op, dest);
assert(count > 0);
message.header.size = @intCast(u32, @sizeOf(Header) + @sizeOf(Header) * count);
const body = message.buffer[@sizeOf(Header)..message.header.size];
message.header.set_checksum_body(body);
message.header.set_checksum();
return message.ref();
}
/// The caller owns the returned message, if any, which has exactly 1 reference.
fn create_prepare_message(self: *Replica, op: u64, checksum: ?u128) ?*Message {
if (op > self.op) {
self.create_prepare_message_notice(op, checksum, "beyond self.op");
return null;
}
const exact = self.journal.entry_for_op_exact_with_checksum(op, checksum);
if (exact == null) {
self.create_prepare_message_notice(op, checksum, "no entry exactly");
return null;
}
if (self.journal.faulty.bit(op)) {
self.create_prepare_message_notice(op, checksum, "faulty");
return null;
}
if (self.journal.dirty.bit(op)) {
self.create_prepare_message_notice(op, checksum, "dirty");
return null;
}
// Do not use this pointer beyond the read() below, as the header memory may then change:
const entry = exact.?;
const sector_size = @intCast(u32, Journal.sector_ceil(entry.size));
assert(sector_size >= entry.size);
const message = self.message_bus.get_message() orelse {
self.create_prepare_message_notice(op, checksum, "no message available");
return null;
};
defer self.message_bus.unref(message);
// Skip the disk read if the header is all we need:
if (entry.size == @sizeOf(Header)) {
message.header.* = entry.*;
return message.ref();
}
assert(entry.offset + sector_size <= self.journal.size_circular_buffer);
self.journal.read_sectors(
message.buffer[0..sector_size],
self.journal.offset_in_circular_buffer(entry.offset),
);
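// The journal header may have changed while we were reading from disk (see the caveat
// above), so verify everything we read before trusting it: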
if (!message.header.valid_checksum()) {
self.create_prepare_message_notice(op, checksum, "corrupt header after read");
return null;
}
const body = message.buffer[@sizeOf(Header)..message.header.size];
if (!message.header.valid_checksum_body(body)) {
self.create_prepare_message_notice(op, checksum, "corrupt body after read");
return null;
}
if (message.header.op != op) {
self.create_prepare_message_notice(op, checksum, "op changed during read");
return null;
}
if (checksum != null and message.header.checksum != checksum.?) {
self.create_prepare_message_notice(op, checksum, "checksum changed during read");
return null;
}
return message.ref();
}
fn create_prepare_message_notice(
self: *Replica,
op: u64,
checksum: ?u128,
notice: []const u8,
) void {
log.notice("{}: create_prepare_message: op={} checksum={}: {s}", .{
self.replica,
op,
checksum,
notice,
});
}
fn discard_repair_queue(self: *Replica) void {
while (self.repair_queue) |message| {
log.notice("{}: discard_repair_queue: op={}", .{ self.replica, message.header.op });
assert(self.repair_queue_len > 0);
self.repair_queue = message.next;
self.repair_queue_len -= 1;
message.next = null;
self.message_bus.unref(message);
}
assert(self.repair_queue_len == 0);
}
fn ignore_repair_message(self: *Replica, message: *const Message) bool {
assert(message.header.command == .request_start_view or
message.header.command == .request_headers or
message.header.command == .request_prepare or
message.header.command == .headers or
message.header.command == .nack_prepare);
const command: []const u8 = @tagName(message.header.command);
if (self.status != .normal and self.status != .view_change) {
log.debug("{}: on_{s}: ignoring ({})", .{ self.replica, command, self.status });
return true;
}
if (message.header.view < self.view) {
log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
return true;
}
// We should never view jump unless we know what our status should be after the jump:
// Otherwise we may be normal before the leader, or in a view change that has completed.
if (message.header.view > self.view) {
log.debug("{}: on_{s}: ignoring (newer view)", .{ self.replica, command });
return true;
}
if (self.ignore_repair_message_during_view_change(message)) return true;
if (message.header.replica == self.replica) {
log.warn("{}: on_{s}: ignoring (self)", .{ self.replica, command });
return true;
}
if (self.leader_index(self.view) != self.replica) {
switch (message.header.command) {
// Only the leader may receive these messages:
.request_start_view, .nack_prepare => {
log.warn("{}: on_{s}: ignoring (follower)", .{ self.replica, command });
return true;
},
// Only the leader may answer a request for a prepare without a nonce:
.request_prepare => if (message.header.nonce == 0) {
log.warn("{}: on_{s}: ignoring (no nonce)", .{ self.replica, command });
return true;
},
else => {},
}
}
if (message.header.command == .nack_prepare and self.status == .normal) {
log.debug("{}: on_{s}: ignoring (view started)", .{ self.replica, command });
return true;
}
// Only allow repairs for same view as defense-in-depth:
assert(message.header.view == self.view);
return false;
}
fn ignore_repair_message_during_view_change(self: *Replica, message: *const Message) bool {
if (self.status != .view_change) return false;
const command: []const u8 = @tagName(message.header.command);
switch (message.header.command) {
.request_start_view => {
log.debug("{}: on_{s}: ignoring (view change)", .{ self.replica, command });
return true;
},
.request_headers, .request_prepare => {
if (self.leader_index(self.view) != message.header.replica) {
log.debug("{}: on_{s}: ignoring (view change, requested by follower)", .{
self.replica,
command,
});
return true;
}
},
.headers, .nack_prepare => {
if (self.leader_index(self.view) != self.replica) {
log.debug("{}: on_{s}: ignoring (view change, received by follower)", .{
self.replica,
command,
});
return true;
} else if (!self.do_view_change_quorum) {
log.debug("{}: on_{s}: ignoring (view change, waiting for quorum)", .{
self.replica,
command,
});
return true;
}
},
else => unreachable,
}
return false;
}
fn ignore_view_change_message(self: *Replica, message: *const Message) bool {
assert(message.header.command == .start_view_change or
message.header.command == .do_view_change or
message.header.command == .start_view);
assert(message.header.view > 0); // The initial view is already zero.
const command: []const u8 = @tagName(message.header.command);
// 4.3 Recovery
// While a replica's status is recovering it does not participate in either the request
// processing protocol or the view change protocol.
// This is critical for correctness (to avoid data loss):
if (self.status == .recovering) {
log.debug("{}: on_{s}: ignoring (recovering)", .{ self.replica, command });
return true;
}
if (message.header.view < self.view) {
log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
return true;
}
if (message.header.view == self.view and self.status == .normal) {
if (message.header.command != .start_view or !self.view_jump_barrier) {
log.debug("{}: on_{s}: ignoring (view started)", .{ self.replica, command });
return true;
}
}
// These may be caused by faults in the network topology.
switch (message.header.command) {
.start_view_change, .start_view => {
if (message.header.replica == self.replica) {
log.warn("{}: on_{s}: ignoring (self)", .{ self.replica, command });
return true;
}
},
.do_view_change => {
if (self.leader_index(message.header.view) != self.replica) {
log.warn("{}: on_{s}: ignoring (follower)", .{ self.replica, command });
return true;
}
},
else => unreachable,
}
return false;
}
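/// Returns whether this prepare repairs an op we may already have (an older view, or the
/// current view at or below our op) rather than being a new prepare to be appended.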
fn is_repair(self: *Replica, message: *const Message) bool {
assert(message.header.command == .prepare);
if (self.status == .normal) {
if (message.header.view < self.view) return true;
if (message.header.view == self.view and message.header.op <= self.op) return true;
} else if (self.status == .view_change) {
if (message.header.view < self.view) return true;
// The view has already started or is newer.
}
return false;
}
/// Advances `op` to where we need to be before `header` can be processed as a prepare:
fn jump_to_newer_op_in_normal_status(self: *Replica, header: *const Header) void {
assert(self.status == .normal);
assert(self.follower());
assert(header.view == self.view);
assert(header.op > self.op + 1);
// We may have learned of a higher `commit_max` through a commit message before jumping to a
// newer op that is less than `commit_max` but greater than `commit_min`:
assert(header.op > self.commit_min);
log.debug("{}: jump_to_newer_op: advancing: op={}..{} checksum={}..{}", .{
self.replica,
self.op,
header.op - 1,
self.journal.entry_for_op_exact(self.op).?.checksum,
header.nonce,
});
self.op = header.op - 1;
assert(self.op >= self.commit_min);
assert(self.op + 1 == header.op);
}
fn message_body_as_headers(self: *Replica, message: *const Message) []Header {
// TODO Assert message commands that we expect this to be called for.
assert(message.header.size > @sizeOf(Header)); // Body must contain at least one header.
return std.mem.bytesAsSlice(Header, message.buffer[@sizeOf(Header)..message.header.size]);
}
/// Panics if immediate neighbors in the same view would have a broken hash chain.
/// Assumes gaps and does not require that a precedes b.
fn panic_if_hash_chain_would_break_in_the_same_view(
self: *Replica,
a: *const Header,
b: *const Header,
) void {
assert(a.command == .prepare);
assert(b.command == .prepare);
assert(a.cluster == b.cluster);
if (a.view == b.view and a.op + 1 == b.op and a.checksum != b.nonce) {
assert(a.valid_checksum());
assert(b.valid_checksum());
log.emerg("{}: panic_if_hash_chain_would_break: a: {}", .{ self.replica, a });
log.emerg("{}: panic_if_hash_chain_would_break: b: {}", .{ self.replica, b });
@panic("hash chain would break");
}
}
/// Starting from the latest journal entry, backfill any missing or disconnected headers.
/// A header is disconnected if it breaks the hash chain with its newer neighbor to the right.
/// Since we work backwards from the latest entry, we should always be able to fix the chain.
/// Once headers are connected, backfill any dirty or faulty prepares.
fn repair(self: *Replica) void {
self.repair_timeout.reset();
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(self.commit_min <= self.op);
assert(self.commit_min <= self.commit_max);
// TODO Handle case where we are requesting reordered headers that no longer exist.
// We expect these always to exist:
assert(self.journal.entry_for_op_exact(self.commit_min) != null);
assert(self.journal.entry_for_op_exact(self.op) != null);
// Resolve any view jump by requesting the leader's latest op:
if (self.view_jump_barrier) {
assert(self.status == .normal);
assert(self.follower());
log.notice("{}: repair: resolving view jump barrier", .{self.replica});
self.send_header_to_replica(self.leader_index(self.view), .{
.command = .request_start_view,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
});
return;
}
// Request outstanding committed prepares to advance our op number:
// This handles the case of an idle cluster, where a follower will not otherwise advance.
// This is not required for correctness, but for durability.
if (self.op < self.commit_max) {
// If the leader repairs during a view change, it will have already advanced `self.op`
// to the latest op according to the quorum of `do_view_change` messages received, so we
// must therefore be in normal status:
assert(self.status == .normal);
assert(self.follower());
log.notice("{}: repair: op={} < commit_max={}", .{
self.replica,
self.op,
self.commit_max,
});
// We need to advance our op number and therefore have to `request_prepare`,
// since only `on_prepare()` can do this, not `repair_header()` in `on_headers()`.
self.send_header_to_replica(self.leader_index(self.view), .{
.command = .request_prepare,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = self.commit_max,
// We cannot yet know the nonce so we set it to 0:
// The nonce is optional when requesting from the leader but required otherwise.
.nonce = 0,
});
return;
}
// Request any missing or disconnected headers:
// TODO Snapshots: Ensure that self.commit_min op always exists in the journal.
var broken = self.journal.find_latest_headers_break_between(self.commit_min, self.op);
if (broken) |range| {
log.notice("{}: repair: latest break: {}", .{ self.replica, range });
assert(range.op_min > self.commit_min);
assert(range.op_max < self.op);
// A range of `op_min=0` or `op_max=0` should be impossible as a header break:
// This is the init op that is prepared when the cluster is initialized.
assert(range.op_min > 0);
assert(range.op_max > 0);
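// For example, if commit_min=1 and op=5 but the header for op 3 is missing or disconnected,
// the break would be the range 3..3, which we request below by repurposing the commit
// (op_min) and op (op_max) fields of the request_headers message.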
if (self.choose_any_other_replica()) |replica| {
self.send_header_to_replica(replica, .{
.command = .request_headers,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.commit = range.op_min,
.op = range.op_max,
});
}
return;
}
// Assert that all headers are now present and connected with a perfect hash chain:
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
assert(self.valid_hash_chain_between(self.commit_min, self.op));
// Request and repair any dirty or faulty prepares:
if (self.journal.dirty.len > 0) return self.repair_prepares();
// Commit ops, which may in turn discover faulty prepares and drive more repairs:
if (self.commit_min < self.commit_max) return self.commit_ops_through(self.commit_max);
if (self.status == .view_change and self.leader_index(self.view) == self.replica) {
// TODO Drive uncommitted ops to completion through replication to a majority:
if (self.commit_max < self.op) {
log.debug("{}: repair: waiting for uncomitted ops to replicate", .{self.replica});
return;
}
// Start the view as the new leader:
self.start_view_as_the_new_leader();
}
}
/// Decide whether or not to insert or update a header:
///
/// A repair may never advance or replace `self.op` (critical for correctness):
///
/// Repairs must always backfill in behind `self.op` but may never advance `self.op`.
/// Otherwise, a split-brain leader may reapply an op that was removed through a view
/// change, which could be committed by a higher `commit_max` number in a commit message.
///
/// See this commit message for an example:
/// https://github.com/coilhq/tigerbeetle/commit/6119c7f759f924d09c088422d5c60ac6334d03de
///
/// Our guiding principles around repairs in general:
///
/// * The latest op makes sense of everything else and must not be replaced with a different op
/// or advanced except by the leader in the current view.
///
/// * Do not jump to a view in normal status without imposing a view jump barrier.
///
/// * Do not commit before resolving the view jump barrier with the leader.
///
/// * Do not commit until the hash chain between `self.commit_min` and `self.op` is fully
/// connected, to ensure that all the ops in this range are correct.
///
/// * Ensure that `self.commit_max` is never advanced for a newer view without first imposing a
/// view jump barrier, otherwise `self.commit_max` may again refer to different ops.
///
/// * Ensure that `self.op` is never advanced by a repair since repairs may occur in a view
/// change where the view has not yet started.
///
/// * Do not assume that an existing op with an older viewstamp can be replaced by an op with a
/// newer viewstamp, but only compare ops in the same view or with reference to the hash chain.
/// See Figure 3.7 on page 41 in Diego Ongaro's Raft thesis for an example of where an op with
/// an older view number may be committed instead of an op with a newer view number:
/// http://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf.
///
fn repair_header(self: *Replica, header: *const Header) bool {
assert(header.valid_checksum());
assert(header.invalid() == null);
assert(header.command == .prepare);
switch (self.status) {
.normal => assert(header.view <= self.view),
.view_change => assert(header.view < self.view),
else => unreachable,
}
if (header.op > self.op) {
log.debug("{}: repair_header: ignoring (would advance self.op)", .{self.replica});
return false;
} else if (header.op == self.op) {
if (self.journal.entry_for_op_exact_with_checksum(self.op, header.checksum) == null) {
log.debug("{}: repair_header: ignoring (would replace self.op)", .{self.replica});
return false;
}
}
if (self.journal.entry(header)) |existing| {
// Do not replace any existing op lightly as doing so may impair durability and even
// violate correctness by undoing a prepare already acknowledged to the leader:
if (existing.checksum == header.checksum) {
if (self.journal.dirty.bit(header.op)) {
// We may safely replace this existing op (with hash chain and overlap caveats):
log.debug("{}: repair_header: exists (dirty checksum)", .{self.replica});
} else {
log.debug("{}: repair_header: ignoring (clean checksum)", .{self.replica});
return false;
}
} else if (existing.view == header.view) {
// We expect that the same view and op must have the same checksum:
assert(existing.op != header.op);
// The journal must have wrapped:
if (existing.op < header.op) {
// We may safely replace this existing op (with hash chain and overlap caveats):
log.debug("{}: repair_header: exists (same view, older op)", .{self.replica});
} else if (existing.op > header.op) {
log.debug("{}: repair_header: ignoring (same view, newer op)", .{self.replica});
return false;
} else {
unreachable;
}
} else {
assert(existing.view != header.view);
assert(existing.op == header.op or existing.op != header.op);
if (self.repair_header_would_connect_hash_chain(header)) {
// We may safely replace this existing op:
log.debug("{}: repair_header: exists (hash chain break)", .{self.replica});
} else {
// We cannot replace this existing op until we are sure that doing so would not
// violate any prior commitments made to the leader.
log.debug("{}: repair_header: ignoring (hash chain doubt)", .{self.replica});
return false;
}
}
} else {
// We may repair the gap (with hash chain and overlap caveats):
log.debug("{}: repair_header: gap", .{self.replica});
}
// Caveat: Do not repair an existing op or gap if doing so would break the hash chain:
if (self.repair_header_would_break_hash_chain_with_next_entry(header)) {
log.debug("{}: repair_header: ignoring (would break hash chain)", .{self.replica});
return false;
}
// Caveat: Do not repair an existing op or gap if doing so would overlap another:
if (self.repair_header_would_overlap_another(header)) {
if (self.repair_header_would_connect_hash_chain(header)) {
// We may overlap previous entries in order to connect the hash chain:
log.debug("{}: repair_header: overlap (would connect hash chain)", .{self.replica});
} else {
log.debug("{}: repair_header: ignoring (would overlap another)", .{self.replica});
return false;
}
}
// TODO Snapshots: Skip if this header is already snapshotted.
assert(header.op < self.op or
self.journal.entry_for_op_exact(self.op).?.checksum == header.checksum);
self.journal.set_entry_as_dirty(header);
return true;
}
/// If we repair this header, then would this break the hash chain only to our immediate right?
/// This offers a weak guarantee compared to `repair_header_would_connect_hash_chain()` below.
/// However, this is useful for allowing repairs when the hash chain is sparse.
fn repair_header_would_break_hash_chain_with_next_entry(
self: *Replica,
header: *const Header,
) bool {
if (self.journal.previous_entry(header)) |previous| {
self.panic_if_hash_chain_would_break_in_the_same_view(previous, header);
}
if (self.journal.next_entry(header)) |next| {
self.panic_if_hash_chain_would_break_in_the_same_view(header, next);
if (header.checksum == next.nonce) {
assert(header.view <= next.view);
assert(header.op + 1 == next.op);
// We don't break with `next` but this is no guarantee that `next` does not break.
return false;
} else {
// If the journal has wrapped, then err in favor of a break regardless of op order:
return true;
}
}
// There is no entry to the immediate right, so we cannot detect a break here and do not
// consider this a break (a weak guarantee, see above):
return false;
}
/// If we repair this header, then would this connect the hash chain through to the latest op?
/// This offers a strong guarantee that may be used to replace or overlap an existing op.
///
/// Here is an example of what could go wrong if we did not check for complete connection:
///
/// 1. We do a prepare that's going to be committed.
/// 2. We do a stale prepare to the right of this, ignoring the hash chain break to the left.
/// 3. We do another stale prepare that replaces the first op because it connects to the second.
///
/// This would violate our quorum replication commitment to the leader.
/// The mistake in this example was not that we ignored the break to the left, which we must do
/// to repair reordered ops, but that we did not check for complete connection to the right.
fn repair_header_would_connect_hash_chain(self: *Replica, header: *const Header) bool {
var entry = header;
while (entry.op < self.op) {
if (self.journal.next_entry(entry)) |next| {
if (entry.checksum == next.nonce) {
assert(entry.view <= next.view);
assert(entry.op + 1 == next.op);
entry = next;
} else {
return false;
}
} else {
return false;
}
}
assert(entry.op == self.op);
assert(entry.checksum == self.journal.entry_for_op_exact(self.op).?.checksum);
return true;
}
/// If we repair this header, then would this overlap and overwrite part of another batch?
/// Journal entries have variable-sized batches that may overlap if entries are disconnected.
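/// For example (illustrative offsets): if op 3 occupies offsets [0, 1024) but the header for
/// op 4 claims offset 512, then repairing op 4 would overwrite the tail of op 3.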
fn repair_header_would_overlap_another(self: *Replica, header: *const Header) bool {
// TODO Snapshots: Handle journal wrap around.
{
// Look behind this entry for any preceding entry that this would overlap:
var op: u64 = header.op;
while (op > 0) {
op -= 1;
if (self.journal.entry_for_op(op)) |neighbor| {
if (self.journal.next_offset(neighbor) > header.offset) return true;
break;
}
}
}
{
// Look beyond this entry for any succeeding entry that this would overlap:
var op: u64 = header.op + 1;
while (op <= self.op) : (op += 1) {
if (self.journal.entry_for_op(op)) |neighbor| {
if (self.journal.next_offset(header) > neighbor.offset) return true;
break;
}
}
}
return false;
}
fn repair_last_queued_message_if_any(self: *Replica) void {
if (self.status != .normal and self.status != .view_change) return;
if (!self.repairs_allowed()) return;
while (!self.repairing) {
if (self.repair_queue) |message| {
defer self.message_bus.unref(message);
assert(self.repair_queue_len > 0);
self.repair_queue = message.next;
self.repair_queue_len -= 1;
message.next = null;
self.on_repair(message);
assert(self.repair_queue != message); // Catch an accidental requeue by on_repair().
} else {
assert(self.repair_queue_len == 0);
break;
}
}
}
fn repair_later(self: *Replica, message: *Message) void {
assert(self.repairs_allowed());
assert(self.appending or self.repairing);
assert(message.references > 0);
assert(message.header.command == .prepare);
assert(message.next == null);
if (!self.repairing) {
log.debug("{}: repair_later: repairing immediately", .{self.replica});
self.on_repair(message);
return;
}
log.debug("{}: repair_later: {} message(s)", .{ self.replica, self.repair_queue_len });
if (self.repair_queue_len == self.repair_queue_max) {
log.debug("{}: repair_later: dropping", .{self.replica});
return;
}
log.debug("{}: repair_later: queueing", .{self.replica});
assert(self.repair_queue_len < self.repair_queue_max);
message.next = self.repair_queue;
self.repair_queue = message.ref();
self.repair_queue_len += 1;
}
fn repair_prepares(self: *Replica) void {
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(self.journal.dirty.len > 0);
if (self.repair_queue_len == self.repair_queue_max) {
log.debug("{}: repair_prepares: waiting for repair queue to drain", .{self.replica});
return;
}
// We may be appending to or repairing the journal concurrently.
// We do not want to re-request any of these prepares unnecessarily.
// TODO Add journal.writing bits to clear this up (and needed anyway).
if (self.appending or self.repairing) {
log.debug("{}: repair_prepares: waiting for dirty bits to settle", .{self.replica});
return;
}
// Request enough prepares to fill the repair queue:
var budget = self.repair_queue_max - self.repair_queue_len;
assert(budget > 0);
var op = self.op + 1;
while (op > 0) {
op -= 1;
if (self.journal.dirty.bit(op)) {
// If this is an uncommitted op, and we are the leader in `view_change` status, then
// we will `request_prepare` from the rest of the cluster, set `nack_prepare_op`,
// and stop repairing any further prepares:
// This will also rebroadcast any `request_prepare` every `repair_timeout` tick.
self.repair_prepare(op);
if (self.nack_prepare_op) |nack_prepare_op| {
assert(nack_prepare_op == op);
assert(self.status == .view_change);
assert(self.leader_index(self.view) == self.replica);
assert(op > self.commit_max);
return;
}
// Otherwise, we continue to request prepares until our budget is used up:
budget -= 1;
if (budget == 0) {
log.debug("{}: repair_prepares: request budget used up", .{self.replica});
return;
}
} else {
assert(!self.journal.faulty.bit(op));
}
}
}
/// During a view change, for uncommitted ops, which may be one or two, we optimize for latency:
///
/// * request a `prepare` or `nack_prepare` from all followers in parallel,
/// * repair as soon as we receive a `prepare`, or
/// * discard as soon as we receive a majority of `nack_prepare` messages for the same checksum.
///
/// For committed ops, which likely represent the bulk of repairs, we optimize for throughput:
///
/// * have multiple requests in flight to prime the repair queue,
/// * rotate these requests across the cluster round-robin,
/// * to spread the load across connected peers,
/// * to take advantage of each peer's outgoing bandwidth, and
/// * to parallelize disk seeks and disk read bandwidth.
///
/// This is effectively "many-to-one" repair, where a single replica recovers using the
/// resources of many replicas, for faster recovery.
fn repair_prepare(self: *Replica, op: u64) void {
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(self.journal.dirty.bit(op));
const request_prepare = Header{
.command = .request_prepare,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = op,
// If we request a prepare from a follower, as below, it is critical to pass a checksum:
// Otherwise we could receive different prepares for the same op number.
.nonce = self.journal.entry_for_op_exact(op).?.checksum,
};
if (self.status == .view_change and op > self.commit_max) {
// Only the leader is allowed to do repairs in a view change:
assert(self.leader_index(self.view) == self.replica);
// Initialize the `nack_prepare` quorum counter for this uncommitted op:
// It is also possible that we may start repairing a lower uncommitted op, having
// initialized `nack_prepare_op` before we learn of a higher uncommitted dirty op,
// in which case we also want to reset the quorum counter.
if (self.nack_prepare_op) |nack_prepare_op| {
assert(nack_prepare_op <= op);
if (nack_prepare_op != op) {
self.nack_prepare_op = op;
self.reset_quorum_counter(self.nack_prepare_from_other_replicas, .nack_prepare);
}
} else {
self.nack_prepare_op = op;
self.reset_quorum_counter(self.nack_prepare_from_other_replicas, .nack_prepare);
}
log.debug("{}: repair_prepare: requesting uncommitted op={}", .{ self.replica, op });
assert(self.nack_prepare_op.? == op);
assert(request_prepare.nonce != 0);
self.send_header_to_other_replicas(request_prepare);
} else {
log.debug("{}: repair_prepare: requesting committed op={}", .{ self.replica, op });
// We expect that `repair_prepare()` is called in reverse chronological order:
// Any uncommitted ops should have already been dealt with.
// We never roll back committed ops, and thus never regard any `nack_prepare` responses.
assert(self.nack_prepare_op == null);
assert(request_prepare.nonce != 0);
if (self.choose_any_other_replica()) |replica| {
self.send_header_to_replica(replica, request_prepare);
}
}
}
fn repairs_allowed(self: *Replica) bool {
switch (self.status) {
.view_change => {
if (self.do_view_change_quorum) {
assert(self.leader_index(self.view) == self.replica);
return true;
} else {
return false;
}
},
.normal => return true,
else => return false,
}
}
/// Replicates to the next replica in the configuration (until we get back to the leader):
/// Replication starts and ends with the leader; we never forward back to the leader.
/// Does not flood the network with prepares that have already committed.
/// TODO Use recent heartbeat data for next replica to leapfrog if faulty.
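/// For example, with 3 replicas and replica 0 as leader: 0 prepares to 1, 1 forwards to 2,
/// and 2 does not forward back to 0.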
fn replicate(self: *Replica, message: *Message) void {
assert(self.status == .normal);
assert(message.header.command == .prepare);
assert(message.header.view == self.view);
assert(message.header.op == self.op);
if (message.header.op <= self.commit_max) {
log.debug("{}: replicate: not replicating (committed)", .{self.replica});
return;
}
const next = @mod(self.replica + 1, @intCast(u16, self.replica_count));
if (next == self.leader_index(message.header.view)) {
log.debug("{}: replicate: not replicating (completed)", .{self.replica});
return;
}
log.debug("{}: replicate: replicating to replica {}", .{ self.replica, next });
self.send_message_to_replica(next, message);
}
fn reset_quorum_counter(self: *Replica, messages: []?*Message, command: Command) void {
var count: usize = 0;
for (messages) |*received, replica| {
if (received.*) |message| {
assert(message.header.command == command);
assert(message.header.replica == replica);
assert(message.header.view <= self.view);
self.message_bus.unref(message);
count += 1;
}
received.* = null;
}
log.debug("{}: reset {} {s} message(s)", .{ self.replica, count, @tagName(command) });
}
fn reset_quorum_do_view_change(self: *Replica) void {
self.reset_quorum_counter(self.do_view_change_from_all_replicas, .do_view_change);
self.do_view_change_quorum = false;
}
fn reset_quorum_nack_prepare(self: *Replica) void {
self.reset_quorum_counter(self.nack_prepare_from_other_replicas, .nack_prepare);
self.nack_prepare_op = null;
}
fn reset_quorum_prepare(self: *Replica) void {
if (self.prepare_message) |message| {
self.request_checksum = null;
self.message_bus.unref(message);
self.prepare_message = null;
self.prepare_attempt = 0;
self.prepare_timeout.stop();
self.reset_quorum_counter(self.prepare_ok_from_all_replicas, .prepare_ok);
}
assert(self.request_checksum == null);
assert(self.prepare_message == null);
assert(self.prepare_attempt == 0);
assert(self.prepare_timeout.ticking == false);
for (self.prepare_ok_from_all_replicas) |received| assert(received == null);
}
fn reset_quorum_start_view_change(self: *Replica) void {
self.reset_quorum_counter(self.start_view_change_from_other_replicas, .start_view_change);
self.start_view_change_quorum = false;
}
fn send_prepare_ok(self: *Replica, header: *const Header) void {
assert(header.command == .prepare);
assert(header.cluster == self.cluster);
assert(header.replica == self.leader_index(header.view));
assert(header.view <= self.view);
assert(header.op <= self.op or header.view < self.view);
if (self.status != .normal) {
log.debug("{}: send_prepare_ok: not sending ({})", .{ self.replica, self.status });
return;
}
if (header.op > self.op) {
assert(header.view < self.view);
// An op may be reordered concurrently through a view change while being journalled:
log.debug("{}: send_prepare_ok: not sending (reordered)", .{self.replica});
return;
}
assert(self.status == .normal);
// After a view change followers must send prepare_oks for uncommitted ops with older views:
// However, we will only ever send to the leader of our current view.
assert(header.view <= self.view);
assert(header.op <= self.op);
if (header.op <= self.commit_max) {
log.debug("{}: send_prepare_ok: not sending (committed)", .{self.replica});
return;
}
// TODO Think through a scenario where not doing this would be wrong.
if (!self.valid_hash_chain("send_prepare_ok")) return;
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
if (self.journal.has_clean(header)) {
// It is crucial that replicas stop accepting prepare messages from earlier views once
// they start the view change protocol. Without this constraint, the system could get
// into a state in which there are two active primaries: the old one, which hasn't
// failed but is merely slow or not well connected to the network, and the new one. If a
// replica sent a prepare_ok message to the old primary after sending its log to the new
// one, the old primary might commit an operation that the new primary doesn't learn
// about in the do_view_change messages.
// We therefore only send to the leader of the current view, never to the leader of the
// prepare header's view:
// TODO We could surprise the new leader with this, if it is preparing a different op.
self.send_header_to_replica(self.leader_index(self.view), .{
.command = .prepare_ok,
.nonce = header.checksum,
.client = header.client,
.cluster = self.cluster,
.replica = self.replica,
.view = header.view,
.op = header.op,
.commit = header.commit,
.offset = header.offset,
.epoch = header.epoch,
.request = header.request,
.operation = header.operation,
});
} else {
log.debug("{}: send_prepare_ok: not sending (dirty)", .{self.replica});
return;
}
}
fn send_prepare_oks_through(self: *Replica, op_through: u64) void {
assert(self.status == .normal);
assert(op_through >= self.commit_max);
assert(op_through <= self.op);
if (!self.valid_hash_chain("send_prepare_oks_through")) return;
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
// Zig function parameters are immutable, so iterate over a mutable local copy:
var op = op_through;
while (op > self.commit_max and op < self.op) : (op += 1) {
const header = self.journal.entry_for_op_exact(op).?;
assert(header.op == op);
assert(header.operation != .init);
self.send_prepare_ok(header);
}
}
fn send_prepare_to_replica(self: *Replica, replica: u16, op: u64, checksum: ?u128) void {
assert(self.status == .normal or self.status == .view_change);
assert(replica != self.replica);
assert(!self.sending_prepare);
self.sending_prepare = true;
defer self.sending_prepare = false;
if (self.create_prepare_message(op, checksum)) |message| {
defer self.message_bus.unref(message);
assert(message.header.op == op);
assert(checksum == null or message.header.checksum == checksum.?);
self.send_message_to_replica(replica, message);
}
}
fn send_start_view_change(self: *Replica) void {
assert(self.status == .view_change);
assert(!self.do_view_change_quorum);
// Send only to other replicas (and not to ourself) to avoid a quorum off-by-one error:
// This could happen if the replica mistakenly counts its own message in the quorum.
self.send_header_to_other_replicas(.{
.command = .start_view_change,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
});
}
fn send_do_view_change(self: *Replica) void {
assert(self.status == .view_change);
assert(self.start_view_change_quorum);
assert(!self.do_view_change_quorum);
const count_start_view_change = self.count_quorum(
self.start_view_change_from_other_replicas,
.start_view_change,
0,
);
assert(count_start_view_change >= self.f);
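// Note that `start_view_change_from_other_replicas` excludes our own message, so `f`
// messages here plus our own implicit start_view_change make up the quorum of f + 1.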
const message = self.create_do_view_change_or_start_view_message(.do_view_change) orelse {
log.warn("{}: send_do_view_change: dropping do_view_change, no message available", .{
self.replica,
});
return;
};
defer self.message_bus.unref(message);
assert(message.references == 1);
assert(message.header.command == .do_view_change);
assert(message.header.view == self.view);
assert(message.header.op == self.op);
assert(message.header.commit == self.commit_max);
// TODO Assert that latest header in message body matches self.op.
self.send_message_to_replica(self.leader_index(self.view), message);
}
fn send_header_to_other_replicas(self: *Replica, header: Header) void {
var replica: u16 = 0;
while (replica < self.replica_count) : (replica += 1) {
if (replica != self.replica) {
self.send_header_to_replica(replica, header);
}
}
}
// TODO Work out the maximum number of messages a replica may output per tick() or on_message().
fn send_header_to_replica(self: *Replica, replica: u16, header: Header) void {
log.debug("{}: sending {s} to replica {}: {}", .{
self.replica,
@tagName(header.command),
replica,
header,
});
assert(header.replica == self.replica);
assert(header.view == self.view);
self.message_bus.send_header_to_replica(replica, header);
}
fn send_message_to_other_replicas(self: *Replica, message: *Message) void {
var replica: u16 = 0;
while (replica < self.replica_count) : (replica += 1) {
if (replica != self.replica) {
self.send_message_to_replica(replica, message);
}
}
}
fn send_message_to_replica(self: *Replica, replica: u16, message: *Message) void {
log.debug("{}: sending {s} to replica {}: {}", .{
self.replica,
@tagName(message.header.command),
replica,
message.header,
});
switch (message.header.command) {
.request => {
// We do not assert message.header.replica as we would for send_header_to_replica()
// because we may forward .request or .prepare messages.
assert(self.status == .normal);
assert(message.header.view <= self.view);
},
.prepare => {
switch (self.status) {
.normal => assert(message.header.view <= self.view),
.view_change => assert(message.header.view < self.view),
else => unreachable,
}
},
.do_view_change => {
assert(self.status == .view_change);
assert(self.start_view_change_quorum);
assert(!self.do_view_change_quorum);
assert(message.header.view == self.view);
},
.start_view => switch (self.status) {
.normal => {
// A follower may ask the leader to resend the start_view message.
assert(!self.start_view_change_quorum);
assert(!self.do_view_change_quorum);
assert(message.header.view == self.view);
},
.view_change => {
assert(self.start_view_change_quorum);
assert(self.do_view_change_quorum);
assert(message.header.view == self.view);
},
else => unreachable,
},
.headers => {
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view == self.view);
assert(message.header.replica == self.replica);
},
else => unreachable,
}
assert(message.header.cluster == self.cluster);
self.message_bus.send_message_to_replica(replica, message);
}
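/// Sets `latest` to the header among `headers` (and the existing `latest`, unless reserved)
/// with the highest view, breaking ties by the higher op.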
fn set_latest_header(self: *Replica, headers: []Header, latest: *Header) void {
switch (latest.command) {
.reserved, .prepare => assert(latest.valid_checksum()),
else => unreachable,
}
for (headers) |header| {
assert(header.valid_checksum());
assert(header.invalid() == null);
assert(header.command == .prepare);
if (latest.command == .reserved) {
latest.* = header;
} else if (header.view > latest.view) {
latest.* = header;
} else if (header.view == latest.view and header.op > latest.op) {
latest.* = header;
}
}
}
fn set_latest_op_and_k(self: *Replica, latest: *const Header, k: u64, method: []const u8) void {
assert(self.status == .view_change);
assert(latest.valid_checksum());
assert(latest.invalid() == null);
assert(latest.command == .prepare);
assert(latest.cluster == self.cluster);
assert(latest.view < self.view); // Latest normal view before this view change.
// Ops may be rewound through a view change so we use `self.commit_max` and not `self.op`:
assert(latest.op >= self.commit_max);
// We expect that `commit_min` may be greater than `latest.commit` because the latter is
// only the commit number at the time the latest op was prepared (but not committed).
// We therefore only assert `latest.commit` against `commit_max` above and `k` below.
assert(k >= latest.commit);
assert(k >= self.commit_max);
log.debug("{}: {s}: view={} op={}..{} commit={}..{} checksum={} offset={}", .{
self.replica,
method,
self.view,
self.op,
latest.op,
self.commit_max,
k,
latest.checksum,
latest.offset,
});
self.op = latest.op;
self.commit_max = k;
// Do not set the latest op as dirty if we already have it exactly:
// Otherwise, this would trigger a repair and delay the view change.
if (self.journal.entry_for_op_exact_with_checksum(latest.op, latest.checksum) == null) {
self.journal.set_entry_as_dirty(latest);
} else {
log.debug("{}: {s}: latest op exists exactly", .{ self.replica, method });
}
assert(self.op == latest.op);
self.journal.remove_entries_from(self.op + 1);
assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);
}
fn start_view_as_the_new_leader(self: *Replica) void {
assert(self.status == .view_change);
assert(self.leader_index(self.view) == self.replica);
assert(self.do_view_change_quorum);
// TODO Do one last count of our do_view_change quorum messages.
assert(!self.view_jump_barrier);
assert(self.commit_min == self.op);
assert(self.commit_max == self.op);
assert(self.valid_hash_chain_between(self.commit_min, self.op));
assert(self.journal.dirty.len == 0);
assert(self.journal.faulty.len == 0);
assert(self.nack_prepare_op == null);
const start_view = self.create_do_view_change_or_start_view_message(.start_view) orelse {
log.warn("{}: start_view_as_the_new_leader: waiting for a message", .{self.replica});
return;
};
defer self.message_bus.unref(start_view);
self.transition_to_normal_status(self.view);
assert(self.status == .normal);
assert(self.leader());
assert(start_view.references == 1);
assert(start_view.header.command == .start_view);
assert(start_view.header.view == self.view);
assert(start_view.header.op == self.op);
assert(start_view.header.commit == self.commit_max);
self.send_message_to_other_replicas(start_view);
}
fn transition_to_normal_status(self: *Replica, new_view: u64) void {
log.debug("{}: transition_to_normal_status: view={}", .{ self.replica, new_view });
// In the VRR paper it's possible to transition from .normal to .normal for the same view.
// For example, this could happen after a state transfer triggered by an op jump.
assert(new_view >= self.view);
self.view = new_view;
self.status = .normal;
if (self.leader()) {
log.debug("{}: transition_to_normal_status: leader", .{self.replica});
self.ping_timeout.start();
self.commit_timeout.start();
self.normal_timeout.stop();
self.view_change_timeout.stop();
self.view_change_message_timeout.stop();
self.repair_timeout.start();
} else {
log.debug("{}: transition_to_normal_status: follower", .{self.replica});
self.ping_timeout.start();
self.commit_timeout.stop();
self.normal_timeout.start();
self.view_change_timeout.stop();
self.view_change_message_timeout.stop();
self.repair_timeout.start();
}
self.reset_quorum_prepare();
self.reset_quorum_start_view_change();
self.reset_quorum_do_view_change();
self.reset_quorum_nack_prepare();
assert(self.start_view_change_quorum == false);
assert(self.do_view_change_quorum == false);
assert(self.nack_prepare_op == null);
}
/// A replica i that notices the need for a view change advances its view, sets its status to
/// view_change, and sends a ⟨start_view_change v, i⟩ message to all the other replicas,
/// where v identifies the new view. A replica notices the need for a view change either based
/// on its own timer, or because it receives a start_view_change or do_view_change message for
/// a view with a larger number than its own view.
fn transition_to_view_change_status(self: *Replica, new_view: u64) void {
log.debug("{}: transition_to_view_change_status: view={}", .{ self.replica, new_view });
assert(new_view > self.view);
self.view = new_view;
self.status = .view_change;
self.ping_timeout.stop();
self.commit_timeout.stop();
self.normal_timeout.stop();
self.view_change_timeout.start();
self.view_change_message_timeout.start();
self.repair_timeout.stop();
// Do not assume that it is enough to reset quorum counters only on entering a view (i.e. that
// a view will be followed by at most a single view change to the next view): multiple
// successive view changes can fail, e.g. after a view change timeout.
// We must therefore reset our counters here to avoid counting messages from an older view,
// which would violate the quorum intersection property essential for correctness.
self.reset_quorum_prepare();
self.reset_quorum_start_view_change();
self.reset_quorum_do_view_change();
self.reset_quorum_nack_prepare();
assert(self.start_view_change_quorum == false);
assert(self.do_view_change_quorum == false);
assert(self.nack_prepare_op == null);
self.send_start_view_change();
}
/// Whether it is safe to commit or send prepare_ok messages.
/// Returns true if the hash chain is valid and up to date for the current view.
/// This is a stronger guarantee than `valid_hash_chain_between()` below.
fn valid_hash_chain(self: *Replica, method: []const u8) bool {
// If we know we have uncommitted ops that may have been reordered through a view change
// then wait until the latest of these has been resolved with the leader:
if (self.view_jump_barrier) {
log.notice("{}: {s}: waiting to resolve view jump barrier", .{ self.replica, method });
return false;
}
// If we know we could validate the hash chain even further, then wait until we can:
// This is partial defense-in-depth in case `self.op` is ever advanced by a reordered op.
if (self.op < self.commit_max) {
log.notice("{}: {s}: waiting for repair (op={} < commit={})", .{
self.replica,
method,
self.op,
self.commit_max,
});
return false;
}
// We must validate the hash chain as far as possible, since `self.op` may disclose a fork:
if (!self.valid_hash_chain_between(self.commit_min, self.op)) {
log.notice("{}: {s}: waiting for repair (hash chain)", .{ self.replica, method });
return false;
}
return true;
}
/// Returns true if all operations are present, correctly ordered and connected by hash chain,
/// between `op_min` and `op_max` (both inclusive).
fn valid_hash_chain_between(self: *Replica, op_min: u64, op_max: u64) bool {
assert(op_min <= op_max);
// If we use anything less than self.op then we may commit ops for a forked hash chain that
// have since been reordered by a new leader.
assert(op_max == self.op);
var b = self.journal.entry_for_op_exact(op_max).?;
var op = op_max;
while (op > op_min) {
op -= 1;
if (self.journal.entry_for_op_exact(op)) |a| {
assert(a.op + 1 == b.op);
if (a.checksum == b.nonce) {
assert(self.ascending_viewstamps(a, b));
b = a;
} else {
log.notice("{}: valid_hash_chain_between: break: A: {}", .{ self.replica, a });
log.notice("{}: valid_hash_chain_between: break: B: {}", .{ self.replica, b });
return false;
}
} else {
log.notice("{}: valid_hash_chain_between: missing op={}", .{ self.replica, op });
return false;
}
}
assert(b.op == op_min);
return true;
}
fn view_jump(self: *Replica, header: *const Header) void {
const to_status: Status = switch (header.command) {
.prepare, .commit => .normal,
.start_view_change, .do_view_change, .start_view => .view_change,
else => unreachable,
};
if (self.status != .normal and self.status != .view_change) return;
// If this is for an older view, then ignore:
if (header.view < self.view) return;
// Compare status transitions and decide whether to view jump or ignore:
switch (self.status) {
.normal => switch (to_status) {
// If the transition is to `.normal`, then ignore if this is for the same view:
.normal => if (header.view == self.view) return,
// If the transition is to `.view_change`, then ignore if the view has started:
.view_change => if (header.view == self.view) return,
else => unreachable,
},
.view_change => switch (to_status) {
// This is an interesting special case:
// If the transition is to `.normal` in the same view, then we missed the
// `start_view` message and we must also consider this a view jump:
// If we don't view jump here, then our `view_change_timeout` will fire and we will
// disrupt the cluster by starting another view change for a newer view.
.normal => {},
// If the transition is to `.view_change`, then ignore if this is for the same view:
.view_change => if (header.view == self.view) return,
else => unreachable,
},
else => unreachable,
}
if (to_status == .normal) {
assert(header.view >= self.view);
const command: []const u8 = @tagName(header.command);
if (header.view == self.view) {
assert(self.status == .view_change and to_status == .normal);
log.debug("{}: view_jump: exiting view change and starting view", .{self.replica});
} else {
log.debug("{}: view_jump: jumping to newer view", .{self.replica});
}
if (self.op > self.commit_max) {
// We have uncommitted ops, and these may have been removed or replaced by the new
// leader through a view change in which we were not involved.
//
// In Section 5.2, the VR paper simply removes these uncommitted ops and does a
// state transfer. However, while strictly safe, this impairs durability and adds
// unnecessary repair overhead if the ops were in fact committed.
//
// We rather impose a view jump barrier to keep `commit_ops_through()` from
// committing. This preserves and maximizes durability and minimizes repair traffic.
//
// This view jump barrier is cleared or may be resolved, respectively, as soon as:
// 1. we receive a new prepare from the leader that advances our latest op, or
// 2. we request and receive a `start_view` message from the leader for this view.
//
// This is safe because advancing our latest op in the current view or receiving the
// latest op from the leader both ensure that we have the latest hash chain head.
log.notice("{}: view_jump: imposing view jump barrier", .{self.replica});
self.view_jump_barrier = true;
} else {
assert(self.op == self.commit_max);
// We may still need to resolve any prior view jump barrier:
// For example, if we jump to view 3 and jump again to view 7 both in normal status.
assert(self.view_jump_barrier == true or self.view_jump_barrier == false);
}
} else if (to_status == .view_change) {
assert(header.view > self.view);
// The view change will set the latest op in on_do_view_change() or on_start_view():
// There is no need to impose a view jump barrier and any existing barrier is cleared.
// We only need to transition to view change status.
if (self.view_jump_barrier) {
log.notice("{}: view_jump: clearing view jump barrier", .{self.replica});
self.view_jump_barrier = false;
}
} else {
unreachable;
}
switch (to_status) {
.normal => self.transition_to_normal_status(header.view),
.view_change => self.transition_to_view_change_status(header.view),
else => unreachable,
}
}
fn write_to_journal(self: *Replica, message: *Message, lock: *bool) void {
assert(lock.* == false);
lock.* = true;
defer lock.* = false;
assert(message.references > 0);
assert(message.header.command == .prepare);
assert(message.header.view <= self.view);
assert(message.header.op <= self.op);
if (!self.journal.has(message.header)) {
log.debug("{}: write_to_journal: ignoring (header changed)", .{self.replica});
return;
}
if (self.journal.dirty.bit(message.header.op)) {
self.journal.write(message);
} else {
// Any function that sets the faulty bit should also set the dirty bit:
assert(!self.journal.faulty.bit(message.header.op));
log.debug("{}: write_to_journal: skipping (clean)", .{self.replica});
// Continue through below to send a prepare_ok message if necessary.
}
self.send_prepare_ok(message.header);
// If this was a repair, continue immediately to repair the next prepare:
// This is an optimization to eliminate waiting until the next repair timeout.
if (lock == &self.repairing) self.repair();
}
}; | src/vr/replica.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const AutoHashMap = std.AutoHashMap;
const StringHashMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const LoggingAllocator = std.heap.LoggingAllocator;
const util = @import("util.zig");
const gpa = util.gpa_log;
const data = @embedFile("../data/puzzle/day12.txt");
const NodeFlags = u64;
const NodeName = []const u8;
const Graph = struct {
const Self = @This();
const NodeId = u6;
const NodeData = struct {
name: NodeName,
id: NodeId,
minor: bool,
};
const Edge = struct {
from: NodeData,
to: NodeData,
weight: u64 = 1,
};
nodes: StringHashMap(NodeData),
edges: AutoHashMap(NodeId, AutoHashMap(NodeId, Edge)),
allocator: *Allocator,
minor_nodes_count: u64 = 0,
start_node_idx: usize = 0,
end_node_idx: usize = 0,
fn init(allocator: *Allocator) Self {
return .{
.nodes = StringHashMap(NodeData).init(allocator),
.edges = AutoHashMap(NodeId, AutoHashMap(NodeId, Edge)).init(allocator),
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
var it1 = self.edges.valueIterator();
while (it1.next()) |value_ptr| {
value_ptr.deinit();
}
var it2 = self.nodes.valueIterator();
while (it2.next()) |value_ptr| {
self.allocator.free(value_ptr.name);
}
self.nodes.deinit();
self.edges.deinit();
}
fn createNode(self: *Self, key: NodeName) void {
var own_key = self.allocator.alloc(u8, key.len) catch unreachable;
std.mem.copy(u8, own_key, key);
var is_minor = (own_key[0] >= 'a' and own_key[0] <= 'z');
var id = @intCast(u6, self.nodes.count());
self.nodes.put(own_key, NodeData{
.name = own_key,
.minor = is_minor,
.id = id,
}) catch unreachable;
var node_list = AutoHashMap(NodeId, Edge).init(self.allocator);
self.edges.put(id, node_list) catch unreachable;
}
fn addEdge(self: *Self, name_a: NodeName, name_b: NodeName) void {
if (!self.nodes.contains(name_a)) {
self.createNode(name_a);
}
if (!self.nodes.contains(name_b)) {
self.createNode(name_b);
}
var a = self.nodes.get(name_a).?;
var b = self.nodes.get(name_b).?;
self.edges.getPtr(a.id).?.put(b.id, Edge{ .from = a, .to = b }) catch unreachable;
self.edges.getPtr(b.id).?.put(a.id, Edge{ .from = b, .to = a }) catch unreachable;
}
fn printGraph(self: *Self) void {
print("Nodes\n", .{});
var it1 = self.nodes.valueIterator();
while (it1.next()) |node_ptr| {
print("{s} ({} {})\n", .{ node_ptr.name, node_ptr.minor, node_ptr.id });
}
print("\nEdges\n", .{});
var it2 = self.nodes.valueIterator();
while (it2.next()) |node_ptr| {
var node_list = self.edges.get(node_ptr.id).?;
print("{s} ({}):", .{ node_ptr.name, node_list.count() });
var it3 = self.edgeIterator(node_ptr.name);
while (it3.next()) |edge| {
print(" {s}({})", .{ edge.to.name, edge.weight });
}
print("\n", .{});
}
print("\n", .{});
}
fn computeWeights(self: *Self) void {
var iterator1 = self.nodes.valueIterator();
while (iterator1.next()) |node_data_ptr| {
if (node_data_ptr.minor) continue;
// print("{s} {s}\n", .{ node_data_ptr.name, node_data_ptr });
var nbr = ArrayList(NodeData).init(gpa);
defer nbr.deinit();
var edge_it1 = self.edgeIterator(node_data_ptr.name);
while (edge_it1.next()) |edge_1_ptr| {
nbr.append(edge_1_ptr.to) catch unreachable;
}
var idx1: usize = 0;
while (idx1 < nbr.items.len) : (idx1 += 1) {
const node_1 = nbr.items[idx1];
if (self.getEdgePtrById(node_1.id, node_1.id)) |edge| {
edge.weight += 1;
} else {
self.addEdge(node_1.name, node_1.name);
}
var idx2: usize = idx1 + 1;
while (idx2 < nbr.items.len) : (idx2 += 1) {
const node_2 = nbr.items[idx2];
if (self.getEdgePtrById(node_1.id, node_2.id)) |edge| {
edge.weight += 1;
self.getEdgePtrById(node_2.id, node_1.id).?.weight += 1;
} else {
self.addEdge(node_1.name, node_2.name);
}
}
}
}
}
fn getEdgePtrById(self: *Self, a: NodeId, b: NodeId) ?*Edge {
return self.edges.getPtr(a).?.getPtr(b);
}
fn getEdgePtr(self: *Self, name_a: NodeName, name_b: NodeName) ?*Edge {
const a = self.nodes.get(name_a).?;
const b = self.nodes.get(name_b).?;
return self.edges.getPtr(a.id).?.getPtr(b.id);
}
const EdgeIterator = AutoHashMap(NodeId, Edge).ValueIterator;
const NodeIterator = StringHashMap(NodeData).ValueIterator;
fn edgeIterator(self: *Self, node_name: NodeName) EdgeIterator {
const node = self.nodes.get(node_name).?;
const edges = self.edges.get(node.id).?;
return edges.valueIterator();
}
fn nodeIterator(self: *Self) NodeIterator {
return self.nodes.valueIterator();
}
};
fn countRoutesTo(graph: *Graph, to: NodeName, ignore_flags: u64) u64 {
if (std.mem.eql(u8, to, "start")) return 1;
var node = graph.nodes.get(to).?;
if (!node.minor) return 0;
var it = graph.edgeIterator(to);
var result: u64 = 0;
var new_flag = ignore_flags | @as(u64, 1) << node.id;
while (it.next()) |edge| {
if (!edge.to.minor) continue;
if (edge.to.id == node.id) continue;
if (ignore_flags & @as(u64, 1) << edge.to.id != 0) continue;
result += countRoutesTo(graph, edge.to.name, new_flag) * edge.weight;
}
return result;
}
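// Illustrative test (not part of the original file): the small example cave system from the
// puzzle statement ("start-A", "start-b", "A-c", "A-b", "b-d", "A-end", "b-end") has 10 distinct
// part-1 paths. This assumes `gpa` from util.zig is usable in tests, as it already is in
// computeWeights() above; the expected value 10 comes from the puzzle statement itself.
test "countRoutesTo counts the small example" {
var graph = Graph.init(gpa);
defer graph.deinit();
graph.addEdge("start", "A");
graph.addEdge("start", "b");
graph.addEdge("A", "c");
graph.addEdge("A", "b");
graph.addEdge("b", "d");
graph.addEdge("A", "end");
graph.addEdge("b", "end");
// Contract the major (uppercase) caves into weighted edges between minor caves:
graph.computeWeights();
assert(countRoutesTo(&graph, "end", 0) == 10);
}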
const RouteStep = struct {
name: NodeName,
weight: u64,
};
fn countRoutesToWithDupes(graph: *Graph, to: NodeName, ignore_flags: u64, duplicated: bool, route: *ArrayList(RouteStep)) u64 {
if (std.mem.eql(u8, to, "start")) {
// var idx: usize = route.items.len;
// var k: u64 = 1;
// while (idx > 0) : (idx -= 1) {
// k *= route.items[idx - 1].weight;
// }
// print("{: >3} start", .{k});
// idx = route.items.len;
// while (idx > 0) : (idx -= 1) {
// const x = route.items[idx - 1];
// print("-{}-{s}", .{ x.weight, x.name });
// }
// print("\n", .{});
return 1;
}
var node = graph.nodes.get(to).?;
if (!node.minor) return 0;
var result: u64 = 0;
var new_flag = ignore_flags | @as(u64, 1) << node.id;
var it = graph.edgeIterator(to);
while (it.next()) |edge| {
if (!edge.to.minor) continue;
if (std.mem.eql(u8, edge.to.name, "end")) continue;
if (edge.to.id == node.id) {
if (duplicated) continue;
route.append(.{ .name = to, .weight = edge.weight }) catch unreachable;
result += countRoutesToWithDupes(graph, edge.to.name, new_flag, true, route) * edge.weight;
_ = route.popOrNull();
} else {
if (ignore_flags & @as(u64, 1) << edge.to.id != 0) {
if (duplicated) continue;
route.append(.{ .name = to, .weight = edge.weight }) catch unreachable;
result += countRoutesToWithDupes(graph, edge.to.name, new_flag, true, route) * edge.weight;
_ = route.popOrNull();
} else {
route.append(.{ .name = to, .weight = edge.weight }) catch unreachable;
result += countRoutesToWithDupes(graph, edge.to.name, new_flag, duplicated, route) * edge.weight;
_ = route.popOrNull();
}
}
}
return result;
}
pub fn main() !void {
var lines = tokenize(data, "\r\n");
var score: u64 = 0;
var graph = Graph.init(gpa);
defer graph.deinit();
while (lines.next()) |line| {
var words = split(line, "-");
const a = words.next().?;
const b = words.next().?;
graph.addEdge(a, b);
}
graph.printGraph();
graph.computeWeights();
graph.printGraph();
print("\nResults 1\n", .{});
print("{s} {}\n", .{ "end", countRoutesTo(&graph, "end", 0) });
print("\nResults 2\n", .{});
var route = ArrayList(RouteStep).init(gpa);
defer route.deinit();
print("{s} {}\n", .{ "end", countRoutesToWithDupes(&graph, "end", 0, false, &route) });
}
// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc; | src/day12.zig |
const std = @import("std");
const mem = std.mem;
const debug = std.debug;
const log = std.log;
//const cmqtt = @import("mqtt_paho.zig");
const cmqtt = @cImport({
@cInclude("MQTTClient.h");
});
const c = @cImport({
@cInclude("stdio.h");
@cInclude("unistd.h");
@cInclude("string.h");
@cInclude("time.h");
//@cInclude("paho.mqtt.c/src/MQTTClient.h");
});
const CallBack = fn (topic: []u8, message: []u8) void;
const MAX_REGISTERED_TOPICS = 10;
const mqttlibError = error{FailToRegister};
// Connection to an MQTT broker
//
pub const MqttCnx = struct {
const Self = @This();
allocator: *mem.Allocator,
handle: cmqtt.MQTTClient = undefined,
callBack: CallBack = undefined,
latestDeliveryToken: *cmqtt.MQTTClient_deliveryToken = undefined,
connected: bool = undefined,
connect_option: *cmqtt.MQTTClient_connectOptions = undefined,
reconnect_registered_topics_length: u16 = 0,
reconnect_registered_topics: [10]?[]u8,
// this message is received on another thread
fn _defaultCallback(topic: []u8, message: []u8) void {
_ = c.printf("%s -> %s", topic.ptr, message.ptr);
}
fn _connLost(ctx: ?*anyopaque, m: [*c]u8) callconv(.C) void {
_ = m;
const self_ctx = @intToPtr(*Self, @ptrToInt(ctx));
_ = c.printf("connection lost %d", self_ctx);
}
fn _msgArrived(ctx: ?*anyopaque, topic: [*c]u8, topic_length: c_int, message: [*c]cmqtt.MQTTClient_message) callconv(.C) c_int {
_ = topic_length;
const messagePtrAddress = @ptrToInt(&message);
var cmsg = @intToPtr([*c][*c]cmqtt.MQTTClient_message, messagePtrAddress);
defer cmqtt.MQTTClient_freeMessage(cmsg);
defer cmqtt.MQTTClient_free(topic);
// unsafe conversion
const self_ctx = @intToPtr(*Self, @ptrToInt(ctx));
// paho always returns 0 in topic_length (which makes no sense), so we compute it ourselves:
// _ = c.printf("topic length is %d \n", topic_length);
const tlength: usize = c.strlen(topic);
const mlength = @intCast(u32, message.*.payloadlen);
const am = @ptrToInt(message.*.payload);
const mptr = @intToPtr([*]u8, am);
// pass to zig in proper way with slices
self_ctx.callBack(topic[0..tlength], mptr[0..mlength]);
return 1; // properly handled
}
fn _delivered(ctx: ?*anyopaque, token: cmqtt.MQTTClient_deliveryToken) callconv(.C) void {
// _ = c.printf("%s", "received token");
// unsafe conversion
const self_ctx = @intToPtr(*Self, @ptrToInt(ctx));
self_ctx.*.latestDeliveryToken.* = token;
}
pub fn deinit(self: *Self) !void {
_ = self;
}
pub fn init(allocator: *mem.Allocator, serverAddress: []const u8, clientid: []const u8, username: []const u8, password: []const u8) !*Self {
var handle: cmqtt.MQTTClient = undefined;
// we need to make zero-terminated copies to pass safe C strings
const zServerAddress = try allocator.alloc(u8, serverAddress.len + 1);
// defer allocator.free(zServerAddress);
mem.copy(u8, zServerAddress, serverAddress[0..]);
zServerAddress[serverAddress.len] = '\x00';
const zusername = try allocator.alloc(u8, username.len + 1);
// defer allocator.free(zusername);
mem.copy(u8, zusername, username[0..]);
zusername[username.len] = '\x00';
const zpassword = try allocator.alloc(u8, password.len + 1);
// defer allocator.free(zpassword);
mem.copy(u8, zpassword, password[0..]);
zpassword[password.len] = '\x00';
const zclientid = try allocator.alloc(u8, clientid.len + 1);
// defer allocator.free(zclientid);
mem.copy(u8, zclientid, clientid[0..]);
zclientid[clientid.len] = '\x00';
// the input parameters were converted above to zero-terminated C strings for the C API
const MQTTCLIENT_PERSISTENCE_NONE = 1;
const result = cmqtt.MQTTClient_create(&handle, zServerAddress.ptr, zclientid.ptr, MQTTCLIENT_PERSISTENCE_NONE, null);
if (result > 0) return error.MQTTCreateError;
const HEADER = [_]u8{ 'M', 'Q', 'T', 'C' };
// we set up the struct here ourselves, because the C initializer is a macro
var conn_options = cmqtt.MQTTClient_connectOptions{
.struct_id = HEADER,
.struct_version = 0,
.keepAliveInterval = 60,
.cleansession = 1,
.reliable = 1,
.will = null,
.username = zusername.ptr,
.password = zpassword.ptr,
.connectTimeout = 30,
.retryInterval = 0,
.ssl = null,
.serverURIcount = 0,
.serverURIs = null,
.MQTTVersion = 0,
.returned = .{
.serverURI = null,
.MQTTVersion = 0,
.sessionPresent = 0,
},
.binarypwd = .{
.len = 0,
.data = null,
},
.maxInflightMessages = -1,
.cleanstart = 0, // only available on V5 +
.httpHeaders = null,
};
if (username.len == 0) {
conn_options.username = null;
conn_options.password = null;
}
var self_ptr = try allocator.create(Self);
// init members
self_ptr.handle = handle;
self_ptr.allocator = allocator;
self_ptr.callBack = _defaultCallback;
self_ptr.latestDeliveryToken = try allocator.create(cmqtt.MQTTClient_deliveryToken);
self_ptr.connected = false;
// remember the connect options
self_ptr.connect_option = try allocator.create(cmqtt.MQTTClient_connectOptions);
self_ptr.connect_option.* = conn_options;
self_ptr.reconnect_registered_topics = mem.zeroes([10]?[]u8);
self_ptr.reconnect_registered_topics_length = 0;
const retCallBacks = cmqtt.MQTTClient_setCallbacks(handle, self_ptr, _connLost, _msgArrived, _delivered);
if (retCallBacks != cmqtt.MQTTCLIENT_SUCCESS) {
return mqttlibError.FailToRegister;
}
try self_ptr.reconnect(true);
return self_ptr;
}
fn reconnect(self: *Self, first: bool) !void {
if (self.*.connected) {
// nothing to do
return;
}
if (!first) {
const result = cmqtt.MQTTClient_disconnect(self.handle, 100);
if (result != 0) {
_ = c.printf("disconnection failed MQTTClient_disconnect returned %d, continue\n", result);
}
}
const r = cmqtt.MQTTClient_connect(self.handle, self.connect_option);
_ = c.printf("connect to mqtt returned %d\n\x00", r);
if (r != 0) return error.MQTTConnectError;
self.connected = true;
if (self.reconnect_registered_topics_length > 0) {
for (self.reconnect_registered_topics[0..self.reconnect_registered_topics_length]) |e| {
if (e) |nonNullPtr| {
_ = c.printf("re-registering %s \n", nonNullPtr.ptr);
self._register(nonNullPtr) catch {
_ = c.printf("cannot reregister \n");
};
}
}
}
}
// publish a message with default QOS 0
pub fn publish(self: *Self, topic: [*c]const u8, msg: []const u8) !void {
return publishWithQos(self, topic, msg, 0);
}
pub fn publishWithQos(self: *Self, topic: [*c]const u8, msg: []const u8, qos: u8) !void {
self._publishWithQos(topic, msg, qos) catch |e| {
_ = c.printf("fail to publish, try to reconnect \n");
log.warn("error : {}", .{e});
self.connected = false;
self.reconnect(false) catch |reconnecterr| {
_ = c.printf("failed to reconnect, will retry later \n");
log.warn("error: {}", .{reconnecterr});
};
};
}
// internal method, so that publishWithQos can retry the connection on failure
fn _publishWithQos(self: *Self, topic: [*c]const u8, msg: []const u8, qos: u8) !void {
const messageLength: c_int = @intCast(c_int, msg.len);
if (msg.len == 0) {
return;
}
// because C declares the message payload as mutable (not const),
// we cast away the const on our message pointer
const constMessageContent: [*]u8 = @intToPtr([*]u8, @ptrToInt(msg.ptr));
const HEADER_MESSAGE = [_]u8{ 'M', 'Q', 'T', 'M' };
var mqttmessage = cmqtt.MQTTClient_message{
.struct_id = HEADER_MESSAGE,
.struct_version = 0, // no message properties
.payloadlen = messageLength,
.payload = constMessageContent,
.qos = qos,
.retained = 0,
.dup = 0,
.msgid = 0,
// below, these are MQTTV5 specific properties
.properties = cmqtt.MQTTProperties{
.count = 0,
.max_count = 0,
.length = 0,
.array = null,
},
};
var token = try self.allocator.create(cmqtt.MQTTClient_deliveryToken);
defer self.allocator.destroy(token);
const resultPublish = cmqtt.MQTTClient_publishMessage(self.handle, topic, &mqttmessage, token);
if (resultPublish != 0) {
std.log.warn("publish mqtt message returned {}\n", .{resultPublish});
return error.MQTTPublishError;
}
// wait for the message to be delivered
if (qos > 0) {
const waitResult = cmqtt.MQTTClient_waitForCompletion(self.handle, token.*, @intCast(c_ulong, 2000));
if (waitResult != 0) return error.MQTTWaitTokenError;
while (self.latestDeliveryToken.* != token.*) {
// give the CPU a breather, and yield
_ = c.usleep(1);
}
}
}
pub fn register(self: *Self, topic: []const u8) !void {
// remember the topic, so that we can re-register it if the connection is lost
//
if (self.*.reconnect_registered_topics_length >= self.reconnect_registered_topics.len) {
// not enough room to remember more registered topics
return error.TooMuchRegisteredTopics;
}
const ptr = try self.allocator.alloc(u8, topic.len + 1);
mem.copy(u8, ptr, topic[0..]);
ptr[topic.len] = '\x00';
self.reconnect_registered_topics[self.*.reconnect_registered_topics_length] = ptr;
self.reconnect_registered_topics_length = self.*.reconnect_registered_topics_length + 1;
try self._register(ptr);
}
fn _register(self: *Self, topic: []const u8) !void {
_ = c.printf("register to %s \n", topic.ptr);
const r = cmqtt.MQTTClient_subscribe(self.*.handle, topic.ptr, 0);
if (r != 0) return error.MQTTRegistrationError;
}
};
test "testconnect mqtt home" {
var arena = std.heap.ArenaAllocator.init(std.heap.c_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var serverAddress: []const u8 = "tcp://192.168.4.16:1883";
var clientid: []const u8 = "clientid";
var userName: []const u8 = "sys";
var password: []const u8 = "<PASSWORD>";
// var handle: cmqtt.MQTTClient = null;
var cnx = try MqttCnx.init(allocator, serverAddress, clientid, userName, password);
const myStaticMessage: []const u8 = "Hello static message";
_ = try cnx.register("home/#");
_ = c.sleep(10);
var i: u32 = 0;
while (i < 10000) : (i += 1) {
try cnx.publish("myothertopic", myStaticMessage);
}
i = 0; // reset the counter, otherwise this second loop would never run
while (i < 10000) : (i += 1) {
try cnx.publishWithQos("myothertopic", myStaticMessage, 1);
}
_ = c.printf("ended");
}
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.c_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var serverAddress: []const u8 = "tcp://192.168.4.16:1883";
var clientid: []const u8 = "clientid";
var userName: []const u8 = "sys";
var password: []const u8 = "<PASSWORD>";
var cnx = try MqttCnx.init(allocator, serverAddress, clientid, userName, password);
_ = try cnx.register("home/#");
_ = c.sleep(20);
_ = c.printf("ended");
return;
} | mqttlib.zig |
const high_bit = 1 << @typeInfo(usize).Int.bits - 1;
pub const Status = extern enum(usize) {
/// The operation completed successfully.
Success = 0,
/// The image failed to load.
LoadError = high_bit | 1,
/// A parameter was incorrect.
InvalidParameter = high_bit | 2,
/// The operation is not supported.
Unsupported = high_bit | 3,
/// The buffer was not the proper size for the request.
BadBufferSize = high_bit | 4,
/// The buffer is not large enough to hold the requested data. The required buffer size is returned in the appropriate parameter when this error occurs.
BufferTooSmall = high_bit | 5,
/// There is no data pending upon return.
NotReady = high_bit | 6,
/// The physical device reported an error while attempting the operation.
DeviceError = high_bit | 7,
/// The device cannot be written to.
WriteProtected = high_bit | 8,
/// A resource has run out.
OutOfResources = high_bit | 9,
/// An inconsistency was detected on the file system, causing the operation to fail.
VolumeCorrupted = high_bit | 10,
/// There is no more space on the file system.
VolumeFull = high_bit | 11,
/// The device does not contain any medium to perform the operation.
NoMedia = high_bit | 12,
/// The medium in the device has changed since the last access.
MediaChanged = high_bit | 13,
/// The item was not found.
NotFound = high_bit | 14,
/// Access was denied.
AccessDenied = high_bit | 15,
/// The server was not found or did not respond to the request.
NoResponse = high_bit | 16,
/// A mapping to a device does not exist.
NoMapping = high_bit | 17,
/// The timeout time expired.
Timeout = high_bit | 18,
/// The protocol has not been started.
NotStarted = high_bit | 19,
/// The protocol has already been started.
AlreadyStarted = high_bit | 20,
/// The operation was aborted.
Aborted = high_bit | 21,
/// An ICMP error occurred during the network operation.
IcmpError = high_bit | 22,
/// A TFTP error occurred during the network operation.
TftpError = high_bit | 23,
/// A protocol error occurred during the network operation.
ProtocolError = high_bit | 24,
/// The function encountered an internal version that was incompatible with a version requested by the caller.
IncompatibleVersion = high_bit | 25,
/// The function was not performed due to a security violation.
SecurityViolation = high_bit | 26,
/// A CRC error was detected.
CrcError = high_bit | 27,
/// The beginning or end of the media was reached.
EndOfMedia = high_bit | 28,
/// The end of the file was reached.
EndOfFile = high_bit | 31,
/// The language specified was invalid.
InvalidLanguage = high_bit | 32,
/// The security status of the data is unknown or compromised and the data must be updated or replaced to restore a valid security status.
CompromisedData = high_bit | 33,
/// There is an address conflict during address allocation.
IpAddressConflict = high_bit | 34,
/// A HTTP error occurred during the network operation.
HttpError = high_bit | 35,
NetworkUnreachable = high_bit | 100,
HostUnreachable = high_bit | 101,
ProtocolUnreachable = high_bit | 102,
PortUnreachable = high_bit | 103,
ConnectionFin = high_bit | 104,
ConnectionReset = high_bit | 105,
ConnectionRefused = high_bit | 106,
/// The string contained one or more characters that the device could not render and were skipped.
WarnUnknownGlyph = 1,
/// The handle was closed, but the file was not deleted.
WarnDeleteFailure = 2,
/// The handle was closed, but the data to the file was not flushed properly.
WarnWriteFailure = 3,
/// The resulting buffer was too small, and the data was truncated to the buffer size.
WarnBufferTooSmall = 4,
/// The data has not been updated within the timeframe set by local policy for this type of data.
WarnStaleData = 5,
/// The resulting buffer contains a UEFI-compliant file system.
WarnFileSystem = 6,
/// The operation will be processed across a system reset.
WarnResetRequired = 7,
_,
}; | lib/std/os/uefi/status.zig |
const std = @import("std");
const mem = std.mem;
const debug = std.debug;
const math = std.math;
const threshold = 2;
const default_mask = 0x80;
// TODO: Tests
// TODO: This file could use some love in the form of a refactor. So far, it is mostly
// a direct translation of blz.c, but with some minor refactors here and there.
// Sadly, it's still not clear at all what this code is trying to do other than
// some kind of encoding. searchMatch is an example of such a refactor, and one that
// did help make a small piece of this code clearer.
// TODO: Figure out if it's possible to make these encode and decode functions use streams.
pub fn decode(data: []const u8, allocator: *mem.Allocator) ![]u8 {
const Lengths = struct {
enc: u32,
dec: u32,
pak: u32,
raw: u32,
};
if (data.len < 8)
return error.BadHeader;
const inc_len = mem.readIntLittle(u32, @ptrCast(*const [4]u8, data[data.len - 4 ..][0..4].ptr));
const lengths = blk: {
if (inc_len == 0) {
return error.BadHeaderLength;
} else {
const hdr_len = data[data.len - 5];
if (hdr_len < 8 or hdr_len > 0xB) return error.BadHeaderLength;
if (data.len <= hdr_len) return error.BadLength;
const enc_len = mem.readIntLittle(u32, @ptrCast(*const [4]u8, data[data.len - 8 ..][0..4].ptr)) & 0x00FFFFFF;
const dec_len = try math.sub(u32, try math.cast(u32, data.len), enc_len);
const pak_len = try math.sub(u32, enc_len, hdr_len);
const raw_len = dec_len + enc_len + inc_len;
if (raw_len > 0x00FFFFFF)
return error.BadLength;
break :blk Lengths{
.enc = enc_len,
.dec = dec_len,
.pak = pak_len,
.raw = raw_len,
};
}
};
const result = try allocator.alloc(u8, lengths.raw);
errdefer allocator.free(result);
const pak_buffer = try allocator.alloc(u8, data.len + 3);
defer allocator.free(pak_buffer);
mem.copy(u8, result, data[0..lengths.dec]);
mem.copy(u8, pak_buffer, data);
invert(pak_buffer[lengths.dec .. lengths.dec + lengths.pak]);
const pak_end = lengths.dec + lengths.pak;
var pak = lengths.dec;
var raw = lengths.dec;
var mask = usize(0);
var flags = usize(0);
while (raw < lengths.raw) {
mask = mask >> 1;
if (mask == 0) {
if (pak == pak_end) break;
flags = pak_buffer[pak];
mask = default_mask;
pak += 1;
}
if (flags & mask == 0) {
if (pak == pak_end) break;
result[raw] = pak_buffer[pak];
raw += 1;
pak += 1;
} else {
if (pak + 1 >= pak_end) break;
const pos = (usize(pak_buffer[pak]) << 8) | pak_buffer[pak + 1];
pak += 2;
const len = (pos >> 12) + threshold + 1;
if (raw + len > lengths.raw) return error.WrongDecodedLength;
const new_pos = (pos & 0xFFF) + 3;
var i = usize(0);
while (i < len) : (i += 1) {
result[raw] = result[raw - new_pos];
raw += 1;
}
}
}
if (raw != lengths.raw) return error.UnexpectedEnd;
invert(result[lengths.dec..lengths.raw]);
return result[0..raw];
}
pub const Mode = enum {
Normal,
Best,
};
pub fn encode(data: []const u8, mode: Mode, arm9: bool, allocator: *mem.Allocator) ![]u8 {
var pak_tmp = usize(0);
var raw_tmp = data.len;
var pak_len = data.len + ((data.len + 7) / 8) + 11;
var pak = usize(0);
var raw = usize(0);
var mask = usize(0);
var flag = usize(0);
var raw_end = blk: {
var res = data.len;
if (arm9) {
res -= 0x4000;
}
break :blk res;
};
const result = try allocator.alloc(u8, pak_len);
const raw_buffer = try allocator.alloc(u8, data.len + 3);
defer allocator.free(raw_buffer);
mem.copy(u8, raw_buffer, data);
invert(raw_buffer[0..data.len]);
while (raw < raw_end) {
mask = mask >> 1;
if (mask == 0) {
result[pak] = 0;
mask = default_mask;
flag = pak;
pak += 1;
}
const best = search(raw_buffer[0..raw_end], raw);
const pos_best = @ptrToInt(raw_buffer[raw..].ptr) - @ptrToInt(best.ptr);
const len_best = blk: {
if (mode == Mode.Best) {
if (best.len > threshold) {
if (raw + best.len < raw_end) {
raw += best.len;
const next = search(raw_buffer[0..raw_end], raw);
raw -= best.len - 1;
const post = search(raw_buffer[0..raw_end], raw);
raw -= 1;
const len_next = if (next.len <= threshold) 1 else next.len;
const len_post = if (post.len <= threshold) 1 else post.len;
if (best.len + len_next <= 1 + len_post)
break :blk 1;
}
}
}
break :blk best.len;
};
result[flag] = result[flag] << 1;
if (len_best > threshold) {
raw += len_best;
result[flag] |= 1;
result[pak] = @truncate(u8, ((len_best - (threshold + 1)) << 4) | ((pos_best - 3) >> 8));
result[pak + 1] = @truncate(u8, (pos_best - 3));
pak += 2;
} else {
result[pak] = raw_buffer[raw];
pak += 1;
raw += 1;
}
if (pak + data.len - raw < pak_tmp + raw_tmp) {
pak_tmp = pak;
raw_tmp = data.len - raw;
}
}
while (mask > 0) {
mask = mask >> 1;
result[flag] = result[flag] << 1;
}
pak_len = pak;
invert(raw_buffer[0..data.len]);
invert(result[0..pak_len]);
if (pak_tmp == 0 or data.len + 4 < ((pak_tmp + raw_tmp + 3) & 0xFFFFFFFC) + 8) {
mem.copy(u8, result[0..data.len], raw_buffer[0..data.len]);
pak = data.len;
while ((pak & 3) > 0) : (pak += 1) {
result[pak] = 0;
}
result[pak] = 0;
result[pak + 1] = 0;
result[pak + 2] = 0;
result[pak + 3] = 0;
pak += 4;
return result[0..pak];
} else {
defer allocator.free(result);
const new_result = try allocator.alloc(u8, raw_tmp + pak_tmp + 11);
mem.copy(u8, new_result[0..raw_tmp], raw_buffer[0..raw_tmp]);
mem.copy(u8, new_result[raw_tmp..][0..pak_tmp], result[pak_len - pak_tmp ..][0..pak_tmp]);
pak = raw_tmp + pak_tmp;
const enc_len = pak_tmp;
const inc_len = data.len - pak_tmp - raw_tmp;
var hdr_len = usize(8);
while ((pak & 3) != 0) : ({
pak += 1;
hdr_len += 1;
}) {
new_result[pak] = 0xFF;
}
mem.writeInt(new_result[pak..], @intCast(u32, enc_len + hdr_len), @import("builtin").Endian.Little);
pak += 3;
new_result[pak] = @truncate(u8, hdr_len);
pak += 1;
mem.writeInt(new_result[pak..], @intCast(u32, inc_len - hdr_len), @import("builtin").Endian.Little);
pak += 4;
return new_result[0..pak];
}
}
/// Searches for ::match in ::data, and returns a slice to the best match.
/// TODO: This function finds the last best match, i.e. if two matches have
/// the same length, the last one in ::data will be returned.
/// We only do this so that we are binary equivalent with blz.c.
/// When we don't need blz.c anymore, change this behavior.
fn searchMatch(data: []const u8, match: []const u8) []const u8 {
var best = data[0..0];
var pos = usize(0);
while (pos < data.len) : (pos += 1) {
const max = math.min(match.len, data.len - pos);
var len = usize(0);
while (len < max) : (len += 1) {
if (data[pos + len] != match[len]) break;
}
if (best.len <= len) {
best = data[pos..][0..len];
}
}
return best;
}
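// A first small test toward the "TODO: Tests" note above (an illustrative sketch, not part of
// the original file): the longest occurrence of `match` within `data` should be returned.
test "searchMatch returns the longest match" {
const found = searchMatch("abcabcd", "abcd");
debug.assert(found.len == 4);
debug.assert(mem.eql(u8, found, "abcd"));
}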
/// Finds the best match of data[raw..raw+0x12] in data[max(0, raw - 0x1002)..raw]
/// and returns a slice to that match.
fn search(data: []const u8, raw: usize) []const u8 {
const max = math.min(raw, usize(0x1002));
const pattern = data[raw..math.min(usize(0x12) + raw, data.len)];
const d = data[raw - max .. raw];
return searchMatch(d, pattern);
}
fn invert(data: []u8) void {
var bottom = data.len - 1;
var i = usize(0);
while (i < bottom) : ({
i += 1;
bottom -= 1;
}) {
const tmp = data[i];
data[i] = data[bottom];
data[bottom] = tmp;
}
} | src/blz.zig |
const USE_MKL = @import("build_options").USE_MKL;
const std = @import("std");
const reference_counter = @import("reference_counter.zig");
const ReferenceCounter = reference_counter.ReferenceCounter;
const mkl = @import("mkl");
// CUDNN_DIM_MAX is apparently 8
const maxNumDim = 8;
const defaultIntDType = DType.i64;
const defaultFloatDType = DType.f32;
pub fn sliceProduct(comptime T: type, arr: []const T) T {
var result: T = 1;
for (arr) |v| {
result *= v;
}
return result;
}
fn indent(writer: anytype, level: u64) !void {
var i: u64 = 0;
while (i < level) : (i += 1) {
try writer.writeAll(" ");
}
}
fn contains(comptime T: type, haystack: []const T, needle: T) bool {
for (haystack) |item| {
if (needle == item) {
return true;
}
}
return false;
}
const PositionIterator = struct {
remaining: u64,
length: u64,
ndim: u64,
shape: [maxNumDim]u64 = [_]u64{0} ** maxNumDim,
pos: [maxNumDim]u64 = [_]u64{0} ** maxNumDim,
const Self = @This();
fn init(shape: []const u64) Self {
var length = sliceProduct(u64, shape);
var inst = Self{ .length = length, .remaining = length, .ndim = shape.len };
std.mem.copy(u64, &inst.shape, shape);
return inst;
}
fn next(self: *Self) ?[]const u64 {
if (self.remaining == 0) {
return null;
}
self.remaining -= 1;
if (self.remaining == self.length - 1) {
// don't increase offset on first item
return self.pos[0..self.ndim];
}
if (self.ndim == 0) {
return null;
}
// increment our pos, moving offset according to strides
// start at right side and move to the left
var d: u64 = self.ndim - 1;
self.pos[d] += 1;
while (self.pos[d] == self.shape[d]) {
std.debug.assert(d > 0);
self.pos[d] = 0;
self.pos[d - 1] += 1;
d -= 1;
}
return self.pos[0..self.ndim];
}
};
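// Illustrative check (not in the original file): a 2x3 shape yields 6 positions in row-major
// order, each position carrying one coordinate per dimension.
test "position_iterator" {
var it = PositionIterator.init(&[_]u64{ 2, 3 });
var count: u64 = 0;
while (it.next()) |pos| {
std.testing.expect(pos.len == 2);
count += 1;
}
std.testing.expect(count == 6);
}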
const StridedIterator = struct {
remaining: u64,
length: u64,
offset: u64,
ndim: u64,
strides: [maxNumDim]u64 = [_]u64{0} ** maxNumDim,
shape: [maxNumDim]u64 = [_]u64{0} ** maxNumDim,
pos: [maxNumDim]u64 = [_]u64{0} ** maxNumDim,
const Self = @This();
fn init(shape: []const u64, strides: []const u64, offset: u64) Self {
var length = sliceProduct(u64, shape);
var inst = Self{ .length = length, .remaining = length, .offset = offset, .ndim = shape.len };
std.mem.copy(u64, &inst.strides, strides);
std.mem.copy(u64, &inst.shape, shape);
return inst;
}
fn next(self: *Self) ?u64 {
if (self.remaining == 0) {
return null;
}
self.remaining -= 1;
if (self.remaining == self.length - 1) {
// don't increase offset on first item
return self.offset;
}
// it seems this has to be its own if statement so that the compiler can see that
// the code below, which depends on ndim > 0, is never executed
if (self.ndim == 0) {
return null;
}
// increment our pos, moving offset according to strides
// start at right side and move to the left
var d: u64 = self.ndim - 1;
self.pos[d] += 1;
self.offset += self.strides[d];
while (self.pos[d] == self.shape[d]) {
std.debug.assert(d > 0);
self.pos[d] = 0;
self.offset -= self.strides[d] * self.shape[d];
self.pos[d - 1] += 1;
self.offset += self.strides[d - 1];
d -= 1;
}
return self.offset;
}
};
pub const DType = enum {
u8, // useful for pixel data
u64, // useful for shapes
i64,
f32,
f64, // useful for grad check
};
const DTypeBuffer = union(DType) {
u8: []u8,
u64: []u64,
i64: []i64,
f32: []f32,
f64: []f64,
};
const DTypeValue = union(DType) {
u8: u8,
u64: u64,
i64: i64,
f32: f32,
f64: f64,
};
fn typeToDType(comptime T: type) DType {
return switch (T) {
u8 => DType.u8,
u64 => DType.u64,
i64 => DType.i64,
f32 => DType.f32,
f64 => DType.f64,
else => @compileError("unsupported type"),
};
}
fn dtypeToTypeName(dtype: DType) []const u8 {
return switch (dtype) {
.u8 => "u8",
.u64 => "u64",
.i64 => "i64",
.f32 => "f32",
.f64 => "f64",
};
}
fn dtypeToPriority(dtype: DType) u8 {
return switch (dtype) {
.u8 => 0,
.u64 => 1,
.i64 => 2,
.f32 => 3,
.f64 => 4,
};
}
pub fn dtypeIsInteger(dtype: DType) bool {
return switch (dtype) {
.u8, .i64, .u64 => true,
else => false,
};
}
fn dtypeMax(dtype1: DType, dtype2: DType) DType {
if (dtypeToPriority(dtype1) > dtypeToPriority(dtype2)) {
return dtype1;
} else {
return dtype2;
}
}
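// Illustrative check (not in the original file): type promotion picks whichever dtype has the
// higher priority, so integers promote to floats and u8 promotes to i64.
test "dtype_promotion" {
std.testing.expect(dtypeMax(DType.u8, DType.f32) == DType.f32);
std.testing.expect(dtypeMax(DType.i64, DType.u8) == DType.i64);
std.testing.expect(dtypeIsInteger(DType.u64));
std.testing.expect(!dtypeIsInteger(DType.f64));
}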
fn getShapeFromString(str: []const u8) DimArray {
var ndim: u64 = 0;
for (str) |c| {
if (c == '[') {
ndim += 1;
} else if (c == ' ' or c == '\n') {
continue;
} else {
break;
}
}
var shape = DimArray{ .ndim = ndim };
var dim_complete = [_]bool{false} ** maxNumDim;
// assume we have at least 1 element per array
for (dim_complete) |_, d| {
shape.array[d] = 1;
}
var started: bool = false;
var d: u64 = 0;
for (str) |c| {
if (c == '[') {
if (started) {
d += 1;
} else {
started = true;
}
} else if (c == ']') {
dim_complete[d] = true;
if (d == 0) {
break;
}
d -= 1;
} else if (c == ',' and !dim_complete[d]) {
shape.array[d] += 1;
}
}
return shape;
}
test "get_shape_from_string" {
var shape = getShapeFromString("[[1,2], [3,4], [5,6]]");
std.testing.expect(std.mem.eql(u64, shape.getSlice(), &[_]u64{ 3, 2 }));
var shape2 = getShapeFromString("[[[1,2], [3,4], [5,6]], [[1,2], [3,4], [5,6]], [[1,2], [3,4], [5,6]], [[1,2], [3,4], [5,6]]]");
std.testing.expect(std.mem.eql(u64, shape2.getSlice(), &[_]u64{ 4, 3, 2 }));
}
fn readBufferFromString(comptime T: type, buf: []T, str: []const u8) void {
var buf_index: u64 = 0;
var index: u64 = 0;
while (index < str.len) {
var c = str[index];
if (('0' <= c and c <= '9') or c == '-') {
var start = index;
var end = index;
while (('0' <= c and c <= '9') or c == '-' or c == '.' or c == 'e' or c == 'E' or c == '+') {
end += 1;
c = str[end];
}
var val = std.fmt.parseFloat(f64, str[start..end]) catch @panic("Failed to parse float");
buf[buf_index] = switch (@typeInfo(T)) {
.Int => @floatToInt(T, val),
.Float => @floatCast(T, val),
else => @panic("Unexpected type"),
};
buf_index += 1;
index = end;
} else {
index += 1;
}
}
if (buf_index != buf.len) {
@panic("Values do not match expected shape");
}
}
test "read_buffer_from_string" {
var buf = [_]f32{0.0} ** 6;
readBufferFromString(f32, &buf, "[[1,2], [3,4], [5,6]]");
std.testing.expect(std.mem.eql(f32, &buf, &[_]f32{ 1, 2, 3, 4, 5, 6 }));
}
pub const Array = struct {
buffer_union: DTypeBuffer,
shape: DimArray,
strides: DimArray,
numel: u64,
ndim: u64,
dtype: DType,
// it is nice to have multiple views onto the same underlying data
// and not have to copy it each time, so keep a reference count.
// If we don't own the memory for the data slice, this will be null.
ref_counter: ?*ReferenceCounter,
alc: ?*std.mem.Allocator,
is_contiguous: bool,
offset: u64,
const Self = @This();
fn calculateStrides(shape: []const u64, strides_out: []u64) void {
var stride: u64 = 1;
for (shape) |_, i| {
strides_out[shape.len - 1 - i] = stride;
stride *= shape[shape.len - 1 - i];
}
}
fn createStrides(shape: []const u64) DimArray {
var strides = DimArray{ .ndim = shape.len };
calculateStrides(shape, strides.getSlice());
return strides;
}
pub fn alloc(comptime T: type, alc: *std.mem.Allocator, shape: []const u64) !Self {
var ndim = shape.len;
var numel = sliceProduct(u64, shape);
var ref_counter = try alc.create(ReferenceCounter);
ref_counter.* = ReferenceCounter.init();
var buf = try alc.alloc(T, numel);
var buffer_union = switch (T) {
u8 => DTypeBuffer{ .u8 = buf },
u64 => DTypeBuffer{ .u64 = buf },
i64 => DTypeBuffer{ .i64 = buf },
f32 => DTypeBuffer{ .f32 = buf },
f64 => DTypeBuffer{ .f64 = buf },
else => @panic("invalid type"),
};
return Self{ .ndim = ndim, .numel = numel, .dtype = typeToDType(T), .ref_counter = ref_counter, .buffer_union = buffer_union, .alc = alc, .is_contiguous = true, .offset = 0, .shape = DimArray.init(shape), .strides = createStrides(shape) };
}
pub fn allocWithRange(comptime T: type, alc: *std.mem.Allocator, shape: []const u64, start: T, step: T) !Self {
var t = try Self.alloc(T, alc, shape);
var v = start;
var buf = t.getBuffer(T);
for (buf) |_, i| {
buf[i] = v;
v += step;
}
return t;
}
pub fn allocWithString(comptime T: type, alc: *std.mem.Allocator, str: []const u8) !Self {
var shape = getShapeFromString(str);
var t = try Self.allocWithValue(T, alc, shape.getSlice(), 0);
readBufferFromString(T, t.getBuffer(T), str);
return t;
}
pub fn allocWithValue(comptime T: type, alc: *std.mem.Allocator, shape: []const u64, value: T) !Self {
var t = try Self.alloc(T, alc, shape);
var buf = t.getBuffer(T);
std.mem.set(T, buf, value);
return t;
}
pub fn fromBuffer(comptime T: type, shape: []const u64, buf: []T) Self {
var ndim = shape.len;
var numel = sliceProduct(u64, shape);
if (buf.len != numel) {
@panic("data length does not match shape");
}
var data = switch (T) {
u8 => DTypeBuffer{ .u8 = buf },
u64 => DTypeBuffer{ .u64 = buf },
i64 => DTypeBuffer{ .i64 = buf },
f32 => DTypeBuffer{ .f32 = buf },
f64 => DTypeBuffer{ .f64 = buf },
else => @panic("invalid type"),
};
return Self{ .ndim = ndim, .numel = numel, .dtype = typeToDType(T), .ref_counter = null, .buffer_union = data, .alc = null, .is_contiguous = true, .offset = 0, .shape = DimArray.init(shape), .strides = createStrides(shape) };
}
pub fn flatFromBuffer(comptime T: type, buf: []T) Self {
return Self.fromBuffer(T, &[_]u64{buf.len}, buf);
}
pub fn scalarFromBuffer(comptime T: type, buf: []T) Self {
if (buf.len != 1) {
std.debug.panic("Buffer length {} invalid for scalar, must be 1", .{buf.len});
}
return Self.fromBuffer(T, &[_]u64{}, buf);
}
pub fn getShape(self: *const Self) []const u64 {
return self.shape.getConstSlice();
}
pub fn getStrides(self: *const Self) []const u64 {
return self.strides.getConstSlice();
}
pub fn getBuffer(self: Self, comptime T: type) []T {
return switch (T) {
u8 => self.buffer_union.u8,
u64 => self.buffer_union.u64,
i64 => self.buffer_union.i64,
f32 => self.buffer_union.f32,
f64 => self.buffer_union.f64,
else => @panic("invalid type"),
};
}
pub fn flatView(self: Self) Self {
if (self.ndim == 0) {
@panic("attempted to flatten a scalar");
}
var cur_strides = self.getStrides();
var stride: u64 = cur_strides[self.ndim - 1];
if (!self.is_contiguous) {
var non_singleton_dims: u64 = 0;
for (self.getShape()) |s, i| {
if (s != 1) {
non_singleton_dims += 1;
stride = cur_strides[i];
}
}
if (non_singleton_dims > 1) {
@panic("can only flatten contiguous tensors or tensors with one non-singleton dimenson");
}
}
var inst = self;
inst.ndim = 1;
inst.shape = DimArray.init(&[_]u64{self.numel});
inst.strides = DimArray.init(&[_]u64{stride});
return inst;
}
pub fn expandView(self: Self, shape: []const u64) Self {
if (shape.len < self.ndim) {
@panic("new shape must have the same number or more dimensions");
}
// shift strides over to account for new dimensions, which are added on the left
var strides = [_]u64{0} ** maxNumDim;
const num_added_dims = shape.len - self.ndim;
std.mem.copy(u64, strides[num_added_dims .. num_added_dims + self.ndim], self.getStrides());
var contiguous = num_added_dims == 0;
var output_index: u64 = 0;
var cur_shape = self.getShape();
while (output_index < num_added_dims) {
// this is a newly added dimension, the stride will be 0
strides[output_index] = 0;
output_index += 1;
}
while (output_index < shape.len) {
var input_index: u64 = output_index - num_added_dims;
if (cur_shape[input_index] == 1) {
if (shape[output_index] == 0) {
@panic("attempted to expand size 0 dimension");
}
// we can now iterate along this dimension without advancing through the data
strides[output_index] = 0;
contiguous = false;
} else if (shape[output_index] != cur_shape[input_index]) {
@panic("expanded shape not compatible with existing shape");
}
output_index += 1;
}
var numel = sliceProduct(u64, shape);
return Self{ .ndim = shape.len, .numel = numel, .dtype = self.dtype, .ref_counter = self.ref_counter, .buffer_union = self.buffer_union, .alc = self.alc, .is_contiguous = contiguous, .offset = self.offset, .shape = DimArray.init(shape), .strides = DimArray.init(strides[0..shape.len]) };
}
pub fn narrowView(self: Self, pos: []const u64, shape: []const u64) Self {
if (pos.len != self.ndim) {
@panic("position has wrong number of dimensions");
}
if (shape.len != self.ndim) {
@panic("shape has wrong number of dimensions");
}
var offset = self.offset;
var d: u64 = 0;
var is_contiguous = self.is_contiguous;
var contiguous_strides = [_]u64{0} ** maxNumDim;
calculateStrides(shape, &contiguous_strides);
var cur_shape = self.getShape();
var cur_strides = self.getStrides();
while (d < self.ndim) : (d += 1) {
offset += pos[d] * cur_strides[d];
if (pos[d] + shape[d] > cur_shape[d]) {
@panic("Position with shape exceeds size of source shape");
}
if (pos[d] >= cur_shape[d]) {
@panic("Invalid position");
}
if (shape[d] != 1 and contiguous_strides[d] != cur_strides[d]) {
is_contiguous = false;
}
}
var numel = sliceProduct(u64, shape);
return Self{ .ndim = self.ndim, .numel = numel, .dtype = self.dtype, .ref_counter = self.ref_counter, .buffer_union = self.buffer_union, .alc = self.alc, .is_contiguous = is_contiguous, .offset = offset, .shape = DimArray.init(shape), .strides = self.strides };
}
pub fn reshapeView(self: Self, shape: []const u64) Self {
if (self.numel != sliceProduct(u64, shape)) {
@panic("Attempted to reshape to different number of elements");
}
if (!self.is_contiguous) {
@panic("Reshape view of non-contiguous arrays not yet supported");
}
return Self{ .ndim = shape.len, .numel = self.numel, .dtype = self.dtype, .ref_counter = self.ref_counter, .buffer_union = self.buffer_union, .alc = self.alc, .is_contiguous = self.is_contiguous, .offset = self.offset, .shape = DimArray.init(shape), .strides = createStrides(shape) };
}
pub fn createIterator(self: Self) StridedIterator {
return StridedIterator.init(self.getShape(), self.getStrides(), self.offset);
}
pub fn get(self: Self, comptime T: type, pos: []const u64) T {
var offset: u64 = self.offset;
if (pos.len != self.ndim) {
@panic("position has wrong number of dimensions");
}
for (self.getStrides()) |stride, i| {
offset += pos[i] * stride;
}
var buf = self.getBuffer(T);
return buf[offset];
}
pub fn getValue(self: Self, pos: []const u64) DTypeValue {
return switch (self.dtype) {
.u8 => DTypeValue{ .u8 = self.get(u8, pos) },
.u64 => DTypeValue{ .u64 = self.get(u64, pos) },
.i64 => DTypeValue{ .i64 = self.get(i64, pos) },
.f32 => DTypeValue{ .f32 = self.get(f32, pos) },
.f64 => DTypeValue{ .f64 = self.get(f64, pos) },
};
}
pub fn getItem(self: Self, comptime T: type) T {
if (self.numel != 1) {
@panic("Can only call getItem on single-element Tensors");
}
return self.getBuffer(T)[self.offset];
}
pub fn set(self: Self, comptime T: type, pos: []const u64, value: T) void {
var offset: u64 = self.offset;
for (self.getStrides()) |stride, i| {
offset += pos[i] * stride;
}
var buf = self.getBuffer(T);
buf[offset] = value;
}
pub fn setValue(self: Self, pos: []const u64, value: DTypeValue) void {
switch (value) {
.u8 => self.set(u8, pos, value.u8),
.u64 => self.set(u64, pos, value.u64),
.i64 => self.set(i64, pos, value.i64),
.f32 => self.set(f32, pos, value.f32),
.f64 => self.set(f64, pos, value.f64),
}
}
pub fn retain(self: Self) void {
if (self.ref_counter) |ref_counter| {
ref_counter.increment();
}
}
pub fn release(self: Self) void {
if (self.ref_counter) |ref_counter| {
// std.debug.print("release {}\n", .{ref_counter});
if (ref_counter.decrement()) {
// std.debug.print("dealloc: {*}\n", .{self.ref_counter});
switch (self.buffer_union) {
.u8 => self.alc.?.free(self.buffer_union.u8),
.u64 => self.alc.?.free(self.buffer_union.u64),
.i64 => self.alc.?.free(self.buffer_union.i64),
.f32 => self.alc.?.free(self.buffer_union.f32),
.f64 => self.alc.?.free(self.buffer_union.f64),
}
self.alc.?.destroy(ref_counter);
}
}
}
fn formatElem(self: Self, writer: anytype, pos: []u64) !void {
switch (self.buffer_union) {
.u8 => try std.fmt.format(writer, "{}", .{self.get(u8, pos)}),
.u64 => try std.fmt.format(writer, "{}", .{self.get(u64, pos)}),
.i64 => try std.fmt.format(writer, "{}", .{self.get(i64, pos)}),
.f32 => try std.fmt.format(writer, "{}", .{self.get(f32, pos)}),
.f64 => try std.fmt.format(writer, "{}", .{self.get(f64, pos)}),
}
}
pub fn format(
self: Self,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
if (fmt.len != 0) {
@compileError("Unknown format character: '" ++ f ++ "'");
}
try std.fmt.format(writer, "Array(dtype={}", .{dtypeToTypeName(self.buffer_union)});
try writer.writeAll(", shape=(");
for (self.getShape()) |s, i| {
try std.fmt.format(writer, "{}", .{s});
if (i < self.ndim - 1) {
try writer.writeAll(", ");
}
}
try writer.writeAll("), strides=(");
for (self.getStrides()) |s, i| {
try std.fmt.format(writer, "{}", .{s});
if (i < self.ndim - 1) {
try writer.writeAll(", ");
}
}
try writer.writeAll("), data=");
if (self.ndim == 0) {
try self.formatElem(writer, &[_]u64{});
return;
}
try writer.writeAll("\n");
var pos = [_]u64{0} ** maxNumDim;
const final_dim = self.ndim - 1;
var dim: u64 = 0;
var index: u64 = 0;
try writer.writeAll(" [\n");
var shape = self.getShape();
while (true) {
while (dim < final_dim) : (dim += 1) {
try indent(writer, 2 * (dim + 2));
try writer.writeAll("[");
if (dim < final_dim - 1) {
try writer.writeAll("\n");
}
}
var get_pos = pos[0..self.ndim];
try self.formatElem(writer, get_pos);
if (pos[final_dim] < shape[final_dim] - 1) {
try writer.writeAll(", ");
}
index += 1;
pos[final_dim] += 1;
// carry
while (dim > 0) {
if (pos[dim] >= shape[dim]) {
pos[dim] = 0;
pos[dim - 1] += 1;
if (dim < final_dim) {
try indent(writer, 2 * (dim + 1));
}
try writer.writeAll("]");
try writer.writeAll(",\n");
dim -= 1;
} else {
break;
}
}
if (dim == 0 and pos[dim] == self.getShape()[dim]) {
break;
}
}
try writer.writeAll(" ]\n)");
}
};
test "scalar" {
const a = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{}, 1.0);
defer a.release();
std.testing.expect(a.numel == 1);
std.testing.expect(a.ndim == 0);
var it = a.createIterator();
while (it.next()) |offset| {
std.testing.expect(offset == 0);
}
const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{}, 2.0);
defer b.release();
const c = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{}, 0.0);
defer c.release();
plus(a, b, c);
const d = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{}, 3.0);
defer d.release();
std.testing.expect(equal(c, d));
}
test "scalar_broadcast" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer a.release();
const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{}, 2.0);
defer b.release();
const c = b.expandView(&[_]u64{ 2, 1, 3 });
expectContiguous(c, false);
const d = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 0.0);
defer d.release();
plus(a, c, d);
var e_data = [_]f32{ 3.0, 4.0, 5.0, 6.0, 7.0, 8.0 };
const e = Array.fromBuffer(f32, &[_]u64{ 2, 1, 3 }, &e_data);
std.testing.expect(equal(d, e));
const f = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 0.0);
defer f.release();
plus(a, b, f);
std.testing.expect(equal(f, e));
}
test "narrow" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0, 1.0);
defer a.release();
expectContiguous(a, true);
const b = a.narrowView(&[_]u64{ 0, 0, 0 }, &[_]u64{ 1, 1, 1 });
expectContiguous(b, true);
std.testing.expect(b.numel == 1);
var buf = b.getBuffer(f32);
std.testing.expect(buf[0] == 0.0);
const c = a.narrowView(&[_]u64{ 0, 0, 0 }, &[_]u64{ 2, 3, 4 });
expectContiguous(c, true);
std.testing.expect(equal(a, c));
const d = a.narrowView(&[_]u64{ 1, 2, 3 }, &[_]u64{ 1, 1, 1 });
expectContiguous(d, true);
const e = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 1, 1, 1 }, 23.0);
defer e.release();
std.testing.expect(equal(d, e));
const f = a.narrowView(&[_]u64{ 1, 0, 0 }, &[_]u64{ 1, 3, 4 });
const g = f.narrowView(&[_]u64{ 0, 2, 0 }, &[_]u64{ 1, 1, 4 });
const h = g.narrowView(&[_]u64{ 0, 0, 3 }, &[_]u64{ 1, 1, 1 });
std.testing.expect(equal(h, e));
const i = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0, 1.0);
defer i.release();
// select row
const k = i.narrowView(&[_]u64{ 0, 0 }, &[_]u64{ 1, 3 });
expectContiguous(k, true);
// select column
const j = i.narrowView(&[_]u64{ 0, 0 }, &[_]u64{ 2, 1 });
expectContiguous(j, false);
}
test "reshape_view" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0, 1.0);
defer a.release();
expectContiguous(a, true);
const b = a.reshapeView(&[_]u64{ 1, 2, 1, 3, 1, 4, 1 });
std.testing.expect(a.numel == b.numel);
std.testing.expect(allcloseBuffers(f32, a, b, 0.0, 0.0));
}
test "zero size array" {
const a = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{0}, 1.0);
defer a.release();
std.testing.expect(a.numel == 0);
std.testing.expect(a.ndim == 1);
var it = a.createIterator();
while (it.next()) |offset| {
std.testing.expect(false);
}
}
test "alloc_with_string" {
var output = try Array.allocWithString(f32, std.testing.allocator, "[[1, 2], [3, 4], [5, 6]]");
defer output.release();
var expected_output = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 3, 2 }, 1.0, 1.0);
defer expected_output.release();
std.testing.expect(equal(output, expected_output));
}
test "range" {
var a_buf = [_]f32{ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 };
const a = Array.fromBuffer(f32, &[_]u64{ 2, 1, 3 }, &a_buf);
defer a.release();
var b_buf = [_]f32{ 0.0, 1.0, 2.0, 3.0 };
const b = Array.fromBuffer(f32, &[_]u64{ 1, 4, 1 }, &b_buf);
defer b.release();
const ar = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 0.0, 1.0);
defer ar.release();
std.testing.expect(equal(a, ar));
const br = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 1, 4, 1 }, 0.0, 1.0);
defer br.release();
std.testing.expect(equal(b, br));
}
test "strides" {
var strides = [_]u64{0} ** 3;
Array.calculateStrides(&[_]u64{ 2, 1, 3 }, &strides);
std.testing.expect(std.mem.eql(u64, &strides, &[_]u64{ 3, 3, 1 }));
var t = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 3, 2, 1, 3 }, 0.0);
defer t.release();
var si = t.createIterator();
var index: u64 = 0;
while (si.next()) |v| {
std.debug.assert(index == v);
index += 1;
}
std.testing.expect(index == t.numel);
}
test "expand" {
const t = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 1, 3, 1 }, 1.0);
defer t.release();
const t2 = t.expandView(&[_]u64{ 2, 8, 3, 9 });
std.testing.expect(t.numel == 2 * 1 * 3 * 1);
std.testing.expect(t2.numel == 2 * 8 * 3 * 9);
var si = t2.createIterator();
var numel: u64 = 0;
while (si.next()) |_| {
numel += 1;
}
std.testing.expect(numel == t2.numel);
}
test "contiguous" {
const t = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0);
defer t.release();
std.testing.expect(t.is_contiguous);
const et = t.expandView(&[_]u64{ 2, 4, 3 });
std.testing.expect(!et.is_contiguous);
    std.testing.expect(std.mem.eql(f32, t.getBuffer(f32), et.getBuffer(f32)));
const cet = try copyAlloc(std.testing.allocator, et);
defer cet.release();
std.testing.expect(cet.is_contiguous);
}
test "setvalue" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0, 1.0);
defer a.release();
const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0);
defer b.release();
var pos = [_]u64{ 1, 1, 1 };
b.setValue(&pos, a.getValue(&pos));
std.testing.expect(b.get(f32, &pos) == 3 * 4 + 4 + 1);
}
test "format" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0, 1.0);
defer a.release();
std.debug.print("a: {}\n", .{a});
}
pub fn assertDimsAreTheSame(x: Array, y: Array) void {
if (x.ndim != y.ndim) {
@panic("Arrays dimension counts do not match");
}
}
pub fn assertTypesAreTheSame(x: Array, y: Array) void {
if (x.dtype != y.dtype) {
@panic("Arrays have different dtypes");
}
}
pub fn assertShapesAreTheSame(x: anytype, y: anytype) void {
if (!std.mem.eql(u64, x.getShape(), y.getShape())) {
@panic("Arrays have differing shape");
}
}
fn assertContiguous(x: anytype) void {
if (!x.is_contiguous) {
@panic("Array is not contiguous");
}
}
fn checkContiguous(x: anytype) bool {
if (x.numel == 0) {
return true;
}
var it = x.createIterator();
var prev_offset = it.next().?;
while (it.next()) |offset| {
if (prev_offset + 1 != offset) {
return false;
}
prev_offset = offset;
}
return true;
}
pub fn zerosAlloc(alc: *std.mem.Allocator, dtype: DType, shape: []const u64) !Array {
return switch (dtype) {
.u8 => try Array.allocWithValue(u8, alc, shape, 0),
.u64 => try Array.allocWithValue(u64, alc, shape, 0),
.i64 => try Array.allocWithValue(i64, alc, shape, 0),
.f32 => try Array.allocWithValue(f32, alc, shape, 0.0),
.f64 => try Array.allocWithValue(f64, alc, shape, 0.0),
};
}
pub fn zerosLikeAlloc(alc: *std.mem.Allocator, arr: Array) !Array {
return zerosAlloc(alc, arr.dtype, arr.getShape());
}
pub fn onesAlloc(alc: *std.mem.Allocator, dtype: DType, shape: []const u64) !Array {
return switch (dtype) {
.u8 => try Array.allocWithValue(u8, alc, shape, 1),
.u64 => try Array.allocWithValue(u64, alc, shape, 1),
.i64 => try Array.allocWithValue(i64, alc, shape, 1),
.f32 => try Array.allocWithValue(f32, alc, shape, 1.0),
.f64 => try Array.allocWithValue(f64, alc, shape, 1.0),
};
}
pub fn onesLikeAlloc(alc: *std.mem.Allocator, arr: Array) !Array {
return onesAlloc(alc, arr.dtype, arr.getShape());
}
pub fn scalarAlloc(alc: *std.mem.Allocator, dtype: DType, value: f64) !Array {
return switch (dtype) {
.u8 => try Array.allocWithValue(u8, alc, &[_]u64{}, @floatToInt(u8, value)),
.u64 => try Array.allocWithValue(u64, alc, &[_]u64{}, @floatToInt(u64, value)),
.i64 => try Array.allocWithValue(i64, alc, &[_]u64{}, @floatToInt(i64, value)),
.f32 => try Array.allocWithValue(f32, alc, &[_]u64{}, @floatCast(f32, value)),
.f64 => try Array.allocWithValue(f64, alc, &[_]u64{}, value),
};
}
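// Editorial example (not from the original source): a minimal sketch of the
// zeros/ones/scalar allocation helpers above, assuming `equal` and the Array
// constructors behave as in the neighbouring tests.
test "alloc_helpers_example" {
    const z = try zerosAlloc(std.testing.allocator, .f32, &[_]u64{ 2, 2 });
    defer z.release();
    const z_expected = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 2 }, 0.0);
    defer z_expected.release();
    std.testing.expect(equal(z, z_expected));
    const o = try onesLikeAlloc(std.testing.allocator, z);
    defer o.release();
    const o_expected = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 2 }, 1.0);
    defer o_expected.release();
    std.testing.expect(equal(o, o_expected));
    const s = try scalarAlloc(std.testing.allocator, .i64, 3.0);
    defer s.release();
    std.testing.expect(s.ndim == 0);
    std.testing.expect(s.getBuffer(i64)[0] == 3);
}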
fn fillUniformBuffer(comptime T: type, dst: Array, r: *std.rand.Random, low: Array, high: Array) void {
var low_it = low.createIterator();
var high_it = high.createIterator();
var dst_it = dst.createIterator();
var low_buf = low.getBuffer(T);
var high_buf = high.getBuffer(T);
var dst_buf = dst.getBuffer(T);
while (dst_it.next()) |dst_offset| {
var low_offset = low_it.next().?;
var high_offset = high_it.next().?;
var l = low_buf[low_offset];
var h = high_buf[high_offset];
if (l >= h) {
@panic("Low is greater than or equal to high");
}
dst_buf[dst_offset] = switch (T) {
u8, i64, u64 => r.intRangeAtMost(T, l, h),
f32, f64 => r.float(T) * (h - l) + l,
            else => std.debug.panic("Invalid type {s}", .{@typeName(T)}),
};
}
}
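/// Fill `dst` with uniformly distributed random values between `low` and `high`,
/// which are broadcast to `dst`'s shape; integer dtypes sample an inclusive range,
/// float dtypes sample [low, high)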
pub fn fillUniform(dst: Array, r: *std.rand.Random, low: Array, high: Array) void {
assertTypesAreTheSame(dst, low);
assertTypesAreTheSame(dst, high);
const low_expanded = low.expandView(dst.getShape());
const high_expanded = high.expandView(dst.getShape());
switch (dst.dtype) {
.u8 => fillUniformBuffer(u8, dst, r, low_expanded, high_expanded),
.u64 => fillUniformBuffer(u64, dst, r, low_expanded, high_expanded),
.i64 => fillUniformBuffer(i64, dst, r, low_expanded, high_expanded),
.f32 => fillUniformBuffer(f32, dst, r, low_expanded, high_expanded),
.f64 => fillUniformBuffer(f64, dst, r, low_expanded, high_expanded),
}
}
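// Editorial example (not from the original source): a sketch of fillUniform with
// scalar bounds; it assumes std.rand.DefaultPrng exposes a `random` field, as in
// the Zig version this file targets.
test "fill_uniform_example" {
    const low = try scalarAlloc(std.testing.allocator, .f32, 0.0);
    defer low.release();
    const high = try scalarAlloc(std.testing.allocator, .f32, 1.0);
    defer high.release();
    const dst = try zerosAlloc(std.testing.allocator, .f32, &[_]u64{ 2, 3 });
    defer dst.release();
    var prng = std.rand.DefaultPrng.init(0);
    fillUniform(dst, &prng.random, low, high);
    for (dst.getBuffer(f32)) |v| {
        std.testing.expect(0.0 <= v and v < 1.0);
    }
}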
fn copyBufferIterator(comptime T: type, src: Array, dst: Array) void {
var src_it = src.createIterator();
var dst_it = dst.createIterator();
var src_buf = src.getBuffer(T);
var dst_buf = dst.getBuffer(T);
while (src_it.next()) |src_offset| {
var dst_offset = dst_it.next().?;
dst_buf[dst_offset] = src_buf[src_offset];
}
}
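/// Copy `src` into `dst`, broadcasting `src` to `dst`'s shape; both Arrays must have the same dtype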
pub fn copy(src: Array, dst: Array) void {
assertTypesAreTheSame(src, dst);
const src_expanded = src.expandView(dst.getShape());
switch (src_expanded.buffer_union) {
.u8 => copyBufferIterator(u8, src_expanded, dst),
.u64 => copyBufferIterator(u64, src_expanded, dst),
.i64 => copyBufferIterator(i64, src_expanded, dst),
.f32 => copyBufferIterator(f32, src_expanded, dst),
.f64 => copyBufferIterator(f64, src_expanded, dst),
}
}
pub fn copyAlloc(alc: *std.mem.Allocator, src: Array) !Array {
var dst = try zerosLikeAlloc(alc, src);
copy(src, dst);
return dst;
}
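// Editorial example (not from the original source): copy broadcasts `src` to the
// destination shape, so a (1, 3) row can fill a (2, 3) destination.
test "copy_broadcast_example" {
    const src = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 1, 3 }, 0.0, 1.0);
    defer src.release();
    const dst = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0);
    defer dst.release();
    copy(src, dst);
    var expected_data = [_]f32{ 0.0, 1.0, 2.0, 0.0, 1.0, 2.0 };
    const expected = Array.fromBuffer(f32, &[_]u64{ 2, 3 }, &expected_data);
    std.testing.expect(equal(dst, expected));
}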
fn castBufferIterator(comptime SrcT: type, comptime DstT: type, src: Array, dst: Array) void {
var src_it = src.createIterator();
var dst_it = dst.createIterator();
var src_buf = src.getBuffer(SrcT);
var dst_buf = dst.getBuffer(DstT);
while (src_it.next()) |src_offset| {
var dst_offset = dst_it.next().?;
var src_val = src_buf[src_offset];
var dst_val = switch (@typeInfo(DstT)) {
.Int => switch (@typeInfo(SrcT)) {
.Int => @intCast(DstT, src_val),
.Float => @floatToInt(DstT, src_val),
else => @panic("unknown type"),
},
.Float => switch (@typeInfo(SrcT)) {
.Int => @intToFloat(DstT, src_val),
.Float => @floatCast(DstT, src_val),
else => @panic("unknown type"),
},
else => @panic("unknown type"),
};
dst_buf[dst_offset] = dst_val;
}
}
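/// Convert the elements of `src` to the dtype of `dst`; both Arrays must have the same shape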
pub fn cast(src: Array, dst: Array) void {
assertDimsAreTheSame(src, dst);
assertShapesAreTheSame(src, dst);
switch (src.buffer_union) {
.u8 => switch (dst.buffer_union) {
.u8 => castBufferIterator(u8, u8, src, dst),
.u64 => castBufferIterator(u8, u64, src, dst),
.i64 => castBufferIterator(u8, i64, src, dst),
.f32 => castBufferIterator(u8, f32, src, dst),
.f64 => castBufferIterator(u8, f64, src, dst),
},
.u64 => switch (dst.buffer_union) {
.u8 => castBufferIterator(u64, u8, src, dst),
.u64 => castBufferIterator(u64, u64, src, dst),
.i64 => castBufferIterator(u64, i64, src, dst),
.f32 => castBufferIterator(u64, f32, src, dst),
.f64 => castBufferIterator(u64, f64, src, dst),
},
.i64 => switch (dst.buffer_union) {
.u8 => castBufferIterator(i64, u8, src, dst),
.u64 => castBufferIterator(i64, u64, src, dst),
.i64 => castBufferIterator(i64, i64, src, dst),
.f32 => castBufferIterator(i64, f32, src, dst),
.f64 => castBufferIterator(i64, f64, src, dst),
},
.f32 => switch (dst.buffer_union) {
.u8 => castBufferIterator(f32, u8, src, dst),
.u64 => castBufferIterator(f32, u64, src, dst),
.i64 => castBufferIterator(f32, i64, src, dst),
.f32 => castBufferIterator(f32, f32, src, dst),
.f64 => castBufferIterator(f32, f64, src, dst),
},
.f64 => switch (dst.buffer_union) {
.u8 => castBufferIterator(f64, u8, src, dst),
.u64 => castBufferIterator(f64, u64, src, dst),
.i64 => castBufferIterator(f64, i64, src, dst),
.f32 => castBufferIterator(f64, f32, src, dst),
.f64 => castBufferIterator(f64, f64, src, dst),
},
}
}
pub fn castAlloc(alc: *std.mem.Allocator, src: Array, dtype: DType) !Array {
if (src.dtype == dtype) {
src.retain();
return src;
}
var dst = try zerosAlloc(alc, dtype, src.getShape());
cast(src, dst);
return dst;
}
test "cast" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer a.release();
const b = try Array.allocWithValue(f64, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 0.0);
defer b.release();
const c = try Array.allocWithRange(f64, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer c.release();
cast(a, b);
std.testing.expect(equal(b, c));
}
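// Editorial example (not from the original source): castAlloc returns a retained
// reference when the dtype already matches and otherwise allocates a converted copy.
test "cast_alloc_example" {
    const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{3}, 0.0, 1.0);
    defer a.release();
    const b = try castAlloc(std.testing.allocator, a, .i64);
    defer b.release();
    var expected_data = [_]i64{ 0, 1, 2 };
    const expected = Array.fromBuffer(i64, &[_]u64{3}, &expected_data);
    std.testing.expect(equal(b, expected));
    const c = try castAlloc(std.testing.allocator, a, .f32);
    defer c.release();
    std.testing.expect(equal(a, c));
}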
pub const DimArray = struct {
ndim: u64,
array: [maxNumDim]u64 = [_]u64{0} ** maxNumDim,
const Self = @This();
pub fn init(shape: []const u64) Self {
var inst = Self{ .ndim = shape.len };
std.mem.copy(u64, &inst.array, shape);
return inst;
}
pub fn getSlice(self: *Self) []u64 {
return self.array[0..self.ndim];
}
pub fn getConstSlice(self: *const Self) []const u64 {
return self.array[0..self.ndim];
}
};
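// Editorial example (not from the original source): DimArray stores up to maxNumDim
// sizes, but getSlice only exposes the first `ndim` entries.
test "dim_array_example" {
    var d = DimArray.init(&[_]u64{ 2, 3, 4 });
    std.testing.expect(d.ndim == 3);
    std.testing.expect(std.mem.eql(u64, d.getSlice(), &[_]u64{ 2, 3, 4 }));
}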
pub fn binaryElementwiseShape(x_shape: []const u64, y_shape: []const u64) DimArray {
var shape = DimArray{ .ndim = std.math.max(x_shape.len, y_shape.len) };
// use the shape with a higher number of dimensions to populate our output shape
var larger_shape = x_shape;
var smaller_shape = y_shape;
if (y_shape.len > x_shape.len) {
larger_shape = y_shape;
smaller_shape = x_shape;
}
std.mem.copy(u64, shape.array[0..], larger_shape);
var offset: u64 = larger_shape.len - smaller_shape.len;
for (smaller_shape) |s| {
if (!(shape.array[offset] == s or shape.array[offset] == 1 or s == 1)) {
@panic("Shapes for each dimension must be equal or one of them must be 1");
}
shape.array[offset] = std.math.max(shape.array[offset], s);
offset += 1;
}
return shape;
}
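// Editorial example (not from the original source): broadcasting pads the shorter
// shape on the left and takes the larger extent wherever one side is 1.
test "binary_elementwise_shape_example" {
    var shape = binaryElementwiseShape(&[_]u64{ 2, 1, 3 }, &[_]u64{ 4, 1 });
    std.testing.expect(std.mem.eql(u64, shape.getSlice(), &[_]u64{ 2, 4, 3 }));
}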
pub const BinaryElementwiseOperation = enum {
plus,
minus,
times,
divide,
power,
max,
gt,
gte,
lt,
lte,
eq,
};
fn boolToValue(comptime T: type, b: bool) T {
if (b) {
return 1;
} else {
return 0;
}
}
fn binaryElementwiseOperationOnBuffers(comptime T: type, x_in: Array, y_in: Array, z_out: Array, op: BinaryElementwiseOperation) void {
var si_x = x_in.createIterator();
var si_y = y_in.createIterator();
var si_z = z_out.createIterator();
var x_buf = x_in.getBuffer(T);
var y_buf = y_in.getBuffer(T);
var z_buf = z_out.getBuffer(T);
while (si_x.next()) |x_offset| {
var y_offset = si_y.next().?;
var z_offset = si_z.next().?;
z_buf[z_offset] = switch (op) {
.plus => x_buf[x_offset] + y_buf[y_offset],
.minus => x_buf[x_offset] - y_buf[y_offset],
.times => x_buf[x_offset] * y_buf[y_offset],
.divide => switch (@typeInfo(T)) {
.Int => @divTrunc(x_buf[x_offset], y_buf[y_offset]),
else => x_buf[x_offset] / y_buf[y_offset],
},
.power => std.math.pow(T, x_buf[x_offset], y_buf[y_offset]),
.max => std.math.max(x_buf[x_offset], y_buf[y_offset]),
.eq => boolToValue(T, x_buf[x_offset] == y_buf[y_offset]),
.gt => boolToValue(T, x_buf[x_offset] > y_buf[y_offset]),
.gte => boolToValue(T, x_buf[x_offset] >= y_buf[y_offset]),
.lt => boolToValue(T, x_buf[x_offset] < y_buf[y_offset]),
.lte => boolToValue(T, x_buf[x_offset] <= y_buf[y_offset]),
};
}
}
fn binaryElementwiseOperation(x_in: Array, y_in: Array, z_out: Array, op: BinaryElementwiseOperation) void {
assertTypesAreTheSame(x_in, y_in);
assertTypesAreTheSame(x_in, z_out);
var out_shape = binaryElementwiseShape(x_in.getShape(), y_in.getShape());
const x_in_expanded = x_in.expandView(out_shape.getSlice());
const y_in_expanded = y_in.expandView(out_shape.getSlice());
assertShapesAreTheSame(x_in_expanded, y_in_expanded);
if (!std.mem.eql(u64, z_out.getShape(), out_shape.getSlice())) {
@panic("Attempted to use output Array with wrong shape");
}
switch (x_in.dtype) {
.u8 => binaryElementwiseOperationOnBuffers(u8, x_in_expanded, y_in_expanded, z_out, op),
.u64 => binaryElementwiseOperationOnBuffers(u64, x_in_expanded, y_in_expanded, z_out, op),
.i64 => binaryElementwiseOperationOnBuffers(i64, x_in_expanded, y_in_expanded, z_out, op),
.f32 => binaryElementwiseOperationOnBuffers(f32, x_in_expanded, y_in_expanded, z_out, op),
.f64 => binaryElementwiseOperationOnBuffers(f64, x_in_expanded, y_in_expanded, z_out, op),
}
}
/// Add 2 Arrays together, putting the result in the 3rd
pub fn plus(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.plus);
}
test "plus" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer a.release();
const b = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 1, 4, 1 }, 1.0, 1.0);
defer b.release();
const c = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 4, 3 }, 0.0);
defer c.release();
plus(a, b, c);
std.testing.expect(c.numel == 2 * 4 * 3);
var d_data = [_]f32{ 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 5.0, 6.0, 7.0, 6.0, 7.0, 8.0, 7.0, 8.0, 9.0, 8.0, 9.0, 10.0 };
const d = Array.fromBuffer(f32, &[_]u64{ 2, 4, 3 }, &d_data);
std.testing.expect(equal(c, d));
const e = try plusAlloc(std.testing.allocator, a, b);
defer e.release();
std.testing.expect(equal(c, e));
}
pub fn minus(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.minus);
}
pub fn times(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.times);
}
pub fn divide(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.divide);
}
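// Editorial example (not from the original source): a combined sketch of the
// minus/times/divide wrappers above via their *Alloc variants defined below.
test "minus_times_divide_example" {
    const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{3}, 1.0, 1.0);
    defer a.release();
    const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{3}, 2.0);
    defer b.release();
    const c = try minusAlloc(std.testing.allocator, a, b);
    defer c.release();
    var c_expected_data = [_]f32{ -1.0, 0.0, 1.0 };
    std.testing.expect(equal(c, Array.fromBuffer(f32, &[_]u64{3}, &c_expected_data)));
    const d = try timesAlloc(std.testing.allocator, a, b);
    defer d.release();
    var d_expected_data = [_]f32{ 2.0, 4.0, 6.0 };
    std.testing.expect(equal(d, Array.fromBuffer(f32, &[_]u64{3}, &d_expected_data)));
    const e = try divideAlloc(std.testing.allocator, a, b);
    defer e.release();
    var e_expected_data = [_]f32{ 0.5, 1.0, 1.5 };
    std.testing.expect(equal(e, Array.fromBuffer(f32, &[_]u64{3}, &e_expected_data)));
}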
pub fn power(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.power);
}
test "power" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer a.release();
const b = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 1, 4, 1 }, 1.0, 1.0);
defer b.release();
const c = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 4, 3 }, 0.0);
defer c.release();
power(a, b, c);
std.testing.expect(c.numel == 2 * 4 * 3);
var d_data = [_]f32{
1.000e+00, 2.000e+00, 3.000e+00, 1.000e+00, 4.000e+00, 9.000e+00,
1.000e+00, 8.000e+00, 2.700e+01, 1.000e+00, 1.600e+01, 8.100e+01,
4.000e+00, 5.000e+00, 6.000e+00, 1.600e+01, 2.500e+01, 3.600e+01,
6.400e+01, 1.250e+02, 2.160e+02, 2.560e+02, 6.250e+02, 1.296e+03,
};
const d = Array.fromBuffer(f32, &[_]u64{ 2, 4, 3 }, &d_data);
std.testing.expect(equal(c, d));
const e = try powerAlloc(std.testing.allocator, a, b);
defer e.release();
std.testing.expect(equal(c, e));
}
pub fn max(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.max);
}
test "max" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0, 1.0);
defer a.release();
const b = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 5.0, -1.0);
defer b.release();
const c = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0);
defer c.release();
max(a, b, c);
var d_data = [_]f32{
5.0, 4.0, 3.0, 3.0, 4.0, 5.0,
};
const d = Array.fromBuffer(f32, &[_]u64{ 2, 3 }, &d_data);
std.testing.expect(equal(c, d));
const e = try maxAlloc(std.testing.allocator, a, b);
defer e.release();
std.testing.expect(equal(e, d));
}
pub fn gt(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.gt);
}
pub fn gte(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.gte);
}
pub fn eq(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.eq);
}
pub fn lt(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.lt);
}
pub fn lte(x_in: Array, y_in: Array, z_out: Array) void {
binaryElementwiseOperation(x_in, y_in, z_out, BinaryElementwiseOperation.lte);
}
test "comparison" {
const ComparisonFn = fn (a: Array, b: Array, c: Array) void;
const ComparisonFnAlloc = fn (alc: *std.mem.Allocator, a: Array, b: Array) error{OutOfMemory}!Array;
const TestCase = struct {
func: ComparisonFn,
func_alloc: ComparisonFnAlloc,
expected_output: []const u8,
};
var testcases = [_]TestCase{
TestCase{
.func = gt,
.func_alloc = gtAlloc,
.expected_output = "[0.0, 0.0, 0.0, 1.0, 1.0]",
},
TestCase{
.func = gte,
.func_alloc = gteAlloc,
.expected_output = "[0.0, 0.0, 1.0, 1.0, 1.0]",
},
TestCase{
.func = eq,
.func_alloc = eqAlloc,
.expected_output = "[0.0, 0.0, 1.0, 0.0, 0.0]",
},
TestCase{
.func = lt,
.func_alloc = ltAlloc,
.expected_output = "[1.0, 1.0, 0.0, 0.0, 0.0]",
},
TestCase{
.func = lte,
.func_alloc = lteAlloc,
.expected_output = "[1.0, 1.0, 1.0, 0.0, 0.0]",
},
};
for (testcases) |tc| {
const first_input = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{5}, 0.0, 1.0);
defer first_input.release();
const second_input = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{5}, 4.0, -1.0);
defer second_input.release();
const output = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{5}, 0.0);
defer output.release();
tc.func(first_input, second_input, output);
var expected_output = try Array.allocWithString(f32, std.testing.allocator, tc.expected_output);
defer expected_output.release();
std.testing.expect(equal(output, expected_output));
const output2 = try tc.func_alloc(std.testing.allocator, first_input, second_input);
defer output2.release();
std.testing.expect(equal(output2, expected_output));
}
}
/// Apply a binary elementwise operation to two Arrays (with broadcasting), allocating a result Array for the output
pub fn binaryElementwiseOperationAlloc(alc: *std.mem.Allocator, x: Array, y: Array, op: BinaryElementwiseOperation) !Array {
var z_shape = binaryElementwiseShape(x.getShape(), y.getShape());
var z = try zerosAlloc(alc, x.dtype, z_shape.getSlice());
binaryElementwiseOperation(x, y, z, op);
return z;
}
pub fn plusAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.plus);
}
pub fn minusAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.minus);
}
pub fn timesAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.times);
}
pub fn divideAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.divide);
}
pub fn powerAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.power);
}
pub fn maxAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.max);
}
pub fn eqAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.eq);
}
pub fn gtAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.gt);
}
pub fn gteAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.gte);
}
pub fn ltAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.lt);
}
pub fn lteAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
return try binaryElementwiseOperationAlloc(alc, x, y, BinaryElementwiseOperation.lte);
}
pub const UnaryElementwiseOperation = enum {
uminus,
log,
log2,
exp,
};
fn unaryElementwiseOperationOnBuffers(comptime T: type, x_in: Array, z_out: Array, op: UnaryElementwiseOperation) void {
var si_x = x_in.createIterator();
var si_z = z_out.createIterator();
var x_buf = x_in.getBuffer(T);
var z_buf = z_out.getBuffer(T);
while (si_x.next()) |x_offset| {
var z_offset = si_z.next().?;
z_buf[z_offset] = switch (@typeInfo(T)) {
.Int => |int| switch (op) {
.uminus => switch (int.is_signed) {
true => -x_buf[x_offset],
false => @panic("uminus on unsigned int"),
},
.log => @floatToInt(T, std.math.log(f64, std.math.e, @intToFloat(f64, x_buf[x_offset]))),
.log2 => @floatToInt(T, std.math.log(f64, 2.0, @intToFloat(f64, x_buf[x_offset]))),
.exp => @floatToInt(T, std.math.pow(f64, std.math.e, @intToFloat(f64, x_buf[x_offset]))),
},
.Float => switch (op) {
.uminus => -x_buf[x_offset],
.log => std.math.log(T, std.math.e, x_buf[x_offset]),
.log2 => std.math.log(T, 2.0, x_buf[x_offset]),
.exp => std.math.pow(T, std.math.e, x_buf[x_offset]),
},
else => @panic("unrecognized type"),
};
}
}
fn unaryElementwiseOperation(x_in: Array, z_out: Array, op: UnaryElementwiseOperation) void {
assertTypesAreTheSame(x_in, z_out);
if (!std.mem.eql(u64, z_out.getShape(), x_in.getShape())) {
@panic("Attempted to use output Array with wrong shape");
}
switch (x_in.dtype) {
.u8 => unaryElementwiseOperationOnBuffers(u8, x_in, z_out, op),
.u64 => unaryElementwiseOperationOnBuffers(u64, x_in, z_out, op),
.i64 => unaryElementwiseOperationOnBuffers(i64, x_in, z_out, op),
.f32 => unaryElementwiseOperationOnBuffers(f32, x_in, z_out, op),
.f64 => unaryElementwiseOperationOnBuffers(f64, x_in, z_out, op),
}
}
pub fn unaryElementwiseOperationAlloc(alc: *std.mem.Allocator, x: Array, op: UnaryElementwiseOperation) !Array {
var z = try zerosAlloc(alc, x.dtype, x.getShape());
unaryElementwiseOperation(x, z, op);
return z;
}
pub fn uplus(x_in: Array, z_out: Array) void {
copy(x_in, z_out);
}
pub fn uplusAlloc(alc: *std.mem.Allocator, x: Array) !Array {
return try copyAlloc(alc, x);
}
test "uplus" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer a.release();
const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 0.0);
defer b.release();
uplus(a, b);
std.testing.expect(equal(a, b));
}
pub fn uminus(x_in: Array, z_out: Array) void {
unaryElementwiseOperation(x_in, z_out, UnaryElementwiseOperation.uminus);
}
pub fn uminusAlloc(alc: *std.mem.Allocator, x: Array) !Array {
return try unaryElementwiseOperationAlloc(alc, x, UnaryElementwiseOperation.uminus);
}
pub fn log(x_in: Array, z_out: Array) void {
    unaryElementwiseOperation(x_in, z_out, UnaryElementwiseOperation.log);
}
pub fn logAlloc(alc: *std.mem.Allocator, x: Array) !Array {
return try unaryElementwiseOperationAlloc(alc, x, UnaryElementwiseOperation.log);
}
pub fn log2(x_in: Array, z_out: Array) void {
unaryElementwiseOperation(x_in, z_out, UnaryElementwiseOperation.log2);
}
pub fn log2Alloc(alc: *std.mem.Allocator, x: Array) !Array {
return try unaryElementwiseOperationAlloc(alc, x, UnaryElementwiseOperation.log2);
}
pub fn exp(x_in: Array, z_out: Array) void {
unaryElementwiseOperation(x_in, z_out, UnaryElementwiseOperation.exp);
}
pub fn expAlloc(alc: *std.mem.Allocator, x: Array) !Array {
return try unaryElementwiseOperationAlloc(alc, x, UnaryElementwiseOperation.exp);
}
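// Editorial example (not from the original source): log of ones is zeros and exp of
// zeros is ones, checked with a small tolerance via `allclose` defined later in this file.
test "log_exp_example" {
    const one = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{3}, 1.0);
    defer one.release();
    const zero = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{3}, 0.0);
    defer zero.release();
    const a = try logAlloc(std.testing.allocator, one);
    defer a.release();
    std.testing.expect(allclose(a, zero, 1e-6, 1e-6));
    const b = try expAlloc(std.testing.allocator, zero);
    defer b.release();
    std.testing.expect(allclose(b, one, 1e-6, 1e-6));
}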
fn transposeBuffer(comptime T: type, src: Array, dst: Array) void {
var src_strides = src.getStrides();
var src_strides_reversed_shape = DimArray.init(src_strides);
reverseSlice(u64, src_strides, src_strides_reversed_shape.getSlice());
var src_it = StridedIterator.init(dst.getShape(), src_strides_reversed_shape.getSlice(), src.offset);
var dst_it = dst.createIterator();
var src_buf = src.getBuffer(T);
var dst_buf = dst.getBuffer(T);
if (&src_buf[0] == &dst_buf[0]) {
@panic("src and dst buf point to the same data, but transpose cannot be run in place");
}
while (src_it.next()) |src_offset| {
var dst_offset = dst_it.next().?;
dst_buf[dst_offset] = src_buf[src_offset];
}
}
pub fn reverseSlice(comptime T: type, in: []const T, out: []T) void {
for (in) |s, i| {
out[out.len - 1 - i] = s;
}
}
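/// Reverse the dimensions of `x_in`, writing the result into `z_out`, which must have the reversed shape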
pub fn transpose(x_in: Array, z_out: Array) void {
assertTypesAreTheSame(x_in, z_out);
var reversed_shape = DimArray.init(x_in.getShape());
reverseSlice(u64, x_in.getShape(), reversed_shape.getSlice());
if (!std.mem.eql(u64, z_out.getShape(), reversed_shape.getSlice())) {
@panic("output array for transpose has incorrect shape");
}
switch (x_in.dtype) {
.u8 => transposeBuffer(u8, x_in, z_out),
.u64 => transposeBuffer(u64, x_in, z_out),
.i64 => transposeBuffer(i64, x_in, z_out),
.f32 => transposeBuffer(f32, x_in, z_out),
.f64 => transposeBuffer(f64, x_in, z_out),
}
}
pub fn transposeAlloc(alc: *std.mem.Allocator, x: Array) !Array {
var reversed_shape = DimArray.init(x.getShape());
reverseSlice(u64, x.getShape(), reversed_shape.getSlice());
var z = try zerosAlloc(alc, x.dtype, reversed_shape.getSlice());
transpose(x, z);
return z;
}
test "transpose" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0, 1.0);
defer a.release();
const b = try zerosAlloc(std.testing.allocator, a.dtype, &[_]u64{ 4, 3, 2 });
defer b.release();
transpose(a, b);
var c_data = [_]f32{
0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10,
22, 3, 15, 7, 19, 11, 23,
};
const c = Array.fromBuffer(f32, &[_]u64{ 4, 3, 2 }, &c_data);
std.testing.expect(equal(b, c));
}
fn copyBufferDirect(comptime T: type, src: Array, dst: Array) void {
var src_buf = src.getBuffer(T);
var dst_buf = dst.getBuffer(T);
std.mem.copy(T, dst_buf, src_buf);
}
/// Reshape an Array; the output must have the same number of elements as the input
pub fn reshape(in: Array, out: Array) void {
assertTypesAreTheSame(in, out);
if (in.numel != out.numel) {
@panic("Input and output Arrays have differing number of elements");
}
switch (in.buffer_union) {
.u8 => copyBufferDirect(u8, in, out),
.u64 => copyBufferDirect(u64, in, out),
.i64 => copyBufferDirect(i64, in, out),
.f32 => copyBufferDirect(f32, in, out),
.f64 => copyBufferDirect(f64, in, out),
}
}
/// Reshape an Array to the provided shape, allocating an Array to hold the result
pub fn reshapeAlloc(alc: *std.mem.Allocator, in: Array, shape: []const u64) !Array {
var out = try zerosAlloc(alc, in.dtype, shape);
reshape(in, out);
return out;
}
test "reshape" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0, 1.0);
defer a.release();
const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 1, 3, 2, 1 }, 0.0);
defer b.release();
reshape(a, b);
std.testing.expect(std.mem.eql(f32, a.getBuffer(f32), b.getBuffer(f32)));
var shape = [_]u64{ 1, 3, 2, 1 };
const c = try reshapeAlloc(std.testing.allocator, a, &shape);
defer c.release();
std.testing.expect(std.mem.eql(f32, a.getBuffer(f32), c.getBuffer(f32)));
var a_it = a.createIterator();
var b_it = b.createIterator();
var a_buf = a.getBuffer(f32);
var b_buf = b.getBuffer(f32);
while (a_it.next()) |a_offset| {
var b_offset = b_it.next().?;
std.testing.expect(a_buf[a_offset] == b_buf[b_offset]);
}
}
fn mtimesShape(x_shape: []const u64, y_shape: []const u64) DimArray {
if (x_shape.len != 2 or y_shape.len != 2) {
@panic("Matmul arguments must have 2 dimensions each");
}
if (x_shape[1] != y_shape[0]) {
@panic("Dimension mismatch for matrix times");
}
return DimArray.init(&[2]u64{ x_shape[0], y_shape[1] });
}
pub fn mtimes(x_in: Array, y_in: Array, z_out: Array) void {
assertContiguous(z_out);
assertTypesAreTheSame(x_in, y_in);
assertTypesAreTheSame(x_in, z_out);
if (!(x_in.getShape()[1] == y_in.getShape()[0])) {
@panic("Shapes for reduced dimension must be equal");
}
var out_shape = mtimesShape(x_in.getShape(), y_in.getShape());
if (!std.mem.eql(u64, z_out.getShape(), out_shape.getSlice())) {
@panic("Output shape incorrect");
}
switch (x_in.buffer_union) {
.u8 => mtimesBuffers(u8, x_in, y_in, z_out),
.u64 => mtimesBuffers(u64, x_in, y_in, z_out),
.i64 => mtimesBuffers(i64, x_in, y_in, z_out),
.f32 => mtimesBuffers(f32, x_in, y_in, z_out),
.f64 => mtimesBuffers(f64, x_in, y_in, z_out),
}
}
fn mklMtimesBuffers(comptime T: type, x_in: Array, y_in: Array, z_out: Array) void {
var m = x_in.getShape()[0];
var k = x_in.getShape()[1];
var n = y_in.getShape()[1];
var x_buf = x_in.getBuffer(T);
var y_buf = y_in.getBuffer(T);
var z_buf = z_out.getBuffer(T);
var lda: u64 = k;
var ldb: u64 = n;
var ldc: u64 = n;
var alpha: T = 1.0;
var beta: T = 1.0;
switch (T) {
f32 => mkl.cblas_sgemm(&x_buf[x_in.offset], &y_buf[y_in.offset], &z_buf[z_out.offset], lda, ldb, ldc, m, n, k, alpha, beta),
f64 => mkl.cblas_dgemm(&x_buf[x_in.offset], &y_buf[y_in.offset], &z_buf[z_out.offset], lda, ldb, ldc, m, n, k, alpha, beta),
else => @compileError("Unsupported type for MKL mtimes"),
}
}
fn mklMtimes(x_in: Array, y_in: Array, z_out: Array) void {
assertTypesAreTheSame(x_in, y_in);
assertTypesAreTheSame(x_in, z_out);
assertContiguous(x_in);
assertContiguous(y_in);
assertContiguous(z_out);
if (!(x_in.getShape()[1] == y_in.getShape()[0])) {
@panic("Shapes for reduced dimension must be equal");
}
var out_shape = mtimesShape(x_in.getShape(), y_in.getShape());
if (!std.mem.eql(u64, z_out.getShape(), out_shape.getSlice())) {
@panic("Output shape incorrect");
}
switch (x_in.dtype) {
.f32 => mklMtimesBuffers(f32, x_in, y_in, z_out),
.f64 => mklMtimesBuffers(f64, x_in, y_in, z_out),
else => std.debug.panic("Unsupported dtype for MKL mtimes {}", .{x_in.dtype}),
}
}
test "cblas_sgemm" {
if (USE_MKL) {
{
var a: f32 = 2.0;
var b: f32 = 2.0;
var c: f32 = 2.0;
mkl.cblas_sgemm(&a, &b, &c, 4, 4, 4, 1, 1, 1, 1.0, 1.0);
std.testing.expect(c == 6.0);
}
{
var a = try Array.allocWithString(f32, std.testing.allocator, "[[1, 2], [3, 4], [5, 6]]");
defer a.release();
var b = try Array.allocWithString(f32, std.testing.allocator, "[[1, 2, 3, 4], [5, 6, 7, 8]]");
defer b.release();
var c = try Array.allocWithString(f32, std.testing.allocator, "[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]");
defer c.release();
mklMtimes(a, b, c);
var expected_output = try Array.allocWithString(f32, std.testing.allocator, "[[12, 16, 20, 24], [28, 36, 44, 52], [44, 56, 68, 80]]");
defer expected_output.release();
std.testing.expect(equal(c, expected_output));
}
}
}
fn mtimesBuffers(comptime T: type, x_in: Array, y_in: Array, z_out: Array) void {
var z_r: u64 = 0;
while (z_r < z_out.getShape()[0]) : (z_r += 1) {
var z_c: u64 = 0;
while (z_c < z_out.getShape()[1]) : (z_c += 1) {
var total: T = z_out.get(T, &[2]u64{ z_r, z_c });
var i: u64 = 0;
while (i < x_in.getShape()[1]) : (i += 1) {
total += x_in.get(T, &[2]u64{ z_r, i }) * y_in.get(T, &[2]u64{ i, z_c });
}
z_out.set(T, &[2]u64{ z_r, z_c }, total);
}
}
}
pub fn mtimesAlloc(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
var z_shape = mtimesShape(x.getShape(), y.getShape());
var z = try zerosAlloc(alc, x.dtype, z_shape.getSlice());
if (USE_MKL) {
mklMtimes(x, y, z);
} else {
mtimes(x, y, z);
}
return z;
}
test "mtimes" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0, 1.0);
defer a.release();
const b = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 3, 2 }, 0.0, 1.0);
defer b.release();
const c = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 2 }, 0.0);
defer c.release();
mtimes(a, b, c);
var d_data = [_]f32{ 10, 13, 28, 40 };
const d = Array.fromBuffer(f32, &[_]u64{ 2, 2 }, &d_data);
std.testing.expect(equal(c, d));
const e = try mtimesAlloc(std.testing.allocator, a, b);
defer e.release();
std.testing.expect(equal(c, e));
}
fn reduceShape(x_shape: []const u64, dims: []const u64, keepdims: bool) DimArray {
var ndim: u64 = 0;
if (keepdims) {
ndim = x_shape.len;
} else {
ndim = x_shape.len - dims.len;
}
for (dims) |d| {
if (d >= x_shape.len) {
@panic("invalid dimension for reduce");
}
}
var shape = DimArray{ .ndim = ndim };
var i: u64 = 0;
for (x_shape) |s, d| {
if (keepdims) {
if (contains(u64, dims, d)) {
shape.array[i] = 1;
} else {
shape.array[i] = s;
}
i += 1;
} else {
if (!contains(u64, dims, d)) {
shape.array[i] = s;
i += 1;
}
}
}
return shape;
}
pub const ReduceOperation = enum {
sum,
max,
mean,
};
fn reduceBuffers(comptime T: type, in: Array, in_it: *StridedIterator, out: Array, out_offset: u64, op: ReduceOperation) void {
    var r: T = 0;
    var count: u64 = 0;
    var in_buf = in.getBuffer(T);
    var out_buf = out.getBuffer(T);
    while (in_it.next()) |offset| {
        r = switch (op) {
            .sum, .mean => r + in_buf[offset],
            // seed the running maximum from the first element so all-negative inputs reduce correctly
            .max => if (count == 0) in_buf[offset] else std.math.max(r, in_buf[offset]),
        };
        count += 1;
    }
    if (op == .mean and count > 0) {
        // divide by the number of elements that were actually reduced
        r = switch (@typeInfo(T)) {
            .Int => @divTrunc(r, @intCast(T, count)),
            .Float => r / @intToFloat(T, count),
            else => @panic("invalid type"),
        };
    }
    out_buf[out_offset] = r;
}
/// Sum an Array over the given dimensions, removing the reduced dimensions from the output
pub fn reduceSum(in: Array, out: Array, dims: []const u64) void {
reduce(in, out, dims, false, .sum);
}
/// Sum an Array over the given dimensions, keeping the reduced dimensions with size 1
pub fn keepSum(in: Array, out: Array, dims: []const u64) void {
reduce(in, out, dims, true, .sum);
}
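/// Reduce `in` over the dimensions in `dims` using `op`, writing the result into `out`;
/// when `keepdims` is true the reduced dimensions are kept with size 1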
pub fn reduce(in: Array, out: Array, dims: []const u64, keepdims: bool, op: ReduceOperation) void {
assertTypesAreTheSame(in, out);
if (in.ndim < dims.len) {
@panic("Invalid number of dims to sum across");
}
var out_ndim = in.ndim;
if (!keepdims) {
out_ndim = in.ndim - dims.len;
}
if (out_ndim != out.ndim) {
@panic("Output has wrong number of dimensions");
}
var out_shape = reduceShape(in.getShape(), dims, keepdims);
if (!std.mem.eql(u64, out.getShape(), out_shape.getSlice())) {
@panic("Output has wrong shape");
}
var iterShape = DimArray.init(in.getShape());
var reducedShape = DimArray.init(in.getShape());
var d: u64 = 0;
while (d < in.ndim) : (d += 1) {
if (contains(u64, dims, d)) {
iterShape.array[d] = 1;
} else {
reducedShape.array[d] = 1;
}
}
var in_it = PositionIterator.init(iterShape.getSlice());
var out_it = out.createIterator();
while (in_it.next()) |pos| {
const inner = in.narrowView(pos, reducedShape.getSlice());
var inner_it = inner.createIterator();
var out_offset = out_it.next().?;
switch (in.buffer_union) {
.u8 => reduceBuffers(u8, in, &inner_it, out, out_offset, op),
.u64 => reduceBuffers(u64, in, &inner_it, out, out_offset, op),
.i64 => reduceBuffers(i64, in, &inner_it, out, out_offset, op),
.f32 => reduceBuffers(f32, in, &inner_it, out, out_offset, op),
.f64 => reduceBuffers(f64, in, &inner_it, out, out_offset, op),
}
}
}
/// Take the maximum of an Array over the given dimensions, removing the reduced dimensions from the output
pub fn reduceMax(in: Array, out: Array, dims: []const u64) void {
reduce(in, out, dims, false, .max);
}
/// Take the maximum of an Array over the given dimensions, keeping the reduced dimensions with size 1
pub fn keepMax(in: Array, out: Array, dims: []const u64) void {
reduce(in, out, dims, true, .max);
}
pub fn reduceMean(in: Array, out: Array, dims: []const u64) void {
reduce(in, out, dims, false, .mean);
}
pub fn keepMean(in: Array, out: Array, dims: []const u64) void {
reduce(in, out, dims, true, .mean);
}
fn findBroadcastDims(input_shape: []const u64, output_shape: []const u64) DimArray {
if (output_shape.len < input_shape.len) {
@panic("invalid shapes");
}
// copy the initial dims from the output shape (that are missing on the input shape)
var offset = output_shape.len - input_shape.len;
var result = DimArray{ .ndim = 0 };
while (result.ndim < offset) {
result.array[result.ndim] = result.ndim;
result.ndim += 1;
}
// add in any existing dims that are broadcast
for (input_shape) |_, d| {
if (input_shape[d] == 1 and output_shape[d + offset] != 1) {
result.array[result.ndim] = d + offset;
result.ndim += 1;
}
}
return result;
}
test "find_bcast_dims" {
{
var dims_shape = findBroadcastDims(&[_]u64{3}, &[_]u64{ 3, 3 });
std.testing.expect(std.mem.eql(u64, dims_shape.getSlice(), &[_]u64{0}));
}
{
var dims_shape = findBroadcastDims(&[_]u64{ 1, 3 }, &[_]u64{ 3, 3 });
std.testing.expect(std.mem.eql(u64, dims_shape.getSlice(), &[_]u64{0}));
}
{
var dims_shape = findBroadcastDims(&[_]u64{1}, &[_]u64{ 3, 3 });
std.testing.expect(std.mem.eql(u64, dims_shape.getSlice(), &[_]u64{ 0, 1 }));
}
{
var dims_shape = findBroadcastDims(&[_]u64{ 1, 1 }, &[_]u64{ 3, 3 });
std.testing.expect(std.mem.eql(u64, dims_shape.getSlice(), &[_]u64{ 0, 1 }));
}
{
var dims_shape = findBroadcastDims(&[_]u64{}, &[_]u64{ 3, 3 });
std.testing.expect(std.mem.eql(u64, dims_shape.getSlice(), &[_]u64{ 0, 1 }));
}
}
/// Do a sum where the reduced dimensions are found by following broadcasting rules.
/// This assumes that `out` was previously broadcast to the shape of `in`, so it
/// performs the reverse mapping.
pub fn bcastsum(in: Array, out: Array) void {
// "in" here is the post broadcast shape
// "out" is the pre-broadcast shape
//
// in.shape = (3,3)
// out.shape = (1,3) or (3)
// dims_shape = (0)
//
// in.shape = (3,3)
// out.shape = (1,1)
// dims_shape = (0, 1)
var dims_shape = findBroadcastDims(out.getShape(), in.getShape());
var expanded_shape = DimArray.init(in.getShape());
for (dims_shape.getSlice()) |d| {
expanded_shape.array[d] = 1;
}
var expanded_out = out.reshapeView(expanded_shape.getSlice());
reduce(in, expanded_out, dims_shape.getSlice(), true, ReduceOperation.sum);
}
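// Editorial example (not from the original source): summing a (3, 3) Array back into
// a (1, 3) output reduces over the dimension that was broadcast.
test "bcastsum_example" {
    const in = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 3, 3 }, 1.0);
    defer in.release();
    const out = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 1, 3 }, 0.0);
    defer out.release();
    bcastsum(in, out);
    const expected = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 1, 3 }, 3.0);
    defer expected.release();
    std.testing.expect(equal(out, expected));
}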
pub fn reduceAlloc(alc: *std.mem.Allocator, in: Array, dims: []const u64, keepdims: bool, op: ReduceOperation) !Array {
var out_shape = reduceShape(in.getShape(), dims, keepdims);
var out = try zerosAlloc(alc, in.dtype, out_shape.getSlice());
reduce(in, out, dims, keepdims, op);
return out;
}
pub fn reduceSumAlloc(alc: *std.mem.Allocator, in: Array, dims: []const u64) !Array {
return reduceAlloc(alc, in, dims, false, .sum);
}
pub fn keepSumAlloc(alc: *std.mem.Allocator, in: Array, dims: []const u64) !Array {
return reduceAlloc(alc, in, dims, true, .sum);
}
test "reduce_sum" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0, 1.0);
defer a.release();
const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0);
defer b.release();
reduceSum(a, b, &[_]u64{2});
var c_data = [_]f32{ 6.0, 22.0, 38.0, 54.0, 70.0, 86.0 };
const c = Array.fromBuffer(f32, &[_]u64{ 2, 3 }, &c_data);
std.testing.expect(equal(b, c));
const d = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 4 }, 0.0);
defer d.release();
reduceSum(a, d, &[_]u64{1});
var e_data = [_]f32{ 12.0, 15.0, 18.0, 21.0, 48.0, 51.0, 54.0, 57.0 };
const e = Array.fromBuffer(f32, &[_]u64{ 2, 4 }, &e_data);
std.testing.expect(equal(d, e));
const f = try reduceSumAlloc(std.testing.allocator, a, &[_]u64{2});
defer f.release();
std.testing.expect(equal(c, f));
}
test "keep_sum" {
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0, 1.0);
defer a.release();
const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 3, 1 }, 0.0);
defer b.release();
keepSum(a, b, &[_]u64{2});
var c_data = [_]f32{ 6.0, 22.0, 38.0, 54.0, 70.0, 86.0 };
const c = Array.fromBuffer(f32, &[_]u64{ 2, 3, 1 }, &c_data);
std.testing.expect(equal(b, c));
}
pub fn reduceMaxAlloc(alc: *std.mem.Allocator, in: Array, dims: []const u64) !Array {
return reduceAlloc(alc, in, dims, false, .max);
}
pub fn keepMaxAlloc(alc: *std.mem.Allocator, in: Array, dims: []const u64) !Array {
return reduceAlloc(alc, in, dims, true, .max);
}
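// Editorial example (not from the original source): reducing over dimension 1 takes
// the maximum of each row.
test "reduce_max_example" {
    const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3 }, 0.0, 1.0);
    defer a.release();
    const b = try reduceMaxAlloc(std.testing.allocator, a, &[_]u64{1});
    defer b.release();
    var expected_data = [_]f32{ 2.0, 5.0 };
    const expected = Array.fromBuffer(f32, &[_]u64{2}, &expected_data);
    std.testing.expect(equal(b, expected));
}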
pub fn reduceMeanAlloc(alc: *std.mem.Allocator, in: Array, dims: []const u64) !Array {
return reduceAlloc(alc, in, dims, false, .mean);
}
pub fn keepMeanAlloc(alc: *std.mem.Allocator, in: Array, dims: []const u64) !Array {
return reduceAlloc(alc, in, dims, true, .mean);
}
fn argMaxBuffer(comptime T: type, in: Array, in_it: *PositionIterator, dim: u64) u64 {
var m: T = 0;
var idx: u64 = 0;
while (in_it.next()) |pos| {
var v = in.get(T, pos);
if (v > m or pos[dim] == 0) {
m = v;
idx = pos[dim];
}
}
return idx;
}
fn argMax(in: Array, out: Array, dim: u64, keepdims: bool) void {
if (out.dtype != .u64) {
@panic("Output dtype must be u64");
}
var out_ndim = in.ndim;
if (!keepdims) {
out_ndim = in.ndim - 1;
}
if (out_ndim != out.ndim) {
@panic("Output has wrong number of dimensions");
}
var out_shape = reduceShape(in.getShape(), &[_]u64{dim}, keepdims);
if (!std.mem.eql(u64, out.getShape(), out_shape.getSlice())) {
@panic("Output has wrong shape");
}
var iterShape = DimArray.init(in.getShape());
var reducedShape = DimArray.init(in.getShape());
var d: u64 = 0;
while (d < in.ndim) : (d += 1) {
if (dim == d) {
iterShape.array[d] = 1;
} else {
reducedShape.array[d] = 1;
}
}
var in_it = PositionIterator.init(iterShape.getSlice());
var out_it = out.createIterator();
var out_buf = out.getBuffer(u64);
while (in_it.next()) |pos| {
const inner = in.narrowView(pos, reducedShape.getSlice());
var inner_it = PositionIterator.init(inner.getShape());
var out_offset = out_it.next().?;
out_buf[out_offset] = switch (inner.buffer_union) {
.u8 => argMaxBuffer(u8, inner, &inner_it, dim),
.u64 => argMaxBuffer(u64, inner, &inner_it, dim),
.i64 => argMaxBuffer(i64, inner, &inner_it, dim),
.f32 => argMaxBuffer(f32, inner, &inner_it, dim),
.f64 => argMaxBuffer(f64, inner, &inner_it, dim),
};
}
}
pub fn reduceArgMax(in: Array, out: Array, dim: u64) void {
argMax(in, out, dim, false);
}
pub fn keepArgMax(in: Array, out: Array, dim: u64) void {
argMax(in, out, dim, true);
}
pub fn reduceArgMaxAlloc(alc: *std.mem.Allocator, in: Array, dim: u64) !Array {
var out_shape = reduceShape(in.getShape(), &[_]u64{dim}, false);
var out = try zerosAlloc(alc, .u64, out_shape.getSlice());
argMax(in, out, dim, false);
return out;
}
pub fn keepArgMaxAlloc(alc: *std.mem.Allocator, in: Array, dim: u64) !Array {
var out_shape = reduceShape(in.getShape(), &[_]u64{dim}, true);
var out = try zerosAlloc(alc, .u64, out_shape.getSlice());
argMax(in, out, dim, true);
return out;
}
test "argmax" {
var input_buf = [_]f32{ 1.0, 0.0, 0.0, 2.0, 1.0, 1.0 };
const input = Array.fromBuffer(f32, &[_]u64{ 1, 3, 2 }, &input_buf);
    const output = try Array.allocWithValue(u64, std.testing.allocator, &[_]u64{ 1, 3 }, 0);
defer output.release();
reduceArgMax(input, output, 2);
var expected_output_buf = [_]u64{ 0, 1, 0 };
const expected_output = Array.fromBuffer(u64, &[_]u64{ 1, 3 }, &expected_output_buf);
std.testing.expect(equal(output, expected_output));
var output2 = try reduceArgMaxAlloc(std.testing.allocator, input, 2);
defer output2.release();
std.testing.expect(equal(output2, expected_output));
}
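/// Gather values from `in` along `dim` at the positions given by the u64 Array `index`,
/// writing them into `out`; `out` and `index` must have the same shape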
pub fn gather(in: Array, out: Array, dim: u64, index: Array) void {
assertTypesAreTheSame(in, out);
if (in.ndim != index.ndim) {
@panic("Index must have same number of dimensions as input");
}
if (index.dtype != .u64) {
@panic("Index must have u64 dtype");
}
for (in.getShape()) |s, d| {
if (d != dim and index.getShape()[d] > s) {
@panic("Index has invalid shape");
}
}
assertShapesAreTheSame(out, index);
var out_pos_it = PositionIterator.init(out.getShape());
while (out_pos_it.next()) |out_pos| {
var in_pos_shape = DimArray.init(out_pos);
var in_pos = in_pos_shape.getSlice();
in_pos[dim] = index.get(u64, out_pos);
out.setValue(out_pos, in.getValue(in_pos));
}
}
pub fn gatherAlloc(alc: *std.mem.Allocator, in: Array, dim: u64, index: Array) !Array {
var out = try zerosAlloc(alc, in.dtype, index.getShape());
gather(in, out, dim, index);
return out;
}
test "gather" {
const input = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0, 1.0);
defer input.release();
var index_data = [_]u64{ 0, 1, 2, 3, 2, 1 };
const index = Array.fromBuffer(u64, &[_]u64{ 2, 3 }, &index_data);
defer index.release();
const index_expanded = index.reshapeView(&[_]u64{ 2, 3, 1 });
const output = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 3, 1 }, 0.0);
defer output.release();
gather(input, output, 2, index_expanded);
var expected_output_data = [_]f32{ 0, 5, 10, 15, 18, 21 };
const expected_output = Array.fromBuffer(f32, &[_]u64{ 2, 3, 1 }, &expected_output_data);
std.testing.expect(equal(output, expected_output));
const output2 = try gatherAlloc(std.testing.allocator, input, 2, index_expanded);
defer output2.release();
std.testing.expect(equal(output2, expected_output));
}
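/// Scatter values from `in` into `out` along `dim` at the positions given by the u64 Array `index`;
/// `in` and `index` must have the same shape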
pub fn scatter(in: Array, out: Array, dim: u64, index: Array) void {
assertTypesAreTheSame(in, out);
if (out.ndim != index.ndim) {
@panic("Index must have same number of dimensions as output");
}
for (out.getShape()) |s, d| {
if (d != dim and index.getShape()[d] > s) {
@panic("Index has invalid shape");
}
}
assertShapesAreTheSame(in, index);
var in_pos_it = PositionIterator.init(in.getShape());
while (in_pos_it.next()) |in_pos| {
var out_pos_shape = DimArray.init(in_pos);
var out_pos = out_pos_shape.getSlice();
out_pos[dim] = index.get(u64, in_pos);
out.setValue(out_pos, in.getValue(in_pos));
}
}
test "scatter" {
const input = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 3, 1 }, 0.0, 1.0);
defer input.release();
var index_data = [_]u64{ 0, 1, 2, 3, 2, 1 };
const index = Array.fromBuffer(u64, &[_]u64{ 2, 3 }, &index_data);
defer index.release();
const index_expanded = index.reshapeView(&[_]u64{ 2, 3, 1 });
const output = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{ 2, 3, 4 }, 0.0);
defer output.release();
scatter(input, output, 2, index_expanded);
var expected_output_data = [_]f32{ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0, 5, 0, 0 };
const expected_output = Array.fromBuffer(f32, &[_]u64{ 2, 3, 4 }, &expected_output_data);
std.testing.expect(equal(output, expected_output));
}
fn abs(comptime T: type, value: T) T {
if (value > 0) {
return value;
} else {
return -value;
}
}
fn castToF64(comptime T: type, value: T) f64 {
return switch (@typeInfo(T)) {
.Int => @intToFloat(f64, value),
else => value,
};
}
fn allcloseBuffers(comptime T: type, x: Array, y: Array, rtol: f64, atol: f64) bool {
var it_x = x.createIterator();
var x_buf = x.getBuffer(T);
var y_buf = y.getBuffer(T);
var it_y = y.createIterator();
while (it_x.next()) |x_offset| {
var y_offset = it_y.next().?;
var x_value = castToF64(T, x_buf[x_offset]);
var y_value = castToF64(T, y_buf[y_offset]);
var close = abs(f64, x_value - y_value) <= atol + rtol * abs(f64, y_value);
if (!close) {
return false;
}
}
return true;
}
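/// Return true if `x` and `y` have the same shape and exactly equal elements; dtypes must match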
pub fn equal(x: Array, y: Array) bool {
assertDimsAreTheSame(x, y);
assertTypesAreTheSame(x, y);
if (!std.mem.eql(u64, x.getShape(), y.getShape())) {
return false;
}
return switch (x.dtype) {
.u8 => allcloseBuffers(u8, x, y, 0.0, 0.0),
.u64 => allcloseBuffers(u64, x, y, 0.0, 0.0),
.i64 => allcloseBuffers(i64, x, y, 0.0, 0.0),
.f32 => allcloseBuffers(f32, x, y, 0.0, 0.0),
.f64 => allcloseBuffers(f64, x, y, 0.0, 0.0),
};
}
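/// Return true if `x` and `y` have the same shape and every pair of elements satisfies
/// |x - y| <= atol + rtol * |y|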
pub fn allclose(x: Array, y: Array, rtol: f64, atol: f64) bool {
assertDimsAreTheSame(x, y);
assertTypesAreTheSame(x, y);
if (!std.mem.eql(u64, x.getShape(), y.getShape())) {
return false;
}
return switch (x.dtype) {
.u8 => allcloseBuffers(u8, x, y, rtol, atol),
.u64 => allcloseBuffers(u64, x, y, rtol, atol),
.i64 => allcloseBuffers(i64, x, y, rtol, atol),
.f32 => allcloseBuffers(f32, x, y, rtol, atol),
.f64 => allcloseBuffers(f64, x, y, rtol, atol),
};
}
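// Editorial example (not from the original source): allclose tolerates small
// differences that `equal` (zero tolerance) rejects.
test "allclose_example" {
    const a = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{2}, 1.0);
    defer a.release();
    const b = try Array.allocWithValue(f32, std.testing.allocator, &[_]u64{2}, 1.0001);
    defer b.release();
    std.testing.expect(allclose(a, b, 1e-3, 1e-3));
    std.testing.expect(!allclose(a, b, 0.0, 1e-6));
    std.testing.expect(!equal(a, b));
}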
fn expectContiguous(x: anytype, is_contiguous: bool) void {
std.testing.expect(x.is_contiguous == is_contiguous);
std.testing.expect(x.is_contiguous == checkContiguous(x));
}
fn dumpStruct(s: anytype) void {
    const type_info = @typeInfo(@TypeOf(s));
    comptime std.debug.assert(type_info == .Struct);
    // print every field name and value of the struct
    comptime var i = 0;
    inline while (i < type_info.Struct.fields.len) : (i += 1) {
        std.debug.print("name {s} value {}\n", .{ type_info.Struct.fields[i].name, @field(s, type_info.Struct.fields[i].name) });
    }
}
fn getValue(name: []const u8, names: [][]const u8, values: []i64) i64 {
var i: u64 = 0;
while (i < names.len) {
if (std.mem.eql(u8, name, names[i])) {
return values[i];
}
i += 1;
}
@panic("did not find value, this should never happen");
}
const MAX_TOKENS = 128;
const MAX_ITEMS = MAX_TOKENS;
const MAX_VALUES = 2 * MAX_TOKENS;
const MAX_OPERATIONS = MAX_TOKENS;
const MAX_FUNCTION_ARGS = 3;
const Operation = struct {
function: Function,
input_indices: [MAX_FUNCTION_ARGS]u64,
output_index: u64,
};
fn functionToNumArgs(f: Function) u64 {
return switch (f) {
.plus => 2,
.minus => 2,
.uplus => 1,
.uminus => 1,
.times => 2,
.mtimes => 2,
.divide => 2,
.mdivide => 2,
.power => 2,
.mpower => 2,
.eq => 2,
.gt => 2,
.gte => 2,
.lt => 2,
.lte => 2,
.transpose => 1,
.ctranspose => 1,
.f32 => 1,
.detach => 1,
.log => 1,
.log2 => 1,
.exp => 1,
.max => 2,
.reduce_sum => 2,
.keep_sum => 2,
.reduce_max => 2,
.keep_max => 2,
.reduce_mean => 2,
.keep_mean => 2,
.reduce_arg_max => 2,
.keep_arg_max => 2,
.gather => 3,
};
}
fn functionToAutocastArgs(f: Function) []const u64 {
return switch (f) {
.plus => &[_]u64{ 0, 1 },
.minus => &[_]u64{ 0, 1 },
.uplus => &[_]u64{},
.uminus => &[_]u64{},
.times => &[_]u64{ 0, 1 },
.mtimes => &[_]u64{ 0, 1 },
.divide => &[_]u64{ 0, 1 },
.mdivide => &[_]u64{ 0, 1 },
.power => &[_]u64{ 0, 1 },
.mpower => &[_]u64{ 0, 1 },
.eq => &[_]u64{ 0, 1 },
.gt => &[_]u64{ 0, 1 },
.gte => &[_]u64{ 0, 1 },
.lt => &[_]u64{ 0, 1 },
.lte => &[_]u64{ 0, 1 },
.transpose => &[_]u64{},
.ctranspose => &[_]u64{},
.f32 => &[_]u64{},
.detach => &[_]u64{},
.log => &[_]u64{},
.log2 => &[_]u64{},
.exp => &[_]u64{},
.max => &[_]u64{ 0, 1 },
.reduce_sum => &[_]u64{},
.keep_sum => &[_]u64{},
.reduce_max => &[_]u64{},
.keep_max => &[_]u64{},
.reduce_mean => &[_]u64{},
.keep_mean => &[_]u64{},
.reduce_arg_max => &[_]u64{},
.keep_arg_max => &[_]u64{},
.gather => &[_]u64{},
};
}
const CompiledExpression = struct {
operations: [MAX_OPERATIONS]Operation,
operation_count: u64,
numbers: [MAX_VALUES]Number,
number_count: u64,
index_count: u64,
output_index: u64,
};
const TokenType = enum {
operator,
comma,
number,
identifier,
leftParen,
rightParen,
};
const Token = struct {
typ: TokenType,
val: []const u8,
};
const EOS = 0;
/// Scanner converts input string into tokens
const Scanner = struct {
input: []const u8,
start: u64 = 0, // start of current item
pos: u64 = 0, // current pos within the input
const Self = @This();
fn init(input: []const u8) Self {
return Self{ .input = input };
}
fn next(self: *Self) u8 {
var c: u8 = undefined;
if (self.pos >= self.input.len) {
c = EOS;
} else {
c = self.input[self.pos];
}
self.pos += 1;
return c;
}
fn backup(self: *Self) void {
self.pos -= 1;
}
fn peek(self: *Self) u8 {
var c = self.next();
self.backup();
return c;
}
fn ignore(self: *Self) void {
self.start = self.pos;
}
fn accept(self: *Self, chars: []const u8) bool {
var c = self.peek();
if (contains(u8, chars, c)) {
_ = self.next();
return true;
}
return false;
}
fn acceptRun(self: *Self, chars: []const u8) bool {
var result: bool = false;
while (self.accept(chars)) {
result = true;
}
return result;
}
fn lexChar(self: *Self) void {
_ = self.next();
}
fn lexOperator(self: *Self) void {
const operators = "*/^+-'";
if (self.accept("><=")) {
_ = self.accept("=");
} else if (self.accept(".")) {
_ = self.accept(operators);
} else {
_ = self.accept(operators);
}
}
fn lexNumber(self: *Self) void {
const digits = "0123456789";
_ = self.acceptRun(digits);
if (self.accept(".")) {
_ = self.acceptRun(digits);
}
if (self.accept("eE")) {
_ = self.accept("+-");
_ = self.acceptRun(digits);
}
}
fn lexIdentifier(self: *Self) void {
if (self.accept("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")) {
_ = self.acceptRun("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890_");
}
}
fn nextLexeme(self: *Self) ?Token {
// skip whitespace
_ = self.acceptRun(" ");
self.ignore();
// look at next char
var c = self.peek();
if (c == EOS) {
return null;
}
// extract the next item
var typ: TokenType = undefined;
if (c == ',' or c == '(' or c == ')') {
// comma, this is a standalone item
self.lexChar();
if (c == ',') {
typ = TokenType.comma;
} else if (c == '(') {
typ = TokenType.leftParen;
} else if (c == ')') {
typ = TokenType.rightParen;
}
} else if ('0' <= c and c <= '9') {
self.lexNumber();
typ = TokenType.number;
        } else if (c == '+' or c == '-' or c == '^' or c == '*' or c == '/' or c == '.' or c == '\'' or c == '>' or c == '<' or c == '=') {
self.lexOperator();
typ = TokenType.operator;
} else if (('a' <= c and c <= 'z') or ('A' <= c and c <= 'Z')) {
self.lexIdentifier();
typ = TokenType.identifier;
} else {
@panic("unrecognized input");
}
var l = Token{ .typ = typ, .val = self.input[self.start..self.pos] };
self.start = self.pos;
return l;
}
fn getTokens(self: *Self, tokens: []Token) []Token {
var count: u64 = 0;
while (true) {
if (count == tokens.len) {
@panic("token buffer not large enough");
}
if (self.nextLexeme()) |l| {
tokens[count] = l;
} else {
break;
}
count += 1;
}
return tokens[0..count];
}
};
test "scanner" {
var token_buffer: [MAX_TOKENS]Token = undefined;
{
var s = Scanner.init("(-aa +bb^2 .* cc -1.234 ) +");
var tokens = s.getTokens(&token_buffer);
var expected_tokens = [_][]const u8{ "(", "-", "aa", "+", "bb", "^", "2", ".*", "cc", "-", "1.234", ")", "+" };
std.testing.expect(expected_tokens.len == tokens.len);
for (tokens) |l, i| {
std.testing.expect(std.mem.eql(u8, l.val, expected_tokens[i]));
}
}
}
const Function = enum {
plus,
minus,
uplus,
uminus,
times,
mtimes,
divide,
mdivide,
power,
mpower,
transpose,
ctranspose,
detach,
log,
log2,
exp,
eq,
gt,
gte,
lt,
lte,
f32,
max,
reduce_sum,
keep_sum,
reduce_max,
keep_max,
reduce_mean,
keep_mean,
reduce_arg_max,
keep_arg_max,
gather,
};
const Number = union(enum) {
int: i64,
float: f64,
};
const Parenthesis = enum {
left,
right,
};
const Value = struct {
index: u64,
};
const Comma = struct {};
const Item = union(enum) {
value: Value,
number: Number,
operator: Function,
function: Function,
paren: Parenthesis,
comma: Comma,
};
/// Evaluator converts tokens into items
const Evaluator = struct {
tokens: []Token,
arg_names: [][]const u8,
pos: u64 = 0,
const Self = @This();
fn init(tokens: []Token, arg_names: [][]const u8) Self {
return Self{ .tokens = tokens, .arg_names = arg_names };
}
fn nextItem(self: *Self) ?Item {
if (self.pos == self.tokens.len) {
return null;
}
var prev_typ: ?TokenType = null;
if (self.pos > 0) {
prev_typ = self.tokens[self.pos - 1].typ;
}
var l = self.tokens[self.pos];
self.pos += 1;
return switch (l.typ) {
.operator => blk: {
if (std.mem.eql(u8, l.val, "+") or std.mem.eql(u8, l.val, ".+")) {
// differentiate uplus/uminus from plus/minus based on previous token
// operator: unary
// comma: unary
// number: binary
// identifier: binary
// left paren: unary
// right paren: binary
if (prev_typ == null or prev_typ.? == .operator or prev_typ.? == .comma or prev_typ.? == .leftParen) {
break :blk Item{ .operator = .uplus };
} else {
break :blk Item{ .operator = .plus };
}
} else if (std.mem.eql(u8, l.val, "-") or std.mem.eql(u8, l.val, ".-")) {
if (prev_typ == null or prev_typ.? == .operator or prev_typ.? == .comma or prev_typ.? == .leftParen) {
break :blk Item{ .operator = .uminus };
} else {
break :blk Item{ .operator = .minus };
}
} else if (std.mem.eql(u8, l.val, ".*")) {
break :blk Item{ .operator = .times };
} else if (std.mem.eql(u8, l.val, "*")) {
break :blk Item{ .operator = .mtimes };
} else if (std.mem.eql(u8, l.val, "./")) {
break :blk Item{ .operator = .divide };
} else if (std.mem.eql(u8, l.val, "/")) {
break :blk Item{ .operator = .mdivide };
} else if (std.mem.eql(u8, l.val, ".^")) {
break :blk Item{ .operator = .power };
} else if (std.mem.eql(u8, l.val, "^")) {
break :blk Item{ .operator = .mpower };
} else if (std.mem.eql(u8, l.val, "==")) {
break :blk Item{ .operator = .eq };
} else if (std.mem.eql(u8, l.val, ">")) {
break :blk Item{ .operator = .gt };
} else if (std.mem.eql(u8, l.val, ">=")) {
break :blk Item{ .operator = .gte };
} else if (std.mem.eql(u8, l.val, "<")) {
break :blk Item{ .operator = .lt };
} else if (std.mem.eql(u8, l.val, "<=")) {
break :blk Item{ .operator = .lte };
} else if (std.mem.eql(u8, l.val, ".'")) {
break :blk Item{ .operator = .transpose };
} else if (std.mem.eql(u8, l.val, "'")) {
break :blk Item{ .operator = .ctranspose };
} else {
@panic("unrecognized operator");
}
},
.comma => Item{ .comma = Comma{} },
.number => blk: {
if (contains(u8, l.val, '.') or contains(u8, l.val, 'e')) {
var float = std.fmt.parseFloat(f64, l.val) catch @panic("failed to parse float");
break :blk Item{ .number = Number{ .float = float } };
} else {
var int = std.fmt.parseInt(i64, l.val, 10) catch @panic("failed to parse int");
break :blk Item{ .number = Number{ .int = int } };
}
},
.identifier => blk: {
if (std.mem.eql(u8, l.val, "plus")) {
break :blk Item{ .function = .plus };
} else if (std.mem.eql(u8, l.val, "minus")) {
break :blk Item{ .function = .minus };
} else if (std.mem.eql(u8, l.val, "uplus")) {
break :blk Item{ .function = .uplus };
} else if (std.mem.eql(u8, l.val, "uminus")) {
break :blk Item{ .function = .uminus };
} else if (std.mem.eql(u8, l.val, "times")) {
break :blk Item{ .function = .times };
} else if (std.mem.eql(u8, l.val, "mtimes")) {
break :blk Item{ .function = .mtimes };
} else if (std.mem.eql(u8, l.val, "divide")) {
break :blk Item{ .function = .divide };
} else if (std.mem.eql(u8, l.val, "mdivide")) {
break :blk Item{ .function = .mdivide };
} else if (std.mem.eql(u8, l.val, "power")) {
break :blk Item{ .function = .power };
} else if (std.mem.eql(u8, l.val, "mpower")) {
break :blk Item{ .function = .mpower };
} else if (std.mem.eql(u8, l.val, "eq")) {
break :blk Item{ .function = .eq };
} else if (std.mem.eql(u8, l.val, "gt")) {
break :blk Item{ .function = .gt };
} else if (std.mem.eql(u8, l.val, "gte")) {
break :blk Item{ .function = .gte };
} else if (std.mem.eql(u8, l.val, "lt")) {
break :blk Item{ .function = .lt };
} else if (std.mem.eql(u8, l.val, "lte")) {
break :blk Item{ .function = .lte };
} else if (std.mem.eql(u8, l.val, "transpose")) {
break :blk Item{ .function = .transpose };
} else if (std.mem.eql(u8, l.val, "ctranspose")) {
break :blk Item{ .function = .ctranspose };
} else if (std.mem.eql(u8, l.val, "detach")) {
break :blk Item{ .function = .detach };
} else if (std.mem.eql(u8, l.val, "f32")) {
break :blk Item{ .function = .f32 };
} else if (std.mem.eql(u8, l.val, "log")) {
break :blk Item{ .function = .log };
} else if (std.mem.eql(u8, l.val, "log2")) {
break :blk Item{ .function = .log2 };
} else if (std.mem.eql(u8, l.val, "exp")) {
break :blk Item{ .function = .exp };
} else if (std.mem.eql(u8, l.val, "max")) {
break :blk Item{ .function = .max };
} else if (std.mem.eql(u8, l.val, "reduce_sum")) {
break :blk Item{ .function = .reduce_sum };
} else if (std.mem.eql(u8, l.val, "keep_sum")) {
break :blk Item{ .function = .keep_sum };
} else if (std.mem.eql(u8, l.val, "reduce_max")) {
break :blk Item{ .function = .reduce_max };
} else if (std.mem.eql(u8, l.val, "keep_max")) {
break :blk Item{ .function = .keep_max };
} else if (std.mem.eql(u8, l.val, "reduce_mean")) {
break :blk Item{ .function = .reduce_mean };
} else if (std.mem.eql(u8, l.val, "keep_mean")) {
break :blk Item{ .function = .keep_mean };
} else if (std.mem.eql(u8, l.val, "reduce_arg_max")) {
break :blk Item{ .function = .reduce_arg_max };
} else if (std.mem.eql(u8, l.val, "keep_arg_max")) {
break :blk Item{ .function = .keep_arg_max };
} else if (std.mem.eql(u8, l.val, "gather")) {
break :blk Item{ .function = .gather };
} else {
// assume it's the name of an argument
var arg_index: u64 = 0;
while (arg_index < self.arg_names.len) {
if (std.mem.eql(u8, l.val, self.arg_names[arg_index])) {
break;
}
arg_index += 1;
}
if (arg_index == self.arg_names.len) {
@panic("invalid function name or argument missing");
}
break :blk Item{ .value = Value{ .index = arg_index } };
}
},
.leftParen => Item{ .paren = .left },
.rightParen => Item{ .paren = .right },
};
}
fn getItems(self: *Self, items: []Item) []Item {
var count: u64 = 0;
while (true) {
if (count == items.len) {
@panic("Item buffer not large enough");
}
if (self.nextItem()) |item| {
items[count] = item;
} else {
break;
}
count += 1;
}
return items[0..count];
}
};
test "evaluator" {
var token_buffer: [MAX_TOKENS]Token = undefined;
var s = Scanner.init("(-aa +bb^2 .* cc -+1.234e-3 ) +");
var tokens = s.getTokens(&token_buffer);
var arg_names = [_][]const u8{ "aa", "bb", "cc" };
var e = Evaluator.init(tokens, &arg_names);
var item_buffer: [MAX_ITEMS]Item = undefined;
var items = e.getItems(&item_buffer);
var expected_items = [_]Item{
Item{ .paren = .left },
Item{ .operator = .uminus },
Item{ .value = Value{ .index = 0 } },
Item{ .operator = .plus },
Item{ .value = Value{ .index = 1 } },
Item{ .operator = .mpower },
Item{ .number = Number{ .int = 2 } },
Item{ .operator = .times },
Item{ .value = Value{ .index = 2 } },
Item{ .operator = .minus },
Item{ .operator = .uplus },
Item{ .number = Number{ .float = 1.234e-3 } },
Item{ .paren = .right },
Item{ .operator = .plus },
};
std.testing.expect(expected_items.len == items.len);
for (items) |t, i| {
std.testing.expect(itemsEqual(t, expected_items[i]));
}
}
fn Stack(comptime T: type) type {
return struct {
buf: []T,
len: u64 = 0,
const Self = @This();
fn init(buf: []T) Self {
return Self{ .buf = buf };
}
fn push(self: *Self, item: T) void {
self.buf[self.len] = item;
self.len += 1;
}
fn pop(self: *Self) T {
self.len -= 1;
return self.buf[self.len];
}
fn top(self: *Self) T {
if (self.len == 0) {
@panic("attempted to get top of empty stack");
}
return self.buf[self.len - 1];
}
fn getSlice(self: *Self) []T {
return self.buf[0..self.len];
}
};
}
fn operatorPrecedence(op: Function) u8 {
// https://www.mathworks.com/help/matlab/matlab_prog/operator-precedence.html
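// Higher values bind tighter: e.g. `.^` (10) is applied before `.*` (7), which is applied before `+` (6).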
return switch (op) {
.transpose, .ctranspose, .power, .mpower => 10,
.uplus, .uminus => 8,
.times, .mtimes, .divide, .mdivide => 7,
.plus, .minus => 6,
.eq, .gt, .gte, .lt, .lte => 4,
.f32, .log, .log2, .exp, .reduce_sum, .keep_sum, .max, .reduce_max, .keep_max, .reduce_mean, .keep_mean, .reduce_arg_max, .keep_arg_max, .detach, .gather => @panic("not an operator"),
};
}
const Associativity = enum {
left,
right,
};
fn operatorAssociativity(op: Function) Associativity {
return switch (op) {
.uminus, .uplus => .right,
else => .left,
};
}
/// infixToPostfix reorders items from infix to postfix
// https://en.wikipedia.org/wiki/Shunting-yard_algorithm
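// e.g. "aa + bb .* cc" becomes "aa bb cc .* +", since `.*` binds tighter than `+`: operands come first, followed by their operators.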
fn infixToPostfix(items_in: []Item, items_out_buf: []Item) []Item {
var op_buf: [MAX_TOKENS]Item = undefined;
var op_stack = Stack(Item).init(&op_buf);
var output = Stack(Item).init(items_out_buf);
var out_count: u64 = 0;
// https://www.chris-j.co.uk/parsing.php
for (items_in) |item| {
switch (item) {
// If the token is an operand, append it to the postfix output.
.number, .value => {
output.push(item);
},
// If the token is a unary postfix operator, append it to the postfix output.
// We don't have any of these
// If the token is a function token, push it on to the stack.
.function => {
op_stack.push(item);
},
.operator => |op| {
// If the token is a unary prefix operator, push it on to the stack.
if (functionToNumArgs(op) == 1) {
op_stack.push(item);
} else {
// If the token is a binary operator A then
while (op_stack.len > 0) {
var top = op_stack.top();
if (top != .operator) {
break;
}
// If A is left-associative, while there is an operator B of higher or equal precedence than A at the top of the stack, pop B off the stack and append it to the output.
// If A is right-associative, while there is an operator B of higher precedence than A at the top of the stack, pop B off the stack and append it to the output.
var should_move = (operatorAssociativity(op) == .left and operatorPrecedence(top.operator) >= operatorPrecedence(op)) or (operatorAssociativity(op) == .right and operatorPrecedence(top.operator) > operatorPrecedence(op));
if (!should_move) {
break;
}
output.push(op_stack.pop());
}
// Push A onto the stack.
op_stack.push(item);
}
},
// If the token is a function argument separator
// Pop the top element off the stack and append it to the output, until the top element of the stack is an opening bracket
.comma => {
while (op_stack.len > 0) {
// Pop operators off the stack and append them to the output, until the operator at the top of the stack is an opening bracket.
var top = op_stack.top();
if (top == .paren and top.paren == .left) {
break;
}
output.push(op_stack.pop());
}
},
.paren => |paren| {
if (paren == .left) {
// If the token is an opening bracket, then push it onto the stack.
op_stack.push(item);
} else {
// If the token is a closing bracket
var found_left_paren = false;
while (op_stack.len > 0) {
// Pop operators off the stack and append them to the output, until the operator at the top of the stack is an opening bracket.
var top = op_stack.top();
if (top == .paren and top.paren == .left) {
found_left_paren = true;
break;
}
output.push(op_stack.pop());
}
if (!found_left_paren) {
@panic("missing left parenthesis");
}
// Pop the opening bracket off the stack.
_ = op_stack.pop();
// If the token at the top of the stack is a function token, pop it and append it to the output.
if (op_stack.len > 0 and op_stack.top() == .function) {
output.push(op_stack.pop());
}
}
},
}
}
while (op_stack.len > 0) {
output.push(op_stack.pop());
}
return output.buf[0..output.len];
}
test "infixToPostfix" {
var token_buffer: [MAX_TOKENS]Token = undefined;
var s = Scanner.init("(aa + bb) .* cc");
var tokens = s.getTokens(&token_buffer);
var arg_names = [_][]const u8{ "aa", "bb", "cc" };
var e = Evaluator.init(tokens, &arg_names);
var item_buffer: [MAX_ITEMS]Item = undefined;
var items = e.getItems(&item_buffer);
var postfix_item_buffer: [MAX_ITEMS]Item = undefined;
var postfix_items = infixToPostfix(items, &postfix_item_buffer);
var expected_items = [_]Item{
Item{ .value = Value{ .index = 0 } },
Item{ .value = Value{ .index = 1 } },
Item{ .operator = .plus },
Item{ .value = Value{ .index = 2 } },
Item{ .operator = .times },
};
std.testing.expect(expected_items.len == postfix_items.len);
for (postfix_items) |t, i| {
std.testing.expect(itemsEqual(t, expected_items[i]));
}
}
fn itemsEqual(item1: Item, item2: Item) bool {
return switch (item1) {
.value => |value| value.index == item2.value.index,
.number => |number| switch (number) {
.int => |int| int == item2.number.int,
.float => |float| float == item2.number.float,
},
.operator => |operator| operator == item2.operator,
.function => |function| function == item2.function,
.paren => |paren| paren == item2.paren,
.comma => item2 == .comma,
};
}
fn compileExpression(ex: []const u8, arg_names: [][]const u8) CompiledExpression {
if (arg_names.len > MAX_VALUES) {
@panic("too many arguments");
}
var token_buffer: [MAX_TOKENS]Token = undefined;
var s = Scanner.init(ex);
var tokens = s.getTokens(&token_buffer);
var e = Evaluator.init(tokens, arg_names);
var infix_item_buffer: [MAX_ITEMS]Item = undefined;
var infix_items = e.getItems(&infix_item_buffer);
var item_buffer: [MAX_ITEMS]Item = undefined;
var items = infixToPostfix(infix_items, &item_buffer);
if (items.len == 0) {
@panic("no items found");
}
var index_buf: [MAX_VALUES]u64 = undefined;
var index_stack = Stack(u64).init(&index_buf);
var operations_buf: [MAX_OPERATIONS]Operation = undefined;
var operations_stack = Stack(Operation).init(&operations_buf);
var numbers_buf: [MAX_VALUES]Number = undefined;
var numbers_stack = Stack(Number).init(&numbers_buf);
var index_count: u64 = arg_names.len; // we reserve one value slot for each argument
// also reserve a slot for each number literal
// keep an ordered list of the numbers in numbers stack
// keep a mapping from item index to index
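// e.g. for "a + 2" with one argument, slot 0 holds a, slot 1 holds the literal 2, and slot 2 receives the result of the plus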
var number_item_index_to_index: [MAX_VALUES]u64 = [_]u64{0} ** MAX_VALUES;
for (items) |item, item_index| {
if (item == .number) {
numbers_stack.push(item.number);
number_item_index_to_index[item_index] = index_count;
index_count += 1;
}
}
for (items) |item, item_index| {
switch (item) {
.value => |value| {
index_stack.push(value.index);
},
.number => |number| {
index_stack.push(number_item_index_to_index[item_index]);
},
.operator, .function => |f| {
// operator, pop operands from stack
var num_args = functionToNumArgs(f);
if (num_args > MAX_FUNCTION_ARGS) {
@panic("too many arguments for function");
}
// default unused argument slots to 0 so unary operations can safely ignore the second slot
var input_indices: [MAX_FUNCTION_ARGS]u64 = [_]u64{0} ** MAX_FUNCTION_ARGS;
var i: u64 = 0;
while (i < num_args) : (i += 1) {
// since we are popping the stack, if we had "a b -" we want a to be the first value
// and b to be the second one
input_indices[num_args - 1 - i] = index_stack.pop();
}
// put result on stack
var output_value_index = index_count;
index_count += 1;
index_stack.push(output_value_index);
// record the operation
var operation = Operation{ .function = f, .input_indices = input_indices, .output_index = output_value_index };
operations_stack.push(operation);
},
.paren, .comma => {
@panic("parenthesis encountered after evaluation");
},
}
}
if (index_stack.len != 1) {
@panic("did not process all values on stack");
}
return CompiledExpression{ .operations = operations_buf, .operation_count = operations_stack.len, .numbers = numbers_buf, .number_count = numbers_stack.len, .index_count = index_count, .output_index = index_stack.buf[0] };
}
/// Execute an expression using integer values, this is just for testing `compileExpression`
fn intExpr(comptime ex: []const u8, args: anytype) i64 {
const type_info = @typeInfo(@TypeOf(args));
if (type_info != .Struct) {
@compileError("must pass a struct to this function");
}
const num_fields = type_info.Struct.fields.len;
comptime var field_names: [num_fields][]const u8 = undefined;
comptime {
var i: comptime_int = 0;
while (i < num_fields) : (i += 1) {
field_names[i] = type_info.Struct.fields[i].name;
}
}
@setEvalBranchQuota(10000);
comptime var ce = compileExpression(ex, &field_names);
if (ce.index_count == 0) {
@compileError("expression used no values");
}
var values: [ce.index_count]i64 = undefined;
// copy the fields into the values array
comptime var field_index = 0;
inline while (field_index < num_fields) : (field_index += 1) {
values[field_index] = @field(args, type_info.Struct.fields[field_index].name);
}
// copy any literals from the expression to our values array after the fields
comptime var number_index = 0;
inline while (number_index < ce.number_count) : (number_index += 1) {
values[num_fields + number_index] = ce.numbers[number_index].int;
}
// execute the operations, reading and writing values
// this should be inlined at comptime but causes a compiler crash
// comptime var op_index: u64 = 0;
// inline while (op_index < ce.operation_count) : (op_index += 1) {
var op_index: u64 = 0;
while (op_index < ce.operation_count) : (op_index += 1) {
var op = ce.operations[op_index];
var x = values[op.input_indices[0]];
// if this is not used, it will be the value at index 0
// we assume that there's at least one value
var y = values[op.input_indices[1]];
var z = switch (op.function) {
.plus => x + y,
.minus => x - y,
.uplus => x,
.uminus => -x,
.times => x * y,
.mtimes => @panic("mtimes not supported"),
.divide => @divTrunc(x, y),
.mdivide => @panic("mdivide not supported"),
.power => std.math.pow(i64, x, y),
.mpower => @panic("mpower not supported"),
.eq => boolToValue(i64, x == y),
.gt => boolToValue(i64, x > y),
.gte => boolToValue(i64, x >= y),
.lt => boolToValue(i64, x < y),
.lte => boolToValue(i64, x <= y),
.transpose => x,
.ctranspose => x,
.f32 => @panic("f32 not supported"),
.detach => @panic("detach not supported"),
.log => @panic("log not supported"),
.log2 => @floatToInt(i64, std.math.log(f64, 2.0, @intToFloat(f64, x))),
.exp => @floatToInt(i64, std.math.exp(@intToFloat(f64, x))),
.max => @panic("max not supported"),
.reduce_sum => @panic("reduce_sum not supported"),
.keep_sum => @panic("keep_sum not supported"),
.reduce_max => @panic("reduce_max not supported"),
.keep_max => @panic("keep_max not supported"),
.reduce_mean => @panic("reduce_mean not supported"),
.keep_mean => @panic("keep_mean not supported"),
.reduce_arg_max => @panic("reduce_arg_max not supported"),
.keep_arg_max => @panic("keep_arg_max not supported"),
.gather => @panic("gather not supported"),
};
// std.debug.print("{} op {} => {}\n", .{x, y, z});
values[op.output_index] = z;
}
return values[ce.output_index];
}
test "int_expr" {
std.testing.expect(intExpr("-2 .^ 4", .{}) == -16);
std.testing.expect(intExpr("power(uminus(2), 4)", .{}) == 16);
std.testing.expect(intExpr("2 .^ 3 .^ 2", .{}) == 64);
std.testing.expect(intExpr("2 + 3 .* 4", .{}) == 14);
std.testing.expect(intExpr("(2 + 3) .* 4", .{}) == 20);
std.testing.expect(intExpr("1 .* 2 + +(-3 + 4 .^ 5) .+ -(6 .- 7)", .{}) == 1024);
}
pub fn OpsTable(comptime T: type) type {
const Error = error{OutOfMemory};
const UnaryOpType = fn (*std.mem.Allocator, T) Error!T;
const BinaryOpType = fn (*std.mem.Allocator, T, T) Error!T;
const TernaryOpType = fn (*std.mem.Allocator, T, T, T) Error!T;
const ScalarType = fn (*std.mem.Allocator, DType, f64) Error!T;
const CastType = fn (*std.mem.Allocator, T, DType) Error!T;
const DTypeType = fn (T) DType;
return struct {
plus: BinaryOpType,
minus: BinaryOpType,
uplus: UnaryOpType,
uminus: UnaryOpType,
times: BinaryOpType,
mtimes: BinaryOpType,
divide: BinaryOpType,
mdivide: BinaryOpType,
power: BinaryOpType,
mpower: BinaryOpType,
eq: BinaryOpType,
gt: BinaryOpType,
gte: BinaryOpType,
lt: BinaryOpType,
lte: BinaryOpType,
transpose: UnaryOpType,
ctranspose: UnaryOpType,
scalar: ScalarType,
cast: CastType,
detach: UnaryOpType,
log: UnaryOpType,
log2: UnaryOpType,
exp: UnaryOpType,
// dims are passed as an array
max: BinaryOpType,
reduce_sum: BinaryOpType,
keep_sum: BinaryOpType,
reduce_max: BinaryOpType,
keep_max: BinaryOpType,
reduce_mean: BinaryOpType,
keep_mean: BinaryOpType,
reduce_arg_max: BinaryOpType,
keep_arg_max: BinaryOpType,
gather: TernaryOpType,
get_dtype: DTypeType,
};
}
pub fn binaryNotImplemented(alc: *std.mem.Allocator, x: Array, y: Array) !Array {
@panic("operation not implemented");
}
pub fn unaryNotImplemented(alc: *std.mem.Allocator, x: Array) !Array {
@panic("operation not implemented");
}
pub fn reduceSumExprAlloc(alc: *std.mem.Allocator, x: Array, dims: Array) !Array {
var dims_cast = try castAlloc(alc, dims, .u64);
defer dims_cast.release();
var dims_buf = dims_cast.getBuffer(u64);
return try reduceSumAlloc(alc, x, dims_buf);
}
pub fn keepSumExprAlloc(alc: *std.mem.Allocator, x: Array, dims: Array) !Array {
var dims_cast = try castAlloc(alc, dims, .u64);
defer dims_cast.release();
var dims_buf = dims_cast.getBuffer(u64);
return try keepSumAlloc(alc, x, dims_buf);
}
pub fn reduceMaxExprAlloc(alc: *std.mem.Allocator, x: Array, dims: Array) !Array {
var dims_cast = try castAlloc(alc, dims, .u64);
defer dims_cast.release();
var dims_buf = dims_cast.getBuffer(u64);
return try reduceMaxAlloc(alc, x, dims_buf);
}
pub fn keepMaxExprAlloc(alc: *std.mem.Allocator, x: Array, dims: Array) !Array {
var dims_cast = try castAlloc(alc, dims, .u64);
defer dims_cast.release();
var dims_buf = dims_cast.getBuffer(u64);
return try keepMaxAlloc(alc, x, dims_buf);
}
pub fn reduceMeanExprAlloc(alc: *std.mem.Allocator, x: Array, dims: Array) !Array {
var dims_cast = try castAlloc(alc, dims, .u64);
defer dims_cast.release();
var dims_buf = dims_cast.getBuffer(u64);
return try reduceMeanAlloc(alc, x, dims_buf);
}
pub fn keepMeanExprAlloc(alc: *std.mem.Allocator, x: Array, dims: Array) !Array {
var dims_cast = try castAlloc(alc, dims, .u64);
defer dims_cast.release();
var dims_buf = dims_cast.getBuffer(u64);
return try keepMeanAlloc(alc, x, dims_buf);
}
pub fn reduceArgMaxExprAlloc(alc: *std.mem.Allocator, x: Array, dim: Array) !Array {
var dim_cast = try castAlloc(alc, dim, .u64);
defer dim_cast.release();
return try reduceArgMaxAlloc(alc, x, dim_cast.getItem(u64));
}
pub fn keepArgMaxExprAlloc(alc: *std.mem.Allocator, x: Array, dim: Array) !Array {
var dim_cast = try castAlloc(alc, dim, .u64);
defer dim_cast.release();
return try keepArgMaxAlloc(alc, x, dim_cast.getItem(u64));
}
pub fn gatherExprAlloc(alc: *std.mem.Allocator, x: Array, dim: Array, index: Array) !Array {
var dim_cast = try castAlloc(alc, dim, .u64);
defer dim_cast.release();
var index_cast = try castAlloc(alc, index, .u64);
defer index_cast.release();
return try gatherAlloc(alc, x, dim_cast.getItem(u64), index_cast);
}
fn getDType(a: Array) DType {
return a.dtype;
}
pub fn expr(alc: *std.mem.Allocator, comptime ex: []const u8, args: anytype) !Array {
comptime var opsTable = OpsTable(Array){
.plus = plusAlloc,
.minus = minusAlloc,
.uplus = uplusAlloc,
.uminus = uminusAlloc,
.times = timesAlloc,
.mtimes = mtimesAlloc,
.divide = divideAlloc,
.mdivide = binaryNotImplemented,
.power = powerAlloc,
.mpower = binaryNotImplemented,
.eq = eqAlloc,
.gt = gtAlloc,
.gte = gteAlloc,
.lt = ltAlloc,
.lte = lteAlloc,
.transpose = transposeAlloc,
.ctranspose = transposeAlloc,
.scalar = scalarAlloc,
.cast = castAlloc,
.detach = unaryNotImplemented,
.log = logAlloc,
.log2 = log2Alloc,
.exp = expAlloc,
.max = maxAlloc,
.reduce_sum = reduceSumExprAlloc,
.keep_sum = keepSumExprAlloc,
.reduce_max = reduceMaxExprAlloc,
.keep_max = keepMaxExprAlloc,
.reduce_mean = reduceMeanExprAlloc,
.keep_mean = keepMeanExprAlloc,
.reduce_arg_max = reduceArgMaxExprAlloc,
.keep_arg_max = keepArgMaxExprAlloc,
.gather = gatherExprAlloc,
.get_dtype = getDType,
};
return try genericExpr(Array, opsTable, alc, ex, args);
}
/// Execute an expression using array-like values
pub fn genericExpr(comptime T: type, comptime opsTable: OpsTable(T), alc: *std.mem.Allocator, comptime ex: []const u8, args: anytype) !T {
const type_info = @typeInfo(@TypeOf(args));
if (type_info != .Struct) {
@compileError("must pass a struct to this function");
}
const num_fields = type_info.Struct.fields.len;
comptime var field_names: [num_fields][]const u8 = undefined;
comptime {
var i: comptime_int = 0;
while (i < num_fields) : (i += 1) {
field_names[i] = type_info.Struct.fields[i].name;
}
}
@setEvalBranchQuota(10000);
comptime var ce = compileExpression(ex, &field_names);
if (ce.index_count == 0) {
@compileError("expression used no values");
}
var values: [ce.index_count]T = undefined;
var allocated_scalar = [_]bool{false} ** num_fields;
// copy the fields into the values array
comptime var field_index = 0;
inline while (field_index < num_fields) : (field_index += 1) {
const field_type = type_info.Struct.fields[field_index].field_type;
const value = @field(args, type_info.Struct.fields[field_index].name);
values[field_index] = switch (field_type) {
T => value,
comptime_int => blk: {
allocated_scalar[field_index] = true;
break :blk try opsTable.scalar(alc, defaultIntDType, value);
},
comptime_float => blk: {
allocated_scalar[field_index] = true;
break :blk try opsTable.scalar(alc, defaultFloatDType, value);
},
i64, u64 => blk: {
allocated_scalar[field_index] = true;
break :blk try opsTable.scalar(alc, typeToDType(field_type), @intToFloat(f64, value));
},
f32, f64 => blk: {
allocated_scalar[field_index] = true;
break :blk try opsTable.scalar(alc, typeToDType(field_type), @floatCast(f64, value));
},
else => std.debug.panic("Unsupported type {s}", .{@typeName(field_type)}),
};
}
// copy any literals from the expression to our values array after the fields
comptime var number_index = 0;
inline while (number_index < ce.number_count) : (number_index += 1) {
var arr = switch (ce.numbers[number_index]) {
// these should probably use the minimum type that represents the value
// to avoid accidentally casting arguments to larger types
.int => |int| try opsTable.scalar(alc, defaultIntDType, int),
.float => |float| try opsTable.scalar(alc, defaultFloatDType, float),
};
var value_index = num_fields + number_index;
values[value_index] = arr;
}
// execute the operations, reading and writing values
// this should be inlined at comptime but causes a compiler crash
var op_args: [MAX_FUNCTION_ARGS]T = undefined;
// comptime var op_index: u64 = 0;
// inline while (op_index < ce.operation_count) : (op_index += 1) {
var op_index: u64 = 0;
while (op_index < ce.operation_count) : (op_index += 1) {
var op = ce.operations[op_index];
var num_args = functionToNumArgs(op.function);
var should_release: [MAX_FUNCTION_ARGS]bool = [_]bool{false} ** MAX_FUNCTION_ARGS;
// automatic casting rules
// for functions that take two arguments
// cast_dtype = max(dtype[0], dtype[1])
// where the dtypes are ordered: all ints, then all floats in order of bit width
// some operations are handled specially:
// divide()
// if both args are integer types, they will be cast to the default float type
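// e.g. adding an i64 array to an f32 array casts the i64 operand to f32, and dividing two integer arrays promotes both to the default float dtype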
{
var arg_index: u64 = 0;
while (arg_index < num_args) : (arg_index += 1) {
var value_index = op.input_indices[arg_index];
var value = values[value_index];
// we shouldn't release the original args to this function, but we should
// release any literals or calculated values since those are only used once
if (value_index >= num_fields) {
should_release[arg_index] = true;
}
op_args[arg_index] = value;
}
}
// do any autocasting
var autocast_args = functionToAutocastArgs(op.function);
if (autocast_args.len > 0) {
var cast_dtype = DType.u64;
for (autocast_args) |arg_index| {
cast_dtype = dtypeMax(cast_dtype, opsTable.get_dtype(op_args[arg_index]));
}
if (dtypeIsInteger(cast_dtype) and op.function == .divide) {
cast_dtype = defaultFloatDType;
}
var arg_index: u64 = 0;
while (arg_index < num_args) : (arg_index += 1) {
if (opsTable.get_dtype(op_args[arg_index]) != cast_dtype) {
var cast_value = try opsTable.cast(alc, op_args[arg_index], cast_dtype);
if (should_release[arg_index]) {
op_args[arg_index].release();
}
op_args[arg_index] = cast_value;
// since we allocate this cast, we should free it when we are done with this operation
should_release[arg_index] = true;
}
}
}
var out = switch (op.function) {
.plus => try opsTable.plus(alc, op_args[0], op_args[1]),
.minus => try opsTable.minus(alc, op_args[0], op_args[1]),
.uplus => try opsTable.uplus(alc, op_args[0]),
.uminus => try opsTable.uminus(alc, op_args[0]),
.times => try opsTable.times(alc, op_args[0], op_args[1]),
.mtimes => try opsTable.mtimes(alc, op_args[0], op_args[1]),
.divide => try opsTable.divide(alc, op_args[0], op_args[1]),
.mdivide => try opsTable.mdivide(alc, op_args[0], op_args[1]),
.power => try opsTable.power(alc, op_args[0], op_args[1]),
.mpower => try opsTable.mpower(alc, op_args[0], op_args[1]),
.eq => try opsTable.eq(alc, op_args[0], op_args[1]),
.gt => try opsTable.gt(alc, op_args[0], op_args[1]),
.gte => try opsTable.gte(alc, op_args[0], op_args[1]),
.lt => try opsTable.lt(alc, op_args[0], op_args[1]),
.lte => try opsTable.lte(alc, op_args[0], op_args[1]),
.transpose => try opsTable.transpose(alc, op_args[0]),
.ctranspose => try opsTable.ctranspose(alc, op_args[0]),
.detach => try opsTable.detach(alc, op_args[0]),
.log => try opsTable.log(alc, op_args[0]),
.log2 => try opsTable.log2(alc, op_args[0]),
.exp => try opsTable.exp(alc, op_args[0]),
.f32 => try opsTable.cast(alc, op_args[0], DType.f32),
.max => try opsTable.max(alc, op_args[0], op_args[1]),
.reduce_sum => try opsTable.reduce_sum(alc, op_args[0], op_args[1]),
.keep_sum => try opsTable.keep_sum(alc, op_args[0], op_args[1]),
.reduce_max => try opsTable.reduce_max(alc, op_args[0], op_args[1]),
.keep_max => try opsTable.keep_max(alc, op_args[0], op_args[1]),
.reduce_mean => try opsTable.reduce_mean(alc, op_args[0], op_args[1]),
.keep_mean => try opsTable.keep_mean(alc, op_args[0], op_args[1]),
.reduce_arg_max => try opsTable.reduce_arg_max(alc, op_args[0], op_args[1]),
.keep_arg_max => try opsTable.keep_arg_max(alc, op_args[0], op_args[1]),
.gather => try opsTable.gather(alc, op_args[0], op_args[1], op_args[2]),
};
{
var arg_index: u64 = 0;
while (arg_index < num_args) : (arg_index += 1) {
if (should_release[arg_index]) {
op_args[arg_index].release();
}
}
}
values[op.output_index] = out;
}
// free any scalars we allocated from the struct fields
comptime var index = 0;
inline while (index < allocated_scalar.len) : (index += 1) {
if (allocated_scalar[index]) {
values[index].release();
}
}
return values[ce.output_index];
}
test "expr" {
{
const a = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer a.release();
const b = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 1, 4, 1 }, 1.0, 1.0);
defer b.release();
var c = try expr(std.testing.allocator, "-1 + a + b + 1", .{ .a = a, .b = b });
defer c.release();
var d = try plusAlloc(std.testing.allocator, a, b);
defer d.release();
std.testing.expect(equal(c, d));
}
{
const a = try Array.allocWithRange(i64, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer a.release();
const b = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 1, 4, 1 }, 1.0, 1.0);
defer b.release();
var c = try expr(std.testing.allocator, "-1 + f32(a) + b + 1", .{ .a = a, .b = b });
defer c.release();
const d = try Array.allocWithRange(f32, std.testing.allocator, &[_]u64{ 2, 1, 3 }, 1.0, 1.0);
defer d.release();
var e = try plusAlloc(std.testing.allocator, d, b);
defer e.release();
std.testing.expect(equal(c, e));
}
{
var output = try expr(std.testing.allocator, "a + b", .{ .a = 1, .b = 2 });
defer output.release();
const expected_output = try Array.allocWithValue(i64, std.testing.allocator, &[_]u64{}, 3);
defer expected_output.release();
std.testing.expect(equal(output, expected_output));
}
}
// test "debug_expr" {
// var token_buffer: [MAX_TOKENS]Token = undefined;
// var s = Scanner.init("reduce_sum(-x, dims)/size");
// var tokens = s.getTokens(&token_buffer);
// for (tokens) |t, i| {
// std.debug.print("token {}\n", .{t});
// }
// var arg_names = [_][]const u8{"x", "dims", "size"};
// var e = Evaluator.init(tokens, &arg_names);
// var item_buffer: [MAX_ITEMS]Item = undefined;
// var items = e.getItems(&item_buffer);
// for (items) |t, i| {
// std.debug.print("infix item {}\n", .{t});
// }
// var postfix_item_buffer: [MAX_ITEMS]Item = undefined;
// var postfix_items = infixToPostfix(items, &postfix_item_buffer);
// for (postfix_items) |t, i| {
// std.debug.print("item {}\n", .{t});
// }
// } | src/array.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const Arch = std.Target.Cpu.Arch;
const meta = std.meta;
pub const ZpackError = error{
InvalidArgument,
UnsupportedType,
StringTooLong,
OutOfMemory,
};
pub const Timestamp32 = struct {
seconds: u32,
};
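/// In this packed struct the nanoseconds field occupies the low 30 bits of the
/// backing u64 and the seconds field the high 34 bits; packTimestamp64 relies on
/// that layout when it bit-casts the struct to a u64.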
pub const Timestamp64 = packed struct {
nanoseconds: u30,
seconds: u34,
};
pub const Timestamp96 = struct {
nanoseconds: u32,
seconds: i64,
};
pub fn main() ZpackError!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
var zs = try ZpackStream.init(gpa.allocator());
defer zs.deinit();
_ = try zs.packNil();
_ = try zs.packBool(true);
_ = try zs.packBool(false);
_ = try zs.packPosFixInt(112);
_ = try zs.packNegFixInt(23);
_ = try zs.packU8(42);
_ = try zs.packU16(1337);
_ = try zs.packU32(0xDEADBEEF);
_ = try zs.packU64(0xCAFEB00BDEADBEEF);
_ = try zs.packI8(42);
_ = try zs.packI16(420);
_ = try zs.packI32(0x1CED1337);
_ = try zs.packI64(0x1ECA1BABEFECA1BA);
_ = try zs.packF32(3.14);
_ = try zs.packF64(133769420.69420);
// _ = try zs.packFixStr("Memes");
// _ = try zs.packFixStr("school!");
// _ = try zs.packStr8("I wish, I wish, I wish I was a fish. This string is 71 characters long.");
// _ = try zs.packStr16("I hate the way that one race does that one thing. Terrible. This string is 133 characters long and it better fucking stay that way.");
// _ = try zs.packStr32("saldkjfhasldkjfhal;skdjhflsakjdhfkajdshflaksjdfhlaiusjhdfoashcdnakjsdfliasjdnpakjsdnvcoaisjefnposidjfp asdfjaskdfjn a;sodkfj;aslkdjf;alkdjf;alskdjf alskdjfasld;jfa; sldkjfapoiehfjpwoingfpe9ifj;a'slkdfuapsl;kfjaoioigjhkrga;nfnrepoaserfkjahsdfa dfask;d. I hate the way that one race does that one thing. Terrible. This string is 386 characters long and it better fucking stay that way.");
// _ = try zs.packBin8("I wish, I wish, I wish I was a fish. This string is 71 characters long.");
// _ = try zs.packBin16("I hate the way that one race does that one thing. Terrible. This string is 133 characters long and it better fucking stay that way.");
// _ = try zs.packBin32("saldkjfhasldkjfhal;skdjhflsakjdhfkajdshflaksjdfhlaiusjhdfoashcdnakjsdfliasjdnpakjsdnvcoaisjefnposidjfp asdfjaskdfjn a;sodkfj;aslkdjf;alkdjf;alskdjf alskdjfasld;jfa; sldkjfapoiehfjpwoingfpe9ifj;a'slkdfuapsl;kfjaoioigjhkrga;nfnrepoaserfkjahsdfa dfask;d. I hate the way that one race does that one thing. Terrible. This string is 386 characters long and it better fucking stay that way.");
_ = try zs.packAny(.{ @as(u8, 27), .{ 1337, "Cats are cool" } });
_ = try zs.packAny(1337);
_ = try zs.packAny(null);
const test_arr: [10]u32 = .{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
_ = try zs.packFixArrT(test_arr);
const ts32: Timestamp32 = .{ .seconds = 1337 };
_ = try zs.packTimestamp32(ts32);
const ts64: Timestamp64 = .{ .seconds = 69420, .nanoseconds = 42069 };
_ = try zs.packTimestamp64(ts64);
_ = try zs.packAnyT(null);
std.log.info("##### {s}", .{@typeName(@TypeOf(null))});
zs.dump();
zs.hexDump();
}
// Byte-swaps `n` on little-endian machines and returns it unchanged on big-endian
// machines, so multi-byte values can be emitted in the big-endian order that
// MessagePack uses. Intended for fixed-width integers and 32/64-bit floats; no
// range checking or narrowing is performed here.
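// e.g. on a little-endian machine beCast(@as(u32, 0x11223344)) yields 0x44332211, while on big-endian targets the value is returned unchanged.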
fn beCast(n: anytype) @TypeOf(n) {
var num = n;
const num_ti = @typeInfo(@TypeOf(num));
// TODO: write our own endianness detection; importing builtin just for this is heavier than needed.
// If big endian, return the value as-is
if (@import("builtin").target.cpu.arch.endian() == .Big)
return num;
// On LE machines byte-swap
return switch (num_ti) {
.Int, .ComptimeInt => @byteSwap(@TypeOf(num), @intCast(@TypeOf(num), num)),
.Float => |f_ti| blk: {
// swap the bytes of the float's bit pattern, then reinterpret it as the same float type
if (f_ti.bits == 32) {
break :blk @bitCast(f32, @byteSwap(u32, @bitCast(u32, num)));
} else if (f_ti.bits == 64) {
break :blk @bitCast(f64, @byteSwap(u64, @bitCast(u64, num)));
} else {
@compileError("beCast only supports 32- and 64-bit floats");
}
},
else => num,
};
}
/// A growable byte buffer that MessagePack objects are packed into.
/// Each pack* method appends one object and returns the number of bytes written.
const ZpackStream = struct {
ator: Allocator = undefined,
buf: []u8 = undefined,
capacity: usize = undefined,
// end: usize = 0,
pos: usize = 0,
/// Prints type and value of all objects in this stream.
pub fn dump(zs: ZpackStream) void {
var i: usize = 0;
_ = std.log.info("ZpackStream dump:", .{});
_ = std.log.info("Capacity: {d}", .{zs.capacity});
_ = std.log.info("Pos: {d}", .{zs.pos});
while (i < zs.pos) {
var tag: u8 = zs.buf[i];
i += 1;
switch (tag) {
0x00...0x7F => std.log.info("Positive Fixint: {d}", .{tag}),
0xA0...0xBF => {
const len = tag & 0b0001_1111;
std.log.info("FixStr: len: {d}, \"{s}\"", .{ len, zs.buf[i .. i + len] });
i += len;
},
0xC0 => std.log.info("nil", .{}),
0xC2 => std.log.info("Bool: False", .{}),
0xC3 => std.log.info("Bool: True", .{}),
0xC4 => {
const len: u8 = zs.buf[i];
std.log.info("Bin8: len: {d}, \"{s}\"", .{ len, zs.buf[i + 1 .. i + 1 + len] });
i += len + 1;
},
0xC5 => {
var len: u16 = @as(u16, zs.buf[i]) << 8;
len |= zs.buf[i + 1];
std.log.info("Bin16: len: {d}, \"{s}\"", .{ len, zs.buf[i + 2 .. i + 2 + len] });
i += len + 2;
},
0xC6 => {
var len: u32 = @as(u32, zs.buf[i]) << 24;
len |= @as(u32, zs.buf[i + 1]) << 16;
len |= @as(u32, zs.buf[i + 2]) << 8;
len |= zs.buf[i + 3];
std.log.info("Bin32: len: {d}, \"{s}\"", .{ len, zs.buf[i + 4 .. i + 4 + len] });
i += len + 4;
},
0xCA => {
std.log.info("Float32: {e}", .{@bitCast(f32, beCast(zs.buf[i] + (@intCast(u32, zs.buf[i + 1]) << 8) + (@intCast(u32, zs.buf[i + 2]) << 16) + (@intCast(u32, zs.buf[i + 3]) << 24)))});
i += 4;
},
0xCB => {
std.log.info("Float64: {e}", .{@bitCast(f64, beCast(zs.buf[i] + (@intCast(u64, zs.buf[i + 1]) << 8) + (@intCast(u64, zs.buf[i + 2]) << 16) + (@intCast(u64, zs.buf[i + 3]) << 24) + (@intCast(u64, zs.buf[i + 4]) << 32) + (@intCast(u64, zs.buf[i + 5]) << 40) + (@intCast(u64, zs.buf[i + 6]) << 48) + (@intCast(u64, zs.buf[i + 7]) << 56)))});
i += 8;
},
0xCC => {
std.log.info("Uint8: {d}", .{zs.buf[i]});
i += 1;
},
0xCD => {
std.log.info("Uint16: 0x{0X} ({0d})", .{beCast(zs.buf[i] + (@intCast(u16, zs.buf[i + 1]) << 8))});
i += 2;
},
0xCE => {
std.log.info("Uint32: 0x{0X} ({0d})", .{beCast(zs.buf[i] + (@intCast(u32, zs.buf[i + 1]) << 8) + (@intCast(u32, zs.buf[i + 2]) << 16) + (@intCast(u32, zs.buf[i + 3]) << 24))});
i += 4;
},
0xCF => {
std.log.info("Uint64: 0x{0X} ({0d})", .{beCast(zs.buf[i] + (@intCast(u64, zs.buf[i + 1]) << 8) + (@intCast(u64, zs.buf[i + 2]) << 16) + (@intCast(u64, zs.buf[i + 3]) << 24) + (@intCast(u64, zs.buf[i + 4]) << 32) + (@intCast(u64, zs.buf[i + 5]) << 40) + (@intCast(u64, zs.buf[i + 6]) << 48) + (@intCast(u64, zs.buf[i + 7]) << 56))});
i += 8;
},
0xD0 => {
std.log.info("Int8: {d}", .{zs.buf[i]});
i += 1;
},
0xD1 => {
std.log.info("Int16: 0x{0X} ({0d})", .{beCast(zs.buf[i] + (@intCast(i16, zs.buf[i + 1]) << 8))});
i += 2;
},
0xD2 => {
std.log.info("Int32: 0x{0X} ({0d})", .{beCast(zs.buf[i] + (@intCast(i32, zs.buf[i + 1]) << 8) + (@intCast(i32, zs.buf[i + 2]) << 16) + (@intCast(i32, zs.buf[i + 3]) << 24))});
i += 4;
},
0xD3 => {
std.log.info("Int64: 0x{0X} ({0d})", .{beCast(zs.buf[i] + (@intCast(i64, zs.buf[i + 1]) << 8) + (@intCast(i64, zs.buf[i + 2]) << 16) + (@intCast(i64, zs.buf[i + 3]) << 24) + (@intCast(i64, zs.buf[i + 4]) << 32) + (@intCast(i64, zs.buf[i + 5]) << 40) + (@intCast(i64, zs.buf[i + 6]) << 48) + (@intCast(i64, zs.buf[i + 7]) << 56))});
i += 8;
},
0xD6 => {
i += 1; // ext marker
std.log.info("Timestamp32: {d}s", .{beCast(zs.buf[i] + (@intCast(u32, zs.buf[i + 1]) << 8) + (@intCast(u32, zs.buf[i + 2]) << 16) + (@intCast(u32, zs.buf[i + 3]) << 24))});
i += 4;
},
0xD7 => {
i += 1; // ext marker
const d64: u64 = beCast(zs.buf[i] + (@intCast(u64, zs.buf[i + 1]) << 8) + (@intCast(u64, zs.buf[i + 2]) << 16) + (@intCast(u64, zs.buf[i + 3]) << 24) + (@intCast(u64, zs.buf[i + 4]) << 32) + (@intCast(u64, zs.buf[i + 5]) << 40) + (@intCast(u64, zs.buf[i + 6]) << 48) + (@intCast(u64, zs.buf[i + 7]) << 56));
const ts = @bitCast(Timestamp64, d64);
std.log.info("Timestamp64: {d}s {d}ns", .{ ts.seconds, ts.nanoseconds });
i += 8;
},
0xD9 => {
const len: u8 = zs.buf[i];
std.log.info("Str8: len: {d}, \"{s}\"", .{ len, zs.buf[i + 1 .. i + 1 + len] });
i += len + 1;
},
0xDA => {
var len: u16 = @as(u16, zs.buf[i]) << 8;
len |= zs.buf[i + 1];
std.log.info("Str16: len: {d}, \"{s}\"", .{ len, zs.buf[i + 2 .. i + 2 + len] });
i += len + 2;
},
0xDB => {
var len: u32 = @as(u32, zs.buf[i]) << 24;
len |= @as(u32, zs.buf[i + 1]) << 16;
len |= @as(u32, zs.buf[i + 2]) << 8;
len |= zs.buf[i + 3];
std.log.info("Str32: len: {d}, \"{s}\"", .{ len, zs.buf[i + 4 .. i + 4 + len] });
i += len + 4;
},
0xE0...0xFF => std.log.info("Negative Fixint: -{d}", .{tag & 0b0001_1111}),
else => std.log.info("Unknown tag: {X}", .{tag}),
}
}
}
// TODO: redo this with std.fmt hex formatting; it was producing deprecation errors when first written.
pub fn hexDump(zs: *ZpackStream) void {
const alph = "0123456789ABCDEF";
var buf: [16 * 3]u8 = .{0} ** 48;
var row: usize = 0;
while (row < zs.pos) : (row += 16) {
// the final row may contain fewer than 16 bytes
const end = if (row + 16 < zs.pos) row + 16 else zs.pos;
for (zs.buf[row..end]) |c, i| {
buf[i * 3] = alph[c >> 4 & 0xF];
buf[i * 3 + 1] = alph[c & 0xF];
buf[i * 3 + 2] = ' ';
// std.log.info("{d}: {c}{c}{c}", .{ i, alph[c >> 4 & 0xF], alph[c & 0xF], ' ' });
}
std.log.info("{d}: {s}", .{ row, buf[0 .. (end - row) * 3] });
}
}
/// Reallocates the buffer with the given capacity.
pub fn realloc(zs: *ZpackStream, new_capacity: usize) ZpackError!void {
var new_buf = try zs.ator.alloc(u8, new_capacity);
std.mem.copy(u8, new_buf, zs.buf[0..zs.pos]);
zs.ator.free(zs.buf);
zs.buf = new_buf;
zs.capacity = new_capacity;
}
/// Grows the buffer so it can hold at least `desired_capacity` bytes, at least doubling the current capacity.
pub fn reallocIfNeeded(zs: *ZpackStream, desired_capacity: usize) ZpackError!void {
if (desired_capacity > zs.capacity) {
// keep doubling so a single large write (e.g. a long string) still fits
var new_cap = zs.capacity * 2;
while (new_cap < desired_capacity) new_cap *= 2;
_ = try zs.realloc(new_cap); // realloc updates zs.capacity
}
}
/// Writes a 1 byte nil to the object stream.
/// Returns number of bytes written (always 1)
pub fn packNil(zs: *ZpackStream) ZpackError!usize {
_ = try zs.reallocIfNeeded(zs.pos + 1);
zs.buf[zs.pos] = 0xC0;
zs.pos += 1;
return 1;
}
/// Writes a 1 byte bool to the object stream.
/// Returns number of bytes written (always 1)
pub fn packBool(zs: *ZpackStream, b: bool) ZpackError!usize {
_ = try zs.reallocIfNeeded(zs.pos + 1);
zs.buf[zs.pos] = @as(u8, if (b) 0xC3 else 0xC2);
zs.pos += 1;
return 1;
}
/// Writes a 7 bit positive integer to the object stream.
/// Returns number of bytes written (always 1).
pub fn packPosFixInt(zs: *ZpackStream, n: u7) ZpackError!usize {
_ = try zs.reallocIfNeeded(zs.pos + 1);
zs.buf[zs.pos] = @as(u8, n);
zs.pos += 1;
return 1;
}
/// Writes a 5 bit negative integer to the object stream.
/// Returns number of bytes written (always 1).
pub fn packNegFixInt(zs: *ZpackStream, n: u5) ZpackError!usize {
_ = try zs.reallocIfNeeded(zs.pos + 1);
zs.buf[zs.pos] = @as(u8, n) | @as(u8, 0b1110_0000);
zs.pos += 1;
return 1;
}
/// Writes an 8-bit unsigned integer to the object stream.
/// Returns number of bytes written (always 2).
pub fn packU8(zs: *ZpackStream, n: u8) ZpackError!usize {
_ = try zs.reallocIfNeeded(zs.pos + 2);
zs.buf[zs.pos] = 0xCC;
zs.buf[zs.pos + 1] = n;
zs.pos += 2;
return 2;
}
/// Writes a 16-bit unsigned integer to the object stream.
/// Returns number of bytes written (always 3).
pub fn packU16(zs: *ZpackStream, n: u16) ZpackError!usize {
var n_be = beCast(n);
_ = try zs.reallocIfNeeded(zs.pos + 3);
zs.buf[zs.pos] = 0xCD;
zs.buf[zs.pos + 1] = @intCast(u8, n_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, n_be >> 8);
zs.pos += 3;
return 3;
}
/// Writes a 32-bit unsigned integer to the object stream.
/// Returns number of bytes written (always 5).
pub fn packU32(zs: *ZpackStream, n: u32) ZpackError!usize {
var n_be = beCast(n);
_ = try zs.reallocIfNeeded(zs.pos + 5);
zs.buf[zs.pos] = 0xCE;
zs.buf[zs.pos + 1] = @intCast(u8, n_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, n_be >> 8 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, n_be >> 16 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, n_be >> 24 & 0xFF);
zs.pos += 5;
return 5;
}
/// Writes a 64-bit unsigned integer to the object stream.
/// Returns number of bytes written (always 9).
pub fn packU64(zs: *ZpackStream, n: u64) ZpackError!usize {
var n_be = beCast(n);
_ = try zs.reallocIfNeeded(zs.pos + 9);
zs.buf[zs.pos] = 0xCF;
zs.buf[zs.pos + 1] = @intCast(u8, n_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, n_be >> 8 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, n_be >> 16 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, n_be >> 24 & 0xFF);
zs.buf[zs.pos + 5] = @intCast(u8, n_be >> 32 & 0xFF);
zs.buf[zs.pos + 6] = @intCast(u8, n_be >> 40 & 0xFF);
zs.buf[zs.pos + 7] = @intCast(u8, n_be >> 48 & 0xFF);
zs.buf[zs.pos + 8] = @intCast(u8, n_be >> 56 & 0xFF);
zs.pos += 9;
return 9;
}
/// Writes an 8-bit signed integer to the object stream.
/// Returns number of bytes written (always 2).
pub fn packI8(zs: *ZpackStream, n: i8) ZpackError!usize {
var n_be = beCast(n);
_ = try zs.reallocIfNeeded(zs.pos + 2);
zs.buf[zs.pos] = 0xD0;
zs.buf[zs.pos + 1] = @bitCast(u8, n_be);
zs.pos += 2;
return 2;
}
/// Writes a 16-bit signed integer to the object stream.
/// Returns number of bytes written (always 3).
pub fn packI16(zs: *ZpackStream, n: i16) ZpackError!usize {
var n_be = beCast(n);
_ = try zs.reallocIfNeeded(zs.pos + 3);
zs.buf[zs.pos] = 0xD1;
zs.buf[zs.pos + 1] = @intCast(u8, n_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, n_be >> 8 & 0xFF);
zs.pos += 3;
return 3;
}
/// Writes a 32-bit signed integer to the object stream.
/// Returns number of bytes written (always 5).
pub fn packI32(zs: *ZpackStream, n: i32) ZpackError!usize {
var n_be = beCast(n);
_ = try zs.reallocIfNeeded(zs.pos + 5);
zs.buf[zs.pos] = 0xD2;
zs.buf[zs.pos + 1] = @intCast(u8, n_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, n_be >> 8 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, n_be >> 16 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, n_be >> 24 & 0xFF);
zs.pos += 5;
return 5;
}
/// Writes a 64-bit signed integer to the object stream.
/// Returns number of bytes written (always 9).
pub fn packI64(zs: *ZpackStream, n: i64) ZpackError!usize {
var n_be = beCast(n);
_ = try zs.reallocIfNeeded(zs.pos + 9);
zs.buf[zs.pos] = 0xD3;
zs.buf[zs.pos + 1] = @intCast(u8, n_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, n_be >> 8 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, n_be >> 16 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, n_be >> 24 & 0xFF);
zs.buf[zs.pos + 5] = @intCast(u8, n_be >> 32 & 0xFF);
zs.buf[zs.pos + 6] = @intCast(u8, n_be >> 40 & 0xFF);
zs.buf[zs.pos + 7] = @intCast(u8, n_be >> 48 & 0xFF);
zs.buf[zs.pos + 8] = @intCast(u8, n_be >> 56 & 0xFF);
zs.pos += 9;
return 9;
}
/// Writes a 32-bit floating point number to the object stream.
/// Returns the number of bytes written (always 5).
pub fn packF32(zs: *ZpackStream, f: f32) ZpackError!usize {
var fu_be: u32 = beCast(@bitCast(u32, f));
_ = try zs.reallocIfNeeded(zs.pos + 5);
zs.buf[zs.pos] = 0xCA;
zs.buf[zs.pos + 1] = @intCast(u8, fu_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, fu_be >> 8 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, fu_be >> 16 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, fu_be >> 24 & 0xFF);
zs.pos += 5;
return 5;
}
/// Writes a 64-bit floating point number to the object stream.
/// Returns the number of bytes written (always 9).
pub fn packF64(zs: *ZpackStream, f: f64) ZpackError!usize {
var fu_be: u64 = beCast(@bitCast(u64, f));
_ = try zs.reallocIfNeeded(zs.pos + 9);
zs.buf[zs.pos] = 0xCB;
zs.buf[zs.pos + 1] = @intCast(u8, fu_be & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, fu_be >> 8 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, fu_be >> 16 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, fu_be >> 24 & 0xFF);
zs.buf[zs.pos + 5] = @intCast(u8, fu_be >> 32 & 0xFF);
zs.buf[zs.pos + 6] = @intCast(u8, fu_be >> 40 & 0xFF);
zs.buf[zs.pos + 7] = @intCast(u8, fu_be >> 48 & 0xFF);
zs.buf[zs.pos + 8] = @intCast(u8, fu_be >> 56 & 0xFF);
zs.pos += 9;
return 9;
}
/// Writes a byte array with a max len of 31 to the object stream.
/// Returns number of bytes written (s.len + 1).
pub fn packFixStr(zs: *ZpackStream, s: []const u8) ZpackError!usize {
var tag: u8 = 0b1010_0000;
if (s.len > 31)
return ZpackError.StringTooLong;
tag |= @intCast(u8, s.len);
_ = try zs.reallocIfNeeded(zs.pos + 1 + s.len);
zs.buf[zs.pos] = tag;
std.mem.copy(u8, zs.buf[zs.pos + 1 ..], s);
zs.pos += (1 + s.len);
return s.len + 1;
}
pub fn packStr8(zs: *ZpackStream, s: []const u8) ZpackError!usize {
var tag: u8 = 0xD9;
if (s.len > 255)
return ZpackError.StringTooLong;
_ = try zs.reallocIfNeeded(zs.pos + 2 + s.len);
zs.buf[zs.pos] = tag;
zs.buf[zs.pos + 1] = @intCast(u8, s.len);
std.mem.copy(u8, zs.buf[zs.pos + 2 ..], s);
zs.pos += (2 + s.len);
return s.len + 2;
}
pub fn packStr16(zs: *ZpackStream, s: []const u8) ZpackError!usize {
var tag: u8 = 0xDA;
if (s.len > 65535)
return ZpackError.StringTooLong;
_ = try zs.reallocIfNeeded(zs.pos + 3 + s.len);
zs.buf[zs.pos] = tag;
zs.buf[zs.pos + 1] = @intCast(u8, s.len >> 8 & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, s.len & 0xFF);
std.mem.copy(u8, zs.buf[zs.pos + 3 ..], s);
zs.pos += (3 + s.len);
return s.len + 3;
}
pub fn packStr32(zs: *ZpackStream, s: []const u8) ZpackError!usize {
var tag: u8 = 0xDB;
if (s.len > 4294967295)
return ZpackError.StringTooLong;
_ = try zs.reallocIfNeeded(zs.pos + 5 + s.len);
zs.buf[zs.pos] = tag;
zs.buf[zs.pos + 1] = @intCast(u8, s.len >> 24 & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, s.len >> 16 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, s.len >> 8 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, s.len & 0xFF);
std.mem.copy(u8, zs.buf[zs.pos + 5 ..], s);
zs.pos += (5 + s.len);
return s.len + 5;
}
pub fn packBin8(zs: *ZpackStream, s: []const u8) ZpackError!usize {
var tag: u8 = 0xC4;
if (s.len > 255)
return ZpackError.StringTooLong;
_ = try zs.reallocIfNeeded(zs.pos + 2 + s.len);
zs.buf[zs.pos] = tag;
zs.buf[zs.pos + 1] = @intCast(u8, s.len);
std.mem.copy(u8, zs.buf[zs.pos + 2 ..], s);
zs.pos += (2 + s.len);
return s.len + 2;
}
pub fn packBin16(zs: *ZpackStream, s: []const u8) ZpackError!usize {
var tag: u8 = 0xC5;
if (s.len > 65535)
return ZpackError.StringTooLong;
_ = try zs.reallocIfNeeded(zs.pos + 3 + s.len);
zs.buf[zs.pos] = tag;
zs.buf[zs.pos + 1] = @intCast(u8, s.len >> 8 & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, s.len & 0xFF);
std.mem.copy(u8, zs.buf[zs.pos + 3 ..], s);
zs.pos += (3 + s.len);
return s.len + 3;
}
pub fn packBin32(zs: *ZpackStream, s: []const u8) ZpackError!usize {
var tag: u8 = 0xC6;
if (s.len > 4294967295)
return ZpackError.StringTooLong;
_ = try zs.reallocIfNeeded(zs.pos + 5 + s.len);
zs.buf[zs.pos] = tag;
zs.buf[zs.pos + 1] = @intCast(u8, s.len >> 24 & 0xFF);
zs.buf[zs.pos + 2] = @intCast(u8, s.len >> 16 & 0xFF);
zs.buf[zs.pos + 3] = @intCast(u8, s.len >> 8 & 0xFF);
zs.buf[zs.pos + 4] = @intCast(u8, s.len & 0xFF);
std.mem.copy(u8, zs.buf[zs.pos + 5 ..], s);
zs.pos += (5 + s.len);
return s.len + 5;
}
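/// Writes a fixext4 timestamp (MessagePack ext type -1) holding a 32-bit seconds value.
/// Returns number of bytes written (always 6).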
pub fn packTimestamp32(zs: *ZpackStream, timestamp: Timestamp32) ZpackError!usize {
const tag: u8 = 0xD6;
const ext_tag: u8 = 0xFF;
_ = try zs.reallocIfNeeded(zs.pos + 5);
zs.buf[zs.pos] = tag;
zs.pos += 1;
_ = try zs.packU32(timestamp.seconds);
zs.buf[zs.pos - 5] = ext_tag;
return 6;
}
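/// Writes a fixext8 timestamp (MessagePack ext type -1) holding 30-bit nanoseconds and 34-bit seconds.
/// Returns number of bytes written (always 10).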
pub fn packTimestamp64(zs: *ZpackStream, timestamp: Timestamp64) ZpackError!usize {
const tag: u8 = 0xD7;
const ext_tag: u8 = 0xFF;
_ = try zs.reallocIfNeeded(zs.pos + 5);
const d64 = @bitCast(u64, timestamp);
zs.buf[zs.pos] = tag;
zs.pos += 1;
_ = try zs.packU64(d64);
zs.buf[zs.pos - 9] = ext_tag;
return 10;
}
// Accepts any type and tries to pack it into the object stream in the least amount of space.
// Supported types are: u0-u64, i1-i64, comptime ints that fit in 64 bits, f16, f32, f64, bool, tuples (packed element by element), null, and anything
// isZigString(T) returns true for.
// Returns the number of bytes written to the stream.
// TODO: Add support for arrays, a timestamp object, u/i128, f128,
// TODO: refactor to be like packAnyT()
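// e.g. the call in main() above, packAny(.{ @as(u8, 27), .{ 1337, "Cats are cool" } }), recurses into the tuples and picks the smallest encoding for each element (positive fixint, uint16, fixstr).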
pub fn packAny(zs: *ZpackStream, item: anytype) ZpackError!usize {
var bytes_written: usize = 0;
bytes_written += switch (@typeInfo(@TypeOf(item))) {
.Int => |ti| blk: {
if (ti.signedness == .unsigned) {
break :blk switch (@as(u128, item)) {
0x00...0x7F => try zs.packPosFixInt(@intCast(u7, item)),
0x80...0xFF => try zs.packU8(@intCast(u8, item)),
0x100...0xFFFF => try zs.packU16(@intCast(u16, item)),
0x1_0000...0xFFFF_FFFF => try zs.packU32(@intCast(u32, item)),
0x1_0000_0000...0xFFFF_FFFF_FFFF_FFFF => try zs.packU64(@intCast(u64, item)),
else => return ZpackError.InvalidArgument, // TODO: u128 ext
};
} else { // ti.signedness == .signed
break :blk switch (item) {
-0x80...0x7F => try zs.packI8(@intCast(i8, item)),
-0x8000...0x7FFF => try zs.packI16(@intCast(i16, item)),
-0x8000_0000...0x7FFF_FFFF => try zs.packI32(@intCast(i32, item)),
-0x8000_0000_0000_0000...0x7FFF_FFFF_FFFF_FFFF => try zs.packI64(@intCast(i64, item)),
else => return ZpackError.InvalidArgument, // TODO: u128 ext
};
}
},
.ComptimeInt => blk: {
if (item >= 0) { // Treat as unsigned.
break :blk switch (item) {
0x00...0xFF => try zs.packU8(@intCast(u8, item)),
0x100...0xFFFF => try zs.packU16(@intCast(u16, item)),
0x1_0000...0xFFFF_FFFF => try zs.packU32(@intCast(u32, item)),
0x1_0000_0000...0xFFFF_FFFF_FFFF_FFFF => try zs.packU64(@intCast(u64, item)),
else => return ZpackError.InvalidArgument, // TODO: u128 ext
};
} else { // Treat as signed.
break :blk switch (item) {
-0x80...0x7F => try zs.packI8(@intCast(i8, item)),
-0x8000...0x7FFF => try zs.packI16(@intCast(i16, item)),
-0x8000_0000...0x7FFF_FFFF => try zs.packI32(@intCast(i32, item)),
-0x8000_0000_0000_0000...0x7FFF_FFFF_FFFF_FFFF => try zs.packI64(@intCast(i64, item)),
else => return ZpackError.InvalidArgument, // TODO: i128 ext
};
}
},
.Float => |fti| blk: {
break :blk switch (fti.bits) {
16, 32 => try zs.packF32(item),
64 => try zs.packF64(item),
else => return ZpackError.InvalidArgument, // TODO: f128 ext
};
},
.ComptimeFloat => @compileError("Comptime Floats not supported."), // TODO: f128 ext
.Bool => try zs.packBool(item),
.Struct => blk: {
var tmp: usize = 0;
inline for (item) |v|
tmp += try zs.packAny(v);
break :blk tmp;
},
.Null => try zs.packNil(),
else => blk: {
if (meta.trait.isZigString(@TypeOf(item))) {
break :blk switch (item.len) {
0...31 => try zs.packFixStr(item),
32...255 => try zs.packStr8(item),
256...65535 => try zs.packStr16(item),
65536...4294967295 => try zs.packStr32(item),
else => return ZpackError.StringTooLong,
};
} else { // switch on the type to easily catch lib structs
break :blk switch (@TypeOf(item)) {
Timestamp32 => try zs.packTimestamp32(item),
else => {
std.log.info("Unsupported type {s}", .{@typeName(@TypeOf(item))});
return ZpackError.UnsupportedType;
},
};
}
return ZpackError.UnsupportedType;
},
};
return bytes_written;
}
// Essentially the same as packAny() but preserves the type information (packAny() will pack a u64{12} using
// packU8(12), packAnyT() will use packU64(12).)
// Returns the number of bytes written to the stream.
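// e.g. packAnyT(@as(u64, 12)) emits the full 9-byte uint64 encoding, whereas packAny(@as(u64, 12)) would shrink it to a 1-byte positive fixint.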
pub fn packAnyT(zs: *ZpackStream, arg: anytype) ZpackError!usize {
// var bytes_written: usize = 0;
// try switching on type first for primitives and library structs
return switch (@TypeOf(arg)) {
u8 => try zs.packU8(arg),
u16 => try zs.packU16(arg),
u32 => try zs.packU32(arg),
u64 => try zs.packU64(arg),
i8 => try zs.packI8(arg),
i16 => try zs.packI16(arg),
i32 => try zs.packI32(arg),
i64 => try zs.packI64(arg),
f32 => try zs.packF32(arg),
f64 => try zs.packF64(arg),
bool => try zs.packBool(arg),
Timestamp32 => try zs.packTimestamp32(arg),
@Type(.Null) => try zs.packNil(),
else => blk: { // string check then switch on type info
if (meta.trait.isZigString(@TypeOf(arg))) {
break :blk switch (arg.len) {
0...31 => try zs.packFixStr(arg),
32...255 => try zs.packStr8(arg),
256...65535 => try zs.packStr16(arg),
65536...4294967295 => try zs.packStr32(arg),
else => return ZpackError.StringTooLong,
};
} else { // switch on the type to easily catch lib structs
break :blk switch (@typeInfo(@TypeOf(arg))) {
// arrays are packed element by element; structs currently fall back to the
// untyped packer (which handles tuple fields)
.Array => try zs.packFixArrT(arg),
.Struct => try zs.packStruct(arg),
else => {
std.log.info("Unsupported type {s}", .{@typeName(@TypeOf(arg))});
return ZpackError.UnsupportedType;
},
};
}
}, // switch else
}; // switch (@TypeOf(arg)) {
}
pub fn packStruct(zs: *ZpackStream, _struct: anytype) ZpackError!usize {
var bytes_written: usize = 0;
bytes_written += switch (@typeInfo(@TypeOf(_struct))) {
.Struct => try zs.packAny(_struct),
.Pointer => @compileLog("Pointers not supported in packStruct() yet.", .{}),
else => return ZpackError.UnsupportedType,
};
return bytes_written;
}
pub fn packFixArrT(zs: *ZpackStream, arg: anytype) ZpackError!usize {
var bytes_written: usize = 0;
const ti = @typeInfo(@TypeOf(arg));
switch (ti) {
.Array => {
for (arg) |v| bytes_written += try zs.packAnyT(v);
},
else => return ZpackError.UnsupportedType,
}
return bytes_written;
}
pub fn packFixArr(zs: *ZpackStream, args: anytype) ZpackError!usize {
var bytes_written: usize = 0;
const ti = @typeInfo(@TypeOf(args));
switch (ti) {
.Array => {
for (args) |v| bytes_written += try zs.packAny(v);
},
else => return ZpackError.UnsupportedType,
}
return bytes_written;
}
pub fn init(alligator: Allocator) !ZpackStream {
var buff: []u8 = undefined;
const cap: usize = 2;
buff = try alligator.alloc(u8, cap); // TODO: magic
return ZpackStream{
.ator = alligator,
.buf = buff,
.capacity = cap,
.pos = 0,
};
}
// Free the buffer.
pub fn deinit(zs: ZpackStream) void {
zs.ator.free(zs.buf);
}
}; | src/main.zig |
const std = @import("std");
const druzhba = @import("druzhba");
// Please do not run `zig fmt` on this source... It makes chained fn calls ugly.
/// Signature `Count` — Provides a method to decrement a counter value.
const Count = druzhba.defineSig(struct {
fn ___(comptime Self: type) type {
return struct {
/// Decrement the counter value and return the new value.
pub next: fn (Self) u32,
};
}
}.___);
/// Cell class `Counter` — Implements `Count`.
const Counter = druzhba.defineClass()
.state(u32)
.attr(u32)
// Set the `ctor` method of the class. Use it to initialize `state`.
.ctor(struct {
fn ___(self: var) void {
const state: *u32 = self.state();
const attr: *const u32 = self.attr();
state.* = attr.*;
std.debug.warn("Counter: The counter value was initialized to {}\n", self.attr().*);
}
}.___)
// Defines an inbound port of signature `Count`.
.in("count", Count, struct {
fn ___(comptime Self: type) type {
return struct {
// This is one of the two ways to provide an implementation.
// Less handy, but good for metaprogramming.
pub fn __vtable__() Count.Vtable(Self) {
return Count.Vtable(Self) { .next = next };
}
/// The implementation of the `next` method.
fn next(self: Self) u32 {
const state: *u32 = self.state();
state.* -= 1;
return state.*;
}
};
}
}.___)
.build();
/// Signature `Entrypoint` — Provides an entrypoint method to be called from
/// the main function (hosted environment), reset handler, or interrupt handlers
/// (bare metal).
const Entrypoint = druzhba.defineSig(struct {
fn ___(comptime Self: type) type {
return struct {
pub main: fn (Self) void,
};
}
}.___);
/// Cell class `App` — The example application.
const App = druzhba.defineClass()
// Defines an outbound port of signature `Count`.
.out("count", Count)
// Defines an inbound port of signature `Entrypoint`.
.in("main", Entrypoint, struct {
fn ___(comptime Self: type) type {
return struct {
pub fn main(self: Self) void {
const count = self.out("count");
// https://www.derpibooru.org/2078751
var i = count.invoke("next");
while (i > 0) {
const i_next = count.invoke("next");
if (i != 1) {
std.debug.warn("{} bottles of pop on the wall, {} bottles of pop.\n", i, i);
} else {
std.debug.warn("{} bottle of pop on the wall, {} bottle of pop.\n", i, i);
}
if (i_next != 1) {
std.debug.warn("Take one down, pass it around, {} bottles of pop on the wall.\n", i_next);
} else {
std.debug.warn("Take one down, pass it around, {} bottle of pop on the wall.\n", i_next);
}
std.debug.warn("\n");
i = i_next;
}
}
};
}
}.___)
.build();
/// The inbound/outbound ports exposed by the subsystem `addApp`.
const AppIo = struct {
in_main: druzhba.InPort,
out_count: druzhba.OutPort,
};
/// This demonstrates how to define a subsystem.
fn addApp(comptime ctx: *druzhba.ComposeCtx) AppIo {
const app = ctx.new(App);
return AppIo {
.in_main = app.in("main"),
.out_count = app.out("count"),
};
}
/// The compose function, where classes are instantiated and connections are
/// defined.
fn addSystem(comptime ctx: *druzhba.ComposeCtx) void {
// Instantiate a `Counter` cell.
const counter = ctx.new(Counter).withAttr(100);
// Instantiate the `App` subsystem.
const app = addApp(ctx);
// Wire things up
ctx.connect(app.out_count, counter.in("count"));
// Try replacing the last line with this:
// ctx.connect(app.out_count, druzhba.wrapTrace(ctx, counter.in("count"), "counter"));
// Use a trace component to log method calls
const in_main = druzhba.wrapTrace(ctx, app.in_main, "main");
ctx.entry(in_main);
}
const System = druzhba.Compose(addSystem);
var system_state: System.State() = undefined; // → RAM
const system = comptime System.link(&system_state); // → ROM
pub fn main() anyerror!void {
// Call `ctor` methods of cells.
system.init();
// Invoke the `main` method of the inbound port `app.in_main`.
system.invoke("main");
} | examples/basic.zig |
const std = @import("std");
const Dir = std.fs.Dir;
const FnMeta = std.builtin.TypeInfo.Fn;
const FnDecl = std.builtin.TypeInfo.Declaration.Data.FnDecl;
const StructMeta = std.builtin.TypeInfo.Struct;
const EnumMeta = std.builtin.TypeInfo.Enum;
const UnionMeta = std.builtin.TypeInfo.Union;
const warn = std.debug.warn;
pub const C_Generator = struct {
file: std.fs.File,
const Self = @This();
pub fn init(comptime src_file: []const u8, dst_dir: *Dir) Self {
comptime const filebaseext = std.fs.path.basename(src_file);
comptime const filebase = filebaseext[0 .. filebaseext.len - 4];
var file = dst_dir.createFile(filebase ++ ".h", .{}) catch
@panic("Failed to create header file for source: " ++ src_file);
var res = Self{ .file = file };
// write the header's header, lol
res.write("#ifndef _" ++ filebase ++ "_H\n\n#define _" ++ filebase ++ "_H\n");
res.write("#include <stddef.h>\n#include <stdint.h>\n#include <stdbool.h>\n\n");
return res;
}
pub fn deinit(self: *Self) void {
self.write("\n#endif\n");
self.file.close();
}
pub fn gen_func(self: *Self, comptime name: []const u8, comptime func: FnDecl, comptime meta: FnMeta) void {
switch (meta.calling_convention) {
.Naked => self.write("__attribute__((naked)) "),
.Stdcall => self.write("__attribute__((stdcall)) "),
.Fastcall => self.write("__attribute__((fastcall)) "),
.Thiscall => self.write("__attribute__((thiscall)) "),
else => {},
}
self.writeType(func.return_type);
self.write(" " ++ name ++ "(");
inline for (meta.args) |arg, i| {
self.writeType(arg.arg_type.?);
//TODO: Figure out how to get arg names; for now just do arg0..argN
_ = self.file.writer().print(" arg{}", .{i}) catch unreachable;
if (i != meta.args.len - 1)
self.write(", ");
}
self.write(");\n\n");
}
pub fn gen_struct(self: *Self, comptime name: []const u8, comptime meta: StructMeta) void {
self.write("typedef struct ");
if (meta.layout == .Packed)
self.write("__attribute__((__packed__)) ");
self.write(name ++ " {\n");
inline for (meta.fields) |field| {
self.write(" ");
const info = @typeInfo(field.field_type);
if (info == .Array) {
self.writeType(info.Array.child);
} else {
self.writeType(field.field_type);
}
self.write(" " ++ field.name);
if (info == .Array) {
_ = self.file.writer().print("[{}]", .{info.Array.len}) catch unreachable;
}
self.write(";\n");
}
self.write("} " ++ name ++ "_t;\n\n");
}
pub fn gen_enum(self: *Self, comptime name: []const u8, comptime meta: EnumMeta) void {
self.write("enum " ++ name ++ " {\n");
comptime var last = 0;
inline for (meta.fields) |field, i| {
self.write(" " ++ field.name);
            // if the field value is not the implicit "previous + 1", define it explicitly
            if ((i == 0 and field.value != 0) or (i > 0 and field.value != last + 1)) {
_ = self.file.writer().print(" = {}", .{field.value}) catch unreachable;
}
self.write(",\n");
last = field.value;
}
self.write("};\n\n");
}
pub fn gen_union(self: *Self, comptime name: []const u8, comptime meta: UnionMeta) void {
self.write("typedef union ");
self.write(name ++ " {\n");
inline for (meta.fields) |field| {
self.write(" ");
self.writeType(field.field_type);
self.write(" " ++ field.name ++ ";\n");
}
self.write("} " ++ name ++ "_t;\n\n");
}
fn writeType(self: *Self, comptime T: type) void {
switch (T) {
void => self.write("void"),
bool => self.write("bool"),
usize => self.write("size_t"),
isize => self.write("int"),
u8 => self.write("uint8_t"),
u16 => self.write("uint16_t"),
u32 => self.write("uint32_t"),
u64 => self.write("uint64_t"),
i8 => self.write("int8_t"),
i16 => self.write("int16_t"),
i24 => self.write("int24_t"),
i32 => self.write("int32_t"),
i64 => self.write("int64_t"),
[*]bool => self.write("bool*"),
[*]usize => self.write("size_t*"),
[*]isize => self.write("int*"),
[*]u8 => self.write("uint8_t*"),
[*]u16 => self.write("uint16_t*"),
[*]u32 => self.write("uint32_t*"),
[*]u64 => self.write("uint64_t*"),
[*]i8 => self.write("int8_t*"),
[*]i16 => self.write("int16_t*"),
[*]i32 => self.write("int32_t*"),
[*]i64 => self.write("int64_t*"),
else => {
const meta = @typeInfo(T);
switch (meta) {
.Pointer => {
const child = meta.Pointer.child;
const childmeta = @typeInfo(child);
if (childmeta == .Struct and childmeta.Struct.layout != .Extern) {
self.write("void");
} else {
self.writeType(child);
}
self.write("*");
},
.Optional => self.writeType(meta.Optional.child),
.Array => @compileError("Handle goofy looking C Arrays in the calling function"),
else => self.write(@typeName(T) ++ "_t"),
}
},
}
}
fn write(self: *Self, str: []const u8) void {
_ = self.file.writeAll(str) catch unreachable;
}
}; | generators/c.zig |
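// Usage sketch (illustrative only; `dst_dir` stands for an already-opened std.fs.Dir and the
// example struct is hypothetical):
//
//     var gen = C_Generator.init("src/exports.zig", &dst_dir);
//     defer gen.deinit();
//     const Vec2 = extern struct { x: i32, y: i32 };
//     gen.gen_struct("Vec2", @typeInfo(Vec2).Struct);
//     // emits (roughly): typedef struct Vec2 { int32_t x; int32_t y; } Vec2_t;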
const std = @import("std");
const gen2 = @import("../../gen2/data.zig");
const assert = std.debug.assert;
const Stats = gen2.Stats;
const Types = gen2.Types;
pub const Species = enum(u8) {
None,
Bulbasaur,
Ivysaur,
Venusaur,
Charmander,
Charmeleon,
Charizard,
Squirtle,
Wartortle,
Blastoise,
Caterpie,
Metapod,
Butterfree,
Weedle,
Kakuna,
Beedrill,
Pidgey,
Pidgeotto,
Pidgeot,
Rattata,
Raticate,
Spearow,
Fearow,
Ekans,
Arbok,
Pikachu,
Raichu,
Sandshrew,
Sandslash,
NidoranF,
Nidorina,
Nidoqueen,
NidoranM,
Nidorino,
Nidoking,
Clefairy,
Clefable,
Vulpix,
Ninetales,
Jigglypuff,
Wigglytuff,
Zubat,
Golbat,
Oddish,
Gloom,
Vileplume,
Paras,
Parasect,
Venonat,
Venomoth,
Diglett,
Dugtrio,
Meowth,
Persian,
Psyduck,
Golduck,
Mankey,
Primeape,
Growlithe,
Arcanine,
Poliwag,
Poliwhirl,
Poliwrath,
Abra,
Kadabra,
Alakazam,
Machop,
Machoke,
Machamp,
Bellsprout,
Weepinbell,
Victreebel,
Tentacool,
Tentacruel,
Geodude,
Graveler,
Golem,
Ponyta,
Rapidash,
Slowpoke,
Slowbro,
Magnemite,
Magneton,
Farfetchd,
Doduo,
Dodrio,
Seel,
Dewgong,
Grimer,
Muk,
Shellder,
Cloyster,
Gastly,
Haunter,
Gengar,
Onix,
Drowzee,
Hypno,
Krabby,
Kingler,
Voltorb,
Electrode,
Exeggcute,
Exeggutor,
Cubone,
Marowak,
Hitmonlee,
Hitmonchan,
Lickitung,
Koffing,
Weezing,
Rhyhorn,
Rhydon,
Chansey,
Tangela,
Kangaskhan,
Horsea,
Seadra,
Goldeen,
Seaking,
Staryu,
Starmie,
MrMime,
Scyther,
Jynx,
Electabuzz,
Magmar,
Pinsir,
Tauros,
Magikarp,
Gyarados,
Lapras,
Ditto,
Eevee,
Vaporeon,
Jolteon,
Flareon,
Porygon,
Omanyte,
Omastar,
Kabuto,
Kabutops,
Aerodactyl,
Snorlax,
Articuno,
Zapdos,
Moltres,
Dratini,
Dragonair,
Dragonite,
Mewtwo,
Mew,
Chikorita,
Bayleef,
Meganium,
Cyndaquil,
Quilava,
Typhlosion,
Totodile,
Croconaw,
Feraligatr,
Sentret,
Furret,
Hoothoot,
Noctowl,
Ledyba,
Ledian,
Spinarak,
Ariados,
Crobat,
Chinchou,
Lanturn,
Pichu,
Cleffa,
Igglybuff,
Togepi,
Togetic,
Natu,
Xatu,
Mareep,
Flaaffy,
Ampharos,
Bellossom,
Marill,
Azumarill,
Sudowoodo,
Politoed,
Hoppip,
Skiploom,
Jumpluff,
Aipom,
Sunkern,
Sunflora,
Yanma,
Wooper,
Quagsire,
Espeon,
Umbreon,
Murkrow,
Slowking,
Misdreavus,
Unown,
Wobbuffet,
Girafarig,
Pineco,
Forretress,
Dunsparce,
Gligar,
Steelix,
Snubbull,
Granbull,
Qwilfish,
Scizor,
Shuckle,
Heracross,
Sneasel,
Teddiursa,
Ursaring,
Slugma,
Magcargo,
Swinub,
Piloswine,
Corsola,
Remoraid,
Octillery,
Delibird,
Mantine,
Skarmory,
Houndour,
Houndoom,
Kingdra,
Phanpy,
Donphan,
Porygon2,
Stantler,
Smeargle,
Tyrogue,
Hitmontop,
Smoochum,
Elekid,
Magby,
Miltank,
Blissey,
Raikou,
Entei,
Suicune,
Larvitar,
Pupitar,
Tyranitar,
Lugia,
HoOh,
Celebi,
// @test-only
pub const Data = struct {
stats: Stats(u8),
types: Types,
ratio: u8,
};
// @test-only
const DATA = [_]Data{
// Bulbasaur
.{
.stats = .{ .hp = 45, .atk = 49, .def = 49, .spe = 45, .spa = 65, .spd = 65 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x1F, // 12.5% F
},
// Ivysaur
.{
.stats = .{ .hp = 60, .atk = 62, .def = 63, .spe = 60, .spa = 80, .spd = 80 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x1F, // 12.5% F
},
// Venusaur
.{
.stats = .{ .hp = 80, .atk = 82, .def = 83, .spe = 80, .spa = 100, .spd = 100 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x1F, // 12.5% F
},
// Charmander
.{
.stats = .{ .hp = 39, .atk = 52, .def = 43, .spe = 65, .spa = 60, .spd = 50 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x1F, // 12.5% F
},
// Charmeleon
.{
.stats = .{ .hp = 58, .atk = 64, .def = 58, .spe = 80, .spa = 80, .spd = 65 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x1F, // 12.5% F
},
// Charizard
.{
.stats = .{ .hp = 78, .atk = 84, .def = 78, .spe = 100, .spa = 109, .spd = 85 },
.types = .{ .type1 = .Fire, .type2 = .Flying },
.ratio = 0x1F, // 12.5% F
},
// Squirtle
.{
.stats = .{ .hp = 44, .atk = 48, .def = 65, .spe = 43, .spa = 50, .spd = 64 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Wartortle
.{
.stats = .{ .hp = 59, .atk = 63, .def = 80, .spe = 58, .spa = 65, .spd = 80 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Blastoise
.{
.stats = .{ .hp = 79, .atk = 83, .def = 100, .spe = 78, .spa = 85, .spd = 105 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Caterpie
.{
.stats = .{ .hp = 45, .atk = 30, .def = 35, .spe = 45, .spa = 20, .spd = 20 },
.types = .{ .type1 = .Bug, .type2 = .Bug },
.ratio = 0x7F, // 50.0% F
},
// Metapod
.{
.stats = .{ .hp = 50, .atk = 20, .def = 55, .spe = 30, .spa = 25, .spd = 25 },
.types = .{ .type1 = .Bug, .type2 = .Bug },
.ratio = 0x7F, // 50.0% F
},
// Butterfree
.{
.stats = .{ .hp = 60, .atk = 45, .def = 50, .spe = 70, .spa = 80, .spd = 80 },
.types = .{ .type1 = .Bug, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Weedle
.{
.stats = .{ .hp = 40, .atk = 35, .def = 30, .spe = 50, .spa = 20, .spd = 20 },
.types = .{ .type1 = .Bug, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Kakuna
.{
.stats = .{ .hp = 45, .atk = 25, .def = 50, .spe = 35, .spa = 25, .spd = 25 },
.types = .{ .type1 = .Bug, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Beedrill
.{
.stats = .{ .hp = 65, .atk = 80, .def = 40, .spe = 75, .spa = 45, .spd = 80 },
.types = .{ .type1 = .Bug, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Pidgey
.{
.stats = .{ .hp = 40, .atk = 45, .def = 40, .spe = 56, .spa = 35, .spd = 35 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Pidgeotto
.{
.stats = .{ .hp = 63, .atk = 60, .def = 55, .spe = 71, .spa = 50, .spd = 50 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Pidgeot
.{
.stats = .{ .hp = 83, .atk = 80, .def = 75, .spe = 91, .spa = 70, .spd = 70 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Rattata
.{
.stats = .{ .hp = 30, .atk = 56, .def = 35, .spe = 72, .spa = 25, .spd = 35 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Raticate
.{
.stats = .{ .hp = 55, .atk = 81, .def = 60, .spe = 97, .spa = 50, .spd = 70 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Spearow
.{
.stats = .{ .hp = 40, .atk = 60, .def = 30, .spe = 70, .spa = 31, .spd = 31 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Fearow
.{
.stats = .{ .hp = 65, .atk = 90, .def = 65, .spe = 100, .spa = 61, .spd = 61 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Ekans
.{
.stats = .{ .hp = 35, .atk = 60, .def = 44, .spe = 55, .spa = 40, .spd = 54 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Arbok
.{
.stats = .{ .hp = 60, .atk = 85, .def = 69, .spe = 80, .spa = 65, .spd = 79 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Pikachu
.{
.stats = .{ .hp = 35, .atk = 55, .def = 30, .spe = 90, .spa = 50, .spd = 40 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Raichu
.{
.stats = .{ .hp = 60, .atk = 90, .def = 55, .spe = 100, .spa = 90, .spd = 80 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Sandshrew
.{
.stats = .{ .hp = 50, .atk = 75, .def = 85, .spe = 40, .spa = 20, .spd = 30 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Sandslash
.{
.stats = .{ .hp = 75, .atk = 100, .def = 110, .spe = 65, .spa = 45, .spd = 55 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// NidoranF
.{
.stats = .{ .hp = 55, .atk = 47, .def = 52, .spe = 41, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0xFE, // 100% F
},
// Nidorina
.{
.stats = .{ .hp = 70, .atk = 62, .def = 67, .spe = 56, .spa = 55, .spd = 55 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0xFE, // 100% F
},
// Nidoqueen
.{
.stats = .{ .hp = 90, .atk = 82, .def = 87, .spe = 76, .spa = 75, .spd = 85 },
.types = .{ .type1 = .Poison, .type2 = .Ground },
.ratio = 0xFE, // 100% F
},
// NidoranM
.{
.stats = .{ .hp = 46, .atk = 57, .def = 40, .spe = 50, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x00, // 0.00% F
},
// Nidorino
.{
.stats = .{ .hp = 61, .atk = 72, .def = 57, .spe = 65, .spa = 55, .spd = 55 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x00, // 0.00% F
},
// Nidoking
.{
.stats = .{ .hp = 81, .atk = 92, .def = 77, .spe = 85, .spa = 85, .spd = 75 },
.types = .{ .type1 = .Poison, .type2 = .Ground },
.ratio = 0x00, // 0.00% F
},
// Clefairy
.{
.stats = .{ .hp = 70, .atk = 45, .def = 48, .spe = 35, .spa = 60, .spd = 65 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Clefable
.{
.stats = .{ .hp = 95, .atk = 70, .def = 73, .spe = 60, .spa = 85, .spd = 90 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Vulpix
.{
.stats = .{ .hp = 38, .atk = 41, .def = 40, .spe = 65, .spa = 50, .spd = 65 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0xBF, // 75.0% F
},
// Ninetales
.{
.stats = .{ .hp = 73, .atk = 76, .def = 75, .spe = 100, .spa = 81, .spd = 100 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0xBF, // 75.0% F
},
// Jigglypuff
.{
.stats = .{ .hp = 115, .atk = 45, .def = 20, .spe = 20, .spa = 45, .spd = 25 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Wigglytuff
.{
.stats = .{ .hp = 140, .atk = 70, .def = 45, .spe = 45, .spa = 75, .spd = 50 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Zubat
.{
.stats = .{ .hp = 40, .atk = 45, .def = 35, .spe = 55, .spa = 30, .spd = 40 },
.types = .{ .type1 = .Poison, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Golbat
.{
.stats = .{ .hp = 75, .atk = 80, .def = 70, .spe = 90, .spa = 65, .spd = 75 },
.types = .{ .type1 = .Poison, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Oddish
.{
.stats = .{ .hp = 45, .atk = 50, .def = 55, .spe = 30, .spa = 75, .spd = 65 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Gloom
.{
.stats = .{ .hp = 60, .atk = 65, .def = 70, .spe = 40, .spa = 85, .spd = 75 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Vileplume
.{
.stats = .{ .hp = 75, .atk = 80, .def = 85, .spe = 50, .spa = 100, .spd = 90 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Paras
.{
.stats = .{ .hp = 35, .atk = 70, .def = 55, .spe = 25, .spa = 45, .spd = 55 },
.types = .{ .type1 = .Bug, .type2 = .Grass },
.ratio = 0x7F, // 50.0% F
},
// Parasect
.{
.stats = .{ .hp = 60, .atk = 95, .def = 80, .spe = 30, .spa = 60, .spd = 80 },
.types = .{ .type1 = .Bug, .type2 = .Grass },
.ratio = 0x7F, // 50.0% F
},
// Venonat
.{
.stats = .{ .hp = 60, .atk = 55, .def = 50, .spe = 45, .spa = 40, .spd = 55 },
.types = .{ .type1 = .Bug, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Venomoth
.{
.stats = .{ .hp = 70, .atk = 65, .def = 60, .spe = 90, .spa = 90, .spd = 75 },
.types = .{ .type1 = .Bug, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Diglett
.{
.stats = .{ .hp = 10, .atk = 55, .def = 25, .spe = 95, .spa = 35, .spd = 45 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Dugtrio
.{
.stats = .{ .hp = 35, .atk = 80, .def = 50, .spe = 120, .spa = 50, .spd = 70 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Meowth
.{
.stats = .{ .hp = 40, .atk = 45, .def = 35, .spe = 90, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Persian
.{
.stats = .{ .hp = 65, .atk = 70, .def = 60, .spe = 115, .spa = 65, .spd = 65 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Psyduck
.{
.stats = .{ .hp = 50, .atk = 52, .def = 48, .spe = 55, .spa = 65, .spd = 50 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Golduck
.{
.stats = .{ .hp = 80, .atk = 82, .def = 78, .spe = 85, .spa = 95, .spd = 80 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Mankey
.{
.stats = .{ .hp = 40, .atk = 80, .def = 35, .spe = 70, .spa = 35, .spd = 45 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x7F, // 50.0% F
},
// Primeape
.{
.stats = .{ .hp = 65, .atk = 105, .def = 60, .spe = 95, .spa = 60, .spd = 70 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x7F, // 50.0% F
},
// Growlithe
.{
.stats = .{ .hp = 55, .atk = 70, .def = 45, .spe = 60, .spa = 70, .spd = 50 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x3F, // 25.0% F
},
// Arcanine
.{
.stats = .{ .hp = 90, .atk = 110, .def = 80, .spe = 95, .spa = 100, .spd = 80 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x3F, // 25.0% F
},
// Poliwag
.{
.stats = .{ .hp = 40, .atk = 50, .def = 40, .spe = 90, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Poliwhirl
.{
.stats = .{ .hp = 65, .atk = 65, .def = 65, .spe = 90, .spa = 50, .spd = 50 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Poliwrath
.{
.stats = .{ .hp = 90, .atk = 85, .def = 95, .spe = 70, .spa = 70, .spd = 90 },
.types = .{ .type1 = .Water, .type2 = .Fighting },
.ratio = 0x7F, // 50.0% F
},
// Abra
.{
.stats = .{ .hp = 25, .atk = 20, .def = 15, .spe = 90, .spa = 105, .spd = 55 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x3F, // 25.0% F
},
// Kadabra
.{
.stats = .{ .hp = 40, .atk = 35, .def = 30, .spe = 105, .spa = 120, .spd = 70 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x3F, // 25.0% F
},
// Alakazam
.{
.stats = .{ .hp = 55, .atk = 50, .def = 45, .spe = 120, .spa = 135, .spd = 85 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x3F, // 25.0% F
},
// Machop
.{
.stats = .{ .hp = 70, .atk = 80, .def = 50, .spe = 35, .spa = 35, .spd = 35 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x3F, // 25.0% F
},
// Machoke
.{
.stats = .{ .hp = 80, .atk = 100, .def = 70, .spe = 45, .spa = 50, .spd = 60 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x3F, // 25.0% F
},
// Machamp
.{
.stats = .{ .hp = 90, .atk = 130, .def = 80, .spe = 55, .spa = 65, .spd = 85 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x3F, // 25.0% F
},
// Bellsprout
.{
.stats = .{ .hp = 50, .atk = 75, .def = 35, .spe = 40, .spa = 70, .spd = 30 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Weepinbell
.{
.stats = .{ .hp = 65, .atk = 90, .def = 50, .spe = 55, .spa = 85, .spd = 45 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Victreebel
.{
.stats = .{ .hp = 80, .atk = 105, .def = 65, .spe = 70, .spa = 100, .spd = 60 },
.types = .{ .type1 = .Grass, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Tentacool
.{
.stats = .{ .hp = 40, .atk = 40, .def = 35, .spe = 70, .spa = 50, .spd = 100 },
.types = .{ .type1 = .Water, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Tentacruel
.{
.stats = .{ .hp = 80, .atk = 70, .def = 65, .spe = 100, .spa = 80, .spd = 120 },
.types = .{ .type1 = .Water, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Geodude
.{
.stats = .{ .hp = 40, .atk = 80, .def = 100, .spe = 20, .spa = 30, .spd = 30 },
.types = .{ .type1 = .Rock, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Graveler
.{
.stats = .{ .hp = 55, .atk = 95, .def = 115, .spe = 35, .spa = 45, .spd = 45 },
.types = .{ .type1 = .Rock, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Golem
.{
.stats = .{ .hp = 80, .atk = 110, .def = 130, .spe = 45, .spa = 55, .spd = 65 },
.types = .{ .type1 = .Rock, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Ponyta
.{
.stats = .{ .hp = 50, .atk = 85, .def = 55, .spe = 90, .spa = 65, .spd = 65 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x7F, // 50.0% F
},
// Rapidash
.{
.stats = .{ .hp = 65, .atk = 100, .def = 70, .spe = 105, .spa = 80, .spd = 80 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x7F, // 50.0% F
},
// Slowpoke
.{
.stats = .{ .hp = 90, .atk = 65, .def = 65, .spe = 15, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Water, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Slowbro
.{
.stats = .{ .hp = 95, .atk = 75, .def = 110, .spe = 30, .spa = 100, .spd = 80 },
.types = .{ .type1 = .Water, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Magnemite
.{
.stats = .{ .hp = 25, .atk = 35, .def = 70, .spe = 45, .spa = 95, .spd = 55 },
.types = .{ .type1 = .Electric, .type2 = .Steel },
.ratio = 0xFF, // N
},
// Magneton
.{
.stats = .{ .hp = 50, .atk = 60, .def = 95, .spe = 70, .spa = 120, .spd = 70 },
.types = .{ .type1 = .Electric, .type2 = .Steel },
.ratio = 0xFF, // N
},
// Farfetchd
.{
.stats = .{ .hp = 52, .atk = 65, .def = 55, .spe = 60, .spa = 58, .spd = 62 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Doduo
.{
.stats = .{ .hp = 35, .atk = 85, .def = 45, .spe = 75, .spa = 35, .spd = 35 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Dodrio
.{
.stats = .{ .hp = 60, .atk = 110, .def = 70, .spe = 100, .spa = 60, .spd = 60 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Seel
.{
.stats = .{ .hp = 65, .atk = 45, .def = 55, .spe = 45, .spa = 45, .spd = 70 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Dewgong
.{
.stats = .{ .hp = 90, .atk = 70, .def = 80, .spe = 70, .spa = 70, .spd = 95 },
.types = .{ .type1 = .Water, .type2 = .Ice },
.ratio = 0x7F, // 50.0% F
},
// Grimer
.{
.stats = .{ .hp = 80, .atk = 80, .def = 50, .spe = 25, .spa = 40, .spd = 50 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Muk
.{
.stats = .{ .hp = 105, .atk = 105, .def = 75, .spe = 50, .spa = 65, .spd = 100 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Shellder
.{
.stats = .{ .hp = 30, .atk = 65, .def = 100, .spe = 40, .spa = 45, .spd = 25 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Cloyster
.{
.stats = .{ .hp = 50, .atk = 95, .def = 180, .spe = 70, .spa = 85, .spd = 45 },
.types = .{ .type1 = .Water, .type2 = .Ice },
.ratio = 0x7F, // 50.0% F
},
// Gastly
.{
.stats = .{ .hp = 30, .atk = 35, .def = 30, .spe = 80, .spa = 100, .spd = 35 },
.types = .{ .type1 = .Ghost, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Haunter
.{
.stats = .{ .hp = 45, .atk = 50, .def = 45, .spe = 95, .spa = 115, .spd = 55 },
.types = .{ .type1 = .Ghost, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Gengar
.{
.stats = .{ .hp = 60, .atk = 65, .def = 60, .spe = 110, .spa = 130, .spd = 75 },
.types = .{ .type1 = .Ghost, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Onix
.{
.stats = .{ .hp = 35, .atk = 45, .def = 160, .spe = 70, .spa = 30, .spd = 45 },
.types = .{ .type1 = .Rock, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Drowzee
.{
.stats = .{ .hp = 60, .atk = 48, .def = 45, .spe = 42, .spa = 43, .spd = 90 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Hypno
.{
.stats = .{ .hp = 85, .atk = 73, .def = 70, .spe = 67, .spa = 73, .spd = 115 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Krabby
.{
.stats = .{ .hp = 30, .atk = 105, .def = 90, .spe = 50, .spa = 25, .spd = 25 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Kingler
.{
.stats = .{ .hp = 55, .atk = 130, .def = 115, .spe = 75, .spa = 50, .spd = 50 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Voltorb
.{
.stats = .{ .hp = 40, .atk = 30, .def = 50, .spe = 100, .spa = 55, .spd = 55 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0xFF, // N
},
// Electrode
.{
.stats = .{ .hp = 60, .atk = 50, .def = 70, .spe = 140, .spa = 80, .spd = 80 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0xFF, // N
},
// Exeggcute
.{
.stats = .{ .hp = 60, .atk = 40, .def = 80, .spe = 40, .spa = 60, .spd = 45 },
.types = .{ .type1 = .Grass, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Exeggutor
.{
.stats = .{ .hp = 95, .atk = 95, .def = 85, .spe = 55, .spa = 125, .spd = 65 },
.types = .{ .type1 = .Grass, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Cubone
.{
.stats = .{ .hp = 50, .atk = 50, .def = 95, .spe = 35, .spa = 40, .spd = 50 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Marowak
.{
.stats = .{ .hp = 60, .atk = 80, .def = 110, .spe = 45, .spa = 50, .spd = 80 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Hitmonlee
.{
.stats = .{ .hp = 50, .atk = 120, .def = 53, .spe = 87, .spa = 35, .spd = 110 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x00, // 0.00% F
},
// Hitmonchan
.{
.stats = .{ .hp = 50, .atk = 105, .def = 79, .spe = 76, .spa = 35, .spd = 110 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x00, // 0.00% F
},
// Lickitung
.{
.stats = .{ .hp = 90, .atk = 55, .def = 75, .spe = 30, .spa = 60, .spd = 75 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Koffing
.{
.stats = .{ .hp = 40, .atk = 65, .def = 95, .spe = 35, .spa = 60, .spd = 45 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Weezing
.{
.stats = .{ .hp = 65, .atk = 90, .def = 120, .spe = 60, .spa = 85, .spd = 70 },
.types = .{ .type1 = .Poison, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Rhyhorn
.{
.stats = .{ .hp = 80, .atk = 85, .def = 95, .spe = 25, .spa = 30, .spd = 30 },
.types = .{ .type1 = .Ground, .type2 = .Rock },
.ratio = 0x7F, // 50.0% F
},
// Rhydon
.{
.stats = .{ .hp = 105, .atk = 130, .def = 120, .spe = 40, .spa = 45, .spd = 45 },
.types = .{ .type1 = .Ground, .type2 = .Rock },
.ratio = 0x7F, // 50.0% F
},
// Chansey
.{
.stats = .{ .hp = 250, .atk = 5, .def = 5, .spe = 50, .spa = 35, .spd = 105 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xFE, // 100% F
},
// Tangela
.{
.stats = .{ .hp = 65, .atk = 55, .def = 115, .spe = 60, .spa = 100, .spd = 40 },
.types = .{ .type1 = .Grass, .type2 = .Grass },
.ratio = 0x7F, // 50.0% F
},
// Kangaskhan
.{
.stats = .{ .hp = 105, .atk = 95, .def = 80, .spe = 90, .spa = 40, .spd = 80 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xFE, // 100% F
},
// Horsea
.{
.stats = .{ .hp = 30, .atk = 40, .def = 70, .spe = 60, .spa = 70, .spd = 25 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Seadra
.{
.stats = .{ .hp = 55, .atk = 65, .def = 95, .spe = 85, .spa = 95, .spd = 45 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Goldeen
.{
.stats = .{ .hp = 45, .atk = 67, .def = 60, .spe = 63, .spa = 35, .spd = 50 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Seaking
.{
.stats = .{ .hp = 80, .atk = 92, .def = 65, .spe = 68, .spa = 65, .spd = 80 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Staryu
.{
.stats = .{ .hp = 30, .atk = 45, .def = 55, .spe = 85, .spa = 70, .spd = 55 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0xFF, // N
},
// Starmie
.{
.stats = .{ .hp = 60, .atk = 75, .def = 85, .spe = 115, .spa = 100, .spd = 85 },
.types = .{ .type1 = .Water, .type2 = .Psychic },
.ratio = 0xFF, // N
},
// MrMime
.{
.stats = .{ .hp = 40, .atk = 45, .def = 65, .spe = 90, .spa = 100, .spd = 120 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Scyther
.{
.stats = .{ .hp = 70, .atk = 110, .def = 80, .spe = 105, .spa = 55, .spd = 80 },
.types = .{ .type1 = .Bug, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Jynx
.{
.stats = .{ .hp = 65, .atk = 50, .def = 35, .spe = 95, .spa = 115, .spd = 95 },
.types = .{ .type1 = .Ice, .type2 = .Psychic },
.ratio = 0xFE, // 100% F
},
// Electabuzz
.{
.stats = .{ .hp = 65, .atk = 83, .def = 57, .spe = 105, .spa = 95, .spd = 85 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x3F, // 25.0% F
},
// Magmar
.{
.stats = .{ .hp = 65, .atk = 95, .def = 57, .spe = 93, .spa = 100, .spd = 85 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x3F, // 25.0% F
},
// Pinsir
.{
.stats = .{ .hp = 65, .atk = 125, .def = 100, .spe = 85, .spa = 55, .spd = 70 },
.types = .{ .type1 = .Bug, .type2 = .Bug },
.ratio = 0x7F, // 50.0% F
},
// Tauros
.{
.stats = .{ .hp = 75, .atk = 100, .def = 95, .spe = 110, .spa = 40, .spd = 70 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x00, // 0.00% F
},
// Magikarp
.{
.stats = .{ .hp = 20, .atk = 10, .def = 55, .spe = 80, .spa = 15, .spd = 20 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Gyarados
.{
.stats = .{ .hp = 95, .atk = 125, .def = 79, .spe = 81, .spa = 60, .spd = 100 },
.types = .{ .type1 = .Water, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Lapras
.{
.stats = .{ .hp = 130, .atk = 85, .def = 80, .spe = 60, .spa = 85, .spd = 95 },
.types = .{ .type1 = .Water, .type2 = .Ice },
.ratio = 0x7F, // 50.0% F
},
// Ditto
.{
.stats = .{ .hp = 48, .atk = 48, .def = 48, .spe = 48, .spa = 48, .spd = 48 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xFF, // N
},
// Eevee
.{
.stats = .{ .hp = 55, .atk = 55, .def = 50, .spe = 55, .spa = 45, .spd = 65 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x1F, // 12.5% F
},
// Vaporeon
.{
.stats = .{ .hp = 130, .atk = 65, .def = 60, .spe = 65, .spa = 110, .spd = 95 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Jolteon
.{
.stats = .{ .hp = 65, .atk = 65, .def = 60, .spe = 130, .spa = 110, .spd = 95 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x1F, // 12.5% F
},
// Flareon
.{
.stats = .{ .hp = 65, .atk = 130, .def = 60, .spe = 65, .spa = 95, .spd = 110 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x1F, // 12.5% F
},
// Porygon
.{
.stats = .{ .hp = 65, .atk = 60, .def = 70, .spe = 40, .spa = 85, .spd = 75 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xFF, // N
},
// Omanyte
.{
.stats = .{ .hp = 35, .atk = 40, .def = 100, .spe = 35, .spa = 90, .spd = 55 },
.types = .{ .type1 = .Rock, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Omastar
.{
.stats = .{ .hp = 70, .atk = 60, .def = 125, .spe = 55, .spa = 115, .spd = 70 },
.types = .{ .type1 = .Rock, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Kabuto
.{
.stats = .{ .hp = 30, .atk = 80, .def = 90, .spe = 55, .spa = 55, .spd = 45 },
.types = .{ .type1 = .Rock, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Kabutops
.{
.stats = .{ .hp = 60, .atk = 115, .def = 105, .spe = 80, .spa = 65, .spd = 70 },
.types = .{ .type1 = .Rock, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Aerodactyl
.{
.stats = .{ .hp = 80, .atk = 105, .def = 65, .spe = 130, .spa = 60, .spd = 75 },
.types = .{ .type1 = .Rock, .type2 = .Flying },
.ratio = 0x1F, // 12.5% F
},
// Snorlax
.{
.stats = .{ .hp = 160, .atk = 110, .def = 65, .spe = 30, .spa = 65, .spd = 110 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x1F, // 12.5% F
},
// Articuno
.{
.stats = .{ .hp = 90, .atk = 85, .def = 100, .spe = 85, .spa = 95, .spd = 125 },
.types = .{ .type1 = .Ice, .type2 = .Flying },
.ratio = 0xFF, // N
},
// Zapdos
.{
.stats = .{ .hp = 90, .atk = 90, .def = 85, .spe = 100, .spa = 125, .spd = 90 },
.types = .{ .type1 = .Electric, .type2 = .Flying },
.ratio = 0xFF, // N
},
// Moltres
.{
.stats = .{ .hp = 90, .atk = 100, .def = 90, .spe = 90, .spa = 125, .spd = 85 },
.types = .{ .type1 = .Fire, .type2 = .Flying },
.ratio = 0xFF, // N
},
// Dratini
.{
.stats = .{ .hp = 41, .atk = 64, .def = 45, .spe = 50, .spa = 50, .spd = 50 },
.types = .{ .type1 = .Dragon, .type2 = .Dragon },
.ratio = 0x7F, // 50.0% F
},
// Dragonair
.{
.stats = .{ .hp = 61, .atk = 84, .def = 65, .spe = 70, .spa = 70, .spd = 70 },
.types = .{ .type1 = .Dragon, .type2 = .Dragon },
.ratio = 0x7F, // 50.0% F
},
// Dragonite
.{
.stats = .{ .hp = 91, .atk = 134, .def = 95, .spe = 80, .spa = 100, .spd = 100 },
.types = .{ .type1 = .Dragon, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Mewtwo
.{
.stats = .{ .hp = 106, .atk = 110, .def = 90, .spe = 130, .spa = 154, .spd = 90 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0xFF, // N
},
// Mew
.{
.stats = .{ .hp = 100, .atk = 100, .def = 100, .spe = 100, .spa = 100, .spd = 100 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0xFF, // N
},
// Chikorita
.{
.stats = .{ .hp = 45, .atk = 49, .def = 65, .spe = 45, .spa = 49, .spd = 65 },
.types = .{ .type1 = .Grass, .type2 = .Grass },
.ratio = 0x1F, // 12.5% F
},
// Bayleef
.{
.stats = .{ .hp = 60, .atk = 62, .def = 80, .spe = 60, .spa = 63, .spd = 80 },
.types = .{ .type1 = .Grass, .type2 = .Grass },
.ratio = 0x1F, // 12.5% F
},
// Meganium
.{
.stats = .{ .hp = 80, .atk = 82, .def = 100, .spe = 80, .spa = 83, .spd = 100 },
.types = .{ .type1 = .Grass, .type2 = .Grass },
.ratio = 0x1F, // 12.5% F
},
// Cyndaquil
.{
.stats = .{ .hp = 39, .atk = 52, .def = 43, .spe = 65, .spa = 60, .spd = 50 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x1F, // 12.5% F
},
// Quilava
.{
.stats = .{ .hp = 58, .atk = 64, .def = 58, .spe = 80, .spa = 80, .spd = 65 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x1F, // 12.5% F
},
// Typhlosion
.{
.stats = .{ .hp = 78, .atk = 84, .def = 78, .spe = 100, .spa = 109, .spd = 85 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x1F, // 12.5% F
},
// Totodile
.{
.stats = .{ .hp = 50, .atk = 65, .def = 64, .spe = 43, .spa = 44, .spd = 48 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Croconaw
.{
.stats = .{ .hp = 65, .atk = 80, .def = 80, .spe = 58, .spa = 59, .spd = 63 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Feraligatr
.{
.stats = .{ .hp = 85, .atk = 105, .def = 100, .spe = 78, .spa = 79, .spd = 83 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x1F, // 12.5% F
},
// Sentret
.{
.stats = .{ .hp = 35, .atk = 46, .def = 34, .spe = 20, .spa = 35, .spd = 45 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Furret
.{
.stats = .{ .hp = 85, .atk = 76, .def = 64, .spe = 90, .spa = 45, .spd = 55 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Hoothoot
.{
.stats = .{ .hp = 60, .atk = 30, .def = 30, .spe = 50, .spa = 36, .spd = 56 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Noctowl
.{
.stats = .{ .hp = 100, .atk = 50, .def = 50, .spe = 70, .spa = 76, .spd = 96 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Ledyba
.{
.stats = .{ .hp = 40, .atk = 20, .def = 30, .spe = 55, .spa = 40, .spd = 80 },
.types = .{ .type1 = .Bug, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Ledian
.{
.stats = .{ .hp = 55, .atk = 35, .def = 50, .spe = 85, .spa = 55, .spd = 110 },
.types = .{ .type1 = .Bug, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Spinarak
.{
.stats = .{ .hp = 40, .atk = 60, .def = 40, .spe = 30, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Bug, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Ariados
.{
.stats = .{ .hp = 70, .atk = 90, .def = 70, .spe = 40, .spa = 60, .spd = 60 },
.types = .{ .type1 = .Bug, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Crobat
.{
.stats = .{ .hp = 85, .atk = 90, .def = 80, .spe = 130, .spa = 70, .spd = 80 },
.types = .{ .type1 = .Poison, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Chinchou
.{
.stats = .{ .hp = 75, .atk = 38, .def = 38, .spe = 67, .spa = 56, .spd = 56 },
.types = .{ .type1 = .Water, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Lanturn
.{
.stats = .{ .hp = 125, .atk = 58, .def = 58, .spe = 67, .spa = 76, .spd = 76 },
.types = .{ .type1 = .Water, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Pichu
.{
.stats = .{ .hp = 20, .atk = 40, .def = 15, .spe = 60, .spa = 35, .spd = 35 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Cleffa
.{
.stats = .{ .hp = 50, .atk = 25, .def = 28, .spe = 15, .spa = 45, .spd = 55 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Igglybuff
.{
.stats = .{ .hp = 90, .atk = 30, .def = 15, .spe = 15, .spa = 40, .spd = 20 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Togepi
.{
.stats = .{ .hp = 35, .atk = 20, .def = 65, .spe = 20, .spa = 40, .spd = 65 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x1F, // 12.5% F
},
// Togetic
.{
.stats = .{ .hp = 55, .atk = 40, .def = 85, .spe = 40, .spa = 80, .spd = 105 },
.types = .{ .type1 = .Normal, .type2 = .Flying },
.ratio = 0x1F, // 12.5% F
},
// Natu
.{
.stats = .{ .hp = 40, .atk = 50, .def = 45, .spe = 70, .spa = 70, .spd = 45 },
.types = .{ .type1 = .Psychic, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Xatu
.{
.stats = .{ .hp = 65, .atk = 75, .def = 70, .spe = 95, .spa = 95, .spd = 70 },
.types = .{ .type1 = .Psychic, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Mareep
.{
.stats = .{ .hp = 55, .atk = 40, .def = 40, .spe = 35, .spa = 65, .spd = 45 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Flaaffy
.{
.stats = .{ .hp = 70, .atk = 55, .def = 55, .spe = 45, .spa = 80, .spd = 60 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Ampharos
.{
.stats = .{ .hp = 90, .atk = 75, .def = 75, .spe = 55, .spa = 115, .spd = 90 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x7F, // 50.0% F
},
// Bellossom
.{
.stats = .{ .hp = 75, .atk = 80, .def = 85, .spe = 50, .spa = 90, .spd = 100 },
.types = .{ .type1 = .Grass, .type2 = .Grass },
.ratio = 0x7F, // 50.0% F
},
// Marill
.{
.stats = .{ .hp = 70, .atk = 20, .def = 50, .spe = 40, .spa = 20, .spd = 50 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Azumarill
.{
.stats = .{ .hp = 100, .atk = 50, .def = 80, .spe = 50, .spa = 50, .spd = 80 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Sudowoodo
.{
.stats = .{ .hp = 70, .atk = 100, .def = 115, .spe = 30, .spa = 30, .spd = 65 },
.types = .{ .type1 = .Rock, .type2 = .Rock },
.ratio = 0x7F, // 50.0% F
},
// Politoed
.{
.stats = .{ .hp = 90, .atk = 75, .def = 75, .spe = 70, .spa = 90, .spd = 100 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Hoppip
.{
.stats = .{ .hp = 35, .atk = 35, .def = 40, .spe = 50, .spa = 35, .spd = 55 },
.types = .{ .type1 = .Grass, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Skiploom
.{
.stats = .{ .hp = 55, .atk = 45, .def = 50, .spe = 80, .spa = 45, .spd = 65 },
.types = .{ .type1 = .Grass, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Jumpluff
.{
.stats = .{ .hp = 75, .atk = 55, .def = 70, .spe = 110, .spa = 55, .spd = 85 },
.types = .{ .type1 = .Grass, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Aipom
.{
.stats = .{ .hp = 55, .atk = 70, .def = 55, .spe = 85, .spa = 40, .spd = 55 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Sunkern
.{
.stats = .{ .hp = 30, .atk = 30, .def = 30, .spe = 30, .spa = 30, .spd = 30 },
.types = .{ .type1 = .Grass, .type2 = .Grass },
.ratio = 0x7F, // 50.0% F
},
// Sunflora
.{
.stats = .{ .hp = 75, .atk = 75, .def = 55, .spe = 30, .spa = 105, .spd = 85 },
.types = .{ .type1 = .Grass, .type2 = .Grass },
.ratio = 0x7F, // 50.0% F
},
// Yanma
.{
.stats = .{ .hp = 65, .atk = 65, .def = 45, .spe = 95, .spa = 75, .spd = 45 },
.types = .{ .type1 = .Bug, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Wooper
.{
.stats = .{ .hp = 55, .atk = 45, .def = 45, .spe = 15, .spa = 25, .spd = 25 },
.types = .{ .type1 = .Water, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Quagsire
.{
.stats = .{ .hp = 95, .atk = 85, .def = 85, .spe = 35, .spa = 65, .spd = 65 },
.types = .{ .type1 = .Water, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Espeon
.{
.stats = .{ .hp = 65, .atk = 65, .def = 60, .spe = 110, .spa = 130, .spd = 95 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x1F, // 12.5% F
},
// Umbreon
.{
.stats = .{ .hp = 95, .atk = 65, .def = 110, .spe = 65, .spa = 60, .spd = 130 },
.types = .{ .type1 = .Dark, .type2 = .Dark },
.ratio = 0x1F, // 12.5% F
},
// Murkrow
.{
.stats = .{ .hp = 60, .atk = 85, .def = 42, .spe = 91, .spa = 85, .spd = 42 },
.types = .{ .type1 = .Dark, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Slowking
.{
.stats = .{ .hp = 95, .atk = 75, .def = 80, .spe = 30, .spa = 100, .spd = 110 },
.types = .{ .type1 = .Water, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Misdreavus
.{
.stats = .{ .hp = 60, .atk = 60, .def = 60, .spe = 85, .spa = 85, .spd = 85 },
.types = .{ .type1 = .Ghost, .type2 = .Ghost },
.ratio = 0x7F, // 50.0% F
},
// Unown
.{
.stats = .{ .hp = 48, .atk = 72, .def = 48, .spe = 48, .spa = 72, .spd = 48 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0xFF, // N
},
// Wobbuffet
.{
.stats = .{ .hp = 190, .atk = 33, .def = 58, .spe = 33, .spa = 33, .spd = 58 },
.types = .{ .type1 = .Psychic, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Girafarig
.{
.stats = .{ .hp = 70, .atk = 80, .def = 65, .spe = 85, .spa = 90, .spd = 65 },
.types = .{ .type1 = .Normal, .type2 = .Psychic },
.ratio = 0x7F, // 50.0% F
},
// Pineco
.{
.stats = .{ .hp = 50, .atk = 65, .def = 90, .spe = 15, .spa = 35, .spd = 35 },
.types = .{ .type1 = .Bug, .type2 = .Bug },
.ratio = 0x7F, // 50.0% F
},
// Forretress
.{
.stats = .{ .hp = 75, .atk = 90, .def = 140, .spe = 40, .spa = 60, .spd = 60 },
.types = .{ .type1 = .Bug, .type2 = .Steel },
.ratio = 0x7F, // 50.0% F
},
// Dunsparce
.{
.stats = .{ .hp = 100, .atk = 70, .def = 70, .spe = 45, .spa = 65, .spd = 65 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Gligar
.{
.stats = .{ .hp = 65, .atk = 75, .def = 105, .spe = 85, .spa = 35, .spd = 65 },
.types = .{ .type1 = .Ground, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Steelix
.{
.stats = .{ .hp = 75, .atk = 85, .def = 200, .spe = 30, .spa = 55, .spd = 65 },
.types = .{ .type1 = .Steel, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Snubbull
.{
.stats = .{ .hp = 60, .atk = 80, .def = 50, .spe = 30, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Granbull
.{
.stats = .{ .hp = 90, .atk = 120, .def = 75, .spe = 45, .spa = 60, .spd = 60 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xBF, // 75.0% F
},
// Qwilfish
.{
.stats = .{ .hp = 65, .atk = 95, .def = 75, .spe = 85, .spa = 55, .spd = 55 },
.types = .{ .type1 = .Water, .type2 = .Poison },
.ratio = 0x7F, // 50.0% F
},
// Scizor
.{
.stats = .{ .hp = 70, .atk = 130, .def = 100, .spe = 65, .spa = 55, .spd = 80 },
.types = .{ .type1 = .Bug, .type2 = .Steel },
.ratio = 0x7F, // 50.0% F
},
// Shuckle
.{
.stats = .{ .hp = 20, .atk = 10, .def = 230, .spe = 5, .spa = 10, .spd = 230 },
.types = .{ .type1 = .Bug, .type2 = .Rock },
.ratio = 0x7F, // 50.0% F
},
// Heracross
.{
.stats = .{ .hp = 80, .atk = 125, .def = 75, .spe = 85, .spa = 40, .spd = 95 },
.types = .{ .type1 = .Bug, .type2 = .Fighting },
.ratio = 0x7F, // 50.0% F
},
// Sneasel
.{
.stats = .{ .hp = 55, .atk = 95, .def = 55, .spe = 115, .spa = 35, .spd = 75 },
.types = .{ .type1 = .Dark, .type2 = .Ice },
.ratio = 0x7F, // 50.0% F
},
// Teddiursa
.{
.stats = .{ .hp = 60, .atk = 80, .def = 50, .spe = 40, .spa = 50, .spd = 50 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Ursaring
.{
.stats = .{ .hp = 90, .atk = 130, .def = 75, .spe = 55, .spa = 75, .spd = 75 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Slugma
.{
.stats = .{ .hp = 40, .atk = 40, .def = 40, .spe = 20, .spa = 70, .spd = 40 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x7F, // 50.0% F
},
// Magcargo
.{
.stats = .{ .hp = 50, .atk = 50, .def = 120, .spe = 30, .spa = 80, .spd = 80 },
.types = .{ .type1 = .Fire, .type2 = .Rock },
.ratio = 0x7F, // 50.0% F
},
// Swinub
.{
.stats = .{ .hp = 50, .atk = 50, .def = 40, .spe = 50, .spa = 30, .spd = 30 },
.types = .{ .type1 = .Ice, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Piloswine
.{
.stats = .{ .hp = 100, .atk = 100, .def = 80, .spe = 50, .spa = 60, .spd = 60 },
.types = .{ .type1 = .Ice, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Corsola
.{
.stats = .{ .hp = 55, .atk = 55, .def = 85, .spe = 35, .spa = 65, .spd = 85 },
.types = .{ .type1 = .Water, .type2 = .Rock },
.ratio = 0xBF, // 75.0% F
},
// Remoraid
.{
.stats = .{ .hp = 35, .atk = 65, .def = 35, .spe = 65, .spa = 65, .spd = 35 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Octillery
.{
.stats = .{ .hp = 75, .atk = 105, .def = 75, .spe = 45, .spa = 105, .spd = 75 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0x7F, // 50.0% F
},
// Delibird
.{
.stats = .{ .hp = 45, .atk = 55, .def = 45, .spe = 75, .spa = 65, .spd = 45 },
.types = .{ .type1 = .Ice, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Mantine
.{
.stats = .{ .hp = 65, .atk = 40, .def = 70, .spe = 70, .spa = 80, .spd = 140 },
.types = .{ .type1 = .Water, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Skarmory
.{
.stats = .{ .hp = 65, .atk = 80, .def = 140, .spe = 70, .spa = 40, .spd = 70 },
.types = .{ .type1 = .Steel, .type2 = .Flying },
.ratio = 0x7F, // 50.0% F
},
// Houndour
.{
.stats = .{ .hp = 45, .atk = 60, .def = 30, .spe = 65, .spa = 80, .spd = 50 },
.types = .{ .type1 = .Dark, .type2 = .Fire },
.ratio = 0x7F, // 50.0% F
},
// Houndoom
.{
.stats = .{ .hp = 75, .atk = 90, .def = 50, .spe = 95, .spa = 110, .spd = 80 },
.types = .{ .type1 = .Dark, .type2 = .Fire },
.ratio = 0x7F, // 50.0% F
},
// Kingdra
.{
.stats = .{ .hp = 75, .atk = 95, .def = 95, .spe = 85, .spa = 95, .spd = 95 },
.types = .{ .type1 = .Water, .type2 = .Dragon },
.ratio = 0x7F, // 50.0% F
},
// Phanpy
.{
.stats = .{ .hp = 90, .atk = 60, .def = 60, .spe = 40, .spa = 40, .spd = 40 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Donphan
.{
.stats = .{ .hp = 90, .atk = 120, .def = 120, .spe = 50, .spa = 60, .spd = 60 },
.types = .{ .type1 = .Ground, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Porygon2
.{
.stats = .{ .hp = 85, .atk = 80, .def = 90, .spe = 60, .spa = 105, .spd = 95 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xFF, // N
},
// Stantler
.{
.stats = .{ .hp = 73, .atk = 95, .def = 62, .spe = 85, .spa = 85, .spd = 65 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Smeargle
.{
.stats = .{ .hp = 55, .atk = 20, .def = 35, .spe = 75, .spa = 20, .spd = 45 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0x7F, // 50.0% F
},
// Tyrogue
.{
.stats = .{ .hp = 35, .atk = 35, .def = 35, .spe = 35, .spa = 35, .spd = 35 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x00, // 0.00% F
},
// Hitmontop
.{
.stats = .{ .hp = 50, .atk = 95, .def = 95, .spe = 70, .spa = 35, .spd = 110 },
.types = .{ .type1 = .Fighting, .type2 = .Fighting },
.ratio = 0x00, // 0.00% F
},
// Smoochum
.{
.stats = .{ .hp = 45, .atk = 30, .def = 15, .spe = 65, .spa = 85, .spd = 65 },
.types = .{ .type1 = .Ice, .type2 = .Psychic },
.ratio = 0xFE, // 100% F
},
// Elekid
.{
.stats = .{ .hp = 45, .atk = 63, .def = 37, .spe = 95, .spa = 65, .spd = 55 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0x3F, // 25.0% F
},
// Magby
.{
.stats = .{ .hp = 45, .atk = 75, .def = 37, .spe = 83, .spa = 70, .spd = 55 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0x3F, // 25.0% F
},
// Miltank
.{
.stats = .{ .hp = 95, .atk = 80, .def = 105, .spe = 100, .spa = 40, .spd = 70 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xFE, // 100% F
},
// Blissey
.{
.stats = .{ .hp = 255, .atk = 10, .def = 10, .spe = 55, .spa = 75, .spd = 135 },
.types = .{ .type1 = .Normal, .type2 = .Normal },
.ratio = 0xFE, // 100% F
},
// Raikou
.{
.stats = .{ .hp = 90, .atk = 85, .def = 75, .spe = 115, .spa = 115, .spd = 100 },
.types = .{ .type1 = .Electric, .type2 = .Electric },
.ratio = 0xFF, // N
},
// Entei
.{
.stats = .{ .hp = 115, .atk = 115, .def = 85, .spe = 100, .spa = 90, .spd = 75 },
.types = .{ .type1 = .Fire, .type2 = .Fire },
.ratio = 0xFF, // N
},
// Suicune
.{
.stats = .{ .hp = 100, .atk = 75, .def = 115, .spe = 85, .spa = 90, .spd = 115 },
.types = .{ .type1 = .Water, .type2 = .Water },
.ratio = 0xFF, // N
},
// Larvitar
.{
.stats = .{ .hp = 50, .atk = 64, .def = 50, .spe = 41, .spa = 45, .spd = 50 },
.types = .{ .type1 = .Rock, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Pupitar
.{
.stats = .{ .hp = 70, .atk = 84, .def = 70, .spe = 51, .spa = 65, .spd = 70 },
.types = .{ .type1 = .Rock, .type2 = .Ground },
.ratio = 0x7F, // 50.0% F
},
// Tyranitar
.{
.stats = .{ .hp = 100, .atk = 134, .def = 110, .spe = 61, .spa = 95, .spd = 100 },
.types = .{ .type1 = .Rock, .type2 = .Dark },
.ratio = 0x7F, // 50.0% F
},
// Lugia
.{
.stats = .{ .hp = 106, .atk = 90, .def = 130, .spe = 110, .spa = 90, .spd = 154 },
.types = .{ .type1 = .Psychic, .type2 = .Flying },
.ratio = 0xFF, // N
},
// HoOh
.{
.stats = .{ .hp = 106, .atk = 130, .def = 90, .spe = 90, .spa = 110, .spd = 154 },
.types = .{ .type1 = .Fire, .type2 = .Flying },
.ratio = 0xFF, // N
},
// Celebi
.{
.stats = .{ .hp = 100, .atk = 100, .def = 100, .spe = 100, .spa = 100, .spd = 100 },
.types = .{ .type1 = .Psychic, .type2 = .Grass },
.ratio = 0xFF, // N
},
};
comptime {
assert(@sizeOf(Species) == 1);
}
// @test-only
pub fn get(id: Species) Data {
assert(id != .None);
return DATA[@enumToInt(id) - 1];
}
}; | src/lib/gen2/data/species.zig |
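// A small sanity check for the table above (a sketch; uses only the fields declared in Data):
test "Species.get returns the entry matching the enum tag" {
    const bulbasaur = Species.get(.Bulbasaur);
    try std.testing.expectEqual(@as(u8, 45), bulbasaur.stats.hp);
    try std.testing.expectEqual(@as(u8, 0x1F), bulbasaur.ratio);
}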
const std = @import("std");
const webgpu = @import("../../webgpu.zig");
const dummy = @import("./dummy.zig");
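// Each dummy object embeds the shared webgpu.* interface struct as `super`, points its
// `__vtable` at a file-local vtable, and recovers the concrete backend type from the interface
// pointer with @fieldParentPtr. This is the usual struct-embedding approach to runtime
// polymorphism in Zig.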
pub const ComputePipeline = struct {
const vtable = webgpu.ComputePipeline.VTable{
.destroy_fn = destroy,
.get_bind_group_layout_fn = getBindGroupLayout,
.set_label_fn = setLabel,
};
super: webgpu.ComputePipeline,
pub fn create(device: *dummy.Device, descriptor: webgpu.ComputePipelineDescriptor) webgpu.Device.CreateComputePipelineError!*ComputePipeline {
_ = descriptor;
var compute_pipeline = try device.allocator.create(ComputePipeline);
errdefer device.allocator.destroy(compute_pipeline);
compute_pipeline.super = .{
.__vtable = &vtable,
.device = &device.super,
};
return compute_pipeline;
}
fn destroy(super: *webgpu.ComputePipeline) void {
        var compute_pipeline = @fieldParentPtr(ComputePipeline, "super", super);
        // The dummy struct only stores the generic interface; recover the owning dummy.Device
        // (and its allocator) from the embedded `super.device` pointer.
        const device = @fieldParentPtr(dummy.Device, "super", compute_pipeline.super.device);
        device.allocator.destroy(compute_pipeline);
}
fn getBindGroupLayout(super: *webgpu.ComputePipeline, group_index: u32) ?*webgpu.BindGroupLayout {
_ = super;
_ = group_index;
return null;
}
fn setLabel(super: *webgpu.ComputePipeline, label: [:0]const u8) void {
_ = super;
_ = label;
}
};
pub const RenderPipeline = struct {
const vtable = webgpu.RenderPipeline.VTable{
.destroy_fn = destroy,
.get_bind_group_layout_fn = getBindGroupLayout,
.set_label_fn = setLabel,
};
super: webgpu.RenderPipeline,
pub fn create(device: *dummy.Device, descriptor: webgpu.RenderPipelineDescriptor) webgpu.Device.CreateRenderPipelineError!*RenderPipeline {
_ = descriptor;
var render_pipeline = try device.allocator.create(RenderPipeline);
errdefer device.allocator.destroy(render_pipeline);
render_pipeline.super = .{
.__vtable = &vtable,
.device = &device.super,
};
return render_pipeline;
}
fn destroy(super: *webgpu.RenderPipeline) void {
        var render_pipeline = @fieldParentPtr(RenderPipeline, "super", super);
        // Recover the owning dummy.Device (and its allocator) from the embedded `super.device`.
        const device = @fieldParentPtr(dummy.Device, "super", render_pipeline.super.device);
        device.allocator.destroy(render_pipeline);
}
fn getBindGroupLayout(super: *webgpu.RenderPipeline, group_index: u32) ?*webgpu.BindGroupLayout {
_ = super;
_ = group_index;
return null;
}
fn setLabel(super: *webgpu.RenderPipeline, label: [:0]const u8) void {
_ = super;
_ = label;
}
};
pub const ShaderModule = struct {
const vtable = webgpu.ShaderModule.VTable{
.destroy_fn = destroy,
.get_compilation_info_fn = getCompilationInfo,
.set_label_fn = setLabel,
};
super: webgpu.ShaderModule,
pub fn create(device: *dummy.Device, descriptor: webgpu.ShaderModuleDescriptor) webgpu.Device.CreateShaderModuleError!*ShaderModule {
_ = descriptor;
var shader_module = try device.allocator.create(ShaderModule);
errdefer device.allocator.destroy(shader_module);
shader_module.super = .{
.__vtable = &vtable,
.device = &device.super,
};
return shader_module;
}
fn destroy(super: *webgpu.ShaderModule) void {
        var shader_module = @fieldParentPtr(ShaderModule, "super", super);
        // Recover the owning dummy.Device (and its allocator) from the embedded `super.device`.
        const device = @fieldParentPtr(dummy.Device, "super", shader_module.super.device);
        device.allocator.destroy(shader_module);
}
fn getCompilationInfo(super: *webgpu.ShaderModule) webgpu.ShaderModule.GetCompilationInfoError!webgpu.CompilationInfo {
_ = super;
return webgpu.CompilationInfo{.messages = &.{}};
}
fn setLabel(super: *webgpu.ShaderModule, label: [:0]const u8) void {
_ = super;
_ = label;
}
}; | src/backends/dummy/pipeline.zig |
const std = @import("std");
const debug = std.debug.print;
const Mutex = std.Thread.Mutex;
const testing = std.testing;
/// Runs a thread pool of n threads over a list of tasks, each stored as a function + workload.
/// The pool spins up n threads; each thread locks the list, copies out the next available entry, then unlocks so other threads can proceed.
/// When the list is empty, the thread finishes.
///
/// This is currently just an exercise in getting to know Zig. It should most likely be replaced by something like this:
/// https://zig.news/kprotty/resource-efficient-thread-pools-with-zig-3291
///
/// This is a simple variant that takes a single, predefined worker function applied to every work item.
/// Requirement: must be set up from a single thread, with no modifications after start().
pub fn ThreadPool(comptime PayloadType: type, comptime TaskCapacity: usize, worker_function: fn (*PayloadType) void) type {
const MAX_NUM_THREADS = 128;
const ThreadPoolState = enum {
NotStarted,
Running,
Finished
};
return struct {
const Self = @This();
num_threads: usize,
work_mutex: Mutex = Mutex{},
state: ThreadPoolState = ThreadPoolState.NotStarted,
work: [TaskCapacity]PayloadType = undefined,
next_work_item_idx: usize = 0,
next_free_item_idx: usize = 0,
function: fn (*PayloadType) void = worker_function,
thread_pool: [MAX_NUM_THREADS]std.Thread = undefined,
/// Must be called from a single thread, and only before start().
pub fn addWork(self: *Self, work: PayloadType) !void {
if(self.isCapacity()) {
self.work[self.next_free_item_idx] = work;
// next_work_item_idx = next_free_item_idx;
self.next_free_item_idx += 1;
} else {
return error.NoMoreCapacity;
}
}
/// Not thread safe, must lock outside
fn takeWork(self: *Self) !PayloadType {
if(self.isWork()) {
var item_to_return = self.next_work_item_idx;
self.next_work_item_idx += 1;
return self.work[item_to_return];
} else {
return error.NoMoreWork;
}
}
fn isWork(self: *Self) bool {
return self.next_work_item_idx < self.next_free_item_idx;
}
fn isCapacity(self: *Self) bool {
return self.next_free_item_idx < TaskCapacity;
}
/// Thread-worker
fn worker(self: *Self) void {
// Loop while there is work left to do
var work: PayloadType = undefined;
while(true) {
// Critical section
// Pop work
{
self.work_mutex.lock();
defer self.work_mutex.unlock();
if(!self.isWork()) break;
work = self.takeWork() catch { break; };
}
// End critical section
// Call worker_function with workload
self.function(&work);
}
}
/// Once all work is set up: call this to spawn all threads and get to work
pub fn start(self: *Self) !void {
// Fill up thread pool, with .worker()
var t_id:usize = 0;
// TBD: What if only some threads are spawned?
while(t_id < self.num_threads) : (t_id += 1) {
self.thread_pool[t_id] = try std.Thread.spawn(.{}, Self.worker, .{self});
}
}
/// Join on all threads of pool
pub fn join(self: *Self) void {
// Wait for all to finish
var t_id:usize = 0;
while(t_id < self.num_threads) : (t_id += 1) {
self.thread_pool[t_id].join();
}
}
/// Convenience-function identical to .start() and .join()
pub fn startAndJoin(self: *Self) !void {
try self.start();
self.join();
}
/// Main creator-function (some would even call it... constructor)
pub fn init(wanted_num_threads: usize) Self {
return Self{
.num_threads = wanted_num_threads,
};
}
};
}
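// Minimal usage sketch (illustrative): CounterPayload is a made-up payload type used only to
// exercise the capacity check and the startAndJoin() convenience wrapper. Note that the worker
// receives a pointer to a thread-local copy of the payload, so the mutation below is not
// observable; only control flow is being exercised here.
test "threadpool capacity and startAndJoin" {
const CounterPayload = struct {
const Self = @This();
value: u64,
pub fn work(self: *Self) void {
self.value +%= 1;
}
};
var pool = ThreadPool(CounterPayload, 8, CounterPayload.work).init(2);
var i: u64 = 0;
while (i < 8) : (i += 1) {
try pool.addWork(.{ .value = i });
}
// The ninth item exceeds TaskCapacity and must be rejected.
try testing.expectError(error.NoMoreCapacity, pool.addWork(.{ .value = 0 }));
try pool.startAndJoin();
}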
test "threadpool basic implementation" {
const MyPayloadResult = struct {
mutex: Mutex = Mutex{},
total: u64 = 0,
};
//
const MyPayload = struct {
const Self = @This();
data: u64,
result: *MyPayloadResult,
pub fn worker(self: *Self) void {
self.result.mutex.lock();
defer self.result.mutex.unlock();
var total = self.result.total;
total += self.data;
total += self.data;
self.result.total = total;
}
};
var result = MyPayloadResult{};
var pool = ThreadPool(MyPayload, 1000, MyPayload.worker).init(24);
var tmp: usize = 0;
var checkresult: u64 = 0;
while(tmp < 1000) : (tmp += 1) {
checkresult += tmp*2;
try pool.addWork(.{
.data = tmp,
.result = &result,
});
}
try pool.start();
pool.join();
try testing.expectEqual(checkresult, result.total);
debug("Result: {d}\n", .{result.total});
} | src/threadpool.zig |
const std = @import("std");
const Fundude = @import("main.zig");
pub const Op = @import("Cpu/Op.zig");
const util = @import("util.zig");
const Cpu = @This();
mode: Mode,
interrupt_master: bool,
duration: u8,
remaining: u8,
next: [3]u8,
reg: packed union {
_16: util.EnumArray(Reg16, u16),
_8: util.EnumArray(Reg8, u8),
flags: Flags,
},
pub const Flags = packed struct {
_pad: u4 = 0,
C: u1,
H: u1,
N: bool,
Z: bool,
};
test "register arrangement" {
var cpu: Cpu = undefined;
cpu.reg._8.set(.A, 0x12);
cpu.reg._8.set(.F, 0x34);
std.testing.expectEqual(@as(u16, 0x1234), cpu.reg._16.get(.AF));
cpu.reg._8.set(.B, 0x23);
cpu.reg._8.set(.C, 0x34);
std.testing.expectEqual(@as(u16, 0x2334), cpu.reg._16.get(.BC));
cpu.reg._8.set(.D, 0x58);
cpu.reg._8.set(.E, 0x76);
std.testing.expectEqual(@as(u16, 0x5876), cpu.reg._16.get(.DE));
cpu.reg._8.set(.H, 0xAF);
cpu.reg._8.set(.L, 0xCD);
std.testing.expectEqual(@as(u16, 0xAFCD), cpu.reg._16.get(.HL));
}
test "flags" {
var cpu: Cpu = undefined;
cpu.reg.flags = .{
.Z = true,
.N = true,
.H = 1,
.C = 1,
};
std.testing.expectEqual(@as(u8, 0xF0), cpu.reg._8.get(.F));
cpu.reg.flags = .{
.Z = true,
.N = false,
.H = 0,
.C = 0,
};
std.testing.expectEqual(@as(u8, 0x80), cpu.reg._8.get(.F));
cpu.reg.flags = .{
.Z = false,
.N = false,
.H = 0,
.C = 1,
};
std.testing.expectEqual(@as(u8, 0x10), cpu.reg._8.get(.F));
}
pub fn reset(self: *Cpu) void {
self.mode = .norm;
self.interrupt_master = false;
self.reg._16.set(.PC, 0);
self.duration = 0;
self.remaining = 0;
}
// Always advances by exactly 4 cycles per call
pub fn tick(self: *Cpu, mmu: *Fundude.Mmu) void {
std.debug.assert(self.remaining % 4 == 0);
if (self.remaining == 0) {
if (self.irqNext(mmu)) |irqBytes| {
self.next = irqBytes;
// TODO: does this really take the same duration as CALL?
const meta = meta_ops[self.next[0]];
self.duration = meta.duration;
self.remaining = meta.duration;
} else if (self.mode == .halt) {
return;
} else {
self.next = mmu.instrBytes(self.reg._16.get(.PC));
const meta = meta_ops[self.next[0]];
self.duration = meta.duration;
self.remaining = meta.duration;
self.reg._16.set(.PC, self.reg._16.get(.PC) +% meta.length);
}
}
if (self.remaining == 4) {
const actual_duration = @call(Fundude.profiling_call, self.opExecute, .{ mmu, self.next });
// const actual_duration = @call(.{ .modifier = .always_inline }, self.opExecute, .{ mmu, self.next });
self.remaining = if (actual_duration > self.duration) actual_duration - self.duration else 0;
self.next = .{ 0, 0, 0 };
} else {
self.remaining -%= 4;
}
}
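// Driving sketch (illustrative): callers step the emulator by repeatedly ticking the CPU,
// e.g. `fd.cpu.tick(&fd.mmu)` inside a loop, with one call corresponding to 4 clock cycles.
// A new instruction is only fetched and decoded once `remaining` reaches zero, so an opcode
// with an 8- or 12-cycle duration spans several tick() calls.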
const meta_ops = blk: {
@setEvalBranchQuota(10000);
var result: [256]struct { length: u8, duration: u8 } = undefined;
for (result) |*val, i| {
const op = Op.decode(.{ i, 0, 0 });
val.* = .{ .length = op.length, .duration = op.durations[0] };
}
break :blk result;
};
fn irqNext(self: *Cpu, mmu: *Fundude.Mmu) ?[3]u8 {
const cmp = mmu.dyn.io.IF.cmp(mmu.dyn.interrupt_enable);
const addr: u8 = blk: {
// Naive implementation:
// if (cmp.vblank) {
// mmu.dyn.io.IF.vblank = false;
// break :blk 0x40;
// } else if (cmp.lcd_stat) {
// mmu.dyn.io.IF.lcd_stat = false;
// break :blk 0x48;
// } else if (cmp.timer) {
// mmu.dyn.io.IF.timer = false;
// break :blk 0x50;
// } else if (cmp.serial) {
// mmu.dyn.io.IF.serial = false;
// break :blk 0x58;
// } else if (cmp.joypad) {
// mmu.dyn.io.IF.joypad = false;
// break :blk 0x60;
// } else {
// return null;
// }
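// The branch below is equivalent to the chain above: the handler addresses are 0x40, 0x48,
// 0x50, 0x58, 0x60 (i.e. 0x40 + 8 * interrupt index), and active() returns the lowest set
// bit, which matches the vblank-first priority order.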
if (cmp.isActive() and self.interrupt_master) {
const active = cmp.active().?;
std.debug.assert(cmp.get(active));
mmu.dyn.io.IF.disable(active);
break :blk 0x40 + @as(u8, 8) * @enumToInt(active);
} else {
return null;
}
};
self.mode = .norm;
self.interrupt_master = false;
// return Op.iw___(.call_IW___, addr);
return [3]u8{ 0xCD, addr, 0 };
}
pub fn opExecute(cpu: *Cpu, mmu: *Fundude.Mmu, bytes: [3]u8) u8 {
const op = @call(.{ .modifier = .always_inline }, Op.decode, .{bytes});
inline for (std.meta.fields(Op.Id)) |field| {
if (field.value == @enumToInt(op.id)) {
const func = @field(Op.impl, field.name);
const result = func(cpu, mmu, op);
const Result = @typeInfo(@TypeOf(func)).Fn.return_type.?;
std.debug.assert(result.duration == Result.durations[0] or result.duration == Result.durations[1]);
return @bitCast(u8, result);
}
}
unreachable;
}
test "opExecute smoke" {
var fd = Fundude{};
fd.mmu.cart_meta.mbc = .None;
var i: usize = 0;
while (i < 256) : (i += 1) {
_ = fd.cpu.opExecute(&fd.mmu, .{ @intCast(u8, i), 0, 0 });
}
// CB instructions
i = 0;
while (i < 256) : (i += 1) {
_ = fd.cpu.opExecute(&fd.mmu, .{ 0xCB, @intCast(u8, i), 0 });
}
}
pub const Mode = enum {
norm,
halt,
stop,
illegal,
};
pub const Reg16 = enum(u3) {
AF = 0,
BC = 1,
DE = 2,
HL = 3,
SP = 4,
PC = 5,
};
pub const Reg8 = enum(u3) {
F = 0,
A = 1,
C = 2,
B = 3,
E = 4,
D = 5,
L = 6,
H = 7,
};
pub const Irq = packed struct {
vblank: bool,
lcd_stat: bool,
timer: bool,
serial: bool,
joypad: bool,
_pad: u3 = 0,
pub const Pos = enum(u3) {
vblank,
lcd_stat,
timer,
serial,
joypad,
fn mask(self: Pos) u8 {
return @as(u8, 1) << @enumToInt(self);
}
};
pub fn cmp(self: Irq, other: Irq) Irq {
return @bitCast(Irq, @bitCast(u8, self) & @bitCast(u8, other));
}
pub fn get(self: Irq, pos: Pos) bool {
return pos.mask() & @bitCast(u8, self) != 0;
}
pub fn isActive(self: Irq) bool {
return @bitCast(u8, self) != 0;
}
pub fn active(self: Irq) ?Pos {
const raw = @ctz(u8, @bitCast(u8, self));
return std.meta.intToEnum(Pos, raw) catch null;
}
pub fn disable(self: *Irq, pos: Pos) void {
self.* = @bitCast(Irq, (~pos.mask()) & @bitCast(u8, self.*));
}
};
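// Illustrative test (added sketch): exercises the bit-level Irq helpers used by irqNext().
// Packed struct fields start at the least significant bit, so vblank is bit 0 and timer is bit 2.
test "irq helpers" {
var irq = @bitCast(Irq, @as(u8, 0b00000101)); // vblank + timer requested
std.testing.expect(irq.get(.vblank));
std.testing.expect(!irq.get(.lcd_stat));
std.testing.expectEqual(Irq.Pos.vblank, irq.active().?);
irq.disable(.vblank);
std.testing.expectEqual(Irq.Pos.timer, irq.active().?);
irq.disable(.timer);
std.testing.expect(!irq.isActive());
std.testing.expect(irq.active() == null);
}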
test "" {
_ = Op;
} | src/Cpu.zig |
pub const BG_NOTIFY_JOB_TRANSFERRED = @as(u32, 1);
pub const BG_NOTIFY_JOB_ERROR = @as(u32, 2);
pub const BG_NOTIFY_DISABLE = @as(u32, 4);
pub const BG_NOTIFY_JOB_MODIFICATION = @as(u32, 8);
pub const BG_NOTIFY_FILE_TRANSFERRED = @as(u32, 16);
pub const BG_NOTIFY_FILE_RANGES_TRANSFERRED = @as(u32, 32);
pub const BG_JOB_ENUM_ALL_USERS = @as(u32, 1);
pub const BG_COPY_FILE_OWNER = @as(u32, 1);
pub const BG_COPY_FILE_GROUP = @as(u32, 2);
pub const BG_COPY_FILE_DACL = @as(u32, 4);
pub const BG_COPY_FILE_SACL = @as(u32, 8);
pub const BG_COPY_FILE_ALL = @as(u32, 15);
pub const BG_SSL_ENABLE_CRL_CHECK = @as(u32, 1);
pub const BG_SSL_IGNORE_CERT_CN_INVALID = @as(u32, 2);
pub const BG_SSL_IGNORE_CERT_DATE_INVALID = @as(u32, 4);
pub const BG_SSL_IGNORE_UNKNOWN_CA = @as(u32, 8);
pub const BG_SSL_IGNORE_CERT_WRONG_USAGE = @as(u32, 16);
pub const BG_HTTP_REDIRECT_POLICY_MASK = @as(u32, 1792);
pub const BG_HTTP_REDIRECT_POLICY_ALLOW_SILENT = @as(u32, 0);
pub const BG_HTTP_REDIRECT_POLICY_ALLOW_REPORT = @as(u32, 256);
pub const BG_HTTP_REDIRECT_POLICY_DISALLOW = @as(u32, 512);
pub const BG_HTTP_REDIRECT_POLICY_ALLOW_HTTPS_TO_HTTP = @as(u32, 2048);
pub const BG_ENABLE_PEERCACHING_CLIENT = @as(u32, 1);
pub const BG_ENABLE_PEERCACHING_SERVER = @as(u32, 2);
pub const BG_DISABLE_BRANCH_CACHE = @as(u32, 4);
pub const BG_JOB_ENABLE_PEERCACHING_CLIENT = @as(u32, 1);
pub const BG_JOB_ENABLE_PEERCACHING_SERVER = @as(u32, 2);
pub const BG_JOB_DISABLE_BRANCH_CACHE = @as(u32, 4);
pub const BITS_COST_STATE_UNRESTRICTED = @as(u32, 1);
pub const BITS_COST_STATE_CAPPED_USAGE_UNKNOWN = @as(u32, 2);
pub const BITS_COST_STATE_BELOW_CAP = @as(u32, 4);
pub const BITS_COST_STATE_NEAR_CAP = @as(u32, 8);
pub const BITS_COST_STATE_OVERCAP_CHARGED = @as(u32, 16);
pub const BITS_COST_STATE_OVERCAP_THROTTLED = @as(u32, 32);
pub const BITS_COST_STATE_USAGE_BASED = @as(u32, 64);
pub const BITS_COST_STATE_ROAMING = @as(u32, 128);
pub const BITS_COST_OPTION_IGNORE_CONGESTION = @as(u32, 2147483648);
pub const BITS_COST_STATE_RESERVED = @as(u32, 1073741824);
pub const QM_NOTIFY_FILE_DONE = @as(u32, 1);
pub const QM_NOTIFY_JOB_DONE = @as(u32, 2);
pub const QM_NOTIFY_GROUP_DONE = @as(u32, 4);
pub const QM_NOTIFY_DISABLE_NOTIFY = @as(u32, 64);
pub const QM_NOTIFY_USE_PROGRESSEX = @as(u32, 128);
pub const QM_STATUS_FILE_COMPLETE = @as(u32, 1);
pub const QM_STATUS_FILE_INCOMPLETE = @as(u32, 2);
pub const QM_STATUS_JOB_COMPLETE = @as(u32, 4);
pub const QM_STATUS_JOB_INCOMPLETE = @as(u32, 8);
pub const QM_STATUS_JOB_ERROR = @as(u32, 16);
pub const QM_STATUS_JOB_FOREGROUND = @as(u32, 32);
pub const QM_STATUS_GROUP_COMPLETE = @as(u32, 64);
pub const QM_STATUS_GROUP_INCOMPLETE = @as(u32, 128);
pub const QM_STATUS_GROUP_SUSPENDED = @as(u32, 256);
pub const QM_STATUS_GROUP_ERROR = @as(u32, 512);
pub const QM_STATUS_GROUP_FOREGROUND = @as(u32, 1024);
pub const QM_PROTOCOL_HTTP = @as(u32, 1);
pub const QM_PROTOCOL_FTP = @as(u32, 2);
pub const QM_PROTOCOL_SMB = @as(u32, 3);
pub const QM_PROTOCOL_CUSTOM = @as(u32, 4);
pub const QM_PROGRESS_PERCENT_DONE = @as(u32, 1);
pub const QM_PROGRESS_TIME_DONE = @as(u32, 2);
pub const QM_PROGRESS_SIZE_DONE = @as(u32, 3);
pub const QM_E_INVALID_STATE = @as(u32, 2164264961);
pub const QM_E_SERVICE_UNAVAILABLE = @as(u32, 2164264962);
pub const QM_E_DOWNLOADER_UNAVAILABLE = @as(u32, 2164264963);
pub const QM_E_ITEM_NOT_FOUND = @as(u32, 2164264964);
//--------------------------------------------------------------------------------
// Section: Types (74)
//--------------------------------------------------------------------------------
pub const BG_TOKEN = enum(u32) {
LOCAL_FILE = 1,
NETWORK = 2,
};
pub const BG_TOKEN_LOCAL_FILE = BG_TOKEN.LOCAL_FILE;
pub const BG_TOKEN_NETWORK = BG_TOKEN.NETWORK;
const CLSID_BackgroundCopyManager_Value = @import("../zig.zig").Guid.initString("4991d34b-80a1-4291-83b6-3328366b9097");
pub const CLSID_BackgroundCopyManager = &CLSID_BackgroundCopyManager_Value;
pub const BG_FILE_PROGRESS = extern struct {
BytesTotal: u64,
BytesTransferred: u64,
Completed: BOOL,
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyFile_Value = @import("../zig.zig").Guid.initString("01b7bd23-fb88-4a77-8490-5891d3e4653a");
pub const IID_IBackgroundCopyFile = &IID_IBackgroundCopyFile_Value;
pub const IBackgroundCopyFile = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetRemoteName: fn(
self: *const IBackgroundCopyFile,
pVal: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetLocalName: fn(
self: *const IBackgroundCopyFile,
pVal: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProgress: fn(
self: *const IBackgroundCopyFile,
pVal: ?*BG_FILE_PROGRESS,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile_GetRemoteName(self: *const T, pVal: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile.VTable, self.vtable).GetRemoteName(@ptrCast(*const IBackgroundCopyFile, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile_GetLocalName(self: *const T, pVal: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile.VTable, self.vtable).GetLocalName(@ptrCast(*const IBackgroundCopyFile, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile_GetProgress(self: *const T, pVal: ?*BG_FILE_PROGRESS) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile.VTable, self.vtable).GetProgress(@ptrCast(*const IBackgroundCopyFile, self), pVal);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IEnumBackgroundCopyFiles_Value = @import("../zig.zig").Guid.initString("ca51e165-c365-424c-8d41-24aaa4ff3c40");
pub const IID_IEnumBackgroundCopyFiles = &IID_IEnumBackgroundCopyFiles_Value;
pub const IEnumBackgroundCopyFiles = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Next: fn(
self: *const IEnumBackgroundCopyFiles,
celt: u32,
rgelt: ?*?*IBackgroundCopyFile,
pceltFetched: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Skip: fn(
self: *const IEnumBackgroundCopyFiles,
celt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reset: fn(
self: *const IEnumBackgroundCopyFiles,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Clone: fn(
self: *const IEnumBackgroundCopyFiles,
ppenum: ?*?*IEnumBackgroundCopyFiles,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCount: fn(
self: *const IEnumBackgroundCopyFiles,
puCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyFiles_Next(self: *const T, celt: u32, rgelt: ?*?*IBackgroundCopyFile, pceltFetched: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyFiles.VTable, self.vtable).Next(@ptrCast(*const IEnumBackgroundCopyFiles, self), celt, rgelt, pceltFetched);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyFiles_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyFiles.VTable, self.vtable).Skip(@ptrCast(*const IEnumBackgroundCopyFiles, self), celt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyFiles_Reset(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyFiles.VTable, self.vtable).Reset(@ptrCast(*const IEnumBackgroundCopyFiles, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyFiles_Clone(self: *const T, ppenum: ?*?*IEnumBackgroundCopyFiles) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyFiles.VTable, self.vtable).Clone(@ptrCast(*const IEnumBackgroundCopyFiles, self), ppenum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyFiles_GetCount(self: *const T, puCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyFiles.VTable, self.vtable).GetCount(@ptrCast(*const IEnumBackgroundCopyFiles, self), puCount);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const BG_ERROR_CONTEXT = enum(i32) {
NONE = 0,
UNKNOWN = 1,
GENERAL_QUEUE_MANAGER = 2,
QUEUE_MANAGER_NOTIFICATION = 3,
LOCAL_FILE = 4,
REMOTE_FILE = 5,
GENERAL_TRANSPORT = 6,
REMOTE_APPLICATION = 7,
SERVER_CERTIFICATE_CALLBACK = 8,
};
pub const BG_ERROR_CONTEXT_NONE = BG_ERROR_CONTEXT.NONE;
pub const BG_ERROR_CONTEXT_UNKNOWN = BG_ERROR_CONTEXT.UNKNOWN;
pub const BG_ERROR_CONTEXT_GENERAL_QUEUE_MANAGER = BG_ERROR_CONTEXT.GENERAL_QUEUE_MANAGER;
pub const BG_ERROR_CONTEXT_QUEUE_MANAGER_NOTIFICATION = BG_ERROR_CONTEXT.QUEUE_MANAGER_NOTIFICATION;
pub const BG_ERROR_CONTEXT_LOCAL_FILE = BG_ERROR_CONTEXT.LOCAL_FILE;
pub const BG_ERROR_CONTEXT_REMOTE_FILE = BG_ERROR_CONTEXT.REMOTE_FILE;
pub const BG_ERROR_CONTEXT_GENERAL_TRANSPORT = BG_ERROR_CONTEXT.GENERAL_TRANSPORT;
pub const BG_ERROR_CONTEXT_REMOTE_APPLICATION = BG_ERROR_CONTEXT.REMOTE_APPLICATION;
pub const BG_ERROR_CONTEXT_SERVER_CERTIFICATE_CALLBACK = BG_ERROR_CONTEXT.SERVER_CERTIFICATE_CALLBACK;
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyError_Value = @import("../zig.zig").Guid.initString("19c613a0-fcb8-4f28-81ae-897c3d078f81");
pub const IID_IBackgroundCopyError = &IID_IBackgroundCopyError_Value;
pub const IBackgroundCopyError = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetError: fn(
self: *const IBackgroundCopyError,
pContext: ?*BG_ERROR_CONTEXT,
pCode: ?*HRESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFile: fn(
self: *const IBackgroundCopyError,
pVal: ?*?*IBackgroundCopyFile,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetErrorDescription: fn(
self: *const IBackgroundCopyError,
LanguageId: u32,
pErrorDescription: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetErrorContextDescription: fn(
self: *const IBackgroundCopyError,
LanguageId: u32,
pContextDescription: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProtocol: fn(
self: *const IBackgroundCopyError,
pProtocol: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyError_GetError(self: *const T, pContext: ?*BG_ERROR_CONTEXT, pCode: ?*HRESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyError.VTable, self.vtable).GetError(@ptrCast(*const IBackgroundCopyError, self), pContext, pCode);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyError_GetFile(self: *const T, pVal: ?*?*IBackgroundCopyFile) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyError.VTable, self.vtable).GetFile(@ptrCast(*const IBackgroundCopyError, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyError_GetErrorDescription(self: *const T, LanguageId: u32, pErrorDescription: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyError.VTable, self.vtable).GetErrorDescription(@ptrCast(*const IBackgroundCopyError, self), LanguageId, pErrorDescription);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyError_GetErrorContextDescription(self: *const T, LanguageId: u32, pContextDescription: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyError.VTable, self.vtable).GetErrorContextDescription(@ptrCast(*const IBackgroundCopyError, self), LanguageId, pContextDescription);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyError_GetProtocol(self: *const T, pProtocol: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyError.VTable, self.vtable).GetProtocol(@ptrCast(*const IBackgroundCopyError, self), pProtocol);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const BG_FILE_INFO = extern struct {
RemoteName: ?PWSTR,
LocalName: ?PWSTR,
};
pub const BG_JOB_PROGRESS = extern struct {
BytesTotal: u64,
BytesTransferred: u64,
FilesTotal: u32,
FilesTransferred: u32,
};
pub const BG_JOB_TIMES = extern struct {
CreationTime: FILETIME,
ModificationTime: FILETIME,
TransferCompletionTime: FILETIME,
};
pub const BG_JOB_PRIORITY = enum(i32) {
FOREGROUND = 0,
HIGH = 1,
NORMAL = 2,
LOW = 3,
};
pub const BG_JOB_PRIORITY_FOREGROUND = BG_JOB_PRIORITY.FOREGROUND;
pub const BG_JOB_PRIORITY_HIGH = BG_JOB_PRIORITY.HIGH;
pub const BG_JOB_PRIORITY_NORMAL = BG_JOB_PRIORITY.NORMAL;
pub const BG_JOB_PRIORITY_LOW = BG_JOB_PRIORITY.LOW;
pub const BG_JOB_STATE = enum(i32) {
QUEUED = 0,
CONNECTING = 1,
TRANSFERRING = 2,
SUSPENDED = 3,
ERROR = 4,
TRANSIENT_ERROR = 5,
TRANSFERRED = 6,
ACKNOWLEDGED = 7,
CANCELLED = 8,
};
pub const BG_JOB_STATE_QUEUED = BG_JOB_STATE.QUEUED;
pub const BG_JOB_STATE_CONNECTING = BG_JOB_STATE.CONNECTING;
pub const BG_JOB_STATE_TRANSFERRING = BG_JOB_STATE.TRANSFERRING;
pub const BG_JOB_STATE_SUSPENDED = BG_JOB_STATE.SUSPENDED;
pub const BG_JOB_STATE_ERROR = BG_JOB_STATE.ERROR;
pub const BG_JOB_STATE_TRANSIENT_ERROR = BG_JOB_STATE.TRANSIENT_ERROR;
pub const BG_JOB_STATE_TRANSFERRED = BG_JOB_STATE.TRANSFERRED;
pub const BG_JOB_STATE_ACKNOWLEDGED = BG_JOB_STATE.ACKNOWLEDGED;
pub const BG_JOB_STATE_CANCELLED = BG_JOB_STATE.CANCELLED;
pub const BG_JOB_TYPE = enum(i32) {
DOWNLOAD = 0,
UPLOAD = 1,
UPLOAD_REPLY = 2,
};
pub const BG_JOB_TYPE_DOWNLOAD = BG_JOB_TYPE.DOWNLOAD;
pub const BG_JOB_TYPE_UPLOAD = BG_JOB_TYPE.UPLOAD;
pub const BG_JOB_TYPE_UPLOAD_REPLY = BG_JOB_TYPE.UPLOAD_REPLY;
pub const BG_JOB_PROXY_USAGE = enum(i32) {
PRECONFIG = 0,
NO_PROXY = 1,
OVERRIDE = 2,
AUTODETECT = 3,
};
pub const BG_JOB_PROXY_USAGE_PRECONFIG = BG_JOB_PROXY_USAGE.PRECONFIG;
pub const BG_JOB_PROXY_USAGE_NO_PROXY = BG_JOB_PROXY_USAGE.NO_PROXY;
pub const BG_JOB_PROXY_USAGE_OVERRIDE = BG_JOB_PROXY_USAGE.OVERRIDE;
pub const BG_JOB_PROXY_USAGE_AUTODETECT = BG_JOB_PROXY_USAGE.AUTODETECT;
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyJob_Value = @import("../zig.zig").Guid.initString("37668d37-507e-4160-9316-26306d150b12");
pub const IID_IBackgroundCopyJob = &IID_IBackgroundCopyJob_Value;
pub const IBackgroundCopyJob = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
AddFileSet: fn(
self: *const IBackgroundCopyJob,
cFileCount: u32,
pFileSet: [*]BG_FILE_INFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AddFile: fn(
self: *const IBackgroundCopyJob,
RemoteUrl: ?[*:0]const u16,
LocalName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumFiles: fn(
self: *const IBackgroundCopyJob,
pEnum: ?*?*IEnumBackgroundCopyFiles,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Suspend: fn(
self: *const IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Resume: fn(
self: *const IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Cancel: fn(
self: *const IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Complete: fn(
self: *const IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetId: fn(
self: *const IBackgroundCopyJob,
pVal: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetType: fn(
self: *const IBackgroundCopyJob,
pVal: ?*BG_JOB_TYPE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProgress: fn(
self: *const IBackgroundCopyJob,
pVal: ?*BG_JOB_PROGRESS,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetTimes: fn(
self: *const IBackgroundCopyJob,
pVal: ?*BG_JOB_TIMES,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetState: fn(
self: *const IBackgroundCopyJob,
pVal: ?*BG_JOB_STATE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetError: fn(
self: *const IBackgroundCopyJob,
ppError: ?*?*IBackgroundCopyError,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOwner: fn(
self: *const IBackgroundCopyJob,
pVal: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetDisplayName: fn(
self: *const IBackgroundCopyJob,
Val: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDisplayName: fn(
self: *const IBackgroundCopyJob,
pVal: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetDescription: fn(
self: *const IBackgroundCopyJob,
Val: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDescription: fn(
self: *const IBackgroundCopyJob,
pVal: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetPriority: fn(
self: *const IBackgroundCopyJob,
Val: BG_JOB_PRIORITY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetPriority: fn(
self: *const IBackgroundCopyJob,
pVal: ?*BG_JOB_PRIORITY,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetNotifyFlags: fn(
self: *const IBackgroundCopyJob,
Val: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetNotifyFlags: fn(
self: *const IBackgroundCopyJob,
pVal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetNotifyInterface: fn(
self: *const IBackgroundCopyJob,
Val: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetNotifyInterface: fn(
self: *const IBackgroundCopyJob,
pVal: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMinimumRetryDelay: fn(
self: *const IBackgroundCopyJob,
Seconds: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMinimumRetryDelay: fn(
self: *const IBackgroundCopyJob,
Seconds: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetNoProgressTimeout: fn(
self: *const IBackgroundCopyJob,
Seconds: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetNoProgressTimeout: fn(
self: *const IBackgroundCopyJob,
Seconds: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetErrorCount: fn(
self: *const IBackgroundCopyJob,
Errors: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetProxySettings: fn(
self: *const IBackgroundCopyJob,
ProxyUsage: BG_JOB_PROXY_USAGE,
ProxyList: ?[*:0]const u16,
ProxyBypassList: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProxySettings: fn(
self: *const IBackgroundCopyJob,
pProxyUsage: ?*BG_JOB_PROXY_USAGE,
pProxyList: ?*?PWSTR,
pProxyBypassList: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
TakeOwnership: fn(
self: *const IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_AddFileSet(self: *const T, cFileCount: u32, pFileSet: [*]BG_FILE_INFO) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).AddFileSet(@ptrCast(*const IBackgroundCopyJob, self), cFileCount, pFileSet);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_AddFile(self: *const T, RemoteUrl: ?[*:0]const u16, LocalName: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).AddFile(@ptrCast(*const IBackgroundCopyJob, self), RemoteUrl, LocalName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_EnumFiles(self: *const T, pEnum: ?*?*IEnumBackgroundCopyFiles) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).EnumFiles(@ptrCast(*const IBackgroundCopyJob, self), pEnum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_Suspend(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).Suspend(@ptrCast(*const IBackgroundCopyJob, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_Resume(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).Resume(@ptrCast(*const IBackgroundCopyJob, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_Cancel(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).Cancel(@ptrCast(*const IBackgroundCopyJob, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_Complete(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).Complete(@ptrCast(*const IBackgroundCopyJob, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetId(self: *const T, pVal: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetId(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetType(self: *const T, pVal: ?*BG_JOB_TYPE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetType(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetProgress(self: *const T, pVal: ?*BG_JOB_PROGRESS) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetProgress(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetTimes(self: *const T, pVal: ?*BG_JOB_TIMES) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetTimes(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetState(self: *const T, pVal: ?*BG_JOB_STATE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetState(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetError(self: *const T, ppError: ?*?*IBackgroundCopyError) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetError(@ptrCast(*const IBackgroundCopyJob, self), ppError);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetOwner(self: *const T, pVal: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetOwner(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetDisplayName(self: *const T, Val: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetDisplayName(@ptrCast(*const IBackgroundCopyJob, self), Val);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetDisplayName(self: *const T, pVal: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetDisplayName(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetDescription(self: *const T, Val: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetDescription(@ptrCast(*const IBackgroundCopyJob, self), Val);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetDescription(self: *const T, pVal: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetDescription(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetPriority(self: *const T, Val: BG_JOB_PRIORITY) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetPriority(@ptrCast(*const IBackgroundCopyJob, self), Val);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetPriority(self: *const T, pVal: ?*BG_JOB_PRIORITY) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetPriority(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetNotifyFlags(self: *const T, Val: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetNotifyFlags(@ptrCast(*const IBackgroundCopyJob, self), Val);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetNotifyFlags(self: *const T, pVal: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetNotifyFlags(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetNotifyInterface(self: *const T, Val: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetNotifyInterface(@ptrCast(*const IBackgroundCopyJob, self), Val);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetNotifyInterface(self: *const T, pVal: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetNotifyInterface(@ptrCast(*const IBackgroundCopyJob, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetMinimumRetryDelay(self: *const T, Seconds: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetMinimumRetryDelay(@ptrCast(*const IBackgroundCopyJob, self), Seconds);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetMinimumRetryDelay(self: *const T, Seconds: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetMinimumRetryDelay(@ptrCast(*const IBackgroundCopyJob, self), Seconds);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetNoProgressTimeout(self: *const T, Seconds: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetNoProgressTimeout(@ptrCast(*const IBackgroundCopyJob, self), Seconds);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetNoProgressTimeout(self: *const T, Seconds: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetNoProgressTimeout(@ptrCast(*const IBackgroundCopyJob, self), Seconds);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetErrorCount(self: *const T, Errors: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetErrorCount(@ptrCast(*const IBackgroundCopyJob, self), Errors);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_SetProxySettings(self: *const T, ProxyUsage: BG_JOB_PROXY_USAGE, ProxyList: ?[*:0]const u16, ProxyBypassList: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).SetProxySettings(@ptrCast(*const IBackgroundCopyJob, self), ProxyUsage, ProxyList, ProxyBypassList);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_GetProxySettings(self: *const T, pProxyUsage: ?*BG_JOB_PROXY_USAGE, pProxyList: ?*?PWSTR, pProxyBypassList: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).GetProxySettings(@ptrCast(*const IBackgroundCopyJob, self), pProxyUsage, pProxyList, pProxyBypassList);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob_TakeOwnership(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob.VTable, self.vtable).TakeOwnership(@ptrCast(*const IBackgroundCopyJob, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IEnumBackgroundCopyJobs_Value = @import("../zig.zig").Guid.initString("1af4f612-3b71-466f-8f58-7b6f73ac57ad");
pub const IID_IEnumBackgroundCopyJobs = &IID_IEnumBackgroundCopyJobs_Value;
pub const IEnumBackgroundCopyJobs = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Next: fn(
self: *const IEnumBackgroundCopyJobs,
celt: u32,
rgelt: ?*?*IBackgroundCopyJob,
pceltFetched: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Skip: fn(
self: *const IEnumBackgroundCopyJobs,
celt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reset: fn(
self: *const IEnumBackgroundCopyJobs,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Clone: fn(
self: *const IEnumBackgroundCopyJobs,
ppenum: ?*?*IEnumBackgroundCopyJobs,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCount: fn(
self: *const IEnumBackgroundCopyJobs,
puCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs_Next(self: *const T, celt: u32, rgelt: ?*?*IBackgroundCopyJob, pceltFetched: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs.VTable, self.vtable).Next(@ptrCast(*const IEnumBackgroundCopyJobs, self), celt, rgelt, pceltFetched);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs.VTable, self.vtable).Skip(@ptrCast(*const IEnumBackgroundCopyJobs, self), celt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs_Reset(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs.VTable, self.vtable).Reset(@ptrCast(*const IEnumBackgroundCopyJobs, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs_Clone(self: *const T, ppenum: ?*?*IEnumBackgroundCopyJobs) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs.VTable, self.vtable).Clone(@ptrCast(*const IEnumBackgroundCopyJobs, self), ppenum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs_GetCount(self: *const T, puCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs.VTable, self.vtable).GetCount(@ptrCast(*const IEnumBackgroundCopyJobs, self), puCount);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyCallback_Value = @import("../zig.zig").Guid.initString("97ea99c7-0186-4ad4-8df9-c5b4e0ed6b22");
pub const IID_IBackgroundCopyCallback = &IID_IBackgroundCopyCallback_Value;
pub const IBackgroundCopyCallback = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
JobTransferred: fn(
self: *const IBackgroundCopyCallback,
pJob: ?*IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
JobError: fn(
self: *const IBackgroundCopyCallback,
pJob: ?*IBackgroundCopyJob,
pError: ?*IBackgroundCopyError,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
JobModification: fn(
self: *const IBackgroundCopyCallback,
pJob: ?*IBackgroundCopyJob,
dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback_JobTransferred(self: *const T, pJob: ?*IBackgroundCopyJob) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback.VTable, self.vtable).JobTransferred(@ptrCast(*const IBackgroundCopyCallback, self), pJob);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback_JobError(self: *const T, pJob: ?*IBackgroundCopyJob, pError: ?*IBackgroundCopyError) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback.VTable, self.vtable).JobError(@ptrCast(*const IBackgroundCopyCallback, self), pJob, pError);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback_JobModification(self: *const T, pJob: ?*IBackgroundCopyJob, dwReserved: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback.VTable, self.vtable).JobModification(@ptrCast(*const IBackgroundCopyCallback, self), pJob, dwReserved);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_AsyncIBackgroundCopyCallback_Value = @import("../zig.zig").Guid.initString("ca29d251-b4bb-4679-a3d9-ae8006119d54");
pub const IID_AsyncIBackgroundCopyCallback = &IID_AsyncIBackgroundCopyCallback_Value;
pub const AsyncIBackgroundCopyCallback = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Begin_JobTransferred: fn(
self: *const AsyncIBackgroundCopyCallback,
pJob: ?*IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Finish_JobTransferred: fn(
self: *const AsyncIBackgroundCopyCallback,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Begin_JobError: fn(
self: *const AsyncIBackgroundCopyCallback,
pJob: ?*IBackgroundCopyJob,
pError: ?*IBackgroundCopyError,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Finish_JobError: fn(
self: *const AsyncIBackgroundCopyCallback,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Begin_JobModification: fn(
self: *const AsyncIBackgroundCopyCallback,
pJob: ?*IBackgroundCopyJob,
dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Finish_JobModification: fn(
self: *const AsyncIBackgroundCopyCallback,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn AsyncIBackgroundCopyCallback_Begin_JobTransferred(self: *const T, pJob: ?*IBackgroundCopyJob) callconv(.Inline) HRESULT {
return @ptrCast(*const AsyncIBackgroundCopyCallback.VTable, self.vtable).Begin_JobTransferred(@ptrCast(*const AsyncIBackgroundCopyCallback, self), pJob);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn AsyncIBackgroundCopyCallback_Finish_JobTransferred(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const AsyncIBackgroundCopyCallback.VTable, self.vtable).Finish_JobTransferred(@ptrCast(*const AsyncIBackgroundCopyCallback, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn AsyncIBackgroundCopyCallback_Begin_JobError(self: *const T, pJob: ?*IBackgroundCopyJob, pError: ?*IBackgroundCopyError) callconv(.Inline) HRESULT {
return @ptrCast(*const AsyncIBackgroundCopyCallback.VTable, self.vtable).Begin_JobError(@ptrCast(*const AsyncIBackgroundCopyCallback, self), pJob, pError);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn AsyncIBackgroundCopyCallback_Finish_JobError(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const AsyncIBackgroundCopyCallback.VTable, self.vtable).Finish_JobError(@ptrCast(*const AsyncIBackgroundCopyCallback, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn AsyncIBackgroundCopyCallback_Begin_JobModification(self: *const T, pJob: ?*IBackgroundCopyJob, dwReserved: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const AsyncIBackgroundCopyCallback.VTable, self.vtable).Begin_JobModification(@ptrCast(*const AsyncIBackgroundCopyCallback, self), pJob, dwReserved);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn AsyncIBackgroundCopyCallback_Finish_JobModification(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const AsyncIBackgroundCopyCallback.VTable, self.vtable).Finish_JobModification(@ptrCast(*const AsyncIBackgroundCopyCallback, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyManager_Value = @import("../zig.zig").Guid.initString("5ce34c0d-0dc9-4c1f-897c-daa1b78cee7c");
pub const IID_IBackgroundCopyManager = &IID_IBackgroundCopyManager_Value;
pub const IBackgroundCopyManager = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateJob: fn(
self: *const IBackgroundCopyManager,
DisplayName: ?[*:0]const u16,
Type: BG_JOB_TYPE,
pJobId: ?*Guid,
ppJob: ?*?*IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetJob: fn(
self: *const IBackgroundCopyManager,
jobID: ?*const Guid,
ppJob: ?*?*IBackgroundCopyJob,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumJobs: fn(
self: *const IBackgroundCopyManager,
dwFlags: u32,
ppEnum: ?*?*IEnumBackgroundCopyJobs,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetErrorDescription: fn(
self: *const IBackgroundCopyManager,
hResult: HRESULT,
LanguageId: u32,
pErrorDescription: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyManager_CreateJob(self: *const T, DisplayName: ?[*:0]const u16, Type: BG_JOB_TYPE, pJobId: ?*Guid, ppJob: ?*?*IBackgroundCopyJob) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyManager.VTable, self.vtable).CreateJob(@ptrCast(*const IBackgroundCopyManager, self), DisplayName, Type, pJobId, ppJob);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyManager_GetJob(self: *const T, jobID: ?*const Guid, ppJob: ?*?*IBackgroundCopyJob) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyManager.VTable, self.vtable).GetJob(@ptrCast(*const IBackgroundCopyManager, self), jobID, ppJob);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyManager_EnumJobs(self: *const T, dwFlags: u32, ppEnum: ?*?*IEnumBackgroundCopyJobs) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyManager.VTable, self.vtable).EnumJobs(@ptrCast(*const IBackgroundCopyManager, self), dwFlags, ppEnum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyManager_GetErrorDescription(self: *const T, hResult: HRESULT, LanguageId: u32, pErrorDescription: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyManager.VTable, self.vtable).GetErrorDescription(@ptrCast(*const IBackgroundCopyManager, self), hResult, LanguageId, pErrorDescription);
}
};}
pub usingnamespace MethodMixin(@This());
};
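// Usage sketch (illustrative, not part of the generated bindings): given a
// `manager: *IBackgroundCopyManager` obtained by activating CLSID_BackgroundCopyManager
// through COM, the MethodMixin wrappers above are called directly on the interface pointer:
//
//     var job_id: Guid = undefined;
//     var job: ?*IBackgroundCopyJob = null;
//     const hr = manager.IBackgroundCopyManager_CreateJob(display_name, .DOWNLOAD, &job_id, &job);
//
// where `display_name` is assumed to be a caller-supplied null-terminated UTF-16 string and
// `hr` must be checked for success before `job` is used.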
const CLSID_BackgroundCopyManager1_5_Value = @import("../zig.zig").Guid.initString("f087771f-d74f-4c1a-bb8a-e16aca9124ea");
pub const CLSID_BackgroundCopyManager1_5 = &CLSID_BackgroundCopyManager1_5_Value;
pub const BG_JOB_REPLY_PROGRESS = extern struct {
BytesTotal: u64,
BytesTransferred: u64,
};
pub const BG_AUTH_TARGET = enum(i32) {
SERVER = 1,
PROXY = 2,
};
pub const BG_AUTH_TARGET_SERVER = BG_AUTH_TARGET.SERVER;
pub const BG_AUTH_TARGET_PROXY = BG_AUTH_TARGET.PROXY;
pub const BG_AUTH_SCHEME = enum(i32) {
BASIC = 1,
DIGEST = 2,
NTLM = 3,
NEGOTIATE = 4,
PASSPORT = 5,
};
pub const BG_AUTH_SCHEME_BASIC = BG_AUTH_SCHEME.BASIC;
pub const BG_AUTH_SCHEME_DIGEST = BG_AUTH_SCHEME.DIGEST;
pub const BG_AUTH_SCHEME_NTLM = BG_AUTH_SCHEME.NTLM;
pub const BG_AUTH_SCHEME_NEGOTIATE = BG_AUTH_SCHEME.NEGOTIATE;
pub const BG_AUTH_SCHEME_PASSPORT = BG_AUTH_SCHEME.PASSPORT;
pub const BG_BASIC_CREDENTIALS = extern struct {
UserName: ?PWSTR,
Password: <PASSWORD>,
};
pub const BG_AUTH_CREDENTIALS_UNION = extern union {
Basic: BG_BASIC_CREDENTIALS,
};
pub const BG_AUTH_CREDENTIALS = extern struct {
Target: BG_AUTH_TARGET,
Scheme: BG_AUTH_SCHEME,
Credentials: BG_AUTH_CREDENTIALS_UNION,
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBackgroundCopyJob2_Value = @import("../zig.zig").Guid.initString("54b50739-686f-45eb-9dff-d6a9a0faa9af");
pub const IID_IBackgroundCopyJob2 = &IID_IBackgroundCopyJob2_Value;
pub const IBackgroundCopyJob2 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyJob.VTable,
SetNotifyCmdLine: fn(
self: *const IBackgroundCopyJob2,
Program: ?[*:0]const u16,
Parameters: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetNotifyCmdLine: fn(
self: *const IBackgroundCopyJob2,
pProgram: ?*?PWSTR,
pParameters: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetReplyProgress: fn(
self: *const IBackgroundCopyJob2,
pProgress: ?*BG_JOB_REPLY_PROGRESS,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetReplyData: fn(
self: *const IBackgroundCopyJob2,
ppBuffer: ?*?*u8,
pLength: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetReplyFileName: fn(
self: *const IBackgroundCopyJob2,
ReplyFileName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetReplyFileName: fn(
self: *const IBackgroundCopyJob2,
pReplyFileName: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetCredentials: fn(
self: *const IBackgroundCopyJob2,
credentials: ?*BG_AUTH_CREDENTIALS,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RemoveCredentials: fn(
self: *const IBackgroundCopyJob2,
Target: BG_AUTH_TARGET,
Scheme: BG_AUTH_SCHEME,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyJob.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_SetNotifyCmdLine(self: *const T, Program: ?[*:0]const u16, Parameters: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).SetNotifyCmdLine(@ptrCast(*const IBackgroundCopyJob2, self), Program, Parameters);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_GetNotifyCmdLine(self: *const T, pProgram: ?*?PWSTR, pParameters: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).GetNotifyCmdLine(@ptrCast(*const IBackgroundCopyJob2, self), pProgram, pParameters);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_GetReplyProgress(self: *const T, pProgress: ?*BG_JOB_REPLY_PROGRESS) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).GetReplyProgress(@ptrCast(*const IBackgroundCopyJob2, self), pProgress);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_GetReplyData(self: *const T, ppBuffer: ?*?*u8, pLength: ?*u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).GetReplyData(@ptrCast(*const IBackgroundCopyJob2, self), ppBuffer, pLength);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_SetReplyFileName(self: *const T, ReplyFileName: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).SetReplyFileName(@ptrCast(*const IBackgroundCopyJob2, self), ReplyFileName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_GetReplyFileName(self: *const T, pReplyFileName: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).GetReplyFileName(@ptrCast(*const IBackgroundCopyJob2, self), pReplyFileName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_SetCredentials(self: *const T, credentials: ?*BG_AUTH_CREDENTIALS) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).SetCredentials(@ptrCast(*const IBackgroundCopyJob2, self), credentials);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob2_RemoveCredentials(self: *const T, Target: BG_AUTH_TARGET, Scheme: BG_AUTH_SCHEME) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob2.VTable, self.vtable).RemoveCredentials(@ptrCast(*const IBackgroundCopyJob2, self), Target, Scheme);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyManager2_0_Value = @import("../zig.zig").Guid.initString("6d18ad12-bde3-4393-b311-099c346e6df9");
pub const CLSID_BackgroundCopyManager2_0 = &CLSID_BackgroundCopyManager2_0_Value;
pub const BG_FILE_RANGE = extern struct {
InitialOffset: u64,
Length: u64,
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBackgroundCopyJob3_Value = @import("../zig.zig").Guid.initString("443c8934-90ff-48ed-bcde-26f5c7450042");
pub const IID_IBackgroundCopyJob3 = &IID_IBackgroundCopyJob3_Value;
pub const IBackgroundCopyJob3 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyJob2.VTable,
ReplaceRemotePrefix: fn(
self: *const IBackgroundCopyJob3,
OldPrefix: ?[*:0]const u16,
NewPrefix: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AddFileWithRanges: fn(
self: *const IBackgroundCopyJob3,
RemoteUrl: ?[*:0]const u16,
LocalName: ?[*:0]const u16,
RangeCount: u32,
Ranges: [*]BG_FILE_RANGE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetFileACLFlags: fn(
self: *const IBackgroundCopyJob3,
Flags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFileACLFlags: fn(
self: *const IBackgroundCopyJob3,
Flags: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyJob2.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob3_ReplaceRemotePrefix(self: *const T, OldPrefix: ?[*:0]const u16, NewPrefix: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob3.VTable, self.vtable).ReplaceRemotePrefix(@ptrCast(*const IBackgroundCopyJob3, self), OldPrefix, NewPrefix);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob3_AddFileWithRanges(self: *const T, RemoteUrl: ?[*:0]const u16, LocalName: ?[*:0]const u16, RangeCount: u32, Ranges: [*]BG_FILE_RANGE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob3.VTable, self.vtable).AddFileWithRanges(@ptrCast(*const IBackgroundCopyJob3, self), RemoteUrl, LocalName, RangeCount, Ranges);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob3_SetFileACLFlags(self: *const T, Flags: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob3.VTable, self.vtable).SetFileACLFlags(@ptrCast(*const IBackgroundCopyJob3, self), Flags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob3_GetFileACLFlags(self: *const T, Flags: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob3.VTable, self.vtable).GetFileACLFlags(@ptrCast(*const IBackgroundCopyJob3, self), Flags);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBackgroundCopyFile2_Value = @import("../zig.zig").Guid.initString("83e81b93-0873-474d-8a8c-f2018b1a939c");
pub const IID_IBackgroundCopyFile2 = &IID_IBackgroundCopyFile2_Value;
pub const IBackgroundCopyFile2 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyFile.VTable,
GetFileRanges: fn(
self: *const IBackgroundCopyFile2,
RangeCount: ?*u32,
Ranges: [*]?*BG_FILE_RANGE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetRemoteName: fn(
self: *const IBackgroundCopyFile2,
Val: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyFile.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile2_GetFileRanges(self: *const T, RangeCount: ?*u32, Ranges: [*]?*BG_FILE_RANGE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile2.VTable, self.vtable).GetFileRanges(@ptrCast(*const IBackgroundCopyFile2, self), RangeCount, Ranges);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile2_SetRemoteName(self: *const T, Val: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile2.VTable, self.vtable).SetRemoteName(@ptrCast(*const IBackgroundCopyFile2, self), Val);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyManager2_5_Value = @import("../zig.zig").Guid.initString("03ca98d6-ff5d-49b8-abc6-03dd84127020");
pub const CLSID_BackgroundCopyManager2_5 = &CLSID_BackgroundCopyManager2_5_Value;
pub const BG_CERT_STORE_LOCATION = enum(i32) {
CURRENT_USER = 0,
LOCAL_MACHINE = 1,
CURRENT_SERVICE = 2,
SERVICES = 3,
USERS = 4,
CURRENT_USER_GROUP_POLICY = 5,
LOCAL_MACHINE_GROUP_POLICY = 6,
LOCAL_MACHINE_ENTERPRISE = 7,
};
pub const BG_CERT_STORE_LOCATION_CURRENT_USER = BG_CERT_STORE_LOCATION.CURRENT_USER;
pub const BG_CERT_STORE_LOCATION_LOCAL_MACHINE = BG_CERT_STORE_LOCATION.LOCAL_MACHINE;
pub const BG_CERT_STORE_LOCATION_CURRENT_SERVICE = BG_CERT_STORE_LOCATION.CURRENT_SERVICE;
pub const BG_CERT_STORE_LOCATION_SERVICES = BG_CERT_STORE_LOCATION.SERVICES;
pub const BG_CERT_STORE_LOCATION_USERS = BG_CERT_STORE_LOCATION.USERS;
pub const BG_CERT_STORE_LOCATION_CURRENT_USER_GROUP_POLICY = BG_CERT_STORE_LOCATION.CURRENT_USER_GROUP_POLICY;
pub const BG_CERT_STORE_LOCATION_LOCAL_MACHINE_GROUP_POLICY = BG_CERT_STORE_LOCATION.LOCAL_MACHINE_GROUP_POLICY;
pub const BG_CERT_STORE_LOCATION_LOCAL_MACHINE_ENTERPRISE = BG_CERT_STORE_LOCATION.LOCAL_MACHINE_ENTERPRISE;
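// The BG_CERT_STORE_LOCATION values above select which certificate store a
// client certificate is taken from when calling
// IBackgroundCopyJobHttpOptions.SetClientCertificateByID/SetClientCertificateByName
// (defined below); they mirror the Win32 enumeration of the same name.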
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBackgroundCopyJobHttpOptions_Value = @import("../zig.zig").Guid.initString("f1bd1079-9f01-4bdc-8036-f09b70095066");
pub const IID_IBackgroundCopyJobHttpOptions = &IID_IBackgroundCopyJobHttpOptions_Value;
pub const IBackgroundCopyJobHttpOptions = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetClientCertificateByID: fn(
self: *const IBackgroundCopyJobHttpOptions,
StoreLocation: BG_CERT_STORE_LOCATION,
StoreName: ?[*:0]const u16,
pCertHashBlob: *[20]u8,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetClientCertificateByName: fn(
self: *const IBackgroundCopyJobHttpOptions,
StoreLocation: BG_CERT_STORE_LOCATION,
StoreName: ?[*:0]const u16,
SubjectName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RemoveClientCertificate: fn(
self: *const IBackgroundCopyJobHttpOptions,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetClientCertificate: fn(
self: *const IBackgroundCopyJobHttpOptions,
pStoreLocation: ?*BG_CERT_STORE_LOCATION,
pStoreName: ?*?PWSTR,
ppCertHashBlob: ?*[20]?*u8,
pSubjectName: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetCustomHeaders: fn(
self: *const IBackgroundCopyJobHttpOptions,
RequestHeaders: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCustomHeaders: fn(
self: *const IBackgroundCopyJobHttpOptions,
pRequestHeaders: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetSecurityFlags: fn(
self: *const IBackgroundCopyJobHttpOptions,
Flags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetSecurityFlags: fn(
self: *const IBackgroundCopyJobHttpOptions,
pFlags: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_SetClientCertificateByID(self: *const T, StoreLocation: BG_CERT_STORE_LOCATION, StoreName: ?[*:0]const u16, pCertHashBlob: *[20]u8) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).SetClientCertificateByID(@ptrCast(*const IBackgroundCopyJobHttpOptions, self), StoreLocation, StoreName, pCertHashBlob);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_SetClientCertificateByName(self: *const T, StoreLocation: BG_CERT_STORE_LOCATION, StoreName: ?[*:0]const u16, SubjectName: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).SetClientCertificateByName(@ptrCast(*const IBackgroundCopyJobHttpOptions, self), StoreLocation, StoreName, SubjectName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_RemoveClientCertificate(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).RemoveClientCertificate(@ptrCast(*const IBackgroundCopyJobHttpOptions, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_GetClientCertificate(self: *const T, pStoreLocation: ?*BG_CERT_STORE_LOCATION, pStoreName: ?*?PWSTR, ppCertHashBlob: ?*[20]?*u8, pSubjectName: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).GetClientCertificate(@ptrCast(*const IBackgroundCopyJobHttpOptions, self), pStoreLocation, pStoreName, ppCertHashBlob, pSubjectName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_SetCustomHeaders(self: *const T, RequestHeaders: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).SetCustomHeaders(@ptrCast(*const IBackgroundCopyJobHttpOptions, self), RequestHeaders);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_GetCustomHeaders(self: *const T, pRequestHeaders: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).GetCustomHeaders(@ptrCast(*const IBackgroundCopyJobHttpOptions, self), pRequestHeaders);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_SetSecurityFlags(self: *const T, Flags: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).SetSecurityFlags(@ptrCast(*const IBackgroundCopyJobHttpOptions, self), Flags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions_GetSecurityFlags(self: *const T, pFlags: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions.VTable, self.vtable).GetSecurityFlags(@ptrCast(*const IBackgroundCopyJobHttpOptions, self), pFlags);
}
};}
pub usingnamespace MethodMixin(@This());
};
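// Example (illustrative sketch only, not part of the generated bindings):
// given `http_options: *const IBackgroundCopyJobHttpOptions` -- typically
// obtained by querying a BITS job for IID_IBackgroundCopyJobHttpOptions, which
// is elided here -- custom request headers could be set through the
// mixin-generated wrapper:
//
//     const headers = @import("std").unicode.utf8ToUtf16LeStringLiteral("X-Example-Header: 1\r\n");
//     const hr = http_options.IBackgroundCopyJobHttpOptions_SetCustomHeaders(headers);
//     if (hr != 0) {
//         // non-zero HRESULT: handle the failure as appropriate
//     }
//
// The header name used above is purely hypothetical.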
const CLSID_BackgroundCopyManager3_0_Value = @import("../zig.zig").Guid.initString("659cdea7-489e-11d9-a9cd-000d56965251");
pub const CLSID_BackgroundCopyManager3_0 = &CLSID_BackgroundCopyManager3_0_Value;
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBitsPeerCacheRecord_Value = @import("../zig.zig").Guid.initString("659cdeaf-489e-11d9-a9cd-000d56965251");
pub const IID_IBitsPeerCacheRecord = &IID_IBitsPeerCacheRecord_Value;
pub const IBitsPeerCacheRecord = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetId: fn(
self: *const IBitsPeerCacheRecord,
pVal: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOriginUrl: fn(
self: *const IBitsPeerCacheRecord,
pVal: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFileSize: fn(
self: *const IBitsPeerCacheRecord,
pVal: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFileModificationTime: fn(
self: *const IBitsPeerCacheRecord,
pVal: ?*FILETIME,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetLastAccessTime: fn(
self: *const IBitsPeerCacheRecord,
pVal: ?*FILETIME,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsFileValidated: fn(
self: *const IBitsPeerCacheRecord,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFileRanges: fn(
self: *const IBitsPeerCacheRecord,
pRangeCount: ?*u32,
ppRanges: [*]?*BG_FILE_RANGE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheRecord_GetId(self: *const T, pVal: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheRecord.VTable, self.vtable).GetId(@ptrCast(*const IBitsPeerCacheRecord, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheRecord_GetOriginUrl(self: *const T, pVal: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheRecord.VTable, self.vtable).GetOriginUrl(@ptrCast(*const IBitsPeerCacheRecord, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheRecord_GetFileSize(self: *const T, pVal: ?*u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheRecord.VTable, self.vtable).GetFileSize(@ptrCast(*const IBitsPeerCacheRecord, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheRecord_GetFileModificationTime(self: *const T, pVal: ?*FILETIME) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheRecord.VTable, self.vtable).GetFileModificationTime(@ptrCast(*const IBitsPeerCacheRecord, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheRecord_GetLastAccessTime(self: *const T, pVal: ?*FILETIME) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheRecord.VTable, self.vtable).GetLastAccessTime(@ptrCast(*const IBitsPeerCacheRecord, self), pVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheRecord_IsFileValidated(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheRecord.VTable, self.vtable).IsFileValidated(@ptrCast(*const IBitsPeerCacheRecord, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheRecord_GetFileRanges(self: *const T, pRangeCount: ?*u32, ppRanges: [*]?*BG_FILE_RANGE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheRecord.VTable, self.vtable).GetFileRanges(@ptrCast(*const IBitsPeerCacheRecord, self), pRangeCount, ppRanges);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IEnumBitsPeerCacheRecords_Value = @import("../zig.zig").Guid.initString("659cdea4-489e-11d9-a9cd-000d56965251");
pub const IID_IEnumBitsPeerCacheRecords = &IID_IEnumBitsPeerCacheRecords_Value;
pub const IEnumBitsPeerCacheRecords = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Next: fn(
self: *const IEnumBitsPeerCacheRecords,
celt: u32,
rgelt: ?*?*IBitsPeerCacheRecord,
pceltFetched: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Skip: fn(
self: *const IEnumBitsPeerCacheRecords,
celt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reset: fn(
self: *const IEnumBitsPeerCacheRecords,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Clone: fn(
self: *const IEnumBitsPeerCacheRecords,
ppenum: ?*?*IEnumBitsPeerCacheRecords,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCount: fn(
self: *const IEnumBitsPeerCacheRecords,
puCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeerCacheRecords_Next(self: *const T, celt: u32, rgelt: ?*?*IBitsPeerCacheRecord, pceltFetched: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeerCacheRecords.VTable, self.vtable).Next(@ptrCast(*const IEnumBitsPeerCacheRecords, self), celt, rgelt, pceltFetched);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeerCacheRecords_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeerCacheRecords.VTable, self.vtable).Skip(@ptrCast(*const IEnumBitsPeerCacheRecords, self), celt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeerCacheRecords_Reset(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeerCacheRecords.VTable, self.vtable).Reset(@ptrCast(*const IEnumBitsPeerCacheRecords, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeerCacheRecords_Clone(self: *const T, ppenum: ?*?*IEnumBitsPeerCacheRecords) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeerCacheRecords.VTable, self.vtable).Clone(@ptrCast(*const IEnumBitsPeerCacheRecords, self), ppenum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeerCacheRecords_GetCount(self: *const T, puCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeerCacheRecords.VTable, self.vtable).GetCount(@ptrCast(*const IEnumBitsPeerCacheRecords, self), puCount);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBitsPeer_Value = @import("../zig.zig").Guid.initString("659cdea2-489e-11d9-a9cd-000d56965251");
pub const IID_IBitsPeer = &IID_IBitsPeer_Value;
pub const IBitsPeer = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetPeerName: fn(
self: *const IBitsPeer,
pName: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsAuthenticated: fn(
self: *const IBitsPeer,
pAuth: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsAvailable: fn(
self: *const IBitsPeer,
pOnline: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeer_GetPeerName(self: *const T, pName: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeer.VTable, self.vtable).GetPeerName(@ptrCast(*const IBitsPeer, self), pName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeer_IsAuthenticated(self: *const T, pAuth: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeer.VTable, self.vtable).IsAuthenticated(@ptrCast(*const IBitsPeer, self), pAuth);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeer_IsAvailable(self: *const T, pOnline: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeer.VTable, self.vtable).IsAvailable(@ptrCast(*const IBitsPeer, self), pOnline);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IEnumBitsPeers_Value = @import("../zig.zig").Guid.initString("659cdea5-489e-11d9-a9cd-000d56965251");
pub const IID_IEnumBitsPeers = &IID_IEnumBitsPeers_Value;
pub const IEnumBitsPeers = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Next: fn(
self: *const IEnumBitsPeers,
celt: u32,
rgelt: ?*?*IBitsPeer,
pceltFetched: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Skip: fn(
self: *const IEnumBitsPeers,
celt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reset: fn(
self: *const IEnumBitsPeers,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Clone: fn(
self: *const IEnumBitsPeers,
ppenum: ?*?*IEnumBitsPeers,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCount: fn(
self: *const IEnumBitsPeers,
puCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeers_Next(self: *const T, celt: u32, rgelt: ?*?*IBitsPeer, pceltFetched: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeers.VTable, self.vtable).Next(@ptrCast(*const IEnumBitsPeers, self), celt, rgelt, pceltFetched);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeers_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeers.VTable, self.vtable).Skip(@ptrCast(*const IEnumBitsPeers, self), celt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeers_Reset(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeers.VTable, self.vtable).Reset(@ptrCast(*const IEnumBitsPeers, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeers_Clone(self: *const T, ppenum: ?*?*IEnumBitsPeers) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeers.VTable, self.vtable).Clone(@ptrCast(*const IEnumBitsPeers, self), ppenum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBitsPeers_GetCount(self: *const T, puCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBitsPeers.VTable, self.vtable).GetCount(@ptrCast(*const IEnumBitsPeers, self), puCount);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBitsPeerCacheAdministration_Value = @import("../zig.zig").Guid.initString("659cdead-489e-11d9-a9cd-000d56965251");
pub const IID_IBitsPeerCacheAdministration = &IID_IBitsPeerCacheAdministration_Value;
pub const IBitsPeerCacheAdministration = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetMaximumCacheSize: fn(
self: *const IBitsPeerCacheAdministration,
pBytes: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaximumCacheSize: fn(
self: *const IBitsPeerCacheAdministration,
Bytes: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaximumContentAge: fn(
self: *const IBitsPeerCacheAdministration,
pSeconds: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaximumContentAge: fn(
self: *const IBitsPeerCacheAdministration,
Seconds: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetConfigurationFlags: fn(
self: *const IBitsPeerCacheAdministration,
pFlags: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetConfigurationFlags: fn(
self: *const IBitsPeerCacheAdministration,
Flags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumRecords: fn(
self: *const IBitsPeerCacheAdministration,
ppEnum: ?*?*IEnumBitsPeerCacheRecords,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetRecord: fn(
self: *const IBitsPeerCacheAdministration,
id: ?*const Guid,
ppRecord: ?*?*IBitsPeerCacheRecord,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ClearRecords: fn(
self: *const IBitsPeerCacheAdministration,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DeleteRecord: fn(
self: *const IBitsPeerCacheAdministration,
id: ?*const Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DeleteUrl: fn(
self: *const IBitsPeerCacheAdministration,
url: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumPeers: fn(
self: *const IBitsPeerCacheAdministration,
ppEnum: ?*?*IEnumBitsPeers,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ClearPeers: fn(
self: *const IBitsPeerCacheAdministration,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DiscoverPeers: fn(
self: *const IBitsPeerCacheAdministration,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_GetMaximumCacheSize(self: *const T, pBytes: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).GetMaximumCacheSize(@ptrCast(*const IBitsPeerCacheAdministration, self), pBytes);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_SetMaximumCacheSize(self: *const T, Bytes: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).SetMaximumCacheSize(@ptrCast(*const IBitsPeerCacheAdministration, self), Bytes);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_GetMaximumContentAge(self: *const T, pSeconds: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).GetMaximumContentAge(@ptrCast(*const IBitsPeerCacheAdministration, self), pSeconds);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_SetMaximumContentAge(self: *const T, Seconds: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).SetMaximumContentAge(@ptrCast(*const IBitsPeerCacheAdministration, self), Seconds);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_GetConfigurationFlags(self: *const T, pFlags: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).GetConfigurationFlags(@ptrCast(*const IBitsPeerCacheAdministration, self), pFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_SetConfigurationFlags(self: *const T, Flags: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).SetConfigurationFlags(@ptrCast(*const IBitsPeerCacheAdministration, self), Flags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_EnumRecords(self: *const T, ppEnum: ?*?*IEnumBitsPeerCacheRecords) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).EnumRecords(@ptrCast(*const IBitsPeerCacheAdministration, self), ppEnum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_GetRecord(self: *const T, id: ?*const Guid, ppRecord: ?*?*IBitsPeerCacheRecord) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).GetRecord(@ptrCast(*const IBitsPeerCacheAdministration, self), id, ppRecord);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_ClearRecords(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).ClearRecords(@ptrCast(*const IBitsPeerCacheAdministration, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_DeleteRecord(self: *const T, id: ?*const Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).DeleteRecord(@ptrCast(*const IBitsPeerCacheAdministration, self), id);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_DeleteUrl(self: *const T, url: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).DeleteUrl(@ptrCast(*const IBitsPeerCacheAdministration, self), url);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_EnumPeers(self: *const T, ppEnum: ?*?*IEnumBitsPeers) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).EnumPeers(@ptrCast(*const IBitsPeerCacheAdministration, self), ppEnum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_ClearPeers(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).ClearPeers(@ptrCast(*const IBitsPeerCacheAdministration, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsPeerCacheAdministration_DiscoverPeers(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsPeerCacheAdministration.VTable, self.vtable).DiscoverPeers(@ptrCast(*const IBitsPeerCacheAdministration, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
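// IBitsPeerCacheAdministration exposes management of the local BITS peer cache
// (size and age limits, cached records, and known peers) used by the
// peer-caching feature that IBackgroundCopyJob4 below can opt into via its
// SetPeerCachingFlags method.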
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBackgroundCopyJob4_Value = @import("../zig.zig").Guid.initString("659cdeae-489e-11d9-a9cd-000d56965251");
pub const IID_IBackgroundCopyJob4 = &IID_IBackgroundCopyJob4_Value;
pub const IBackgroundCopyJob4 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyJob3.VTable,
SetPeerCachingFlags: fn(
self: *const IBackgroundCopyJob4,
Flags: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetPeerCachingFlags: fn(
self: *const IBackgroundCopyJob4,
pFlags: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOwnerIntegrityLevel: fn(
self: *const IBackgroundCopyJob4,
pLevel: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetOwnerElevationState: fn(
self: *const IBackgroundCopyJob4,
pElevated: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetMaximumDownloadTime: fn(
self: *const IBackgroundCopyJob4,
Timeout: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetMaximumDownloadTime: fn(
self: *const IBackgroundCopyJob4,
pTimeout: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyJob3.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob4_SetPeerCachingFlags(self: *const T, Flags: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob4.VTable, self.vtable).SetPeerCachingFlags(@ptrCast(*const IBackgroundCopyJob4, self), Flags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob4_GetPeerCachingFlags(self: *const T, pFlags: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob4.VTable, self.vtable).GetPeerCachingFlags(@ptrCast(*const IBackgroundCopyJob4, self), pFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob4_GetOwnerIntegrityLevel(self: *const T, pLevel: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob4.VTable, self.vtable).GetOwnerIntegrityLevel(@ptrCast(*const IBackgroundCopyJob4, self), pLevel);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob4_GetOwnerElevationState(self: *const T, pElevated: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob4.VTable, self.vtable).GetOwnerElevationState(@ptrCast(*const IBackgroundCopyJob4, self), pElevated);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob4_SetMaximumDownloadTime(self: *const T, Timeout: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob4.VTable, self.vtable).SetMaximumDownloadTime(@ptrCast(*const IBackgroundCopyJob4, self), Timeout);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob4_GetMaximumDownloadTime(self: *const T, pTimeout: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob4.VTable, self.vtable).GetMaximumDownloadTime(@ptrCast(*const IBackgroundCopyJob4, self), pTimeout);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBackgroundCopyFile3_Value = @import("../zig.zig").Guid.initString("659cdeaa-489e-11d9-a9cd-000d56965251");
pub const IID_IBackgroundCopyFile3 = &IID_IBackgroundCopyFile3_Value;
pub const IBackgroundCopyFile3 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyFile2.VTable,
GetTemporaryName: fn(
self: *const IBackgroundCopyFile3,
pFilename: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetValidationState: fn(
self: *const IBackgroundCopyFile3,
state: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetValidationState: fn(
self: *const IBackgroundCopyFile3,
pState: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
IsDownloadedFromPeer: fn(
self: *const IBackgroundCopyFile3,
pVal: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyFile2.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile3_GetTemporaryName(self: *const T, pFilename: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile3.VTable, self.vtable).GetTemporaryName(@ptrCast(*const IBackgroundCopyFile3, self), pFilename);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile3_SetValidationState(self: *const T, state: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile3.VTable, self.vtable).SetValidationState(@ptrCast(*const IBackgroundCopyFile3, self), state);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile3_GetValidationState(self: *const T, pState: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile3.VTable, self.vtable).GetValidationState(@ptrCast(*const IBackgroundCopyFile3, self), pState);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile3_IsDownloadedFromPeer(self: *const T, pVal: ?*BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile3.VTable, self.vtable).IsDownloadedFromPeer(@ptrCast(*const IBackgroundCopyFile3, self), pVal);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBackgroundCopyCallback2_Value = @import("../zig.zig").Guid.initString("659cdeac-489e-11d9-a9cd-000d56965251");
pub const IID_IBackgroundCopyCallback2 = &IID_IBackgroundCopyCallback2_Value;
pub const IBackgroundCopyCallback2 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyCallback.VTable,
FileTransferred: fn(
self: *const IBackgroundCopyCallback2,
pJob: ?*IBackgroundCopyJob,
pFile: ?*IBackgroundCopyFile,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyCallback.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback2_FileTransferred(self: *const T, pJob: ?*IBackgroundCopyJob, pFile: ?*IBackgroundCopyFile) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback2.VTable, self.vtable).FileTransferred(@ptrCast(*const IBackgroundCopyCallback2, self), pJob, pFile);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyManager4_0_Value = @import("../zig.zig").Guid.initString("bb6df56b-cace-11dc-9992-0019b93a3a84");
pub const CLSID_BackgroundCopyManager4_0 = &CLSID_BackgroundCopyManager4_0_Value;
// TODO: this type is limited to platform 'windows6.1'
const IID_IBitsTokenOptions_Value = @import("../zig.zig").Guid.initString("9a2584c3-f7d2-457a-9a5e-22b67bffc7d2");
pub const IID_IBitsTokenOptions = &IID_IBitsTokenOptions_Value;
pub const IBitsTokenOptions = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
SetHelperTokenFlags: fn(
self: *const IBitsTokenOptions,
UsageFlags: BG_TOKEN,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetHelperTokenFlags: fn(
self: *const IBitsTokenOptions,
pFlags: ?*BG_TOKEN,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetHelperToken: fn(
self: *const IBitsTokenOptions,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ClearHelperToken: fn(
self: *const IBitsTokenOptions,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetHelperTokenSid: fn(
self: *const IBitsTokenOptions,
pSid: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsTokenOptions_SetHelperTokenFlags(self: *const T, UsageFlags: BG_TOKEN) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsTokenOptions.VTable, self.vtable).SetHelperTokenFlags(@ptrCast(*const IBitsTokenOptions, self), UsageFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsTokenOptions_GetHelperTokenFlags(self: *const T, pFlags: ?*BG_TOKEN) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsTokenOptions.VTable, self.vtable).GetHelperTokenFlags(@ptrCast(*const IBitsTokenOptions, self), pFlags);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsTokenOptions_SetHelperToken(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsTokenOptions.VTable, self.vtable).SetHelperToken(@ptrCast(*const IBitsTokenOptions, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsTokenOptions_ClearHelperToken(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsTokenOptions.VTable, self.vtable).ClearHelperToken(@ptrCast(*const IBitsTokenOptions, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBitsTokenOptions_GetHelperTokenSid(self: *const T, pSid: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBitsTokenOptions.VTable, self.vtable).GetHelperTokenSid(@ptrCast(*const IBitsTokenOptions, self), pSid);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.1'
const IID_IBackgroundCopyFile4_Value = @import("../zig.zig").Guid.initString("ef7e0655-7888-4960-b0e5-730846e03492");
pub const IID_IBackgroundCopyFile4 = &IID_IBackgroundCopyFile4_Value;
pub const IBackgroundCopyFile4 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyFile3.VTable,
GetPeerDownloadStats: fn(
self: *const IBackgroundCopyFile4,
pFromOrigin: ?*u64,
pFromPeers: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyFile3.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile4_GetPeerDownloadStats(self: *const T, pFromOrigin: ?*u64, pFromPeers: ?*u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile4.VTable, self.vtable).GetPeerDownloadStats(@ptrCast(*const IBackgroundCopyFile4, self), pFromOrigin, pFromPeers);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyManager5_0_Value = @import("../zig.zig").Guid.initString("1ecca34c-e88a-44e3-8d6a-8921bde9e452");
pub const CLSID_BackgroundCopyManager5_0 = &CLSID_BackgroundCopyManager5_0_Value;
pub const BITS_JOB_TRANSFER_POLICY = enum(i32) {
ALWAYS = -2147483393,
NOT_ROAMING = -2147483521,
NO_SURCHARGE = -2147483537,
STANDARD = -2147483545,
UNRESTRICTED = -2147483615,
};
pub const BITS_JOB_TRANSFER_POLICY_ALWAYS = BITS_JOB_TRANSFER_POLICY.ALWAYS;
pub const BITS_JOB_TRANSFER_POLICY_NOT_ROAMING = BITS_JOB_TRANSFER_POLICY.NOT_ROAMING;
pub const BITS_JOB_TRANSFER_POLICY_NO_SURCHARGE = BITS_JOB_TRANSFER_POLICY.NO_SURCHARGE;
pub const BITS_JOB_TRANSFER_POLICY_STANDARD = BITS_JOB_TRANSFER_POLICY.STANDARD;
pub const BITS_JOB_TRANSFER_POLICY_UNRESTRICTED = BITS_JOB_TRANSFER_POLICY.UNRESTRICTED;
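// Note: the BITS_JOB_TRANSFER_POLICY values are DWORD bitmask combinations of
// cost-state flags with the 0x80000000 bit always set, which is why they show
// up as negative numbers in this i32-backed enum; reinterpret them as u32 if
// the individual flag bits matter.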
pub const BITS_JOB_PROPERTY_ID = enum(i32) {
ID_COST_FLAGS = 1,
NOTIFICATION_CLSID = 2,
DYNAMIC_CONTENT = 3,
HIGH_PERFORMANCE = 4,
MAX_DOWNLOAD_SIZE = 5,
USE_STORED_CREDENTIALS = 7,
MINIMUM_NOTIFICATION_INTERVAL_MS = 9,
ON_DEMAND_MODE = 10,
};
pub const BITS_JOB_PROPERTY_ID_COST_FLAGS = BITS_JOB_PROPERTY_ID.ID_COST_FLAGS;
pub const BITS_JOB_PROPERTY_NOTIFICATION_CLSID = BITS_JOB_PROPERTY_ID.NOTIFICATION_CLSID;
pub const BITS_JOB_PROPERTY_DYNAMIC_CONTENT = BITS_JOB_PROPERTY_ID.DYNAMIC_CONTENT;
pub const BITS_JOB_PROPERTY_HIGH_PERFORMANCE = BITS_JOB_PROPERTY_ID.HIGH_PERFORMANCE;
pub const BITS_JOB_PROPERTY_MAX_DOWNLOAD_SIZE = BITS_JOB_PROPERTY_ID.MAX_DOWNLOAD_SIZE;
pub const BITS_JOB_PROPERTY_USE_STORED_CREDENTIALS = BITS_JOB_PROPERTY_ID.USE_STORED_CREDENTIALS;
pub const BITS_JOB_PROPERTY_MINIMUM_NOTIFICATION_INTERVAL_MS = BITS_JOB_PROPERTY_ID.MINIMUM_NOTIFICATION_INTERVAL_MS;
pub const BITS_JOB_PROPERTY_ON_DEMAND_MODE = BITS_JOB_PROPERTY_ID.ON_DEMAND_MODE;
pub const BITS_JOB_PROPERTY_VALUE = extern union {
Dword: u32,
ClsID: Guid,
Enable: BOOL,
Uint64: u64,
Target: BG_AUTH_TARGET,
};
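// Which BITS_JOB_PROPERTY_VALUE member is meaningful depends on the
// BITS_JOB_PROPERTY_ID passed to IBackgroundCopyJob5.SetProperty/GetProperty
// below (for example, the cost-flags property is a DWORD, while the
// stored-credentials property uses the BG_AUTH_TARGET member); consult the
// BITS documentation for the exact per-property mapping.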
pub const BITS_FILE_PROPERTY_ID = enum(i32) {
S = 1,
};
pub const BITS_FILE_PROPERTY_ID_HTTP_RESPONSE_HEADERS = BITS_FILE_PROPERTY_ID.S;
pub const BITS_FILE_PROPERTY_VALUE = extern union {
String: ?PWSTR,
};
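// The single BITS_FILE_PROPERTY_ID member is named `S` -- apparently an
// artifact of the generator's prefix stripping on the lone constant
// BITS_FILE_PROPERTY_ID_HTTP_RESPONSE_HEADERS; the alias above restores the
// readable name. For that property, the String member of
// BITS_FILE_PROPERTY_VALUE carries the HTTP response headers as a PWSTR which,
// per the usual COM conventions, the caller is expected to free.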
// TODO: this type is limited to platform 'windows8.0'
const IID_IBackgroundCopyJob5_Value = @import("../zig.zig").Guid.initString("e847030c-bbba-4657-af6d-484aa42bf1fe");
pub const IID_IBackgroundCopyJob5 = &IID_IBackgroundCopyJob5_Value;
pub const IBackgroundCopyJob5 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyJob4.VTable,
SetProperty: fn(
self: *const IBackgroundCopyJob5,
PropertyId: BITS_JOB_PROPERTY_ID,
PropertyValue: BITS_JOB_PROPERTY_VALUE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProperty: fn(
self: *const IBackgroundCopyJob5,
PropertyId: BITS_JOB_PROPERTY_ID,
PropertyValue: ?*BITS_JOB_PROPERTY_VALUE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyJob4.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob5_SetProperty(self: *const T, PropertyId: BITS_JOB_PROPERTY_ID, PropertyValue: BITS_JOB_PROPERTY_VALUE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob5.VTable, self.vtable).SetProperty(@ptrCast(*const IBackgroundCopyJob5, self), PropertyId, PropertyValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob5_GetProperty(self: *const T, PropertyId: BITS_JOB_PROPERTY_ID, PropertyValue: ?*BITS_JOB_PROPERTY_VALUE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob5.VTable, self.vtable).GetProperty(@ptrCast(*const IBackgroundCopyJob5, self), PropertyId, PropertyValue);
}
};}
pub usingnamespace MethodMixin(@This());
};
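// Example (illustrative sketch only, not part of the generated bindings):
// reading the cost-flags property from a `job5: *const IBackgroundCopyJob5`
// obtained elsewhere:
//
//     var value = BITS_JOB_PROPERTY_VALUE{ .Dword = 0 };
//     const hr = job5.IBackgroundCopyJob5_GetProperty(BITS_JOB_PROPERTY_ID_COST_FLAGS, &value);
//     // On success (hr == 0), value.Dword holds the job's transfer cost flag bits.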
// TODO: this type is limited to platform 'windows8.0'
const IID_IBackgroundCopyFile5_Value = @import("../zig.zig").Guid.initString("85c1657f-dafc-40e8-8834-df18ea25717e");
pub const IID_IBackgroundCopyFile5 = &IID_IBackgroundCopyFile5_Value;
pub const IBackgroundCopyFile5 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyFile4.VTable,
SetProperty: fn(
self: *const IBackgroundCopyFile5,
PropertyId: BITS_FILE_PROPERTY_ID,
PropertyValue: BITS_FILE_PROPERTY_VALUE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProperty: fn(
self: *const IBackgroundCopyFile5,
PropertyId: BITS_FILE_PROPERTY_ID,
PropertyValue: ?*BITS_FILE_PROPERTY_VALUE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyFile4.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile5_SetProperty(self: *const T, PropertyId: BITS_FILE_PROPERTY_ID, PropertyValue: BITS_FILE_PROPERTY_VALUE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile5.VTable, self.vtable).SetProperty(@ptrCast(*const IBackgroundCopyFile5, self), PropertyId, PropertyValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile5_GetProperty(self: *const T, PropertyId: BITS_FILE_PROPERTY_ID, PropertyValue: ?*BITS_FILE_PROPERTY_VALUE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile5.VTable, self.vtable).GetProperty(@ptrCast(*const IBackgroundCopyFile5, self), PropertyId, PropertyValue);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyManager10_1_Value = @import("../zig.zig").Guid.initString("4bd3e4e1-7bd4-4a2b-9964-496400de5193");
pub const CLSID_BackgroundCopyManager10_1 = &CLSID_BackgroundCopyManager10_1_Value;
// TODO: this type is limited to platform 'windows10.0.15063'
const IID_IBackgroundCopyCallback3_Value = @import("../zig.zig").Guid.initString("98c97bd2-e32b-4ad8-a528-95fd8b16bd42");
pub const IID_IBackgroundCopyCallback3 = &IID_IBackgroundCopyCallback3_Value;
pub const IBackgroundCopyCallback3 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyCallback2.VTable,
FileRangesTransferred: fn(
self: *const IBackgroundCopyCallback3,
job: ?*IBackgroundCopyJob,
file: ?*IBackgroundCopyFile,
rangeCount: u32,
ranges: [*]const BG_FILE_RANGE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyCallback2.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback3_FileRangesTransferred(self: *const T, job: ?*IBackgroundCopyJob, file: ?*IBackgroundCopyFile, rangeCount: u32, ranges: [*]const BG_FILE_RANGE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback3.VTable, self.vtable).FileRangesTransferred(@ptrCast(*const IBackgroundCopyCallback3, self), job, file, rangeCount, ranges);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows10.0.15063'
const IID_IBackgroundCopyFile6_Value = @import("../zig.zig").Guid.initString("cf6784f7-d677-49fd-9368-cb47aee9d1ad");
pub const IID_IBackgroundCopyFile6 = &IID_IBackgroundCopyFile6_Value;
pub const IBackgroundCopyFile6 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyFile5.VTable,
UpdateDownloadPosition: fn(
self: *const IBackgroundCopyFile6,
offset: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
RequestFileRanges: fn(
self: *const IBackgroundCopyFile6,
rangeCount: u32,
ranges: [*]const BG_FILE_RANGE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFilledFileRanges: fn(
self: *const IBackgroundCopyFile6,
rangeCount: ?*u32,
ranges: [*]?*BG_FILE_RANGE,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyFile5.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile6_UpdateDownloadPosition(self: *const T, offset: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile6.VTable, self.vtable).UpdateDownloadPosition(@ptrCast(*const IBackgroundCopyFile6, self), offset);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile6_RequestFileRanges(self: *const T, rangeCount: u32, ranges: [*]const BG_FILE_RANGE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile6.VTable, self.vtable).RequestFileRanges(@ptrCast(*const IBackgroundCopyFile6, self), rangeCount, ranges);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyFile6_GetFilledFileRanges(self: *const T, rangeCount: ?*u32, ranges: [*]?*BG_FILE_RANGE) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyFile6.VTable, self.vtable).GetFilledFileRanges(@ptrCast(*const IBackgroundCopyFile6, self), rangeCount, ranges);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyManager10_2_Value = @import("../zig.zig").Guid.initString("4575438f-a6c8-4976-b0fe-2f26b80d959e");
pub const CLSID_BackgroundCopyManager10_2 = &CLSID_BackgroundCopyManager10_2_Value;
// TODO: this type is limited to platform 'windows10.0.17763'
const IID_IBackgroundCopyJobHttpOptions2_Value = @import("../zig.zig").Guid.initString("b591a192-a405-4fc3-8323-4c5c542578fc");
pub const IID_IBackgroundCopyJobHttpOptions2 = &IID_IBackgroundCopyJobHttpOptions2_Value;
pub const IBackgroundCopyJobHttpOptions2 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyJobHttpOptions.VTable,
SetHttpMethod: fn(
self: *const IBackgroundCopyJobHttpOptions2,
method: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetHttpMethod: fn(
self: *const IBackgroundCopyJobHttpOptions2,
method: ?*?PWSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyJobHttpOptions.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions2_SetHttpMethod(self: *const T, method: ?[*:0]const u16) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions2.VTable, self.vtable).SetHttpMethod(@ptrCast(*const IBackgroundCopyJobHttpOptions2, self), method);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions2_GetHttpMethod(self: *const T, method: ?*?PWSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions2.VTable, self.vtable).GetHttpMethod(@ptrCast(*const IBackgroundCopyJobHttpOptions2, self), method);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyManager10_3_Value = @import("../zig.zig").Guid.initString("5fd42ad5-c04e-4d36-adc7-e08ff15737ad");
pub const CLSID_BackgroundCopyManager10_3 = &CLSID_BackgroundCopyManager10_3_Value;
const IID_IBackgroundCopyServerCertificateValidationCallback_Value = @import("../zig.zig").Guid.initString("4cec0d02-def7-4158-813a-c32a46945ff7");
pub const IID_IBackgroundCopyServerCertificateValidationCallback = &IID_IBackgroundCopyServerCertificateValidationCallback_Value;
pub const IBackgroundCopyServerCertificateValidationCallback = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
ValidateServerCertificate: fn(
self: *const IBackgroundCopyServerCertificateValidationCallback,
job: ?*IBackgroundCopyJob,
file: ?*IBackgroundCopyFile,
certLength: u32,
certData: [*:0]const u8,
certEncodingType: u32,
certStoreLength: u32,
certStoreData: [*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyServerCertificateValidationCallback_ValidateServerCertificate(self: *const T, job: ?*IBackgroundCopyJob, file: ?*IBackgroundCopyFile, certLength: u32, certData: [*:0]const u8, certEncodingType: u32, certStoreLength: u32, certStoreData: [*:0]const u8) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyServerCertificateValidationCallback.VTable, self.vtable).ValidateServerCertificate(@ptrCast(*const IBackgroundCopyServerCertificateValidationCallback, self), job, file, certLength, certData, certEncodingType, certStoreLength, certStoreData);
}
};}
pub usingnamespace MethodMixin(@This());
};
const IID_IBackgroundCopyJobHttpOptions3_Value = @import("../zig.zig").Guid.initString("8a9263d3-fd4c-4eda-9b28-30132a4d4e3c");
pub const IID_IBackgroundCopyJobHttpOptions3 = &IID_IBackgroundCopyJobHttpOptions3_Value;
pub const IBackgroundCopyJobHttpOptions3 = extern struct {
pub const VTable = extern struct {
base: IBackgroundCopyJobHttpOptions2.VTable,
SetServerCertificateValidationInterface: fn(
self: *const IBackgroundCopyJobHttpOptions3,
certValidationCallback: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
MakeCustomHeadersWriteOnly: fn(
self: *const IBackgroundCopyJobHttpOptions3,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IBackgroundCopyJobHttpOptions2.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions3_SetServerCertificateValidationInterface(self: *const T, certValidationCallback: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions3.VTable, self.vtable).SetServerCertificateValidationInterface(@ptrCast(*const IBackgroundCopyJobHttpOptions3, self), certValidationCallback);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJobHttpOptions3_MakeCustomHeadersWriteOnly(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJobHttpOptions3.VTable, self.vtable).MakeCustomHeadersWriteOnly(@ptrCast(*const IBackgroundCopyJobHttpOptions3, self));
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BITSExtensionSetupFactory_Value = @import("../zig.zig").Guid.initString("efbbab68-7286-4783-94bf-9461d8b7e7e9");
pub const CLSID_BITSExtensionSetupFactory = &CLSID_BITSExtensionSetupFactory_Value;
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBITSExtensionSetup_Value = @import("../zig.zig").Guid.initString("29cfbbf7-09e4-4b97-b0bc-f2287e3d8eb3");
pub const IID_IBITSExtensionSetup = &IID_IBITSExtensionSetup_Value;
pub const IBITSExtensionSetup = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
EnableBITSUploads: fn(
self: *const IBITSExtensionSetup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
DisableBITSUploads: fn(
self: *const IBITSExtensionSetup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCleanupTaskName: fn(
self: *const IBITSExtensionSetup,
pTaskName: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCleanupTask: fn(
self: *const IBITSExtensionSetup,
riid: ?*const Guid,
ppUnk: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBITSExtensionSetup_EnableBITSUploads(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBITSExtensionSetup.VTable, self.vtable).EnableBITSUploads(@ptrCast(*const IBITSExtensionSetup, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBITSExtensionSetup_DisableBITSUploads(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBITSExtensionSetup.VTable, self.vtable).DisableBITSUploads(@ptrCast(*const IBITSExtensionSetup, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBITSExtensionSetup_GetCleanupTaskName(self: *const T, pTaskName: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IBITSExtensionSetup.VTable, self.vtable).GetCleanupTaskName(@ptrCast(*const IBITSExtensionSetup, self), pTaskName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBITSExtensionSetup_GetCleanupTask(self: *const T, riid: ?*const Guid, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IBITSExtensionSetup.VTable, self.vtable).GetCleanupTask(@ptrCast(*const IBITSExtensionSetup, self), riid, ppUnk);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IBITSExtensionSetupFactory_Value = @import("../zig.zig").Guid.initString("d5d2d542-5503-4e64-8b48-72ef91a32ee1");
pub const IID_IBITSExtensionSetupFactory = &IID_IBITSExtensionSetupFactory_Value;
pub const IBITSExtensionSetupFactory = extern struct {
pub const VTable = extern struct {
base: IDispatch.VTable,
GetObject: fn(
self: *const IBITSExtensionSetupFactory,
Path: ?BSTR,
ppExtensionSetup: ?*?*IBITSExtensionSetup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IDispatch.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBITSExtensionSetupFactory_GetObject(self: *const T, Path: ?BSTR, ppExtensionSetup: ?*?*IBITSExtensionSetup) callconv(.Inline) HRESULT {
return @ptrCast(*const IBITSExtensionSetupFactory.VTable, self.vtable).GetObject(@ptrCast(*const IBITSExtensionSetupFactory, self), Path, ppExtensionSetup);
}
};}
pub usingnamespace MethodMixin(@This());
};
const CLSID_BackgroundCopyQMgr_Value = @import("../zig.zig").Guid.initString("69ad4aee-51be-439b-a92c-86ae490e8b30");
pub const CLSID_BackgroundCopyQMgr = &CLSID_BackgroundCopyQMgr_Value;
pub const FILESETINFO = extern struct {
bstrRemoteFile: ?BSTR,
bstrLocalFile: ?BSTR,
dwSizeHint: u32,
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyJob1_Value = @import("../zig.zig").Guid.initString("59f5553c-2031-4629-bb18-2645a6970947");
pub const IID_IBackgroundCopyJob1 = &IID_IBackgroundCopyJob1_Value;
pub const IBackgroundCopyJob1 = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CancelJob: fn(
self: *const IBackgroundCopyJob1,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProgress: fn(
self: *const IBackgroundCopyJob1,
dwFlags: u32,
pdwProgress: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetStatus: fn(
self: *const IBackgroundCopyJob1,
pdwStatus: ?*u32,
pdwWin32Result: ?*u32,
pdwTransportResult: ?*u32,
pdwNumOfRetries: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AddFiles: fn(
self: *const IBackgroundCopyJob1,
cFileCount: u32,
ppFileSet: [*]?*FILESETINFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFile: fn(
self: *const IBackgroundCopyJob1,
cFileIndex: u32,
pFileInfo: ?*FILESETINFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetFileCount: fn(
self: *const IBackgroundCopyJob1,
pdwFileCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SwitchToForeground: fn(
self: *const IBackgroundCopyJob1,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_JobID: fn(
self: *const IBackgroundCopyJob1,
pguidJobID: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_CancelJob(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).CancelJob(@ptrCast(*const IBackgroundCopyJob1, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_GetProgress(self: *const T, dwFlags: u32, pdwProgress: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).GetProgress(@ptrCast(*const IBackgroundCopyJob1, self), dwFlags, pdwProgress);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_GetStatus(self: *const T, pdwStatus: ?*u32, pdwWin32Result: ?*u32, pdwTransportResult: ?*u32, pdwNumOfRetries: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).GetStatus(@ptrCast(*const IBackgroundCopyJob1, self), pdwStatus, pdwWin32Result, pdwTransportResult, pdwNumOfRetries);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_AddFiles(self: *const T, cFileCount: u32, ppFileSet: [*]?*FILESETINFO) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).AddFiles(@ptrCast(*const IBackgroundCopyJob1, self), cFileCount, ppFileSet);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_GetFile(self: *const T, cFileIndex: u32, pFileInfo: ?*FILESETINFO) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).GetFile(@ptrCast(*const IBackgroundCopyJob1, self), cFileIndex, pFileInfo);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_GetFileCount(self: *const T, pdwFileCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).GetFileCount(@ptrCast(*const IBackgroundCopyJob1, self), pdwFileCount);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_SwitchToForeground(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).SwitchToForeground(@ptrCast(*const IBackgroundCopyJob1, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyJob1_get_JobID(self: *const T, pguidJobID: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyJob1.VTable, self.vtable).get_JobID(@ptrCast(*const IBackgroundCopyJob1, self), pguidJobID);
}
};}
pub usingnamespace MethodMixin(@This());
};
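// Illustrative note (not part of the generated bindings): a hedged sketch of queueing one
// file on a legacy BITS 1.0 job, assuming `job: *IBackgroundCopyJob1` plus caller-owned
// BSTR values `remote` and `local` (hypothetical names; BSTR allocation and freeing are
// the caller's responsibility):
//
//     var info = FILESETINFO{
//         .bstrRemoteFile = remote,
//         .bstrLocalFile = local,
//         .dwSizeHint = 0,
//     };
//     var file_set = [_]?*FILESETINFO{&info};
//     if (job.IBackgroundCopyJob1_AddFiles(1, &file_set) < 0)
//         return error.AddFilesFailed;
//
// The single-element array coerces to the `[*]?*FILESETINFO` parameter declared in the
// vtable above.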
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IEnumBackgroundCopyJobs1_Value = @import("../zig.zig").Guid.initString("8baeba9d-8f1c-42c4-b82c-09ae79980d25");
pub const IID_IEnumBackgroundCopyJobs1 = &IID_IEnumBackgroundCopyJobs1_Value;
pub const IEnumBackgroundCopyJobs1 = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Next: fn(
self: *const IEnumBackgroundCopyJobs1,
celt: u32,
rgelt: [*]Guid,
pceltFetched: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Skip: fn(
self: *const IEnumBackgroundCopyJobs1,
celt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reset: fn(
self: *const IEnumBackgroundCopyJobs1,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Clone: fn(
self: *const IEnumBackgroundCopyJobs1,
ppenum: ?*?*IEnumBackgroundCopyJobs1,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCount: fn(
self: *const IEnumBackgroundCopyJobs1,
puCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs1_Next(self: *const T, celt: u32, rgelt: [*]Guid, pceltFetched: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs1.VTable, self.vtable).Next(@ptrCast(*const IEnumBackgroundCopyJobs1, self), celt, rgelt, pceltFetched);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs1_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs1.VTable, self.vtable).Skip(@ptrCast(*const IEnumBackgroundCopyJobs1, self), celt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs1_Reset(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs1.VTable, self.vtable).Reset(@ptrCast(*const IEnumBackgroundCopyJobs1, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs1_Clone(self: *const T, ppenum: ?*?*IEnumBackgroundCopyJobs1) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs1.VTable, self.vtable).Clone(@ptrCast(*const IEnumBackgroundCopyJobs1, self), ppenum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyJobs1_GetCount(self: *const T, puCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyJobs1.VTable, self.vtable).GetCount(@ptrCast(*const IEnumBackgroundCopyJobs1, self), puCount);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const GROUPPROP = enum(i32) {
PRIORITY = 0,
REMOTEUSERID = 1,
REMOTEUSERPWD = 2,
LOCALUSERID = 3,
LOCALUSERPWD = 4,
PROTOCOLFLAGS = 5,
NOTIFYFLAGS = 6,
NOTIFYCLSID = 7,
PROGRESSSIZE = 8,
PROGRESSPERCENT = 9,
PROGRESSTIME = 10,
DISPLAYNAME = 11,
DESCRIPTION = 12,
};
pub const GROUPPROP_PRIORITY = GROUPPROP.PRIORITY;
pub const GROUPPROP_REMOTEUSERID = GROUPPROP.REMOTEUSERID;
pub const GROUPPROP_REMOTEUSERPWD = GROUPPROP.REMOTEUSERPWD;
pub const GROUPPROP_LOCALUSERID = GROUPPROP.LOCALUSERID;
pub const GROUPPROP_LOCALUSERPWD = GROUPPROP.LOCALUSERPWD;
pub const GROUPPROP_PROTOCOLFLAGS = GROUPPROP.PROTOCOLFLAGS;
pub const GROUPPROP_NOTIFYFLAGS = GROUPPROP.NOTIFYFLAGS;
pub const GROUPPROP_NOTIFYCLSID = GROUPPROP.NOTIFYCLSID;
pub const GROUPPROP_PROGRESSSIZE = GROUPPROP.PROGRESSSIZE;
pub const GROUPPROP_PROGRESSPERCENT = GROUPPROP.PROGRESSPERCENT;
pub const GROUPPROP_PROGRESSTIME = GROUPPROP.PROGRESSTIME;
pub const GROUPPROP_DISPLAYNAME = GROUPPROP.DISPLAYNAME;
pub const GROUPPROP_DESCRIPTION = GROUPPROP.DESCRIPTION;
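// Illustrative note (not part of the generated bindings): the flat `GROUPPROP_*` constants
// above appear to mirror the original C enumerator names, so code ported from C can keep
// using them while idiomatic Zig code can use the scoped `GROUPPROP.<name>` form directly,
// e.g. in a call to IBackgroundCopyGroup_GetProp below.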
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyGroup_Value = @import("../zig.zig").Guid.initString("1ded80a7-53ea-424f-8a04-17fea9adc4f5");
pub const IID_IBackgroundCopyGroup = &IID_IBackgroundCopyGroup_Value;
pub const IBackgroundCopyGroup = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetProp: fn(
self: *const IBackgroundCopyGroup,
propID: GROUPPROP,
pvarVal: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetProp: fn(
self: *const IBackgroundCopyGroup,
propID: GROUPPROP,
pvarVal: ?*VARIANT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetProgress: fn(
self: *const IBackgroundCopyGroup,
dwFlags: u32,
pdwProgress: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetStatus: fn(
self: *const IBackgroundCopyGroup,
pdwStatus: ?*u32,
pdwJobIndex: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetJob: fn(
self: *const IBackgroundCopyGroup,
jobID: Guid,
ppJob: ?*?*IBackgroundCopyJob1,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SuspendGroup: fn(
self: *const IBackgroundCopyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ResumeGroup: fn(
self: *const IBackgroundCopyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CancelGroup: fn(
self: *const IBackgroundCopyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_Size: fn(
self: *const IBackgroundCopyGroup,
pdwSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// TODO: this function has a "SpecialName", should Zig do anything with this?
get_GroupID: fn(
self: *const IBackgroundCopyGroup,
pguidGroupID: ?*Guid,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateJob: fn(
self: *const IBackgroundCopyGroup,
guidJobID: Guid,
ppJob: ?*?*IBackgroundCopyJob1,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumJobs: fn(
self: *const IBackgroundCopyGroup,
dwFlags: u32,
ppEnumJobs: ?*?*IEnumBackgroundCopyJobs1,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SwitchToForeground: fn(
self: *const IBackgroundCopyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
QueryNewJobInterface: fn(
self: *const IBackgroundCopyGroup,
iid: ?*const Guid,
pUnk: ?*?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetNotificationPointer: fn(
self: *const IBackgroundCopyGroup,
iid: ?*const Guid,
pUnk: ?*IUnknown,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_GetProp(self: *const T, propID: GROUPPROP, pvarVal: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).GetProp(@ptrCast(*const IBackgroundCopyGroup, self), propID, pvarVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_SetProp(self: *const T, propID: GROUPPROP, pvarVal: ?*VARIANT) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).SetProp(@ptrCast(*const IBackgroundCopyGroup, self), propID, pvarVal);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_GetProgress(self: *const T, dwFlags: u32, pdwProgress: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).GetProgress(@ptrCast(*const IBackgroundCopyGroup, self), dwFlags, pdwProgress);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_GetStatus(self: *const T, pdwStatus: ?*u32, pdwJobIndex: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).GetStatus(@ptrCast(*const IBackgroundCopyGroup, self), pdwStatus, pdwJobIndex);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_GetJob(self: *const T, jobID: Guid, ppJob: ?*?*IBackgroundCopyJob1) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).GetJob(@ptrCast(*const IBackgroundCopyGroup, self), jobID, ppJob);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_SuspendGroup(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).SuspendGroup(@ptrCast(*const IBackgroundCopyGroup, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_ResumeGroup(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).ResumeGroup(@ptrCast(*const IBackgroundCopyGroup, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_CancelGroup(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).CancelGroup(@ptrCast(*const IBackgroundCopyGroup, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_get_Size(self: *const T, pdwSize: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).get_Size(@ptrCast(*const IBackgroundCopyGroup, self), pdwSize);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_get_GroupID(self: *const T, pguidGroupID: ?*Guid) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).get_GroupID(@ptrCast(*const IBackgroundCopyGroup, self), pguidGroupID);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_CreateJob(self: *const T, guidJobID: Guid, ppJob: ?*?*IBackgroundCopyJob1) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).CreateJob(@ptrCast(*const IBackgroundCopyGroup, self), guidJobID, ppJob);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_EnumJobs(self: *const T, dwFlags: u32, ppEnumJobs: ?*?*IEnumBackgroundCopyJobs1) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).EnumJobs(@ptrCast(*const IBackgroundCopyGroup, self), dwFlags, ppEnumJobs);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_SwitchToForeground(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).SwitchToForeground(@ptrCast(*const IBackgroundCopyGroup, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_QueryNewJobInterface(self: *const T, iid: ?*const Guid, pUnk: ?*?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).QueryNewJobInterface(@ptrCast(*const IBackgroundCopyGroup, self), iid, pUnk);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyGroup_SetNotificationPointer(self: *const T, iid: ?*const Guid, pUnk: ?*IUnknown) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyGroup.VTable, self.vtable).SetNotificationPointer(@ptrCast(*const IBackgroundCopyGroup, self), iid, pUnk);
}
};}
pub usingnamespace MethodMixin(@This());
};
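// Illustrative note (not part of the generated bindings): IBackgroundCopyGroup exposes the
// legacy BITS 1.0 group model. A hedged sketch, assuming `group: *IBackgroundCopyGroup`
// and a caller-initialized `job_id: Guid` (hypothetical names):
//
//     var job: ?*IBackgroundCopyJob1 = null;
//     if (group.IBackgroundCopyGroup_CreateJob(job_id, &job) < 0)
//         return error.CreateJobFailed;
//
// CreateJob takes the Guid by value, matching the vtable declaration above, and writes the
// resulting interface pointer through the optional double pointer.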
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IEnumBackgroundCopyGroups_Value = @import("../zig.zig").Guid.initString("d993e603-4aa4-47c5-8665-c20d39c2ba4f");
pub const IID_IEnumBackgroundCopyGroups = &IID_IEnumBackgroundCopyGroups_Value;
pub const IEnumBackgroundCopyGroups = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
Next: fn(
self: *const IEnumBackgroundCopyGroups,
celt: u32,
rgelt: [*]Guid,
pceltFetched: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Skip: fn(
self: *const IEnumBackgroundCopyGroups,
celt: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Reset: fn(
self: *const IEnumBackgroundCopyGroups,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
Clone: fn(
self: *const IEnumBackgroundCopyGroups,
ppenum: ?*?*IEnumBackgroundCopyGroups,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetCount: fn(
self: *const IEnumBackgroundCopyGroups,
puCount: ?*u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyGroups_Next(self: *const T, celt: u32, rgelt: [*]Guid, pceltFetched: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyGroups.VTable, self.vtable).Next(@ptrCast(*const IEnumBackgroundCopyGroups, self), celt, rgelt, pceltFetched);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyGroups_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyGroups.VTable, self.vtable).Skip(@ptrCast(*const IEnumBackgroundCopyGroups, self), celt);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyGroups_Reset(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyGroups.VTable, self.vtable).Reset(@ptrCast(*const IEnumBackgroundCopyGroups, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyGroups_Clone(self: *const T, ppenum: ?*?*IEnumBackgroundCopyGroups) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyGroups.VTable, self.vtable).Clone(@ptrCast(*const IEnumBackgroundCopyGroups, self), ppenum);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IEnumBackgroundCopyGroups_GetCount(self: *const T, puCount: ?*u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IEnumBackgroundCopyGroups.VTable, self.vtable).GetCount(@ptrCast(*const IEnumBackgroundCopyGroups, self), puCount);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyCallback1_Value = @import("../zig.zig").Guid.initString("084f6593-3800-4e08-9b59-99fa59addf82");
pub const IID_IBackgroundCopyCallback1 = &IID_IBackgroundCopyCallback1_Value;
pub const IBackgroundCopyCallback1 = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
OnStatus: fn(
self: *const IBackgroundCopyCallback1,
pGroup: ?*IBackgroundCopyGroup,
pJob: ?*IBackgroundCopyJob1,
dwFileIndex: u32,
dwStatus: u32,
dwNumOfRetries: u32,
dwWin32Result: u32,
dwTransportResult: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnProgress: fn(
self: *const IBackgroundCopyCallback1,
ProgressType: u32,
pGroup: ?*IBackgroundCopyGroup,
pJob: ?*IBackgroundCopyJob1,
dwFileIndex: u32,
dwProgressValue: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
OnProgressEx: fn(
self: *const IBackgroundCopyCallback1,
ProgressType: u32,
pGroup: ?*IBackgroundCopyGroup,
pJob: ?*IBackgroundCopyJob1,
dwFileIndex: u32,
dwProgressValue: u32,
dwByteArraySize: u32,
pByte: [*:0]u8,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback1_OnStatus(self: *const T, pGroup: ?*IBackgroundCopyGroup, pJob: ?*IBackgroundCopyJob1, dwFileIndex: u32, dwStatus: u32, dwNumOfRetries: u32, dwWin32Result: u32, dwTransportResult: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback1.VTable, self.vtable).OnStatus(@ptrCast(*const IBackgroundCopyCallback1, self), pGroup, pJob, dwFileIndex, dwStatus, dwNumOfRetries, dwWin32Result, dwTransportResult);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback1_OnProgress(self: *const T, ProgressType: u32, pGroup: ?*IBackgroundCopyGroup, pJob: ?*IBackgroundCopyJob1, dwFileIndex: u32, dwProgressValue: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback1.VTable, self.vtable).OnProgress(@ptrCast(*const IBackgroundCopyCallback1, self), ProgressType, pGroup, pJob, dwFileIndex, dwProgressValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyCallback1_OnProgressEx(self: *const T, ProgressType: u32, pGroup: ?*IBackgroundCopyGroup, pJob: ?*IBackgroundCopyJob1, dwFileIndex: u32, dwProgressValue: u32, dwByteArraySize: u32, pByte: [*:0]u8) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyCallback1.VTable, self.vtable).OnProgressEx(@ptrCast(*const IBackgroundCopyCallback1, self), ProgressType, pGroup, pJob, dwFileIndex, dwProgressValue, dwByteArraySize, pByte);
}
};}
pub usingnamespace MethodMixin(@This());
};
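// Illustrative note (not part of the generated bindings): IBackgroundCopyCallback1 is
// implemented by the caller rather than by the system. A Zig implementation would declare
// its own extern struct whose first field is a `*const IBackgroundCopyCallback1.VTable`
// populated with `callconv(WINAPI)` functions matching the signatures above, and would
// then be registered via IBackgroundCopyGroup_SetNotificationPointer; a full sketch is
// omitted here to keep this generated file compact.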
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IBackgroundCopyQMgr_Value = @import("../zig.zig").Guid.initString("16f41c69-09f5-41d2-8cd8-3c08c47bc8a8");
pub const IID_IBackgroundCopyQMgr = &IID_IBackgroundCopyQMgr_Value;
pub const IBackgroundCopyQMgr = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
CreateGroup: fn(
self: *const IBackgroundCopyQMgr,
guidGroupID: Guid,
ppGroup: ?*?*IBackgroundCopyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetGroup: fn(
self: *const IBackgroundCopyQMgr,
groupID: Guid,
ppGroup: ?*?*IBackgroundCopyGroup,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
EnumGroups: fn(
self: *const IBackgroundCopyQMgr,
dwFlags: u32,
ppEnumGroups: ?*?*IEnumBackgroundCopyGroups,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyQMgr_CreateGroup(self: *const T, guidGroupID: Guid, ppGroup: ?*?*IBackgroundCopyGroup) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyQMgr.VTable, self.vtable).CreateGroup(@ptrCast(*const IBackgroundCopyQMgr, self), guidGroupID, ppGroup);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyQMgr_GetGroup(self: *const T, groupID: Guid, ppGroup: ?*?*IBackgroundCopyGroup) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyQMgr.VTable, self.vtable).GetGroup(@ptrCast(*const IBackgroundCopyQMgr, self), groupID, ppGroup);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IBackgroundCopyQMgr_EnumGroups(self: *const T, dwFlags: u32, ppEnumGroups: ?*?*IEnumBackgroundCopyGroups) callconv(.Inline) HRESULT {
return @ptrCast(*const IBackgroundCopyQMgr.VTable, self.vtable).EnumGroups(@ptrCast(*const IBackgroundCopyQMgr, self), dwFlags, ppEnumGroups);
}
};}
pub usingnamespace MethodMixin(@This());
};
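// Illustrative note (not part of the generated bindings): IBackgroundCopyQMgr is the
// top-level legacy queue manager, normally obtained through CoCreateInstance with
// CLSID_BackgroundCopyQMgr declared above. A hedged sketch, assuming
// `qmgr: *IBackgroundCopyQMgr` and a freshly generated `group_id: Guid`
// (hypothetical names):
//
//     var group: ?*IBackgroundCopyGroup = null;
//     if (qmgr.IBackgroundCopyQMgr_CreateGroup(group_id, &group) < 0)
//         return error.CreateGroupFailed;
//
// Because the mixin chains to IUnknown.MethodMixin via `usingnamespace`, the base IUnknown
// methods (QueryInterface/AddRef/Release wrappers, as defined in ../system/com.zig) are
// also callable on the returned pointer for reference management.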
//--------------------------------------------------------------------------------
// Section: Functions (0)
//--------------------------------------------------------------------------------
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (9)
//--------------------------------------------------------------------------------
const Guid = @import("../zig.zig").Guid;
const BOOL = @import("../foundation.zig").BOOL;
const BSTR = @import("../foundation.zig").BSTR;
const FILETIME = @import("../foundation.zig").FILETIME;
const HRESULT = @import("../foundation.zig").HRESULT;
const IDispatch = @import("../system/ole_automation.zig").IDispatch;
const IUnknown = @import("../system/com.zig").IUnknown;
const PWSTR = @import("../foundation.zig").PWSTR;
const VARIANT = @import("../system/ole_automation.zig").VARIANT;
test {
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | deps/zigwin32/win32/networking/background_intelligent_transfer_service.zig |