Dataset columns:
  code: string (lengths 38 to 801k)
  repo_path: string (lengths 6 to 263)
const std = @import("std"); const mem = std.mem; const OpenPunctuation = @This(); allocator: *mem.Allocator, array: []bool, lo: u21 = 40, hi: u21 = 65378, pub fn init(allocator: *mem.Allocator) !OpenPunctuation { var instance = OpenPunctuation{ .allocator = allocator, .array = try allocator.alloc(bool, 65339), }; mem.set(bool, instance.array, false); var index: u21 = 0; instance.array[0] = true; instance.array[51] = true; instance.array[83] = true; instance.array[3858] = true; instance.array[3860] = true; instance.array[5747] = true; instance.array[8178] = true; instance.array[8182] = true; instance.array[8221] = true; instance.array[8277] = true; instance.array[8293] = true; instance.array[8928] = true; instance.array[8930] = true; instance.array[8961] = true; instance.array[10048] = true; instance.array[10050] = true; instance.array[10052] = true; instance.array[10054] = true; instance.array[10056] = true; instance.array[10058] = true; instance.array[10060] = true; instance.array[10141] = true; instance.array[10174] = true; instance.array[10176] = true; instance.array[10178] = true; instance.array[10180] = true; instance.array[10182] = true; instance.array[10587] = true; instance.array[10589] = true; instance.array[10591] = true; instance.array[10593] = true; instance.array[10595] = true; instance.array[10597] = true; instance.array[10599] = true; instance.array[10601] = true; instance.array[10603] = true; instance.array[10605] = true; instance.array[10607] = true; instance.array[10672] = true; instance.array[10674] = true; instance.array[10708] = true; instance.array[11770] = true; instance.array[11772] = true; instance.array[11774] = true; instance.array[11776] = true; instance.array[11802] = true; instance.array[12256] = true; instance.array[12258] = true; instance.array[12260] = true; instance.array[12262] = true; instance.array[12264] = true; instance.array[12268] = true; instance.array[12270] = true; instance.array[12272] = true; instance.array[12274] = true; instance.array[12277] = true; instance.array[64791] = true; instance.array[65007] = true; instance.array[65037] = true; instance.array[65039] = true; instance.array[65041] = true; instance.array[65043] = true; instance.array[65045] = true; instance.array[65047] = true; instance.array[65049] = true; instance.array[65051] = true; instance.array[65055] = true; instance.array[65073] = true; instance.array[65075] = true; instance.array[65077] = true; instance.array[65248] = true; instance.array[65299] = true; instance.array[65331] = true; instance.array[65335] = true; instance.array[65338] = true; // Placeholder: 0. Struct name, 1. Code point kind return instance; } pub fn deinit(self: *OpenPunctuation) void { self.allocator.free(self.array); } // isOpenPunctuation checks if cp is of the kind Open_Punctuation. pub fn isOpenPunctuation(self: OpenPunctuation, cp: u21) bool { if (cp < self.lo or cp > self.hi) return false; const index = cp - self.lo; return if (index >= self.array.len) false else self.array[index]; }
src/components/autogen/DerivedGeneralCategory/OpenPunctuation.zig
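A minimal usage sketch for the generated OpenPunctuation table above. The import path and allocator setup are assumptions (Zig 0.8-era std, where allocators are passed as *mem.Allocator, matching the file):

const std = @import("std");
// Hypothetical relative import; in the source tree the file sits at
// src/components/autogen/DerivedGeneralCategory/OpenPunctuation.zig.
const OpenPunctuation = @import("OpenPunctuation.zig");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var open_punct = try OpenPunctuation.init(&gpa.allocator);
    defer open_punct.deinit();

    // '(' (U+0028) is Open_Punctuation, 'a' (U+0061) is not.
    std.debug.print("{} {}\n", .{ open_punct.isOpenPunctuation('('), open_punct.isOpenPunctuation('a') });
}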
const std = @import("std"); const mem = std.mem; const OtherMath = @This(); allocator: *mem.Allocator, array: []bool, lo: u21 = 94, hi: u21 = 126651, pub fn init(allocator: *mem.Allocator) !OtherMath { var instance = OtherMath{ .allocator = allocator, .array = try allocator.alloc(bool, 126558), }; mem.set(bool, instance.array, false); var index: u21 = 0; instance.array[0] = true; index = 882; while (index <= 884) : (index += 1) { instance.array[index] = true; } instance.array[887] = true; index = 914; while (index <= 915) : (index += 1) { instance.array[index] = true; } index = 918; while (index <= 919) : (index += 1) { instance.array[index] = true; } instance.array[8120] = true; index = 8148; while (index <= 8150) : (index += 1) { instance.array[index] = true; } instance.array[8162] = true; index = 8195; while (index <= 8198) : (index += 1) { instance.array[index] = true; } instance.array[8223] = true; instance.array[8224] = true; instance.array[8239] = true; instance.array[8240] = true; index = 8306; while (index <= 8318) : (index += 1) { instance.array[index] = true; } instance.array[8323] = true; index = 8327; while (index <= 8328) : (index += 1) { instance.array[index] = true; } index = 8333; while (index <= 8337) : (index += 1) { instance.array[index] = true; } instance.array[8356] = true; instance.array[8361] = true; index = 8364; while (index <= 8373) : (index += 1) { instance.array[index] = true; } instance.array[8375] = true; index = 8379; while (index <= 8383) : (index += 1) { instance.array[index] = true; } instance.array[8390] = true; instance.array[8394] = true; instance.array[8395] = true; index = 8398; while (index <= 8399) : (index += 1) { instance.array[index] = true; } index = 8401; while (index <= 8403) : (index += 1) { instance.array[index] = true; } index = 8405; while (index <= 8406) : (index += 1) { instance.array[index] = true; } index = 8407; while (index <= 8410) : (index += 1) { instance.array[index] = true; } index = 8414; while (index <= 8417) : (index += 1) { instance.array[index] = true; } index = 8423; while (index <= 8427) : (index += 1) { instance.array[index] = true; } index = 8503; while (index <= 8507) : (index += 1) { instance.array[index] = true; } index = 8510; while (index <= 8513) : (index += 1) { instance.array[index] = true; } index = 8515; while (index <= 8516) : (index += 1) { instance.array[index] = true; } index = 8518; while (index <= 8519) : (index += 1) { instance.array[index] = true; } instance.array[8521] = true; index = 8523; while (index <= 8527) : (index += 1) { instance.array[index] = true; } index = 8530; while (index <= 8531) : (index += 1) { instance.array[index] = true; } index = 8536; while (index <= 8537) : (index += 1) { instance.array[index] = true; } index = 8542; while (index <= 8559) : (index += 1) { instance.array[index] = true; } index = 8562; while (index <= 8563) : (index += 1) { instance.array[index] = true; } instance.array[8565] = true; index = 8567; while (index <= 8573) : (index += 1) { instance.array[index] = true; } instance.array[8575] = true; index = 8582; while (index <= 8583) : (index += 1) { instance.array[index] = true; } instance.array[8874] = true; instance.array[8875] = true; instance.array[8876] = true; instance.array[8877] = true; index = 9046; while (index <= 9047) : (index += 1) { instance.array[index] = true; } instance.array[9049] = true; instance.array[9074] = true; instance.array[9092] = true; index = 9538; while (index <= 9539) : (index += 1) { instance.array[index] = true; } index = 9552; 
while (index <= 9560) : (index += 1) { instance.array[index] = true; } index = 9566; while (index <= 9570) : (index += 1) { instance.array[index] = true; } index = 9576; while (index <= 9577) : (index += 1) { instance.array[index] = true; } index = 9580; while (index <= 9581) : (index += 1) { instance.array[index] = true; } index = 9585; while (index <= 9589) : (index += 1) { instance.array[index] = true; } instance.array[9604] = true; instance.array[9606] = true; index = 9609; while (index <= 9614) : (index += 1) { instance.array[index] = true; } index = 9639; while (index <= 9640) : (index += 1) { instance.array[index] = true; } instance.array[9698] = true; instance.array[9700] = true; index = 9730; while (index <= 9733) : (index += 1) { instance.array[index] = true; } index = 9743; while (index <= 9744) : (index += 1) { instance.array[index] = true; } instance.array[10087] = true; instance.array[10088] = true; instance.array[10120] = true; instance.array[10121] = true; instance.array[10122] = true; instance.array[10123] = true; instance.array[10124] = true; instance.array[10125] = true; instance.array[10126] = true; instance.array[10127] = true; instance.array[10128] = true; instance.array[10129] = true; instance.array[10533] = true; instance.array[10534] = true; instance.array[10535] = true; instance.array[10536] = true; instance.array[10537] = true; instance.array[10538] = true; instance.array[10539] = true; instance.array[10540] = true; instance.array[10541] = true; instance.array[10542] = true; instance.array[10543] = true; instance.array[10544] = true; instance.array[10545] = true; instance.array[10546] = true; instance.array[10547] = true; instance.array[10548] = true; instance.array[10549] = true; instance.array[10550] = true; instance.array[10551] = true; instance.array[10552] = true; instance.array[10553] = true; instance.array[10554] = true; instance.array[10618] = true; instance.array[10619] = true; instance.array[10620] = true; instance.array[10621] = true; instance.array[10654] = true; instance.array[10655] = true; instance.array[65027] = true; instance.array[65029] = true; instance.array[65034] = true; instance.array[65246] = true; instance.array[65248] = true; index = 119714; while (index <= 119798) : (index += 1) { instance.array[index] = true; } index = 119800; while (index <= 119870) : (index += 1) { instance.array[index] = true; } index = 119872; while (index <= 119873) : (index += 1) { instance.array[index] = true; } instance.array[119876] = true; index = 119879; while (index <= 119880) : (index += 1) { instance.array[index] = true; } index = 119883; while (index <= 119886) : (index += 1) { instance.array[index] = true; } index = 119888; while (index <= 119899) : (index += 1) { instance.array[index] = true; } instance.array[119901] = true; index = 119903; while (index <= 119909) : (index += 1) { instance.array[index] = true; } index = 119911; while (index <= 119975) : (index += 1) { instance.array[index] = true; } index = 119977; while (index <= 119980) : (index += 1) { instance.array[index] = true; } index = 119983; while (index <= 119990) : (index += 1) { instance.array[index] = true; } index = 119992; while (index <= 119998) : (index += 1) { instance.array[index] = true; } index = 120000; while (index <= 120027) : (index += 1) { instance.array[index] = true; } index = 120029; while (index <= 120032) : (index += 1) { instance.array[index] = true; } index = 120034; while (index <= 120038) : (index += 1) { instance.array[index] = true; } instance.array[120040] = 
true; index = 120044; while (index <= 120050) : (index += 1) { instance.array[index] = true; } index = 120052; while (index <= 120391) : (index += 1) { instance.array[index] = true; } index = 120394; while (index <= 120418) : (index += 1) { instance.array[index] = true; } index = 120420; while (index <= 120444) : (index += 1) { instance.array[index] = true; } index = 120446; while (index <= 120476) : (index += 1) { instance.array[index] = true; } index = 120478; while (index <= 120502) : (index += 1) { instance.array[index] = true; } index = 120504; while (index <= 120534) : (index += 1) { instance.array[index] = true; } index = 120536; while (index <= 120560) : (index += 1) { instance.array[index] = true; } index = 120562; while (index <= 120592) : (index += 1) { instance.array[index] = true; } index = 120594; while (index <= 120618) : (index += 1) { instance.array[index] = true; } index = 120620; while (index <= 120650) : (index += 1) { instance.array[index] = true; } index = 120652; while (index <= 120676) : (index += 1) { instance.array[index] = true; } index = 120678; while (index <= 120685) : (index += 1) { instance.array[index] = true; } index = 120688; while (index <= 120737) : (index += 1) { instance.array[index] = true; } index = 126370; while (index <= 126373) : (index += 1) { instance.array[index] = true; } index = 126375; while (index <= 126401) : (index += 1) { instance.array[index] = true; } index = 126403; while (index <= 126404) : (index += 1) { instance.array[index] = true; } instance.array[126406] = true; instance.array[126409] = true; index = 126411; while (index <= 126420) : (index += 1) { instance.array[index] = true; } index = 126422; while (index <= 126425) : (index += 1) { instance.array[index] = true; } instance.array[126427] = true; instance.array[126429] = true; instance.array[126436] = true; instance.array[126441] = true; instance.array[126443] = true; instance.array[126445] = true; index = 126447; while (index <= 126449) : (index += 1) { instance.array[index] = true; } index = 126451; while (index <= 126452) : (index += 1) { instance.array[index] = true; } instance.array[126454] = true; instance.array[126457] = true; instance.array[126459] = true; instance.array[126461] = true; instance.array[126463] = true; instance.array[126465] = true; index = 126467; while (index <= 126468) : (index += 1) { instance.array[index] = true; } instance.array[126470] = true; index = 126473; while (index <= 126476) : (index += 1) { instance.array[index] = true; } index = 126478; while (index <= 126484) : (index += 1) { instance.array[index] = true; } index = 126486; while (index <= 126489) : (index += 1) { instance.array[index] = true; } index = 126491; while (index <= 126494) : (index += 1) { instance.array[index] = true; } instance.array[126496] = true; index = 126498; while (index <= 126507) : (index += 1) { instance.array[index] = true; } index = 126509; while (index <= 126525) : (index += 1) { instance.array[index] = true; } index = 126531; while (index <= 126533) : (index += 1) { instance.array[index] = true; } index = 126535; while (index <= 126539) : (index += 1) { instance.array[index] = true; } index = 126541; while (index <= 126557) : (index += 1) { instance.array[index] = true; } // Placeholder: 0. Struct name, 1. Code point kind return instance; } pub fn deinit(self: *OtherMath) void { self.allocator.free(self.array); } // isOtherMath checks if cp is of the kind Other_Math. 
pub fn isOtherMath(self: OtherMath, cp: u21) bool { if (cp < self.lo or cp > self.hi) return false; const index = cp - self.lo; return if (index >= self.array.len) false else self.array[index]; }
src/components/autogen/PropList/OtherMath.zig
const std = @import("std"); const pkgs = @import("deps.zig").pkgs; const mem = std.mem; fn detectWasmerLibDir(b: *std.build.Builder) ?[]const u8 { const argv = &[_][]const u8{ "wasmer", "config", "--libdir" }; const result = std.ChildProcess.exec(.{ .allocator = b.allocator, .argv = argv, }) catch return null; if (result.stderr.len != 0 or result.term.Exited != 0) return null; const lib_dir = mem.trimRight(u8, result.stdout, "\r\n"); return lib_dir; } pub fn build(b: *std.build.Builder) !void { // Standard release options allow the person running `zig build` to select // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. const mode = b.standardReleaseOptions(); // Try detecting Wasmer lib dir. const wasmer_lib_dir = detectWasmerLibDir(b); const lib = b.addStaticLibrary("wasmer-zig", "src/main.zig"); lib.setBuildMode(mode); lib.addPackage(pkgs.wasm); lib.install(); var main_tests = b.addTest("src/main.zig"); main_tests.setBuildMode(mode); main_tests.addPackage(pkgs.wasm); if (wasmer_lib_dir) |lib_dir| { main_tests.addRPath(lib_dir); main_tests.addLibPath(lib_dir); } main_tests.linkSystemLibrary("wasmer"); main_tests.linkLibC(); const test_step = b.step("test", "Run library tests"); test_step.dependOn(&main_tests.step); const example = b.option([]const u8, "example", "Specify example to run from examples/ dir"); const example_path = example_path: { const basename = example orelse "instance"; const with_ext = try std.fmt.allocPrint(b.allocator, "{s}.zig", .{basename}); const full_path = try std.fs.path.join(b.allocator, &[_][]const u8{ "examples", with_ext }); break :example_path full_path; }; const executable = b.addExecutable(example orelse "instance", example_path); executable.setBuildMode(mode); executable.addPackage(.{ .name = "wasmer", .path = .{.path = "src/main.zig"}, .dependencies = &.{pkgs.wasm}, }); if (wasmer_lib_dir) |lib_dir| { executable.addRPath(lib_dir); executable.addLibPath(lib_dir); } executable.linkSystemLibrary("wasmer"); executable.linkLibC(); executable.step.dependOn(b.getInstallStep()); const run_executable = executable.run(); const run_step = b.step("run", "Run an example specified with -Dexample (defaults to examples/instance.zig)"); run_step.dependOn(&run_executable.step); }
build.zig
const xcb = @import("../xcb.zig"); pub const id = xcb.Extension{ .name = "DRI2", .global_id = 0 }; pub const Attachment = extern enum(c_uint) { @"BufferFrontLeft" = 0, @"BufferBackLeft" = 1, @"BufferFrontRight" = 2, @"BufferBackRight" = 3, @"BufferDepth" = 4, @"BufferStencil" = 5, @"BufferAccum" = 6, @"BufferFakeFrontLeft" = 7, @"BufferFakeFrontRight" = 8, @"BufferDepthStencil" = 9, @"BufferHiz" = 10, }; pub const DriverType = extern enum(c_uint) { @"DRI" = 0, @"VDPAU" = 1, }; pub const EventType = extern enum(c_uint) { @"ExchangeComplete" = 1, @"BlitComplete" = 2, @"FlipComplete" = 3, }; /// @brief DRI2Buffer pub const DRI2Buffer = struct { @"attachment": u32, @"name": u32, @"pitch": u32, @"cpp": u32, @"flags": u32, }; /// @brief AttachFormat pub const AttachFormat = struct { @"attachment": u32, @"format": u32, }; /// @brief QueryVersioncookie pub const QueryVersioncookie = struct { sequence: c_uint, }; /// @brief QueryVersionRequest pub const QueryVersionRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 0, @"length": u16, @"major_version": u32, @"minor_version": u32, }; /// @brief QueryVersionReply pub const QueryVersionReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"major_version": u32, @"minor_version": u32, }; /// @brief Connectcookie pub const Connectcookie = struct { sequence: c_uint, }; /// @brief ConnectRequest pub const ConnectRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 1, @"length": u16, @"window": xcb.WINDOW, @"driver_type": u32, }; /// @brief ConnectReply pub const ConnectReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"driver_name_length": u32, @"device_name_length": u32, @"pad1": [16]u8, @"driver_name": []u8, @"alignment_pad": []u8, @"device_name": []u8, }; /// @brief Authenticatecookie pub const Authenticatecookie = struct { sequence: c_uint, }; /// @brief AuthenticateRequest pub const AuthenticateRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 2, @"length": u16, @"window": xcb.WINDOW, @"magic": u32, }; /// @brief AuthenticateReply pub const AuthenticateReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"authenticated": u32, }; /// @brief CreateDrawableRequest pub const CreateDrawableRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 3, @"length": u16, @"drawable": xcb.DRAWABLE, }; /// @brief DestroyDrawableRequest pub const DestroyDrawableRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 4, @"length": u16, @"drawable": xcb.DRAWABLE, }; /// @brief GetBufferscookie pub const GetBufferscookie = struct { sequence: c_uint, }; /// @brief GetBuffersRequest pub const GetBuffersRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 5, @"length": u16, @"drawable": xcb.DRAWABLE, @"count": u32, @"attachments": []const u32, }; /// @brief GetBuffersReply pub const GetBuffersReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"width": u32, @"height": u32, @"count": u32, @"pad1": [12]u8, @"buffers": []xcb.dri2.DRI2Buffer, }; /// @brief CopyRegioncookie pub const CopyRegioncookie = struct { sequence: c_uint, }; /// @brief CopyRegionRequest pub const CopyRegionRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 6, @"length": u16, @"drawable": xcb.DRAWABLE, @"region": u32, @"dest": u32, @"src": u32, }; /// @brief CopyRegionReply pub const CopyRegionReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, }; /// @brief 
GetBuffersWithFormatcookie pub const GetBuffersWithFormatcookie = struct { sequence: c_uint, }; /// @brief GetBuffersWithFormatRequest pub const GetBuffersWithFormatRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 7, @"length": u16, @"drawable": xcb.DRAWABLE, @"count": u32, @"attachments": []const xcb.dri2.AttachFormat, }; /// @brief GetBuffersWithFormatReply pub const GetBuffersWithFormatReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"width": u32, @"height": u32, @"count": u32, @"pad1": [12]u8, @"buffers": []xcb.dri2.DRI2Buffer, }; /// @brief SwapBufferscookie pub const SwapBufferscookie = struct { sequence: c_uint, }; /// @brief SwapBuffersRequest pub const SwapBuffersRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 8, @"length": u16, @"drawable": xcb.DRAWABLE, @"target_msc_hi": u32, @"target_msc_lo": u32, @"divisor_hi": u32, @"divisor_lo": u32, @"remainder_hi": u32, @"remainder_lo": u32, }; /// @brief SwapBuffersReply pub const SwapBuffersReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"swap_hi": u32, @"swap_lo": u32, }; /// @brief GetMSCcookie pub const GetMSCcookie = struct { sequence: c_uint, }; /// @brief GetMSCRequest pub const GetMSCRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 9, @"length": u16, @"drawable": xcb.DRAWABLE, }; /// @brief GetMSCReply pub const GetMSCReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ust_hi": u32, @"ust_lo": u32, @"msc_hi": u32, @"msc_lo": u32, @"sbc_hi": u32, @"sbc_lo": u32, }; /// @brief WaitMSCcookie pub const WaitMSCcookie = struct { sequence: c_uint, }; /// @brief WaitMSCRequest pub const WaitMSCRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 10, @"length": u16, @"drawable": xcb.DRAWABLE, @"target_msc_hi": u32, @"target_msc_lo": u32, @"divisor_hi": u32, @"divisor_lo": u32, @"remainder_hi": u32, @"remainder_lo": u32, }; /// @brief WaitMSCReply pub const WaitMSCReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ust_hi": u32, @"ust_lo": u32, @"msc_hi": u32, @"msc_lo": u32, @"sbc_hi": u32, @"sbc_lo": u32, }; /// @brief WaitSBCcookie pub const WaitSBCcookie = struct { sequence: c_uint, }; /// @brief WaitSBCRequest pub const WaitSBCRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 11, @"length": u16, @"drawable": xcb.DRAWABLE, @"target_sbc_hi": u32, @"target_sbc_lo": u32, }; /// @brief WaitSBCReply pub const WaitSBCReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ust_hi": u32, @"ust_lo": u32, @"msc_hi": u32, @"msc_lo": u32, @"sbc_hi": u32, @"sbc_lo": u32, }; /// @brief SwapIntervalRequest pub const SwapIntervalRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 12, @"length": u16, @"drawable": xcb.DRAWABLE, @"interval": u32, }; /// @brief GetParamcookie pub const GetParamcookie = struct { sequence: c_uint, }; /// @brief GetParamRequest pub const GetParamRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 13, @"length": u16, @"drawable": xcb.DRAWABLE, @"param": u32, }; /// @brief GetParamReply pub const GetParamReply = struct { @"response_type": u8, @"is_param_recognized": u8, @"sequence": u16, @"length": u32, @"value_hi": u32, @"value_lo": u32, }; /// Opcode for BufferSwapComplete. 
pub const BufferSwapCompleteOpcode = 0; /// @brief BufferSwapCompleteEvent pub const BufferSwapCompleteEvent = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"event_type": u16, @"pad1": [2]u8, @"drawable": xcb.DRAWABLE, @"ust_hi": u32, @"ust_lo": u32, @"msc_hi": u32, @"msc_lo": u32, @"sbc": u32, }; /// Opcode for InvalidateBuffers. pub const InvalidateBuffersOpcode = 1; /// @brief InvalidateBuffersEvent pub const InvalidateBuffersEvent = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"drawable": xcb.DRAWABLE, }; test "" { @import("std").testing.refAllDecls(@This()); }
src/auto/dri2.zig
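To illustrate how these generated request structs are meant to be filled in, a small hedged sketch that relies only on the declarations above; the import path and the field values (other than the fixed minor_opcode default) are made up:

const std = @import("std");
const dri2 = @import("dri2.zig"); // hypothetical relative import

test "build a DRI2 QueryVersion request" {
    // minor_opcode defaults to 0 as declared above; major_opcode is normally
    // filled in with the extension's opcode obtained via QueryExtension.
    const req = dri2.QueryVersionRequest{
        .major_opcode = 0, // placeholder value
        .length = 3,
        .major_version = 1,
        .minor_version = 4,
    };
    std.debug.assert(req.minor_opcode == 0);
}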
const std = @import("std"); const builtin = @import("builtin"); const version = @import("version"); const uri = @import("uri"); const assert = std.debug.assert; pub const Size = struct { rows: usize, cols: usize, }; pub const Source = union(enum) { const Git = struct { url: []const u8, commit: []const u8, }; git: Git, sub: Git, pkg: struct { repository: []const u8, user: []const u8, name: []const u8, semver: version.Semver, }, url: []const u8, }; const EntryUpdate = union(enum) { progress: Progress, err: void, }; const Progress = struct { current: usize, total: usize, }; const Entry = struct { tag: []const u8, label: []const u8, version: []const u8, progress: Progress, err: bool, }; const UpdateState = struct { current_len: usize, entries: std.ArrayList(Entry), progress: std.AutoHashMap(usize, Progress), errors: std.AutoHashMap(usize, void), new_size: ?Size, fn init(allocator: *std.mem.Allocator) UpdateState { return UpdateState{ .current_len = 0, .entries = std.ArrayList(Entry).init(allocator), .progress = std.AutoHashMap(usize, Progress).init(allocator), .errors = std.AutoHashMap(usize, void).init(allocator), .new_size = null, }; } fn deinit(self: *UpdateState) void { self.entries.deinit(); self.progress.deinit(); self.errors.deinit(); } fn hasChanges(self: UpdateState) bool { return self.new_size != null or self.entries.items.len > 0 or self.progress.count() > 0 or self.errors.count() > 0; } fn clear(self: *UpdateState) void { self.entries.clearRetainingCapacity(); self.progress.clearRetainingCapacity(); self.errors.clearRetainingCapacity(); self.new_size = null; } }; const Self = @This(); mode: union(enum) { direct_log: void, ansi: struct { allocator: *std.mem.Allocator, arena: std.heap.ArenaAllocator, entries: std.ArrayList(Entry), logs: std.ArrayList([]const u8), depth: usize, size: Size, running: std.atomic.Atomic(bool), mtx: std.Thread.Mutex, logs_mtx: std.Thread.Mutex, render_thread: std.Thread, // state maps that get swapped collector: *UpdateState, scratchpad: *UpdateState, fifo: std.fifo.LinearFifo(u8, .{ .Dynamic = {} }), }, }, pub fn init(location: *Self, allocator: *std.mem.Allocator) !void { var size = Size{ .rows = 24, .cols = 80, }; switch (builtin.target.os.tag) { .windows => { const c = @cImport({ @cInclude("windows.h"); }); var csbi: c.CONSOLE_SCREEN_BUFFER_INFO = undefined; if (0 == c.GetConsoleScreenBufferInfo(c.GetStdHandle(c.STD_OUTPUT_HANDLE), &csbi) or std.process.hasEnvVarConstant("GYRO_DIRECT_LOG")) { location.* = Self{ .mode = .{ .direct_log = {} } }; return; } size.rows = @intCast(usize, csbi.srWindow.Bottom - csbi.srWindow.Top + 1); size.cols = @intCast(usize, csbi.srWindow.Right - csbi.srWindow.Left + 1); }, else => { const c = @cImport({ @cInclude("sys/ioctl.h"); @cInclude("unistd.h"); }); var winsize: c.winsize = undefined; const rc = c.ioctl(0, c.TIOCGWINSZ, &winsize); if (rc != 0 or c.isatty(std.io.getStdOut().handle) != 1 or std.process.hasEnvVarConstant("GYRO_DIRECT_LOG")) { location.* = Self{ .mode = .{ .direct_log = {} } }; return; } size.rows = winsize.ws_row; size.cols = winsize.ws_col; }, } const collector = try allocator.create(UpdateState); errdefer allocator.destroy(collector); const scratchpad = try allocator.create(UpdateState); errdefer allocator.destroy(scratchpad); collector.* = UpdateState.init(allocator); scratchpad.* = UpdateState.init(allocator); location.* = Self{ .mode = .{ .ansi = .{ .allocator = allocator, .arena = std.heap.ArenaAllocator.init(allocator), .running = std.atomic.Atomic(bool).init(true), .mtx = std.Thread.Mutex{}, 
.logs_mtx = std.Thread.Mutex{}, .render_thread = try std.Thread.spawn(.{}, renderTask, .{location}), .entries = std.ArrayList(Entry).init(allocator), .logs = std.ArrayList([]const u8).init(allocator), .size = size, .depth = 0, .collector = collector, .scratchpad = scratchpad, .fifo = std.fifo.LinearFifo(u8, .{ .Dynamic = {} }).init(allocator), }, }, }; } pub fn deinit(self: *Self) void { switch (self.mode) { .direct_log => {}, .ansi => |*ansi| { ansi.running.store(false, .SeqCst); ansi.render_thread.join(); const stderr = std.io.getStdErr().writer(); if (ansi.logs.items.len > 1) { stderr.writeByteNTimes('-', ansi.size.cols) catch {}; stderr.writeByte('\n') catch {}; stderr.writeAll("logs captured during fetch:\n") catch {}; } for (ansi.logs.items) |msg| stderr.writeAll(msg) catch continue; ansi.entries.deinit(); ansi.collector.deinit(); ansi.scratchpad.deinit(); ansi.allocator.destroy(ansi.collector); ansi.allocator.destroy(ansi.scratchpad); ansi.fifo.deinit(); ansi.arena.deinit(); }, } } pub fn log( self: *Self, comptime level: std.log.Level, comptime scope: @TypeOf(.EnumLiteral), comptime format: []const u8, args: anytype, ) void { switch (self.mode) { .direct_log => std.log.defaultLog(level, scope, format, args), .ansi => |*ansi| { const level_txt = comptime level.asText(); const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): "; const message = std.fmt.allocPrint(ansi.allocator, level_txt ++ prefix2 ++ format ++ "\n", args) catch return; ansi.logs_mtx.lock(); defer ansi.logs_mtx.unlock(); ansi.logs.append(message) catch {}; }, } } fn entryFromGit( self: *Self, tag: []const u8, url: []const u8, commit: []const u8, ) !Entry { const link = try uri.parse(url); const begin = if (link.scheme) |scheme| scheme.len + 3 else 0; const end_offset: usize = if (std.mem.endsWith(u8, url, ".git")) 4 else 0; return Entry{ .tag = tag, .label = try self.mode.ansi.arena.allocator.dupe(u8, url[begin .. 
url.len - end_offset]), .version = try self.mode.ansi.arena.allocator.dupe(u8, commit[0..std.math.min(commit.len, 8)]), .progress = .{ .current = 0, .total = 1, }, .err = false, }; } pub fn createEntry(self: *Self, source: Source) !usize { switch (self.mode) { .direct_log => { switch (source) { .git => |git| std.log.info("cloning {s} {s}", .{ git.url, git.commit[0..std.math.min(git.commit.len, 8)], }), .sub => |sub| std.log.info("cloning submodule {s}", .{ sub.url, }), .pkg => |pkg| std.log.info("fetching package {s}/{s}/{s}", .{ pkg.repository, pkg.user, pkg.name, }), .url => |url| std.log.info("fetching tarball {s}", .{ url, }), } return 0; }, .ansi => |*ansi| { const allocator = &ansi.arena.allocator; const new_entry = switch (source) { .git => |git| try self.entryFromGit("git", git.url, git.commit), .sub => |sub| try self.entryFromGit("sub", sub.url, sub.commit), .pkg => |pkg| Entry{ .tag = "pkg", .label = try std.fmt.allocPrint(allocator, "{s}/{s}/{s}", .{ pkg.repository, pkg.user, pkg.name, }), .version = try std.fmt.allocPrint(allocator, "{}", .{pkg.semver}), .progress = .{ .current = 0, .total = 1, }, .err = false, }, .url => |url| Entry{ .tag = "url", .label = try ansi.allocator.dupe(u8, url), .version = "", .progress = .{ .current = 0, .total = 1, }, .err = false, }, }; ansi.mtx.lock(); defer ansi.mtx.unlock(); try ansi.collector.entries.append(new_entry); return ansi.collector.current_len + ansi.collector.entries.items.len - 1; }, } } pub fn updateEntry(self: *Self, handle: usize, update: EntryUpdate) !void { switch (self.mode) { .direct_log => {}, .ansi => |*ansi| { ansi.mtx.lock(); defer ansi.mtx.unlock(); switch (update) { .progress => |p| try ansi.collector.progress.put(handle, p), .err => try ansi.collector.errors.put(handle, {}), } }, } } pub fn updateSize(self: *Self, new_size: Size) void { switch (self.mode) { .direct_log => {}, .ansi => |*ansi| { ansi.mtx.lock(); defer ansi.mtx.unlock(); ansi.collector.new_size = new_size; }, } } fn updateState(self: *Self) !void { switch (self.mode) { .direct_log => unreachable, .ansi => |*ansi| { try ansi.entries.appendSlice(ansi.scratchpad.entries.items); if (ansi.scratchpad.new_size) |new_size| ansi.size = new_size; { var it = ansi.scratchpad.progress.iterator(); while (it.next()) |entry| { const idx = entry.key_ptr.*; assert(idx <= ansi.entries.items.len); ansi.entries.items[idx].progress = entry.value_ptr.*; } } { var it = ansi.scratchpad.errors.iterator(); while (it.next()) |entry| { const idx = entry.key_ptr.*; ansi.entries.items[idx].err = true; } } }, } } fn renderTask(self: *Self) !void { const stdout = std.io.getStdOut().writer(); var done = false; while (!done) : (std.time.sleep(std.time.ns_per_s * 0.1)) { if (!self.mode.ansi.running.load(.SeqCst)) done = true; { self.mode.ansi.mtx.lock(); defer self.mode.ansi.mtx.unlock(); self.mode.ansi.scratchpad.current_len = self.mode.ansi.collector.current_len + self.mode.ansi.collector.entries.items.len; std.mem.swap(UpdateState, self.mode.ansi.collector, self.mode.ansi.scratchpad); } try self.updateState(); if (self.mode.ansi.entries.items.len > 0 and (self.mode.ansi.scratchpad.hasChanges() or self.mode.ansi.depth != self.mode.ansi.entries.items.len)) { try self.render(stdout); } self.mode.ansi.scratchpad.clear(); } } fn drawBar(writer: anytype, width: usize, percent: usize) !void { if (width < 3) { try writer.writeByteNTimes(' ', width); return; } const bar_width = width - 2; const cells = std.math.min(percent * bar_width / 100, bar_width); try writer.writeByte('['); try 
writer.writeByteNTimes('#', cells); try writer.writeByteNTimes(' ', bar_width - cells); try writer.writeByte(']'); } fn render(self: *Self, stdout: anytype) !void { switch (self.mode) { .direct_log => unreachable, .ansi => |*ansi| { const writer = ansi.fifo.writer(); defer { ansi.fifo.count = 0; ansi.fifo.head = 0; } const spacing = 20; const short_mode = ansi.size.cols < 50; // calculations const version_width = 8; const variable = ansi.size.cols -| 26; const label_width = if (variable < spacing) variable else spacing + ((variable - spacing) / 2); const bar_width = if (variable < spacing) 0 else ((variable - spacing) / 2) + if (variable % 2 == 1) @as(usize, 1) else 0; if (ansi.depth < ansi.entries.items.len) { try writer.writeByteNTimes('\n', ansi.entries.items.len - ansi.depth); ansi.depth = ansi.entries.items.len; } // up n lines at beginning try writer.print("\x1b[{}F", .{ansi.depth}); for (ansi.entries.items) |entry| { if (short_mode) { if (entry.err) try writer.writeAll("\x1b[31m"); try writer.writeAll(entry.label[0..std.math.min(entry.label.len, ansi.size.cols)]); if (entry.err) { try writer.writeAll("\x1b[0m"); } try writer.writeAll("\x1b[1B\x0d"); continue; } const percent = std.math.min(entry.progress.current * 100 / entry.progress.total, 100); if (entry.err) try writer.writeAll("\x1b[31m"); try writer.print("{s} ", .{entry.tag}); if (entry.label.len > label_width) { try writer.writeAll(entry.label[0 .. label_width - 3]); try writer.writeAll("..."); } else { try writer.writeAll(entry.label); try writer.writeByteNTimes(' ', label_width - entry.label.len); } try writer.writeByte(' '); try writer.writeAll(entry.version[0..std.math.min(version_width, entry.version.len)]); if (entry.version.len < label_width) try writer.writeByteNTimes(' ', version_width - entry.version.len); try writer.writeByte(' '); try drawBar(writer, bar_width, percent); try writer.print(" {: >3}%", .{percent}); if (entry.err) { try writer.writeAll(" ERROR \x1b[0m"); } else { try writer.writeByteNTimes(' ', 7); } try writer.writeAll("\x1b[1E"); } try stdout.writeAll(ansi.fifo.readableSlice(0)); }, } }
src/Display.zig
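The column arithmetic in render is easiest to sanity-check through drawBar. A hedged sketch (assumes a Zig 0.9-era std) that mirrors the private drawBar helper above so it can be exercised in isolation:

const std = @import("std");

// Mirror of Display.zig's private drawBar: '[' + filled cells + padding + ']'.
fn drawBar(writer: anytype, width: usize, percent: usize) !void {
    if (width < 3) {
        try writer.writeByteNTimes(' ', width);
        return;
    }
    const bar_width = width - 2;
    const cells = std.math.min(percent * bar_width / 100, bar_width);
    try writer.writeByte('[');
    try writer.writeByteNTimes('#', cells);
    try writer.writeByteNTimes(' ', bar_width - cells);
    try writer.writeByte(']');
}

test "a 10-column bar at 50% fills half of its 8 cells" {
    var buf: [16]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try drawBar(fbs.writer(), 10, 50);
    try std.testing.expectEqualStrings("[####    ]", fbs.getWritten());
}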
const uefi = @import("std").os.uefi; const elf = @import("std").elf; const console = @import("./console.zig"); const bootstrap = @import("./uefi_bootstrap.zig"); // Docs: https://github.com/ziglang/zig/blob/master/lib/std/elf.zig // https://github.com/ziglang/zig/blob/master/lib/std/os/uefi/protocols/file_protocol.zig pub fn load_kernel_image( file_system: *uefi.protocols.FileProtocol, file_path: [*:0]const u16, base_physical_address: u64, kernel_entry_point: *u64, kernel_start_address: *u64, ) uefi.Status { var kernel_img_file: *uefi.protocols.FileProtocol = undefined; var result = file_system.open(&kernel_img_file, file_path, uefi.protocols.FileProtocol.efi_file_mode_read, uefi.protocols.FileProtocol.efi_file_read_only); if (result != uefi.Status.Success) { return result; } console.puts(" -> file found, validating identity..."); // load enough bytes to identify the file (EI_NIDENT) var header_buffer: [*]align(8) u8 = undefined; result = read_and_allocate(kernel_img_file, 0, elf.EI_NIDENT, &header_buffer); if (result != uefi.Status.Success) { return result; } // check magic header is an elf file if((header_buffer[0] != 0x7F) or (header_buffer[1] != 0x45) or (header_buffer[2] != 0x4C) or (header_buffer[3] != 0x46)) { return uefi.Status.InvalidParameter; } // check we're loading a 64 bit little-endian binary if(header_buffer[elf.EI_CLASS] != elf.ELFCLASS64) { return uefi.Status.Unsupported; } if(header_buffer[elf.EI_DATA] != elf.ELFDATA2LSB) { return uefi.Status.IncompatibleVersion; } // free the identity buffer result = bootstrap.boot_services.freePool(header_buffer); if (result != uefi.Status.Success) { return result; } console.puts(" [done]\r\n"); // Load the elf header console.puts(" -> loading elf header..."); result = read_and_allocate(kernel_img_file, 0, @sizeOf(elf.Elf64_Ehdr), &header_buffer); if (result != uefi.Status.Success) { return result; } var header = elf.Header.parse(header_buffer[0..64]) catch |err| { switch(err) { error.InvalidElfMagic => { return uefi.Status.InvalidParameter; }, error.InvalidElfVersion => { return uefi.Status.IncompatibleVersion; }, error.InvalidElfEndian => { return uefi.Status.IncompatibleVersion; }, error.InvalidElfClass => { return uefi.Status.Unsupported; } } }; console.puts(" [done]\r\n"); console.printf(" -> found entry point @{}\r\n", .{header.entry}); kernel_entry_point.* = header.entry; // load the program headers console.puts(" -> loading program headers..."); var program_headers_buffer: [*]align(8) u8 = undefined; result = read_and_allocate(kernel_img_file, header.phoff, header.phentsize * header.phnum, &program_headers_buffer); if (result != uefi.Status.Success) { return result; } const program_headers = @ptrCast([*]const elf.Elf64_Phdr, program_headers_buffer); console.puts(" [done]\r\n"); result = load_program_segments(kernel_img_file, &header, program_headers, base_physical_address, kernel_start_address); if (result != uefi.Status.Success) { return result; } // free temporary buffers _ = kernel_img_file.close(); _ = bootstrap.boot_services.freePool(header_buffer); _ = bootstrap.boot_services.freePool(program_headers_buffer); return uefi.Status.Success; } fn read_file(file: *uefi.protocols.FileProtocol, position: u64, size: usize, buffer: *[*]align(8) u8) uefi.Status { var result = file.setPosition(position); if (result != uefi.Status.Success) { return result; } var read_size: usize = size; return file.read(&read_size, buffer.*); } fn read_and_allocate(file: *uefi.protocols.FileProtocol, position: u64, size: usize, buffer: *[*]align(8) u8) uefi.Status { 
var result = bootstrap.boot_services.allocatePool(uefi.tables.MemoryType.LoaderData, size, buffer); if (result != uefi.Status.Success) { return result; } return read_file(file, position, size, buffer); } fn load_program_segments( file: *uefi.protocols.FileProtocol, header: *elf.Header, program_headers: [*]const elf.Elf64_Phdr, base_physical_address: u64, kernel_start_address: *u64, ) uefi.Status { const length = header.phnum; if (length == 0) { console.puts(" -> no program segments found!"); return uefi.Status.InvalidParameter; } console.printf(" -> loading {} program segments: ", .{length}); var result = uefi.Status.Success; var loaded: u64 = 0; var index: u64 = 0; var set_start_address: bool = true; var base_address_difference: u64 = 0; while (index < length) { if (program_headers[index].p_type == elf.PT_LOAD) { console.printf("[{}", .{index}); if (set_start_address) { set_start_address = false; kernel_start_address.* = program_headers[index].p_vaddr; // calculate the difference between virtual and physical addresses // we'll enable virtual addressing before jumping to the kernel base_address_difference = program_headers[index].p_vaddr - base_physical_address; } result = load_segment( file, program_headers[index].p_offset, program_headers[index].p_filesz, program_headers[index].p_memsz, program_headers[index].p_vaddr - base_address_difference, ); if (result != uefi.Status.Success) { return result; } console.puts("]."); loaded += 1; } index += 1; } if (loaded == 0) { return uefi.Status.NotFound; } console.puts("[done]\r\n"); return result; } fn load_segment( file: *uefi.protocols.FileProtocol, file_offset: elf.Elf64_Off, file_size: elf.Elf64_Xword, memory_size: elf.Elf64_Xword, virtual_address: elf.Elf64_Addr ) uefi.Status { var num_pages = size_to_pages(memory_size); console.printf("p({})", .{num_pages}); var seg_buffer: [*]align(4096) u8 = @intToPtr([*]align(4096) u8, virtual_address); var result = bootstrap.boot_services.allocatePages(uefi.tables.AllocateType.AllocateAddress, uefi.tables.MemoryType.LoaderData, num_pages, &seg_buffer); if (result != uefi.Status.Success) { return result; } console.printf("a({})", .{virtual_address}); if(file_size > 0) { // load directly into correct position in memory console.printf("c({})", .{file_size}); result = read_file(file, file_offset, file_size, &seg_buffer); if (result != uefi.Status.Success) { return result; } } // As per ELF Standard, if the size in memory is larger than the file size // the segment is mandated to be zero filled. // For more information on Refer to ELF standard page 34. var zero_fill_start = virtual_address + file_size; var zero_fill_count = memory_size - file_size; if(zero_fill_count > 0) { console.printf("0({})", .{zero_fill_count}); bootstrap.boot_services.setMem(@intToPtr([*]u8, zero_fill_start), zero_fill_count, 0); } return uefi.Status.Success; } fn size_to_pages(bytes: u64) u64 { if ((bytes & 0xFFF) > 0) { return (bytes >> 12) + 1; } else { return bytes >> 12; } }
bootstrap/loader.zig
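The loader's page math is the one piece that is easy to check outside a UEFI environment. A hedged sketch (assumes a Zig 0.9-era std) that restates the private size_to_pages helper and its 4 KiB round-up rule:

const std = @import("std");

// Restates bootstrap/loader.zig's size_to_pages: sizes are rounded up to
// whole 4 KiB (0x1000-byte) pages.
fn sizeToPages(bytes: u64) u64 {
    return if ((bytes & 0xFFF) > 0) (bytes >> 12) + 1 else (bytes >> 12);
}

test "sizes round up to whole 4 KiB pages" {
    try std.testing.expectEqual(@as(u64, 0), sizeToPages(0));
    try std.testing.expectEqual(@as(u64, 1), sizeToPages(1));
    try std.testing.expectEqual(@as(u64, 1), sizeToPages(4096));
    try std.testing.expectEqual(@as(u64, 2), sizeToPages(4097));
}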
const std = @import("std"); const fs = std.fs; const Builder = std.build.Builder; pub const Example = struct { description: ?[]const u8, output: []const u8, input: []const u8, pub fn new(output: []const u8, input: []const u8, desc: ?[]const u8) Example { return Example{ .description = desc, .output = output, .input = input, }; } }; const examples = &[_]Example{ Example.new("simple", "examples/simple.zig", "A simple hello world app"), Example.new("capi", "examples/capi.zig", "Using the C-api directly"), Example.new("image", "examples/image.zig", "Simple image example"), Example.new("input", "examples/input.zig", "Simple input example"), Example.new("mixed", "examples/mixed.zig", "Mixing both c and zig apis"), Example.new("editor", "examples/editor.zig", "More complex example"), Example.new("layout", "examples/layout.zig", "Layout example"), Example.new("valuators", "examples/valuators.zig", "valuators example"), Example.new("channels", "examples/channels.zig", "Use messages to handle events"), Example.new("editormsgs", "examples/editormsgs.zig", "Use messages in the editor example"), Example.new("browser", "examples/browser.zig", "Browser example"), Example.new("svg", "examples/svg.zig", "svg example"), }; pub fn build(b: *Builder) !void { const target = b.standardTargetOptions(.{}); const mode = b.standardReleaseOptions(); _ = fs.cwd().openDir("vendor/lib", .{}) catch |err| { std.debug.print("Warning: {e}. The cfltk library will be grabbed and built from source!\n", .{err}); const zfltk_init = b.addSystemCommand(&[_][]const u8{ "git", "submodule", "update", "--init", "--recursive", "--depth=1", }); try zfltk_init.step.make(); if (target.isWindows() or target.isDarwin()) { const zfltk_config = b.addSystemCommand(&[_][]const u8{ "cmake", "-B", "vendor/bin", "-S", "vendor/cfltk", "-DCMAKE_BUILD_TYPE=Release", "-DCMAKE_INSTALL_PREFIX=vendor/lib", "-DFLTK_BUILD_TEST=OFF", "-DOPTION_USE_SYSTEM_LIBPNG=OFF", "-DOPTION_USE_SYSTEM_LIBJPEG=OFF", "-DOPTION_USE_SYSTEM_ZLIB=OFF", }); try zfltk_config.step.make(); } else { const zfltk_config = b.addSystemCommand(&[_][]const u8{ "cmake", "-B", "vendor/bin", "-S", "vendor/cfltk", "-DCMAKE_BUILD_TYPE=Release", "-DCMAKE_INSTALL_PREFIX=vendor/lib", "-DFLTK_BUILD_TEST=OFF", "-DOPTION_USE_SYSTEM_LIBPNG=OFF", "-DOPTION_USE_SYSTEM_LIBJPEG=OFF", "-DOPTION_USE_SYSTEM_ZLIB=OFF", "-DOPTION_USE_PANGO=ON", // enable if rtl/cjk font support is needed }); try zfltk_config.step.make(); } const zfltk_build = b.addSystemCommand(&[_][]const u8{ "cmake", "--build", "vendor/bin", "--config", "Release", "--parallel", }); try zfltk_build.step.make(); // This only needs to run once! 
const zfltk_install = b.addSystemCommand(&[_][]const u8{ "cmake", "--install", "vendor/bin", }); try zfltk_install.step.make(); }; const examples_step = b.step("examples", "build the examples"); b.default_step.dependOn(examples_step); for (examples) |example| { const exe = b.addExecutable(example.output, example.input); exe.setTarget(target); exe.setBuildMode(mode); exe.addPackagePath("zfltk", "src/zfltk.zig"); exe.addIncludeDir("vendor/cfltk/include"); exe.addLibPath("vendor/lib/lib"); exe.linkSystemLibrary("cfltk"); exe.linkSystemLibrary("fltk"); exe.linkSystemLibrary("fltk_images"); exe.linkSystemLibrary("fltk_png"); exe.linkSystemLibrary("fltk_jpeg"); exe.linkSystemLibrary("fltk_z"); exe.linkSystemLibrary("c"); if (target.isWindows()) { exe.linkSystemLibrary("ws2_32"); exe.linkSystemLibrary("comctl32"); exe.linkSystemLibrary("gdi32"); exe.linkSystemLibrary("oleaut32"); exe.linkSystemLibrary("ole32"); exe.linkSystemLibrary("uuid"); exe.linkSystemLibrary("shell32"); exe.linkSystemLibrary("advapi32"); exe.linkSystemLibrary("comdlg32"); exe.linkSystemLibrary("winspool"); exe.linkSystemLibrary("user32"); exe.linkSystemLibrary("kernel32"); exe.linkSystemLibrary("odbc32"); exe.linkSystemLibrary("gdiplus"); } else if (target.isDarwin()) { exe.linkFramework("Carbon"); exe.linkFramework("Cocoa"); exe.linkFramework("ApplicationServices"); } else { exe.linkSystemLibrary("pthread"); exe.linkSystemLibrary("X11"); exe.linkSystemLibrary("Xext"); exe.linkSystemLibrary("Xinerama"); exe.linkSystemLibrary("Xcursor"); exe.linkSystemLibrary("Xrender"); exe.linkSystemLibrary("Xfixes"); exe.linkSystemLibrary("Xft"); exe.linkSystemLibrary("fontconfig"); exe.linkSystemLibrary("pango-1.0"); exe.linkSystemLibrary("pangoxft-1.0"); exe.linkSystemLibrary("gobject-2.0"); exe.linkSystemLibrary("cairo"); exe.linkSystemLibrary("pangocairo-1.0"); } examples_step.dependOn(&exe.step); b.installArtifact(exe); const run_cmd = exe.run(); const run_step = b.step(b.fmt("run-{s}", .{example.output}), example.description.?); run_step.dependOn(&run_cmd.step); } }
build.zig
//-------------------------------------------------------------------------------- // Section: Types (0) //-------------------------------------------------------------------------------- //-------------------------------------------------------------------------------- // Section: Functions (23) //-------------------------------------------------------------------------------- pub extern "KERNEL32" fn SetEnvironmentStringsW( NewEnvironment: ?[*]u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetCommandLineA( ) callconv(@import("std").os.windows.WINAPI) ?PSTR; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetCommandLineW( ) callconv(@import("std").os.windows.WINAPI) ?PWSTR; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetEnvironmentStrings( ) callconv(@import("std").os.windows.WINAPI) ?PSTR; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetEnvironmentStringsW( ) callconv(@import("std").os.windows.WINAPI) ?PWSTR; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn FreeEnvironmentStringsA( penv: ?[*]u8, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn FreeEnvironmentStringsW( penv: ?[*]u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetEnvironmentVariableA( lpName: ?[*:0]const u8, lpBuffer: ?[*:0]u8, nSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetEnvironmentVariableW( lpName: ?[*:0]const u16, lpBuffer: ?[*:0]u16, nSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn SetEnvironmentVariableA( lpName: ?[*:0]const u8, lpValue: ?[*:0]const u8, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn SetEnvironmentVariableW( lpName: ?[*:0]const u16, lpValue: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn ExpandEnvironmentStringsA( lpSrc: ?[*:0]const u8, lpDst: ?[*:0]u8, nSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn ExpandEnvironmentStringsW( lpSrc: ?[*:0]const u16, lpDst: ?[*:0]u16, nSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "KERNEL32" fn SetCurrentDirectoryA( lpPathName: ?[*:0]const u8, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub extern "KERNEL32" fn SetCurrentDirectoryW( lpPathName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub extern "KERNEL32" fn GetCurrentDirectoryA( nBufferLength: u32, lpBuffer: ?[*:0]u8, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "KERNEL32" fn GetCurrentDirectoryW( nBufferLength: u32, lpBuffer: ?[*:0]u16, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn NeedCurrentDirectoryForExePathA( ExeName: ?[*:0]const u8, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows6.0.6000' pub 
extern "KERNEL32" fn NeedCurrentDirectoryForExePathW( ExeName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "USERENV" fn CreateEnvironmentBlock( lpEnvironment: ?*?*c_void, hToken: ?HANDLE, bInherit: BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "USERENV" fn DestroyEnvironmentBlock( lpEnvironment: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "USERENV" fn ExpandEnvironmentStringsForUserA( hToken: ?HANDLE, lpSrc: ?[*:0]const u8, lpDest: [*:0]u8, dwSize: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "USERENV" fn ExpandEnvironmentStringsForUserW( hToken: ?HANDLE, lpSrc: ?[*:0]const u16, lpDest: [*:0]u16, dwSize: u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (9) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { pub const GetCommandLine = thismodule.GetCommandLineA; pub const FreeEnvironmentStrings = thismodule.FreeEnvironmentStringsA; pub const GetEnvironmentVariable = thismodule.GetEnvironmentVariableA; pub const SetEnvironmentVariable = thismodule.SetEnvironmentVariableA; pub const ExpandEnvironmentStrings = thismodule.ExpandEnvironmentStringsA; pub const SetCurrentDirectory = thismodule.SetCurrentDirectoryA; pub const GetCurrentDirectory = thismodule.GetCurrentDirectoryA; pub const NeedCurrentDirectoryForExePath = thismodule.NeedCurrentDirectoryForExePathA; pub const ExpandEnvironmentStringsForUser = thismodule.ExpandEnvironmentStringsForUserA; }, .wide => struct { pub const GetCommandLine = thismodule.GetCommandLineW; pub const FreeEnvironmentStrings = thismodule.FreeEnvironmentStringsW; pub const GetEnvironmentVariable = thismodule.GetEnvironmentVariableW; pub const SetEnvironmentVariable = thismodule.SetEnvironmentVariableW; pub const ExpandEnvironmentStrings = thismodule.ExpandEnvironmentStringsW; pub const SetCurrentDirectory = thismodule.SetCurrentDirectoryW; pub const GetCurrentDirectory = thismodule.GetCurrentDirectoryW; pub const NeedCurrentDirectoryForExePath = thismodule.NeedCurrentDirectoryForExePathW; pub const ExpandEnvironmentStringsForUser = thismodule.ExpandEnvironmentStringsForUserW; }, .unspecified => if (@import("builtin").is_test) struct { pub const GetCommandLine = *opaque{}; pub const FreeEnvironmentStrings = *opaque{}; pub const GetEnvironmentVariable = *opaque{}; pub const SetEnvironmentVariable = *opaque{}; pub const ExpandEnvironmentStrings = *opaque{}; pub const SetCurrentDirectory = *opaque{}; pub const GetCurrentDirectory = *opaque{}; pub const NeedCurrentDirectoryForExePath = *opaque{}; pub const ExpandEnvironmentStringsForUser = *opaque{}; } else struct { pub const GetCommandLine = @compileError("'GetCommandLine' requires that UNICODE be set to true or false in the root module"); pub const FreeEnvironmentStrings = @compileError("'FreeEnvironmentStrings' requires that UNICODE be set to true or false in the root module"); pub const GetEnvironmentVariable = @compileError("'GetEnvironmentVariable' requires that UNICODE be set to true or false in the root module"); pub const 
SetEnvironmentVariable = @compileError("'SetEnvironmentVariable' requires that UNICODE be set to true or false in the root module"); pub const ExpandEnvironmentStrings = @compileError("'ExpandEnvironmentStrings' requires that UNICODE be set to true or false in the root module"); pub const SetCurrentDirectory = @compileError("'SetCurrentDirectory' requires that UNICODE be set to true or false in the root module"); pub const GetCurrentDirectory = @compileError("'GetCurrentDirectory' requires that UNICODE be set to true or false in the root module"); pub const NeedCurrentDirectoryForExePath = @compileError("'NeedCurrentDirectoryForExePath' requires that UNICODE be set to true or false in the root module"); pub const ExpandEnvironmentStringsForUser = @compileError("'ExpandEnvironmentStringsForUser' requires that UNICODE be set to true or false in the root module"); }, }; //-------------------------------------------------------------------------------- // Section: Imports (4) //-------------------------------------------------------------------------------- const BOOL = @import("../foundation.zig").BOOL; const HANDLE = @import("../foundation.zig").HANDLE; const PSTR = @import("../foundation.zig").PSTR; const PWSTR = @import("../foundation.zig").PWSTR; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/system/environment.zig
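A minimal, Windows-only usage sketch for the raw bindings above, calling the W variant directly so the unicode_mode switch is not needed; the import path is hypothetical and the buffer size is arbitrary:

const std = @import("std");
const env = @import("environment.zig"); // hypothetical relative import
const L = std.unicode.utf8ToUtf16LeStringLiteral;

pub fn main() void {
    var buf: [4096:0]u16 = undefined;
    // Returns the number of UTF-16 code units written, or 0 on failure
    // (for example when the variable does not exist).
    const len = env.GetEnvironmentVariableW(L("PATH"), &buf, buf.len);
    std.debug.print("PATH is {} UTF-16 code units long\n", .{len});
}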
const c = @import("c.zig"); const nk = @import("../nuklear.zig"); const std = @import("std"); const testing = std.testing; pub fn line(b: *nk.CommandBuffer, x0: f32, y0: f32, x1: f32, y1: f32, line_thickness: f32, col: nk.Color) void { return c.nk_stroke_line(b, x0, y0, x1, y1, line_thickness, col); } pub fn curve( b: *nk.CommandBuffer, ax: f32, ay: f32, ctrl0x: f32, ctrl0y: f32, ctrl1x: f32, ctrl1y: f32, bx: f32, by: f32, line_thickness: f32, col: nk.Color, ) void { return c.nk_stroke_curve( b, ax, ay, ctrl0x, ctrl0y, ctrl1x, ctrl1y, bx, by, line_thickness, col, ); } pub fn rect(b: *nk.CommandBuffer, r: nk.Rect, rounding: f32, line_thickness: f32, col: nk.Color) void { return c.nk_stroke_rect(b, r, rounding, line_thickness, col); } pub fn circle(b: *nk.CommandBuffer, r: nk.Rect, line_thickness: f32, col: nk.Color) void { return c.nk_stroke_circle(b, r, line_thickness, col); } pub fn arc(b: *nk.CommandBuffer, cx: f32, cy: f32, radius: f32, a_min: f32, a_max: f32, line_thickness: f32, col: nk.Color) void { return c.nk_stroke_arc(b, cx, cy, radius, a_min, a_max, line_thickness, col); } pub fn triangle(b: *nk.CommandBuffer, h: f32, o: f32, q: f32, d: f32, y: f32, a: f32, line_thichness: f32, u: nk.Color) void { return c.nk_stroke_triangle(b, h, o, q, d, y, a, line_thichness, u); } pub fn polyline(b: *nk.CommandBuffer, points: [][2]f32, line_thickness: f32, col: nk.Color) void { return c.nk_stroke_polyline( b, @ptrCast([*]f32, points.ptr), @intCast(c_int, points.len), line_thickness, col, ); } pub fn polygon(b: *nk.CommandBuffer, points: [][2]f32, line_thickness: f32, col: nk.Color) void { return c.nk_stroke_polygon( b, @ptrCast([*]f32, points.ptr), @intCast(c_int, points.len), line_thickness, col, ); } test { testing.refAllDecls(@This()); }
src/stroke.zig
const std = @import("std"); const mem = std.mem; const testing = std.testing; const Decomp = @import("DecompFile.zig").Decomp; const Self = @This(); const Node = struct { value: ?Decomp = null, children: [256]?*Node = [_]?*Node{null} ** 256, fn deinit(self: *Node, allocator: mem.Allocator) void { for (self.children) |byte| { if (byte) |node| { node.deinit(allocator); allocator.destroy(node); } } } }; allocator: mem.Allocator, root: Node = Node{}, pub fn init(allocator: mem.Allocator) Self { return Self{ .allocator = allocator, }; } pub fn deinit(self: *Self) void { self.root.deinit(self.allocator); } /// `add` a value for the specified key. Keys are slices of the key value type. pub fn add(self: *Self, key: []const u8, value: Decomp) !void { var current = &self.root; for (key) |k| { if (current.children[k] == null) { var node = try self.allocator.create(Node); node.* = Node{}; current.children[k] = node; } current = current.children[k].?; } current.value = value; } /// `Lookup` is returned from the find method on a successful match. The index field refers to /// the index of the element in the key slice that produced the match. pub const Lookup = struct { index: usize, value: Decomp, }; /// `finds` the matching value for the given key, null otherwise. pub fn find(self: Self, key: []const u8) ?Lookup { var current = &self.root; var result: ?Lookup = null; for (key) |k, i| { if (current.children[k] == null) break; if (current.children[k].?.value) |value| { result = .{ .index = i, .value = value, }; } current = current.children[k].?; } return result; } test "Normalizer Trieton" { var allocator = std.testing.allocator; var trie = init(allocator); defer trie.deinit(); try trie.add(&[_]u8{ 2, 3 }, .{ .seq = [_]u21{ 33, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }); const result = trie.find(&[_]u8{ 2, 3 }); try testing.expect(result != null); try testing.expectEqual(result.?.index, 1); try testing.expectEqual(result.?.value.form, .canon); try testing.expectEqual(result.?.value.len, 2); try testing.expectEqual(result.?.value.seq[0], 33); try testing.expectEqual(result.?.value.seq[1], 33); }
src/normalizer/Trieton.zig
const std = @import("std");
const alka = @import("alka");

const m = alka.math;

usingnamespace alka.log;
pub const mlog = std.log.scoped(.app);
pub const log_level: std.log.Level = .info;

const vertex_shader =
    \\#version 330 core
    \\layout (location = 0) in vec2 aPos;
    \\layout (location = 1) in vec2 aTexCoord;
    \\layout (location = 2) in vec4 aColour;
    \\
    \\out vec2 ourTexCoord;
    \\out vec4 ourColour;
    \\uniform mat4 view;
    \\
    \\void main() {
    \\  gl_Position = view * vec4(aPos.xy, 0.0, 1.0);
    \\  ourTexCoord = aTexCoord;
    \\  ourColour = aColour;
    \\}
;

const fragment_shader =
    \\#version 330 core
    \\out vec4 final;
    \\in vec2 ourTexCoord;
    \\in vec4 ourColour;
    \\uniform sampler2D uTexture;
    \\
    \\void main() {
    \\  vec4 texelColour = texture(uTexture, ourTexCoord);
    \\  final = vec4(1, 0, 0, 1) * texelColour; // everything is red
    \\}
;

fn batchDraw(corebatch: alka.Batch2DQuad, mode: alka.gl.DrawMode, shader: *u32, texture: *alka.renderer.Texture, cam2d: *m.Camera2D) alka.Error!void {
    cam2d.attach();
    defer cam2d.detach();

    alka.gl.shaderProgramUse(shader.*);
    defer alka.gl.shaderProgramUse(0);

    alka.gl.textureActive(.texture0);
    alka.gl.textureBind(.t2D, texture.id);
    defer alka.gl.textureBind(.t2D, 0);

    const mvploc = alka.gl.shaderProgramGetUniformLocation(shader.*, "view");
    alka.gl.shaderProgramSetMat4x4f(mvploc, cam2d.view);

    try corebatch.draw(mode);
}

fn draw() !void {
    const asset = alka.getAssetManager();

    // create the batch
    // NOTE: if the batch already exists, it won't create a new one; it returns the existing batch
    // arguments: drawmode, shader_id, texture_id
    var batch = try alka.createBatch(alka.gl.DrawMode.triangles, 1, 0);

    // this way we can change how we draw the batch;
    // if not used, it'll use the default batch draw function, which is stored at `Batch.drawDefault`
    batch.drawfun = batchDraw;
    alka.setBatchFun(batch);

    // There is also:
    //
    // useful when using non-assetmanager loaded shaders and textures
    // createBatchNoID(mode: gl.DrawMode, sh: u32, texture: renderer.Texture) Error!Batch
    //
    // every draw call will create batches even if you don't create one explicitly;
    // this is useful in case you need to explicitly use the auto-generated batches
    // getBatch(mode: gl.DrawMode, sh_id: u64, texture_id: u64) Error!Batch
    //
    // useful when using non-assetmanager loaded shaders and textures
    // getBatchNoID(mode: gl.DrawMode, sh: u32, texture: renderer.Texture) Error!Batch
    //
    // explicitly renders the batch, but does not clean it,
    // so you need to use cleanBatch() if you don't want to end up with 2x the draw calls
    // renderBatch(batch: Batch) Error!void
    //
    // explicitly clears the batch
    // cleanBatch(batch: Batch) void

    // push the batch
    try alka.pushBatch(batch);

    {
        const r = m.Rectangle{ .position = m.Vec2f{ .x = 100.0, .y = 200.0 }, .size = m.Vec2f{ .x = 50.0, .y = 50.0 } };
        const col = alka.Colour{ .r = 1, .g = 1, .b = 1, .a = 1 };

        try alka.drawRectangleAdv(r, m.Vec2f{ .x = 25, .y = 25 }, m.deg2radf(45), col);

        // the custom batch forces drawmode: triangles, so this would draw a corrupted rectangle
        //try alka.drawRectangleLinesAdv(r, m.Vec2f{ .x = 25, .y = 25 }, m.deg2radf(45), col);

        // there is also a 2D camera unique to every batch;
        // alka.getCamera2D() is the global camera, which every batch defaults to every frame
        batch.cam2d.zoom.x = 0.5;
        batch.cam2d.zoom.y = 0.5;
    }

    // pop the batch
    alka.popBatch();

    const r2 = m.Rectangle{ .position = m.Vec2f{ .x = 200.0, .y = 200.0 }, .size = m.Vec2f{ .x = 30.0, .y = 30.0 } };
    const col2 = alka.Colour.rgba(30, 80, 200, 255);
    try alka.drawRectangle(r2, col2);
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};

    const callbacks = alka.Callbacks{
        .update = null,
        .fixed = null,
        .draw = draw,
        .resize = null,
        .close = null,
    };

    try alka.init(&gpa.allocator, callbacks, 1024, 768, "Custom batch", 0, false);

    try alka.getAssetManager().loadShader(1, vertex_shader, fragment_shader);

    try alka.open();
    try alka.update();
    try alka.close();

    try alka.deinit();

    const leaked = gpa.deinit();
    if (leaked) return error.Leak;
}
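// Hypothetical helper (not part of the original example): flushes a batch by
// hand using the renderBatch/cleanBatch API described in the comments inside
// `draw` above. Treating `Batch` and `Error` as `alka.Batch`/`alka.Error` is
// an assumption based on those comments, not something this example verifies.
fn flushBatchExplicitly(batch: alka.Batch) alka.Error!void {
    // renderBatch submits the batch but does not clear it...
    try alka.renderBatch(batch);
    // ...so clean it manually to avoid submitting the same batch twice in a frame.
    alka.cleanBatch(batch);
}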
examples/custombatch.zig
pub const struct_ZigClangAPValue = @OpaqueType(); pub const struct_ZigClangAPSInt = @OpaqueType(); pub const struct_ZigClangASTContext = @OpaqueType(); pub const struct_ZigClangASTUnit = @OpaqueType(); pub const struct_ZigClangArraySubscriptExpr = @OpaqueType(); pub const struct_ZigClangArrayType = @OpaqueType(); pub const struct_ZigClangAttributedType = @OpaqueType(); pub const struct_ZigClangBinaryOperator = @OpaqueType(); pub const struct_ZigClangBreakStmt = @OpaqueType(); pub const struct_ZigClangBuiltinType = @OpaqueType(); pub const struct_ZigClangCStyleCastExpr = @OpaqueType(); pub const struct_ZigClangCallExpr = @OpaqueType(); pub const struct_ZigClangCaseStmt = @OpaqueType(); pub const struct_ZigClangCompoundAssignOperator = @OpaqueType(); pub const struct_ZigClangCompoundStmt = @OpaqueType(); pub const struct_ZigClangConditionalOperator = @OpaqueType(); pub const struct_ZigClangConstantArrayType = @OpaqueType(); pub const struct_ZigClangContinueStmt = @OpaqueType(); pub const struct_ZigClangDecayedType = @OpaqueType(); pub const struct_ZigClangDecl = @OpaqueType(); pub const struct_ZigClangDeclRefExpr = @OpaqueType(); pub const struct_ZigClangDeclStmt = @OpaqueType(); pub const struct_ZigClangDefaultStmt = @OpaqueType(); pub const struct_ZigClangDiagnosticOptions = @OpaqueType(); pub const struct_ZigClangDiagnosticsEngine = @OpaqueType(); pub const struct_ZigClangDoStmt = @OpaqueType(); pub const struct_ZigClangElaboratedType = @OpaqueType(); pub const struct_ZigClangEnumConstantDecl = @OpaqueType(); pub const struct_ZigClangEnumDecl = @OpaqueType(); pub const struct_ZigClangEnumType = @OpaqueType(); pub const struct_ZigClangExpr = @OpaqueType(); pub const struct_ZigClangFieldDecl = @OpaqueType(); pub const struct_ZigClangFileID = @OpaqueType(); pub const struct_ZigClangForStmt = @OpaqueType(); pub const struct_ZigClangFullSourceLoc = @OpaqueType(); pub const struct_ZigClangFunctionDecl = @OpaqueType(); pub const struct_ZigClangFunctionProtoType = @OpaqueType(); pub const struct_ZigClangIfStmt = @OpaqueType(); pub const struct_ZigClangImplicitCastExpr = @OpaqueType(); pub const struct_ZigClangIncompleteArrayType = @OpaqueType(); pub const struct_ZigClangIntegerLiteral = @OpaqueType(); pub const struct_ZigClangMacroDefinitionRecord = @OpaqueType(); pub const struct_ZigClangMemberExpr = @OpaqueType(); pub const struct_ZigClangNamedDecl = @OpaqueType(); pub const struct_ZigClangNone = @OpaqueType(); pub const struct_ZigClangPCHContainerOperations = @OpaqueType(); pub const struct_ZigClangParenExpr = @OpaqueType(); pub const struct_ZigClangParenType = @OpaqueType(); pub const struct_ZigClangParmVarDecl = @OpaqueType(); pub const struct_ZigClangPointerType = @OpaqueType(); pub const struct_ZigClangPreprocessedEntity = @OpaqueType(); pub const struct_ZigClangRecordDecl = @OpaqueType(); pub const struct_ZigClangRecordType = @OpaqueType(); pub const struct_ZigClangReturnStmt = @OpaqueType(); pub const struct_ZigClangSkipFunctionBodiesScope = @OpaqueType(); pub const struct_ZigClangSourceManager = @OpaqueType(); pub const struct_ZigClangSourceRange = @OpaqueType(); pub const struct_ZigClangStmt = @OpaqueType(); pub const struct_ZigClangStorageClass = @OpaqueType(); pub const struct_ZigClangStringLiteral = @OpaqueType(); pub const struct_ZigClangStringRef = @OpaqueType(); pub const struct_ZigClangSwitchStmt = @OpaqueType(); pub const struct_ZigClangTagDecl = @OpaqueType(); pub const struct_ZigClangType = @OpaqueType(); pub const struct_ZigClangTypedefNameDecl = @OpaqueType(); pub const 
struct_ZigClangTypedefType = @OpaqueType(); pub const struct_ZigClangUnaryExprOrTypeTraitExpr = @OpaqueType(); pub const struct_ZigClangUnaryOperator = @OpaqueType(); pub const struct_ZigClangValueDecl = @OpaqueType(); pub const struct_ZigClangVarDecl = @OpaqueType(); pub const struct_ZigClangWhileStmt = @OpaqueType(); pub const ZigClangBO_PtrMemD = enum_ZigClangBO._PtrMemD; pub const ZigClangBO_PtrMemI = enum_ZigClangBO._PtrMemI; pub const ZigClangBO_Mul = enum_ZigClangBO._Mul; pub const ZigClangBO_Div = enum_ZigClangBO._Div; pub const ZigClangBO_Rem = enum_ZigClangBO._Rem; pub const ZigClangBO_Add = enum_ZigClangBO._Add; pub const ZigClangBO_Sub = enum_ZigClangBO._Sub; pub const ZigClangBO_Shl = enum_ZigClangBO._Shl; pub const ZigClangBO_Shr = enum_ZigClangBO._Shr; pub const ZigClangBO_Cmp = enum_ZigClangBO._Cmp; pub const ZigClangBO_LT = enum_ZigClangBO._LT; pub const ZigClangBO_GT = enum_ZigClangBO._GT; pub const ZigClangBO_LE = enum_ZigClangBO._LE; pub const ZigClangBO_GE = enum_ZigClangBO._GE; pub const ZigClangBO_EQ = enum_ZigClangBO._EQ; pub const ZigClangBO_NE = enum_ZigClangBO._NE; pub const ZigClangBO_And = enum_ZigClangBO._And; pub const ZigClangBO_Xor = enum_ZigClangBO._Xor; pub const ZigClangBO_Or = enum_ZigClangBO._Or; pub const ZigClangBO_LAnd = enum_ZigClangBO._LAnd; pub const ZigClangBO_LOr = enum_ZigClangBO._LOr; pub const ZigClangBO_Assign = enum_ZigClangBO._Assign; pub const ZigClangBO_MulAssign = enum_ZigClangBO._MulAssign; pub const ZigClangBO_DivAssign = enum_ZigClangBO._DivAssign; pub const ZigClangBO_RemAssign = enum_ZigClangBO._RemAssign; pub const ZigClangBO_AddAssign = enum_ZigClangBO._AddAssign; pub const ZigClangBO_SubAssign = enum_ZigClangBO._SubAssign; pub const ZigClangBO_ShlAssign = enum_ZigClangBO._ShlAssign; pub const ZigClangBO_ShrAssign = enum_ZigClangBO._ShrAssign; pub const ZigClangBO_AndAssign = enum_ZigClangBO._AndAssign; pub const ZigClangBO_XorAssign = enum_ZigClangBO._XorAssign; pub const ZigClangBO_OrAssign = enum_ZigClangBO._OrAssign; pub const ZigClangBO_Comma = enum_ZigClangBO._Comma; pub const enum_ZigClangBO = extern enum { _PtrMemD, _PtrMemI, _Mul, _Div, _Rem, _Add, _Sub, _Shl, _Shr, _Cmp, _LT, _GT, _LE, _GE, _EQ, _NE, _And, _Xor, _Or, _LAnd, _LOr, _Assign, _MulAssign, _DivAssign, _RemAssign, _AddAssign, _SubAssign, _ShlAssign, _ShrAssign, _AndAssign, _XorAssign, _OrAssign, _Comma, }; pub const ZigClangUO_PostInc = enum_ZigClangUO._PostInc; pub const ZigClangUO_PostDec = enum_ZigClangUO._PostDec; pub const ZigClangUO_PreInc = enum_ZigClangUO._PreInc; pub const ZigClangUO_PreDec = enum_ZigClangUO._PreDec; pub const ZigClangUO_AddrOf = enum_ZigClangUO._AddrOf; pub const ZigClangUO_Deref = enum_ZigClangUO._Deref; pub const ZigClangUO_Plus = enum_ZigClangUO._Plus; pub const ZigClangUO_Minus = enum_ZigClangUO._Minus; pub const ZigClangUO_Not = enum_ZigClangUO._Not; pub const ZigClangUO_LNot = enum_ZigClangUO._LNot; pub const ZigClangUO_Real = enum_ZigClangUO._Real; pub const ZigClangUO_Imag = enum_ZigClangUO._Imag; pub const ZigClangUO_Extension = enum_ZigClangUO._Extension; pub const ZigClangUO_Coawait = enum_ZigClangUO._Coawait; pub const enum_ZigClangUO = extern enum { _PostInc, _PostDec, _PreInc, _PreDec, _AddrOf, _Deref, _Plus, _Minus, _Not, _LNot, _Real, _Imag, _Extension, _Coawait, }; pub const ZigClangType_Builtin = enum_ZigClangTypeClass.ZigClangType_Builtin; pub const ZigClangType_Complex = enum_ZigClangTypeClass.ZigClangType_Complex; pub const ZigClangType_Pointer = enum_ZigClangTypeClass.ZigClangType_Pointer; pub const 
ZigClangType_BlockPointer = enum_ZigClangTypeClass.ZigClangType_BlockPointer; pub const ZigClangType_LValueReference = enum_ZigClangTypeClass.ZigClangType_LValueReference; pub const ZigClangType_RValueReference = enum_ZigClangTypeClass.ZigClangType_RValueReference; pub const ZigClangType_MemberPointer = enum_ZigClangTypeClass.ZigClangType_MemberPointer; pub const ZigClangType_ConstantArray = enum_ZigClangTypeClass.ZigClangType_ConstantArray; pub const ZigClangType_IncompleteArray = enum_ZigClangTypeClass.ZigClangType_IncompleteArray; pub const ZigClangType_VariableArray = enum_ZigClangTypeClass.ZigClangType_VariableArray; pub const ZigClangType_DependentSizedArray = enum_ZigClangTypeClass.ZigClangType_DependentSizedArray; pub const ZigClangType_DependentSizedExtVector = enum_ZigClangTypeClass.ZigClangType_DependentSizedExtVector; pub const ZigClangType_DependentAddressSpace = enum_ZigClangTypeClass.ZigClangType_DependentAddressSpace; pub const ZigClangType_Vector = enum_ZigClangTypeClass.ZigClangType_Vector; pub const ZigClangType_DependentVector = enum_ZigClangTypeClass.ZigClangType_DependentVector; pub const ZigClangType_ExtVector = enum_ZigClangTypeClass.ZigClangType_ExtVector; pub const ZigClangType_FunctionProto = enum_ZigClangTypeClass.ZigClangType_FunctionProto; pub const ZigClangType_FunctionNoProto = enum_ZigClangTypeClass.ZigClangType_FunctionNoProto; pub const ZigClangType_UnresolvedUsing = enum_ZigClangTypeClass.ZigClangType_UnresolvedUsing; pub const ZigClangType_Paren = enum_ZigClangTypeClass.ZigClangType_Paren; pub const ZigClangType_Typedef = enum_ZigClangTypeClass.ZigClangType_Typedef; pub const ZigClangType_Adjusted = enum_ZigClangTypeClass.ZigClangType_Adjusted; pub const ZigClangType_Decayed = enum_ZigClangTypeClass.ZigClangType_Decayed; pub const ZigClangType_TypeOfExpr = enum_ZigClangTypeClass.ZigClangType_TypeOfExpr; pub const ZigClangType_TypeOf = enum_ZigClangTypeClass.ZigClangType_TypeOf; pub const ZigClangType_Decltype = enum_ZigClangTypeClass.ZigClangType_Decltype; pub const ZigClangType_UnaryTransform = enum_ZigClangTypeClass.ZigClangType_UnaryTransform; pub const ZigClangType_Record = enum_ZigClangTypeClass.ZigClangType_Record; pub const ZigClangType_Enum = enum_ZigClangTypeClass.ZigClangType_Enum; pub const ZigClangType_Elaborated = enum_ZigClangTypeClass.ZigClangType_Elaborated; pub const ZigClangType_Attributed = enum_ZigClangTypeClass.ZigClangType_Attributed; pub const ZigClangType_TemplateTypeParm = enum_ZigClangTypeClass.ZigClangType_TemplateTypeParm; pub const ZigClangType_SubstTemplateTypeParm = enum_ZigClangTypeClass.ZigClangType_SubstTemplateTypeParm; pub const ZigClangType_SubstTemplateTypeParmPack = enum_ZigClangTypeClass.ZigClangType_SubstTemplateTypeParmPack; pub const ZigClangType_TemplateSpecialization = enum_ZigClangTypeClass.ZigClangType_TemplateSpecialization; pub const ZigClangType_Auto = enum_ZigClangTypeClass.ZigClangType_Auto; pub const ZigClangType_DeducedTemplateSpecialization = enum_ZigClangTypeClass.ZigClangType_DeducedTemplateSpecialization; pub const ZigClangType_InjectedClassName = enum_ZigClangTypeClass.ZigClangType_InjectedClassName; pub const ZigClangType_DependentName = enum_ZigClangTypeClass.ZigClangType_DependentName; pub const ZigClangType_DependentTemplateSpecialization = enum_ZigClangTypeClass.ZigClangType_DependentTemplateSpecialization; pub const ZigClangType_PackExpansion = enum_ZigClangTypeClass.ZigClangType_PackExpansion; pub const ZigClangType_ObjCTypeParam = enum_ZigClangTypeClass.ZigClangType_ObjCTypeParam; pub 
const ZigClangType_ObjCObject = enum_ZigClangTypeClass.ZigClangType_ObjCObject; pub const ZigClangType_ObjCInterface = enum_ZigClangTypeClass.ZigClangType_ObjCInterface; pub const ZigClangType_ObjCObjectPointer = enum_ZigClangTypeClass.ZigClangType_ObjCObjectPointer; pub const ZigClangType_Pipe = enum_ZigClangTypeClass.ZigClangType_Pipe; pub const ZigClangType_Atomic = enum_ZigClangTypeClass.ZigClangType_Atomic; pub const enum_ZigClangTypeClass = extern enum { ZigClangType_Builtin, ZigClangType_Complex, ZigClangType_Pointer, ZigClangType_BlockPointer, ZigClangType_LValueReference, ZigClangType_RValueReference, ZigClangType_MemberPointer, ZigClangType_ConstantArray, ZigClangType_IncompleteArray, ZigClangType_VariableArray, ZigClangType_DependentSizedArray, ZigClangType_DependentSizedExtVector, ZigClangType_DependentAddressSpace, ZigClangType_Vector, ZigClangType_DependentVector, ZigClangType_ExtVector, ZigClangType_FunctionProto, ZigClangType_FunctionNoProto, ZigClangType_UnresolvedUsing, ZigClangType_Paren, ZigClangType_Typedef, ZigClangType_Adjusted, ZigClangType_Decayed, ZigClangType_TypeOfExpr, ZigClangType_TypeOf, ZigClangType_Decltype, ZigClangType_UnaryTransform, ZigClangType_Record, ZigClangType_Enum, ZigClangType_Elaborated, ZigClangType_Attributed, ZigClangType_TemplateTypeParm, ZigClangType_SubstTemplateTypeParm, ZigClangType_SubstTemplateTypeParmPack, ZigClangType_TemplateSpecialization, ZigClangType_Auto, ZigClangType_DeducedTemplateSpecialization, ZigClangType_InjectedClassName, ZigClangType_DependentName, ZigClangType_DependentTemplateSpecialization, ZigClangType_PackExpansion, ZigClangType_ObjCTypeParam, ZigClangType_ObjCObject, ZigClangType_ObjCInterface, ZigClangType_ObjCObjectPointer, ZigClangType_Pipe, ZigClangType_Atomic, }; pub const ZigClangStmt_NoStmtClass = enum_ZigClangStmtClass.ZigClangStmt_NoStmtClass; pub const ZigClangStmt_GCCAsmStmtClass = enum_ZigClangStmtClass.ZigClangStmt_GCCAsmStmtClass; pub const ZigClangStmt_MSAsmStmtClass = enum_ZigClangStmtClass.ZigClangStmt_MSAsmStmtClass; pub const ZigClangStmt_AttributedStmtClass = enum_ZigClangStmtClass.ZigClangStmt_AttributedStmtClass; pub const ZigClangStmt_BreakStmtClass = enum_ZigClangStmtClass.ZigClangStmt_BreakStmtClass; pub const ZigClangStmt_CXXCatchStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CXXCatchStmtClass; pub const ZigClangStmt_CXXForRangeStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CXXForRangeStmtClass; pub const ZigClangStmt_CXXTryStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CXXTryStmtClass; pub const ZigClangStmt_CapturedStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CapturedStmtClass; pub const ZigClangStmt_CompoundStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CompoundStmtClass; pub const ZigClangStmt_ContinueStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ContinueStmtClass; pub const ZigClangStmt_CoreturnStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CoreturnStmtClass; pub const ZigClangStmt_CoroutineBodyStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CoroutineBodyStmtClass; pub const ZigClangStmt_DeclStmtClass = enum_ZigClangStmtClass.ZigClangStmt_DeclStmtClass; pub const ZigClangStmt_DoStmtClass = enum_ZigClangStmtClass.ZigClangStmt_DoStmtClass; pub const ZigClangStmt_BinaryConditionalOperatorClass = enum_ZigClangStmtClass.ZigClangStmt_BinaryConditionalOperatorClass; pub const ZigClangStmt_ConditionalOperatorClass = enum_ZigClangStmtClass.ZigClangStmt_ConditionalOperatorClass; pub const ZigClangStmt_AddrLabelExprClass = enum_ZigClangStmtClass.ZigClangStmt_AddrLabelExprClass; pub 
const ZigClangStmt_ArrayInitIndexExprClass = enum_ZigClangStmtClass.ZigClangStmt_ArrayInitIndexExprClass; pub const ZigClangStmt_ArrayInitLoopExprClass = enum_ZigClangStmtClass.ZigClangStmt_ArrayInitLoopExprClass; pub const ZigClangStmt_ArraySubscriptExprClass = enum_ZigClangStmtClass.ZigClangStmt_ArraySubscriptExprClass; pub const ZigClangStmt_ArrayTypeTraitExprClass = enum_ZigClangStmtClass.ZigClangStmt_ArrayTypeTraitExprClass; pub const ZigClangStmt_AsTypeExprClass = enum_ZigClangStmtClass.ZigClangStmt_AsTypeExprClass; pub const ZigClangStmt_AtomicExprClass = enum_ZigClangStmtClass.ZigClangStmt_AtomicExprClass; pub const ZigClangStmt_BinaryOperatorClass = enum_ZigClangStmtClass.ZigClangStmt_BinaryOperatorClass; pub const ZigClangStmt_CompoundAssignOperatorClass = enum_ZigClangStmtClass.ZigClangStmt_CompoundAssignOperatorClass; pub const ZigClangStmt_BlockExprClass = enum_ZigClangStmtClass.ZigClangStmt_BlockExprClass; pub const ZigClangStmt_CXXBindTemporaryExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXBindTemporaryExprClass; pub const ZigClangStmt_CXXBoolLiteralExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXBoolLiteralExprClass; pub const ZigClangStmt_CXXConstructExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXConstructExprClass; pub const ZigClangStmt_CXXTemporaryObjectExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXTemporaryObjectExprClass; pub const ZigClangStmt_CXXDefaultArgExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXDefaultArgExprClass; pub const ZigClangStmt_CXXDefaultInitExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXDefaultInitExprClass; pub const ZigClangStmt_CXXDeleteExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXDeleteExprClass; pub const ZigClangStmt_CXXDependentScopeMemberExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXDependentScopeMemberExprClass; pub const ZigClangStmt_CXXFoldExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXFoldExprClass; pub const ZigClangStmt_CXXInheritedCtorInitExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXInheritedCtorInitExprClass; pub const ZigClangStmt_CXXNewExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXNewExprClass; pub const ZigClangStmt_CXXNoexceptExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXNoexceptExprClass; pub const ZigClangStmt_CXXNullPtrLiteralExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXNullPtrLiteralExprClass; pub const ZigClangStmt_CXXPseudoDestructorExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXPseudoDestructorExprClass; pub const ZigClangStmt_CXXScalarValueInitExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXScalarValueInitExprClass; pub const ZigClangStmt_CXXStdInitializerListExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXStdInitializerListExprClass; pub const ZigClangStmt_CXXThisExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXThisExprClass; pub const ZigClangStmt_CXXThrowExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXThrowExprClass; pub const ZigClangStmt_CXXTypeidExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXTypeidExprClass; pub const ZigClangStmt_CXXUnresolvedConstructExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXUnresolvedConstructExprClass; pub const ZigClangStmt_CXXUuidofExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXUuidofExprClass; pub const ZigClangStmt_CallExprClass = enum_ZigClangStmtClass.ZigClangStmt_CallExprClass; pub const ZigClangStmt_CUDAKernelCallExprClass = enum_ZigClangStmtClass.ZigClangStmt_CUDAKernelCallExprClass; pub const ZigClangStmt_CXXMemberCallExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXMemberCallExprClass; pub 
const ZigClangStmt_CXXOperatorCallExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXOperatorCallExprClass; pub const ZigClangStmt_UserDefinedLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_UserDefinedLiteralClass; pub const ZigClangStmt_CStyleCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_CStyleCastExprClass; pub const ZigClangStmt_CXXFunctionalCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXFunctionalCastExprClass; pub const ZigClangStmt_CXXConstCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXConstCastExprClass; pub const ZigClangStmt_CXXDynamicCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXDynamicCastExprClass; pub const ZigClangStmt_CXXReinterpretCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXReinterpretCastExprClass; pub const ZigClangStmt_CXXStaticCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_CXXStaticCastExprClass; pub const ZigClangStmt_ObjCBridgedCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCBridgedCastExprClass; pub const ZigClangStmt_ImplicitCastExprClass = enum_ZigClangStmtClass.ZigClangStmt_ImplicitCastExprClass; pub const ZigClangStmt_CharacterLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_CharacterLiteralClass; pub const ZigClangStmt_ChooseExprClass = enum_ZigClangStmtClass.ZigClangStmt_ChooseExprClass; pub const ZigClangStmt_CompoundLiteralExprClass = enum_ZigClangStmtClass.ZigClangStmt_CompoundLiteralExprClass; pub const ZigClangStmt_ConvertVectorExprClass = enum_ZigClangStmtClass.ZigClangStmt_ConvertVectorExprClass; pub const ZigClangStmt_CoawaitExprClass = enum_ZigClangStmtClass.ZigClangStmt_CoawaitExprClass; pub const ZigClangStmt_CoyieldExprClass = enum_ZigClangStmtClass.ZigClangStmt_CoyieldExprClass; pub const ZigClangStmt_DeclRefExprClass = enum_ZigClangStmtClass.ZigClangStmt_DeclRefExprClass; pub const ZigClangStmt_DependentCoawaitExprClass = enum_ZigClangStmtClass.ZigClangStmt_DependentCoawaitExprClass; pub const ZigClangStmt_DependentScopeDeclRefExprClass = enum_ZigClangStmtClass.ZigClangStmt_DependentScopeDeclRefExprClass; pub const ZigClangStmt_DesignatedInitExprClass = enum_ZigClangStmtClass.ZigClangStmt_DesignatedInitExprClass; pub const ZigClangStmt_DesignatedInitUpdateExprClass = enum_ZigClangStmtClass.ZigClangStmt_DesignatedInitUpdateExprClass; pub const ZigClangStmt_ExpressionTraitExprClass = enum_ZigClangStmtClass.ZigClangStmt_ExpressionTraitExprClass; pub const ZigClangStmt_ExtVectorElementExprClass = enum_ZigClangStmtClass.ZigClangStmt_ExtVectorElementExprClass; pub const ZigClangStmt_FixedPointLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_FixedPointLiteralClass; pub const ZigClangStmt_FloatingLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_FloatingLiteralClass; pub const ZigClangStmt_ConstantExprClass = enum_ZigClangStmtClass.ZigClangStmt_ConstantExprClass; pub const ZigClangStmt_ExprWithCleanupsClass = enum_ZigClangStmtClass.ZigClangStmt_ExprWithCleanupsClass; pub const ZigClangStmt_FunctionParmPackExprClass = enum_ZigClangStmtClass.ZigClangStmt_FunctionParmPackExprClass; pub const ZigClangStmt_GNUNullExprClass = enum_ZigClangStmtClass.ZigClangStmt_GNUNullExprClass; pub const ZigClangStmt_GenericSelectionExprClass = enum_ZigClangStmtClass.ZigClangStmt_GenericSelectionExprClass; pub const ZigClangStmt_ImaginaryLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_ImaginaryLiteralClass; pub const ZigClangStmt_ImplicitValueInitExprClass = enum_ZigClangStmtClass.ZigClangStmt_ImplicitValueInitExprClass; pub const ZigClangStmt_InitListExprClass = 
enum_ZigClangStmtClass.ZigClangStmt_InitListExprClass; pub const ZigClangStmt_IntegerLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_IntegerLiteralClass; pub const ZigClangStmt_LambdaExprClass = enum_ZigClangStmtClass.ZigClangStmt_LambdaExprClass; pub const ZigClangStmt_MSPropertyRefExprClass = enum_ZigClangStmtClass.ZigClangStmt_MSPropertyRefExprClass; pub const ZigClangStmt_MSPropertySubscriptExprClass = enum_ZigClangStmtClass.ZigClangStmt_MSPropertySubscriptExprClass; pub const ZigClangStmt_MaterializeTemporaryExprClass = enum_ZigClangStmtClass.ZigClangStmt_MaterializeTemporaryExprClass; pub const ZigClangStmt_MemberExprClass = enum_ZigClangStmtClass.ZigClangStmt_MemberExprClass; pub const ZigClangStmt_NoInitExprClass = enum_ZigClangStmtClass.ZigClangStmt_NoInitExprClass; pub const ZigClangStmt_OMPArraySectionExprClass = enum_ZigClangStmtClass.ZigClangStmt_OMPArraySectionExprClass; pub const ZigClangStmt_ObjCArrayLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCArrayLiteralClass; pub const ZigClangStmt_ObjCAvailabilityCheckExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCAvailabilityCheckExprClass; pub const ZigClangStmt_ObjCBoolLiteralExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCBoolLiteralExprClass; pub const ZigClangStmt_ObjCBoxedExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCBoxedExprClass; pub const ZigClangStmt_ObjCDictionaryLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCDictionaryLiteralClass; pub const ZigClangStmt_ObjCEncodeExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCEncodeExprClass; pub const ZigClangStmt_ObjCIndirectCopyRestoreExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCIndirectCopyRestoreExprClass; pub const ZigClangStmt_ObjCIsaExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCIsaExprClass; pub const ZigClangStmt_ObjCIvarRefExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCIvarRefExprClass; pub const ZigClangStmt_ObjCMessageExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCMessageExprClass; pub const ZigClangStmt_ObjCPropertyRefExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCPropertyRefExprClass; pub const ZigClangStmt_ObjCProtocolExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCProtocolExprClass; pub const ZigClangStmt_ObjCSelectorExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCSelectorExprClass; pub const ZigClangStmt_ObjCStringLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCStringLiteralClass; pub const ZigClangStmt_ObjCSubscriptRefExprClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCSubscriptRefExprClass; pub const ZigClangStmt_OffsetOfExprClass = enum_ZigClangStmtClass.ZigClangStmt_OffsetOfExprClass; pub const ZigClangStmt_OpaqueValueExprClass = enum_ZigClangStmtClass.ZigClangStmt_OpaqueValueExprClass; pub const ZigClangStmt_UnresolvedLookupExprClass = enum_ZigClangStmtClass.ZigClangStmt_UnresolvedLookupExprClass; pub const ZigClangStmt_UnresolvedMemberExprClass = enum_ZigClangStmtClass.ZigClangStmt_UnresolvedMemberExprClass; pub const ZigClangStmt_PackExpansionExprClass = enum_ZigClangStmtClass.ZigClangStmt_PackExpansionExprClass; pub const ZigClangStmt_ParenExprClass = enum_ZigClangStmtClass.ZigClangStmt_ParenExprClass; pub const ZigClangStmt_ParenListExprClass = enum_ZigClangStmtClass.ZigClangStmt_ParenListExprClass; pub const ZigClangStmt_PredefinedExprClass = enum_ZigClangStmtClass.ZigClangStmt_PredefinedExprClass; pub const ZigClangStmt_PseudoObjectExprClass = enum_ZigClangStmtClass.ZigClangStmt_PseudoObjectExprClass; pub const ZigClangStmt_ShuffleVectorExprClass = 
enum_ZigClangStmtClass.ZigClangStmt_ShuffleVectorExprClass; pub const ZigClangStmt_SizeOfPackExprClass = enum_ZigClangStmtClass.ZigClangStmt_SizeOfPackExprClass; pub const ZigClangStmt_StmtExprClass = enum_ZigClangStmtClass.ZigClangStmt_StmtExprClass; pub const ZigClangStmt_StringLiteralClass = enum_ZigClangStmtClass.ZigClangStmt_StringLiteralClass; pub const ZigClangStmt_SubstNonTypeTemplateParmExprClass = enum_ZigClangStmtClass.ZigClangStmt_SubstNonTypeTemplateParmExprClass; pub const ZigClangStmt_SubstNonTypeTemplateParmPackExprClass = enum_ZigClangStmtClass.ZigClangStmt_SubstNonTypeTemplateParmPackExprClass; pub const ZigClangStmt_TypeTraitExprClass = enum_ZigClangStmtClass.ZigClangStmt_TypeTraitExprClass; pub const ZigClangStmt_TypoExprClass = enum_ZigClangStmtClass.ZigClangStmt_TypoExprClass; pub const ZigClangStmt_UnaryExprOrTypeTraitExprClass = enum_ZigClangStmtClass.ZigClangStmt_UnaryExprOrTypeTraitExprClass; pub const ZigClangStmt_UnaryOperatorClass = enum_ZigClangStmtClass.ZigClangStmt_UnaryOperatorClass; pub const ZigClangStmt_VAArgExprClass = enum_ZigClangStmtClass.ZigClangStmt_VAArgExprClass; pub const ZigClangStmt_ForStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ForStmtClass; pub const ZigClangStmt_GotoStmtClass = enum_ZigClangStmtClass.ZigClangStmt_GotoStmtClass; pub const ZigClangStmt_IfStmtClass = enum_ZigClangStmtClass.ZigClangStmt_IfStmtClass; pub const ZigClangStmt_IndirectGotoStmtClass = enum_ZigClangStmtClass.ZigClangStmt_IndirectGotoStmtClass; pub const ZigClangStmt_LabelStmtClass = enum_ZigClangStmtClass.ZigClangStmt_LabelStmtClass; pub const ZigClangStmt_MSDependentExistsStmtClass = enum_ZigClangStmtClass.ZigClangStmt_MSDependentExistsStmtClass; pub const ZigClangStmt_NullStmtClass = enum_ZigClangStmtClass.ZigClangStmt_NullStmtClass; pub const ZigClangStmt_OMPAtomicDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPAtomicDirectiveClass; pub const ZigClangStmt_OMPBarrierDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPBarrierDirectiveClass; pub const ZigClangStmt_OMPCancelDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPCancelDirectiveClass; pub const ZigClangStmt_OMPCancellationPointDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPCancellationPointDirectiveClass; pub const ZigClangStmt_OMPCriticalDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPCriticalDirectiveClass; pub const ZigClangStmt_OMPFlushDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPFlushDirectiveClass; pub const ZigClangStmt_OMPDistributeDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPDistributeDirectiveClass; pub const ZigClangStmt_OMPDistributeParallelForDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPDistributeParallelForDirectiveClass; pub const ZigClangStmt_OMPDistributeParallelForSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPDistributeParallelForSimdDirectiveClass; pub const ZigClangStmt_OMPDistributeSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPDistributeSimdDirectiveClass; pub const ZigClangStmt_OMPForDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPForDirectiveClass; pub const ZigClangStmt_OMPForSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPForSimdDirectiveClass; pub const ZigClangStmt_OMPParallelForDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPParallelForDirectiveClass; pub const ZigClangStmt_OMPParallelForSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPParallelForSimdDirectiveClass; pub const ZigClangStmt_OMPSimdDirectiveClass = 
enum_ZigClangStmtClass.ZigClangStmt_OMPSimdDirectiveClass; pub const ZigClangStmt_OMPTargetParallelForSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetParallelForSimdDirectiveClass; pub const ZigClangStmt_OMPTargetSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetSimdDirectiveClass; pub const ZigClangStmt_OMPTargetTeamsDistributeDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetTeamsDistributeDirectiveClass; pub const ZigClangStmt_OMPTargetTeamsDistributeParallelForDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetTeamsDistributeParallelForDirectiveClass; pub const ZigClangStmt_OMPTargetTeamsDistributeParallelForSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetTeamsDistributeParallelForSimdDirectiveClass; pub const ZigClangStmt_OMPTargetTeamsDistributeSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetTeamsDistributeSimdDirectiveClass; pub const ZigClangStmt_OMPTaskLoopDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTaskLoopDirectiveClass; pub const ZigClangStmt_OMPTaskLoopSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTaskLoopSimdDirectiveClass; pub const ZigClangStmt_OMPTeamsDistributeDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTeamsDistributeDirectiveClass; pub const ZigClangStmt_OMPTeamsDistributeParallelForDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTeamsDistributeParallelForDirectiveClass; pub const ZigClangStmt_OMPTeamsDistributeParallelForSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTeamsDistributeParallelForSimdDirectiveClass; pub const ZigClangStmt_OMPTeamsDistributeSimdDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTeamsDistributeSimdDirectiveClass; pub const ZigClangStmt_OMPMasterDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPMasterDirectiveClass; pub const ZigClangStmt_OMPOrderedDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPOrderedDirectiveClass; pub const ZigClangStmt_OMPParallelDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPParallelDirectiveClass; pub const ZigClangStmt_OMPParallelSectionsDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPParallelSectionsDirectiveClass; pub const ZigClangStmt_OMPSectionDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPSectionDirectiveClass; pub const ZigClangStmt_OMPSectionsDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPSectionsDirectiveClass; pub const ZigClangStmt_OMPSingleDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPSingleDirectiveClass; pub const ZigClangStmt_OMPTargetDataDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetDataDirectiveClass; pub const ZigClangStmt_OMPTargetDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetDirectiveClass; pub const ZigClangStmt_OMPTargetEnterDataDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetEnterDataDirectiveClass; pub const ZigClangStmt_OMPTargetExitDataDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetExitDataDirectiveClass; pub const ZigClangStmt_OMPTargetParallelDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetParallelDirectiveClass; pub const ZigClangStmt_OMPTargetParallelForDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetParallelForDirectiveClass; pub const ZigClangStmt_OMPTargetTeamsDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetTeamsDirectiveClass; pub const ZigClangStmt_OMPTargetUpdateDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTargetUpdateDirectiveClass; pub const 
ZigClangStmt_OMPTaskDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTaskDirectiveClass; pub const ZigClangStmt_OMPTaskgroupDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTaskgroupDirectiveClass; pub const ZigClangStmt_OMPTaskwaitDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTaskwaitDirectiveClass; pub const ZigClangStmt_OMPTaskyieldDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTaskyieldDirectiveClass; pub const ZigClangStmt_OMPTeamsDirectiveClass = enum_ZigClangStmtClass.ZigClangStmt_OMPTeamsDirectiveClass; pub const ZigClangStmt_ObjCAtCatchStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCAtCatchStmtClass; pub const ZigClangStmt_ObjCAtFinallyStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCAtFinallyStmtClass; pub const ZigClangStmt_ObjCAtSynchronizedStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCAtSynchronizedStmtClass; pub const ZigClangStmt_ObjCAtThrowStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCAtThrowStmtClass; pub const ZigClangStmt_ObjCAtTryStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCAtTryStmtClass; pub const ZigClangStmt_ObjCAutoreleasePoolStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCAutoreleasePoolStmtClass; pub const ZigClangStmt_ObjCForCollectionStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ObjCForCollectionStmtClass; pub const ZigClangStmt_ReturnStmtClass = enum_ZigClangStmtClass.ZigClangStmt_ReturnStmtClass; pub const ZigClangStmt_SEHExceptStmtClass = enum_ZigClangStmtClass.ZigClangStmt_SEHExceptStmtClass; pub const ZigClangStmt_SEHFinallyStmtClass = enum_ZigClangStmtClass.ZigClangStmt_SEHFinallyStmtClass; pub const ZigClangStmt_SEHLeaveStmtClass = enum_ZigClangStmtClass.ZigClangStmt_SEHLeaveStmtClass; pub const ZigClangStmt_SEHTryStmtClass = enum_ZigClangStmtClass.ZigClangStmt_SEHTryStmtClass; pub const ZigClangStmt_CaseStmtClass = enum_ZigClangStmtClass.ZigClangStmt_CaseStmtClass; pub const ZigClangStmt_DefaultStmtClass = enum_ZigClangStmtClass.ZigClangStmt_DefaultStmtClass; pub const ZigClangStmt_SwitchStmtClass = enum_ZigClangStmtClass.ZigClangStmt_SwitchStmtClass; pub const ZigClangStmt_WhileStmtClass = enum_ZigClangStmtClass.ZigClangStmt_WhileStmtClass; pub const enum_ZigClangStmtClass = extern enum { ZigClangStmt_NoStmtClass = 0, ZigClangStmt_GCCAsmStmtClass = 1, ZigClangStmt_MSAsmStmtClass = 2, ZigClangStmt_AttributedStmtClass = 3, ZigClangStmt_BreakStmtClass = 4, ZigClangStmt_CXXCatchStmtClass = 5, ZigClangStmt_CXXForRangeStmtClass = 6, ZigClangStmt_CXXTryStmtClass = 7, ZigClangStmt_CapturedStmtClass = 8, ZigClangStmt_CompoundStmtClass = 9, ZigClangStmt_ContinueStmtClass = 10, ZigClangStmt_CoreturnStmtClass = 11, ZigClangStmt_CoroutineBodyStmtClass = 12, ZigClangStmt_DeclStmtClass = 13, ZigClangStmt_DoStmtClass = 14, ZigClangStmt_BinaryConditionalOperatorClass = 15, ZigClangStmt_ConditionalOperatorClass = 16, ZigClangStmt_AddrLabelExprClass = 17, ZigClangStmt_ArrayInitIndexExprClass = 18, ZigClangStmt_ArrayInitLoopExprClass = 19, ZigClangStmt_ArraySubscriptExprClass = 20, ZigClangStmt_ArrayTypeTraitExprClass = 21, ZigClangStmt_AsTypeExprClass = 22, ZigClangStmt_AtomicExprClass = 23, ZigClangStmt_BinaryOperatorClass = 24, ZigClangStmt_CompoundAssignOperatorClass = 25, ZigClangStmt_BlockExprClass = 26, ZigClangStmt_CXXBindTemporaryExprClass = 27, ZigClangStmt_CXXBoolLiteralExprClass = 28, ZigClangStmt_CXXConstructExprClass = 29, ZigClangStmt_CXXTemporaryObjectExprClass = 30, ZigClangStmt_CXXDefaultArgExprClass = 31, ZigClangStmt_CXXDefaultInitExprClass = 32, ZigClangStmt_CXXDeleteExprClass = 33, 
ZigClangStmt_CXXDependentScopeMemberExprClass = 34, ZigClangStmt_CXXFoldExprClass = 35, ZigClangStmt_CXXInheritedCtorInitExprClass = 36, ZigClangStmt_CXXNewExprClass = 37, ZigClangStmt_CXXNoexceptExprClass = 38, ZigClangStmt_CXXNullPtrLiteralExprClass = 39, ZigClangStmt_CXXPseudoDestructorExprClass = 40, ZigClangStmt_CXXScalarValueInitExprClass = 41, ZigClangStmt_CXXStdInitializerListExprClass = 42, ZigClangStmt_CXXThisExprClass = 43, ZigClangStmt_CXXThrowExprClass = 44, ZigClangStmt_CXXTypeidExprClass = 45, ZigClangStmt_CXXUnresolvedConstructExprClass = 46, ZigClangStmt_CXXUuidofExprClass = 47, ZigClangStmt_CallExprClass = 48, ZigClangStmt_CUDAKernelCallExprClass = 49, ZigClangStmt_CXXMemberCallExprClass = 50, ZigClangStmt_CXXOperatorCallExprClass = 51, ZigClangStmt_UserDefinedLiteralClass = 52, ZigClangStmt_CStyleCastExprClass = 53, ZigClangStmt_CXXFunctionalCastExprClass = 54, ZigClangStmt_CXXConstCastExprClass = 55, ZigClangStmt_CXXDynamicCastExprClass = 56, ZigClangStmt_CXXReinterpretCastExprClass = 57, ZigClangStmt_CXXStaticCastExprClass = 58, ZigClangStmt_ObjCBridgedCastExprClass = 59, ZigClangStmt_ImplicitCastExprClass = 60, ZigClangStmt_CharacterLiteralClass = 61, ZigClangStmt_ChooseExprClass = 62, ZigClangStmt_CompoundLiteralExprClass = 63, ZigClangStmt_ConvertVectorExprClass = 64, ZigClangStmt_CoawaitExprClass = 65, ZigClangStmt_CoyieldExprClass = 66, ZigClangStmt_DeclRefExprClass = 67, ZigClangStmt_DependentCoawaitExprClass = 68, ZigClangStmt_DependentScopeDeclRefExprClass = 69, ZigClangStmt_DesignatedInitExprClass = 70, ZigClangStmt_DesignatedInitUpdateExprClass = 71, ZigClangStmt_ExpressionTraitExprClass = 72, ZigClangStmt_ExtVectorElementExprClass = 73, ZigClangStmt_FixedPointLiteralClass = 74, ZigClangStmt_FloatingLiteralClass = 75, ZigClangStmt_ConstantExprClass = 76, ZigClangStmt_ExprWithCleanupsClass = 77, ZigClangStmt_FunctionParmPackExprClass = 78, ZigClangStmt_GNUNullExprClass = 79, ZigClangStmt_GenericSelectionExprClass = 80, ZigClangStmt_ImaginaryLiteralClass = 81, ZigClangStmt_ImplicitValueInitExprClass = 82, ZigClangStmt_InitListExprClass = 83, ZigClangStmt_IntegerLiteralClass = 84, ZigClangStmt_LambdaExprClass = 85, ZigClangStmt_MSPropertyRefExprClass = 86, ZigClangStmt_MSPropertySubscriptExprClass = 87, ZigClangStmt_MaterializeTemporaryExprClass = 88, ZigClangStmt_MemberExprClass = 89, ZigClangStmt_NoInitExprClass = 90, ZigClangStmt_OMPArraySectionExprClass = 91, ZigClangStmt_ObjCArrayLiteralClass = 92, ZigClangStmt_ObjCAvailabilityCheckExprClass = 93, ZigClangStmt_ObjCBoolLiteralExprClass = 94, ZigClangStmt_ObjCBoxedExprClass = 95, ZigClangStmt_ObjCDictionaryLiteralClass = 96, ZigClangStmt_ObjCEncodeExprClass = 97, ZigClangStmt_ObjCIndirectCopyRestoreExprClass = 98, ZigClangStmt_ObjCIsaExprClass = 99, ZigClangStmt_ObjCIvarRefExprClass = 100, ZigClangStmt_ObjCMessageExprClass = 101, ZigClangStmt_ObjCPropertyRefExprClass = 102, ZigClangStmt_ObjCProtocolExprClass = 103, ZigClangStmt_ObjCSelectorExprClass = 104, ZigClangStmt_ObjCStringLiteralClass = 105, ZigClangStmt_ObjCSubscriptRefExprClass = 106, ZigClangStmt_OffsetOfExprClass = 107, ZigClangStmt_OpaqueValueExprClass = 108, ZigClangStmt_UnresolvedLookupExprClass = 109, ZigClangStmt_UnresolvedMemberExprClass = 110, ZigClangStmt_PackExpansionExprClass = 111, ZigClangStmt_ParenExprClass = 112, ZigClangStmt_ParenListExprClass = 113, ZigClangStmt_PredefinedExprClass = 114, ZigClangStmt_PseudoObjectExprClass = 115, ZigClangStmt_ShuffleVectorExprClass = 116, ZigClangStmt_SizeOfPackExprClass = 117, 
ZigClangStmt_StmtExprClass = 118, ZigClangStmt_StringLiteralClass = 119, ZigClangStmt_SubstNonTypeTemplateParmExprClass = 120, ZigClangStmt_SubstNonTypeTemplateParmPackExprClass = 121, ZigClangStmt_TypeTraitExprClass = 122, ZigClangStmt_TypoExprClass = 123, ZigClangStmt_UnaryExprOrTypeTraitExprClass = 124, ZigClangStmt_UnaryOperatorClass = 125, ZigClangStmt_VAArgExprClass = 126, ZigClangStmt_ForStmtClass = 127, ZigClangStmt_GotoStmtClass = 128, ZigClangStmt_IfStmtClass = 129, ZigClangStmt_IndirectGotoStmtClass = 130, ZigClangStmt_LabelStmtClass = 131, ZigClangStmt_MSDependentExistsStmtClass = 132, ZigClangStmt_NullStmtClass = 133, ZigClangStmt_OMPAtomicDirectiveClass = 134, ZigClangStmt_OMPBarrierDirectiveClass = 135, ZigClangStmt_OMPCancelDirectiveClass = 136, ZigClangStmt_OMPCancellationPointDirectiveClass = 137, ZigClangStmt_OMPCriticalDirectiveClass = 138, ZigClangStmt_OMPFlushDirectiveClass = 139, ZigClangStmt_OMPDistributeDirectiveClass = 140, ZigClangStmt_OMPDistributeParallelForDirectiveClass = 141, ZigClangStmt_OMPDistributeParallelForSimdDirectiveClass = 142, ZigClangStmt_OMPDistributeSimdDirectiveClass = 143, ZigClangStmt_OMPForDirectiveClass = 144, ZigClangStmt_OMPForSimdDirectiveClass = 145, ZigClangStmt_OMPParallelForDirectiveClass = 146, ZigClangStmt_OMPParallelForSimdDirectiveClass = 147, ZigClangStmt_OMPSimdDirectiveClass = 148, ZigClangStmt_OMPTargetParallelForSimdDirectiveClass = 149, ZigClangStmt_OMPTargetSimdDirectiveClass = 150, ZigClangStmt_OMPTargetTeamsDistributeDirectiveClass = 151, ZigClangStmt_OMPTargetTeamsDistributeParallelForDirectiveClass = 152, ZigClangStmt_OMPTargetTeamsDistributeParallelForSimdDirectiveClass = 153, ZigClangStmt_OMPTargetTeamsDistributeSimdDirectiveClass = 154, ZigClangStmt_OMPTaskLoopDirectiveClass = 155, ZigClangStmt_OMPTaskLoopSimdDirectiveClass = 156, ZigClangStmt_OMPTeamsDistributeDirectiveClass = 157, ZigClangStmt_OMPTeamsDistributeParallelForDirectiveClass = 158, ZigClangStmt_OMPTeamsDistributeParallelForSimdDirectiveClass = 159, ZigClangStmt_OMPTeamsDistributeSimdDirectiveClass = 160, ZigClangStmt_OMPMasterDirectiveClass = 161, ZigClangStmt_OMPOrderedDirectiveClass = 162, ZigClangStmt_OMPParallelDirectiveClass = 163, ZigClangStmt_OMPParallelSectionsDirectiveClass = 164, ZigClangStmt_OMPSectionDirectiveClass = 165, ZigClangStmt_OMPSectionsDirectiveClass = 166, ZigClangStmt_OMPSingleDirectiveClass = 167, ZigClangStmt_OMPTargetDataDirectiveClass = 168, ZigClangStmt_OMPTargetDirectiveClass = 169, ZigClangStmt_OMPTargetEnterDataDirectiveClass = 170, ZigClangStmt_OMPTargetExitDataDirectiveClass = 171, ZigClangStmt_OMPTargetParallelDirectiveClass = 172, ZigClangStmt_OMPTargetParallelForDirectiveClass = 173, ZigClangStmt_OMPTargetTeamsDirectiveClass = 174, ZigClangStmt_OMPTargetUpdateDirectiveClass = 175, ZigClangStmt_OMPTaskDirectiveClass = 176, ZigClangStmt_OMPTaskgroupDirectiveClass = 177, ZigClangStmt_OMPTaskwaitDirectiveClass = 178, ZigClangStmt_OMPTaskyieldDirectiveClass = 179, ZigClangStmt_OMPTeamsDirectiveClass = 180, ZigClangStmt_ObjCAtCatchStmtClass = 181, ZigClangStmt_ObjCAtFinallyStmtClass = 182, ZigClangStmt_ObjCAtSynchronizedStmtClass = 183, ZigClangStmt_ObjCAtThrowStmtClass = 184, ZigClangStmt_ObjCAtTryStmtClass = 185, ZigClangStmt_ObjCAutoreleasePoolStmtClass = 186, ZigClangStmt_ObjCForCollectionStmtClass = 187, ZigClangStmt_ReturnStmtClass = 188, ZigClangStmt_SEHExceptStmtClass = 189, ZigClangStmt_SEHFinallyStmtClass = 190, ZigClangStmt_SEHLeaveStmtClass = 191, ZigClangStmt_SEHTryStmtClass = 192, ZigClangStmt_CaseStmtClass 
= 193, ZigClangStmt_DefaultStmtClass = 194, ZigClangStmt_SwitchStmtClass = 195, ZigClangStmt_WhileStmtClass = 196, }; pub const ZigClangCK_Dependent = enum_ZigClangCK._Dependent; pub const ZigClangCK_BitCast = enum_ZigClangCK._BitCast; pub const ZigClangCK_LValueBitCast = enum_ZigClangCK._LValueBitCast; pub const ZigClangCK_LValueToRValue = enum_ZigClangCK._LValueToRValue; pub const ZigClangCK_NoOp = enum_ZigClangCK._NoOp; pub const ZigClangCK_BaseToDerived = enum_ZigClangCK._BaseToDerived; pub const ZigClangCK_DerivedToBase = enum_ZigClangCK._DerivedToBase; pub const ZigClangCK_UncheckedDerivedToBase = enum_ZigClangCK._UncheckedDerivedToBase; pub const ZigClangCK_Dynamic = enum_ZigClangCK._Dynamic; pub const ZigClangCK_ToUnion = enum_ZigClangCK._ToUnion; pub const ZigClangCK_ArrayToPointerDecay = enum_ZigClangCK._ArrayToPointerDecay; pub const ZigClangCK_FunctionToPointerDecay = enum_ZigClangCK._FunctionToPointerDecay; pub const ZigClangCK_NullToPointer = enum_ZigClangCK._NullToPointer; pub const ZigClangCK_NullToMemberPointer = enum_ZigClangCK._NullToMemberPointer; pub const ZigClangCK_BaseToDerivedMemberPointer = enum_ZigClangCK._BaseToDerivedMemberPointer; pub const ZigClangCK_DerivedToBaseMemberPointer = enum_ZigClangCK._DerivedToBaseMemberPointer; pub const ZigClangCK_MemberPointerToBoolean = enum_ZigClangCK._MemberPointerToBoolean; pub const ZigClangCK_ReinterpretMemberPointer = enum_ZigClangCK._ReinterpretMemberPointer; pub const ZigClangCK_UserDefinedConversion = enum_ZigClangCK._UserDefinedConversion; pub const ZigClangCK_ConstructorConversion = enum_ZigClangCK._ConstructorConversion; pub const ZigClangCK_IntegralToPointer = enum_ZigClangCK._IntegralToPointer; pub const ZigClangCK_PointerToIntegral = enum_ZigClangCK._PointerToIntegral; pub const ZigClangCK_PointerToBoolean = enum_ZigClangCK._PointerToBoolean; pub const ZigClangCK_ToVoid = enum_ZigClangCK._ToVoid; pub const ZigClangCK_VectorSplat = enum_ZigClangCK._VectorSplat; pub const ZigClangCK_IntegralCast = enum_ZigClangCK._IntegralCast; pub const ZigClangCK_IntegralToBoolean = enum_ZigClangCK._IntegralToBoolean; pub const ZigClangCK_IntegralToFloating = enum_ZigClangCK._IntegralToFloating; pub const ZigClangCK_FixedPointCast = enum_ZigClangCK._FixedPointCast; pub const ZigClangCK_FixedPointToBoolean = enum_ZigClangCK._FixedPointToBoolean; pub const ZigClangCK_FloatingToIntegral = enum_ZigClangCK._FloatingToIntegral; pub const ZigClangCK_FloatingToBoolean = enum_ZigClangCK._FloatingToBoolean; pub const ZigClangCK_BooleanToSignedIntegral = enum_ZigClangCK._BooleanToSignedIntegral; pub const ZigClangCK_FloatingCast = enum_ZigClangCK._FloatingCast; pub const ZigClangCK_CPointerToObjCPointerCast = enum_ZigClangCK._CPointerToObjCPointerCast; pub const ZigClangCK_BlockPointerToObjCPointerCast = enum_ZigClangCK._BlockPointerToObjCPointerCast; pub const ZigClangCK_AnyPointerToBlockPointerCast = enum_ZigClangCK._AnyPointerToBlockPointerCast; pub const ZigClangCK_ObjCObjectLValueCast = enum_ZigClangCK._ObjCObjectLValueCast; pub const ZigClangCK_FloatingRealToComplex = enum_ZigClangCK._FloatingRealToComplex; pub const ZigClangCK_FloatingComplexToReal = enum_ZigClangCK._FloatingComplexToReal; pub const ZigClangCK_FloatingComplexToBoolean = enum_ZigClangCK._FloatingComplexToBoolean; pub const ZigClangCK_FloatingComplexCast = enum_ZigClangCK._FloatingComplexCast; pub const ZigClangCK_FloatingComplexToIntegralComplex = enum_ZigClangCK._FloatingComplexToIntegralComplex; pub const ZigClangCK_IntegralRealToComplex = 
enum_ZigClangCK._IntegralRealToComplex; pub const ZigClangCK_IntegralComplexToReal = enum_ZigClangCK._IntegralComplexToReal; pub const ZigClangCK_IntegralComplexToBoolean = enum_ZigClangCK._IntegralComplexToBoolean; pub const ZigClangCK_IntegralComplexCast = enum_ZigClangCK._IntegralComplexCast; pub const ZigClangCK_IntegralComplexToFloatingComplex = enum_ZigClangCK._IntegralComplexToFloatingComplex; pub const ZigClangCK_ARCProduceObject = enum_ZigClangCK._ARCProduceObject; pub const ZigClangCK_ARCConsumeObject = enum_ZigClangCK._ARCConsumeObject; pub const ZigClangCK_ARCReclaimReturnedObject = enum_ZigClangCK._ARCReclaimReturnedObject; pub const ZigClangCK_ARCExtendBlockObject = enum_ZigClangCK._ARCExtendBlockObject; pub const ZigClangCK_AtomicToNonAtomic = enum_ZigClangCK._AtomicToNonAtomic; pub const ZigClangCK_NonAtomicToAtomic = enum_ZigClangCK._NonAtomicToAtomic; pub const ZigClangCK_CopyAndAutoreleaseBlockObject = enum_ZigClangCK._CopyAndAutoreleaseBlockObject; pub const ZigClangCK_BuiltinFnToFnPtr = enum_ZigClangCK._BuiltinFnToFnPtr; pub const ZigClangCK_ZeroToOCLOpaqueType = enum_ZigClangCK._ZeroToOCLOpaqueType; pub const ZigClangCK_AddressSpaceConversion = enum_ZigClangCK._AddressSpaceConversion; pub const ZigClangCK_IntToOCLSampler = enum_ZigClangCK._IntToOCLSampler; pub const enum_ZigClangCK = extern enum { _Dependent, _BitCast, _LValueBitCast, _LValueToRValue, _NoOp, _BaseToDerived, _DerivedToBase, _UncheckedDerivedToBase, _Dynamic, _ToUnion, _ArrayToPointerDecay, _FunctionToPointerDecay, _NullToPointer, _NullToMemberPointer, _BaseToDerivedMemberPointer, _DerivedToBaseMemberPointer, _MemberPointerToBoolean, _ReinterpretMemberPointer, _UserDefinedConversion, _ConstructorConversion, _IntegralToPointer, _PointerToIntegral, _PointerToBoolean, _ToVoid, _VectorSplat, _IntegralCast, _IntegralToBoolean, _IntegralToFloating, _FixedPointCast, _FixedPointToBoolean, _FloatingToIntegral, _FloatingToBoolean, _BooleanToSignedIntegral, _FloatingCast, _CPointerToObjCPointerCast, _BlockPointerToObjCPointerCast, _AnyPointerToBlockPointerCast, _ObjCObjectLValueCast, _FloatingRealToComplex, _FloatingComplexToReal, _FloatingComplexToBoolean, _FloatingComplexCast, _FloatingComplexToIntegralComplex, _IntegralRealToComplex, _IntegralComplexToReal, _IntegralComplexToBoolean, _IntegralComplexCast, _IntegralComplexToFloatingComplex, _ARCProduceObject, _ARCConsumeObject, _ARCReclaimReturnedObject, _ARCExtendBlockObject, _AtomicToNonAtomic, _NonAtomicToAtomic, _CopyAndAutoreleaseBlockObject, _BuiltinFnToFnPtr, _ZeroToOCLOpaqueType, _AddressSpaceConversion, _IntToOCLSampler, }; pub const ZigClangAPValueUninitialized = enum_ZigClangAPValueKind.ZigClangAPValueUninitialized; pub const ZigClangAPValueInt = enum_ZigClangAPValueKind.ZigClangAPValueInt; pub const ZigClangAPValueFloat = enum_ZigClangAPValueKind.ZigClangAPValueFloat; pub const ZigClangAPValueComplexInt = enum_ZigClangAPValueKind.ZigClangAPValueComplexInt; pub const ZigClangAPValueComplexFloat = enum_ZigClangAPValueKind.ZigClangAPValueComplexFloat; pub const ZigClangAPValueLValue = enum_ZigClangAPValueKind.ZigClangAPValueLValue; pub const ZigClangAPValueVector = enum_ZigClangAPValueKind.ZigClangAPValueVector; pub const ZigClangAPValueArray = enum_ZigClangAPValueKind.ZigClangAPValueArray; pub const ZigClangAPValueStruct = enum_ZigClangAPValueKind.ZigClangAPValueStruct; pub const ZigClangAPValueUnion = enum_ZigClangAPValueKind.ZigClangAPValueUnion; pub const ZigClangAPValueMemberPointer = enum_ZigClangAPValueKind.ZigClangAPValueMemberPointer; pub const 
ZigClangAPValueAddrLabelDiff = enum_ZigClangAPValueKind.ZigClangAPValueAddrLabelDiff; pub const enum_ZigClangAPValueKind = extern enum { ZigClangAPValueUninitialized, ZigClangAPValueInt, ZigClangAPValueFloat, ZigClangAPValueComplexInt, ZigClangAPValueComplexFloat, ZigClangAPValueLValue, ZigClangAPValueVector, ZigClangAPValueArray, ZigClangAPValueStruct, ZigClangAPValueUnion, ZigClangAPValueMemberPointer, ZigClangAPValueAddrLabelDiff, }; pub extern fn ZigClangSourceManager_getSpellingLoc(arg0: ?*const struct_ZigClangSourceManager, Loc: struct_ZigClangSourceLocation) struct_ZigClangSourceLocation; pub extern fn ZigClangSourceManager_getFilename(arg0: ?*const struct_ZigClangSourceManager, SpellingLoc: struct_ZigClangSourceLocation) [*c]const u8; pub extern fn ZigClangSourceManager_getSpellingLineNumber(arg0: ?*const struct_ZigClangSourceManager, Loc: struct_ZigClangSourceLocation) c_uint; pub extern fn ZigClangSourceManager_getSpellingColumnNumber(arg0: ?*const struct_ZigClangSourceManager, Loc: struct_ZigClangSourceLocation) c_uint; pub extern fn ZigClangSourceManager_getCharacterData(arg0: ?*const struct_ZigClangSourceManager, SL: struct_ZigClangSourceLocation) [*c]const u8; pub extern fn ZigClangASTContext_getPointerType(arg0: ?*const struct_ZigClangASTContext, T: struct_ZigClangQualType) struct_ZigClangQualType; pub extern fn ZigClangASTUnit_getASTContext(arg0: ?*struct_ZigClangASTUnit) ?*struct_ZigClangASTContext; pub extern fn ZigClangASTUnit_getSourceManager(arg0: ?*struct_ZigClangASTUnit) ?*struct_ZigClangSourceManager; pub extern fn ZigClangASTUnit_visitLocalTopLevelDecls(arg0: ?*struct_ZigClangASTUnit, context: ?*c_void, Fn: ?extern fn (?*c_void, ?*const struct_ZigClangDecl) bool) bool; pub extern fn ZigClangRecordType_getDecl(record_ty: ?*const struct_ZigClangRecordType) ?*const struct_ZigClangRecordDecl; pub extern fn ZigClangEnumType_getDecl(record_ty: ?*const struct_ZigClangEnumType) ?*const struct_ZigClangEnumDecl; pub extern fn ZigClangRecordDecl_getCanonicalDecl(record_decl: ?*const struct_ZigClangRecordDecl) ?*const struct_ZigClangTagDecl; pub extern fn ZigClangEnumDecl_getCanonicalDecl(arg0: ?*const struct_ZigClangEnumDecl) ?*const struct_ZigClangTagDecl; pub extern fn ZigClangTypedefNameDecl_getCanonicalDecl(arg0: ?*const struct_ZigClangTypedefNameDecl) ?*const struct_ZigClangTypedefNameDecl; pub extern fn ZigClangRecordDecl_getDefinition(arg0: ?*const struct_ZigClangRecordDecl) ?*const struct_ZigClangRecordDecl; pub extern fn ZigClangEnumDecl_getDefinition(arg0: ?*const struct_ZigClangEnumDecl) ?*const struct_ZigClangEnumDecl; pub extern fn ZigClangRecordDecl_getLocation(arg0: ?*const struct_ZigClangRecordDecl) struct_ZigClangSourceLocation; pub extern fn ZigClangEnumDecl_getLocation(arg0: ?*const struct_ZigClangEnumDecl) struct_ZigClangSourceLocation; pub extern fn ZigClangTypedefNameDecl_getLocation(arg0: ?*const struct_ZigClangTypedefNameDecl) struct_ZigClangSourceLocation; pub extern fn ZigClangRecordDecl_isUnion(record_decl: ?*const struct_ZigClangRecordDecl) bool; pub extern fn ZigClangRecordDecl_isStruct(record_decl: ?*const struct_ZigClangRecordDecl) bool; pub extern fn ZigClangRecordDecl_isAnonymousStructOrUnion(record_decl: ?*const struct_ZigClangRecordDecl) bool; pub extern fn ZigClangEnumDecl_getIntegerType(arg0: ?*const struct_ZigClangEnumDecl) struct_ZigClangQualType; pub extern fn ZigClangDecl_getName_bytes_begin(decl: ?*const struct_ZigClangDecl) [*c]const u8; pub extern fn ZigClangSourceLocation_eq(a: struct_ZigClangSourceLocation, b: 
struct_ZigClangSourceLocation) bool; pub extern fn ZigClangTypedefType_getDecl(arg0: ?*const struct_ZigClangTypedefType) ?*const struct_ZigClangTypedefNameDecl; pub extern fn ZigClangTypedefNameDecl_getUnderlyingType(arg0: ?*const struct_ZigClangTypedefNameDecl) struct_ZigClangQualType; pub extern fn ZigClangQualType_getCanonicalType(arg0: struct_ZigClangQualType) struct_ZigClangQualType; pub extern fn ZigClangQualType_getTypePtr(arg0: struct_ZigClangQualType) ?*const struct_ZigClangType; pub extern fn ZigClangQualType_addConst(arg0: [*c]struct_ZigClangQualType) void; pub extern fn ZigClangQualType_eq(arg0: struct_ZigClangQualType, arg1: struct_ZigClangQualType) bool; pub extern fn ZigClangQualType_isConstQualified(arg0: struct_ZigClangQualType) bool; pub extern fn ZigClangQualType_isVolatileQualified(arg0: struct_ZigClangQualType) bool; pub extern fn ZigClangQualType_isRestrictQualified(arg0: struct_ZigClangQualType) bool; pub extern fn ZigClangType_getTypeClass(self: ?*const struct_ZigClangType) enum_ZigClangTypeClass; pub extern fn ZigClangType_isVoidType(self: ?*const struct_ZigClangType) bool; pub extern fn ZigClangType_getTypeClassName(self: ?*const struct_ZigClangType) [*c]const u8; pub extern fn ZigClangStmt_getBeginLoc(self: ?*const struct_ZigClangStmt) struct_ZigClangSourceLocation; pub extern fn ZigClangStmt_getStmtClass(self: ?*const struct_ZigClangStmt) enum_ZigClangStmtClass; pub extern fn ZigClangStmt_classof_Expr(self: ?*const struct_ZigClangStmt) bool; pub extern fn ZigClangExpr_getStmtClass(self: ?*const struct_ZigClangExpr) enum_ZigClangStmtClass; pub extern fn ZigClangExpr_getType(self: ?*const struct_ZigClangExpr) struct_ZigClangQualType; pub extern fn ZigClangExpr_getBeginLoc(self: ?*const struct_ZigClangExpr) struct_ZigClangSourceLocation; pub extern fn ZigClangAPValue_getKind(self: ?*const struct_ZigClangAPValue) enum_ZigClangAPValueKind; pub extern fn ZigClangAPValue_getInt(self: ?*const struct_ZigClangAPValue) ?*const struct_ZigClangAPSInt; pub extern fn ZigClangAPValue_getArrayInitializedElts(self: ?*const struct_ZigClangAPValue) c_uint; pub extern fn ZigClangAPValue_getArrayInitializedElt(self: ?*const struct_ZigClangAPValue, i: c_uint) ?*const struct_ZigClangAPValue; pub extern fn ZigClangAPValue_getArrayFiller(self: ?*const struct_ZigClangAPValue) ?*const struct_ZigClangAPValue; pub extern fn ZigClangAPValue_getArraySize(self: ?*const struct_ZigClangAPValue) c_uint; pub extern fn ZigClangAPValue_getLValueBase(self: ?*const struct_ZigClangAPValue) struct_ZigClangAPValueLValueBase; pub extern fn ZigClangAPSInt_isSigned(self: ?*const struct_ZigClangAPSInt) bool; pub extern fn ZigClangAPSInt_isNegative(self: ?*const struct_ZigClangAPSInt) bool; pub extern fn ZigClangAPSInt_negate(self: ?*const struct_ZigClangAPSInt) ?*const struct_ZigClangAPSInt; pub extern fn ZigClangAPSInt_free(self: ?*const struct_ZigClangAPSInt) void; pub extern fn ZigClangAPSInt_getRawData(self: ?*const struct_ZigClangAPSInt) [*c]const u64; pub extern fn ZigClangAPSInt_getNumWords(self: ?*const struct_ZigClangAPSInt) c_uint; pub extern fn ZigClangAPValueLValueBase_dyn_cast_Expr(self: struct_ZigClangAPValueLValueBase) ?*const struct_ZigClangExpr; pub extern fn ZigClangASTUnit_delete(arg0: ?*struct_ZigClangASTUnit) void; pub const ZigClangSourceLocation = struct_ZigClangSourceLocation; pub const ZigClangQualType = struct_ZigClangQualType; pub const ZigClangAPValueLValueBase = struct_ZigClangAPValueLValueBase; pub const ZigClangAPValue = struct_ZigClangAPValue; pub const ZigClangAPSInt = 
struct_ZigClangAPSInt; pub const ZigClangASTContext = struct_ZigClangASTContext; pub const ZigClangASTUnit = struct_ZigClangASTUnit; pub const ZigClangArraySubscriptExpr = struct_ZigClangArraySubscriptExpr; pub const ZigClangArrayType = struct_ZigClangArrayType; pub const ZigClangAttributedType = struct_ZigClangAttributedType; pub const ZigClangBinaryOperator = struct_ZigClangBinaryOperator; pub const ZigClangBreakStmt = struct_ZigClangBreakStmt; pub const ZigClangBuiltinType = struct_ZigClangBuiltinType; pub const ZigClangCStyleCastExpr = struct_ZigClangCStyleCastExpr; pub const ZigClangCallExpr = struct_ZigClangCallExpr; pub const ZigClangCaseStmt = struct_ZigClangCaseStmt; pub const ZigClangCompoundAssignOperator = struct_ZigClangCompoundAssignOperator; pub const ZigClangCompoundStmt = struct_ZigClangCompoundStmt; pub const ZigClangConditionalOperator = struct_ZigClangConditionalOperator; pub const ZigClangConstantArrayType = struct_ZigClangConstantArrayType; pub const ZigClangContinueStmt = struct_ZigClangContinueStmt; pub const ZigClangDecayedType = struct_ZigClangDecayedType; pub const ZigClangDecl = struct_ZigClangDecl; pub const ZigClangDeclRefExpr = struct_ZigClangDeclRefExpr; pub const ZigClangDeclStmt = struct_ZigClangDeclStmt; pub const ZigClangDefaultStmt = struct_ZigClangDefaultStmt; pub const ZigClangDiagnosticOptions = struct_ZigClangDiagnosticOptions; pub const ZigClangDiagnosticsEngine = struct_ZigClangDiagnosticsEngine; pub const ZigClangDoStmt = struct_ZigClangDoStmt; pub const ZigClangElaboratedType = struct_ZigClangElaboratedType; pub const ZigClangEnumConstantDecl = struct_ZigClangEnumConstantDecl; pub const ZigClangEnumDecl = struct_ZigClangEnumDecl; pub const ZigClangEnumType = struct_ZigClangEnumType; pub const ZigClangExpr = struct_ZigClangExpr; pub const ZigClangFieldDecl = struct_ZigClangFieldDecl; pub const ZigClangFileID = struct_ZigClangFileID; pub const ZigClangForStmt = struct_ZigClangForStmt; pub const ZigClangFullSourceLoc = struct_ZigClangFullSourceLoc; pub const ZigClangFunctionDecl = struct_ZigClangFunctionDecl; pub const ZigClangFunctionProtoType = struct_ZigClangFunctionProtoType; pub const ZigClangIfStmt = struct_ZigClangIfStmt; pub const ZigClangImplicitCastExpr = struct_ZigClangImplicitCastExpr; pub const ZigClangIncompleteArrayType = struct_ZigClangIncompleteArrayType; pub const ZigClangIntegerLiteral = struct_ZigClangIntegerLiteral; pub const ZigClangMacroDefinitionRecord = struct_ZigClangMacroDefinitionRecord; pub const ZigClangMemberExpr = struct_ZigClangMemberExpr; pub const ZigClangNamedDecl = struct_ZigClangNamedDecl; pub const ZigClangNone = struct_ZigClangNone; pub const ZigClangPCHContainerOperations = struct_ZigClangPCHContainerOperations; pub const ZigClangParenExpr = struct_ZigClangParenExpr; pub const ZigClangParenType = struct_ZigClangParenType; pub const ZigClangParmVarDecl = struct_ZigClangParmVarDecl; pub const ZigClangPointerType = struct_ZigClangPointerType; pub const ZigClangPreprocessedEntity = struct_ZigClangPreprocessedEntity; pub const ZigClangRecordDecl = struct_ZigClangRecordDecl; pub const ZigClangRecordType = struct_ZigClangRecordType; pub const ZigClangReturnStmt = struct_ZigClangReturnStmt; pub const ZigClangSkipFunctionBodiesScope = struct_ZigClangSkipFunctionBodiesScope; pub const ZigClangSourceManager = struct_ZigClangSourceManager; pub const ZigClangSourceRange = struct_ZigClangSourceRange; pub const ZigClangStmt = struct_ZigClangStmt; pub const ZigClangStorageClass = struct_ZigClangStorageClass; pub const 
ZigClangStringLiteral = struct_ZigClangStringLiteral;
pub const ZigClangStringRef = struct_ZigClangStringRef;
pub const ZigClangSwitchStmt = struct_ZigClangSwitchStmt;
pub const ZigClangTagDecl = struct_ZigClangTagDecl;
pub const ZigClangType = struct_ZigClangType;
pub const ZigClangTypedefNameDecl = struct_ZigClangTypedefNameDecl;
pub const ZigClangTypedefType = struct_ZigClangTypedefType;
pub const ZigClangUnaryExprOrTypeTraitExpr = struct_ZigClangUnaryExprOrTypeTraitExpr;
pub const ZigClangUnaryOperator = struct_ZigClangUnaryOperator;
pub const ZigClangValueDecl = struct_ZigClangValueDecl;
pub const ZigClangVarDecl = struct_ZigClangVarDecl;
pub const ZigClangWhileStmt = struct_ZigClangWhileStmt;
pub const ZigClangBO = enum_ZigClangBO;
pub const ZigClangUO = enum_ZigClangUO;
pub const ZigClangTypeClass = enum_ZigClangTypeClass;
pub const ZigClangStmtClass = enum_ZigClangStmtClass;
pub const ZigClangCK = enum_ZigClangCK;
pub const ZigClangAPValueKind = enum_ZigClangAPValueKind;
pub const Stage2ErrorMsg = extern struct {
    filename_ptr: ?[*]const u8,
    filename_len: usize,
    msg_ptr: [*]const u8,
    msg_len: usize,
    // valid until the ASTUnit is freed
    source: ?[*]const u8,
    // 0 based
    line: c_uint,
    // 0 based
    column: c_uint,
    // byte offset into source
    offset: c_uint,
};
pub extern fn ZigClangErrorMsg_delete(ptr: [*c]Stage2ErrorMsg, len: usize) void;
pub extern fn ZigClangLoadFromCommandLine(
    args_begin: [*]?[*]const u8,
    args_end: [*]?[*]const u8,
    errors_ptr: *[*]Stage2ErrorMsg,
    errors_len: *usize,
    resources_path: [*c]const u8,
) ?*ZigClangASTUnit;
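// Illustrative usage sketch (added for clarity; not part of the upstream bindings).
// It shows one plausible way to call ZigClangLoadFromCommandLine and release its
// outputs, based only on the extern declarations above. The helper name and the
// argv/resources_path parameters are hypothetical.
fn exampleLoadTranslationUnit(argv: []?[*]const u8, resources_path: [*c]const u8) void {
    var errors_ptr: [*]Stage2ErrorMsg = undefined;
    var errors_len: usize = 0;
    const maybe_ast = ZigClangLoadFromCommandLine(
        argv.ptr,
        argv.ptr + argv.len,
        &errors_ptr,
        &errors_len,
        resources_path,
    );
    if (maybe_ast) |ast| {
        // Parse succeeded: the caller owns the ASTUnit and must eventually free it.
        ZigClangASTUnit_delete(ast);
    } else {
        // Parse failed: errors_ptr/errors_len describe the diagnostics and are
        // released with the matching delete function.
        ZigClangErrorMsg_delete(errors_ptr, errors_len);
    }
}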
src-self-hosted/clang.zig
pub const SUCCESS = 0x00000000; pub const WAIT_0 = 0x00000000; pub const WAIT_1 = 0x00000001; pub const WAIT_2 = 0x00000002; pub const WAIT_3 = 0x00000003; pub const WAIT_63 = 0x0000003F; pub const ABANDONED = 0x00000080; pub const ABANDONED_WAIT_0 = 0x00000080; pub const ABANDONED_WAIT_63 = 0x000000BF; pub const USER_APC = 0x000000C0; pub const ALERTED = 0x00000101; pub const TIMEOUT = 0x00000102; pub const PENDING = 0x00000103; pub const REPARSE = 0x00000104; pub const MORE_ENTRIES = 0x00000105; pub const NOT_ALL_ASSIGNED = 0x00000106; pub const SOME_NOT_MAPPED = 0x00000107; pub const OPLOCK_BREAK_IN_PROGRESS = 0x00000108; pub const VOLUME_MOUNTED = 0x00000109; pub const RXACT_COMMITTED = 0x0000010A; pub const NOTIFY_CLEANUP = 0x0000010B; pub const NOTIFY_ENUM_DIR = 0x0000010C; pub const NO_QUOTAS_FOR_ACCOUNT = 0x0000010D; pub const PRIMARY_TRANSPORT_CONNECT_FAILED = 0x0000010E; pub const PAGE_FAULT_TRANSITION = 0x00000110; pub const PAGE_FAULT_DEMAND_ZERO = 0x00000111; pub const PAGE_FAULT_COPY_ON_WRITE = 0x00000112; pub const PAGE_FAULT_GUARD_PAGE = 0x00000113; pub const PAGE_FAULT_PAGING_FILE = 0x00000114; pub const CACHE_PAGE_LOCKED = 0x00000115; pub const CRASH_DUMP = 0x00000116; pub const BUFFER_ALL_ZEROS = 0x00000117; pub const REPARSE_OBJECT = 0x00000118; pub const RESOURCE_REQUIREMENTS_CHANGED = 0x00000119; pub const TRANSLATION_COMPLETE = 0x00000120; pub const DS_MEMBERSHIP_EVALUATED_LOCALLY = 0x00000121; pub const NOTHING_TO_TERMINATE = 0x00000122; pub const PROCESS_NOT_IN_JOB = 0x00000123; pub const PROCESS_IN_JOB = 0x00000124; pub const VOLSNAP_HIBERNATE_READY = 0x00000125; pub const FSFILTER_OP_COMPLETED_SUCCESSFULLY = 0x00000126; pub const INTERRUPT_VECTOR_ALREADY_CONNECTED = 0x00000127; pub const INTERRUPT_STILL_CONNECTED = 0x00000128; pub const PROCESS_CLONED = 0x00000129; pub const FILE_LOCKED_WITH_ONLY_READERS = 0x0000012A; pub const FILE_LOCKED_WITH_WRITERS = 0x0000012B; pub const RESOURCEMANAGER_READ_ONLY = 0x00000202; pub const WAIT_FOR_OPLOCK = 0x00000367; pub const FLT_IO_COMPLETE = 0x001C0001; pub const FILE_NOT_AVAILABLE = 0xC0000467; pub const OBJECT_NAME_EXISTS = 0x40000000; pub const THREAD_WAS_SUSPENDED = 0x40000001; pub const WORKING_SET_LIMIT_RANGE = 0x40000002; pub const IMAGE_NOT_AT_BASE = 0x40000003; pub const RXACT_STATE_CREATED = 0x40000004; pub const SEGMENT_NOTIFICATION = 0x40000005; pub const LOCAL_USER_SESSION_KEY = 0x40000006; pub const BAD_CURRENT_DIRECTORY = 0x40000007; pub const SERIAL_MORE_WRITES = 0x40000008; pub const REGISTRY_RECOVERED = 0x40000009; pub const FT_READ_RECOVERY_FROM_BACKUP = 0x4000000A; pub const FT_WRITE_RECOVERY = 0x4000000B; pub const SERIAL_COUNTER_TIMEOUT = 0x4000000C; pub const NULL_LM_PASSWORD = 0x4000000D; pub const IMAGE_MACHINE_TYPE_MISMATCH = 0x4000000E; pub const RECEIVE_PARTIAL = 0x4000000F; pub const RECEIVE_EXPEDITED = 0x40000010; pub const RECEIVE_PARTIAL_EXPEDITED = 0x40000011; pub const EVENT_DONE = 0x40000012; pub const EVENT_PENDING = 0x40000013; pub const CHECKING_FILE_SYSTEM = 0x40000014; pub const FATAL_APP_EXIT = 0x40000015; pub const PREDEFINED_HANDLE = 0x40000016; pub const WAS_UNLOCKED = 0x40000017; pub const SERVICE_NOTIFICATION = 0x40000018; pub const WAS_LOCKED = 0x40000019; pub const LOG_HARD_ERROR = 0x4000001A; pub const ALREADY_WIN32 = 0x4000001B; pub const WX86_UNSIMULATE = 0x4000001C; pub const WX86_CONTINUE = 0x4000001D; pub const WX86_SINGLE_STEP = 0x4000001E; pub const WX86_BREAKPOINT = 0x4000001F; pub const WX86_EXCEPTION_CONTINUE = 0x40000020; pub const WX86_EXCEPTION_LASTCHANCE = 
0x40000021; pub const WX86_EXCEPTION_CHAIN = 0x40000022; pub const IMAGE_MACHINE_TYPE_MISMATCH_EXE = 0x40000023; pub const NO_YIELD_PERFORMED = 0x40000024; pub const TIMER_RESUME_IGNORED = 0x40000025; pub const ARBITRATION_UNHANDLED = 0x40000026; pub const CARDBUS_NOT_SUPPORTED = 0x40000027; pub const WX86_CREATEWX86TIB = 0x40000028; pub const MP_PROCESSOR_MISMATCH = 0x40000029; pub const HIBERNATED = 0x4000002A; pub const RESUME_HIBERNATION = 0x4000002B; pub const FIRMWARE_UPDATED = 0x4000002C; pub const DRIVERS_LEAKING_LOCKED_PAGES = 0x4000002D; pub const MESSAGE_RETRIEVED = 0x4000002E; pub const SYSTEM_POWERSTATE_TRANSITION = 0x4000002F; pub const ALPC_CHECK_COMPLETION_LIST = 0x40000030; pub const SYSTEM_POWERSTATE_COMPLEX_TRANSITION = 0x40000031; pub const ACCESS_AUDIT_BY_POLICY = 0x40000032; pub const ABANDON_HIBERFILE = 0x40000033; pub const BIZRULES_NOT_ENABLED = 0x40000034; pub const WAKE_SYSTEM = 0x40000294; pub const DS_SHUTTING_DOWN = 0x40000370; pub const CTX_CDM_CONNECT = 0x400A0004; pub const CTX_CDM_DISCONNECT = 0x400A0005; pub const SXS_RELEASE_ACTIVATION_CONTEXT = 0x4015000D; pub const RECOVERY_NOT_NEEDED = 0x40190034; pub const RM_ALREADY_STARTED = 0x40190035; pub const LOG_NO_RESTART = 0x401A000C; pub const VIDEO_DRIVER_DEBUG_REPORT_REQUEST = 0x401B00EC; pub const GRAPHICS_PARTIAL_DATA_POPULATED = 0x401E000A; pub const GRAPHICS_DRIVER_MISMATCH = 0x401E0117; pub const GRAPHICS_MODE_NOT_PINNED = 0x401E0307; pub const GRAPHICS_NO_PREFERRED_MODE = 0x401E031E; pub const GRAPHICS_DATASET_IS_EMPTY = 0x401E034B; pub const GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET = 0x401E034C; pub const GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED = 0x401E0351; pub const GRAPHICS_UNKNOWN_CHILD_STATUS = 0x401E042F; pub const GRAPHICS_LEADLINK_START_DEFERRED = 0x401E0437; pub const GRAPHICS_POLLING_TOO_FREQUENTLY = 0x401E0439; pub const GRAPHICS_START_DEFERRED = 0x401E043A; pub const NDIS_INDICATION_REQUIRED = 0x40230001; pub const GUARD_PAGE_VIOLATION = 0x80000001; pub const DATATYPE_MISALIGNMENT = 0x80000002; pub const BREAKPOINT = 0x80000003; pub const SINGLE_STEP = 0x80000004; pub const BUFFER_OVERFLOW = 0x80000005; pub const NO_MORE_FILES = 0x80000006; pub const WAKE_SYSTEM_DEBUGGER = 0x80000007; pub const HANDLES_CLOSED = 0x8000000A; pub const NO_INHERITANCE = 0x8000000B; pub const GUID_SUBSTITUTION_MADE = 0x8000000C; pub const PARTIAL_COPY = 0x8000000D; pub const DEVICE_PAPER_EMPTY = 0x8000000E; pub const DEVICE_POWERED_OFF = 0x8000000F; pub const DEVICE_OFF_LINE = 0x80000010; pub const DEVICE_BUSY = 0x80000011; pub const NO_MORE_EAS = 0x80000012; pub const INVALID_EA_NAME = 0x80000013; pub const EA_LIST_INCONSISTENT = 0x80000014; pub const INVALID_EA_FLAG = 0x80000015; pub const VERIFY_REQUIRED = 0x80000016; pub const EXTRANEOUS_INFORMATION = 0x80000017; pub const RXACT_COMMIT_NECESSARY = 0x80000018; pub const NO_MORE_ENTRIES = 0x8000001A; pub const FILEMARK_DETECTED = 0x8000001B; pub const MEDIA_CHANGED = 0x8000001C; pub const BUS_RESET = 0x8000001D; pub const END_OF_MEDIA = 0x8000001E; pub const BEGINNING_OF_MEDIA = 0x8000001F; pub const MEDIA_CHECK = 0x80000020; pub const SETMARK_DETECTED = 0x80000021; pub const NO_DATA_DETECTED = 0x80000022; pub const REDIRECTOR_HAS_OPEN_HANDLES = 0x80000023; pub const SERVER_HAS_OPEN_HANDLES = 0x80000024; pub const ALREADY_DISCONNECTED = 0x80000025; pub const LONGJUMP = 0x80000026; pub const CLEANER_CARTRIDGE_INSTALLED = 0x80000027; pub const PLUGPLAY_QUERY_VETOED = 0x80000028; pub const UNWIND_CONSOLIDATE = 0x80000029; pub const 
REGISTRY_HIVE_RECOVERED = 0x8000002A; pub const DLL_MIGHT_BE_INSECURE = 0x8000002B; pub const DLL_MIGHT_BE_INCOMPATIBLE = 0x8000002C; pub const STOPPED_ON_SYMLINK = 0x8000002D; pub const DEVICE_REQUIRES_CLEANING = 0x80000288; pub const DEVICE_DOOR_OPEN = 0x80000289; pub const DATA_LOST_REPAIR = 0x80000803; pub const CLUSTER_NODE_ALREADY_UP = 0x80130001; pub const CLUSTER_NODE_ALREADY_DOWN = 0x80130002; pub const CLUSTER_NETWORK_ALREADY_ONLINE = 0x80130003; pub const CLUSTER_NETWORK_ALREADY_OFFLINE = 0x80130004; pub const CLUSTER_NODE_ALREADY_MEMBER = 0x80130005; pub const COULD_NOT_RESIZE_LOG = 0x80190009; pub const NO_TXF_METADATA = 0x80190029; pub const CANT_RECOVER_WITH_HANDLE_OPEN = 0x80190031; pub const TXF_METADATA_ALREADY_PRESENT = 0x80190041; pub const TRANSACTION_SCOPE_CALLBACKS_NOT_SET = 0x80190042; pub const VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED = 0x801B00EB; pub const FLT_BUFFER_TOO_SMALL = 0x801C0001; pub const FVE_PARTIAL_METADATA = 0x80210001; pub const FVE_TRANSIENT_STATE = 0x80210002; pub const UNSUCCESSFUL = 0xC0000001; pub const NOT_IMPLEMENTED = 0xC0000002; pub const INVALID_INFO_CLASS = 0xC0000003; pub const INFO_LENGTH_MISMATCH = 0xC0000004; pub const ACCESS_VIOLATION = 0xC0000005; pub const IN_PAGE_ERROR = 0xC0000006; pub const PAGEFILE_QUOTA = 0xC0000007; pub const INVALID_HANDLE = 0xC0000008; pub const BAD_INITIAL_STACK = 0xC0000009; pub const BAD_INITIAL_PC = 0xC000000A; pub const INVALID_CID = 0xC000000B; pub const TIMER_NOT_CANCELED = 0xC000000C; pub const INVALID_PARAMETER = 0xC000000D; pub const NO_SUCH_DEVICE = 0xC000000E; pub const NO_SUCH_FILE = 0xC000000F; pub const INVALID_DEVICE_REQUEST = 0xC0000010; pub const END_OF_FILE = 0xC0000011; pub const WRONG_VOLUME = 0xC0000012; pub const NO_MEDIA_IN_DEVICE = 0xC0000013; pub const UNRECOGNIZED_MEDIA = 0xC0000014; pub const NONEXISTENT_SECTOR = 0xC0000015; pub const MORE_PROCESSING_REQUIRED = 0xC0000016; pub const NO_MEMORY = 0xC0000017; pub const CONFLICTING_ADDRESSES = 0xC0000018; pub const NOT_MAPPED_VIEW = 0xC0000019; pub const UNABLE_TO_FREE_VM = 0xC000001A; pub const UNABLE_TO_DELETE_SECTION = 0xC000001B; pub const INVALID_SYSTEM_SERVICE = 0xC000001C; pub const ILLEGAL_INSTRUCTION = 0xC000001D; pub const INVALID_LOCK_SEQUENCE = 0xC000001E; pub const INVALID_VIEW_SIZE = 0xC000001F; pub const INVALID_FILE_FOR_SECTION = 0xC0000020; pub const ALREADY_COMMITTED = 0xC0000021; pub const ACCESS_DENIED = 0xC0000022; pub const BUFFER_TOO_SMALL = 0xC0000023; pub const OBJECT_TYPE_MISMATCH = 0xC0000024; pub const NONCONTINUABLE_EXCEPTION = 0xC0000025; pub const INVALID_DISPOSITION = 0xC0000026; pub const UNWIND = 0xC0000027; pub const BAD_STACK = 0xC0000028; pub const INVALID_UNWIND_TARGET = 0xC0000029; pub const NOT_LOCKED = 0xC000002A; pub const PARITY_ERROR = 0xC000002B; pub const UNABLE_TO_DECOMMIT_VM = 0xC000002C; pub const NOT_COMMITTED = 0xC000002D; pub const INVALID_PORT_ATTRIBUTES = 0xC000002E; pub const PORT_MESSAGE_TOO_LONG = 0xC000002F; pub const INVALID_PARAMETER_MIX = 0xC0000030; pub const INVALID_QUOTA_LOWER = 0xC0000031; pub const DISK_CORRUPT_ERROR = 0xC0000032; pub const OBJECT_NAME_INVALID = 0xC0000033; pub const OBJECT_NAME_NOT_FOUND = 0xC0000034; pub const OBJECT_NAME_COLLISION = 0xC0000035; pub const PORT_DISCONNECTED = 0xC0000037; pub const DEVICE_ALREADY_ATTACHED = 0xC0000038; pub const OBJECT_PATH_INVALID = 0xC0000039; pub const OBJECT_PATH_NOT_FOUND = 0xC000003A; pub const OBJECT_PATH_SYNTAX_BAD = 0xC000003B; pub const DATA_OVERRUN = 0xC000003C; pub const DATA_LATE_ERROR = 0xC000003D; 
pub const DATA_ERROR = 0xC000003E; pub const CRC_ERROR = 0xC000003F; pub const SECTION_TOO_BIG = 0xC0000040; pub const PORT_CONNECTION_REFUSED = 0xC0000041; pub const INVALID_PORT_HANDLE = 0xC0000042; pub const SHARING_VIOLATION = 0xC0000043; pub const QUOTA_EXCEEDED = 0xC0000044; pub const INVALID_PAGE_PROTECTION = 0xC0000045; pub const MUTANT_NOT_OWNED = 0xC0000046; pub const SEMAPHORE_LIMIT_EXCEEDED = 0xC0000047; pub const PORT_ALREADY_SET = 0xC0000048; pub const SECTION_NOT_IMAGE = 0xC0000049; pub const SUSPEND_COUNT_EXCEEDED = 0xC000004A; pub const THREAD_IS_TERMINATING = 0xC000004B; pub const BAD_WORKING_SET_LIMIT = 0xC000004C; pub const INCOMPATIBLE_FILE_MAP = 0xC000004D; pub const SECTION_PROTECTION = 0xC000004E; pub const EAS_NOT_SUPPORTED = 0xC000004F; pub const EA_TOO_LARGE = 0xC0000050; pub const NONEXISTENT_EA_ENTRY = 0xC0000051; pub const NO_EAS_ON_FILE = 0xC0000052; pub const EA_CORRUPT_ERROR = 0xC0000053; pub const FILE_LOCK_CONFLICT = 0xC0000054; pub const LOCK_NOT_GRANTED = 0xC0000055; pub const DELETE_PENDING = 0xC0000056; pub const CTL_FILE_NOT_SUPPORTED = 0xC0000057; pub const UNKNOWN_REVISION = 0xC0000058; pub const REVISION_MISMATCH = 0xC0000059; pub const INVALID_OWNER = 0xC000005A; pub const INVALID_PRIMARY_GROUP = 0xC000005B; pub const NO_IMPERSONATION_TOKEN = 0xC000005C; pub const CANT_DISABLE_MANDATORY = 0xC000005D; pub const NO_LOGON_SERVERS = 0xC000005E; pub const NO_SUCH_LOGON_SESSION = 0xC000005F; pub const NO_SUCH_PRIVILEGE = 0xC0000060; pub const PRIVILEGE_NOT_HELD = 0xC0000061; pub const INVALID_ACCOUNT_NAME = 0xC0000062; pub const USER_EXISTS = 0xC0000063; pub const NO_SUCH_USER = 0xC0000064; pub const GROUP_EXISTS = 0xC0000065; pub const NO_SUCH_GROUP = 0xC0000066; pub const MEMBER_IN_GROUP = 0xC0000067; pub const MEMBER_NOT_IN_GROUP = 0xC0000068; pub const LAST_ADMIN = 0xC0000069; pub const WRONG_PASSWORD = 0xC000006A; pub const ILL_FORMED_PASSWORD = 0xC000006B; pub const PASSWORD_RESTRICTION = 0xC000006C; pub const LOGON_FAILURE = 0xC000006D; pub const ACCOUNT_RESTRICTION = 0xC000006E; pub const INVALID_LOGON_HOURS = 0xC000006F; pub const INVALID_WORKSTATION = 0xC0000070; pub const PASSWORD_EXPIRED = 0xC0000071; pub const ACCOUNT_DISABLED = 0xC0000072; pub const NONE_MAPPED = 0xC0000073; pub const TOO_MANY_LUIDS_REQUESTED = 0xC0000074; pub const LUIDS_EXHAUSTED = 0xC0000075; pub const INVALID_SUB_AUTHORITY = 0xC0000076; pub const INVALID_ACL = 0xC0000077; pub const INVALID_SID = 0xC0000078; pub const INVALID_SECURITY_DESCR = 0xC0000079; pub const PROCEDURE_NOT_FOUND = 0xC000007A; pub const INVALID_IMAGE_FORMAT = 0xC000007B; pub const NO_TOKEN = 0xC000007C; pub const BAD_INHERITANCE_ACL = 0xC000007D; pub const RANGE_NOT_LOCKED = 0xC000007E; pub const DISK_FULL = 0xC000007F; pub const SERVER_DISABLED = 0xC0000080; pub const SERVER_NOT_DISABLED = 0xC0000081; pub const TOO_MANY_GUIDS_REQUESTED = 0xC0000082; pub const GUIDS_EXHAUSTED = 0xC0000083; pub const INVALID_ID_AUTHORITY = 0xC0000084; pub const AGENTS_EXHAUSTED = 0xC0000085; pub const INVALID_VOLUME_LABEL = 0xC0000086; pub const SECTION_NOT_EXTENDED = 0xC0000087; pub const NOT_MAPPED_DATA = 0xC0000088; pub const RESOURCE_DATA_NOT_FOUND = 0xC0000089; pub const RESOURCE_TYPE_NOT_FOUND = 0xC000008A; pub const RESOURCE_NAME_NOT_FOUND = 0xC000008B; pub const ARRAY_BOUNDS_EXCEEDED = 0xC000008C; pub const FLOAT_DENORMAL_OPERAND = 0xC000008D; pub const FLOAT_DIVIDE_BY_ZERO = 0xC000008E; pub const FLOAT_INEXACT_RESULT = 0xC000008F; pub const FLOAT_INVALID_OPERATION = 0xC0000090; pub const FLOAT_OVERFLOW = 
0xC0000091; pub const FLOAT_STACK_CHECK = 0xC0000092; pub const FLOAT_UNDERFLOW = 0xC0000093; pub const INTEGER_DIVIDE_BY_ZERO = 0xC0000094; pub const INTEGER_OVERFLOW = 0xC0000095; pub const PRIVILEGED_INSTRUCTION = 0xC0000096; pub const TOO_MANY_PAGING_FILES = 0xC0000097; pub const FILE_INVALID = 0xC0000098; pub const ALLOTTED_SPACE_EXCEEDED = 0xC0000099; pub const INSUFFICIENT_RESOURCES = 0xC000009A; pub const DFS_EXIT_PATH_FOUND = 0xC000009B; pub const DEVICE_DATA_ERROR = 0xC000009C; pub const DEVICE_NOT_CONNECTED = 0xC000009D; pub const FREE_VM_NOT_AT_BASE = 0xC000009F; pub const MEMORY_NOT_ALLOCATED = 0xC00000A0; pub const WORKING_SET_QUOTA = 0xC00000A1; pub const MEDIA_WRITE_PROTECTED = 0xC00000A2; pub const DEVICE_NOT_READY = 0xC00000A3; pub const INVALID_GROUP_ATTRIBUTES = 0xC00000A4; pub const BAD_IMPERSONATION_LEVEL = 0xC00000A5; pub const CANT_OPEN_ANONYMOUS = 0xC00000A6; pub const BAD_VALIDATION_CLASS = 0xC00000A7; pub const BAD_TOKEN_TYPE = 0xC00000A8; pub const BAD_MASTER_BOOT_RECORD = 0xC00000A9; pub const INSTRUCTION_MISALIGNMENT = 0xC00000AA; pub const INSTANCE_NOT_AVAILABLE = 0xC00000AB; pub const PIPE_NOT_AVAILABLE = 0xC00000AC; pub const INVALID_PIPE_STATE = 0xC00000AD; pub const PIPE_BUSY = 0xC00000AE; pub const ILLEGAL_FUNCTION = 0xC00000AF; pub const PIPE_DISCONNECTED = 0xC00000B0; pub const PIPE_CLOSING = 0xC00000B1; pub const PIPE_CONNECTED = 0xC00000B2; pub const PIPE_LISTENING = 0xC00000B3; pub const INVALID_READ_MODE = 0xC00000B4; pub const IO_TIMEOUT = 0xC00000B5; pub const FILE_FORCED_CLOSED = 0xC00000B6; pub const PROFILING_NOT_STARTED = 0xC00000B7; pub const PROFILING_NOT_STOPPED = 0xC00000B8; pub const COULD_NOT_INTERPRET = 0xC00000B9; pub const FILE_IS_A_DIRECTORY = 0xC00000BA; pub const NOT_SUPPORTED = 0xC00000BB; pub const REMOTE_NOT_LISTENING = 0xC00000BC; pub const DUPLICATE_NAME = 0xC00000BD; pub const BAD_NETWORK_PATH = 0xC00000BE; pub const NETWORK_BUSY = 0xC00000BF; pub const DEVICE_DOES_NOT_EXIST = 0xC00000C0; pub const TOO_MANY_COMMANDS = 0xC00000C1; pub const ADAPTER_HARDWARE_ERROR = 0xC00000C2; pub const INVALID_NETWORK_RESPONSE = 0xC00000C3; pub const UNEXPECTED_NETWORK_ERROR = 0xC00000C4; pub const BAD_REMOTE_ADAPTER = 0xC00000C5; pub const PRINT_QUEUE_FULL = 0xC00000C6; pub const NO_SPOOL_SPACE = 0xC00000C7; pub const PRINT_CANCELLED = 0xC00000C8; pub const NETWORK_NAME_DELETED = 0xC00000C9; pub const NETWORK_ACCESS_DENIED = 0xC00000CA; pub const BAD_DEVICE_TYPE = 0xC00000CB; pub const BAD_NETWORK_NAME = 0xC00000CC; pub const TOO_MANY_NAMES = 0xC00000CD; pub const TOO_MANY_SESSIONS = 0xC00000CE; pub const SHARING_PAUSED = 0xC00000CF; pub const REQUEST_NOT_ACCEPTED = 0xC00000D0; pub const REDIRECTOR_PAUSED = 0xC00000D1; pub const NET_WRITE_FAULT = 0xC00000D2; pub const PROFILING_AT_LIMIT = 0xC00000D3; pub const NOT_SAME_DEVICE = 0xC00000D4; pub const FILE_RENAMED = 0xC00000D5; pub const VIRTUAL_CIRCUIT_CLOSED = 0xC00000D6; pub const NO_SECURITY_ON_OBJECT = 0xC00000D7; pub const CANT_WAIT = 0xC00000D8; pub const PIPE_EMPTY = 0xC00000D9; pub const CANT_ACCESS_DOMAIN_INFO = 0xC00000DA; pub const CANT_TERMINATE_SELF = 0xC00000DB; pub const INVALID_SERVER_STATE = 0xC00000DC; pub const INVALID_DOMAIN_STATE = 0xC00000DD; pub const INVALID_DOMAIN_ROLE = 0xC00000DE; pub const NO_SUCH_DOMAIN = 0xC00000DF; pub const DOMAIN_EXISTS = 0xC00000E0; pub const DOMAIN_LIMIT_EXCEEDED = 0xC00000E1; pub const OPLOCK_NOT_GRANTED = 0xC00000E2; pub const INVALID_OPLOCK_PROTOCOL = 0xC00000E3; pub const INTERNAL_DB_CORRUPTION = 0xC00000E4; pub const INTERNAL_ERROR = 
0xC00000E5; pub const GENERIC_NOT_MAPPED = 0xC00000E6; pub const BAD_DESCRIPTOR_FORMAT = 0xC00000E7; pub const INVALID_USER_BUFFER = 0xC00000E8; pub const UNEXPECTED_IO_ERROR = 0xC00000E9; pub const UNEXPECTED_MM_CREATE_ERR = 0xC00000EA; pub const UNEXPECTED_MM_MAP_ERROR = 0xC00000EB; pub const UNEXPECTED_MM_EXTEND_ERR = 0xC00000EC; pub const NOT_LOGON_PROCESS = 0xC00000ED; pub const LOGON_SESSION_EXISTS = 0xC00000EE; pub const INVALID_PARAMETER_1 = 0xC00000EF; pub const INVALID_PARAMETER_2 = 0xC00000F0; pub const INVALID_PARAMETER_3 = 0xC00000F1; pub const INVALID_PARAMETER_4 = 0xC00000F2; pub const INVALID_PARAMETER_5 = 0xC00000F3; pub const INVALID_PARAMETER_6 = 0xC00000F4; pub const INVALID_PARAMETER_7 = 0xC00000F5; pub const INVALID_PARAMETER_8 = 0xC00000F6; pub const INVALID_PARAMETER_9 = 0xC00000F7; pub const INVALID_PARAMETER_10 = 0xC00000F8; pub const INVALID_PARAMETER_11 = 0xC00000F9; pub const INVALID_PARAMETER_12 = 0xC00000FA; pub const REDIRECTOR_NOT_STARTED = 0xC00000FB; pub const REDIRECTOR_STARTED = 0xC00000FC; pub const STACK_OVERFLOW = 0xC00000FD; pub const NO_SUCH_PACKAGE = 0xC00000FE; pub const BAD_FUNCTION_TABLE = 0xC00000FF; pub const VARIABLE_NOT_FOUND = 0xC0000100; pub const DIRECTORY_NOT_EMPTY = 0xC0000101; pub const FILE_CORRUPT_ERROR = 0xC0000102; pub const NOT_A_DIRECTORY = 0xC0000103; pub const BAD_LOGON_SESSION_STATE = 0xC0000104; pub const LOGON_SESSION_COLLISION = 0xC0000105; pub const NAME_TOO_LONG = 0xC0000106; pub const FILES_OPEN = 0xC0000107; pub const CONNECTION_IN_USE = 0xC0000108; pub const MESSAGE_NOT_FOUND = 0xC0000109; pub const PROCESS_IS_TERMINATING = 0xC000010A; pub const INVALID_LOGON_TYPE = 0xC000010B; pub const NO_GUID_TRANSLATION = 0xC000010C; pub const CANNOT_IMPERSONATE = 0xC000010D; pub const IMAGE_ALREADY_LOADED = 0xC000010E; pub const NO_LDT = 0xC0000117; pub const INVALID_LDT_SIZE = 0xC0000118; pub const INVALID_LDT_OFFSET = 0xC0000119; pub const INVALID_LDT_DESCRIPTOR = 0xC000011A; pub const INVALID_IMAGE_NE_FORMAT = 0xC000011B; pub const RXACT_INVALID_STATE = 0xC000011C; pub const RXACT_COMMIT_FAILURE = 0xC000011D; pub const MAPPED_FILE_SIZE_ZERO = 0xC000011E; pub const TOO_MANY_OPENED_FILES = 0xC000011F; pub const CANCELLED = 0xC0000120; pub const CANNOT_DELETE = 0xC0000121; pub const INVALID_COMPUTER_NAME = 0xC0000122; pub const FILE_DELETED = 0xC0000123; pub const SPECIAL_ACCOUNT = 0xC0000124; pub const SPECIAL_GROUP = 0xC0000125; pub const SPECIAL_USER = 0xC0000126; pub const MEMBERS_PRIMARY_GROUP = 0xC0000127; pub const FILE_CLOSED = 0xC0000128; pub const TOO_MANY_THREADS = 0xC0000129; pub const THREAD_NOT_IN_PROCESS = 0xC000012A; pub const TOKEN_ALREADY_IN_USE = 0xC000012B; pub const PAGEFILE_QUOTA_EXCEEDED = 0xC000012C; pub const COMMITMENT_LIMIT = 0xC000012D; pub const INVALID_IMAGE_LE_FORMAT = 0xC000012E; pub const INVALID_IMAGE_NOT_MZ = 0xC000012F; pub const INVALID_IMAGE_PROTECT = 0xC0000130; pub const INVALID_IMAGE_WIN_16 = 0xC0000131; pub const LOGON_SERVER_CONFLICT = 0xC0000132; pub const TIME_DIFFERENCE_AT_DC = 0xC0000133; pub const SYNCHRONIZATION_REQUIRED = 0xC0000134; pub const DLL_NOT_FOUND = 0xC0000135; pub const OPEN_FAILED = 0xC0000136; pub const IO_PRIVILEGE_FAILED = 0xC0000137; pub const ORDINAL_NOT_FOUND = 0xC0000138; pub const ENTRYPOINT_NOT_FOUND = 0xC0000139; pub const CONTROL_C_EXIT = 0xC000013A; pub const LOCAL_DISCONNECT = 0xC000013B; pub const REMOTE_DISCONNECT = 0xC000013C; pub const REMOTE_RESOURCES = 0xC000013D; pub const LINK_FAILED = 0xC000013E; pub const LINK_TIMEOUT = 0xC000013F; pub const 
INVALID_CONNECTION = 0xC0000140; pub const INVALID_ADDRESS = 0xC0000141; pub const DLL_INIT_FAILED = 0xC0000142; pub const MISSING_SYSTEMFILE = 0xC0000143; pub const UNHANDLED_EXCEPTION = 0xC0000144; pub const APP_INIT_FAILURE = 0xC0000145; pub const PAGEFILE_CREATE_FAILED = 0xC0000146; pub const NO_PAGEFILE = 0xC0000147; pub const INVALID_LEVEL = 0xC0000148; pub const WRONG_PASSWORD_CORE = 0xC0000149; pub const ILLEGAL_FLOAT_CONTEXT = 0xC000014A; pub const PIPE_BROKEN = 0xC000014B; pub const REGISTRY_CORRUPT = 0xC000014C; pub const REGISTRY_IO_FAILED = 0xC000014D; pub const NO_EVENT_PAIR = 0xC000014E; pub const UNRECOGNIZED_VOLUME = 0xC000014F; pub const SERIAL_NO_DEVICE_INITED = 0xC0000150; pub const NO_SUCH_ALIAS = 0xC0000151; pub const MEMBER_NOT_IN_ALIAS = 0xC0000152; pub const MEMBER_IN_ALIAS = 0xC0000153; pub const ALIAS_EXISTS = 0xC0000154; pub const LOGON_NOT_GRANTED = 0xC0000155; pub const TOO_MANY_SECRETS = 0xC0000156; pub const SECRET_TOO_LONG = 0xC0000157; pub const INTERNAL_DB_ERROR = 0xC0000158; pub const FULLSCREEN_MODE = 0xC0000159; pub const TOO_MANY_CONTEXT_IDS = 0xC000015A; pub const LOGON_TYPE_NOT_GRANTED = 0xC000015B; pub const NOT_REGISTRY_FILE = 0xC000015C; pub const NT_CROSS_ENCRYPTION_REQUIRED = 0xC000015D; pub const DOMAIN_CTRLR_CONFIG_ERROR = 0xC000015E; pub const FT_MISSING_MEMBER = 0xC000015F; pub const ILL_FORMED_SERVICE_ENTRY = 0xC0000160; pub const ILLEGAL_CHARACTER = 0xC0000161; pub const UNMAPPABLE_CHARACTER = 0xC0000162; pub const UNDEFINED_CHARACTER = 0xC0000163; pub const FLOPPY_VOLUME = 0xC0000164; pub const FLOPPY_ID_MARK_NOT_FOUND = 0xC0000165; pub const FLOPPY_WRONG_CYLINDER = 0xC0000166; pub const FLOPPY_UNKNOWN_ERROR = 0xC0000167; pub const FLOPPY_BAD_REGISTERS = 0xC0000168; pub const DISK_RECALIBRATE_FAILED = 0xC0000169; pub const DISK_OPERATION_FAILED = 0xC000016A; pub const DISK_RESET_FAILED = 0xC000016B; pub const SHARED_IRQ_BUSY = 0xC000016C; pub const FT_ORPHANING = 0xC000016D; pub const BIOS_FAILED_TO_CONNECT_INTERRUPT = 0xC000016E; pub const PARTITION_FAILURE = 0xC0000172; pub const INVALID_BLOCK_LENGTH = 0xC0000173; pub const DEVICE_NOT_PARTITIONED = 0xC0000174; pub const UNABLE_TO_LOCK_MEDIA = 0xC0000175; pub const UNABLE_TO_UNLOAD_MEDIA = 0xC0000176; pub const EOM_OVERFLOW = 0xC0000177; pub const NO_MEDIA = 0xC0000178; pub const NO_SUCH_MEMBER = 0xC000017A; pub const INVALID_MEMBER = 0xC000017B; pub const KEY_DELETED = 0xC000017C; pub const NO_LOG_SPACE = 0xC000017D; pub const TOO_MANY_SIDS = 0xC000017E; pub const LM_CROSS_ENCRYPTION_REQUIRED = 0xC000017F; pub const KEY_HAS_CHILDREN = 0xC0000180; pub const CHILD_MUST_BE_VOLATILE = 0xC0000181; pub const DEVICE_CONFIGURATION_ERROR = 0xC0000182; pub const DRIVER_INTERNAL_ERROR = 0xC0000183; pub const INVALID_DEVICE_STATE = 0xC0000184; pub const IO_DEVICE_ERROR = 0xC0000185; pub const DEVICE_PROTOCOL_ERROR = 0xC0000186; pub const BACKUP_CONTROLLER = 0xC0000187; pub const LOG_FILE_FULL = 0xC0000188; pub const TOO_LATE = 0xC0000189; pub const NO_TRUST_LSA_SECRET = 0xC000018A; pub const NO_TRUST_SAM_ACCOUNT = 0xC000018B; pub const TRUSTED_DOMAIN_FAILURE = 0xC000018C; pub const TRUSTED_RELATIONSHIP_FAILURE = 0xC000018D; pub const EVENTLOG_FILE_CORRUPT = 0xC000018E; pub const EVENTLOG_CANT_START = 0xC000018F; pub const TRUST_FAILURE = 0xC0000190; pub const MUTANT_LIMIT_EXCEEDED = 0xC0000191; pub const NETLOGON_NOT_STARTED = 0xC0000192; pub const ACCOUNT_EXPIRED = 0xC0000193; pub const POSSIBLE_DEADLOCK = 0xC0000194; pub const NETWORK_CREDENTIAL_CONFLICT = 0xC0000195; pub const 
REMOTE_SESSION_LIMIT = 0xC0000196; pub const EVENTLOG_FILE_CHANGED = 0xC0000197; pub const NOLOGON_INTERDOMAIN_TRUST_ACCOUNT = 0xC0000198; pub const NOLOGON_WORKSTATION_TRUST_ACCOUNT = 0xC0000199; pub const NOLOGON_SERVER_TRUST_ACCOUNT = 0xC000019A; pub const DOMAIN_TRUST_INCONSISTENT = 0xC000019B; pub const FS_DRIVER_REQUIRED = 0xC000019C; pub const IMAGE_ALREADY_LOADED_AS_DLL = 0xC000019D; pub const INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING = 0xC000019E; pub const SHORT_NAMES_NOT_ENABLED_ON_VOLUME = 0xC000019F; pub const SECURITY_STREAM_IS_INCONSISTENT = 0xC00001A0; pub const INVALID_LOCK_RANGE = 0xC00001A1; pub const INVALID_ACE_CONDITION = 0xC00001A2; pub const IMAGE_SUBSYSTEM_NOT_PRESENT = 0xC00001A3; pub const NOTIFICATION_GUID_ALREADY_DEFINED = 0xC00001A4; pub const NETWORK_OPEN_RESTRICTION = 0xC0000201; pub const NO_USER_SESSION_KEY = 0xC0000202; pub const USER_SESSION_DELETED = 0xC0000203; pub const RESOURCE_LANG_NOT_FOUND = 0xC0000204; pub const INSUFF_SERVER_RESOURCES = 0xC0000205; pub const INVALID_BUFFER_SIZE = 0xC0000206; pub const INVALID_ADDRESS_COMPONENT = 0xC0000207; pub const INVALID_ADDRESS_WILDCARD = 0xC0000208; pub const TOO_MANY_ADDRESSES = 0xC0000209; pub const ADDRESS_ALREADY_EXISTS = 0xC000020A; pub const ADDRESS_CLOSED = 0xC000020B; pub const CONNECTION_DISCONNECTED = 0xC000020C; pub const CONNECTION_RESET = 0xC000020D; pub const TOO_MANY_NODES = 0xC000020E; pub const TRANSACTION_ABORTED = 0xC000020F; pub const TRANSACTION_TIMED_OUT = 0xC0000210; pub const TRANSACTION_NO_RELEASE = 0xC0000211; pub const TRANSACTION_NO_MATCH = 0xC0000212; pub const TRANSACTION_RESPONDED = 0xC0000213; pub const TRANSACTION_INVALID_ID = 0xC0000214; pub const TRANSACTION_INVALID_TYPE = 0xC0000215; pub const NOT_SERVER_SESSION = 0xC0000216; pub const NOT_CLIENT_SESSION = 0xC0000217; pub const CANNOT_LOAD_REGISTRY_FILE = 0xC0000218; pub const DEBUG_ATTACH_FAILED = 0xC0000219; pub const SYSTEM_PROCESS_TERMINATED = 0xC000021A; pub const DATA_NOT_ACCEPTED = 0xC000021B; pub const NO_BROWSER_SERVERS_FOUND = 0xC000021C; pub const VDM_HARD_ERROR = 0xC000021D; pub const DRIVER_CANCEL_TIMEOUT = 0xC000021E; pub const REPLY_MESSAGE_MISMATCH = 0xC000021F; pub const MAPPED_ALIGNMENT = 0xC0000220; pub const IMAGE_CHECKSUM_MISMATCH = 0xC0000221; pub const LOST_WRITEBEHIND_DATA = 0xC0000222; pub const CLIENT_SERVER_PARAMETERS_INVALID = 0xC0000223; pub const PASSWORD_MUST_CHANGE = 0xC0000224; pub const NOT_FOUND = 0xC0000225; pub const NOT_TINY_STREAM = 0xC0000226; pub const RECOVERY_FAILURE = 0xC0000227; pub const STACK_OVERFLOW_READ = 0xC0000228; pub const FAIL_CHECK = 0xC0000229; pub const DUPLICATE_OBJECTID = 0xC000022A; pub const OBJECTID_EXISTS = 0xC000022B; pub const CONVERT_TO_LARGE = 0xC000022C; pub const RETRY = 0xC000022D; pub const FOUND_OUT_OF_SCOPE = 0xC000022E; pub const ALLOCATE_BUCKET = 0xC000022F; pub const PROPSET_NOT_FOUND = 0xC0000230; pub const MARSHALL_OVERFLOW = 0xC0000231; pub const INVALID_VARIANT = 0xC0000232; pub const DOMAIN_CONTROLLER_NOT_FOUND = 0xC0000233; pub const ACCOUNT_LOCKED_OUT = 0xC0000234; pub const HANDLE_NOT_CLOSABLE = 0xC0000235; pub const CONNECTION_REFUSED = 0xC0000236; pub const GRACEFUL_DISCONNECT = 0xC0000237; pub const ADDRESS_ALREADY_ASSOCIATED = 0xC0000238; pub const ADDRESS_NOT_ASSOCIATED = 0xC0000239; pub const CONNECTION_INVALID = 0xC000023A; pub const CONNECTION_ACTIVE = 0xC000023B; pub const NETWORK_UNREACHABLE = 0xC000023C; pub const HOST_UNREACHABLE = 0xC000023D; pub const PROTOCOL_UNREACHABLE = 0xC000023E; pub const PORT_UNREACHABLE 
= 0xC000023F; pub const REQUEST_ABORTED = 0xC0000240; pub const CONNECTION_ABORTED = 0xC0000241; pub const BAD_COMPRESSION_BUFFER = 0xC0000242; pub const USER_MAPPED_FILE = 0xC0000243; pub const AUDIT_FAILED = 0xC0000244; pub const TIMER_RESOLUTION_NOT_SET = 0xC0000245; pub const CONNECTION_COUNT_LIMIT = 0xC0000246; pub const LOGIN_TIME_RESTRICTION = 0xC0000247; pub const LOGIN_WKSTA_RESTRICTION = 0xC0000248; pub const IMAGE_MP_UP_MISMATCH = 0xC0000249; pub const INSUFFICIENT_LOGON_INFO = 0xC0000250; pub const BAD_DLL_ENTRYPOINT = 0xC0000251; pub const BAD_SERVICE_ENTRYPOINT = 0xC0000252; pub const LPC_REPLY_LOST = 0xC0000253; pub const IP_ADDRESS_CONFLICT1 = 0xC0000254; pub const IP_ADDRESS_CONFLICT2 = 0xC0000255; pub const REGISTRY_QUOTA_LIMIT = 0xC0000256; pub const PATH_NOT_COVERED = 0xC0000257; pub const NO_CALLBACK_ACTIVE = 0xC0000258; pub const LICENSE_QUOTA_EXCEEDED = 0xC0000259; pub const PWD_TOO_SHORT = 0xC000025A; pub const PWD_TOO_RECENT = 0xC000025B; pub const PWD_HISTORY_CONFLICT = 0xC000025C; pub const PLUGPLAY_NO_DEVICE = 0xC000025E; pub const UNSUPPORTED_COMPRESSION = 0xC000025F; pub const INVALID_HW_PROFILE = 0xC0000260; pub const INVALID_PLUGPLAY_DEVICE_PATH = 0xC0000261; pub const DRIVER_ORDINAL_NOT_FOUND = 0xC0000262; pub const DRIVER_ENTRYPOINT_NOT_FOUND = 0xC0000263; pub const RESOURCE_NOT_OWNED = 0xC0000264; pub const TOO_MANY_LINKS = 0xC0000265; pub const QUOTA_LIST_INCONSISTENT = 0xC0000266; pub const FILE_IS_OFFLINE = 0xC0000267; pub const EVALUATION_EXPIRATION = 0xC0000268; pub const ILLEGAL_DLL_RELOCATION = 0xC0000269; pub const LICENSE_VIOLATION = 0xC000026A; pub const DLL_INIT_FAILED_LOGOFF = 0xC000026B; pub const DRIVER_UNABLE_TO_LOAD = 0xC000026C; pub const DFS_UNAVAILABLE = 0xC000026D; pub const VOLUME_DISMOUNTED = 0xC000026E; pub const WX86_INTERNAL_ERROR = 0xC000026F; pub const WX86_FLOAT_STACK_CHECK = 0xC0000270; pub const VALIDATE_CONTINUE = 0xC0000271; pub const NO_MATCH = 0xC0000272; pub const NO_MORE_MATCHES = 0xC0000273; pub const NOT_A_REPARSE_POINT = 0xC0000275; pub const IO_REPARSE_TAG_INVALID = 0xC0000276; pub const IO_REPARSE_TAG_MISMATCH = 0xC0000277; pub const IO_REPARSE_DATA_INVALID = 0xC0000278; pub const IO_REPARSE_TAG_NOT_HANDLED = 0xC0000279; pub const REPARSE_POINT_NOT_RESOLVED = 0xC0000280; pub const DIRECTORY_IS_A_REPARSE_POINT = 0xC0000281; pub const RANGE_LIST_CONFLICT = 0xC0000282; pub const SOURCE_ELEMENT_EMPTY = 0xC0000283; pub const DESTINATION_ELEMENT_FULL = 0xC0000284; pub const ILLEGAL_ELEMENT_ADDRESS = 0xC0000285; pub const MAGAZINE_NOT_PRESENT = 0xC0000286; pub const REINITIALIZATION_NEEDED = 0xC0000287; pub const ENCRYPTION_FAILED = 0xC000028A; pub const DECRYPTION_FAILED = 0xC000028B; pub const RANGE_NOT_FOUND = 0xC000028C; pub const NO_RECOVERY_POLICY = 0xC000028D; pub const NO_EFS = 0xC000028E; pub const WRONG_EFS = 0xC000028F; pub const NO_USER_KEYS = 0xC0000290; pub const FILE_NOT_ENCRYPTED = 0xC0000291; pub const NOT_EXPORT_FORMAT = 0xC0000292; pub const FILE_ENCRYPTED = 0xC0000293; pub const WMI_GUID_NOT_FOUND = 0xC0000295; pub const WMI_INSTANCE_NOT_FOUND = 0xC0000296; pub const WMI_ITEMID_NOT_FOUND = 0xC0000297; pub const WMI_TRY_AGAIN = 0xC0000298; pub const SHARED_POLICY = 0xC0000299; pub const POLICY_OBJECT_NOT_FOUND = 0xC000029A; pub const POLICY_ONLY_IN_DS = 0xC000029B; pub const VOLUME_NOT_UPGRADED = 0xC000029C; pub const REMOTE_STORAGE_NOT_ACTIVE = 0xC000029D; pub const REMOTE_STORAGE_MEDIA_ERROR = 0xC000029E; pub const NO_TRACKING_SERVICE = 0xC000029F; pub const SERVER_SID_MISMATCH = 0xC00002A0; pub const 
DS_NO_ATTRIBUTE_OR_VALUE = 0xC00002A1; pub const DS_INVALID_ATTRIBUTE_SYNTAX = 0xC00002A2; pub const DS_ATTRIBUTE_TYPE_UNDEFINED = 0xC00002A3; pub const DS_ATTRIBUTE_OR_VALUE_EXISTS = 0xC00002A4; pub const DS_BUSY = 0xC00002A5; pub const DS_UNAVAILABLE = 0xC00002A6; pub const DS_NO_RIDS_ALLOCATED = 0xC00002A7; pub const DS_NO_MORE_RIDS = 0xC00002A8; pub const DS_INCORRECT_ROLE_OWNER = 0xC00002A9; pub const DS_RIDMGR_INIT_ERROR = 0xC00002AA; pub const DS_OBJ_CLASS_VIOLATION = 0xC00002AB; pub const DS_CANT_ON_NON_LEAF = 0xC00002AC; pub const DS_CANT_ON_RDN = 0xC00002AD; pub const DS_CANT_MOD_OBJ_CLASS = 0xC00002AE; pub const DS_CROSS_DOM_MOVE_FAILED = 0xC00002AF; pub const DS_GC_NOT_AVAILABLE = 0xC00002B0; pub const DIRECTORY_SERVICE_REQUIRED = 0xC00002B1; pub const REPARSE_ATTRIBUTE_CONFLICT = 0xC00002B2; pub const CANT_ENABLE_DENY_ONLY = 0xC00002B3; pub const FLOAT_MULTIPLE_FAULTS = 0xC00002B4; pub const FLOAT_MULTIPLE_TRAPS = 0xC00002B5; pub const DEVICE_REMOVED = 0xC00002B6; pub const JOURNAL_DELETE_IN_PROGRESS = 0xC00002B7; pub const JOURNAL_NOT_ACTIVE = 0xC00002B8; pub const NOINTERFACE = 0xC00002B9; pub const DS_ADMIN_LIMIT_EXCEEDED = 0xC00002C1; pub const DRIVER_FAILED_SLEEP = 0xC00002C2; pub const MUTUAL_AUTHENTICATION_FAILED = 0xC00002C3; pub const CORRUPT_SYSTEM_FILE = 0xC00002C4; pub const DATATYPE_MISALIGNMENT_ERROR = 0xC00002C5; pub const WMI_READ_ONLY = 0xC00002C6; pub const WMI_SET_FAILURE = 0xC00002C7; pub const COMMITMENT_MINIMUM = 0xC00002C8; pub const REG_NAT_CONSUMPTION = 0xC00002C9; pub const TRANSPORT_FULL = 0xC00002CA; pub const DS_SAM_INIT_FAILURE = 0xC00002CB; pub const ONLY_IF_CONNECTED = 0xC00002CC; pub const DS_SENSITIVE_GROUP_VIOLATION = 0xC00002CD; pub const PNP_RESTART_ENUMERATION = 0xC00002CE; pub const JOURNAL_ENTRY_DELETED = 0xC00002CF; pub const DS_CANT_MOD_PRIMARYGROUPID = 0xC00002D0; pub const SYSTEM_IMAGE_BAD_SIGNATURE = 0xC00002D1; pub const PNP_REBOOT_REQUIRED = 0xC00002D2; pub const POWER_STATE_INVALID = 0xC00002D3; pub const DS_INVALID_GROUP_TYPE = 0xC00002D4; pub const DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN = 0xC00002D5; pub const DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN = 0xC00002D6; pub const DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER = 0xC00002D7; pub const DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER = 0xC00002D8; pub const DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER = 0xC00002D9; pub const DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER = 0xC00002DA; pub const DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER = 0xC00002DB; pub const DS_HAVE_PRIMARY_MEMBERS = 0xC00002DC; pub const WMI_NOT_SUPPORTED = 0xC00002DD; pub const INSUFFICIENT_POWER = 0xC00002DE; pub const SAM_NEED_BOOTKEY_PASSWORD = 0xC00002DF; pub const SAM_NEED_BOOTKEY_FLOPPY = 0xC00002E0; pub const DS_CANT_START = 0xC00002E1; pub const DS_INIT_FAILURE = 0xC00002E2; pub const SAM_INIT_FAILURE = 0xC00002E3; pub const DS_GC_REQUIRED = 0xC00002E4; pub const DS_LOCAL_MEMBER_OF_LOCAL_ONLY = 0xC00002E5; pub const DS_NO_FPO_IN_UNIVERSAL_GROUPS = 0xC00002E6; pub const DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED = 0xC00002E7; pub const CURRENT_DOMAIN_NOT_ALLOWED = 0xC00002E9; pub const CANNOT_MAKE = 0xC00002EA; pub const SYSTEM_SHUTDOWN = 0xC00002EB; pub const DS_INIT_FAILURE_CONSOLE = 0xC00002EC; pub const DS_SAM_INIT_FAILURE_CONSOLE = 0xC00002ED; pub const UNFINISHED_CONTEXT_DELETED = 0xC00002EE; pub const NO_TGT_REPLY = 0xC00002EF; pub const OBJECTID_NOT_FOUND = 0xC00002F0; pub const NO_IP_ADDRESSES = 0xC00002F1; pub const WRONG_CREDENTIAL_HANDLE = 0xC00002F2; pub const CRYPTO_SYSTEM_INVALID = 0xC00002F3; pub const MAX_REFERRALS_EXCEEDED = 
0xC00002F4; pub const MUST_BE_KDC = 0xC00002F5; pub const STRONG_CRYPTO_NOT_SUPPORTED = 0xC00002F6; pub const TOO_MANY_PRINCIPALS = 0xC00002F7; pub const NO_PA_DATA = 0xC00002F8; pub const PKINIT_NAME_MISMATCH = 0xC00002F9; pub const SMARTCARD_LOGON_REQUIRED = 0xC00002FA; pub const KDC_INVALID_REQUEST = 0xC00002FB; pub const KDC_UNABLE_TO_REFER = 0xC00002FC; pub const KDC_UNKNOWN_ETYPE = 0xC00002FD; pub const SHUTDOWN_IN_PROGRESS = 0xC00002FE; pub const SERVER_SHUTDOWN_IN_PROGRESS = 0xC00002FF; pub const NOT_SUPPORTED_ON_SBS = 0xC0000300; pub const WMI_GUID_DISCONNECTED = 0xC0000301; pub const WMI_ALREADY_DISABLED = 0xC0000302; pub const WMI_ALREADY_ENABLED = 0xC0000303; pub const MFT_TOO_FRAGMENTED = 0xC0000304; pub const COPY_PROTECTION_FAILURE = 0xC0000305; pub const CSS_AUTHENTICATION_FAILURE = 0xC0000306; pub const CSS_KEY_NOT_PRESENT = 0xC0000307; pub const CSS_KEY_NOT_ESTABLISHED = 0xC0000308; pub const CSS_SCRAMBLED_SECTOR = 0xC0000309; pub const CSS_REGION_MISMATCH = 0xC000030A; pub const CSS_RESETS_EXHAUSTED = 0xC000030B; pub const PKINIT_FAILURE = 0xC0000320; pub const SMARTCARD_SUBSYSTEM_FAILURE = 0xC0000321; pub const NO_KERB_KEY = 0xC0000322; pub const HOST_DOWN = 0xC0000350; pub const UNSUPPORTED_PREAUTH = 0xC0000351; pub const EFS_ALG_BLOB_TOO_BIG = 0xC0000352; pub const PORT_NOT_SET = 0xC0000353; pub const DEBUGGER_INACTIVE = 0xC0000354; pub const DS_VERSION_CHECK_FAILURE = 0xC0000355; pub const AUDITING_DISABLED = 0xC0000356; pub const PRENT4_MACHINE_ACCOUNT = 0xC0000357; pub const DS_AG_CANT_HAVE_UNIVERSAL_MEMBER = 0xC0000358; pub const INVALID_IMAGE_WIN_32 = 0xC0000359; pub const INVALID_IMAGE_WIN_64 = 0xC000035A; pub const BAD_BINDINGS = 0xC000035B; pub const NETWORK_SESSION_EXPIRED = 0xC000035C; pub const APPHELP_BLOCK = 0xC000035D; pub const ALL_SIDS_FILTERED = 0xC000035E; pub const NOT_SAFE_MODE_DRIVER = 0xC000035F; pub const ACCESS_DISABLED_BY_POLICY_DEFAULT = 0xC0000361; pub const ACCESS_DISABLED_BY_POLICY_PATH = 0xC0000362; pub const ACCESS_DISABLED_BY_POLICY_PUBLISHER = 0xC0000363; pub const ACCESS_DISABLED_BY_POLICY_OTHER = 0xC0000364; pub const FAILED_DRIVER_ENTRY = 0xC0000365; pub const DEVICE_ENUMERATION_ERROR = 0xC0000366; pub const MOUNT_POINT_NOT_RESOLVED = 0xC0000368; pub const INVALID_DEVICE_OBJECT_PARAMETER = 0xC0000369; pub const MCA_OCCURED = 0xC000036A; pub const DRIVER_BLOCKED_CRITICAL = 0xC000036B; pub const DRIVER_BLOCKED = 0xC000036C; pub const DRIVER_DATABASE_ERROR = 0xC000036D; pub const SYSTEM_HIVE_TOO_LARGE = 0xC000036E; pub const INVALID_IMPORT_OF_NON_DLL = 0xC000036F; pub const NO_SECRETS = 0xC0000371; pub const ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY = 0xC0000372; pub const FAILED_STACK_SWITCH = 0xC0000373; pub const HEAP_CORRUPTION = 0xC0000374; pub const SMARTCARD_WRONG_PIN = 0xC0000380; pub const SMARTCARD_CARD_BLOCKED = 0xC0000381; pub const SMARTCARD_CARD_NOT_AUTHENTICATED = 0xC0000382; pub const SMARTCARD_NO_CARD = 0xC0000383; pub const SMARTCARD_NO_KEY_CONTAINER = 0xC0000384; pub const SMARTCARD_NO_CERTIFICATE = 0xC0000385; pub const SMARTCARD_NO_KEYSET = 0xC0000386; pub const SMARTCARD_IO_ERROR = 0xC0000387; pub const DOWNGRADE_DETECTED = 0xC0000388; pub const SMARTCARD_CERT_REVOKED = 0xC0000389; pub const ISSUING_CA_UNTRUSTED = 0xC000038A; pub const REVOCATION_OFFLINE_C = 0xC000038B; pub const PKINIT_CLIENT_FAILURE = 0xC000038C; pub const SMARTCARD_CERT_EXPIRED = 0xC000038D; pub const DRIVER_FAILED_PRIOR_UNLOAD = 0xC000038E; pub const SMARTCARD_SILENT_CONTEXT = 0xC000038F; pub const PER_USER_TRUST_QUOTA_EXCEEDED = 0xC0000401; pub 
const ALL_USER_TRUST_QUOTA_EXCEEDED = 0xC0000402; pub const USER_DELETE_TRUST_QUOTA_EXCEEDED = 0xC0000403; pub const DS_NAME_NOT_UNIQUE = 0xC0000404; pub const DS_DUPLICATE_ID_FOUND = 0xC0000405; pub const DS_GROUP_CONVERSION_ERROR = 0xC0000406; pub const VOLSNAP_PREPARE_HIBERNATE = 0xC0000407; pub const USER2USER_REQUIRED = 0xC0000408; pub const STACK_BUFFER_OVERRUN = 0xC0000409; pub const NO_S4U_PROT_SUPPORT = 0xC000040A; pub const CROSSREALM_DELEGATION_FAILURE = 0xC000040B; pub const REVOCATION_OFFLINE_KDC = 0xC000040C; pub const ISSUING_CA_UNTRUSTED_KDC = 0xC000040D; pub const KDC_CERT_EXPIRED = 0xC000040E; pub const KDC_CERT_REVOKED = 0xC000040F; pub const PARAMETER_QUOTA_EXCEEDED = 0xC0000410; pub const HIBERNATION_FAILURE = 0xC0000411; pub const DELAY_LOAD_FAILED = 0xC0000412; pub const AUTHENTICATION_FIREWALL_FAILED = 0xC0000413; pub const VDM_DISALLOWED = 0xC0000414; pub const HUNG_DISPLAY_DRIVER_THREAD = 0xC0000415; pub const INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE = 0xC0000416; pub const INVALID_CRUNTIME_PARAMETER = 0xC0000417; pub const NTLM_BLOCKED = 0xC0000418; pub const DS_SRC_SID_EXISTS_IN_FOREST = 0xC0000419; pub const DS_DOMAIN_NAME_EXISTS_IN_FOREST = 0xC000041A; pub const DS_FLAT_NAME_EXISTS_IN_FOREST = 0xC000041B; pub const INVALID_USER_PRINCIPAL_NAME = 0xC000041C; pub const ASSERTION_FAILURE = 0xC0000420; pub const VERIFIER_STOP = 0xC0000421; pub const CALLBACK_POP_STACK = 0xC0000423; pub const INCOMPATIBLE_DRIVER_BLOCKED = 0xC0000424; pub const HIVE_UNLOADED = 0xC0000425; pub const COMPRESSION_DISABLED = 0xC0000426; pub const FILE_SYSTEM_LIMITATION = 0xC0000427; pub const INVALID_IMAGE_HASH = 0xC0000428; pub const NOT_CAPABLE = 0xC0000429; pub const REQUEST_OUT_OF_SEQUENCE = 0xC000042A; pub const IMPLEMENTATION_LIMIT = 0xC000042B; pub const ELEVATION_REQUIRED = 0xC000042C; pub const NO_SECURITY_CONTEXT = 0xC000042D; pub const PKU2U_CERT_FAILURE = 0xC000042E; pub const BEYOND_VDL = 0xC0000432; pub const ENCOUNTERED_WRITE_IN_PROGRESS = 0xC0000433; pub const PTE_CHANGED = 0xC0000434; pub const PURGE_FAILED = 0xC0000435; pub const CRED_REQUIRES_CONFIRMATION = 0xC0000440; pub const CS_ENCRYPTION_INVALID_SERVER_RESPONSE = 0xC0000441; pub const CS_ENCRYPTION_UNSUPPORTED_SERVER = 0xC0000442; pub const CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE = 0xC0000443; pub const CS_ENCRYPTION_NEW_ENCRYPTED_FILE = 0xC0000444; pub const CS_ENCRYPTION_FILE_NOT_CSE = 0xC0000445; pub const INVALID_LABEL = 0xC0000446; pub const DRIVER_PROCESS_TERMINATED = 0xC0000450; pub const AMBIGUOUS_SYSTEM_DEVICE = 0xC0000451; pub const SYSTEM_DEVICE_NOT_FOUND = 0xC0000452; pub const RESTART_BOOT_APPLICATION = 0xC0000453; pub const INSUFFICIENT_NVRAM_RESOURCES = 0xC0000454; pub const INVALID_TASK_NAME = 0xC0000500; pub const INVALID_TASK_INDEX = 0xC0000501; pub const THREAD_ALREADY_IN_TASK = 0xC0000502; pub const CALLBACK_BYPASS = 0xC0000503; pub const FAIL_FAST_EXCEPTION = 0xC0000602; pub const IMAGE_CERT_REVOKED = 0xC0000603; pub const PORT_CLOSED = 0xC0000700; pub const MESSAGE_LOST = 0xC0000701; pub const INVALID_MESSAGE = 0xC0000702; pub const REQUEST_CANCELED = 0xC0000703; pub const RECURSIVE_DISPATCH = 0xC0000704; pub const LPC_RECEIVE_BUFFER_EXPECTED = 0xC0000705; pub const LPC_INVALID_CONNECTION_USAGE = 0xC0000706; pub const LPC_REQUESTS_NOT_ALLOWED = 0xC0000707; pub const RESOURCE_IN_USE = 0xC0000708; pub const HARDWARE_MEMORY_ERROR = 0xC0000709; pub const THREADPOOL_HANDLE_EXCEPTION = 0xC000070A; pub const THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED = 0xC000070B; pub const 
THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED = 0xC000070C; pub const THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED = 0xC000070D; pub const THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED = 0xC000070E; pub const THREADPOOL_RELEASED_DURING_OPERATION = 0xC000070F; pub const CALLBACK_RETURNED_WHILE_IMPERSONATING = 0xC0000710; pub const APC_RETURNED_WHILE_IMPERSONATING = 0xC0000711; pub const PROCESS_IS_PROTECTED = 0xC0000712; pub const MCA_EXCEPTION = 0xC0000713; pub const CERTIFICATE_MAPPING_NOT_UNIQUE = 0xC0000714; pub const SYMLINK_CLASS_DISABLED = 0xC0000715; pub const INVALID_IDN_NORMALIZATION = 0xC0000716; pub const NO_UNICODE_TRANSLATION = 0xC0000717; pub const ALREADY_REGISTERED = 0xC0000718; pub const CONTEXT_MISMATCH = 0xC0000719; pub const PORT_ALREADY_HAS_COMPLETION_LIST = 0xC000071A; pub const CALLBACK_RETURNED_THREAD_PRIORITY = 0xC000071B; pub const INVALID_THREAD = 0xC000071C; pub const CALLBACK_RETURNED_TRANSACTION = 0xC000071D; pub const CALLBACK_RETURNED_LDR_LOCK = 0xC000071E; pub const CALLBACK_RETURNED_LANG = 0xC000071F; pub const CALLBACK_RETURNED_PRI_BACK = 0xC0000720; pub const DISK_REPAIR_DISABLED = 0xC0000800; pub const DS_DOMAIN_RENAME_IN_PROGRESS = 0xC0000801; pub const DISK_QUOTA_EXCEEDED = 0xC0000802; pub const CONTENT_BLOCKED = 0xC0000804; pub const BAD_CLUSTERS = 0xC0000805; pub const VOLUME_DIRTY = 0xC0000806; pub const FILE_CHECKED_OUT = 0xC0000901; pub const CHECKOUT_REQUIRED = 0xC0000902; pub const BAD_FILE_TYPE = 0xC0000903; pub const FILE_TOO_LARGE = 0xC0000904; pub const FORMS_AUTH_REQUIRED = 0xC0000905; pub const VIRUS_INFECTED = 0xC0000906; pub const VIRUS_DELETED = 0xC0000907; pub const BAD_MCFG_TABLE = 0xC0000908; pub const CANNOT_BREAK_OPLOCK = 0xC0000909; pub const WOW_ASSERTION = 0xC0009898; pub const INVALID_SIGNATURE = 0xC000A000; pub const HMAC_NOT_SUPPORTED = 0xC000A001; pub const IPSEC_QUEUE_OVERFLOW = 0xC000A010; pub const ND_QUEUE_OVERFLOW = 0xC000A011; pub const HOPLIMIT_EXCEEDED = 0xC000A012; pub const PROTOCOL_NOT_SUPPORTED = 0xC000A013; pub const LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED = 0xC000A080; pub const LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR = 0xC000A081; pub const LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR = 0xC000A082; pub const XML_PARSE_ERROR = 0xC000A083; pub const XMLDSIG_ERROR = 0xC000A084; pub const WRONG_COMPARTMENT = 0xC000A085; pub const AUTHIP_FAILURE = 0xC000A086; pub const DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS = 0xC000A087; pub const DS_OID_NOT_FOUND = 0xC000A088; pub const HASH_NOT_SUPPORTED = 0xC000A100; pub const HASH_NOT_PRESENT = 0xC000A101; pub const PNP_BAD_MPS_TABLE = 0xC0040035; pub const PNP_TRANSLATION_FAILED = 0xC0040036; pub const PNP_IRQ_TRANSLATION_FAILED = 0xC0040037; pub const PNP_INVALID_ID = 0xC0040038; pub const IO_REISSUE_AS_CACHED = 0xC0040039; pub const CTX_WINSTATION_NAME_INVALID = 0xC00A0001; pub const CTX_INVALID_PD = 0xC00A0002; pub const CTX_PD_NOT_FOUND = 0xC00A0003; pub const CTX_CLOSE_PENDING = 0xC00A0006; pub const CTX_NO_OUTBUF = 0xC00A0007; pub const CTX_MODEM_INF_NOT_FOUND = 0xC00A0008; pub const CTX_INVALID_MODEMNAME = 0xC00A0009; pub const CTX_RESPONSE_ERROR = 0xC00A000A; pub const CTX_MODEM_RESPONSE_TIMEOUT = 0xC00A000B; pub const CTX_MODEM_RESPONSE_NO_CARRIER = 0xC00A000C; pub const CTX_MODEM_RESPONSE_NO_DIALTONE = 0xC00A000D; pub const CTX_MODEM_RESPONSE_BUSY = 0xC00A000E; pub const CTX_MODEM_RESPONSE_VOICE = 0xC00A000F; pub const CTX_TD_ERROR = 0xC00A0010; pub const CTX_LICENSE_CLIENT_INVALID = 0xC00A0012; pub const CTX_LICENSE_NOT_AVAILABLE = 0xC00A0013; pub const 
CTX_LICENSE_EXPIRED = 0xC00A0014; pub const CTX_WINSTATION_NOT_FOUND = 0xC00A0015; pub const CTX_WINSTATION_NAME_COLLISION = 0xC00A0016; pub const CTX_WINSTATION_BUSY = 0xC00A0017; pub const CTX_BAD_VIDEO_MODE = 0xC00A0018; pub const CTX_GRAPHICS_INVALID = 0xC00A0022; pub const CTX_NOT_CONSOLE = 0xC00A0024; pub const CTX_CLIENT_QUERY_TIMEOUT = 0xC00A0026; pub const CTX_CONSOLE_DISCONNECT = 0xC00A0027; pub const CTX_CONSOLE_CONNECT = 0xC00A0028; pub const CTX_SHADOW_DENIED = 0xC00A002A; pub const CTX_WINSTATION_ACCESS_DENIED = 0xC00A002B; pub const CTX_INVALID_WD = 0xC00A002E; pub const CTX_WD_NOT_FOUND = 0xC00A002F; pub const CTX_SHADOW_INVALID = 0xC00A0030; pub const CTX_SHADOW_DISABLED = 0xC00A0031; pub const RDP_PROTOCOL_ERROR = 0xC00A0032; pub const CTX_CLIENT_LICENSE_NOT_SET = 0xC00A0033; pub const CTX_CLIENT_LICENSE_IN_USE = 0xC00A0034; pub const CTX_SHADOW_ENDED_BY_MODE_CHANGE = 0xC00A0035; pub const CTX_SHADOW_NOT_RUNNING = 0xC00A0036; pub const CTX_LOGON_DISABLED = 0xC00A0037; pub const CTX_SECURITY_LAYER_ERROR = 0xC00A0038; pub const TS_INCOMPATIBLE_SESSIONS = 0xC00A0039; pub const MUI_FILE_NOT_FOUND = 0xC00B0001; pub const MUI_INVALID_FILE = 0xC00B0002; pub const MUI_INVALID_RC_CONFIG = 0xC00B0003; pub const MUI_INVALID_LOCALE_NAME = 0xC00B0004; pub const MUI_INVALID_ULTIMATEFALLBACK_NAME = 0xC00B0005; pub const MUI_FILE_NOT_LOADED = 0xC00B0006; pub const RESOURCE_ENUM_USER_STOP = 0xC00B0007; pub const CLUSTER_INVALID_NODE = 0xC0130001; pub const CLUSTER_NODE_EXISTS = 0xC0130002; pub const CLUSTER_JOIN_IN_PROGRESS = 0xC0130003; pub const CLUSTER_NODE_NOT_FOUND = 0xC0130004; pub const CLUSTER_LOCAL_NODE_NOT_FOUND = 0xC0130005; pub const CLUSTER_NETWORK_EXISTS = 0xC0130006; pub const CLUSTER_NETWORK_NOT_FOUND = 0xC0130007; pub const CLUSTER_NETINTERFACE_EXISTS = 0xC0130008; pub const CLUSTER_NETINTERFACE_NOT_FOUND = 0xC0130009; pub const CLUSTER_INVALID_REQUEST = 0xC013000A; pub const CLUSTER_INVALID_NETWORK_PROVIDER = 0xC013000B; pub const CLUSTER_NODE_DOWN = 0xC013000C; pub const CLUSTER_NODE_UNREACHABLE = 0xC013000D; pub const CLUSTER_NODE_NOT_MEMBER = 0xC013000E; pub const CLUSTER_JOIN_NOT_IN_PROGRESS = 0xC013000F; pub const CLUSTER_INVALID_NETWORK = 0xC0130010; pub const CLUSTER_NO_NET_ADAPTERS = 0xC0130011; pub const CLUSTER_NODE_UP = 0xC0130012; pub const CLUSTER_NODE_PAUSED = 0xC0130013; pub const CLUSTER_NODE_NOT_PAUSED = 0xC0130014; pub const CLUSTER_NO_SECURITY_CONTEXT = 0xC0130015; pub const CLUSTER_NETWORK_NOT_INTERNAL = 0xC0130016; pub const CLUSTER_POISONED = 0xC0130017; pub const ACPI_INVALID_OPCODE = 0xC0140001; pub const ACPI_STACK_OVERFLOW = 0xC0140002; pub const ACPI_ASSERT_FAILED = 0xC0140003; pub const ACPI_INVALID_INDEX = 0xC0140004; pub const ACPI_INVALID_ARGUMENT = 0xC0140005; pub const ACPI_FATAL = 0xC0140006; pub const ACPI_INVALID_SUPERNAME = 0xC0140007; pub const ACPI_INVALID_ARGTYPE = 0xC0140008; pub const ACPI_INVALID_OBJTYPE = 0xC0140009; pub const ACPI_INVALID_TARGETTYPE = 0xC014000A; pub const ACPI_INCORRECT_ARGUMENT_COUNT = 0xC014000B; pub const ACPI_ADDRESS_NOT_MAPPED = 0xC014000C; pub const ACPI_INVALID_EVENTTYPE = 0xC014000D; pub const ACPI_HANDLER_COLLISION = 0xC014000E; pub const ACPI_INVALID_DATA = 0xC014000F; pub const ACPI_INVALID_REGION = 0xC0140010; pub const ACPI_INVALID_ACCESS_SIZE = 0xC0140011; pub const ACPI_ACQUIRE_GLOBAL_LOCK = 0xC0140012; pub const ACPI_ALREADY_INITIALIZED = 0xC0140013; pub const ACPI_NOT_INITIALIZED = 0xC0140014; pub const ACPI_INVALID_MUTEX_LEVEL = 0xC0140015; pub const ACPI_MUTEX_NOT_OWNED = 0xC0140016; pub 
const ACPI_MUTEX_NOT_OWNER = 0xC0140017; pub const ACPI_RS_ACCESS = 0xC0140018; pub const ACPI_INVALID_TABLE = 0xC0140019; pub const ACPI_REG_HANDLER_FAILED = 0xC0140020; pub const ACPI_POWER_REQUEST_FAILED = 0xC0140021; pub const SXS_SECTION_NOT_FOUND = 0xC0150001; pub const SXS_CANT_GEN_ACTCTX = 0xC0150002; pub const SXS_INVALID_ACTCTXDATA_FORMAT = 0xC0150003; pub const SXS_ASSEMBLY_NOT_FOUND = 0xC0150004; pub const SXS_MANIFEST_FORMAT_ERROR = 0xC0150005; pub const SXS_MANIFEST_PARSE_ERROR = 0xC0150006; pub const SXS_ACTIVATION_CONTEXT_DISABLED = 0xC0150007; pub const SXS_KEY_NOT_FOUND = 0xC0150008; pub const SXS_VERSION_CONFLICT = 0xC0150009; pub const SXS_WRONG_SECTION_TYPE = 0xC015000A; pub const SXS_THREAD_QUERIES_DISABLED = 0xC015000B; pub const SXS_ASSEMBLY_MISSING = 0xC015000C; pub const SXS_PROCESS_DEFAULT_ALREADY_SET = 0xC015000E; pub const SXS_EARLY_DEACTIVATION = 0xC015000F; pub const SXS_INVALID_DEACTIVATION = 0xC0150010; pub const SXS_MULTIPLE_DEACTIVATION = 0xC0150011; pub const SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY = 0xC0150012; pub const SXS_PROCESS_TERMINATION_REQUESTED = 0xC0150013; pub const SXS_CORRUPT_ACTIVATION_STACK = 0xC0150014; pub const SXS_CORRUPTION = 0xC0150015; pub const SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE = 0xC0150016; pub const SXS_INVALID_IDENTITY_ATTRIBUTE_NAME = 0xC0150017; pub const SXS_IDENTITY_DUPLICATE_ATTRIBUTE = 0xC0150018; pub const SXS_IDENTITY_PARSE_ERROR = 0xC0150019; pub const SXS_COMPONENT_STORE_CORRUPT = 0xC015001A; pub const SXS_FILE_HASH_MISMATCH = 0xC015001B; pub const SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT = 0xC015001C; pub const SXS_IDENTITIES_DIFFERENT = 0xC015001D; pub const SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT = 0xC015001E; pub const SXS_FILE_NOT_PART_OF_ASSEMBLY = 0xC015001F; pub const ADVANCED_INSTALLER_FAILED = 0xC0150020; pub const XML_ENCODING_MISMATCH = 0xC0150021; pub const SXS_MANIFEST_TOO_BIG = 0xC0150022; pub const SXS_SETTING_NOT_REGISTERED = 0xC0150023; pub const SXS_TRANSACTION_CLOSURE_INCOMPLETE = 0xC0150024; pub const SMI_PRIMITIVE_INSTALLER_FAILED = 0xC0150025; pub const GENERIC_COMMAND_FAILED = 0xC0150026; pub const SXS_FILE_HASH_MISSING = 0xC0150027; pub const TRANSACTIONAL_CONFLICT = 0xC0190001; pub const INVALID_TRANSACTION = 0xC0190002; pub const TRANSACTION_NOT_ACTIVE = 0xC0190003; pub const TM_INITIALIZATION_FAILED = 0xC0190004; pub const RM_NOT_ACTIVE = 0xC0190005; pub const RM_METADATA_CORRUPT = 0xC0190006; pub const TRANSACTION_NOT_JOINED = 0xC0190007; pub const DIRECTORY_NOT_RM = 0xC0190008; pub const TRANSACTIONS_UNSUPPORTED_REMOTE = 0xC019000A; pub const LOG_RESIZE_INVALID_SIZE = 0xC019000B; pub const REMOTE_FILE_VERSION_MISMATCH = 0xC019000C; pub const CRM_PROTOCOL_ALREADY_EXISTS = 0xC019000F; pub const TRANSACTION_PROPAGATION_FAILED = 0xC0190010; pub const CRM_PROTOCOL_NOT_FOUND = 0xC0190011; pub const TRANSACTION_SUPERIOR_EXISTS = 0xC0190012; pub const TRANSACTION_REQUEST_NOT_VALID = 0xC0190013; pub const TRANSACTION_NOT_REQUESTED = 0xC0190014; pub const TRANSACTION_ALREADY_ABORTED = 0xC0190015; pub const TRANSACTION_ALREADY_COMMITTED = 0xC0190016; pub const TRANSACTION_INVALID_MARSHALL_BUFFER = 0xC0190017; pub const CURRENT_TRANSACTION_NOT_VALID = 0xC0190018; pub const LOG_GROWTH_FAILED = 0xC0190019; pub const OBJECT_NO_LONGER_EXISTS = 0xC0190021; pub const STREAM_MINIVERSION_NOT_FOUND = 0xC0190022; pub const STREAM_MINIVERSION_NOT_VALID = 0xC0190023; pub const MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION = 0xC0190024; pub const CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT = 
0xC0190025; pub const CANT_CREATE_MORE_STREAM_MINIVERSIONS = 0xC0190026; pub const HANDLE_NO_LONGER_VALID = 0xC0190028; pub const LOG_CORRUPTION_DETECTED = 0xC0190030; pub const RM_DISCONNECTED = 0xC0190032; pub const ENLISTMENT_NOT_SUPERIOR = 0xC0190033; pub const FILE_IDENTITY_NOT_PERSISTENT = 0xC0190036; pub const CANT_BREAK_TRANSACTIONAL_DEPENDENCY = 0xC0190037; pub const CANT_CROSS_RM_BOUNDARY = 0xC0190038; pub const TXF_DIR_NOT_EMPTY = 0xC0190039; pub const INDOUBT_TRANSACTIONS_EXIST = 0xC019003A; pub const TM_VOLATILE = 0xC019003B; pub const ROLLBACK_TIMER_EXPIRED = 0xC019003C; pub const TXF_ATTRIBUTE_CORRUPT = 0xC019003D; pub const EFS_NOT_ALLOWED_IN_TRANSACTION = 0xC019003E; pub const TRANSACTIONAL_OPEN_NOT_ALLOWED = 0xC019003F; pub const TRANSACTED_MAPPING_UNSUPPORTED_REMOTE = 0xC0190040; pub const TRANSACTION_REQUIRED_PROMOTION = 0xC0190043; pub const CANNOT_EXECUTE_FILE_IN_TRANSACTION = 0xC0190044; pub const TRANSACTIONS_NOT_FROZEN = 0xC0190045; pub const TRANSACTION_FREEZE_IN_PROGRESS = 0xC0190046; pub const NOT_SNAPSHOT_VOLUME = 0xC0190047; pub const NO_SAVEPOINT_WITH_OPEN_FILES = 0xC0190048; pub const SPARSE_NOT_ALLOWED_IN_TRANSACTION = 0xC0190049; pub const TM_IDENTITY_MISMATCH = 0xC019004A; pub const FLOATED_SECTION = 0xC019004B; pub const CANNOT_ACCEPT_TRANSACTED_WORK = 0xC019004C; pub const CANNOT_ABORT_TRANSACTIONS = 0xC019004D; pub const TRANSACTION_NOT_FOUND = 0xC019004E; pub const RESOURCEMANAGER_NOT_FOUND = 0xC019004F; pub const ENLISTMENT_NOT_FOUND = 0xC0190050; pub const TRANSACTIONMANAGER_NOT_FOUND = 0xC0190051; pub const TRANSACTIONMANAGER_NOT_ONLINE = 0xC0190052; pub const TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION = 0xC0190053; pub const TRANSACTION_NOT_ROOT = 0xC0190054; pub const TRANSACTION_OBJECT_EXPIRED = 0xC0190055; pub const COMPRESSION_NOT_ALLOWED_IN_TRANSACTION = 0xC0190056; pub const TRANSACTION_RESPONSE_NOT_ENLISTED = 0xC0190057; pub const TRANSACTION_RECORD_TOO_LONG = 0xC0190058; pub const NO_LINK_TRACKING_IN_TRANSACTION = 0xC0190059; pub const OPERATION_NOT_SUPPORTED_IN_TRANSACTION = 0xC019005A; pub const TRANSACTION_INTEGRITY_VIOLATED = 0xC019005B; pub const EXPIRED_HANDLE = 0xC0190060; pub const TRANSACTION_NOT_ENLISTED = 0xC0190061; pub const LOG_SECTOR_INVALID = 0xC01A0001; pub const LOG_SECTOR_PARITY_INVALID = 0xC01A0002; pub const LOG_SECTOR_REMAPPED = 0xC01A0003; pub const LOG_BLOCK_INCOMPLETE = 0xC01A0004; pub const LOG_INVALID_RANGE = 0xC01A0005; pub const LOG_BLOCKS_EXHAUSTED = 0xC01A0006; pub const LOG_READ_CONTEXT_INVALID = 0xC01A0007; pub const LOG_RESTART_INVALID = 0xC01A0008; pub const LOG_BLOCK_VERSION = 0xC01A0009; pub const LOG_BLOCK_INVALID = 0xC01A000A; pub const LOG_READ_MODE_INVALID = 0xC01A000B; pub const LOG_METADATA_CORRUPT = 0xC01A000D; pub const LOG_METADATA_INVALID = 0xC01A000E; pub const LOG_METADATA_INCONSISTENT = 0xC01A000F; pub const LOG_RESERVATION_INVALID = 0xC01A0010; pub const LOG_CANT_DELETE = 0xC01A0011; pub const LOG_CONTAINER_LIMIT_EXCEEDED = 0xC01A0012; pub const LOG_START_OF_LOG = 0xC01A0013; pub const LOG_POLICY_ALREADY_INSTALLED = 0xC01A0014; pub const LOG_POLICY_NOT_INSTALLED = 0xC01A0015; pub const LOG_POLICY_INVALID = 0xC01A0016; pub const LOG_POLICY_CONFLICT = 0xC01A0017; pub const LOG_PINNED_ARCHIVE_TAIL = 0xC01A0018; pub const LOG_RECORD_NONEXISTENT = 0xC01A0019; pub const LOG_RECORDS_RESERVED_INVALID = 0xC01A001A; pub const LOG_SPACE_RESERVED_INVALID = 0xC01A001B; pub const LOG_TAIL_INVALID = 0xC01A001C; pub const LOG_FULL = 0xC01A001D; pub const LOG_MULTIPLEXED = 0xC01A001E; pub const 
LOG_DEDICATED = 0xC01A001F; pub const LOG_ARCHIVE_NOT_IN_PROGRESS = 0xC01A0020; pub const LOG_ARCHIVE_IN_PROGRESS = 0xC01A0021; pub const LOG_EPHEMERAL = 0xC01A0022; pub const LOG_NOT_ENOUGH_CONTAINERS = 0xC01A0023; pub const LOG_CLIENT_ALREADY_REGISTERED = 0xC01A0024; pub const LOG_CLIENT_NOT_REGISTERED = 0xC01A0025; pub const LOG_FULL_HANDLER_IN_PROGRESS = 0xC01A0026; pub const LOG_CONTAINER_READ_FAILED = 0xC01A0027; pub const LOG_CONTAINER_WRITE_FAILED = 0xC01A0028; pub const LOG_CONTAINER_OPEN_FAILED = 0xC01A0029; pub const LOG_CONTAINER_STATE_INVALID = 0xC01A002A; pub const LOG_STATE_INVALID = 0xC01A002B; pub const LOG_PINNED = 0xC01A002C; pub const LOG_METADATA_FLUSH_FAILED = 0xC01A002D; pub const LOG_INCONSISTENT_SECURITY = 0xC01A002E; pub const LOG_APPENDED_FLUSH_FAILED = 0xC01A002F; pub const LOG_PINNED_RESERVATION = 0xC01A0030; pub const VIDEO_HUNG_DISPLAY_DRIVER_THREAD = 0xC01B00EA; pub const FLT_NO_HANDLER_DEFINED = 0xC01C0001; pub const FLT_CONTEXT_ALREADY_DEFINED = 0xC01C0002; pub const FLT_INVALID_ASYNCHRONOUS_REQUEST = 0xC01C0003; pub const FLT_DISALLOW_FAST_IO = 0xC01C0004; pub const FLT_INVALID_NAME_REQUEST = 0xC01C0005; pub const FLT_NOT_SAFE_TO_POST_OPERATION = 0xC01C0006; pub const FLT_NOT_INITIALIZED = 0xC01C0007; pub const FLT_FILTER_NOT_READY = 0xC01C0008; pub const FLT_POST_OPERATION_CLEANUP = 0xC01C0009; pub const FLT_INTERNAL_ERROR = 0xC01C000A; pub const FLT_DELETING_OBJECT = 0xC01C000B; pub const FLT_MUST_BE_NONPAGED_POOL = 0xC01C000C; pub const FLT_DUPLICATE_ENTRY = 0xC01C000D; pub const FLT_CBDQ_DISABLED = 0xC01C000E; pub const FLT_DO_NOT_ATTACH = 0xC01C000F; pub const FLT_DO_NOT_DETACH = 0xC01C0010; pub const FLT_INSTANCE_ALTITUDE_COLLISION = 0xC01C0011; pub const FLT_INSTANCE_NAME_COLLISION = 0xC01C0012; pub const FLT_FILTER_NOT_FOUND = 0xC01C0013; pub const FLT_VOLUME_NOT_FOUND = 0xC01C0014; pub const FLT_INSTANCE_NOT_FOUND = 0xC01C0015; pub const FLT_CONTEXT_ALLOCATION_NOT_FOUND = 0xC01C0016; pub const FLT_INVALID_CONTEXT_REGISTRATION = 0xC01C0017; pub const FLT_NAME_CACHE_MISS = 0xC01C0018; pub const FLT_NO_DEVICE_OBJECT = 0xC01C0019; pub const FLT_VOLUME_ALREADY_MOUNTED = 0xC01C001A; pub const FLT_ALREADY_ENLISTED = 0xC01C001B; pub const FLT_CONTEXT_ALREADY_LINKED = 0xC01C001C; pub const FLT_NO_WAITER_FOR_REPLY = 0xC01C0020; pub const MONITOR_NO_DESCRIPTOR = 0xC01D0001; pub const MONITOR_UNKNOWN_DESCRIPTOR_FORMAT = 0xC01D0002; pub const MONITOR_INVALID_DESCRIPTOR_CHECKSUM = 0xC01D0003; pub const MONITOR_INVALID_STANDARD_TIMING_BLOCK = 0xC01D0004; pub const MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED = 0xC01D0005; pub const MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK = 0xC01D0006; pub const MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK = 0xC01D0007; pub const MONITOR_NO_MORE_DESCRIPTOR_DATA = 0xC01D0008; pub const MONITOR_INVALID_DETAILED_TIMING_BLOCK = 0xC01D0009; pub const MONITOR_INVALID_MANUFACTURE_DATE = 0xC01D000A; pub const GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER = 0xC01E0000; pub const GRAPHICS_INSUFFICIENT_DMA_BUFFER = 0xC01E0001; pub const GRAPHICS_INVALID_DISPLAY_ADAPTER = 0xC01E0002; pub const GRAPHICS_ADAPTER_WAS_RESET = 0xC01E0003; pub const GRAPHICS_INVALID_DRIVER_MODEL = 0xC01E0004; pub const GRAPHICS_PRESENT_MODE_CHANGED = 0xC01E0005; pub const GRAPHICS_PRESENT_OCCLUDED = 0xC01E0006; pub const GRAPHICS_PRESENT_DENIED = 0xC01E0007; pub const GRAPHICS_CANNOTCOLORCONVERT = 0xC01E0008; pub const GRAPHICS_PRESENT_REDIRECTION_DISABLED = 0xC01E000B; pub const GRAPHICS_PRESENT_UNOCCLUDED = 0xC01E000C; pub const GRAPHICS_NO_VIDEO_MEMORY = 0xC01E0100; pub 
const GRAPHICS_CANT_LOCK_MEMORY = 0xC01E0101; pub const GRAPHICS_ALLOCATION_BUSY = 0xC01E0102; pub const GRAPHICS_TOO_MANY_REFERENCES = 0xC01E0103; pub const GRAPHICS_TRY_AGAIN_LATER = 0xC01E0104; pub const GRAPHICS_TRY_AGAIN_NOW = 0xC01E0105; pub const GRAPHICS_ALLOCATION_INVALID = 0xC01E0106; pub const GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE = 0xC01E0107; pub const GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED = 0xC01E0108; pub const GRAPHICS_CANT_EVICT_PINNED_ALLOCATION = 0xC01E0109; pub const GRAPHICS_INVALID_ALLOCATION_USAGE = 0xC01E0110; pub const GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION = 0xC01E0111; pub const GRAPHICS_ALLOCATION_CLOSED = 0xC01E0112; pub const GRAPHICS_INVALID_ALLOCATION_INSTANCE = 0xC01E0113; pub const GRAPHICS_INVALID_ALLOCATION_HANDLE = 0xC01E0114; pub const GRAPHICS_WRONG_ALLOCATION_DEVICE = 0xC01E0115; pub const GRAPHICS_ALLOCATION_CONTENT_LOST = 0xC01E0116; pub const GRAPHICS_GPU_EXCEPTION_ON_DEVICE = 0xC01E0200; pub const GRAPHICS_INVALID_VIDPN_TOPOLOGY = 0xC01E0300; pub const GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED = 0xC01E0301; pub const GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED = 0xC01E0302; pub const GRAPHICS_INVALID_VIDPN = 0xC01E0303; pub const GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE = 0xC01E0304; pub const GRAPHICS_INVALID_VIDEO_PRESENT_TARGET = 0xC01E0305; pub const GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED = 0xC01E0306; pub const GRAPHICS_INVALID_VIDPN_SOURCEMODESET = 0xC01E0308; pub const GRAPHICS_INVALID_VIDPN_TARGETMODESET = 0xC01E0309; pub const GRAPHICS_INVALID_FREQUENCY = 0xC01E030A; pub const GRAPHICS_INVALID_ACTIVE_REGION = 0xC01E030B; pub const GRAPHICS_INVALID_TOTAL_REGION = 0xC01E030C; pub const GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE = 0xC01E0310; pub const GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE = 0xC01E0311; pub const GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET = 0xC01E0312; pub const GRAPHICS_PATH_ALREADY_IN_TOPOLOGY = 0xC01E0313; pub const GRAPHICS_MODE_ALREADY_IN_MODESET = 0xC01E0314; pub const GRAPHICS_INVALID_VIDEOPRESENTSOURCESET = 0xC01E0315; pub const GRAPHICS_INVALID_VIDEOPRESENTTARGETSET = 0xC01E0316; pub const GRAPHICS_SOURCE_ALREADY_IN_SET = 0xC01E0317; pub const GRAPHICS_TARGET_ALREADY_IN_SET = 0xC01E0318; pub const GRAPHICS_INVALID_VIDPN_PRESENT_PATH = 0xC01E0319; pub const GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY = 0xC01E031A; pub const GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET = 0xC01E031B; pub const GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE = 0xC01E031C; pub const GRAPHICS_FREQUENCYRANGE_NOT_IN_SET = 0xC01E031D; pub const GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET = 0xC01E031F; pub const GRAPHICS_STALE_MODESET = 0xC01E0320; pub const GRAPHICS_INVALID_MONITOR_SOURCEMODESET = 0xC01E0321; pub const GRAPHICS_INVALID_MONITOR_SOURCE_MODE = 0xC01E0322; pub const GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN = 0xC01E0323; pub const GRAPHICS_MODE_ID_MUST_BE_UNIQUE = 0xC01E0324; pub const GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION = 0xC01E0325; pub const GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES = 0xC01E0326; pub const GRAPHICS_PATH_NOT_IN_TOPOLOGY = 0xC01E0327; pub const GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE = 0xC01E0328; pub const GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET = 0xC01E0329; pub const GRAPHICS_INVALID_MONITORDESCRIPTORSET = 0xC01E032A; pub const GRAPHICS_INVALID_MONITORDESCRIPTOR = 0xC01E032B; pub const GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET = 0xC01E032C; pub const GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET = 0xC01E032D; pub const GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE = 0xC01E032E; pub const 
GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE = 0xC01E032F; pub const GRAPHICS_RESOURCES_NOT_RELATED = 0xC01E0330; pub const GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE = 0xC01E0331; pub const GRAPHICS_TARGET_ID_MUST_BE_UNIQUE = 0xC01E0332; pub const GRAPHICS_NO_AVAILABLE_VIDPN_TARGET = 0xC01E0333; pub const GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER = 0xC01E0334; pub const GRAPHICS_NO_VIDPNMGR = 0xC01E0335; pub const GRAPHICS_NO_ACTIVE_VIDPN = 0xC01E0336; pub const GRAPHICS_STALE_VIDPN_TOPOLOGY = 0xC01E0337; pub const GRAPHICS_MONITOR_NOT_CONNECTED = 0xC01E0338; pub const GRAPHICS_SOURCE_NOT_IN_TOPOLOGY = 0xC01E0339; pub const GRAPHICS_INVALID_PRIMARYSURFACE_SIZE = 0xC01E033A; pub const GRAPHICS_INVALID_VISIBLEREGION_SIZE = 0xC01E033B; pub const GRAPHICS_INVALID_STRIDE = 0xC01E033C; pub const GRAPHICS_INVALID_PIXELFORMAT = 0xC01E033D; pub const GRAPHICS_INVALID_COLORBASIS = 0xC01E033E; pub const GRAPHICS_INVALID_PIXELVALUEACCESSMODE = 0xC01E033F; pub const GRAPHICS_TARGET_NOT_IN_TOPOLOGY = 0xC01E0340; pub const GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT = 0xC01E0341; pub const GRAPHICS_VIDPN_SOURCE_IN_USE = 0xC01E0342; pub const GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN = 0xC01E0343; pub const GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL = 0xC01E0344; pub const GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION = 0xC01E0345; pub const GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED = 0xC01E0346; pub const GRAPHICS_INVALID_GAMMA_RAMP = 0xC01E0347; pub const GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED = 0xC01E0348; pub const GRAPHICS_MULTISAMPLING_NOT_SUPPORTED = 0xC01E0349; pub const GRAPHICS_MODE_NOT_IN_MODESET = 0xC01E034A; pub const GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON = 0xC01E034D; pub const GRAPHICS_INVALID_PATH_CONTENT_TYPE = 0xC01E034E; pub const GRAPHICS_INVALID_COPYPROTECTION_TYPE = 0xC01E034F; pub const GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS = 0xC01E0350; pub const GRAPHICS_INVALID_SCANLINE_ORDERING = 0xC01E0352; pub const GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED = 0xC01E0353; pub const GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS = 0xC01E0354; pub const GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT = 0xC01E0355; pub const GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM = 0xC01E0356; pub const GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN = 0xC01E0357; pub const GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT = 0xC01E0358; pub const GRAPHICS_MAX_NUM_PATHS_REACHED = 0xC01E0359; pub const GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION = 0xC01E035A; pub const GRAPHICS_INVALID_CLIENT_TYPE = 0xC01E035B; pub const GRAPHICS_CLIENTVIDPN_NOT_SET = 0xC01E035C; pub const GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED = 0xC01E0400; pub const GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED = 0xC01E0401; pub const GRAPHICS_NOT_A_LINKED_ADAPTER = 0xC01E0430; pub const GRAPHICS_LEADLINK_NOT_ENUMERATED = 0xC01E0431; pub const GRAPHICS_CHAINLINKS_NOT_ENUMERATED = 0xC01E0432; pub const GRAPHICS_ADAPTER_CHAIN_NOT_READY = 0xC01E0433; pub const GRAPHICS_CHAINLINKS_NOT_STARTED = 0xC01E0434; pub const GRAPHICS_CHAINLINKS_NOT_POWERED_ON = 0xC01E0435; pub const GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE = 0xC01E0436; pub const GRAPHICS_NOT_POST_DEVICE_DRIVER = 0xC01E0438; pub const GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED = 0xC01E043B; pub const GRAPHICS_OPM_NOT_SUPPORTED = 0xC01E0500; pub const GRAPHICS_COPP_NOT_SUPPORTED = 0xC01E0501; pub const GRAPHICS_UAB_NOT_SUPPORTED = 0xC01E0502; pub const GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS = 0xC01E0503; pub const GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL = 0xC01E0504; pub const 
GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST = 0xC01E0505; pub const GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME = 0xC01E0506; pub const GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP = 0xC01E0507; pub const GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED = 0xC01E0508; pub const GRAPHICS_OPM_INVALID_POINTER = 0xC01E050A; pub const GRAPHICS_OPM_INTERNAL_ERROR = 0xC01E050B; pub const GRAPHICS_OPM_INVALID_HANDLE = 0xC01E050C; pub const GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE = 0xC01E050D; pub const GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH = 0xC01E050E; pub const GRAPHICS_OPM_SPANNING_MODE_ENABLED = 0xC01E050F; pub const GRAPHICS_OPM_THEATER_MODE_ENABLED = 0xC01E0510; pub const GRAPHICS_PVP_HFS_FAILED = 0xC01E0511; pub const GRAPHICS_OPM_INVALID_SRM = 0xC01E0512; pub const GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP = 0xC01E0513; pub const GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP = 0xC01E0514; pub const GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA = 0xC01E0515; pub const GRAPHICS_OPM_HDCP_SRM_NEVER_SET = 0xC01E0516; pub const GRAPHICS_OPM_RESOLUTION_TOO_HIGH = 0xC01E0517; pub const GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE = 0xC01E0518; pub const GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS = 0xC01E051A; pub const GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS = 0xC01E051B; pub const GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS = 0xC01E051C; pub const GRAPHICS_OPM_INVALID_INFORMATION_REQUEST = 0xC01E051D; pub const GRAPHICS_OPM_DRIVER_INTERNAL_ERROR = 0xC01E051E; pub const GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS = 0xC01E051F; pub const GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED = 0xC01E0520; pub const GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST = 0xC01E0521; pub const GRAPHICS_I2C_NOT_SUPPORTED = 0xC01E0580; pub const GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST = 0xC01E0581; pub const GRAPHICS_I2C_ERROR_TRANSMITTING_DATA = 0xC01E0582; pub const GRAPHICS_I2C_ERROR_RECEIVING_DATA = 0xC01E0583; pub const GRAPHICS_DDCCI_VCP_NOT_SUPPORTED = 0xC01E0584; pub const GRAPHICS_DDCCI_INVALID_DATA = 0xC01E0585; pub const GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE = 0xC01E0586; pub const GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING = 0xC01E0587; pub const GRAPHICS_MCA_INTERNAL_ERROR = 0xC01E0588; pub const GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND = 0xC01E0589; pub const GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH = 0xC01E058A; pub const GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM = 0xC01E058B; pub const GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE = 0xC01E058C; pub const GRAPHICS_MONITOR_NO_LONGER_EXISTS = 0xC01E058D; pub const GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED = 0xC01E05E0; pub const GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME = 0xC01E05E1; pub const GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP = 0xC01E05E2; pub const GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED = 0xC01E05E3; pub const GRAPHICS_INVALID_POINTER = 0xC01E05E4; pub const GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE = 0xC01E05E5; pub const GRAPHICS_PARAMETER_ARRAY_TOO_SMALL = 0xC01E05E6; pub const GRAPHICS_INTERNAL_ERROR = 0xC01E05E7; pub const GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS = 0xC01E05E8; pub const FVE_LOCKED_VOLUME = 0xC0210000; pub const FVE_NOT_ENCRYPTED = 0xC0210001; pub const FVE_BAD_INFORMATION = 0xC0210002; pub const FVE_TOO_SMALL = 0xC0210003; pub const FVE_FAILED_WRONG_FS = 0xC0210004; pub const FVE_FAILED_BAD_FS = 0xC0210005; pub const FVE_FS_NOT_EXTENDED = 0xC0210006; pub const FVE_FS_MOUNTED = 0xC0210007; pub const FVE_NO_LICENSE = 0xC0210008; pub const FVE_ACTION_NOT_ALLOWED = 
0xC0210009; pub const FVE_BAD_DATA = 0xC021000A; pub const FVE_VOLUME_NOT_BOUND = 0xC021000B; pub const FVE_NOT_DATA_VOLUME = 0xC021000C; pub const FVE_CONV_READ_ERROR = 0xC021000D; pub const FVE_CONV_WRITE_ERROR = 0xC021000E; pub const FVE_OVERLAPPED_UPDATE = 0xC021000F; pub const FVE_FAILED_SECTOR_SIZE = 0xC0210010; pub const FVE_FAILED_AUTHENTICATION = 0xC0210011; pub const FVE_NOT_OS_VOLUME = 0xC0210012; pub const FVE_KEYFILE_NOT_FOUND = 0xC0210013; pub const FVE_KEYFILE_INVALID = 0xC0210014; pub const FVE_KEYFILE_NO_VMK = 0xC0210015; pub const FVE_TPM_DISABLED = 0xC0210016; pub const FVE_TPM_SRK_AUTH_NOT_ZERO = 0xC0210017; pub const FVE_TPM_INVALID_PCR = 0xC0210018; pub const FVE_TPM_NO_VMK = 0xC0210019; pub const FVE_PIN_INVALID = 0xC021001A; pub const FVE_AUTH_INVALID_APPLICATION = 0xC021001B; pub const FVE_AUTH_INVALID_CONFIG = 0xC021001C; pub const FVE_DEBUGGER_ENABLED = 0xC021001D; pub const FVE_DRY_RUN_FAILED = 0xC021001E; pub const FVE_BAD_METADATA_POINTER = 0xC021001F; pub const FVE_OLD_METADATA_COPY = 0xC0210020; pub const FVE_REBOOT_REQUIRED = 0xC0210021; pub const FVE_RAW_ACCESS = 0xC0210022; pub const FVE_RAW_BLOCKED = 0xC0210023; pub const FVE_NO_FEATURE_LICENSE = 0xC0210026; pub const FVE_POLICY_USER_DISABLE_RDV_NOT_ALLOWED = 0xC0210027; pub const FVE_CONV_RECOVERY_FAILED = 0xC0210028; pub const FVE_VIRTUALIZED_SPACE_TOO_BIG = 0xC0210029; pub const FVE_VOLUME_TOO_SMALL = 0xC0210030; pub const FWP_CALLOUT_NOT_FOUND = 0xC0220001; pub const FWP_CONDITION_NOT_FOUND = 0xC0220002; pub const FWP_FILTER_NOT_FOUND = 0xC0220003; pub const FWP_LAYER_NOT_FOUND = 0xC0220004; pub const FWP_PROVIDER_NOT_FOUND = 0xC0220005; pub const FWP_PROVIDER_CONTEXT_NOT_FOUND = 0xC0220006; pub const FWP_SUBLAYER_NOT_FOUND = 0xC0220007; pub const FWP_NOT_FOUND = 0xC0220008; pub const FWP_ALREADY_EXISTS = 0xC0220009; pub const FWP_IN_USE = 0xC022000A; pub const FWP_DYNAMIC_SESSION_IN_PROGRESS = 0xC022000B; pub const FWP_WRONG_SESSION = 0xC022000C; pub const FWP_NO_TXN_IN_PROGRESS = 0xC022000D; pub const FWP_TXN_IN_PROGRESS = 0xC022000E; pub const FWP_TXN_ABORTED = 0xC022000F; pub const FWP_SESSION_ABORTED = 0xC0220010; pub const FWP_INCOMPATIBLE_TXN = 0xC0220011; pub const FWP_TIMEOUT = 0xC0220012; pub const FWP_NET_EVENTS_DISABLED = 0xC0220013; pub const FWP_INCOMPATIBLE_LAYER = 0xC0220014; pub const FWP_KM_CLIENTS_ONLY = 0xC0220015; pub const FWP_LIFETIME_MISMATCH = 0xC0220016; pub const FWP_BUILTIN_OBJECT = 0xC0220017; pub const FWP_TOO_MANY_BOOTTIME_FILTERS = 0xC0220018; pub const FWP_TOO_MANY_CALLOUTS = 0xC0220018; pub const FWP_NOTIFICATION_DROPPED = 0xC0220019; pub const FWP_TRAFFIC_MISMATCH = 0xC022001A; pub const FWP_INCOMPATIBLE_SA_STATE = 0xC022001B; pub const FWP_NULL_POINTER = 0xC022001C; pub const FWP_INVALID_ENUMERATOR = 0xC022001D; pub const FWP_INVALID_FLAGS = 0xC022001E; pub const FWP_INVALID_NET_MASK = 0xC022001F; pub const FWP_INVALID_RANGE = 0xC0220020; pub const FWP_INVALID_INTERVAL = 0xC0220021; pub const FWP_ZERO_LENGTH_ARRAY = 0xC0220022; pub const FWP_NULL_DISPLAY_NAME = 0xC0220023; pub const FWP_INVALID_ACTION_TYPE = 0xC0220024; pub const FWP_INVALID_WEIGHT = 0xC0220025; pub const FWP_MATCH_TYPE_MISMATCH = 0xC0220026; pub const FWP_TYPE_MISMATCH = 0xC0220027; pub const FWP_OUT_OF_BOUNDS = 0xC0220028; pub const FWP_RESERVED = 0xC0220029; pub const FWP_DUPLICATE_CONDITION = 0xC022002A; pub const FWP_DUPLICATE_KEYMOD = 0xC022002B; pub const FWP_ACTION_INCOMPATIBLE_WITH_LAYER = 0xC022002C; pub const FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER = 0xC022002D; pub const 
FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER = 0xC022002E; pub const FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT = 0xC022002F; pub const FWP_INCOMPATIBLE_AUTH_METHOD = 0xC0220030; pub const FWP_INCOMPATIBLE_DH_GROUP = 0xC0220031; pub const FWP_EM_NOT_SUPPORTED = 0xC0220032; pub const FWP_NEVER_MATCH = 0xC0220033; pub const FWP_PROVIDER_CONTEXT_MISMATCH = 0xC0220034; pub const FWP_INVALID_PARAMETER = 0xC0220035; pub const FWP_TOO_MANY_SUBLAYERS = 0xC0220036; pub const FWP_CALLOUT_NOTIFICATION_FAILED = 0xC0220037; pub const FWP_INCOMPATIBLE_AUTH_CONFIG = 0xC0220038; pub const FWP_INCOMPATIBLE_CIPHER_CONFIG = 0xC0220039; pub const FWP_DUPLICATE_AUTH_METHOD = 0xC022003C; pub const FWP_TCPIP_NOT_READY = 0xC0220100; pub const FWP_INJECT_HANDLE_CLOSING = 0xC0220101; pub const FWP_INJECT_HANDLE_STALE = 0xC0220102; pub const FWP_CANNOT_PEND = 0xC0220103; pub const NDIS_CLOSING = 0xC0230002; pub const NDIS_BAD_VERSION = 0xC0230004; pub const NDIS_BAD_CHARACTERISTICS = 0xC0230005; pub const NDIS_ADAPTER_NOT_FOUND = 0xC0230006; pub const NDIS_OPEN_FAILED = 0xC0230007; pub const NDIS_DEVICE_FAILED = 0xC0230008; pub const NDIS_MULTICAST_FULL = 0xC0230009; pub const NDIS_MULTICAST_EXISTS = 0xC023000A; pub const NDIS_MULTICAST_NOT_FOUND = 0xC023000B; pub const NDIS_REQUEST_ABORTED = 0xC023000C; pub const NDIS_RESET_IN_PROGRESS = 0xC023000D; pub const NDIS_INVALID_PACKET = 0xC023000F; pub const NDIS_INVALID_DEVICE_REQUEST = 0xC0230010; pub const NDIS_ADAPTER_NOT_READY = 0xC0230011; pub const NDIS_INVALID_LENGTH = 0xC0230014; pub const NDIS_INVALID_DATA = 0xC0230015; pub const NDIS_BUFFER_TOO_SHORT = 0xC0230016; pub const NDIS_INVALID_OID = 0xC0230017; pub const NDIS_ADAPTER_REMOVED = 0xC0230018; pub const NDIS_UNSUPPORTED_MEDIA = 0xC0230019; pub const NDIS_GROUP_ADDRESS_IN_USE = 0xC023001A; pub const NDIS_FILE_NOT_FOUND = 0xC023001B; pub const NDIS_ERROR_READING_FILE = 0xC023001C; pub const NDIS_ALREADY_MAPPED = 0xC023001D; pub const NDIS_RESOURCE_CONFLICT = 0xC023001E; pub const NDIS_MEDIA_DISCONNECTED = 0xC023001F; pub const NDIS_INVALID_ADDRESS = 0xC0230022; pub const NDIS_PAUSED = 0xC023002A; pub const NDIS_INTERFACE_NOT_FOUND = 0xC023002B; pub const NDIS_UNSUPPORTED_REVISION = 0xC023002C; pub const NDIS_INVALID_PORT = 0xC023002D; pub const NDIS_INVALID_PORT_STATE = 0xC023002E; pub const NDIS_LOW_POWER_STATE = 0xC023002F; pub const NDIS_NOT_SUPPORTED = 0xC02300BB; pub const NDIS_OFFLOAD_POLICY = 0xC023100F; pub const NDIS_OFFLOAD_CONNECTION_REJECTED = 0xC0231012; pub const NDIS_OFFLOAD_PATH_REJECTED = 0xC0231013; pub const NDIS_DOT11_AUTO_CONFIG_ENABLED = 0xC0232000; pub const NDIS_DOT11_MEDIA_IN_USE = 0xC0232001; pub const NDIS_DOT11_POWER_STATE_INVALID = 0xC0232002; pub const NDIS_PM_WOL_PATTERN_LIST_FULL = 0xC0232003; pub const NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL = 0xC0232004; pub const IPSEC_BAD_SPI = 0xC0360001; pub const IPSEC_SA_LIFETIME_EXPIRED = 0xC0360002; pub const IPSEC_WRONG_SA = 0xC0360003; pub const IPSEC_REPLAY_CHECK_FAILED = 0xC0360004; pub const IPSEC_INVALID_PACKET = 0xC0360005; pub const IPSEC_INTEGRITY_CHECK_FAILED = 0xC0360006; pub const IPSEC_CLEAR_TEXT_DROP = 0xC0360007; pub const IPSEC_AUTH_FIREWALL_DROP = 0xC0360008; pub const IPSEC_THROTTLE_DROP = 0xC0360009; pub const IPSEC_DOSP_BLOCK = 0xC0368000; pub const IPSEC_DOSP_RECEIVED_MULTICAST = 0xC0368001; pub const IPSEC_DOSP_INVALID_PACKET = 0xC0368002; pub const IPSEC_DOSP_STATE_LOOKUP_FAILED = 0xC0368003; pub const IPSEC_DOSP_MAX_ENTRIES = 0xC0368004; pub const IPSEC_DOSP_KEYMOD_NOT_ALLOWED = 0xC0368005; pub const 
IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES = 0xC0368006; pub const VOLMGR_MIRROR_NOT_SUPPORTED = 0xC038005B; pub const VOLMGR_RAID5_NOT_SUPPORTED = 0xC038005C; pub const VIRTDISK_PROVIDER_NOT_FOUND = 0xC03A0014; pub const VIRTDISK_NOT_VIRTUAL_DISK = 0xC03A0015; pub const VHD_PARENT_VHD_ACCESS_DENIED = 0xC03A0016; pub const VHD_CHILD_PARENT_SIZE_MISMATCH = 0xC03A0017; pub const VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED = 0xC03A0018; pub const VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT = 0xC03A0019;
lib/std/os/windows/status.zig
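// Minimal usage sketch for the NTSTATUS constants defined above. Codes with a
// 0xC prefix carry the "error" severity bits; a caller would typically compare
// a status word returned from a native NT call against these names instead of
// spelling out raw hex. The import path, the entry point and the hard-coded
// status value below are illustrative only, not part of the library.
const std = @import("std");
const status = @import("status.zig");

pub fn main() void {
    // Pretend a native call handed back this status word.
    const nt_status: u32 = 0xC01A001D;
    if (nt_status == status.LOG_FULL) {
        std.debug.print("log file is full\n", .{});
    } else if (nt_status == status.VIRUS_INFECTED) {
        std.debug.print("operation failed: file reported as infected\n", .{});
    } else {
        std.debug.print("unhandled NTSTATUS 0x{X}\n", .{nt_status});
    }
}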
const aoc = @import("../aoc.zig");
const std = @import("std");

const LightMap = struct {
    const Conway = aoc.Conway(aoc.Coord);
    const bounds = aoc.CoordRange.initWithBounds(aoc.Coord.init(.{0, 0}), aoc.Coord.init(.{99, 99}));

    conway: Conway,

    fn init(allocator: std.mem.Allocator) LightMap {
        return .{ .conway = Conway.init(allocator) };
    }

    fn deinit(self: *LightMap) void {
        self.conway.deinit();
    }

    // One Game of Life step, restricted to the 100x100 board: a light stays on
    // with 2 or 3 lit neighbours and turns on with exactly 3.
    fn animate(self: *LightMap) !void {
        var iter = self.conway.stepIterator();
        while (try iter.next()) {
            if (bounds.coordInRange(iter.coord)) {
                try iter.setActive(iter.active_neighbors == 3 or (iter.active_neighbors == 2 and iter.active));
            }
        }
    }

    fn activate(self: *LightMap, coord: aoc.Coord) !void {
        try self.conway.active_spots.put(coord, {});
    }

    // Part two: the four corner lights are stuck on.
    fn makeDefective(self: *LightMap) !void {
        try self.activate(aoc.Coord.init(.{0, 0}));
        try self.activate(aoc.Coord.init(.{0, 99}));
        try self.activate(aoc.Coord.init(.{99, 0}));
        try self.activate(aoc.Coord.init(.{99, 99}));
    }
};

pub fn run(problem: *aoc.Problem) !aoc.Solution {
    var map_good = LightMap.init(problem.allocator);
    defer map_good.deinit();
    var map_bad = LightMap.init(problem.allocator);
    defer map_bad.deinit();

    // Parse the initial grid; '#' marks a light that starts on.
    var coord = aoc.PredefinedCoord.ORIGIN;
    for (problem.input) |c| {
        switch (c) {
            '#' => {
                try map_good.activate(coord);
                try map_bad.activate(coord);
            },
            '.' => {},
            '\n' => {
                coord.row += 1;
                coord.col = -1;
            },
            else => unreachable,
        }
        coord.col += 1;
    }
    try map_bad.makeDefective();

    // Run 100 steps; the defective map gets its corners re-pinned after every step.
    var loop: u8 = 0;
    while (loop < 100) : (loop += 1) {
        try map_good.animate();
        try map_bad.animate();
        try map_bad.makeDefective();
    }

    return problem.solution(
        map_good.conway.active_spots.count(),
        map_bad.conway.active_spots.count(),
    );
}
src/main/zig/2015/day18.zig
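// A minimal, self-contained sketch of the update rule applied in
// LightMap.animate above, on a small fixed-size grid and without the
// repository's aoc.Conway helper. The grid size N, the Grid type and the
// function names are illustrative only. A light stays on with two or three
// lit neighbours and turns on with exactly three; the "defective" variant of
// the puzzle additionally forces the four corner lights back on after every
// step, which is what makeDefective does for the real map.
const std = @import("std");

const N = 6;
const Grid = [N][N]bool;

fn litNeighbours(g: Grid, r: usize, c: usize) u8 {
    var count: u8 = 0;
    // Clamp the 3x3 neighbourhood to the edges of the grid.
    const r0: usize = if (r == 0) 0 else r - 1;
    const r1: usize = if (r + 1 < N) r + 1 else N - 1;
    const c0: usize = if (c == 0) 0 else c - 1;
    const c1: usize = if (c + 1 < N) c + 1 else N - 1;
    var rr = r0;
    while (rr <= r1) : (rr += 1) {
        var cc = c0;
        while (cc <= c1) : (cc += 1) {
            if (rr == r and cc == c) continue;
            if (g[rr][cc]) count += 1;
        }
    }
    return count;
}

fn step(g: Grid) Grid {
    var next: Grid = undefined;
    var r: usize = 0;
    while (r < N) : (r += 1) {
        var c: usize = 0;
        while (c < N) : (c += 1) {
            const n = litNeighbours(g, r, c);
            // Same condition as iter.setActive(...) in LightMap.animate.
            next[r][c] = n == 3 or (n == 2 and g[r][c]);
        }
    }
    return next;
}

test "a lone lit light goes out after one step" {
    var g = std.mem.zeroes(Grid);
    g[2][2] = true;
    const after = step(g);
    try std.testing.expect(!after[2][2]);
}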
// // AUTO GENERATED! DO NOT EDIT! // const std = @import("std"); pub const ViewId = u16; pub const StateFlags = u64; /// Enable R write. pub const StateFlags_WriteR: StateFlags = 0x0000000000000001; /// Enable G write. pub const StateFlags_WriteG: StateFlags = 0x0000000000000002; /// Enable B write. pub const StateFlags_WriteB: StateFlags = 0x0000000000000004; /// Enable alpha write. pub const StateFlags_WriteA: StateFlags = 0x0000000000000008; /// Enable depth write. pub const StateFlags_WriteZ: StateFlags = 0x0000004000000000; /// Enable RGB write. pub const StateFlags_WriteRgb: StateFlags = 0x0000000000000007; /// Write all channels mask. pub const StateFlags_WriteMask: StateFlags = 0x000000400000000f; /// Enable depth test, less. pub const StateFlags_DepthTestLess: StateFlags = 0x0000000000000010; /// Enable depth test, less or equal. pub const StateFlags_DepthTestLequal: StateFlags = 0x0000000000000020; /// Enable depth test, equal. pub const StateFlags_DepthTestEqual: StateFlags = 0x0000000000000030; /// Enable depth test, greater or equal. pub const StateFlags_DepthTestGequal: StateFlags = 0x0000000000000040; /// Enable depth test, greater. pub const StateFlags_DepthTestGreater: StateFlags = 0x0000000000000050; /// Enable depth test, not equal. pub const StateFlags_DepthTestNotequal: StateFlags = 0x0000000000000060; /// Enable depth test, never. pub const StateFlags_DepthTestNever: StateFlags = 0x0000000000000070; /// Enable depth test, always. pub const StateFlags_DepthTestAlways: StateFlags = 0x0000000000000080; pub const StateFlags_DepthTestShift: StateFlags = 4; pub const StateFlags_DepthTestMask: StateFlags = 0x00000000000000f0; /// 0, 0, 0, 0 pub const StateFlags_BlendZero: StateFlags = 0x0000000000001000; /// 1, 1, 1, 1 pub const StateFlags_BlendOne: StateFlags = 0x0000000000002000; /// Rs, Gs, Bs, As pub const StateFlags_BlendSrcColor: StateFlags = 0x0000000000003000; /// 1-Rs, 1-Gs, 1-Bs, 1-As pub const StateFlags_BlendInvSrcColor: StateFlags = 0x0000000000004000; /// As, As, As, As pub const StateFlags_BlendSrcAlpha: StateFlags = 0x0000000000005000; /// 1-As, 1-As, 1-As, 1-As pub const StateFlags_BlendInvSrcAlpha: StateFlags = 0x0000000000006000; /// Ad, Ad, Ad, Ad pub const StateFlags_BlendDstAlpha: StateFlags = 0x0000000000007000; /// 1-Ad, 1-Ad, 1-Ad ,1-Ad pub const StateFlags_BlendInvDstAlpha: StateFlags = 0x0000000000008000; /// Rd, Gd, Bd, Ad pub const StateFlags_BlendDstColor: StateFlags = 0x0000000000009000; /// 1-Rd, 1-Gd, 1-Bd, 1-Ad pub const StateFlags_BlendInvDstColor: StateFlags = 0x000000000000a000; /// f, f, f, 1; f = min(As, 1-Ad) pub const StateFlags_BlendSrcAlphaSat: StateFlags = 0x000000000000b000; /// Blend factor pub const StateFlags_BlendFactor: StateFlags = 0x000000000000c000; /// 1-Blend factor pub const StateFlags_BlendInvFactor: StateFlags = 0x000000000000d000; pub const StateFlags_BlendShift: StateFlags = 12; pub const StateFlags_BlendMask: StateFlags = 0x000000000ffff000; /// Blend add: src + dst. pub const StateFlags_BlendEquationAdd: StateFlags = 0x0000000000000000; /// Blend subtract: src - dst. pub const StateFlags_BlendEquationSub: StateFlags = 0x0000000010000000; /// Blend reverse subtract: dst - src. pub const StateFlags_BlendEquationRevsub: StateFlags = 0x0000000020000000; /// Blend min: min(src, dst). pub const StateFlags_BlendEquationMin: StateFlags = 0x0000000030000000; /// Blend max: max(src, dst). 
pub const StateFlags_BlendEquationMax: StateFlags = 0x0000000040000000; pub const StateFlags_BlendEquationShift: StateFlags = 28; pub const StateFlags_BlendEquationMask: StateFlags = 0x00000003f0000000; /// Cull clockwise triangles. pub const StateFlags_CullCw: StateFlags = 0x0000001000000000; /// Cull counter-clockwise triangles. pub const StateFlags_CullCcw: StateFlags = 0x0000002000000000; pub const StateFlags_CullShift: StateFlags = 36; pub const StateFlags_CullMask: StateFlags = 0x0000003000000000; pub const StateFlags_AlphaRefShift: StateFlags = 40; pub const StateFlags_AlphaRefMask: StateFlags = 0x0000ff0000000000; /// Tristrip. pub const StateFlags_PtTristrip: StateFlags = 0x0001000000000000; /// Lines. pub const StateFlags_PtLines: StateFlags = 0x0002000000000000; /// Line strip. pub const StateFlags_PtLinestrip: StateFlags = 0x0003000000000000; /// Points. pub const StateFlags_PtPoints: StateFlags = 0x0004000000000000; pub const StateFlags_PtShift: StateFlags = 48; pub const StateFlags_PtMask: StateFlags = 0x0007000000000000; pub const StateFlags_PointSizeShift: StateFlags = 52; pub const StateFlags_PointSizeMask: StateFlags = 0x00f0000000000000; /// Enable MSAA rasterization. pub const StateFlags_Msaa: StateFlags = 0x0100000000000000; /// Enable line AA rasterization. pub const StateFlags_Lineaa: StateFlags = 0x0200000000000000; /// Enable conservative rasterization. pub const StateFlags_ConservativeRaster: StateFlags = 0x0400000000000000; /// No state. pub const StateFlags_None: StateFlags = 0x0000000000000000; /// Front counter-clockwise (default is clockwise). pub const StateFlags_FrontCcw: StateFlags = 0x0000008000000000; /// Enable blend independent. pub const StateFlags_BlendIndependent: StateFlags = 0x0000000400000000; /// Enable alpha to coverage. pub const StateFlags_BlendAlphaToCoverage: StateFlags = 0x0000000800000000; /// Default state is write to RGB, alpha, and depth with depth test less enabled, with clockwise /// culling and MSAA (when writing into MSAA frame buffer, otherwise this flag is ignored). pub const StateFlags_Default: StateFlags = 0x010000500000001f; pub const StateFlags_Mask: StateFlags = 0xffffffffffffffff; pub const StateFlags_ReservedShift: StateFlags = 61; pub const StateFlags_ReservedMask: StateFlags = 0xe000000000000000; pub const StencilFlags = u32; pub const StencilFlags_FuncRefShift: StencilFlags = 0; pub const StencilFlags_FuncRefMask: StencilFlags = 0x000000ff; pub const StencilFlags_FuncRmaskShift: StencilFlags = 8; pub const StencilFlags_FuncRmaskMask: StencilFlags = 0x0000ff00; pub const StencilFlags_None: StencilFlags = 0x00000000; pub const StencilFlags_Mask: StencilFlags = 0xffffffff; pub const StencilFlags_Default: StencilFlags = 0x00000000; /// Enable stencil test, less. pub const StencilFlags_TestLess: StencilFlags = 0x00010000; /// Enable stencil test, less or equal. pub const StencilFlags_TestLequal: StencilFlags = 0x00020000; /// Enable stencil test, equal. pub const StencilFlags_TestEqual: StencilFlags = 0x00030000; /// Enable stencil test, greater or equal. pub const StencilFlags_TestGequal: StencilFlags = 0x00040000; /// Enable stencil test, greater. pub const StencilFlags_TestGreater: StencilFlags = 0x00050000; /// Enable stencil test, not equal. pub const StencilFlags_TestNotequal: StencilFlags = 0x00060000; /// Enable stencil test, never. pub const StencilFlags_TestNever: StencilFlags = 0x00070000; /// Enable stencil test, always. 
pub const StencilFlags_TestAlways: StencilFlags = 0x00080000; pub const StencilFlags_TestShift: StencilFlags = 16; pub const StencilFlags_TestMask: StencilFlags = 0x000f0000; /// Zero. pub const StencilFlags_OpFailSZero: StencilFlags = 0x00000000; /// Keep. pub const StencilFlags_OpFailSKeep: StencilFlags = 0x00100000; /// Replace. pub const StencilFlags_OpFailSReplace: StencilFlags = 0x00200000; /// Increment and wrap. pub const StencilFlags_OpFailSIncr: StencilFlags = 0x00300000; /// Increment and clamp. pub const StencilFlags_OpFailSIncrsat: StencilFlags = 0x00400000; /// Decrement and wrap. pub const StencilFlags_OpFailSDecr: StencilFlags = 0x00500000; /// Decrement and clamp. pub const StencilFlags_OpFailSDecrsat: StencilFlags = 0x00600000; /// Invert. pub const StencilFlags_OpFailSInvert: StencilFlags = 0x00700000; pub const StencilFlags_OpFailSShift: StencilFlags = 20; pub const StencilFlags_OpFailSMask: StencilFlags = 0x00f00000; /// Zero. pub const StencilFlags_OpFailZZero: StencilFlags = 0x00000000; /// Keep. pub const StencilFlags_OpFailZKeep: StencilFlags = 0x01000000; /// Replace. pub const StencilFlags_OpFailZReplace: StencilFlags = 0x02000000; /// Increment and wrap. pub const StencilFlags_OpFailZIncr: StencilFlags = 0x03000000; /// Increment and clamp. pub const StencilFlags_OpFailZIncrsat: StencilFlags = 0x04000000; /// Decrement and wrap. pub const StencilFlags_OpFailZDecr: StencilFlags = 0x05000000; /// Decrement and clamp. pub const StencilFlags_OpFailZDecrsat: StencilFlags = 0x06000000; /// Invert. pub const StencilFlags_OpFailZInvert: StencilFlags = 0x07000000; pub const StencilFlags_OpFailZShift: StencilFlags = 24; pub const StencilFlags_OpFailZMask: StencilFlags = 0x0f000000; /// Zero. pub const StencilFlags_OpPassZZero: StencilFlags = 0x00000000; /// Keep. pub const StencilFlags_OpPassZKeep: StencilFlags = 0x10000000; /// Replace. pub const StencilFlags_OpPassZReplace: StencilFlags = 0x20000000; /// Increment and wrap. pub const StencilFlags_OpPassZIncr: StencilFlags = 0x30000000; /// Increment and clamp. pub const StencilFlags_OpPassZIncrsat: StencilFlags = 0x40000000; /// Decrement and wrap. pub const StencilFlags_OpPassZDecr: StencilFlags = 0x50000000; /// Decrement and clamp. pub const StencilFlags_OpPassZDecrsat: StencilFlags = 0x60000000; /// Invert. pub const StencilFlags_OpPassZInvert: StencilFlags = 0x70000000; pub const StencilFlags_OpPassZShift: StencilFlags = 28; pub const StencilFlags_OpPassZMask: StencilFlags = 0xf0000000; pub const ClearFlags = u16; /// No clear flags. pub const ClearFlags_None: ClearFlags = 0x0000; /// Clear color. pub const ClearFlags_Color: ClearFlags = 0x0001; /// Clear depth. pub const ClearFlags_Depth: ClearFlags = 0x0002; /// Clear stencil. pub const ClearFlags_Stencil: ClearFlags = 0x0004; /// Discard frame buffer attachment 0. pub const ClearFlags_DiscardColor0: ClearFlags = 0x0008; /// Discard frame buffer attachment 1. pub const ClearFlags_DiscardColor1: ClearFlags = 0x0010; /// Discard frame buffer attachment 2. pub const ClearFlags_DiscardColor2: ClearFlags = 0x0020; /// Discard frame buffer attachment 3. pub const ClearFlags_DiscardColor3: ClearFlags = 0x0040; /// Discard frame buffer attachment 4. pub const ClearFlags_DiscardColor4: ClearFlags = 0x0080; /// Discard frame buffer attachment 5. pub const ClearFlags_DiscardColor5: ClearFlags = 0x0100; /// Discard frame buffer attachment 6. pub const ClearFlags_DiscardColor6: ClearFlags = 0x0200; /// Discard frame buffer attachment 7. 
pub const ClearFlags_DiscardColor7: ClearFlags = 0x0400; /// Discard frame buffer depth attachment. pub const ClearFlags_DiscardDepth: ClearFlags = 0x0800; /// Discard frame buffer stencil attachment. pub const ClearFlags_DiscardStencil: ClearFlags = 0x1000; pub const ClearFlags_DiscardColorMask: ClearFlags = 0x07f8; pub const ClearFlags_DiscardMask: ClearFlags = 0x1ff8; pub const DiscardFlags = u32; /// Preserve everything. pub const DiscardFlags_None: DiscardFlags = 0x00000000; /// Discard texture sampler and buffer bindings. pub const DiscardFlags_Bindings: DiscardFlags = 0x00000001; /// Discard index buffer. pub const DiscardFlags_IndexBuffer: DiscardFlags = 0x00000002; /// Discard instance data. pub const DiscardFlags_InstanceData: DiscardFlags = 0x00000004; /// Discard state and uniform bindings. pub const DiscardFlags_State: DiscardFlags = 0x00000008; /// Discard transform. pub const DiscardFlags_Transform: DiscardFlags = 0x00000010; /// Discard vertex streams. pub const DiscardFlags_VertexStreams: DiscardFlags = 0x00000020; /// Discard all states. pub const DiscardFlags_All: DiscardFlags = 0x000000ff; pub const DebugFlags = u32; /// No debug. pub const DebugFlags_None: DebugFlags = 0x00000000; /// Enable wireframe for all primitives. pub const DebugFlags_Wireframe: DebugFlags = 0x00000001; /// Enable infinitely fast hardware test. No draw calls will be submitted to driver. /// It's useful when profiling to quickly assess bottleneck between CPU and GPU. pub const DebugFlags_Ifh: DebugFlags = 0x00000002; /// Enable statistics display. pub const DebugFlags_Stats: DebugFlags = 0x00000004; /// Enable debug text display. pub const DebugFlags_Text: DebugFlags = 0x00000008; /// Enable profiler. This causes per-view statistics to be collected, available through `bgfx::Stats::ViewStats`. This is unrelated to the profiler functions in `bgfx::CallbackI`. pub const DebugFlags_Profiler: DebugFlags = 0x00000010; pub const BufferFlags = u16; /// 1 8-bit value pub const BufferFlags_ComputeFormat8x1: BufferFlags = 0x0001; /// 2 8-bit values pub const BufferFlags_ComputeFormat8x2: BufferFlags = 0x0002; /// 4 8-bit values pub const BufferFlags_ComputeFormat8x4: BufferFlags = 0x0003; /// 1 16-bit value pub const BufferFlags_ComputeFormat16x1: BufferFlags = 0x0004; /// 2 16-bit values pub const BufferFlags_ComputeFormat16x2: BufferFlags = 0x0005; /// 4 16-bit values pub const BufferFlags_ComputeFormat16x4: BufferFlags = 0x0006; /// 1 32-bit value pub const BufferFlags_ComputeFormat32x1: BufferFlags = 0x0007; /// 2 32-bit values pub const BufferFlags_ComputeFormat32x2: BufferFlags = 0x0008; /// 4 32-bit values pub const BufferFlags_ComputeFormat32x4: BufferFlags = 0x0009; pub const BufferFlags_ComputeFormatShift: BufferFlags = 0; pub const BufferFlags_ComputeFormatMask: BufferFlags = 0x000f; /// Type `int`. pub const BufferFlags_ComputeTypeInt: BufferFlags = 0x0010; /// Type `uint`. pub const BufferFlags_ComputeTypeUint: BufferFlags = 0x0020; /// Type `float`. pub const BufferFlags_ComputeTypeFloat: BufferFlags = 0x0030; pub const BufferFlags_ComputeTypeShift: BufferFlags = 4; pub const BufferFlags_ComputeTypeMask: BufferFlags = 0x0030; pub const BufferFlags_None: BufferFlags = 0x0000; /// Buffer will be read by shader. pub const BufferFlags_ComputeRead: BufferFlags = 0x0100; /// Buffer will be used for writing. pub const BufferFlags_ComputeWrite: BufferFlags = 0x0200; /// Buffer will be used for storing draw indirect commands. 
pub const BufferFlags_DrawIndirect: BufferFlags = 0x0400; /// Allow dynamic index/vertex buffer resize during update. pub const BufferFlags_AllowResize: BufferFlags = 0x0800; /// Index buffer contains 32-bit indices. pub const BufferFlags_Index32: BufferFlags = 0x1000; pub const BufferFlags_ComputeReadWrite: BufferFlags = 0x0300; pub const TextureFlags = u64; pub const TextureFlags_None: TextureFlags = 0x0000000000000000; /// Texture will be used for MSAA sampling. pub const TextureFlags_MsaaSample: TextureFlags = 0x0000000800000000; /// Render target no MSAA. pub const TextureFlags_Rt: TextureFlags = 0x0000001000000000; /// Texture will be used for compute write. pub const TextureFlags_ComputeWrite: TextureFlags = 0x0000100000000000; /// Sample texture as sRGB. pub const TextureFlags_Srgb: TextureFlags = 0x0000200000000000; /// Texture will be used as blit destination. pub const TextureFlags_BlitDst: TextureFlags = 0x0000400000000000; /// Texture will be used for read back from GPU. pub const TextureFlags_ReadBack: TextureFlags = 0x0000800000000000; /// Render target MSAAx2 mode. pub const TextureFlags_RtMsaaX2: TextureFlags = 0x0000002000000000; /// Render target MSAAx4 mode. pub const TextureFlags_RtMsaaX4: TextureFlags = 0x0000003000000000; /// Render target MSAAx8 mode. pub const TextureFlags_RtMsaaX8: TextureFlags = 0x0000004000000000; /// Render target MSAAx16 mode. pub const TextureFlags_RtMsaaX16: TextureFlags = 0x0000005000000000; pub const TextureFlags_RtMsaaShift: TextureFlags = 36; pub const TextureFlags_RtMsaaMask: TextureFlags = 0x0000007000000000; /// Render target will be used for writing pub const TextureFlags_RtWriteOnly: TextureFlags = 0x0000008000000000; pub const TextureFlags_RtShift: TextureFlags = 36; pub const TextureFlags_RtMask: TextureFlags = 0x000000f000000000; pub const SamplerFlags = u32; /// Wrap U mode: Mirror pub const SamplerFlags_UMirror: SamplerFlags = 0x00000001; /// Wrap U mode: Clamp pub const SamplerFlags_UClamp: SamplerFlags = 0x00000002; /// Wrap U mode: Border pub const SamplerFlags_UBorder: SamplerFlags = 0x00000003; pub const SamplerFlags_UShift: SamplerFlags = 0; pub const SamplerFlags_UMask: SamplerFlags = 0x00000003; /// Wrap V mode: Mirror pub const SamplerFlags_VMirror: SamplerFlags = 0x00000004; /// Wrap V mode: Clamp pub const SamplerFlags_VClamp: SamplerFlags = 0x00000008; /// Wrap V mode: Border pub const SamplerFlags_VBorder: SamplerFlags = 0x0000000c; pub const SamplerFlags_VShift: SamplerFlags = 2; pub const SamplerFlags_VMask: SamplerFlags = 0x0000000c; /// Wrap W mode: Mirror pub const SamplerFlags_WMirror: SamplerFlags = 0x00000010; /// Wrap W mode: Clamp pub const SamplerFlags_WClamp: SamplerFlags = 0x00000020; /// Wrap W mode: Border pub const SamplerFlags_WBorder: SamplerFlags = 0x00000030; pub const SamplerFlags_WShift: SamplerFlags = 4; pub const SamplerFlags_WMask: SamplerFlags = 0x00000030; /// Min sampling mode: Point pub const SamplerFlags_MinPoint: SamplerFlags = 0x00000040; /// Min sampling mode: Anisotropic pub const SamplerFlags_MinAnisotropic: SamplerFlags = 0x00000080; pub const SamplerFlags_MinShift: SamplerFlags = 6; pub const SamplerFlags_MinMask: SamplerFlags = 0x000000c0; /// Mag sampling mode: Point pub const SamplerFlags_MagPoint: SamplerFlags = 0x00000100; /// Mag sampling mode: Anisotropic pub const SamplerFlags_MagAnisotropic: SamplerFlags = 0x00000200; pub const SamplerFlags_MagShift: SamplerFlags = 8; pub const SamplerFlags_MagMask: SamplerFlags = 0x00000300; /// Mip sampling mode: Point pub const 
SamplerFlags_MipPoint: SamplerFlags = 0x00000400; pub const SamplerFlags_MipShift: SamplerFlags = 10; pub const SamplerFlags_MipMask: SamplerFlags = 0x00000400; /// Compare when sampling depth texture: less. pub const SamplerFlags_CompareLess: SamplerFlags = 0x00010000; /// Compare when sampling depth texture: less or equal. pub const SamplerFlags_CompareLequal: SamplerFlags = 0x00020000; /// Compare when sampling depth texture: equal. pub const SamplerFlags_CompareEqual: SamplerFlags = 0x00030000; /// Compare when sampling depth texture: greater or equal. pub const SamplerFlags_CompareGequal: SamplerFlags = 0x00040000; /// Compare when sampling depth texture: greater. pub const SamplerFlags_CompareGreater: SamplerFlags = 0x00050000; /// Compare when sampling depth texture: not equal. pub const SamplerFlags_CompareNotequal: SamplerFlags = 0x00060000; /// Compare when sampling depth texture: never. pub const SamplerFlags_CompareNever: SamplerFlags = 0x00070000; /// Compare when sampling depth texture: always. pub const SamplerFlags_CompareAlways: SamplerFlags = 0x00080000; pub const SamplerFlags_CompareShift: SamplerFlags = 16; pub const SamplerFlags_CompareMask: SamplerFlags = 0x000f0000; pub const SamplerFlags_BorderColorShift: SamplerFlags = 24; pub const SamplerFlags_BorderColorMask: SamplerFlags = 0x0f000000; pub const SamplerFlags_ReservedShift: SamplerFlags = 28; pub const SamplerFlags_ReservedMask: SamplerFlags = 0xf0000000; pub const SamplerFlags_None: SamplerFlags = 0x00000000; /// Sample stencil instead of depth. pub const SamplerFlags_SampleStencil: SamplerFlags = 0x00100000; pub const SamplerFlags_Point: SamplerFlags = 0x00000540; pub const SamplerFlags_UvwMirror: SamplerFlags = 0x00000015; pub const SamplerFlags_UvwClamp: SamplerFlags = 0x0000002a; pub const SamplerFlags_UvwBorder: SamplerFlags = 0x0000003f; pub const SamplerFlags_BitsMask: SamplerFlags = 0x000f07ff; pub const ResetFlags = u32; /// Enable 2x MSAA. pub const ResetFlags_MsaaX2: ResetFlags = 0x00000010; /// Enable 4x MSAA. pub const ResetFlags_MsaaX4: ResetFlags = 0x00000020; /// Enable 8x MSAA. pub const ResetFlags_MsaaX8: ResetFlags = 0x00000030; /// Enable 16x MSAA. pub const ResetFlags_MsaaX16: ResetFlags = 0x00000040; pub const ResetFlags_MsaaShift: ResetFlags = 4; pub const ResetFlags_MsaaMask: ResetFlags = 0x00000070; /// No reset flags. pub const ResetFlags_None: ResetFlags = 0x00000000; /// Not supported yet. pub const ResetFlags_Fullscreen: ResetFlags = 0x00000001; /// Enable V-Sync. pub const ResetFlags_Vsync: ResetFlags = 0x00000080; /// Turn on/off max anisotropy. pub const ResetFlags_Maxanisotropy: ResetFlags = 0x00000100; /// Begin screen capture. pub const ResetFlags_Capture: ResetFlags = 0x00000200; /// Flush rendering after submitting to GPU. pub const ResetFlags_FlushAfterRender: ResetFlags = 0x00002000; /// This flag specifies where flip occurs. Default behaviour is that flip occurs /// before rendering new frame. This flag only has effect when `BGFX_CONFIG_MULTITHREADED=0`. pub const ResetFlags_FlipAfterRender: ResetFlags = 0x00004000; /// Enable sRGB backbuffer. pub const ResetFlags_SrgbBackbuffer: ResetFlags = 0x00008000; /// Enable HDR10 rendering. pub const ResetFlags_Hdr10: ResetFlags = 0x00010000; /// Enable HiDPI rendering. pub const ResetFlags_Hidpi: ResetFlags = 0x00020000; /// Enable depth clamp. pub const ResetFlags_DepthClamp: ResetFlags = 0x00040000; /// Suspend rendering. 
pub const ResetFlags_Suspend: ResetFlags = 0x00080000; pub const ResetFlags_FullscreenShift: ResetFlags = 0; pub const ResetFlags_FullscreenMask: ResetFlags = 0x00000001; pub const ResetFlags_ReservedShift: ResetFlags = 31; pub const ResetFlags_ReservedMask: ResetFlags = 0x80000000; pub const CapsFlags = u64; /// Alpha to coverage is supported. pub const CapsFlags_AlphaToCoverage: CapsFlags = 0x0000000000000001; /// Blend independent is supported. pub const CapsFlags_BlendIndependent: CapsFlags = 0x0000000000000002; /// Compute shaders are supported. pub const CapsFlags_Compute: CapsFlags = 0x0000000000000004; /// Conservative rasterization is supported. pub const CapsFlags_ConservativeRaster: CapsFlags = 0x0000000000000008; /// Draw indirect is supported. pub const CapsFlags_DrawIndirect: CapsFlags = 0x0000000000000010; /// Fragment depth is available in fragment shader. pub const CapsFlags_FragmentDepth: CapsFlags = 0x0000000000000020; /// Fragment ordering is available in fragment shader. pub const CapsFlags_FragmentOrdering: CapsFlags = 0x0000000000000040; /// Graphics debugger is present. pub const CapsFlags_GraphicsDebugger: CapsFlags = 0x0000000000000080; /// HDR10 rendering is supported. pub const CapsFlags_Hdr10: CapsFlags = 0x0000000000000100; /// HiDPI rendering is supported. pub const CapsFlags_Hidpi: CapsFlags = 0x0000000000000200; /// Image Read/Write is supported. pub const CapsFlags_ImageRw: CapsFlags = 0x0000000000000400; /// 32-bit indices are supported. pub const CapsFlags_Index32: CapsFlags = 0x0000000000000800; /// Instancing is supported. pub const CapsFlags_Instancing: CapsFlags = 0x0000000000001000; /// Occlusion query is supported. pub const CapsFlags_OcclusionQuery: CapsFlags = 0x0000000000002000; /// Renderer is on separate thread. pub const CapsFlags_RendererMultithreaded: CapsFlags = 0x0000000000004000; /// Multiple windows are supported. pub const CapsFlags_SwapChain: CapsFlags = 0x0000000000008000; /// 2D texture array is supported. pub const CapsFlags_Texture2DArray: CapsFlags = 0x0000000000010000; /// 3D textures are supported. pub const CapsFlags_Texture3D: CapsFlags = 0x0000000000020000; /// Texture blit is supported. pub const CapsFlags_TextureBlit: CapsFlags = 0x0000000000040000; pub const CapsFlags_TextureCompareReserved: CapsFlags = 0x0000000000080000; /// Texture compare less equal mode is supported. pub const CapsFlags_TextureCompareLequal: CapsFlags = 0x0000000000100000; /// Cubemap texture array is supported. pub const CapsFlags_TextureCubeArray: CapsFlags = 0x0000000000200000; /// CPU direct access to GPU texture memory. pub const CapsFlags_TextureDirectAccess: CapsFlags = 0x0000000000400000; /// Read-back texture is supported. pub const CapsFlags_TextureReadBack: CapsFlags = 0x0000000000800000; /// Vertex attribute half-float is supported. pub const CapsFlags_VertexAttribHalf: CapsFlags = 0x0000000001000000; /// Vertex attribute 10_10_10_2 is supported. pub const CapsFlags_VertexAttribUint10: CapsFlags = 0x0000000002000000; /// Rendering with VertexID only is supported. pub const CapsFlags_VertexId: CapsFlags = 0x0000000004000000; /// Viewport layer is available in vertex shader. pub const CapsFlags_ViewportLayerArray: CapsFlags = 0x0000000008000000; /// All texture compare modes are supported. pub const CapsFlags_TextureCompareAll: CapsFlags = 0x0000000000180000; pub const CapsFormatFlags = u32; /// Texture format is not supported. pub const CapsFormatFlags_TextureNone: CapsFormatFlags = 0x00000000; /// Texture format is supported. 
pub const CapsFormatFlags_Texture2D: CapsFormatFlags = 0x00000001; /// Texture as sRGB format is supported. pub const CapsFormatFlags_Texture2DSrgb: CapsFormatFlags = 0x00000002; /// Texture format is emulated. pub const CapsFormatFlags_Texture2DEmulated: CapsFormatFlags = 0x00000004; /// Texture format is supported. pub const CapsFormatFlags_Texture3D: CapsFormatFlags = 0x00000008; /// Texture as sRGB format is supported. pub const CapsFormatFlags_Texture3DSrgb: CapsFormatFlags = 0x00000010; /// Texture format is emulated. pub const CapsFormatFlags_Texture3DEmulated: CapsFormatFlags = 0x00000020; /// Texture format is supported. pub const CapsFormatFlags_TextureCube: CapsFormatFlags = 0x00000040; /// Texture as sRGB format is supported. pub const CapsFormatFlags_TextureCubeSrgb: CapsFormatFlags = 0x00000080; /// Texture format is emulated. pub const CapsFormatFlags_TextureCubeEmulated: CapsFormatFlags = 0x00000100; /// Texture format can be used from vertex shader. pub const CapsFormatFlags_TextureVertex: CapsFormatFlags = 0x00000200; /// Texture format can be used as image and read from. pub const CapsFormatFlags_TextureImageRead: CapsFormatFlags = 0x00000400; /// Texture format can be used as image and written to. pub const CapsFormatFlags_TextureImageWrite: CapsFormatFlags = 0x00000800; /// Texture format can be used as frame buffer. pub const CapsFormatFlags_TextureFramebuffer: CapsFormatFlags = 0x00001000; /// Texture format can be used as MSAA frame buffer. pub const CapsFormatFlags_TextureFramebufferMsaa: CapsFormatFlags = 0x00002000; /// Texture can be sampled as MSAA. pub const CapsFormatFlags_TextureMsaa: CapsFormatFlags = 0x00004000; /// Texture format supports auto-generated mips. pub const CapsFormatFlags_TextureMipAutogen: CapsFormatFlags = 0x00008000; pub const ResolveFlags = u32; /// No resolve flags. pub const ResolveFlags_None: ResolveFlags = 0x00000000; /// Auto-generate mip maps on resolve. pub const ResolveFlags_AutoGenMips: ResolveFlags = 0x00000001; pub const PciIdFlags = u16; /// Autoselect adapter. pub const PciIdFlags_None: PciIdFlags = 0x0000; /// Software rasterizer. pub const PciIdFlags_SoftwareRasterizer: PciIdFlags = 0x0001; /// AMD adapter. pub const PciIdFlags_Amd: PciIdFlags = 0x1002; /// Apple adapter. pub const PciIdFlags_Apple: PciIdFlags = 0x106b; /// Intel adapter. pub const PciIdFlags_Intel: PciIdFlags = 0x8086; /// nVidia adapter. pub const PciIdFlags_Nvidia: PciIdFlags = 0x10de; /// Microsoft adapter. pub const PciIdFlags_Microsoft: PciIdFlags = 0x1414; pub const CubeMapFlags = u32; /// Cubemap +x. pub const CubeMapFlags_PositiveX: CubeMapFlags = 0x00000000; /// Cubemap -x. pub const CubeMapFlags_NegativeX: CubeMapFlags = 0x00000001; /// Cubemap +y. pub const CubeMapFlags_PositiveY: CubeMapFlags = 0x00000002; /// Cubemap -y. pub const CubeMapFlags_NegativeY: CubeMapFlags = 0x00000003; /// Cubemap +z. pub const CubeMapFlags_PositiveZ: CubeMapFlags = 0x00000004; /// Cubemap -z. pub const CubeMapFlags_NegativeZ: CubeMapFlags = 0x00000005; pub const Fatal = enum(c_int) { DebugCheck, InvalidShader, UnableToInitialize, UnableToCreateTexture, DeviceLost, Count }; pub const RendererType = enum(c_int) { /// No rendering. Noop, /// AGC Agc, /// Direct3D 9.0 Direct3D9, /// Direct3D 11.0 Direct3D11, /// Direct3D 12.0 Direct3D12, /// GNM Gnm, /// Metal Metal, /// NVN Nvn, /// OpenGL ES 2.0+ OpenGLES, /// OpenGL 2.1+ OpenGL, /// Vulkan Vulkan, /// WebGPU WebGPU, Count }; pub const Access = enum(c_int) { /// Read. Read, /// Write. Write, /// Read and write. 
ReadWrite, Count }; pub const Attrib = enum(c_int) { /// a_position Position, /// a_normal Normal, /// a_tangent Tangent, /// a_bitangent Bitangent, /// a_color0 Color0, /// a_color1 Color1, /// a_color2 Color2, /// a_color3 Color3, /// a_indices Indices, /// a_weight Weight, /// a_texcoord0 TexCoord0, /// a_texcoord1 TexCoord1, /// a_texcoord2 TexCoord2, /// a_texcoord3 TexCoord3, /// a_texcoord4 TexCoord4, /// a_texcoord5 TexCoord5, /// a_texcoord6 TexCoord6, /// a_texcoord7 TexCoord7, Count }; pub const AttribType = enum(c_int) { /// Uint8 Uint8, /// Uint10, availability depends on: `BGFX_CAPS_VERTEX_ATTRIB_UINT10`. Uint10, /// Int16 Int16, /// Half, availability depends on: `BGFX_CAPS_VERTEX_ATTRIB_HALF`. Half, /// Float Float, Count }; pub const TextureFormat = enum(c_int) { /// DXT1 R5G6B5A1 BC1, /// DXT3 R5G6B5A4 BC2, /// DXT5 R5G6B5A8 BC3, /// LATC1/ATI1 R8 BC4, /// LATC2/ATI2 RG8 BC5, /// BC6H RGB16F BC6H, /// BC7 RGB 4-7 bits per color channel, 0-8 bits alpha BC7, /// ETC1 RGB8 ETC1, /// ETC2 RGB8 ETC2, /// ETC2 RGBA8 ETC2A, /// ETC2 RGB8A1 ETC2A1, /// PVRTC1 RGB 2BPP PTC12, /// PVRTC1 RGB 4BPP PTC14, /// PVRTC1 RGBA 2BPP PTC12A, /// PVRTC1 RGBA 4BPP PTC14A, /// PVRTC2 RGBA 2BPP PTC22, /// PVRTC2 RGBA 4BPP PTC24, /// ATC RGB 4BPP ATC, /// ATCE RGBA 8 BPP explicit alpha ATCE, /// ATCI RGBA 8 BPP interpolated alpha ATCI, /// ASTC 4x4 8.0 BPP ASTC4x4, /// ASTC 5x5 5.12 BPP ASTC5x5, /// ASTC 6x6 3.56 BPP ASTC6x6, /// ASTC 8x5 3.20 BPP ASTC8x5, /// ASTC 8x6 2.67 BPP ASTC8x6, /// ASTC 10x5 2.56 BPP ASTC10x5, /// Compressed formats above. Unknown, R1, A8, R8, R8I, R8U, R8S, R16, R16I, R16U, R16F, R16S, R32I, R32U, R32F, RG8, RG8I, RG8U, RG8S, RG16, RG16I, RG16U, RG16F, RG16S, RG32I, RG32U, RG32F, RGB8, RGB8I, RGB8U, RGB8S, RGB9E5F, BGRA8, RGBA8, RGBA8I, RGBA8U, RGBA8S, RGBA16, RGBA16I, RGBA16U, RGBA16F, RGBA16S, RGBA32I, RGBA32U, RGBA32F, R5G6B5, RGBA4, RGB5A1, RGB10A2, RG11B10F, /// Depth formats below. UnknownDepth, D16, D24, D24S8, D32, D16F, D24F, D32F, D0S8, Count }; pub const UniformType = enum(c_int) { /// Sampler. Sampler, /// Reserved, do not use. End, /// 4 floats vector. Vec4, /// 3x3 matrix. Mat3, /// 4x4 matrix. Mat4, Count }; pub const BackbufferRatio = enum(c_int) { /// Equal to backbuffer. Equal, /// One half size of backbuffer. Half, /// One quarter size of backbuffer. Quarter, /// One eighth size of backbuffer. Eighth, /// One sixteenth size of backbuffer. Sixteenth, /// Double size of backbuffer. Double, Count }; pub const OcclusionQueryResult = enum(c_int) { /// Query failed test. Invisible, /// Query passed test. Visible, /// Query result is not available yet. NoResult, Count }; pub const Topology = enum(c_int) { /// Triangle list. TriList, /// Triangle strip. TriStrip, /// Line list. LineList, /// Line strip. LineStrip, /// Point list. PointList, Count }; pub const TopologyConvert = enum(c_int) { /// Flip winding order of triangle list. TriListFlipWinding, /// Flip winding order of triangle strip. TriStripFlipWinding, /// Convert triangle list to line list. TriListToLineList, /// Convert triangle strip to triangle list. TriStripToTriList, /// Convert line strip to line list. 
LineStripToLineList, Count }; pub const TopologySort = enum(c_int) { DirectionFrontToBackMin, DirectionFrontToBackAvg, DirectionFrontToBackMax, DirectionBackToFrontMin, DirectionBackToFrontAvg, DirectionBackToFrontMax, DistanceFrontToBackMin, DistanceFrontToBackAvg, DistanceFrontToBackMax, DistanceBackToFrontMin, DistanceBackToFrontAvg, DistanceBackToFrontMax, Count }; pub const ViewMode = enum(c_int) { /// Default sort order. Default, /// Sort in the same order in which submit calls were called. Sequential, /// Sort draw call depth in ascending order. DepthAscending, /// Sort draw call depth in descending order. DepthDescending, Count }; pub const RenderFrame = enum(c_int) { /// Renderer context is not created yet. NoContext, /// Renderer context is created and rendering. Render, /// Renderer context wait for main thread signal timed out without rendering. Timeout, /// Renderer context is getting destroyed. Exiting, Count }; pub const Caps = extern struct { pub const GPU = extern struct { vendorId: u16, deviceId: u16, }; pub const Limits = extern struct { maxDrawCalls: u32, maxBlits: u32, maxTextureSize: u32, maxTextureLayers: u32, maxViews: u32, maxFrameBuffers: u32, maxFBAttachments: u32, maxPrograms: u32, maxShaders: u32, maxTextures: u32, maxTextureSamplers: u32, maxComputeBindings: u32, maxVertexLayouts: u32, maxVertexStreams: u32, maxIndexBuffers: u32, maxVertexBuffers: u32, maxDynamicIndexBuffers: u32, maxDynamicVertexBuffers: u32, maxUniforms: u32, maxOcclusionQueries: u32, maxEncoders: u32, minResourceCbSize: u32, transientVbSize: u32, transientIbSize: u32, }; rendererType: RendererType, supported: u64, vendorId: u16, deviceId: u16, homogeneousDepth: bool, originBottomLeft: bool, numGPUs: u8, gpu: [4]GPU, limits: Limits, formats: [85]u16, }; pub const InternalData = extern struct { caps: [*c]const Caps, context: ?*anyopaque, }; pub const PlatformData = extern struct { ndt: ?*anyopaque, nwh: ?*anyopaque, context: ?*anyopaque, backBuffer: ?*anyopaque, backBufferDS: ?*anyopaque, }; pub const Resolution = extern struct { format: TextureFormat, width: u32, height: u32, reset: u32, numBackBuffers: u8, maxFrameLatency: u8, }; pub const Init = extern struct { pub const Limits = extern struct { maxEncoders: u16, minResourceCbSize: u32, transientVbSize: u32, transientIbSize: u32, }; type: RendererType, vendorId: u16, deviceId: u16, capabilities: u64, debug: bool, profile: bool, platformData: PlatformData, resolution: Resolution, limits: Limits, callback: ?*anyopaque, allocator: ?*anyopaque, }; pub const Memory = extern struct { data: [*c]u8, size: u32, }; pub const TransientIndexBuffer = extern struct { data: [*c]u8, size: u32, startIndex: u32, handle: IndexBufferHandle, isIndex16: bool, }; pub const TransientVertexBuffer = extern struct { data: [*c]u8, size: u32, startVertex: u32, stride: u16, handle: VertexBufferHandle, layoutHandle: VertexLayoutHandle, }; pub const InstanceDataBuffer = extern struct { data: [*c]u8, size: u32, offset: u32, num: u32, stride: u16, handle: VertexBufferHandle, }; pub const TextureInfo = extern struct { format: TextureFormat, storageSize: u32, width: u16, height: u16, depth: u16, numLayers: u16, numMips: u8, bitsPerPixel: u8, cubeMap: bool, }; pub const UniformInfo = extern struct { name: [256]u8, type: UniformType, num: u16, }; pub const Attachment = extern struct { access: Access, handle: TextureHandle, mip: u16, layer: u16, numLayers: u16, resolve: u8, /// Init attachment. 
/// <param name="_handle">Render target texture handle.</param> /// <param name="_access">Access. See `Access::Enum`.</param> /// <param name="_layer">Cubemap side or depth layer/slice to use.</param> /// <param name="_numLayers">Number of texture layer/slice(s) in array to use.</param> /// <param name="_mip">Mip level.</param> /// <param name="_resolve">Resolve flags. See: `BGFX_RESOLVE_*`</param> pub inline fn init(self: *Attachment, _handle: TextureHandle, _access: Access, _layer: u16, _numLayers: u16, _mip: u16, _resolve: u8) void { return bgfx_attachment_init(self, _handle, _access, _layer, _numLayers, _mip, _resolve); } }; pub const Transform = extern struct { data: [*c]f32, num: u16, }; pub const ViewStats = extern struct { name: [256]u8, view: ViewId, cpuTimeBegin: i64, cpuTimeEnd: i64, gpuTimeBegin: i64, gpuTimeEnd: i64, }; pub const EncoderStats = extern struct { cpuTimeBegin: i64, cpuTimeEnd: i64, }; pub const Stats = extern struct { cpuTimeFrame: i64, cpuTimeBegin: i64, cpuTimeEnd: i64, cpuTimerFreq: i64, gpuTimeBegin: i64, gpuTimeEnd: i64, gpuTimerFreq: i64, waitRender: i64, waitSubmit: i64, numDraw: u32, numCompute: u32, numBlit: u32, maxGpuLatency: u32, numDynamicIndexBuffers: u16, numDynamicVertexBuffers: u16, numFrameBuffers: u16, numIndexBuffers: u16, numOcclusionQueries: u16, numPrograms: u16, numShaders: u16, numTextures: u16, numUniforms: u16, numVertexBuffers: u16, numVertexLayouts: u16, textureMemoryUsed: i64, rtMemoryUsed: i64, transientVbUsed: i32, transientIbUsed: i32, numPrims: [5]u32, gpuMemoryMax: i64, gpuMemoryUsed: i64, width: u16, height: u16, textWidth: u16, textHeight: u16, numViews: u16, viewStats: [*c]ViewStats, numEncoders: u8, encoderStats: [*c]EncoderStats, }; pub const VertexLayout = extern struct { hash: u32, stride: u16, offset: [18]u16, attributes: [18]u16, /// Start VertexLayout. /// <param name="_rendererType">Renderer backend type. See: `bgfx::RendererType`</param> pub inline fn begin(self: *VertexLayout, _rendererType: RendererType) *VertexLayout { return bgfx_vertex_layout_begin(self, _rendererType); } /// Add attribute to VertexLayout. /// @remarks Must be called between begin/end. /// <param name="_attrib">Attribute semantics. See: `bgfx::Attrib`</param> /// <param name="_num">Number of elements 1, 2, 3 or 4.</param> /// <param name="_type">Element type.</param> /// <param name="_normalized">When using fixed point AttribType (f.e. Uint8) value will be normalized for vertex shader usage. When normalized is set to true, AttribType::Uint8 value in range 0-255 will be in range 0.0-1.0 in vertex shader.</param> /// <param name="_asInt">Packaging rule for vertexPack, vertexUnpack, and vertexConvert for AttribType::Uint8 and AttribType::Int16. Unpacking code must be implemented inside vertex shader.</param> pub inline fn add(self: *VertexLayout, _attrib: Attrib, _num: u8, _type: AttribType, _normalized: bool, _asInt: bool) *VertexLayout { return bgfx_vertex_layout_add(self, _attrib, _num, _type, _normalized, _asInt); } /// Decode attribute. /// <param name="_attrib">Attribute semantics. 
See: `bgfx::Attrib`</param> /// <param name="_num">Number of elements.</param> /// <param name="_type">Element type.</param> /// <param name="_normalized">Attribute is normalized.</param> /// <param name="_asInt">Attribute is packed as int.</param> pub inline fn decode(self: *const VertexLayout, _attrib: Attrib, _num: [*c]u8 , _type: [*c]AttribType, _normalized: [*c]bool, _asInt: [*c]bool) void { return bgfx_vertex_layout_decode(self, _attrib, _num, _type, _normalized, _asInt); } /// Returns `true` if VertexLayout contains attribute. /// <param name="_attrib">Attribute semantics. See: `bgfx::Attrib`</param> pub inline fn has(self: *const VertexLayout, _attrib: Attrib) bool { return bgfx_vertex_layout_has(self, _attrib); } /// Skip `_num` bytes in vertex stream. /// <param name="_num">Number of bytes to skip.</param> pub inline fn skip(self: *VertexLayout, _num: u8) *VertexLayout { return bgfx_vertex_layout_skip(self, _num); } /// End VertexLayout. pub inline fn end(self: *VertexLayout) void { return bgfx_vertex_layout_end(self); } }; pub const Encoder = opaque { /// Sets a debug marker. This allows you to group graphics calls together for easy browsing in /// graphics debugging tools. /// <param name="_marker">Marker string.</param> pub inline fn setMarker(self: ?*Encoder, _marker: [*c]const u8) void { return bgfx_encoder_set_marker(self, _marker); } /// Set render states for draw primitive. /// @remarks /// 1. To set up more complex states use: /// `BGFX_STATE_ALPHA_REF(_ref)`, /// `BGFX_STATE_POINT_SIZE(_size)`, /// `BGFX_STATE_BLEND_FUNC(_src, _dst)`, /// `BGFX_STATE_BLEND_FUNC_SEPARATE(_srcRGB, _dstRGB, _srcA, _dstA)`, /// `BGFX_STATE_BLEND_EQUATION(_equation)`, /// `BGFX_STATE_BLEND_EQUATION_SEPARATE(_equationRGB, _equationA)` /// 2. `BGFX_STATE_BLEND_EQUATION_ADD` is set when no other blend /// equation is specified. /// <param name="_state">State flags. Default state for primitive type is triangles. See: `BGFX_STATE_DEFAULT`. - `BGFX_STATE_DEPTH_TEST_*` - Depth test function. - `BGFX_STATE_BLEND_*` - See remark 1 about BGFX_STATE_BLEND_FUNC. - `BGFX_STATE_BLEND_EQUATION_*` - See remark 2. - `BGFX_STATE_CULL_*` - Backface culling mode. - `BGFX_STATE_WRITE_*` - Enable R, G, B, A or Z write. - `BGFX_STATE_MSAA` - Enable hardware multisample antialiasing. - `BGFX_STATE_PT_[TRISTRIP/LINES/POINTS]` - Primitive type.</param> /// <param name="_rgba">Sets blend factor used by `BGFX_STATE_BLEND_FACTOR` and `BGFX_STATE_BLEND_INV_FACTOR` blend modes.</param> pub inline fn setState(self: ?*Encoder, _state: u64, _rgba: u32) void { return bgfx_encoder_set_state(self, _state, _rgba); } /// Set condition for rendering. /// <param name="_handle">Occlusion query handle.</param> /// <param name="_visible">Render if occlusion query is visible.</param> pub inline fn setCondition(self: ?*Encoder, _handle: OcclusionQueryHandle, _visible: bool) void { return bgfx_encoder_set_condition(self, _handle, _visible); } /// Set stencil test state. /// <param name="_fstencil">Front stencil state.</param> /// <param name="_bstencil">Back stencil state. If back is set to `BGFX_STENCIL_NONE` _fstencil is applied to both front and back facing primitives.</param> pub inline fn setStencil(self: ?*Encoder, _fstencil: u32, _bstencil: u32) void { return bgfx_encoder_set_stencil(self, _fstencil, _bstencil); } /// Set scissor for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. 
/// <param name="_x">Position x from the left corner of the window.</param> /// <param name="_y">Position y from the top corner of the window.</param> /// <param name="_width">Width of view scissor region.</param> /// <param name="_height">Height of view scissor region.</param> pub inline fn setScissor(self: ?*Encoder, _x: u16, _y: u16, _width: u16, _height: u16) u16 { return bgfx_encoder_set_scissor(self, _x, _y, _width, _height); } /// Set scissor from cache for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. /// <param name="_cache">Index in scissor cache.</param> pub inline fn setScissorCached(self: ?*Encoder, _cache: u16) void { return bgfx_encoder_set_scissor_cached(self, _cache); } /// Set model matrix for draw primitive. If it is not called, /// the model will be rendered with an identity model matrix. /// <param name="_mtx">Pointer to first matrix in array.</param> /// <param name="_num">Number of matrices in array.</param> pub inline fn setTransform(self: ?*Encoder, _mtx: ?*const anyopaque, _num: u16) u32 { return bgfx_encoder_set_transform(self, _mtx, _num); } /// Set model matrix from matrix cache for draw primitive. /// <param name="_cache">Index in matrix cache.</param> /// <param name="_num">Number of matrices from cache.</param> pub inline fn setTransformCached(self: ?*Encoder, _cache: u32, _num: u16) void { return bgfx_encoder_set_transform_cached(self, _cache, _num); } /// Reserve matrices in internal matrix cache. /// @attention Pointer returned can be modified until `bgfx::frame` is called. /// <param name="_transform">Pointer to `Transform` structure.</param> /// <param name="_num">Number of matrices.</param> pub inline fn allocTransform(self: ?*Encoder, _transform: [*c]Transform, _num: u16) u32 { return bgfx_encoder_alloc_transform(self, _transform, _num); } /// Set shader uniform parameter for draw primitive. /// <param name="_handle">Uniform.</param> /// <param name="_value">Pointer to uniform data.</param> /// <param name="_num">Number of elements. Passing `UINT16_MAX` will use the _num passed on uniform creation.</param> pub inline fn setUniform(self: ?*Encoder, _handle: UniformHandle, _value: ?*const anyopaque, _num: u16) void { return bgfx_encoder_set_uniform(self, _handle, _value, _num); } /// Set index buffer for draw primitive. /// <param name="_handle">Index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> pub inline fn setIndexBuffer(self: ?*Encoder, _handle: IndexBufferHandle, _firstIndex: u32, _numIndices: u32) void { return bgfx_encoder_set_index_buffer(self, _handle, _firstIndex, _numIndices); } /// Set index buffer for draw primitive. /// <param name="_handle">Dynamic index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> pub inline fn setDynamicIndexBuffer(self: ?*Encoder, _handle: DynamicIndexBufferHandle, _firstIndex: u32, _numIndices: u32) void { return bgfx_encoder_set_dynamic_index_buffer(self, _handle, _firstIndex, _numIndices); } /// Set index buffer for draw primitive. 
/// <param name="_tib">Transient index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> pub inline fn setTransientIndexBuffer(self: ?*Encoder, _tib: [*c]const TransientIndexBuffer, _firstIndex: u32, _numIndices: u32) void { return bgfx_encoder_set_transient_index_buffer(self, _tib, _firstIndex, _numIndices); } /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> pub inline fn setVertexBuffer(self: ?*Encoder, _stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32) void { return bgfx_encoder_set_vertex_buffer(self, _stream, _handle, _startVertex, _numVertices); } /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> /// <param name="_layoutHandle">Vertex layout for aliasing vertex buffer. If invalid handle is used, vertex layout used for creation of vertex buffer will be used.</param> pub inline fn setVertexBufferWithLayout(self: ?*Encoder, _stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void { return bgfx_encoder_set_vertex_buffer_with_layout(self, _stream, _handle, _startVertex, _numVertices, _layoutHandle); } /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Dynamic vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> pub inline fn setDynamicVertexBuffer(self: ?*Encoder, _stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32) void { return bgfx_encoder_set_dynamic_vertex_buffer(self, _stream, _handle, _startVertex, _numVertices); } pub inline fn setDynamicVertexBufferWithLayout(self: ?*Encoder, _stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void { return bgfx_encoder_set_dynamic_vertex_buffer_with_layout(self, _stream, _handle, _startVertex, _numVertices, _layoutHandle); } /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_tvb">Transient vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> pub inline fn setTransientVertexBuffer(self: ?*Encoder, _stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32) void { return bgfx_encoder_set_transient_vertex_buffer(self, _stream, _tvb, _startVertex, _numVertices); } /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_tvb">Transient vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> /// <param name="_layoutHandle">Vertex layout for aliasing vertex buffer. 
If invalid handle is used, vertex layout used for creation of vertex buffer will be used.</param> pub inline fn setTransientVertexBufferWithLayout(self: ?*Encoder, _stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void { return bgfx_encoder_set_transient_vertex_buffer_with_layout(self, _stream, _tvb, _startVertex, _numVertices, _layoutHandle); } /// Set number of vertices for auto generated vertices use in conjunction /// with gl_VertexID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. /// <param name="_numVertices">Number of vertices.</param> pub inline fn setVertexCount(self: ?*Encoder, _numVertices: u32) void { return bgfx_encoder_set_vertex_count(self, _numVertices); } /// Set instance data buffer for draw primitive. /// <param name="_idb">Transient instance data buffer.</param> /// <param name="_start">First instance data.</param> /// <param name="_num">Number of data instances.</param> pub inline fn setInstanceDataBuffer(self: ?*Encoder, _idb: [*c]const InstanceDataBuffer, _start: u32, _num: u32) void { return bgfx_encoder_set_instance_data_buffer(self, _idb, _start, _num); } /// Set instance data buffer for draw primitive. /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First instance data.</param> /// <param name="_num">Number of data instances. Set instance data buffer for draw primitive.</param> pub inline fn setInstanceDataFromVertexBuffer(self: ?*Encoder, _handle: VertexBufferHandle, _startVertex: u32, _num: u32) void { return bgfx_encoder_set_instance_data_from_vertex_buffer(self, _handle, _startVertex, _num); } /// Set instance data buffer for draw primitive. /// <param name="_handle">Dynamic vertex buffer.</param> /// <param name="_startVertex">First instance data.</param> /// <param name="_num">Number of data instances.</param> pub inline fn setInstanceDataFromDynamicVertexBuffer(self: ?*Encoder, _handle: DynamicVertexBufferHandle, _startVertex: u32, _num: u32) void { return bgfx_encoder_set_instance_data_from_dynamic_vertex_buffer(self, _handle, _startVertex, _num); } /// Set number of instances for auto generated instances use in conjunction /// with gl_InstanceID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. pub inline fn setInstanceCount(self: ?*Encoder, _numInstances: u32) void { return bgfx_encoder_set_instance_count(self, _numInstances); } /// Set texture stage for draw primitive. /// <param name="_stage">Texture unit.</param> /// <param name="_sampler">Program sampler.</param> /// <param name="_handle">Texture handle.</param> /// <param name="_flags">Texture sampling mode. Default value UINT32_MAX uses texture sampling settings from the texture. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> pub inline fn setTexture(self: ?*Encoder, _stage: u8, _sampler: UniformHandle, _handle: TextureHandle, _flags: u32) void { return bgfx_encoder_set_texture(self, _stage, _sampler, _handle, _flags); } /// Submit an empty primitive for rendering. Uniforms and draw state /// will be applied but no geometry will be submitted. Useful in cases /// when no other draw/compute primitive is submitted to view, but it's /// desired to execute clear view. /// @remark /// These empty draw calls will sort before ordinary draw calls. 
/// <param name="_id">View id.</param> pub inline fn touch(self: ?*Encoder, _id: ViewId) void { return bgfx_encoder_touch(self, _id); } /// Submit primitive for rendering. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn submit(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _depth: u32, _flags: u8) void { return bgfx_encoder_submit(self, _id, _program, _depth, _flags); } /// Submit primitive with occlusion query for rendering. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_occlusionQuery">Occlusion query.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn submitOcclusionQuery(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _occlusionQuery: OcclusionQueryHandle, _depth: u32, _flags: u8) void { return bgfx_encoder_submit_occlusion_query(self, _id, _program, _occlusionQuery, _depth, _flags); } /// Submit primitive for rendering with index and instance data info from /// indirect buffer. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_indirectHandle">Indirect buffer.</param> /// <param name="_start">First element in indirect buffer.</param> /// <param name="_num">Number of dispatches.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn submitIndirect(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _depth: u32, _flags: u8) void { return bgfx_encoder_submit_indirect(self, _id, _program, _indirectHandle, _start, _num, _depth, _flags); } /// Set compute index buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Index buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeIndexBuffer(self: ?*Encoder, _stage: u8, _handle: IndexBufferHandle, _access: Access) void { return bgfx_encoder_set_compute_index_buffer(self, _stage, _handle, _access); } /// Set compute vertex buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Vertex buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeVertexBuffer(self: ?*Encoder, _stage: u8, _handle: VertexBufferHandle, _access: Access) void { return bgfx_encoder_set_compute_vertex_buffer(self, _stage, _handle, _access); } /// Set compute dynamic index buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Dynamic index buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeDynamicIndexBuffer(self: ?*Encoder, _stage: u8, _handle: DynamicIndexBufferHandle, _access: Access) void { return bgfx_encoder_set_compute_dynamic_index_buffer(self, _stage, _handle, _access); } /// Set compute dynamic vertex buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Dynamic vertex buffer handle.</param> /// <param name="_access">Buffer access. 
See `Access::Enum`.</param> pub inline fn setComputeDynamicVertexBuffer(self: ?*Encoder, _stage: u8, _handle: DynamicVertexBufferHandle, _access: Access) void { return bgfx_encoder_set_compute_dynamic_vertex_buffer(self, _stage, _handle, _access); } /// Set compute indirect buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Indirect buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeIndirectBuffer(self: ?*Encoder, _stage: u8, _handle: IndirectBufferHandle, _access: Access) void { return bgfx_encoder_set_compute_indirect_buffer(self, _stage, _handle, _access); } /// Set compute image from texture. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Texture handle.</param> /// <param name="_mip">Mip level.</param> /// <param name="_access">Image access. See `Access::Enum`.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> pub inline fn setImage(self: ?*Encoder, _stage: u8, _handle: TextureHandle, _mip: u8, _access: Access, _format: TextureFormat) void { return bgfx_encoder_set_image(self, _stage, _handle, _mip, _access, _format); } /// Dispatch compute. /// <param name="_id">View id.</param> /// <param name="_program">Compute program.</param> /// <param name="_numX">Number of groups X.</param> /// <param name="_numY">Number of groups Y.</param> /// <param name="_numZ">Number of groups Z.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn dispatch(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _numX: u32, _numY: u32, _numZ: u32, _flags: u8) void { return bgfx_encoder_dispatch(self, _id, _program, _numX, _numY, _numZ, _flags); } /// Dispatch compute indirect. /// <param name="_id">View id.</param> /// <param name="_program">Compute program.</param> /// <param name="_indirectHandle">Indirect buffer.</param> /// <param name="_start">First element in indirect buffer.</param> /// <param name="_num">Number of dispatches.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn dispatchIndirect(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _flags: u8) void { return bgfx_encoder_dispatch_indirect(self, _id, _program, _indirectHandle, _start, _num, _flags); } /// Discard previously set state for draw or compute call. /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn discard(self: ?*Encoder, _flags: u8) void { return bgfx_encoder_discard(self, _flags); } /// Blit 2D texture region between two 2D textures. /// @attention Destination texture must be created with `BGFX_TEXTURE_BLIT_DST` flag. /// @attention Availability depends on: `BGFX_CAPS_TEXTURE_BLIT`. /// <param name="_id">View id.</param> /// <param name="_dst">Destination texture handle.</param> /// <param name="_dstMip">Destination texture mip level.</param> /// <param name="_dstX">Destination texture X position.</param> /// <param name="_dstY">Destination texture Y position.</param> /// <param name="_dstZ">If texture is 2D this argument should be 0. If destination texture is cube this argument represents destination texture cube face. 
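// Illustrative usage sketch, not part of the generated bindings: binding an image and
// dispatching a compute program with the methods above. `img`, `csh` and the work-group
// counts are hypothetical; availability is gated on `CapsFlags_Compute`.
//
//   fn exampleDispatch(encoder: *Encoder, view: ViewId, csh: ProgramHandle,
//                      img: TextureHandle, discard_flags: u8) void {
//       encoder.setImage(0, img, 0, Access.Write, TextureFormat.RGBA8);
//       encoder.dispatch(view, csh, 64, 64, 1, discard_flags);
//   }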
For 3D texture this argument represents destination texture Z position.</param> /// <param name="_src">Source texture handle.</param> /// <param name="_srcMip">Source texture mip level.</param> /// <param name="_srcX">Source texture X position.</param> /// <param name="_srcY">Source texture Y position.</param> /// <param name="_srcZ">If texture is 2D this argument should be 0. If source texture is cube this argument represents source texture cube face. For 3D texture this argument represents source texture Z position.</param> /// <param name="_width">Width of region.</param> /// <param name="_height">Height of region.</param> /// <param name="_depth">If texture is 3D this argument represents depth of region, otherwise it's unused.</param> pub inline fn blit(self: ?*Encoder, _id: ViewId, _dst: TextureHandle, _dstMip: u8, _dstX: u16, _dstY: u16, _dstZ: u16, _src: TextureHandle, _srcMip: u8, _srcX: u16, _srcY: u16, _srcZ: u16, _width: u16, _height: u16, _depth: u16) void { return bgfx_encoder_blit(self, _id, _dst, _dstMip, _dstX, _dstY, _dstZ, _src, _srcMip, _srcX, _srcY, _srcZ, _width, _height, _depth); } }; pub const DynamicIndexBufferHandle = extern struct { idx: c_ushort, }; pub const DynamicVertexBufferHandle = extern struct { idx: c_ushort, }; pub const FrameBufferHandle = extern struct { idx: c_ushort, }; pub const IndexBufferHandle = extern struct { idx: c_ushort, }; pub const IndirectBufferHandle = extern struct { idx: c_ushort, }; pub const OcclusionQueryHandle = extern struct { idx: c_ushort, }; pub const ProgramHandle = extern struct { idx: c_ushort, }; pub const ShaderHandle = extern struct { idx: c_ushort, }; pub const TextureHandle = extern struct { idx: c_ushort, }; pub const UniformHandle = extern struct { idx: c_ushort, }; pub const VertexBufferHandle = extern struct { idx: c_ushort, }; pub const VertexLayoutHandle = extern struct { idx: c_ushort, }; /// Init attachment. /// <param name="_handle">Render target texture handle.</param> /// <param name="_access">Access. See `Access::Enum`.</param> /// <param name="_layer">Cubemap side or depth layer/slice to use.</param> /// <param name="_numLayers">Number of texture layer/slice(s) in array to use.</param> /// <param name="_mip">Mip level.</param> /// <param name="_resolve">Resolve flags. See: `BGFX_RESOLVE_*`</param> extern fn bgfx_attachment_init(self: [*c]Attachment, _handle: TextureHandle, _access: Access, _layer: u16, _numLayers: u16, _mip: u16, _resolve: u8) void; /// Start VertexLayout. /// <param name="_rendererType">Renderer backend type. See: `bgfx::RendererType`</param> extern fn bgfx_vertex_layout_begin(self: [*c]VertexLayout, _rendererType: RendererType) [*c]VertexLayout; /// Add attribute to VertexLayout. /// @remarks Must be called between begin/end. /// <param name="_attrib">Attribute semantics. See: `bgfx::Attrib`</param> /// <param name="_num">Number of elements 1, 2, 3 or 4.</param> /// <param name="_type">Element type.</param> /// <param name="_normalized">When using fixed point AttribType (f.e. Uint8) value will be normalized for vertex shader usage. When normalized is set to true, AttribType::Uint8 value in range 0-255 will be in range 0.0-1.0 in vertex shader.</param> /// <param name="_asInt">Packaging rule for vertexPack, vertexUnpack, and vertexConvert for AttribType::Uint8 and AttribType::Int16. 
Unpacking code must be implemented inside vertex shader.</param> extern fn bgfx_vertex_layout_add(self: [*c]VertexLayout, _attrib: Attrib, _num: u8, _type: AttribType, _normalized: bool, _asInt: bool) [*c]VertexLayout; /// Decode attribute. /// <param name="_attrib">Attribute semantics. See: `bgfx::Attrib`</param> /// <param name="_num">Number of elements.</param> /// <param name="_type">Element type.</param> /// <param name="_normalized">Attribute is normalized.</param> /// <param name="_asInt">Attribute is packed as int.</param> extern fn bgfx_vertex_layout_decode(self: [*c]const VertexLayout, _attrib: Attrib, _num: [*c]u8 , _type: [*c]AttribType, _normalized: [*c]bool, _asInt: [*c]bool) void; /// Returns `true` if VertexLayout contains attribute. /// <param name="_attrib">Attribute semantics. See: `bgfx::Attrib`</param> extern fn bgfx_vertex_layout_has(self: [*c]const VertexLayout, _attrib: Attrib) bool; /// Skip `_num` bytes in vertex stream. /// <param name="_num">Number of bytes to skip.</param> extern fn bgfx_vertex_layout_skip(self: [*c]VertexLayout, _num: u8) [*c]VertexLayout; /// End VertexLayout. extern fn bgfx_vertex_layout_end(self: [*c]VertexLayout) void; /// Pack vertex attribute into vertex stream format. /// <param name="_input">Value to be packed into vertex stream.</param> /// <param name="_inputNormalized">`true` if input value is already normalized.</param> /// <param name="_attr">Attribute to pack.</param> /// <param name="_layout">Vertex stream layout.</param> /// <param name="_data">Destination vertex stream where data will be packed.</param> /// <param name="_index">Vertex index that will be modified.</param> pub inline fn vertexPack(_input: [4]f32, _inputNormalized: bool, _attr: Attrib, _layout: [*c]const VertexLayout, _data: ?*anyopaque, _index: u32) void { return bgfx_vertex_pack(_input, _inputNormalized, _attr, _layout, _data, _index); } extern fn bgfx_vertex_pack(_input: [4]f32, _inputNormalized: bool, _attr: Attrib, _layout: [*c]const VertexLayout, _data: ?*anyopaque, _index: u32) void; /// Unpack vertex attribute from vertex stream format. /// <param name="_output">Result of unpacking.</param> /// <param name="_attr">Attribute to unpack.</param> /// <param name="_layout">Vertex stream layout.</param> /// <param name="_data">Source vertex stream from where data will be unpacked.</param> /// <param name="_index">Vertex index that will be unpacked.</param> pub inline fn vertexUnpack(_output: [4]f32, _attr: Attrib, _layout: [*c]const VertexLayout, _data: ?*const anyopaque, _index: u32) void { return bgfx_vertex_unpack(_output, _attr, _layout, _data, _index); } extern fn bgfx_vertex_unpack(_output: [4]f32, _attr: Attrib, _layout: [*c]const VertexLayout, _data: ?*const anyopaque, _index: u32) void; /// Converts vertex stream data from one vertex stream format to another. 
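// Illustrative usage sketch, not part of the generated bindings: packing a color into an
// existing vertex stream with `vertexPack` above. `layout` is a prepared `VertexLayout` and
// `verts` a hypothetical pointer to the vertex data.
//
//   const rgba = [4]f32{ 1.0, 0.0, 0.0, 1.0 };
//   vertexPack(rgba, true, Attrib.Color0, &layout, verts, 0);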
/// <param name="_dstLayout">Destination vertex stream layout.</param> /// <param name="_dstData">Destination vertex stream.</param> /// <param name="_srcLayout">Source vertex stream layout.</param> /// <param name="_srcData">Source vertex stream data.</param> /// <param name="_num">Number of vertices to convert from source to destination.</param> pub inline fn vertexConvert(_dstLayout: [*c]const VertexLayout, _dstData: ?*anyopaque, _srcLayout: [*c]const VertexLayout, _srcData: ?*const anyopaque, _num: u32) void { return bgfx_vertex_convert(_dstLayout, _dstData, _srcLayout, _srcData, _num); } extern fn bgfx_vertex_convert(_dstLayout: [*c]const VertexLayout, _dstData: ?*anyopaque, _srcLayout: [*c]const VertexLayout, _srcData: ?*const anyopaque, _num: u32) void; /// Weld vertices. /// <param name="_output">Welded vertices remapping table. The size of buffer must be the same as number of vertices.</param> /// <param name="_layout">Vertex stream layout.</param> /// <param name="_data">Vertex stream.</param> /// <param name="_num">Number of vertices in vertex stream.</param> /// <param name="_index32">Set to `true` if input indices are 32-bit.</param> /// <param name="_epsilon">Error tolerance for vertex position comparison.</param> pub inline fn weldVertices(_output: ?*anyopaque, _layout: [*c]const VertexLayout, _data: ?*const anyopaque, _num: u32, _index32: bool, _epsilon: f32) u32 { return bgfx_weld_vertices(_output, _layout, _data, _num, _index32, _epsilon); } extern fn bgfx_weld_vertices(_output: ?*anyopaque, _layout: [*c]const VertexLayout, _data: ?*const anyopaque, _num: u32, _index32: bool, _epsilon: f32) u32; /// Convert index buffer for use with different primitive topologies. /// <param name="_conversion">Conversion type, see `TopologyConvert::Enum`.</param> /// <param name="_dst">Destination index buffer. If this argument is NULL function will return number of indices after conversion.</param> /// <param name="_dstSize">Destination index buffer in bytes. It must be large enough to contain output indices. If destination size is insufficient index buffer will be truncated.</param> /// <param name="_indices">Source indices.</param> /// <param name="_numIndices">Number of input indices.</param> /// <param name="_index32">Set to `true` if input indices are 32-bit.</param> pub inline fn topologyConvert(_conversion: TopologyConvert, _dst: ?*anyopaque, _dstSize: u32, _indices: ?*const anyopaque, _numIndices: u32, _index32: bool) u32 { return bgfx_topology_convert(_conversion, _dst, _dstSize, _indices, _numIndices, _index32); } extern fn bgfx_topology_convert(_conversion: TopologyConvert, _dst: ?*anyopaque, _dstSize: u32, _indices: ?*const anyopaque, _numIndices: u32, _index32: bool) u32; /// Sort indices. /// <param name="_sort">Sort order, see `TopologySort::Enum`.</param> /// <param name="_dst">Destination index buffer.</param> /// <param name="_dstSize">Destination index buffer in bytes. It must be large enough to contain output indices. If destination size is insufficient index buffer will be truncated.</param> /// <param name="_dir">Direction (vector must be normalized).</param> /// <param name="_pos">Position.</param> /// <param name="_vertices">Pointer to first vertex represented as float x, y, z. 
Must contain at least number of vertices referenced by index buffer.</param> /// <param name="_stride">Vertex stride.</param> /// <param name="_indices">Source indices.</param> /// <param name="_numIndices">Number of input indices.</param> /// <param name="_index32">Set to `true` if input indices are 32-bit.</param> pub inline fn topologySortTriList(_sort: TopologySort, _dst: ?*anyopaque, _dstSize: u32, _dir: [3]f32, _pos: [3]f32, _vertices: ?*const anyopaque, _stride: u32, _indices: ?*const anyopaque, _numIndices: u32, _index32: bool) void { return bgfx_topology_sort_tri_list(_sort, _dst, _dstSize, _dir, _pos, _vertices, _stride, _indices, _numIndices, _index32); } extern fn bgfx_topology_sort_tri_list(_sort: TopologySort, _dst: ?*anyopaque, _dstSize: u32, _dir: [3]f32, _pos: [3]f32, _vertices: ?*const anyopaque, _stride: u32, _indices: ?*const anyopaque, _numIndices: u32, _index32: bool) void; /// Returns supported backend API renderers. /// <param name="_max">Maximum number of elements in _enum array.</param> /// <param name="_enum">Array where supported renderers will be written.</param> pub inline fn getSupportedRenderers(_max: u8, _enum: [*c]RendererType) u8 { return bgfx_get_supported_renderers(_max, _enum); } extern fn bgfx_get_supported_renderers(_max: u8, _enum: [*c]RendererType) u8; /// Returns name of renderer. /// <param name="_type">Renderer backend type. See: `bgfx::RendererType`</param> pub inline fn getRendererName(_type: RendererType) [*c]const u8 { return bgfx_get_renderer_name(_type); } extern fn bgfx_get_renderer_name(_type: RendererType) [*c]const u8; pub inline fn initCtor(_init: [*c]Init) void { return bgfx_init_ctor(_init); } extern fn bgfx_init_ctor(_init: [*c]Init) void; /// Initialize the bgfx library. /// <param name="_init">Initialization parameters. See: `bgfx::Init` for more info.</param> pub inline fn init(_init: [*c]const Init) bool { return bgfx_init(_init); } extern fn bgfx_init(_init: [*c]const Init) bool; /// Shutdown bgfx library. pub inline fn shutdown() void { return bgfx_shutdown(); } extern fn bgfx_shutdown() void; /// Reset graphic settings and back-buffer size. /// @attention This call doesn’t change the window size, it just resizes /// the back-buffer. Your windowing code controls the window size. /// <param name="_width">Back-buffer width.</param> /// <param name="_height">Back-buffer height.</param> /// <param name="_flags">See: `BGFX_RESET_*` for more info. - `BGFX_RESET_NONE` - No reset flags. - `BGFX_RESET_FULLSCREEN` - Not supported yet. - `BGFX_RESET_MSAA_X[2/4/8/16]` - Enable 2, 4, 8 or 16 x MSAA. - `BGFX_RESET_VSYNC` - Enable V-Sync. - `BGFX_RESET_MAXANISOTROPY` - Turn on/off max anisotropy. - `BGFX_RESET_CAPTURE` - Begin screen capture. - `BGFX_RESET_FLUSH_AFTER_RENDER` - Flush rendering after submitting to GPU. - `BGFX_RESET_FLIP_AFTER_RENDER` - This flag specifies where flip occurs. Default behaviour is that flip occurs before rendering new frame. This flag only has effect when `BGFX_CONFIG_MULTITHREADED=0`. - `BGFX_RESET_SRGB_BACKBUFFER` - Enable sRGB back-buffer.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> pub inline fn reset(_width: u32, _height: u32, _flags: u32, _format: TextureFormat) void { return bgfx_reset(_width, _height, _flags, _format); } extern fn bgfx_reset(_width: u32, _height: u32, _flags: u32, _format: TextureFormat) void; /// Advance to next frame. When using multithreaded renderer, this call /// just swaps internal buffers, kicks render thread, and returns.
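// Illustrative usage sketch, not part of the generated bindings: the usual init/frame/shutdown
// sequence built from the functions above. `native_window` is a hypothetical platform window
// handle.
//
//   var bgfx_init: Init = undefined;
//   initCtor(&bgfx_init);                       // fill in defaults
//   bgfx_init.platformData.nwh = native_window;
//   bgfx_init.resolution.width = 1280;
//   bgfx_init.resolution.height = 720;
//   if (!init(&bgfx_init)) {
//       // handle initialization failure
//   }
//   // ...once per frame, after submitting work:
//   _ = frame(false);
//   // ...on exit:
//   shutdown();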
In /// singlethreaded renderer this call does frame rendering. /// <param name="_capture">Capture frame with graphics debugger.</param> pub inline fn frame(_capture: bool) u32 { return bgfx_frame(_capture); } extern fn bgfx_frame(_capture: bool) u32; /// Returns current renderer backend API type. /// @remarks /// Library must be initialized. pub inline fn getRendererType() RendererType { return bgfx_get_renderer_type(); } extern fn bgfx_get_renderer_type() RendererType; /// Returns renderer capabilities. /// @remarks /// Library must be initialized. pub inline fn getCaps() [*c]const Caps { return bgfx_get_caps(); } extern fn bgfx_get_caps() [*c]const Caps; /// Returns performance counters. /// @attention Pointer returned is valid until `bgfx::frame` is called. pub inline fn getStats() [*c]const Stats { return bgfx_get_stats(); } extern fn bgfx_get_stats() [*c]const Stats; /// Allocate buffer to pass to bgfx calls. Data will be freed inside bgfx. /// <param name="_size">Size to allocate.</param> pub inline fn alloc(_size: u32) [*c]const Memory { return bgfx_alloc(_size); } extern fn bgfx_alloc(_size: u32) [*c]const Memory; /// Allocate buffer and copy data into it. Data will be freed inside bgfx. /// <param name="_data">Pointer to data to be copied.</param> /// <param name="_size">Size of data to be copied.</param> pub inline fn copy(_data: ?*const anyopaque, _size: u32) [*c]const Memory { return bgfx_copy(_data, _size); } extern fn bgfx_copy(_data: ?*const anyopaque, _size: u32) [*c]const Memory; /// Make reference to data to pass to bgfx. Unlike `bgfx::alloc`, this call /// doesn't allocate memory for data. It just copies the _data pointer. You /// can pass `ReleaseFn` function pointer to release this memory after it's /// consumed, otherwise you must make sure _data is available for at least 2 /// `bgfx::frame` calls. `ReleaseFn` function must be able to be called /// from any thread. /// @attention Data passed must be available for at least 2 `bgfx::frame` calls. /// <param name="_data">Pointer to data.</param> /// <param name="_size">Size of data.</param> pub inline fn makeRef(_data: ?*const anyopaque, _size: u32) [*c]const Memory { return bgfx_make_ref(_data, _size); } extern fn bgfx_make_ref(_data: ?*const anyopaque, _size: u32) [*c]const Memory; /// Make reference to data to pass to bgfx. Unlike `bgfx::alloc`, this call /// doesn't allocate memory for data. It just copies the _data pointer. You /// can pass `ReleaseFn` function pointer to release this memory after it's /// consumed, otherwise you must make sure _data is available for at least 2 /// `bgfx::frame` calls. `ReleaseFn` function must be able to be called /// from any thread. /// @attention Data passed must be available for at least 2 `bgfx::frame` calls. /// <param name="_data">Pointer to data.</param> /// <param name="_size">Size of data.</param> /// <param name="_releaseFn">Callback function to release memory after use.</param> /// <param name="_userData">User data to be passed to callback function.</param> pub inline fn makeRefRelease(_data: ?*const anyopaque, _size: u32, _releaseFn: ?*anyopaque, _userData: ?*anyopaque) [*c]const Memory { return bgfx_make_ref_release(_data, _size, _releaseFn, _userData); } extern fn bgfx_make_ref_release(_data: ?*const anyopaque, _size: u32, _releaseFn: ?*anyopaque, _userData: ?*anyopaque) [*c]const Memory; /// Set debug flags. /// <param name="_debug">Available flags: - `BGFX_DEBUG_IFH` - Infinitely fast hardware. When this flag is set all rendering calls will be skipped. 
This is useful when profiling to quickly assess potential bottlenecks between CPU and GPU. - `BGFX_DEBUG_PROFILER` - Enable profiler. - `BGFX_DEBUG_STATS` - Display internal statistics. - `BGFX_DEBUG_TEXT` - Display debug text. - `BGFX_DEBUG_WIREFRAME` - Wireframe rendering. All rendering primitives will be rendered as lines.</param> pub inline fn setDebug(_debug: u32) void { return bgfx_set_debug(_debug); } extern fn bgfx_set_debug(_debug: u32) void; /// Clear internal debug text buffer. /// <param name="_attr">Background color.</param> /// <param name="_small">Default 8x16 or 8x8 font.</param> pub inline fn dbgTextClear(_attr: u8, _small: bool) void { return bgfx_dbg_text_clear(_attr, _small); } extern fn bgfx_dbg_text_clear(_attr: u8, _small: bool) void; /// Draw image into internal debug text buffer. /// <param name="_x">Position x from the left corner of the window.</param> /// <param name="_y">Position y from the top corner of the window.</param> /// <param name="_width">Image width.</param> /// <param name="_height">Image height.</param> /// <param name="_data">Raw image data (character/attribute raw encoding).</param> /// <param name="_pitch">Image pitch in bytes.</param> pub inline fn dbgTextImage(_x: u16, _y: u16, _width: u16, _height: u16, _data: ?*const anyopaque, _pitch: u16) void { return bgfx_dbg_text_image(_x, _y, _width, _height, _data, _pitch); } extern fn bgfx_dbg_text_image(_x: u16, _y: u16, _width: u16, _height: u16, _data: ?*const anyopaque, _pitch: u16) void; /// Create static index buffer. /// <param name="_mem">Index buffer data.</param> /// <param name="_flags">Buffer creation flags. - `BGFX_BUFFER_NONE` - No flags. - `BGFX_BUFFER_COMPUTE_READ` - Buffer will be read from by compute shader. - `BGFX_BUFFER_COMPUTE_WRITE` - Buffer will be written into by compute shader. When buffer is created with `BGFX_BUFFER_COMPUTE_WRITE` flag it cannot be updated from CPU. - `BGFX_BUFFER_COMPUTE_READ_WRITE` - Buffer will be used for read/write by compute shader. - `BGFX_BUFFER_ALLOW_RESIZE` - Buffer will resize on buffer update if a different amount of data is passed. If this flag is not specified, and more data is passed on update, the buffer will be trimmed to fit the existing buffer size. This flag has effect only on dynamic buffers. - `BGFX_BUFFER_INDEX32` - Buffer is using 32-bit indices. This flag has effect only on index buffers.</param> pub inline fn createIndexBuffer(_mem: [*c]const Memory, _flags: u16) IndexBufferHandle { return bgfx_create_index_buffer(_mem, _flags); } extern fn bgfx_create_index_buffer(_mem: [*c]const Memory, _flags: u16) IndexBufferHandle; /// Set static index buffer debug name. /// <param name="_handle">Static index buffer handle.</param> /// <param name="_name">Static index buffer name.</param> /// <param name="_len">Static index buffer name length (if length is INT32_MAX, it's expected that _name is zero terminated string.</param> pub inline fn setIndexBufferName(_handle: IndexBufferHandle, _name: [*c]const u8, _len: i32) void { return bgfx_set_index_buffer_name(_handle, _name, _len); } extern fn bgfx_set_index_buffer_name(_handle: IndexBufferHandle, _name: [*c]const u8, _len: i32) void; /// Destroy static index buffer. /// <param name="_handle">Static index buffer handle.</param> pub inline fn destroyIndexBuffer(_handle: IndexBufferHandle) void { return bgfx_destroy_index_buffer(_handle); } extern fn bgfx_destroy_index_buffer(_handle: IndexBufferHandle) void; /// Create vertex layout. 
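// Illustrative usage sketch, not part of the generated bindings: creating a small static index
// buffer. `copy` (declared above) hands bgfx its own copy of the data; 0 is passed for the
// buffer flags, i.e. no `BGFX_BUFFER_*` flags.
//
//   const indices = [_]u16{ 0, 1, 2, 2, 3, 0 };
//   const ibh = createIndexBuffer(copy(&indices, @sizeOf(@TypeOf(indices))), 0);
//   // ...and eventually: destroyIndexBuffer(ibh);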
/// <param name="_layout">Vertex layout.</param> pub inline fn createVertexLayout(_layout: [*c]const VertexLayout) VertexLayoutHandle { return bgfx_create_vertex_layout(_layout); } extern fn bgfx_create_vertex_layout(_layout: [*c]const VertexLayout) VertexLayoutHandle; /// Destroy vertex layout. /// <param name="_layoutHandle">Vertex layout handle.</param> pub inline fn destroyVertexLayout(_layoutHandle: VertexLayoutHandle) void { return bgfx_destroy_vertex_layout(_layoutHandle); } extern fn bgfx_destroy_vertex_layout(_layoutHandle: VertexLayoutHandle) void; /// Create static vertex buffer. /// <param name="_mem">Vertex buffer data.</param> /// <param name="_layout">Vertex layout.</param> /// <param name="_flags">Buffer creation flags. - `BGFX_BUFFER_NONE` - No flags. - `BGFX_BUFFER_COMPUTE_READ` - Buffer will be read from by compute shader. - `BGFX_BUFFER_COMPUTE_WRITE` - Buffer will be written into by compute shader. When buffer is created with `BGFX_BUFFER_COMPUTE_WRITE` flag it cannot be updated from CPU. - `BGFX_BUFFER_COMPUTE_READ_WRITE` - Buffer will be used for read/write by compute shader. - `BGFX_BUFFER_ALLOW_RESIZE` - Buffer will resize on buffer update if a different amount of data is passed. If this flag is not specified, and more data is passed on update, the buffer will be trimmed to fit the existing buffer size. This flag has effect only on dynamic buffers. - `BGFX_BUFFER_INDEX32` - Buffer is using 32-bit indices. This flag has effect only on index buffers.</param> pub inline fn createVertexBuffer(_mem: [*c]const Memory, _layout: [*c]const VertexLayout, _flags: u16) VertexBufferHandle { return bgfx_create_vertex_buffer(_mem, _layout, _flags); } extern fn bgfx_create_vertex_buffer(_mem: [*c]const Memory, _layout: [*c]const VertexLayout, _flags: u16) VertexBufferHandle; /// Set static vertex buffer debug name. /// <param name="_handle">Static vertex buffer handle.</param> /// <param name="_name">Static vertex buffer name.</param> /// <param name="_len">Static vertex buffer name length (if length is INT32_MAX, it's expected that _name is zero terminated string.</param> pub inline fn setVertexBufferName(_handle: VertexBufferHandle, _name: [*c]const u8, _len: i32) void { return bgfx_set_vertex_buffer_name(_handle, _name, _len); } extern fn bgfx_set_vertex_buffer_name(_handle: VertexBufferHandle, _name: [*c]const u8, _len: i32) void; /// Destroy static vertex buffer. /// <param name="_handle">Static vertex buffer handle.</param> pub inline fn destroyVertexBuffer(_handle: VertexBufferHandle) void { return bgfx_destroy_vertex_buffer(_handle); } extern fn bgfx_destroy_vertex_buffer(_handle: VertexBufferHandle) void; /// Create empty dynamic index buffer. /// <param name="_num">Number of indices.</param> /// <param name="_flags">Buffer creation flags. - `BGFX_BUFFER_NONE` - No flags. - `BGFX_BUFFER_COMPUTE_READ` - Buffer will be read from by compute shader. - `BGFX_BUFFER_COMPUTE_WRITE` - Buffer will be written into by compute shader. When buffer is created with `BGFX_BUFFER_COMPUTE_WRITE` flag it cannot be updated from CPU. - `BGFX_BUFFER_COMPUTE_READ_WRITE` - Buffer will be used for read/write by compute shader. - `BGFX_BUFFER_ALLOW_RESIZE` - Buffer will resize on buffer update if a different amount of data is passed. If this flag is not specified, and more data is passed on update, the buffer will be trimmed to fit the existing buffer size. This flag has effect only on dynamic buffers. - `BGFX_BUFFER_INDEX32` - Buffer is using 32-bit indices. 
This flag has effect only on index buffers.</param> pub inline fn createDynamicIndexBuffer(_num: u32, _flags: u16) DynamicIndexBufferHandle { return bgfx_create_dynamic_index_buffer(_num, _flags); } extern fn bgfx_create_dynamic_index_buffer(_num: u32, _flags: u16) DynamicIndexBufferHandle; /// Create a dynamic index buffer and initialize it. /// <param name="_mem">Index buffer data.</param> /// <param name="_flags">Buffer creation flags. - `BGFX_BUFFER_NONE` - No flags. - `BGFX_BUFFER_COMPUTE_READ` - Buffer will be read from by compute shader. - `BGFX_BUFFER_COMPUTE_WRITE` - Buffer will be written into by compute shader. When buffer is created with `BGFX_BUFFER_COMPUTE_WRITE` flag it cannot be updated from CPU. - `BGFX_BUFFER_COMPUTE_READ_WRITE` - Buffer will be used for read/write by compute shader. - `BGFX_BUFFER_ALLOW_RESIZE` - Buffer will resize on buffer update if a different amount of data is passed. If this flag is not specified, and more data is passed on update, the buffer will be trimmed to fit the existing buffer size. This flag has effect only on dynamic buffers. - `BGFX_BUFFER_INDEX32` - Buffer is using 32-bit indices. This flag has effect only on index buffers.</param> pub inline fn createDynamicIndexBufferMem(_mem: [*c]const Memory, _flags: u16) DynamicIndexBufferHandle { return bgfx_create_dynamic_index_buffer_mem(_mem, _flags); } extern fn bgfx_create_dynamic_index_buffer_mem(_mem: [*c]const Memory, _flags: u16) DynamicIndexBufferHandle; /// Update dynamic index buffer. /// <param name="_handle">Dynamic index buffer handle.</param> /// <param name="_startIndex">Start index.</param> /// <param name="_mem">Index buffer data.</param> pub inline fn updateDynamicIndexBuffer(_handle: DynamicIndexBufferHandle, _startIndex: u32, _mem: [*c]const Memory) void { return bgfx_update_dynamic_index_buffer(_handle, _startIndex, _mem); } extern fn bgfx_update_dynamic_index_buffer(_handle: DynamicIndexBufferHandle, _startIndex: u32, _mem: [*c]const Memory) void; /// Destroy dynamic index buffer. /// <param name="_handle">Dynamic index buffer handle.</param> pub inline fn destroyDynamicIndexBuffer(_handle: DynamicIndexBufferHandle) void { return bgfx_destroy_dynamic_index_buffer(_handle); } extern fn bgfx_destroy_dynamic_index_buffer(_handle: DynamicIndexBufferHandle) void; /// Create empty dynamic vertex buffer. /// <param name="_num">Number of vertices.</param> /// <param name="_layout">Vertex layout.</param> /// <param name="_flags">Buffer creation flags. - `BGFX_BUFFER_NONE` - No flags. - `BGFX_BUFFER_COMPUTE_READ` - Buffer will be read from by compute shader. - `BGFX_BUFFER_COMPUTE_WRITE` - Buffer will be written into by compute shader. When buffer is created with `BGFX_BUFFER_COMPUTE_WRITE` flag it cannot be updated from CPU. - `BGFX_BUFFER_COMPUTE_READ_WRITE` - Buffer will be used for read/write by compute shader. - `BGFX_BUFFER_ALLOW_RESIZE` - Buffer will resize on buffer update if a different amount of data is passed. If this flag is not specified, and more data is passed on update, the buffer will be trimmed to fit the existing buffer size. This flag has effect only on dynamic buffers. - `BGFX_BUFFER_INDEX32` - Buffer is using 32-bit indices. 
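// Illustrative usage sketch, not part of the generated bindings: an empty dynamic index buffer
// filled (and later refreshed) with `updateDynamicIndexBuffer` above. `indices` is a
// hypothetical array of u16 indices.
//
//   const dibh = createDynamicIndexBuffer(1024, 0); // 0 == no buffer flags
//   updateDynamicIndexBuffer(dibh, 0, copy(&indices, @sizeOf(@TypeOf(indices))));
//   // ...and eventually: destroyDynamicIndexBuffer(dibh);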
This flag has effect only on index buffers.</param> pub inline fn createDynamicVertexBuffer(_num: u32, _layout: [*c]const VertexLayout, _flags: u16) DynamicVertexBufferHandle { return bgfx_create_dynamic_vertex_buffer(_num, _layout, _flags); } extern fn bgfx_create_dynamic_vertex_buffer(_num: u32, _layout: [*c]const VertexLayout, _flags: u16) DynamicVertexBufferHandle; /// Create dynamic vertex buffer and initialize it. /// <param name="_mem">Vertex buffer data.</param> /// <param name="_layout">Vertex layout.</param> /// <param name="_flags">Buffer creation flags. - `BGFX_BUFFER_NONE` - No flags. - `BGFX_BUFFER_COMPUTE_READ` - Buffer will be read from by compute shader. - `BGFX_BUFFER_COMPUTE_WRITE` - Buffer will be written into by compute shader. When buffer is created with `BGFX_BUFFER_COMPUTE_WRITE` flag it cannot be updated from CPU. - `BGFX_BUFFER_COMPUTE_READ_WRITE` - Buffer will be used for read/write by compute shader. - `BGFX_BUFFER_ALLOW_RESIZE` - Buffer will resize on buffer update if a different amount of data is passed. If this flag is not specified, and more data is passed on update, the buffer will be trimmed to fit the existing buffer size. This flag has effect only on dynamic buffers. - `BGFX_BUFFER_INDEX32` - Buffer is using 32-bit indices. This flag has effect only on index buffers.</param> pub inline fn createDynamicVertexBufferMem(_mem: [*c]const Memory, _layout: [*c]const VertexLayout, _flags: u16) DynamicVertexBufferHandle { return bgfx_create_dynamic_vertex_buffer_mem(_mem, _layout, _flags); } extern fn bgfx_create_dynamic_vertex_buffer_mem(_mem: [*c]const Memory, _layout: [*c]const VertexLayout, _flags: u16) DynamicVertexBufferHandle; /// Update dynamic vertex buffer. /// <param name="_handle">Dynamic vertex buffer handle.</param> /// <param name="_startVertex">Start vertex.</param> /// <param name="_mem">Vertex buffer data.</param> pub inline fn updateDynamicVertexBuffer(_handle: DynamicVertexBufferHandle, _startVertex: u32, _mem: [*c]const Memory) void { return bgfx_update_dynamic_vertex_buffer(_handle, _startVertex, _mem); } extern fn bgfx_update_dynamic_vertex_buffer(_handle: DynamicVertexBufferHandle, _startVertex: u32, _mem: [*c]const Memory) void; /// Destroy dynamic vertex buffer. /// <param name="_handle">Dynamic vertex buffer handle.</param> pub inline fn destroyDynamicVertexBuffer(_handle: DynamicVertexBufferHandle) void { return bgfx_destroy_dynamic_vertex_buffer(_handle); } extern fn bgfx_destroy_dynamic_vertex_buffer(_handle: DynamicVertexBufferHandle) void; /// Returns number of requested or maximum available indices. /// <param name="_num">Number of required indices.</param> /// <param name="_index32">Set to `true` if input indices will be 32-bit.</param> pub inline fn getAvailTransientIndexBuffer(_num: u32, _index32: bool) u32 { return bgfx_get_avail_transient_index_buffer(_num, _index32); } extern fn bgfx_get_avail_transient_index_buffer(_num: u32, _index32: bool) u32; /// Returns number of requested or maximum available vertices. /// <param name="_num">Number of required vertices.</param> /// <param name="_layout">Vertex layout.</param> pub inline fn getAvailTransientVertexBuffer(_num: u32, _layout: [*c]const VertexLayout) u32 { return bgfx_get_avail_transient_vertex_buffer(_num, _layout); } extern fn bgfx_get_avail_transient_vertex_buffer(_num: u32, _layout: [*c]const VertexLayout) u32; /// Returns number of requested or maximum available instance buffer slots. 
/// <param name="_num">Number of required instances.</param> /// <param name="_stride">Stride per instance.</param> pub inline fn getAvailInstanceDataBuffer(_num: u32, _stride: u16) u32 { return bgfx_get_avail_instance_data_buffer(_num, _stride); } extern fn bgfx_get_avail_instance_data_buffer(_num: u32, _stride: u16) u32; /// Allocate transient index buffer. /// <param name="_tib">TransientIndexBuffer structure is filled and is valid for the duration of frame, and it can be reused for multiple draw calls.</param> /// <param name="_num">Number of indices to allocate.</param> /// <param name="_index32">Set to `true` if input indices will be 32-bit.</param> pub inline fn allocTransientIndexBuffer(_tib: [*c]TransientIndexBuffer, _num: u32, _index32: bool) void { return bgfx_alloc_transient_index_buffer(_tib, _num, _index32); } extern fn bgfx_alloc_transient_index_buffer(_tib: [*c]TransientIndexBuffer, _num: u32, _index32: bool) void; /// Allocate transient vertex buffer. /// <param name="_tvb">TransientVertexBuffer structure is filled and is valid for the duration of frame, and it can be reused for multiple draw calls.</param> /// <param name="_num">Number of vertices to allocate.</param> /// <param name="_layout">Vertex layout.</param> pub inline fn allocTransientVertexBuffer(_tvb: [*c]TransientVertexBuffer, _num: u32, _layout: [*c]const VertexLayout) void { return bgfx_alloc_transient_vertex_buffer(_tvb, _num, _layout); } extern fn bgfx_alloc_transient_vertex_buffer(_tvb: [*c]TransientVertexBuffer, _num: u32, _layout: [*c]const VertexLayout) void; /// Check for required space and allocate transient vertex and index /// buffers. If both space requirements are satisfied function returns /// true. /// <param name="_tvb">TransientVertexBuffer structure is filled and is valid for the duration of frame, and it can be reused for multiple draw calls.</param> /// <param name="_layout">Vertex layout.</param> /// <param name="_numVertices">Number of vertices to allocate.</param> /// <param name="_tib">TransientIndexBuffer structure is filled and is valid for the duration of frame, and it can be reused for multiple draw calls.</param> /// <param name="_numIndices">Number of indices to allocate.</param> /// <param name="_index32">Set to `true` if input indices will be 32-bit.</param> pub inline fn allocTransientBuffers(_tvb: [*c]TransientVertexBuffer, _layout: [*c]const VertexLayout, _numVertices: u32, _tib: [*c]TransientIndexBuffer, _numIndices: u32, _index32: bool) bool { return bgfx_alloc_transient_buffers(_tvb, _layout, _numVertices, _tib, _numIndices, _index32); } extern fn bgfx_alloc_transient_buffers(_tvb: [*c]TransientVertexBuffer, _layout: [*c]const VertexLayout, _numVertices: u32, _tib: [*c]TransientIndexBuffer, _numIndices: u32, _index32: bool) bool; /// Allocate instance data buffer. /// <param name="_idb">InstanceDataBuffer structure is filled and is valid for duration of frame, and it can be reused for multiple draw calls.</param> /// <param name="_num">Number of instances.</param> /// <param name="_stride">Instance stride. Must be multiple of 16.</param> pub inline fn allocInstanceDataBuffer(_idb: [*c]InstanceDataBuffer, _num: u32, _stride: u16) void { return bgfx_alloc_instance_data_buffer(_idb, _num, _stride); } extern fn bgfx_alloc_instance_data_buffer(_idb: [*c]InstanceDataBuffer, _num: u32, _stride: u16) void; /// Create draw indirect buffer. 
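// Usage sketch (illustrative, not part of the generated bindings): the typical
// per-frame transient-geometry pattern. `allocTransientBuffers` only succeeds
// when both the vertex and the index request fit, so the draw code can bail
// out cleanly on a frame where transient space runs out. The `layout` argument
// is assumed to be configured elsewhere.
fn exampleTransientQuad(layout: *const VertexLayout) void {
    var tvb: TransientVertexBuffer = undefined;
    var tib: TransientIndexBuffer = undefined;
    // 4 vertices plus 6 16-bit indices for a single quad.
    if (allocTransientBuffers(&tvb, layout, 4, &tib, 6, false)) {
        // Fill the returned buffers here, then bind them with the transient
        // set* calls below before submitting the draw.
    }
}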
/// <param name="_num">Number of indirect calls.</param> pub inline fn createIndirectBuffer(_num: u32) IndirectBufferHandle { return bgfx_create_indirect_buffer(_num); } extern fn bgfx_create_indirect_buffer(_num: u32) IndirectBufferHandle; /// Destroy draw indirect buffer. /// <param name="_handle">Indirect buffer handle.</param> pub inline fn destroyIndirectBuffer(_handle: IndirectBufferHandle) void { return bgfx_destroy_indirect_buffer(_handle); } extern fn bgfx_destroy_indirect_buffer(_handle: IndirectBufferHandle) void; /// Create shader from memory buffer. /// <param name="_mem">Shader binary.</param> pub inline fn createShader(_mem: [*c]const Memory) ShaderHandle { return bgfx_create_shader(_mem); } extern fn bgfx_create_shader(_mem: [*c]const Memory) ShaderHandle; /// Returns the number of uniforms and uniform handles used inside a shader. /// @remarks /// Only non-predefined uniforms are returned. /// <param name="_handle">Shader handle.</param> /// <param name="_uniforms">UniformHandle array where data will be stored.</param> /// <param name="_max">Maximum capacity of array.</param> pub inline fn getShaderUniforms(_handle: ShaderHandle, _uniforms: [*c]UniformHandle, _max: u16) u16 { return bgfx_get_shader_uniforms(_handle, _uniforms, _max); } extern fn bgfx_get_shader_uniforms(_handle: ShaderHandle, _uniforms: [*c]UniformHandle, _max: u16) u16; /// Set shader debug name. /// <param name="_handle">Shader handle.</param> /// <param name="_name">Shader name.</param> /// <param name="_len">Shader name length (if length is INT32_MAX, it's expected that _name is zero terminated string).</param> pub inline fn setShaderName(_handle: ShaderHandle, _name: [*c]const u8, _len: i32) void { return bgfx_set_shader_name(_handle, _name, _len); } extern fn bgfx_set_shader_name(_handle: ShaderHandle, _name: [*c]const u8, _len: i32) void; /// Destroy shader. /// @remark Once a shader program is created with _handle, /// it is safe to destroy that shader. /// <param name="_handle">Shader handle.</param> pub inline fn destroyShader(_handle: ShaderHandle) void { return bgfx_destroy_shader(_handle); } extern fn bgfx_destroy_shader(_handle: ShaderHandle) void; /// Create program with vertex and fragment shaders. /// <param name="_vsh">Vertex shader.</param> /// <param name="_fsh">Fragment shader.</param> /// <param name="_destroyShaders">If true, shaders will be destroyed when program is destroyed.</param> pub inline fn createProgram(_vsh: ShaderHandle, _fsh: ShaderHandle, _destroyShaders: bool) ProgramHandle { return bgfx_create_program(_vsh, _fsh, _destroyShaders); } extern fn bgfx_create_program(_vsh: ShaderHandle, _fsh: ShaderHandle, _destroyShaders: bool) ProgramHandle; /// Create program with compute shader. /// <param name="_csh">Compute shader.</param> /// <param name="_destroyShaders">If true, shaders will be destroyed when program is destroyed.</param> pub inline fn createComputeProgram(_csh: ShaderHandle, _destroyShaders: bool) ProgramHandle { return bgfx_create_compute_program(_csh, _destroyShaders); } extern fn bgfx_create_compute_program(_csh: ShaderHandle, _destroyShaders: bool) ProgramHandle; /// Destroy program. /// <param name="_handle">Program handle.</param> pub inline fn destroyProgram(_handle: ProgramHandle) void { return bgfx_destroy_program(_handle); } extern fn bgfx_destroy_program(_handle: ProgramHandle) void; /// Validate texture parameters. 
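// Usage sketch (illustrative, not part of the generated bindings): turning two
// compiled shader blobs into a program. The `Memory` blocks are assumed to
// wrap .bin files produced by shaderc; passing `true` for `_destroyShaders`
// ties the shader lifetimes to the program.
fn exampleCreateProgram(vs_mem: *const Memory, fs_mem: *const Memory) ProgramHandle {
    const vsh = createShader(vs_mem);
    const fsh = createShader(fs_mem);
    // Debug names show up in graphics debuggers; explicit lengths are passed
    // here instead of relying on zero termination.
    setShaderName(vsh, "vs_example", "vs_example".len);
    setShaderName(fsh, "fs_example", "fs_example".len);
    return createProgram(vsh, fsh, true);
}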
/// <param name="_depth">Depth dimension of volume texture.</param> /// <param name="_cubeMap">Indicates that texture contains cubemap.</param> /// <param name="_numLayers">Number of layers in texture array.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> /// <param name="_flags">Texture flags. See `BGFX_TEXTURE_*`.</param> pub inline fn isTextureValid(_depth: u16, _cubeMap: bool, _numLayers: u16, _format: TextureFormat, _flags: u64) bool { return bgfx_is_texture_valid(_depth, _cubeMap, _numLayers, _format, _flags); } extern fn bgfx_is_texture_valid(_depth: u16, _cubeMap: bool, _numLayers: u16, _format: TextureFormat, _flags: u64) bool; /// Validate frame buffer parameters. /// <param name="_num">Number of attachments.</param> /// <param name="_attachment">Attachment texture info. See: `bgfx::Attachment`.</param> pub inline fn isFrameBufferValid(_num: u8, _attachment: [*c]const Attachment) bool { return bgfx_is_frame_buffer_valid(_num, _attachment); } extern fn bgfx_is_frame_buffer_valid(_num: u8, _attachment: [*c]const Attachment) bool; /// Calculate amount of memory required for texture. /// <param name="_info">Resulting texture info structure. See: `TextureInfo`.</param> /// <param name="_width">Width.</param> /// <param name="_height">Height.</param> /// <param name="_depth">Depth dimension of volume texture.</param> /// <param name="_cubeMap">Indicates that texture contains cubemap.</param> /// <param name="_hasMips">Indicates that texture contains full mip-map chain.</param> /// <param name="_numLayers">Number of layers in texture array.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> pub inline fn calcTextureSize(_info: [*c]TextureInfo, _width: u16, _height: u16, _depth: u16, _cubeMap: bool, _hasMips: bool, _numLayers: u16, _format: TextureFormat) void { return bgfx_calc_texture_size(_info, _width, _height, _depth, _cubeMap, _hasMips, _numLayers, _format); } extern fn bgfx_calc_texture_size(_info: [*c]TextureInfo, _width: u16, _height: u16, _depth: u16, _cubeMap: bool, _hasMips: bool, _numLayers: u16, _format: TextureFormat) void; /// Create texture from memory buffer. /// <param name="_mem">DDS, KTX or PVR texture binary data.</param> /// <param name="_flags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> /// <param name="_skip">Skip top level mips when parsing texture.</param> /// <param name="_info">When non-`NULL` is specified it returns parsed texture information.</param> pub inline fn createTexture(_mem: [*c]const Memory, _flags: u64, _skip: u8, _info: [*c]TextureInfo) TextureHandle { return bgfx_create_texture(_mem, _flags, _skip, _info); } extern fn bgfx_create_texture(_mem: [*c]const Memory, _flags: u64, _skip: u8, _info: [*c]TextureInfo) TextureHandle; /// Create 2D texture. /// <param name="_width">Width.</param> /// <param name="_height">Height.</param> /// <param name="_hasMips">Indicates that texture contains full mip-map chain.</param> /// <param name="_numLayers">Number of layers in texture array. Must be 1 if caps `BGFX_CAPS_TEXTURE_2D_ARRAY` flag is not set.</param> /// <param name="_format">Texture format. 
See: `TextureFormat::Enum`.</param> /// <param name="_flags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> /// <param name="_mem">Texture data. If `_mem` is non-NULL, created texture will be immutable. If `_mem` is NULL content of the texture is uninitialized. When `_numLayers` is more than 1, expected memory layout is texture and all mips together for each array element.</param> pub inline fn createTexture2D(_width: u16, _height: u16, _hasMips: bool, _numLayers: u16, _format: TextureFormat, _flags: u64, _mem: [*c]const Memory) TextureHandle { return bgfx_create_texture_2d(_width, _height, _hasMips, _numLayers, _format, _flags, _mem); } extern fn bgfx_create_texture_2d(_width: u16, _height: u16, _hasMips: bool, _numLayers: u16, _format: TextureFormat, _flags: u64, _mem: [*c]const Memory) TextureHandle; /// Create texture with size based on back-buffer ratio. Texture will maintain ratio /// if back buffer resolution changes. /// <param name="_ratio">Texture size in respect to back-buffer size. See: `BackbufferRatio::Enum`.</param> /// <param name="_hasMips">Indicates that texture contains full mip-map chain.</param> /// <param name="_numLayers">Number of layers in texture array. Must be 1 if caps `BGFX_CAPS_TEXTURE_2D_ARRAY` flag is not set.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> /// <param name="_flags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> pub inline fn createTexture2DScaled(_ratio: BackbufferRatio, _hasMips: bool, _numLayers: u16, _format: TextureFormat, _flags: u64) TextureHandle { return bgfx_create_texture_2d_scaled(_ratio, _hasMips, _numLayers, _format, _flags); } extern fn bgfx_create_texture_2d_scaled(_ratio: BackbufferRatio, _hasMips: bool, _numLayers: u16, _format: TextureFormat, _flags: u64) TextureHandle; /// Create 3D texture. /// <param name="_width">Width.</param> /// <param name="_height">Height.</param> /// <param name="_depth">Depth.</param> /// <param name="_hasMips">Indicates that texture contains full mip-map chain.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> /// <param name="_flags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> /// <param name="_mem">Texture data. If `_mem` is non-NULL, created texture will be immutable. If `_mem` is NULL content of the texture is uninitialized. 
When `_numLayers` is more than 1, expected memory layout is texture and all mips together for each array element.</param> pub inline fn createTexture3D(_width: u16, _height: u16, _depth: u16, _hasMips: bool, _format: TextureFormat, _flags: u64, _mem: [*c]const Memory) TextureHandle { return bgfx_create_texture_3d(_width, _height, _depth, _hasMips, _format, _flags, _mem); } extern fn bgfx_create_texture_3d(_width: u16, _height: u16, _depth: u16, _hasMips: bool, _format: TextureFormat, _flags: u64, _mem: [*c]const Memory) TextureHandle; /// Create Cube texture. /// <param name="_size">Cube side size.</param> /// <param name="_hasMips">Indicates that texture contains full mip-map chain.</param> /// <param name="_numLayers">Number of layers in texture array. Must be 1 if caps `BGFX_CAPS_TEXTURE_2D_ARRAY` flag is not set.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> /// <param name="_flags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> /// <param name="_mem">Texture data. If `_mem` is non-NULL, created texture will be immutable. If `_mem` is NULL content of the texture is uninitialized. When `_numLayers` is more than 1, expected memory layout is texture and all mips together for each array element.</param> pub inline fn createTextureCube(_size: u16, _hasMips: bool, _numLayers: u16, _format: TextureFormat, _flags: u64, _mem: [*c]const Memory) TextureHandle { return bgfx_create_texture_cube(_size, _hasMips, _numLayers, _format, _flags, _mem); } extern fn bgfx_create_texture_cube(_size: u16, _hasMips: bool, _numLayers: u16, _format: TextureFormat, _flags: u64, _mem: [*c]const Memory) TextureHandle; /// Update 2D texture. /// @attention It's valid to update only mutable texture. See `bgfx::createTexture2D` for more info. /// <param name="_handle">Texture handle.</param> /// <param name="_layer">Layer in texture array.</param> /// <param name="_mip">Mip level.</param> /// <param name="_x">X offset in texture.</param> /// <param name="_y">Y offset in texture.</param> /// <param name="_width">Width of texture block.</param> /// <param name="_height">Height of texture block.</param> /// <param name="_mem">Texture update data.</param> /// <param name="_pitch">Pitch of input image (bytes). When _pitch is set to UINT16_MAX, it will be calculated internally based on _width.</param> pub inline fn updateTexture2D(_handle: TextureHandle, _layer: u16, _mip: u8, _x: u16, _y: u16, _width: u16, _height: u16, _mem: [*c]const Memory, _pitch: u16) void { return bgfx_update_texture_2d(_handle, _layer, _mip, _x, _y, _width, _height, _mem, _pitch); } extern fn bgfx_update_texture_2d(_handle: TextureHandle, _layer: u16, _mip: u8, _x: u16, _y: u16, _width: u16, _height: u16, _mem: [*c]const Memory, _pitch: u16) void; /// Update 3D texture. /// @attention It's valid to update only mutable texture. See `bgfx::createTexture3D` for more info. 
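// Usage sketch (illustrative, not part of the generated bindings): creating a
// mutable 256x256 2D texture and updating a 16x16 region of mip 0. A null
// `_mem` keeps the texture mutable, which is what `updateTexture2D` requires.
// Assumes the TextureFormat enum exposes `.RGBA8`; `0` is used for the
// creation flags (no texture flags, default sampler state).
fn exampleMutableTexture(region: *const Memory) TextureHandle {
    var info: TextureInfo = undefined;
    // Query how much storage the full texture needs (no mips, 1 layer).
    calcTextureSize(&info, 256, 256, 1, false, false, 1, TextureFormat.RGBA8);
    const tex = createTexture2D(256, 256, false, 1, TextureFormat.RGBA8, 0, null);
    // Upload a 16x16 block at (32, 32); UINT16_MAX lets bgfx derive the pitch.
    updateTexture2D(tex, 0, 0, 32, 32, 16, 16, region, 0xffff);
    return tex;
}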
/// <param name="_handle">Texture handle.</param> /// <param name="_mip">Mip level.</param> /// <param name="_x">X offset in texture.</param> /// <param name="_y">Y offset in texture.</param> /// <param name="_z">Z offset in texture.</param> /// <param name="_width">Width of texture block.</param> /// <param name="_height">Height of texture block.</param> /// <param name="_depth">Depth of texture block.</param> /// <param name="_mem">Texture update data.</param> pub inline fn updateTexture3D(_handle: TextureHandle, _mip: u8, _x: u16, _y: u16, _z: u16, _width: u16, _height: u16, _depth: u16, _mem: [*c]const Memory) void { return bgfx_update_texture_3d(_handle, _mip, _x, _y, _z, _width, _height, _depth, _mem); } extern fn bgfx_update_texture_3d(_handle: TextureHandle, _mip: u8, _x: u16, _y: u16, _z: u16, _width: u16, _height: u16, _depth: u16, _mem: [*c]const Memory) void; /// Update Cube texture. /// @attention It's valid to update only mutable texture. See `bgfx::createTextureCube` for more info. /// <param name="_handle">Texture handle.</param> /// <param name="_layer">Layer in texture array.</param> /// <param name="_side">Cubemap side `BGFX_CUBE_MAP_<POSITIVE or NEGATIVE>_<X, Y or Z>`, where 0 is +X, 1 is -X, 2 is +Y, 3 is -Y, 4 is +Z, and 5 is -Z. +----------+ |-z 2| | ^ +y | | | | Unfolded cube: | +---->+x | +----------+----------+----------+----------+ |+y 1|+y 4|+y 0|+y 5| | ^ -x | ^ +z | ^ +x | ^ -z | | | | | | | | | | | +---->+z | +---->+x | +---->-z | +---->-x | +----------+----------+----------+----------+ |+z 3| | ^ -y | | | | | +---->+x | +----------+</param> /// <param name="_mip">Mip level.</param> /// <param name="_x">X offset in texture.</param> /// <param name="_y">Y offset in texture.</param> /// <param name="_width">Width of texture block.</param> /// <param name="_height">Height of texture block.</param> /// <param name="_mem">Texture update data.</param> /// <param name="_pitch">Pitch of input image (bytes). When _pitch is set to UINT16_MAX, it will be calculated internally based on _width.</param> pub inline fn updateTextureCube(_handle: TextureHandle, _layer: u16, _side: u8, _mip: u8, _x: u16, _y: u16, _width: u16, _height: u16, _mem: [*c]const Memory, _pitch: u16) void { return bgfx_update_texture_cube(_handle, _layer, _side, _mip, _x, _y, _width, _height, _mem, _pitch); } extern fn bgfx_update_texture_cube(_handle: TextureHandle, _layer: u16, _side: u8, _mip: u8, _x: u16, _y: u16, _width: u16, _height: u16, _mem: [*c]const Memory, _pitch: u16) void; /// Read back texture content. /// @attention Texture must be created with `BGFX_TEXTURE_READ_BACK` flag. /// @attention Availability depends on: `BGFX_CAPS_TEXTURE_READ_BACK`. /// <param name="_handle">Texture handle.</param> /// <param name="_data">Destination buffer.</param> /// <param name="_mip">Mip level.</param> pub inline fn readTexture(_handle: TextureHandle, _data: ?*anyopaque, _mip: u8) u32 { return bgfx_read_texture(_handle, _data, _mip); } extern fn bgfx_read_texture(_handle: TextureHandle, _data: ?*anyopaque, _mip: u8) u32; /// Set texture debug name. 
/// <param name="_handle">Texture handle.</param> /// <param name="_name">Texture name.</param> /// <param name="_len">Texture name length (if length is INT32_MAX, it's expected that _name is zero terminated string.</param> pub inline fn setTextureName(_handle: TextureHandle, _name: [*c]const u8, _len: i32) void { return bgfx_set_texture_name(_handle, _name, _len); } extern fn bgfx_set_texture_name(_handle: TextureHandle, _name: [*c]const u8, _len: i32) void; /// Returns texture direct access pointer. /// @attention Availability depends on: `BGFX_CAPS_TEXTURE_DIRECT_ACCESS`. This feature /// is available on GPUs that have unified memory architecture (UMA) support. /// <param name="_handle">Texture handle.</param> pub inline fn getDirectAccessPtr(_handle: TextureHandle) ?*anyopaque { return bgfx_get_direct_access_ptr(_handle); } extern fn bgfx_get_direct_access_ptr(_handle: TextureHandle) ?*anyopaque; /// Destroy texture. /// <param name="_handle">Texture handle.</param> pub inline fn destroyTexture(_handle: TextureHandle) void { return bgfx_destroy_texture(_handle); } extern fn bgfx_destroy_texture(_handle: TextureHandle) void; /// Create frame buffer (simple). /// <param name="_width">Texture width.</param> /// <param name="_height">Texture height.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> /// <param name="_textureFlags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> pub inline fn createFrameBuffer(_width: u16, _height: u16, _format: TextureFormat, _textureFlags: u64) FrameBufferHandle { return bgfx_create_frame_buffer(_width, _height, _format, _textureFlags); } extern fn bgfx_create_frame_buffer(_width: u16, _height: u16, _format: TextureFormat, _textureFlags: u64) FrameBufferHandle; /// Create frame buffer with size based on back-buffer ratio. Frame buffer will maintain ratio /// if back buffer resolution changes. /// <param name="_ratio">Frame buffer size in respect to back-buffer size. See: `BackbufferRatio::Enum`.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> /// <param name="_textureFlags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> pub inline fn createFrameBufferScaled(_ratio: BackbufferRatio, _format: TextureFormat, _textureFlags: u64) FrameBufferHandle { return bgfx_create_frame_buffer_scaled(_ratio, _format, _textureFlags); } extern fn bgfx_create_frame_buffer_scaled(_ratio: BackbufferRatio, _format: TextureFormat, _textureFlags: u64) FrameBufferHandle; /// Create MRT frame buffer from texture handles (simple). 
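// Usage sketch (illustrative, not part of the generated bindings): creating an
// offscreen render target with the simple frame buffer constructor and
// pointing a view at it. Assumes the TextureFormat enum exposes `.BGRA8`;
// `0` is passed for the texture flags, so the attachment keeps the default
// sampler state.
fn exampleOffscreenTarget(view: ViewId) FrameBufferHandle {
    const fb = createFrameBuffer(512, 512, TextureFormat.BGRA8, 0);
    // Draws submitted to `view` now land in the offscreen frame buffer.
    setViewFrameBuffer(view, fb);
    return fb;
}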
/// <param name="_num">Number of texture handles.</param> /// <param name="_handles">Texture attachments.</param> /// <param name="_destroyTexture">If true, textures will be destroyed when frame buffer is destroyed.</param> pub inline fn createFrameBufferFromHandles(_num: u8, _handles: [*c]const TextureHandle, _destroyTexture: bool) FrameBufferHandle { return bgfx_create_frame_buffer_from_handles(_num, _handles, _destroyTexture); } extern fn bgfx_create_frame_buffer_from_handles(_num: u8, _handles: [*c]const TextureHandle, _destroyTexture: bool) FrameBufferHandle; /// Create MRT frame buffer from texture handles with specific layer and /// mip level. /// <param name="_num">Number of attachments.</param> /// <param name="_attachment">Attachment texture info. See: `bgfx::Attachment`.</param> /// <param name="_destroyTexture">If true, textures will be destroyed when frame buffer is destroyed.</param> pub inline fn createFrameBufferFromAttachment(_num: u8, _attachment: [*c]const Attachment, _destroyTexture: bool) FrameBufferHandle { return bgfx_create_frame_buffer_from_attachment(_num, _attachment, _destroyTexture); } extern fn bgfx_create_frame_buffer_from_attachment(_num: u8, _attachment: [*c]const Attachment, _destroyTexture: bool) FrameBufferHandle; /// Create frame buffer for multiple window rendering. /// @remarks /// Frame buffer cannot be used for sampling. /// @attention Availability depends on: `BGFX_CAPS_SWAP_CHAIN`. /// <param name="_nwh">OS' target native window handle.</param> /// <param name="_width">Window back buffer width.</param> /// <param name="_height">Window back buffer height.</param> /// <param name="_format">Window back buffer color format.</param> /// <param name="_depthFormat">Window back buffer depth format.</param> pub inline fn createFrameBufferFromNwh(_nwh: ?*anyopaque, _width: u16, _height: u16, _format: TextureFormat, _depthFormat: TextureFormat) FrameBufferHandle { return bgfx_create_frame_buffer_from_nwh(_nwh, _width, _height, _format, _depthFormat); } extern fn bgfx_create_frame_buffer_from_nwh(_nwh: ?*anyopaque, _width: u16, _height: u16, _format: TextureFormat, _depthFormat: TextureFormat) FrameBufferHandle; /// Set frame buffer debug name. /// <param name="_handle">Frame buffer handle.</param> /// <param name="_name">Frame buffer name.</param> /// <param name="_len">Frame buffer name length (if length is INT32_MAX, it's expected that _name is zero terminated string.</param> pub inline fn setFrameBufferName(_handle: FrameBufferHandle, _name: [*c]const u8, _len: i32) void { return bgfx_set_frame_buffer_name(_handle, _name, _len); } extern fn bgfx_set_frame_buffer_name(_handle: FrameBufferHandle, _name: [*c]const u8, _len: i32) void; /// Obtain texture handle of frame buffer attachment. /// <param name="_handle">Frame buffer handle.</param> pub inline fn getTexture(_handle: FrameBufferHandle, _attachment: u8) TextureHandle { return bgfx_get_texture(_handle, _attachment); } extern fn bgfx_get_texture(_handle: FrameBufferHandle, _attachment: u8) TextureHandle; /// Destroy frame buffer. /// <param name="_handle">Frame buffer handle.</param> pub inline fn destroyFrameBuffer(_handle: FrameBufferHandle) void { return bgfx_destroy_frame_buffer(_handle); } extern fn bgfx_destroy_frame_buffer(_handle: FrameBufferHandle) void; /// Create shader uniform parameter. /// @remarks /// 1. Uniform names are unique. It's valid to call `bgfx::createUniform` /// multiple times with the same uniform name. 
The library will always /// return the same handle, but the handle reference count will be /// incremented. This means that the same number of `bgfx::destroyUniform` /// must be called to properly destroy the uniform. /// 2. Predefined uniforms (declared in `bgfx_shader.sh`): /// - `u_viewRect vec4(x, y, width, height)` - view rectangle for current /// view, in pixels. /// - `u_viewTexel vec4(1.0/width, 1.0/height, undef, undef)` - inverse /// width and height /// - `u_view mat4` - view matrix /// - `u_invView mat4` - inverted view matrix /// - `u_proj mat4` - projection matrix /// - `u_invProj mat4` - inverted projection matrix /// - `u_viewProj mat4` - concatenated view projection matrix /// - `u_invViewProj mat4` - concatenated inverted view projection matrix /// - `u_model mat4[BGFX_CONFIG_MAX_BONES]` - array of model matrices. /// - `u_modelView mat4` - concatenated model view matrix, only first /// model matrix from array is used. /// - `u_modelViewProj mat4` - concatenated model view projection matrix. /// - `u_alphaRef float` - alpha reference value for alpha test. /// <param name="_name">Uniform name in shader.</param> /// <param name="_type">Type of uniform (See: `bgfx::UniformType`).</param> /// <param name="_num">Number of elements in array.</param> pub inline fn createUniform(_name: [*c]const u8, _type: UniformType, _num: u16) UniformHandle { return bgfx_create_uniform(_name, _type, _num); } extern fn bgfx_create_uniform(_name: [*c]const u8, _type: UniformType, _num: u16) UniformHandle; /// Retrieve uniform info. /// <param name="_handle">Handle to uniform object.</param> /// <param name="_info">Uniform info.</param> pub inline fn getUniformInfo(_handle: UniformHandle, _info: [*c]UniformInfo) void { return bgfx_get_uniform_info(_handle, _info); } extern fn bgfx_get_uniform_info(_handle: UniformHandle, _info: [*c]UniformInfo) void; /// Destroy shader uniform parameter. /// <param name="_handle">Handle to uniform object.</param> pub inline fn destroyUniform(_handle: UniformHandle) void { return bgfx_destroy_uniform(_handle); } extern fn bgfx_destroy_uniform(_handle: UniformHandle) void; /// Create occlusion query. pub inline fn createOcclusionQuery() OcclusionQueryHandle { return bgfx_create_occlusion_query(); } extern fn bgfx_create_occlusion_query() OcclusionQueryHandle; /// Retrieve occlusion query result from previous frame. /// <param name="_handle">Handle to occlusion query object.</param> /// <param name="_result">Number of pixels that passed test. This argument can be `NULL` if result of occlusion query is not needed.</param> pub inline fn getResult(_handle: OcclusionQueryHandle, _result: [*c]i32) OcclusionQueryResult { return bgfx_get_result(_handle, _result); } extern fn bgfx_get_result(_handle: OcclusionQueryHandle, _result: [*c]i32) OcclusionQueryResult; /// Destroy occlusion query. /// <param name="_handle">Handle to occlusion query object.</param> pub inline fn destroyOcclusionQuery(_handle: OcclusionQueryHandle) void { return bgfx_destroy_occlusion_query(_handle); } extern fn bgfx_destroy_occlusion_query(_handle: OcclusionQueryHandle) void; /// Set palette color value. /// <param name="_index">Index into palette.</param> /// <param name="_rgba">RGBA floating point values.</param> pub inline fn setPaletteColor(_index: u8, _rgba: [4]f32) void { return bgfx_set_palette_color(_index, _rgba); } extern fn bgfx_set_palette_color(_index: u8, _rgba: [4]f32) void; /// Set palette color value. 
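// Usage sketch (illustrative, not part of the generated bindings): a vec4
// uniform created once and updated before a submit. Assumes the UniformType
// enum exposes `.Vec4`; the name must match the uniform declared in the
// shader.
fn exampleTintUniform() void {
    const u_tint = createUniform("u_tint", UniformType.Vec4, 1);
    const tint = [4]f32{ 1.0, 0.5, 0.25, 1.0 };
    // UINT16_MAX reuses the element count given at creation time (1 here).
    setUniform(u_tint, &tint, 0xffff);
    destroyUniform(u_tint);
}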
/// <param name="_index">Index into palette.</param> /// <param name="_rgba">Packed 32-bit RGBA value.</param> pub inline fn setPaletteColorRgba8(_index: u8, _rgba: u32) void { return bgfx_set_palette_color_rgba8(_index, _rgba); } extern fn bgfx_set_palette_color_rgba8(_index: u8, _rgba: u32) void; /// Set view name. /// @remarks /// This is debug only feature. /// In graphics debugger view name will appear as: /// "nnnc <view name>" /// ^ ^ ^ /// | +--- compute (C) /// +------ view id /// <param name="_id">View id.</param> /// <param name="_name">View name.</param> pub inline fn setViewName(_id: ViewId, _name: [*c]const u8) void { return bgfx_set_view_name(_id, _name); } extern fn bgfx_set_view_name(_id: ViewId, _name: [*c]const u8) void; /// Set view rectangle. Draw primitive outside view will be clipped. /// <param name="_id">View id.</param> /// <param name="_x">Position x from the left corner of the window.</param> /// <param name="_y">Position y from the top corner of the window.</param> /// <param name="_width">Width of view port region.</param> /// <param name="_height">Height of view port region.</param> pub inline fn setViewRect(_id: ViewId, _x: u16, _y: u16, _width: u16, _height: u16) void { return bgfx_set_view_rect(_id, _x, _y, _width, _height); } extern fn bgfx_set_view_rect(_id: ViewId, _x: u16, _y: u16, _width: u16, _height: u16) void; /// Set view rectangle. Draw primitive outside view will be clipped. /// <param name="_id">View id.</param> /// <param name="_x">Position x from the left corner of the window.</param> /// <param name="_y">Position y from the top corner of the window.</param> /// <param name="_ratio">Width and height will be set in respect to back-buffer size. See: `BackbufferRatio::Enum`.</param> pub inline fn setViewRectRatio(_id: ViewId, _x: u16, _y: u16, _ratio: BackbufferRatio) void { return bgfx_set_view_rect_ratio(_id, _x, _y, _ratio); } extern fn bgfx_set_view_rect_ratio(_id: ViewId, _x: u16, _y: u16, _ratio: BackbufferRatio) void; /// Set view scissor. Draw primitive outside view will be clipped. When /// _x, _y, _width and _height are set to 0, scissor will be disabled. /// <param name="_id">View id.</param> /// <param name="_x">Position x from the left corner of the window.</param> /// <param name="_y">Position y from the top corner of the window.</param> /// <param name="_width">Width of view scissor region.</param> /// <param name="_height">Height of view scissor region.</param> pub inline fn setViewScissor(_id: ViewId, _x: u16, _y: u16, _width: u16, _height: u16) void { return bgfx_set_view_scissor(_id, _x, _y, _width, _height); } extern fn bgfx_set_view_scissor(_id: ViewId, _x: u16, _y: u16, _width: u16, _height: u16) void; /// Set view clear flags. /// <param name="_id">View id.</param> /// <param name="_flags">Clear flags. Use `BGFX_CLEAR_NONE` to remove any clear operation. See: `BGFX_CLEAR_*`.</param> /// <param name="_rgba">Color clear value.</param> /// <param name="_depth">Depth clear value.</param> /// <param name="_stencil">Stencil clear value.</param> pub inline fn setViewClear(_id: ViewId, _flags: u16, _rgba: u32, _depth: f32, _stencil: u8) void { return bgfx_set_view_clear(_id, _flags, _rgba, _depth, _stencil); } extern fn bgfx_set_view_clear(_id: ViewId, _flags: u16, _rgba: u32, _depth: f32, _stencil: u8) void; /// Set view clear flags with different clear color for each /// frame buffer texture. `bgfx::setPaletteColor` must be used to set up a /// clear color palette. 
/// <param name="_id">View id.</param> /// <param name="_flags">Clear flags. Use `BGFX_CLEAR_NONE` to remove any clear operation. See: `BGFX_CLEAR_*`.</param> /// <param name="_depth">Depth clear value.</param> /// <param name="_stencil">Stencil clear value.</param> /// <param name="_c0">Palette index for frame buffer attachment 0.</param> /// <param name="_c1">Palette index for frame buffer attachment 1.</param> /// <param name="_c2">Palette index for frame buffer attachment 2.</param> /// <param name="_c3">Palette index for frame buffer attachment 3.</param> /// <param name="_c4">Palette index for frame buffer attachment 4.</param> /// <param name="_c5">Palette index for frame buffer attachment 5.</param> /// <param name="_c6">Palette index for frame buffer attachment 6.</param> /// <param name="_c7">Palette index for frame buffer attachment 7.</param> pub inline fn setViewClearMrt(_id: ViewId, _flags: u16, _depth: f32, _stencil: u8, _c0: u8, _c1: u8, _c2: u8, _c3: u8, _c4: u8, _c5: u8, _c6: u8, _c7: u8) void { return bgfx_set_view_clear_mrt(_id, _flags, _depth, _stencil, _c0, _c1, _c2, _c3, _c4, _c5, _c6, _c7); } extern fn bgfx_set_view_clear_mrt(_id: ViewId, _flags: u16, _depth: f32, _stencil: u8, _c0: u8, _c1: u8, _c2: u8, _c3: u8, _c4: u8, _c5: u8, _c6: u8, _c7: u8) void; /// Set view sorting mode. /// @remarks /// View mode must be set prior calling `bgfx::submit` for the view. /// <param name="_id">View id.</param> /// <param name="_mode">View sort mode. See `ViewMode::Enum`.</param> pub inline fn setViewMode(_id: ViewId, _mode: ViewMode) void { return bgfx_set_view_mode(_id, _mode); } extern fn bgfx_set_view_mode(_id: ViewId, _mode: ViewMode) void; /// Set view frame buffer. /// @remarks /// Not persistent after `bgfx::reset` call. /// <param name="_id">View id.</param> /// <param name="_handle">Frame buffer handle. Passing `BGFX_INVALID_HANDLE` as frame buffer handle will draw primitives from this view into default back buffer.</param> pub inline fn setViewFrameBuffer(_id: ViewId, _handle: FrameBufferHandle) void { return bgfx_set_view_frame_buffer(_id, _handle); } extern fn bgfx_set_view_frame_buffer(_id: ViewId, _handle: FrameBufferHandle) void; /// Set view's view matrix and projection matrix, /// all draw primitives in this view will use these two matrices. /// <param name="_id">View id.</param> /// <param name="_view">View matrix.</param> /// <param name="_proj">Projection matrix.</param> pub inline fn setViewTransform(_id: ViewId, _view: ?*const anyopaque, _proj: ?*const anyopaque) void { return bgfx_set_view_transform(_id, _view, _proj); } extern fn bgfx_set_view_transform(_id: ViewId, _view: ?*const anyopaque, _proj: ?*const anyopaque) void; /// Post submit view reordering. /// <param name="_id">First view id.</param> /// <param name="_num">Number of views to remap.</param> /// <param name="_order">View remap id table. Passing `NULL` will reset view ids to default state.</param> pub inline fn setViewOrder(_id: ViewId, _num: u16, _order: [*c]const ViewId) void { return bgfx_set_view_order(_id, _num, _order); } extern fn bgfx_set_view_order(_id: ViewId, _num: u16, _order: [*c]const ViewId) void; /// Reset all view settings to default. pub inline fn resetView(_id: ViewId) void { return bgfx_reset_view(_id); } extern fn bgfx_reset_view(_id: ViewId) void; /// Begin submitting draw calls from thread. 
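// Usage sketch (illustrative, not part of the generated bindings): one-time
// setup of a view. The clear-flag value is written out numerically because the
// BGFX_CLEAR_* constants live elsewhere; 0x0001 | 0x0002 is assumed to match
// BGFX_CLEAR_COLOR | BGFX_CLEAR_DEPTH.
fn exampleConfigureView(width: u16, height: u16, view_mtx: *const [16]f32, proj_mtx: *const [16]f32) void {
    const view: ViewId = 0;
    setViewName(view, "forward");
    setViewRect(view, 0, 0, width, height);
    // Dark gray clear color, depth cleared to 1.0, stencil to 0.
    setViewClear(view, 0x0001 | 0x0002, 0x303030ff, 1.0, 0);
    setViewTransform(view, view_mtx, proj_mtx);
}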
/// <param name="_forThread">Explicitly request an encoder for a worker thread.</param> pub inline fn encoderBegin(_forThread: bool) ?*Encoder { return bgfx_encoder_begin(_forThread); } extern fn bgfx_encoder_begin(_forThread: bool) ?*Encoder; /// End submitting draw calls from thread. /// <param name="_encoder">Encoder.</param> pub inline fn encoderEnd(_encoder: ?*Encoder) void { return bgfx_encoder_end(_encoder); } extern fn bgfx_encoder_end(_encoder: ?*Encoder) void; /// Sets a debug marker. This allows you to group graphics calls together for easy browsing in /// graphics debugging tools. /// <param name="_marker">Marker string.</param> extern fn bgfx_encoder_set_marker(self: ?*Encoder, _marker: [*c]const u8) void; /// Set render states for draw primitive. /// @remarks /// 1. To set up more complex states use: /// `BGFX_STATE_ALPHA_REF(_ref)`, /// `BGFX_STATE_POINT_SIZE(_size)`, /// `BGFX_STATE_BLEND_FUNC(_src, _dst)`, /// `BGFX_STATE_BLEND_FUNC_SEPARATE(_srcRGB, _dstRGB, _srcA, _dstA)`, /// `BGFX_STATE_BLEND_EQUATION(_equation)`, /// `BGFX_STATE_BLEND_EQUATION_SEPARATE(_equationRGB, _equationA)` /// 2. `BGFX_STATE_BLEND_EQUATION_ADD` is set when no other blend /// equation is specified. /// <param name="_state">State flags. Default state for primitive type is triangles. See: `BGFX_STATE_DEFAULT`. - `BGFX_STATE_DEPTH_TEST_*` - Depth test function. - `BGFX_STATE_BLEND_*` - See remark 1 about BGFX_STATE_BLEND_FUNC. - `BGFX_STATE_BLEND_EQUATION_*` - See remark 2. - `BGFX_STATE_CULL_*` - Backface culling mode. - `BGFX_STATE_WRITE_*` - Enable R, G, B, A or Z write. - `BGFX_STATE_MSAA` - Enable hardware multisample antialiasing. - `BGFX_STATE_PT_[TRISTRIP/LINES/POINTS]` - Primitive type.</param> /// <param name="_rgba">Sets blend factor used by `BGFX_STATE_BLEND_FACTOR` and `BGFX_STATE_BLEND_INV_FACTOR` blend modes.</param> extern fn bgfx_encoder_set_state(self: ?*Encoder, _state: u64, _rgba: u32) void; /// Set condition for rendering. /// <param name="_handle">Occlusion query handle.</param> /// <param name="_visible">Render if occlusion query is visible.</param> extern fn bgfx_encoder_set_condition(self: ?*Encoder, _handle: OcclusionQueryHandle, _visible: bool) void; /// Set stencil test state. /// <param name="_fstencil">Front stencil state.</param> /// <param name="_bstencil">Back stencil state. If back is set to `BGFX_STENCIL_NONE` _fstencil is applied to both front and back facing primitives.</param> extern fn bgfx_encoder_set_stencil(self: ?*Encoder, _fstencil: u32, _bstencil: u32) void; /// Set scissor for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. /// <param name="_x">Position x from the left corner of the window.</param> /// <param name="_y">Position y from the top corner of the window.</param> /// <param name="_width">Width of view scissor region.</param> /// <param name="_height">Height of view scissor region.</param> extern fn bgfx_encoder_set_scissor(self: ?*Encoder, _x: u16, _y: u16, _width: u16, _height: u16) u16; /// Set scissor from cache for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. /// <param name="_cache">Index in scissor cache.</param> extern fn bgfx_encoder_set_scissor_cached(self: ?*Encoder, _cache: u16) void; /// Set model matrix for draw primitive. If it is not called, /// the model will be rendered with an identity model matrix. 
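// Usage sketch (illustrative, not part of the generated bindings): the bracket
// every worker thread needs around its draw submission. `encoderBegin` can
// return null when no encoder is available, so the result must be checked; all
// state and submit calls in between go through the returned encoder, using the
// bgfx_encoder_* functions bound in this file.
fn exampleEncoderScope() void {
    if (encoderBegin(true)) |enc| {
        // Encoder-based state, buffer and submit calls go here.
        encoderEnd(enc);
    }
}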
/// <param name="_mtx">Pointer to first matrix in array.</param> /// <param name="_num">Number of matrices in array.</param> extern fn bgfx_encoder_set_transform(self: ?*Encoder, _mtx: ?*const anyopaque, _num: u16) u32; /// Set model matrix from matrix cache for draw primitive. /// <param name="_cache">Index in matrix cache.</param> /// <param name="_num">Number of matrices from cache.</param> extern fn bgfx_encoder_set_transform_cached(self: ?*Encoder, _cache: u32, _num: u16) void; /// Reserve matrices in internal matrix cache. /// @attention Pointer returned can be modified until `bgfx::frame` is called. /// <param name="_transform">Pointer to `Transform` structure.</param> /// <param name="_num">Number of matrices.</param> extern fn bgfx_encoder_alloc_transform(self: ?*Encoder, _transform: [*c]Transform, _num: u16) u32; /// Set shader uniform parameter for draw primitive. /// <param name="_handle">Uniform.</param> /// <param name="_value">Pointer to uniform data.</param> /// <param name="_num">Number of elements. Passing `UINT16_MAX` will use the _num passed on uniform creation.</param> extern fn bgfx_encoder_set_uniform(self: ?*Encoder, _handle: UniformHandle, _value: ?*const anyopaque, _num: u16) void; /// Set index buffer for draw primitive. /// <param name="_handle">Index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> extern fn bgfx_encoder_set_index_buffer(self: ?*Encoder, _handle: IndexBufferHandle, _firstIndex: u32, _numIndices: u32) void; /// Set index buffer for draw primitive. /// <param name="_handle">Dynamic index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> extern fn bgfx_encoder_set_dynamic_index_buffer(self: ?*Encoder, _handle: DynamicIndexBufferHandle, _firstIndex: u32, _numIndices: u32) void; /// Set index buffer for draw primitive. /// <param name="_tib">Transient index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> extern fn bgfx_encoder_set_transient_index_buffer(self: ?*Encoder, _tib: [*c]const TransientIndexBuffer, _firstIndex: u32, _numIndices: u32) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> extern fn bgfx_encoder_set_vertex_buffer(self: ?*Encoder, _stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> /// <param name="_layoutHandle">Vertex layout for aliasing vertex buffer. If invalid handle is used, vertex layout used for creation of vertex buffer will be used.</param> extern fn bgfx_encoder_set_vertex_buffer_with_layout(self: ?*Encoder, _stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void; /// Set vertex buffer for draw primitive. 
/// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Dynamic vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> extern fn bgfx_encoder_set_dynamic_vertex_buffer(self: ?*Encoder, _stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32) void; extern fn bgfx_encoder_set_dynamic_vertex_buffer_with_layout(self: ?*Encoder, _stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_tvb">Transient vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> extern fn bgfx_encoder_set_transient_vertex_buffer(self: ?*Encoder, _stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_tvb">Transient vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> /// <param name="_layoutHandle">Vertex layout for aliasing vertex buffer. If invalid handle is used, vertex layout used for creation of vertex buffer will be used.</param> extern fn bgfx_encoder_set_transient_vertex_buffer_with_layout(self: ?*Encoder, _stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void; /// Set number of vertices for auto generated vertices use in conjunction /// with gl_VertexID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. /// <param name="_numVertices">Number of vertices.</param> extern fn bgfx_encoder_set_vertex_count(self: ?*Encoder, _numVertices: u32) void; /// Set instance data buffer for draw primitive. /// <param name="_idb">Transient instance data buffer.</param> /// <param name="_start">First instance data.</param> /// <param name="_num">Number of data instances.</param> extern fn bgfx_encoder_set_instance_data_buffer(self: ?*Encoder, _idb: [*c]const InstanceDataBuffer, _start: u32, _num: u32) void; /// Set instance data buffer for draw primitive. /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First instance data.</param> /// <param name="_num">Number of data instances. Set instance data buffer for draw primitive.</param> extern fn bgfx_encoder_set_instance_data_from_vertex_buffer(self: ?*Encoder, _handle: VertexBufferHandle, _startVertex: u32, _num: u32) void; /// Set instance data buffer for draw primitive. /// <param name="_handle">Dynamic vertex buffer.</param> /// <param name="_startVertex">First instance data.</param> /// <param name="_num">Number of data instances.</param> extern fn bgfx_encoder_set_instance_data_from_dynamic_vertex_buffer(self: ?*Encoder, _handle: DynamicVertexBufferHandle, _startVertex: u32, _num: u32) void; /// Set number of instances for auto generated instances use in conjunction /// with gl_InstanceID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. extern fn bgfx_encoder_set_instance_count(self: ?*Encoder, _numInstances: u32) void; /// Set texture stage for draw primitive. 
/// <param name="_stage">Texture unit.</param> /// <param name="_sampler">Program sampler.</param> /// <param name="_handle">Texture handle.</param> /// <param name="_flags">Texture sampling mode. Default value UINT32_MAX uses texture sampling settings from the texture. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> extern fn bgfx_encoder_set_texture(self: ?*Encoder, _stage: u8, _sampler: UniformHandle, _handle: TextureHandle, _flags: u32) void; /// Submit an empty primitive for rendering. Uniforms and draw state /// will be applied but no geometry will be submitted. Useful in cases /// when no other draw/compute primitive is submitted to view, but it's /// desired to execute clear view. /// @remark /// These empty draw calls will sort before ordinary draw calls. /// <param name="_id">View id.</param> extern fn bgfx_encoder_touch(self: ?*Encoder, _id: ViewId) void; /// Submit primitive for rendering. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> extern fn bgfx_encoder_submit(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _depth: u32, _flags: u8) void; /// Submit primitive with occlusion query for rendering. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_occlusionQuery">Occlusion query.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> extern fn bgfx_encoder_submit_occlusion_query(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _occlusionQuery: OcclusionQueryHandle, _depth: u32, _flags: u8) void; /// Submit primitive for rendering with index and instance data info from /// indirect buffer. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_indirectHandle">Indirect buffer.</param> /// <param name="_start">First element in indirect buffer.</param> /// <param name="_num">Number of dispatches.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> extern fn bgfx_encoder_submit_indirect(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _depth: u32, _flags: u8) void; /// Set compute index buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Index buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> extern fn bgfx_encoder_set_compute_index_buffer(self: ?*Encoder, _stage: u8, _handle: IndexBufferHandle, _access: Access) void; /// Set compute vertex buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Vertex buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> extern fn bgfx_encoder_set_compute_vertex_buffer(self: ?*Encoder, _stage: u8, _handle: VertexBufferHandle, _access: Access) void; /// Set compute dynamic index buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Dynamic index buffer handle.</param> /// <param name="_access">Buffer access. 
See `Access::Enum`.</param> extern fn bgfx_encoder_set_compute_dynamic_index_buffer(self: ?*Encoder, _stage: u8, _handle: DynamicIndexBufferHandle, _access: Access) void; /// Set compute dynamic vertex buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Dynamic vertex buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> extern fn bgfx_encoder_set_compute_dynamic_vertex_buffer(self: ?*Encoder, _stage: u8, _handle: DynamicVertexBufferHandle, _access: Access) void; /// Set compute indirect buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Indirect buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> extern fn bgfx_encoder_set_compute_indirect_buffer(self: ?*Encoder, _stage: u8, _handle: IndirectBufferHandle, _access: Access) void; /// Set compute image from texture. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Texture handle.</param> /// <param name="_mip">Mip level.</param> /// <param name="_access">Image access. See `Access::Enum`.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> extern fn bgfx_encoder_set_image(self: ?*Encoder, _stage: u8, _handle: TextureHandle, _mip: u8, _access: Access, _format: TextureFormat) void; /// Dispatch compute. /// <param name="_id">View id.</param> /// <param name="_program">Compute program.</param> /// <param name="_numX">Number of groups X.</param> /// <param name="_numY">Number of groups Y.</param> /// <param name="_numZ">Number of groups Z.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> extern fn bgfx_encoder_dispatch(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _numX: u32, _numY: u32, _numZ: u32, _flags: u8) void; /// Dispatch compute indirect. /// <param name="_id">View id.</param> /// <param name="_program">Compute program.</param> /// <param name="_indirectHandle">Indirect buffer.</param> /// <param name="_start">First element in indirect buffer.</param> /// <param name="_num">Number of dispatches.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> extern fn bgfx_encoder_dispatch_indirect(self: ?*Encoder, _id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _flags: u8) void; /// Discard previously set state for draw or compute call. /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> extern fn bgfx_encoder_discard(self: ?*Encoder, _flags: u8) void; /// Blit 2D texture region between two 2D textures. /// @attention Destination texture must be created with `BGFX_TEXTURE_BLIT_DST` flag. /// @attention Availability depends on: `BGFX_CAPS_TEXTURE_BLIT`. /// <param name="_id">View id.</param> /// <param name="_dst">Destination texture handle.</param> /// <param name="_dstMip">Destination texture mip level.</param> /// <param name="_dstX">Destination texture X position.</param> /// <param name="_dstY">Destination texture Y position.</param> /// <param name="_dstZ">If texture is 2D this argument should be 0. If destination texture is cube this argument represents destination texture cube face. 
/// For 3D texture this argument represents destination texture Z position.</param>
/// <param name="_src">Source texture handle.</param>
/// <param name="_srcMip">Source texture mip level.</param>
/// <param name="_srcX">Source texture X position.</param>
/// <param name="_srcY">Source texture Y position.</param>
/// <param name="_srcZ">If texture is 2D this argument should be 0. If source texture is cube this argument represents source texture cube face. For 3D texture this argument represents source texture Z position.</param>
/// <param name="_width">Width of region.</param>
/// <param name="_height">Height of region.</param>
/// <param name="_depth">If texture is 3D this argument represents depth of region, otherwise it's unused.</param>
extern fn bgfx_encoder_blit(self: ?*Encoder, _id: ViewId, _dst: TextureHandle, _dstMip: u8, _dstX: u16, _dstY: u16, _dstZ: u16, _src: TextureHandle, _srcMip: u8, _srcX: u16, _srcY: u16, _srcZ: u16, _width: u16, _height: u16, _depth: u16) void;
/// Request screen shot of window back buffer.
/// @remarks
/// `bgfx::CallbackI::screenShot` must be implemented.
/// @attention Frame buffer handle must be created with OS' target native window handle.
/// <param name="_handle">Frame buffer handle. If handle is `BGFX_INVALID_HANDLE` request will be made for main window back buffer.</param>
/// <param name="_filePath">Will be passed to `bgfx::CallbackI::screenShot` callback.</param>
pub inline fn requestScreenShot(_handle: FrameBufferHandle, _filePath: [*c]const u8) void {
    return bgfx_request_screen_shot(_handle, _filePath);
}
extern fn bgfx_request_screen_shot(_handle: FrameBufferHandle, _filePath: [*c]const u8) void;
/// Render frame.
/// @attention `bgfx::renderFrame` is blocking call. It waits for
/// `bgfx::frame` to be called from API thread to process frame.
/// If timeout value is passed call will timeout and return even
/// if `bgfx::frame` is not called.
/// @warning This call should be only used on platforms that don't
/// allow creating separate rendering thread. If it is called before
/// bgfx::init, render thread won't be created by bgfx::init call.
/// <param name="_msecs">Timeout in milliseconds.</param>
pub inline fn renderFrame(_msecs: i32) RenderFrame {
    return bgfx_render_frame(_msecs);
}
extern fn bgfx_render_frame(_msecs: i32) RenderFrame;
/// Set platform data.
/// @warning Must be called before `bgfx::init`.
/// <param name="_data">Platform data.</param>
pub inline fn setPlatformData(_data: [*c]const PlatformData) void {
    return bgfx_set_platform_data(_data);
}
extern fn bgfx_set_platform_data(_data: [*c]const PlatformData) void;
/// Get internal data for interop.
/// @attention It's expected you understand some bgfx internals before you
/// use this call.
/// @warning Must be called only on render thread.
pub inline fn getInternalData() [*c]const InternalData {
    return bgfx_get_internal_data();
}
extern fn bgfx_get_internal_data() [*c]const InternalData;
/// Override internal texture with externally created texture. Previously
/// created internal texture will be released.
/// @attention It's expected you understand some bgfx internals before you
/// use this call.
/// @warning Must be called only on render thread.
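// Usage sketch (illustrative, not part of the generated bindings): requesting
// a screenshot of the main window back buffer. Assumes frame buffer handles
// are the usual bgfx `idx: u16` structs and that 0xffff marks
// BGFX_INVALID_HANDLE; `bgfx::CallbackI::screenShot` must be implemented for
// the request to produce anything.
fn exampleScreenShot() void {
    const invalid = FrameBufferHandle{ .idx = 0xffff };
    requestScreenShot(invalid, "screenshots/frame");
}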
/// <param name="_handle">Texture handle.</param> /// <param name="_ptr">Native API pointer to texture.</param> pub inline fn overrideInternalTexturePtr(_handle: TextureHandle, _ptr: usize) usize { return bgfx_override_internal_texture_ptr(_handle, _ptr); } extern fn bgfx_override_internal_texture_ptr(_handle: TextureHandle, _ptr: usize) usize; /// Override internal texture by creating new texture. Previously created /// internal texture will released. /// @attention It's expected you understand some bgfx internals before you /// use this call. /// @returns Native API pointer to texture. If result is 0, texture is not created yet from the /// main thread. /// @warning Must be called only on render thread. /// <param name="_handle">Texture handle.</param> /// <param name="_width">Width.</param> /// <param name="_height">Height.</param> /// <param name="_numMips">Number of mip-maps.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> /// <param name="_flags">Texture creation (see `BGFX_TEXTURE_*`.), and sampler (see `BGFX_SAMPLER_*`) flags. Default texture sampling mode is linear, and wrap mode is repeat. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> pub inline fn overrideInternalTexture(_handle: TextureHandle, _width: u16, _height: u16, _numMips: u8, _format: TextureFormat, _flags: u64) usize { return bgfx_override_internal_texture(_handle, _width, _height, _numMips, _format, _flags); } extern fn bgfx_override_internal_texture(_handle: TextureHandle, _width: u16, _height: u16, _numMips: u8, _format: TextureFormat, _flags: u64) usize; /// Sets a debug marker. This allows you to group graphics calls together for easy browsing in /// graphics debugging tools. /// <param name="_marker">Marker string.</param> pub inline fn setMarker(_marker: [*c]const u8) void { return bgfx_set_marker(_marker); } extern fn bgfx_set_marker(_marker: [*c]const u8) void; /// Set render states for draw primitive. /// @remarks /// 1. To set up more complex states use: /// `BGFX_STATE_ALPHA_REF(_ref)`, /// `BGFX_STATE_POINT_SIZE(_size)`, /// `BGFX_STATE_BLEND_FUNC(_src, _dst)`, /// `BGFX_STATE_BLEND_FUNC_SEPARATE(_srcRGB, _dstRGB, _srcA, _dstA)`, /// `BGFX_STATE_BLEND_EQUATION(_equation)`, /// `BGFX_STATE_BLEND_EQUATION_SEPARATE(_equationRGB, _equationA)` /// 2. `BGFX_STATE_BLEND_EQUATION_ADD` is set when no other blend /// equation is specified. /// <param name="_state">State flags. Default state for primitive type is triangles. See: `BGFX_STATE_DEFAULT`. - `BGFX_STATE_DEPTH_TEST_*` - Depth test function. - `BGFX_STATE_BLEND_*` - See remark 1 about BGFX_STATE_BLEND_FUNC. - `BGFX_STATE_BLEND_EQUATION_*` - See remark 2. - `BGFX_STATE_CULL_*` - Backface culling mode. - `BGFX_STATE_WRITE_*` - Enable R, G, B, A or Z write. - `BGFX_STATE_MSAA` - Enable hardware multisample antialiasing. - `BGFX_STATE_PT_[TRISTRIP/LINES/POINTS]` - Primitive type.</param> /// <param name="_rgba">Sets blend factor used by `BGFX_STATE_BLEND_FACTOR` and `BGFX_STATE_BLEND_INV_FACTOR` blend modes.</param> pub inline fn setState(_state: u64, _rgba: u32) void { return bgfx_set_state(_state, _rgba); } extern fn bgfx_set_state(_state: u64, _rgba: u32) void; /// Set condition for rendering. 
/// <param name="_handle">Occlusion query handle.</param> /// <param name="_visible">Render if occlusion query is visible.</param> pub inline fn setCondition(_handle: OcclusionQueryHandle, _visible: bool) void { return bgfx_set_condition(_handle, _visible); } extern fn bgfx_set_condition(_handle: OcclusionQueryHandle, _visible: bool) void; /// Set stencil test state. /// <param name="_fstencil">Front stencil state.</param> /// <param name="_bstencil">Back stencil state. If back is set to `BGFX_STENCIL_NONE` _fstencil is applied to both front and back facing primitives.</param> pub inline fn setStencil(_fstencil: u32, _bstencil: u32) void { return bgfx_set_stencil(_fstencil, _bstencil); } extern fn bgfx_set_stencil(_fstencil: u32, _bstencil: u32) void; /// Set scissor for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. /// <param name="_x">Position x from the left corner of the window.</param> /// <param name="_y">Position y from the top corner of the window.</param> /// <param name="_width">Width of view scissor region.</param> /// <param name="_height">Height of view scissor region.</param> pub inline fn setScissor(_x: u16, _y: u16, _width: u16, _height: u16) u16 { return bgfx_set_scissor(_x, _y, _width, _height); } extern fn bgfx_set_scissor(_x: u16, _y: u16, _width: u16, _height: u16) u16; /// Set scissor from cache for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. /// <param name="_cache">Index in scissor cache.</param> pub inline fn setScissorCached(_cache: u16) void { return bgfx_set_scissor_cached(_cache); } extern fn bgfx_set_scissor_cached(_cache: u16) void; /// Set model matrix for draw primitive. If it is not called, /// the model will be rendered with an identity model matrix. /// <param name="_mtx">Pointer to first matrix in array.</param> /// <param name="_num">Number of matrices in array.</param> pub inline fn setTransform(_mtx: ?*const anyopaque, _num: u16) u32 { return bgfx_set_transform(_mtx, _num); } extern fn bgfx_set_transform(_mtx: ?*const anyopaque, _num: u16) u32; /// Set model matrix from matrix cache for draw primitive. /// <param name="_cache">Index in matrix cache.</param> /// <param name="_num">Number of matrices from cache.</param> pub inline fn setTransformCached(_cache: u32, _num: u16) void { return bgfx_set_transform_cached(_cache, _num); } extern fn bgfx_set_transform_cached(_cache: u32, _num: u16) void; /// Reserve matrices in internal matrix cache. /// @attention Pointer returned can be modified until `bgfx::frame` is called. /// <param name="_transform">Pointer to `Transform` structure.</param> /// <param name="_num">Number of matrices.</param> pub inline fn allocTransform(_transform: [*c]Transform, _num: u16) u32 { return bgfx_alloc_transform(_transform, _num); } extern fn bgfx_alloc_transform(_transform: [*c]Transform, _num: u16) u32; /// Set shader uniform parameter for draw primitive. /// <param name="_handle">Uniform.</param> /// <param name="_value">Pointer to uniform data.</param> /// <param name="_num">Number of elements. Passing `UINT16_MAX` will use the _num passed on uniform creation.</param> pub inline fn setUniform(_handle: UniformHandle, _value: ?*const anyopaque, _num: u16) void { return bgfx_set_uniform(_handle, _value, _num); } extern fn bgfx_set_uniform(_handle: UniformHandle, _value: ?*const anyopaque, _num: u16) void; /// Set index buffer for draw primitive. 
/// <param name="_handle">Index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> pub inline fn setIndexBuffer(_handle: IndexBufferHandle, _firstIndex: u32, _numIndices: u32) void { return bgfx_set_index_buffer(_handle, _firstIndex, _numIndices); } extern fn bgfx_set_index_buffer(_handle: IndexBufferHandle, _firstIndex: u32, _numIndices: u32) void; /// Set index buffer for draw primitive. /// <param name="_handle">Dynamic index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> pub inline fn setDynamicIndexBuffer(_handle: DynamicIndexBufferHandle, _firstIndex: u32, _numIndices: u32) void { return bgfx_set_dynamic_index_buffer(_handle, _firstIndex, _numIndices); } extern fn bgfx_set_dynamic_index_buffer(_handle: DynamicIndexBufferHandle, _firstIndex: u32, _numIndices: u32) void; /// Set index buffer for draw primitive. /// <param name="_tib">Transient index buffer.</param> /// <param name="_firstIndex">First index to render.</param> /// <param name="_numIndices">Number of indices to render.</param> pub inline fn setTransientIndexBuffer(_tib: [*c]const TransientIndexBuffer, _firstIndex: u32, _numIndices: u32) void { return bgfx_set_transient_index_buffer(_tib, _firstIndex, _numIndices); } extern fn bgfx_set_transient_index_buffer(_tib: [*c]const TransientIndexBuffer, _firstIndex: u32, _numIndices: u32) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> pub inline fn setVertexBuffer(_stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32) void { return bgfx_set_vertex_buffer(_stream, _handle, _startVertex, _numVertices); } extern fn bgfx_set_vertex_buffer(_stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> /// <param name="_layoutHandle">Vertex layout for aliasing vertex buffer. If invalid handle is used, vertex layout used for creation of vertex buffer will be used.</param> pub inline fn setVertexBufferWithLayout(_stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void { return bgfx_set_vertex_buffer_with_layout(_stream, _handle, _startVertex, _numVertices, _layoutHandle); } extern fn bgfx_set_vertex_buffer_with_layout(_stream: u8, _handle: VertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void; /// Set vertex buffer for draw primitive. 
/// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Dynamic vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> pub inline fn setDynamicVertexBuffer(_stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32) void { return bgfx_set_dynamic_vertex_buffer(_stream, _handle, _startVertex, _numVertices); } extern fn bgfx_set_dynamic_vertex_buffer(_stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_handle">Dynamic vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> /// <param name="_layoutHandle">Vertex layout for aliasing vertex buffer. If invalid handle is used, vertex layout used for creation of vertex buffer will be used.</param> pub inline fn setDynamicVertexBufferWithLayout(_stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void { return bgfx_set_dynamic_vertex_buffer_with_layout(_stream, _handle, _startVertex, _numVertices, _layoutHandle); } extern fn bgfx_set_dynamic_vertex_buffer_with_layout(_stream: u8, _handle: DynamicVertexBufferHandle, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_tvb">Transient vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> pub inline fn setTransientVertexBuffer(_stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32) void { return bgfx_set_transient_vertex_buffer(_stream, _tvb, _startVertex, _numVertices); } extern fn bgfx_set_transient_vertex_buffer(_stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32) void; /// Set vertex buffer for draw primitive. /// <param name="_stream">Vertex stream.</param> /// <param name="_tvb">Transient vertex buffer.</param> /// <param name="_startVertex">First vertex to render.</param> /// <param name="_numVertices">Number of vertices to render.</param> /// <param name="_layoutHandle">Vertex layout for aliasing vertex buffer. If invalid handle is used, vertex layout used for creation of vertex buffer will be used.</param> pub inline fn setTransientVertexBufferWithLayout(_stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void { return bgfx_set_transient_vertex_buffer_with_layout(_stream, _tvb, _startVertex, _numVertices, _layoutHandle); } extern fn bgfx_set_transient_vertex_buffer_with_layout(_stream: u8, _tvb: [*c]const TransientVertexBuffer, _startVertex: u32, _numVertices: u32, _layoutHandle: VertexLayoutHandle) void; /// Set number of vertices for auto generated vertices use in conjunction /// with gl_VertexID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. /// <param name="_numVertices">Number of vertices.</param> pub inline fn setVertexCount(_numVertices: u32) void { return bgfx_set_vertex_count(_numVertices); } extern fn bgfx_set_vertex_count(_numVertices: u32) void; /// Set instance data buffer for draw primitive. 
/// <param name="_idb">Transient instance data buffer.</param> /// <param name="_start">First instance data.</param> /// <param name="_num">Number of data instances.</param> pub inline fn setInstanceDataBuffer(_idb: [*c]const InstanceDataBuffer, _start: u32, _num: u32) void { return bgfx_set_instance_data_buffer(_idb, _start, _num); } extern fn bgfx_set_instance_data_buffer(_idb: [*c]const InstanceDataBuffer, _start: u32, _num: u32) void; /// Set instance data buffer for draw primitive. /// <param name="_handle">Vertex buffer.</param> /// <param name="_startVertex">First instance data.</param> /// <param name="_num">Number of data instances. Set instance data buffer for draw primitive.</param> pub inline fn setInstanceDataFromVertexBuffer(_handle: VertexBufferHandle, _startVertex: u32, _num: u32) void { return bgfx_set_instance_data_from_vertex_buffer(_handle, _startVertex, _num); } extern fn bgfx_set_instance_data_from_vertex_buffer(_handle: VertexBufferHandle, _startVertex: u32, _num: u32) void; /// Set instance data buffer for draw primitive. /// <param name="_handle">Dynamic vertex buffer.</param> /// <param name="_startVertex">First instance data.</param> /// <param name="_num">Number of data instances.</param> pub inline fn setInstanceDataFromDynamicVertexBuffer(_handle: DynamicVertexBufferHandle, _startVertex: u32, _num: u32) void { return bgfx_set_instance_data_from_dynamic_vertex_buffer(_handle, _startVertex, _num); } extern fn bgfx_set_instance_data_from_dynamic_vertex_buffer(_handle: DynamicVertexBufferHandle, _startVertex: u32, _num: u32) void; /// Set number of instances for auto generated instances use in conjunction /// with gl_InstanceID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. pub inline fn setInstanceCount(_numInstances: u32) void { return bgfx_set_instance_count(_numInstances); } extern fn bgfx_set_instance_count(_numInstances: u32) void; /// Set texture stage for draw primitive. /// <param name="_stage">Texture unit.</param> /// <param name="_sampler">Program sampler.</param> /// <param name="_handle">Texture handle.</param> /// <param name="_flags">Texture sampling mode. Default value UINT32_MAX uses texture sampling settings from the texture. - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap mode. - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic sampling.</param> pub inline fn setTexture(_stage: u8, _sampler: UniformHandle, _handle: TextureHandle, _flags: u32) void { return bgfx_set_texture(_stage, _sampler, _handle, _flags); } extern fn bgfx_set_texture(_stage: u8, _sampler: UniformHandle, _handle: TextureHandle, _flags: u32) void; /// Submit an empty primitive for rendering. Uniforms and draw state /// will be applied but no geometry will be submitted. /// @remark /// These empty draw calls will sort before ordinary draw calls. /// <param name="_id">View id.</param> pub inline fn touch(_id: ViewId) void { return bgfx_touch(_id); } extern fn bgfx_touch(_id: ViewId) void; /// Submit primitive for rendering. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Which states to discard for next draw. 
See `BGFX_DISCARD_*`.</param> pub inline fn submit(_id: ViewId, _program: ProgramHandle, _depth: u32, _flags: u8) void { return bgfx_submit(_id, _program, _depth, _flags); } extern fn bgfx_submit(_id: ViewId, _program: ProgramHandle, _depth: u32, _flags: u8) void; /// Submit primitive with occlusion query for rendering. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_occlusionQuery">Occlusion query.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Which states to discard for next draw. See `BGFX_DISCARD_*`.</param> pub inline fn submitOcclusionQuery(_id: ViewId, _program: ProgramHandle, _occlusionQuery: OcclusionQueryHandle, _depth: u32, _flags: u8) void { return bgfx_submit_occlusion_query(_id, _program, _occlusionQuery, _depth, _flags); } extern fn bgfx_submit_occlusion_query(_id: ViewId, _program: ProgramHandle, _occlusionQuery: OcclusionQueryHandle, _depth: u32, _flags: u8) void; /// Submit primitive for rendering with index and instance data info from /// indirect buffer. /// <param name="_id">View id.</param> /// <param name="_program">Program.</param> /// <param name="_indirectHandle">Indirect buffer.</param> /// <param name="_start">First element in indirect buffer.</param> /// <param name="_num">Number of dispatches.</param> /// <param name="_depth">Depth for sorting.</param> /// <param name="_flags">Which states to discard for next draw. See `BGFX_DISCARD_*`.</param> pub inline fn submitIndirect(_id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _depth: u32, _flags: u8) void { return bgfx_submit_indirect(_id, _program, _indirectHandle, _start, _num, _depth, _flags); } extern fn bgfx_submit_indirect(_id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _depth: u32, _flags: u8) void; /// Set compute index buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Index buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeIndexBuffer(_stage: u8, _handle: IndexBufferHandle, _access: Access) void { return bgfx_set_compute_index_buffer(_stage, _handle, _access); } extern fn bgfx_set_compute_index_buffer(_stage: u8, _handle: IndexBufferHandle, _access: Access) void; /// Set compute vertex buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Vertex buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeVertexBuffer(_stage: u8, _handle: VertexBufferHandle, _access: Access) void { return bgfx_set_compute_vertex_buffer(_stage, _handle, _access); } extern fn bgfx_set_compute_vertex_buffer(_stage: u8, _handle: VertexBufferHandle, _access: Access) void; /// Set compute dynamic index buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Dynamic index buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeDynamicIndexBuffer(_stage: u8, _handle: DynamicIndexBufferHandle, _access: Access) void { return bgfx_set_compute_dynamic_index_buffer(_stage, _handle, _access); } extern fn bgfx_set_compute_dynamic_index_buffer(_stage: u8, _handle: DynamicIndexBufferHandle, _access: Access) void; /// Set compute dynamic vertex buffer. 
/// <param name="_stage">Compute stage.</param> /// <param name="_handle">Dynamic vertex buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeDynamicVertexBuffer(_stage: u8, _handle: DynamicVertexBufferHandle, _access: Access) void { return bgfx_set_compute_dynamic_vertex_buffer(_stage, _handle, _access); } extern fn bgfx_set_compute_dynamic_vertex_buffer(_stage: u8, _handle: DynamicVertexBufferHandle, _access: Access) void; /// Set compute indirect buffer. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Indirect buffer handle.</param> /// <param name="_access">Buffer access. See `Access::Enum`.</param> pub inline fn setComputeIndirectBuffer(_stage: u8, _handle: IndirectBufferHandle, _access: Access) void { return bgfx_set_compute_indirect_buffer(_stage, _handle, _access); } extern fn bgfx_set_compute_indirect_buffer(_stage: u8, _handle: IndirectBufferHandle, _access: Access) void; /// Set compute image from texture. /// <param name="_stage">Compute stage.</param> /// <param name="_handle">Texture handle.</param> /// <param name="_mip">Mip level.</param> /// <param name="_access">Image access. See `Access::Enum`.</param> /// <param name="_format">Texture format. See: `TextureFormat::Enum`.</param> pub inline fn setImage(_stage: u8, _handle: TextureHandle, _mip: u8, _access: Access, _format: TextureFormat) void { return bgfx_set_image(_stage, _handle, _mip, _access, _format); } extern fn bgfx_set_image(_stage: u8, _handle: TextureHandle, _mip: u8, _access: Access, _format: TextureFormat) void; /// Dispatch compute. /// <param name="_id">View id.</param> /// <param name="_program">Compute program.</param> /// <param name="_numX">Number of groups X.</param> /// <param name="_numY">Number of groups Y.</param> /// <param name="_numZ">Number of groups Z.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn dispatch(_id: ViewId, _program: ProgramHandle, _numX: u32, _numY: u32, _numZ: u32, _flags: u8) void { return bgfx_dispatch(_id, _program, _numX, _numY, _numZ, _flags); } extern fn bgfx_dispatch(_id: ViewId, _program: ProgramHandle, _numX: u32, _numY: u32, _numZ: u32, _flags: u8) void; /// Dispatch compute indirect. /// <param name="_id">View id.</param> /// <param name="_program">Compute program.</param> /// <param name="_indirectHandle">Indirect buffer.</param> /// <param name="_start">First element in indirect buffer.</param> /// <param name="_num">Number of dispatches.</param> /// <param name="_flags">Discard or preserve states. See `BGFX_DISCARD_*`.</param> pub inline fn dispatchIndirect(_id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _flags: u8) void { return bgfx_dispatch_indirect(_id, _program, _indirectHandle, _start, _num, _flags); } extern fn bgfx_dispatch_indirect(_id: ViewId, _program: ProgramHandle, _indirectHandle: IndirectBufferHandle, _start: u16, _num: u16, _flags: u8) void; /// Discard previously set state for draw or compute call. /// <param name="_flags">Draw/compute states to discard.</param> pub inline fn discard(_flags: u8) void { return bgfx_discard(_flags); } extern fn bgfx_discard(_flags: u8) void; /// Blit 2D texture region between two 2D textures. /// @attention Destination texture must be created with `BGFX_TEXTURE_BLIT_DST` flag. /// @attention Availability depends on: `BGFX_CAPS_TEXTURE_BLIT`. 
/// <param name="_id">View id.</param> /// <param name="_dst">Destination texture handle.</param> /// <param name="_dstMip">Destination texture mip level.</param> /// <param name="_dstX">Destination texture X position.</param> /// <param name="_dstY">Destination texture Y position.</param> /// <param name="_dstZ">If texture is 2D this argument should be 0. If destination texture is cube this argument represents destination texture cube face. For 3D texture this argument represents destination texture Z position.</param> /// <param name="_src">Source texture handle.</param> /// <param name="_srcMip">Source texture mip level.</param> /// <param name="_srcX">Source texture X position.</param> /// <param name="_srcY">Source texture Y position.</param> /// <param name="_srcZ">If texture is 2D this argument should be 0. If source texture is cube this argument represents source texture cube face. For 3D texture this argument represents source texture Z position.</param> /// <param name="_width">Width of region.</param> /// <param name="_height">Height of region.</param> /// <param name="_depth">If texture is 3D this argument represents depth of region, otherwise it's unused.</param> pub inline fn blit(_id: ViewId, _dst: TextureHandle, _dstMip: u8, _dstX: u16, _dstY: u16, _dstZ: u16, _src: TextureHandle, _srcMip: u8, _srcX: u16, _srcY: u16, _srcZ: u16, _width: u16, _height: u16, _depth: u16) void { return bgfx_blit(_id, _dst, _dstMip, _dstX, _dstY, _dstZ, _src, _srcMip, _srcX, _srcY, _srcZ, _width, _height, _depth); } extern fn bgfx_blit(_id: ViewId, _dst: TextureHandle, _dstMip: u8, _dstX: u16, _dstY: u16, _dstZ: u16, _src: TextureHandle, _srcMip: u8, _srcX: u16, _srcY: u16, _srcZ: u16, _width: u16, _height: u16, _depth: u16) void;
bindings/zig/bgfx.zig
const std = @import("std"); const assert = std.debug.assert; const print = std.debug.print; const tools = @import("tools"); const Vec4 = [4]i8; fn dist(a: Vec4, b: Vec4) u32 { var d: u32 = 0; for (a) |_, i| { d += @intCast(u32, std.math.absInt(@as(i31, a[i]) - b[i]) catch unreachable); } return d; } pub fn run(input_text: []const u8, allocator: std.mem.Allocator) ![2][]const u8 { var arena = std.heap.ArenaAllocator.init(allocator); defer arena.deinit(); const param: struct { points: []const Vec4, } = param: { var points = std.ArrayList(Vec4).init(arena.allocator()); var it = std.mem.tokenize(u8, input_text, "\n\r"); while (it.next()) |line| { if (tools.match_pattern("{},{},{},{}", line)) |fields| { try points.append(Vec4{ @intCast(i8, fields[0].imm), @intCast(i8, fields[1].imm), @intCast(i8, fields[2].imm), @intCast(i8, fields[3].imm), }); } else unreachable; } break :param .{ .points = points.items }; }; const ans1 = ans: { const Tag = u16; const cluster_tag = try allocator.alloc(Tag, param.points.len); defer allocator.free(cluster_tag); var connected = std.ArrayList(Tag).init(arena.allocator()); defer connected.deinit(); var next_tag: Tag = 0; for (param.points) |p, i| { try connected.resize(0); if (i >= 1) { for (param.points[0 .. i - 1]) |o, j| { const d = dist(p, o); if (d <= 3) { const t = cluster_tag[j]; if (std.mem.indexOfScalar(Tag, connected.items, t) == null) try connected.append(t); } } } if (connected.items.len == 0) { try connected.append(next_tag); next_tag += 1; } const t = connected.items[0]; cluster_tag[i] = t; for (cluster_tag[0..i]) |*tag| { if (std.mem.indexOfScalar(Tag, connected.items, tag.*) != null) tag.* = t; } } const unique = &connected; try unique.resize(0); for (cluster_tag) |t| { if (std.mem.indexOfScalar(Tag, unique.items, t) == null) try unique.append(t); } break :ans unique.items.len; }; const ans2 = ans: { break :ans "gratis"; }; return [_][]const u8{ try std.fmt.allocPrint(allocator, "{}", .{ans1}), try std.fmt.allocPrint(allocator, "{s}", .{ans2}), }; } pub const main = tools.defaultMain("2018/input_day25.txt", run);
2018/day25.zig
const std = @import("std"); pub const Client = struct { file: ?std.fs.File = null, allocator: *std.mem.Allocator, host: []u8, pub fn init(allocator: *std.mem.Allocator, host: []const u8, port: u16) !Client { const result = Client{ .file = try std.net.tcpConnectToHost(allocator, host, port), .allocator = allocator, .host = try allocator.alloc(u8, host.len), }; std.mem.copy(u8, result.host, host); return result; } pub fn deinit(self: *Client) void { if (self.file) |f| { self.allocator.free(self.host); f.close(); self.file = null; } } pub fn requestAlloc(self: Client, url: []const u8) ![]const u8 { errdefer std.log.err("Client requestAlloc: verlasse die Funktion wegen Fehler", .{}); defer std.log.debug("Client requestAlloc: Ende", .{}); std.log.debug("Client requestAlloc: Anfang", .{}); if (self.file) |f| { try f.outStream().print("GET {} HTTP/1.1\r\n" ++ // try f.outStream(). "host: {}\r\n" ++ "\r\n", .{ url, self.host }); std.log.debug("Request sent", .{}); var response_code: ?u8 = null; var content_length: ?u32 = null; var eofHeader = false; while (!eofHeader) { var line = std.ArrayList(u8).init(self.allocator); defer line.deinit(); try f.inStream().readUntilDelimiterArrayList(&line, '\n', 100_000); std.log.debug("Client Zeile gelesen: {}", .{line.items}); eofHeader = std.mem.eql(u8, line.items, "\r"); if (!eofHeader) { const response_code_start = "HTTP/1.1 "; const content_length_start = "Content-Length: "; if (std.mem.indexOf(u8, line.items, response_code_start)) |pos| { if (pos == 0) { response_code = std.fmt.parseInt(u8, line.items[response_code_start.len .. response_code_start.len + 3], 10) catch return error.BadHeader; std.log.debug("Client response code erfolgreich empfangen ({})", .{response_code}); } } if (std.mem.indexOf(u8, line.items, content_length_start)) |pos| { if (pos == 0) { // line.items.len-1 wg \r content_length = std.fmt.parseInt(u8, line.items[content_length_start.len .. line.items.len - 1], 10) catch return error.BadHeader; std.log.debug("Client content-length empfangen ({})", .{content_length}); } } } } if (content_length == null or response_code == null) return error.BadHeader; const response = try self.allocator.alloc(u8, content_length.?); errdefer self.allocator.free(response); if ((try f.readAll(response)) != response.len) return error.BadBodyLength; return response; } else { return error.NoConnection; } } };
src/http_client.zig
pub fn Register(comptime R: type) type { return RegisterRW(R, R); } pub fn RegisterRW(comptime Read: type, comptime Write: type) type { return struct { raw_ptr: *volatile u32, const Self = @This(); pub fn init(address: usize) Self { return Self{ .raw_ptr = @intToPtr(*volatile u32, address) }; } pub fn initRange(address: usize, comptime dim_increment: usize, comptime num_registers: usize) [num_registers]Self { var registers: [num_registers]Self = undefined; var i: usize = 0; while (i < num_registers) : (i += 1) { registers[i] = Self.init(address + (i * dim_increment)); } return registers; } pub fn read(self: Self) Read { return @bitCast(Read, self.raw_ptr.*); } pub fn write(self: Self, value: Write) void { self.raw_ptr.* = @bitCast(u32, value); } pub fn modify(self: Self, new_value: anytype) void { if (Read != Write) { @compileError("Can't modify because read and write types for this register aren't the same."); } var old_value = self.read(); const info = @typeInfo(@TypeOf(new_value)); inline for (info.Struct.fields) |field| { @field(old_value, field.name) = @field(new_value, field.name); } self.write(old_value); } pub fn read_raw(self: Self) u32 { return self.raw_ptr.*; } pub fn write_raw(self: Self, value: u32) void { self.raw_ptr.* = value; } pub fn default_read_value(self: Self) Read { return Read{}; } pub fn default_write_value(self: Self) Write { return Write{}; } }; } pub const device_name = "STM32F103"; pub const device_revision = "1.1"; pub const device_description = "STM32F103"; pub const cpu = struct { pub const name = "CM3"; pub const revision = "r1p1"; pub const endian = "little"; pub const mpu_present = false; pub const fpu_present = false; pub const vendor_systick_config = false; pub const nvic_prio_bits = 4; }; /// Flexible static memory controller pub const FSMC = struct { const base_address = 0xa0000000; /// BCR1 const BCR1_val = packed struct { /// MBKEN [0:0] /// MBKEN MBKEN: u1 = 0, /// MUXEN [1:1] /// MUXEN MUXEN: u1 = 0, /// MTYP [2:3] /// MTYP MTYP: u2 = 0, /// MWID [4:5] /// MWID MWID: u2 = 1, /// FACCEN [6:6] /// FACCEN FACCEN: u1 = 1, /// unused [7:7] _unused7: u1 = 1, /// BURSTEN [8:8] /// BURSTEN BURSTEN: u1 = 0, /// WAITPOL [9:9] /// WAITPOL WAITPOL: u1 = 0, /// unused [10:10] _unused10: u1 = 0, /// WAITCFG [11:11] /// WAITCFG WAITCFG: u1 = 0, /// WREN [12:12] /// WREN WREN: u1 = 1, /// WAITEN [13:13] /// WAITEN WAITEN: u1 = 1, /// EXTMOD [14:14] /// EXTMOD EXTMOD: u1 = 0, /// ASYNCWAIT [15:15] /// ASYNCWAIT ASYNCWAIT: u1 = 0, /// unused [16:18] _unused16: u3 = 0, /// CBURSTRW [19:19] /// CBURSTRW CBURSTRW: u1 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// SRAM/NOR-Flash chip-select control register pub const BCR1 = Register(BCR1_val).init(base_address + 0x0); /// BTR1 const BTR1_val = packed struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// BUSTURN [16:19] /// BUSTURN BUSTURN: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD ACCMOD: u2 = 3, /// unused [30:31] _unused30: u2 = 3, }; /// SRAM/NOR-Flash chip-select timing register pub const BTR1 = Register(BTR1_val).init(base_address + 0x4); /// BCR2 const BCR2_val = packed struct { /// MBKEN [0:0] /// MBKEN MBKEN: u1 = 0, /// MUXEN [1:1] /// MUXEN MUXEN: u1 = 0, /// MTYP [2:3] /// MTYP MTYP: u2 = 0, /// MWID [4:5] /// MWID MWID: u2 = 1, /// FACCEN [6:6] /// FACCEN FACCEN: u1 = 1, /// unused [7:7] _unused7: u1 = 
1, /// BURSTEN [8:8] /// BURSTEN BURSTEN: u1 = 0, /// WAITPOL [9:9] /// WAITPOL WAITPOL: u1 = 0, /// WRAPMOD [10:10] /// WRAPMOD WRAPMOD: u1 = 0, /// WAITCFG [11:11] /// WAITCFG WAITCFG: u1 = 0, /// WREN [12:12] /// WREN WREN: u1 = 1, /// WAITEN [13:13] /// WAITEN WAITEN: u1 = 1, /// EXTMOD [14:14] /// EXTMOD EXTMOD: u1 = 0, /// ASYNCWAIT [15:15] /// ASYNCWAIT ASYNCWAIT: u1 = 0, /// unused [16:18] _unused16: u3 = 0, /// CBURSTRW [19:19] /// CBURSTRW CBURSTRW: u1 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// SRAM/NOR-Flash chip-select control register pub const BCR2 = Register(BCR2_val).init(base_address + 0x8); /// BTR2 const BTR2_val = packed struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// BUSTURN [16:19] /// BUSTURN BUSTURN: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD ACCMOD: u2 = 3, /// unused [30:31] _unused30: u2 = 3, }; /// SRAM/NOR-Flash chip-select timing register pub const BTR2 = Register(BTR2_val).init(base_address + 0xc); /// BCR3 const BCR3_val = packed struct { /// MBKEN [0:0] /// MBKEN MBKEN: u1 = 0, /// MUXEN [1:1] /// MUXEN MUXEN: u1 = 0, /// MTYP [2:3] /// MTYP MTYP: u2 = 0, /// MWID [4:5] /// MWID MWID: u2 = 1, /// FACCEN [6:6] /// FACCEN FACCEN: u1 = 1, /// unused [7:7] _unused7: u1 = 1, /// BURSTEN [8:8] /// BURSTEN BURSTEN: u1 = 0, /// WAITPOL [9:9] /// WAITPOL WAITPOL: u1 = 0, /// WRAPMOD [10:10] /// WRAPMOD WRAPMOD: u1 = 0, /// WAITCFG [11:11] /// WAITCFG WAITCFG: u1 = 0, /// WREN [12:12] /// WREN WREN: u1 = 1, /// WAITEN [13:13] /// WAITEN WAITEN: u1 = 1, /// EXTMOD [14:14] /// EXTMOD EXTMOD: u1 = 0, /// ASYNCWAIT [15:15] /// ASYNCWAIT ASYNCWAIT: u1 = 0, /// unused [16:18] _unused16: u3 = 0, /// CBURSTRW [19:19] /// CBURSTRW CBURSTRW: u1 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// SRAM/NOR-Flash chip-select control register pub const BCR3 = Register(BCR3_val).init(base_address + 0x10); /// BTR3 const BTR3_val = packed struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// BUSTURN [16:19] /// BUSTURN BUSTURN: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD ACCMOD: u2 = 3, /// unused [30:31] _unused30: u2 = 3, }; /// SRAM/NOR-Flash chip-select timing register pub const BTR3 = Register(BTR3_val).init(base_address + 0x14); /// BCR4 const BCR4_val = packed struct { /// MBKEN [0:0] /// MBKEN MBKEN: u1 = 0, /// MUXEN [1:1] /// MUXEN MUXEN: u1 = 0, /// MTYP [2:3] /// MTYP MTYP: u2 = 0, /// MWID [4:5] /// MWID MWID: u2 = 1, /// FACCEN [6:6] /// FACCEN FACCEN: u1 = 1, /// unused [7:7] _unused7: u1 = 1, /// BURSTEN [8:8] /// BURSTEN BURSTEN: u1 = 0, /// WAITPOL [9:9] /// WAITPOL WAITPOL: u1 = 0, /// WRAPMOD [10:10] /// WRAPMOD WRAPMOD: u1 = 0, /// WAITCFG [11:11] /// WAITCFG WAITCFG: u1 = 0, /// WREN [12:12] /// WREN WREN: u1 = 1, /// WAITEN [13:13] /// WAITEN WAITEN: u1 = 1, /// EXTMOD [14:14] /// EXTMOD EXTMOD: u1 = 0, /// ASYNCWAIT [15:15] /// ASYNCWAIT ASYNCWAIT: u1 = 0, /// unused [16:18] _unused16: u3 = 0, /// CBURSTRW [19:19] /// CBURSTRW CBURSTRW: u1 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// SRAM/NOR-Flash chip-select control register pub const BCR4 = Register(BCR4_val).init(base_address + 0x18); /// BTR4 const BTR4_val = packed 
struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// BUSTURN [16:19] /// BUSTURN BUSTURN: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD ACCMOD: u2 = 3, /// unused [30:31] _unused30: u2 = 3, }; /// SRAM/NOR-Flash chip-select timing register pub const BTR4 = Register(BTR4_val).init(base_address + 0x1c); /// PCR2 const PCR2_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// PWAITEN [1:1] /// PWAITEN PWAITEN: u1 = 0, /// PBKEN [2:2] /// PBKEN PBKEN: u1 = 0, /// PTYP [3:3] /// PTYP PTYP: u1 = 1, /// PWID [4:5] /// PWID PWID: u2 = 1, /// ECCEN [6:6] /// ECCEN ECCEN: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// TCLR [9:12] /// TCLR TCLR: u4 = 0, /// TAR [13:16] /// TAR TAR: u4 = 0, /// ECCPS [17:19] /// ECCPS ECCPS: u3 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// PC Card/NAND Flash control register pub const PCR2 = Register(PCR2_val).init(base_address + 0x60); /// SR2 const SR2_val = packed struct { /// IRS [0:0] /// IRS IRS: u1 = 0, /// ILS [1:1] /// ILS ILS: u1 = 0, /// IFS [2:2] /// IFS IFS: u1 = 0, /// IREN [3:3] /// IREN IREN: u1 = 0, /// ILEN [4:4] /// ILEN ILEN: u1 = 0, /// IFEN [5:5] /// IFEN IFEN: u1 = 0, /// FEMPT [6:6] /// FEMPT FEMPT: u1 = 1, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// FIFO status and interrupt register pub const SR2 = Register(SR2_val).init(base_address + 0x64); /// PMEM2 const PMEM2_val = packed struct { /// MEMSETx [0:7] /// MEMSETx MEMSETx: u8 = 252, /// MEMWAITx [8:15] /// MEMWAITx MEMWAITx: u8 = 252, /// MEMHOLDx [16:23] /// MEMHOLDx MEMHOLDx: u8 = 252, /// MEMHIZx [24:31] /// MEMHIZx MEMHIZx: u8 = 252, }; /// Common memory space timing register pub const PMEM2 = Register(PMEM2_val).init(base_address + 0x68); /// PATT2 const PATT2_val = packed struct { /// ATTSETx [0:7] /// Attribute memory x setup ATTSETx: u8 = 252, /// ATTWAITx [8:15] /// Attribute memory x wait ATTWAITx: u8 = 252, /// ATTHOLDx [16:23] /// Attribute memory x hold ATTHOLDx: u8 = 252, /// ATTHIZx [24:31] /// Attribute memory x databus HiZ ATTHIZx: u8 = 252, }; /// Attribute memory space timing register pub const PATT2 = Register(PATT2_val).init(base_address + 0x6c); /// ECCR2 const ECCR2_val = packed struct { /// ECCx [0:31] /// ECC result ECCx: u32 = 0, }; /// ECC result register 2 pub const ECCR2 = Register(ECCR2_val).init(base_address + 0x74); /// PCR3 const PCR3_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// PWAITEN [1:1] /// PWAITEN PWAITEN: u1 = 0, /// PBKEN [2:2] /// PBKEN PBKEN: u1 = 0, /// PTYP [3:3] /// PTYP PTYP: u1 = 1, /// PWID [4:5] /// PWID PWID: u2 = 1, /// ECCEN [6:6] /// ECCEN ECCEN: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// TCLR [9:12] /// TCLR TCLR: u4 = 0, /// TAR [13:16] /// TAR TAR: u4 = 0, /// ECCPS [17:19] /// ECCPS ECCPS: u3 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// PC Card/NAND Flash control register pub const PCR3 = Register(PCR3_val).init(base_address + 0x80); /// SR3 const SR3_val = packed struct { /// IRS [0:0] /// IRS IRS: u1 = 0, /// ILS [1:1] /// ILS ILS: u1 = 0, /// IFS [2:2] /// IFS IFS: u1 = 0, /// IREN [3:3] /// IREN IREN: u1 = 0, /// ILEN [4:4] /// ILEN ILEN: u1 = 0, /// IFEN [5:5] /// IFEN IFEN: u1 = 0, /// FEMPT [6:6] /// FEMPT FEMPT: u1 = 1, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, 
_unused16: u8 = 0, _unused24: u8 = 0, }; /// FIFO status and interrupt register pub const SR3 = Register(SR3_val).init(base_address + 0x84); /// PMEM3 const PMEM3_val = packed struct { /// MEMSETx [0:7] /// MEMSETx MEMSETx: u8 = 252, /// MEMWAITx [8:15] /// MEMWAITx MEMWAITx: u8 = 252, /// MEMHOLDx [16:23] /// MEMHOLDx MEMHOLDx: u8 = 252, /// MEMHIZx [24:31] /// MEMHIZx MEMHIZx: u8 = 252, }; /// Common memory space timing register pub const PMEM3 = Register(PMEM3_val).init(base_address + 0x88); /// PATT3 const PATT3_val = packed struct { /// ATTSETx [0:7] /// ATTSETx ATTSETx: u8 = 252, /// ATTWAITx [8:15] /// ATTWAITx ATTWAITx: u8 = 252, /// ATTHOLDx [16:23] /// ATTHOLDx ATTHOLDx: u8 = 252, /// ATTHIZx [24:31] /// ATTHIZx ATTHIZx: u8 = 252, }; /// Attribute memory space timing register pub const PATT3 = Register(PATT3_val).init(base_address + 0x8c); /// ECCR3 const ECCR3_val = packed struct { /// ECCx [0:31] /// ECCx ECCx: u32 = 0, }; /// ECC result register 3 pub const ECCR3 = Register(ECCR3_val).init(base_address + 0x94); /// PCR4 const PCR4_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// PWAITEN [1:1] /// PWAITEN PWAITEN: u1 = 0, /// PBKEN [2:2] /// PBKEN PBKEN: u1 = 0, /// PTYP [3:3] /// PTYP PTYP: u1 = 1, /// PWID [4:5] /// PWID PWID: u2 = 1, /// ECCEN [6:6] /// ECCEN ECCEN: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// TCLR [9:12] /// TCLR TCLR: u4 = 0, /// TAR [13:16] /// TAR TAR: u4 = 0, /// ECCPS [17:19] /// ECCPS ECCPS: u3 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// PC Card/NAND Flash control register pub const PCR4 = Register(PCR4_val).init(base_address + 0xa0); /// SR4 const SR4_val = packed struct { /// IRS [0:0] /// IRS IRS: u1 = 0, /// ILS [1:1] /// ILS ILS: u1 = 0, /// IFS [2:2] /// IFS IFS: u1 = 0, /// IREN [3:3] /// IREN IREN: u1 = 0, /// ILEN [4:4] /// ILEN ILEN: u1 = 0, /// IFEN [5:5] /// IFEN IFEN: u1 = 0, /// FEMPT [6:6] /// FEMPT FEMPT: u1 = 1, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// FIFO status and interrupt register pub const SR4 = Register(SR4_val).init(base_address + 0xa4); /// PMEM4 const PMEM4_val = packed struct { /// MEMSETx [0:7] /// MEMSETx MEMSETx: u8 = 252, /// MEMWAITx [8:15] /// MEMWAITx MEMWAITx: u8 = 252, /// MEMHOLDx [16:23] /// MEMHOLDx MEMHOLDx: u8 = 252, /// MEMHIZx [24:31] /// MEMHIZx MEMHIZx: u8 = 252, }; /// Common memory space timing register pub const PMEM4 = Register(PMEM4_val).init(base_address + 0xa8); /// PATT4 const PATT4_val = packed struct { /// ATTSETx [0:7] /// ATTSETx ATTSETx: u8 = 252, /// ATTWAITx [8:15] /// ATTWAITx ATTWAITx: u8 = 252, /// ATTHOLDx [16:23] /// ATTHOLDx ATTHOLDx: u8 = 252, /// ATTHIZx [24:31] /// ATTHIZx ATTHIZx: u8 = 252, }; /// Attribute memory space timing register pub const PATT4 = Register(PATT4_val).init(base_address + 0xac); /// PIO4 const PIO4_val = packed struct { /// IOSETx [0:7] /// IOSETx IOSETx: u8 = 252, /// IOWAITx [8:15] /// IOWAITx IOWAITx: u8 = 252, /// IOHOLDx [16:23] /// IOHOLDx IOHOLDx: u8 = 252, /// IOHIZx [24:31] /// IOHIZx IOHIZx: u8 = 252, }; /// I/O space timing register 4 pub const PIO4 = Register(PIO4_val).init(base_address + 0xb0); /// BWTR1 const BWTR1_val = packed struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// unused [16:19] _unused16: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD 
ACCMOD: u2 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// SRAM/NOR-Flash write timing registers pub const BWTR1 = Register(BWTR1_val).init(base_address + 0x104); /// BWTR2 const BWTR2_val = packed struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// unused [16:19] _unused16: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD ACCMOD: u2 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// SRAM/NOR-Flash write timing registers pub const BWTR2 = Register(BWTR2_val).init(base_address + 0x10c); /// BWTR3 const BWTR3_val = packed struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// unused [16:19] _unused16: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD ACCMOD: u2 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// SRAM/NOR-Flash write timing registers pub const BWTR3 = Register(BWTR3_val).init(base_address + 0x114); /// BWTR4 const BWTR4_val = packed struct { /// ADDSET [0:3] /// ADDSET ADDSET: u4 = 15, /// ADDHLD [4:7] /// ADDHLD ADDHLD: u4 = 15, /// DATAST [8:15] /// DATAST DATAST: u8 = 255, /// unused [16:19] _unused16: u4 = 15, /// CLKDIV [20:23] /// CLKDIV CLKDIV: u4 = 15, /// DATLAT [24:27] /// DATLAT DATLAT: u4 = 15, /// ACCMOD [28:29] /// ACCMOD ACCMOD: u2 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// SRAM/NOR-Flash write timing registers pub const BWTR4 = Register(BWTR4_val).init(base_address + 0x11c); }; /// Power control pub const PWR = struct { const base_address = 0x40007000; /// CR const CR_val = packed struct { /// LPDS [0:0] /// Low Power Deep Sleep LPDS: u1 = 0, /// PDDS [1:1] /// Power Down Deep Sleep PDDS: u1 = 0, /// CWUF [2:2] /// Clear Wake-up Flag CWUF: u1 = 0, /// CSBF [3:3] /// Clear STANDBY Flag CSBF: u1 = 0, /// PVDE [4:4] /// Power Voltage Detector PVDE: u1 = 0, /// PLS [5:7] /// PVD Level Selection PLS: u3 = 0, /// DBP [8:8] /// Disable Backup Domain write DBP: u1 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Power control register pub const CR = Register(CR_val).init(base_address + 0x0); /// CSR const CSR_val = packed struct { /// WUF [0:0] /// Wake-Up Flag WUF: u1 = 0, /// SBF [1:1] /// STANDBY Flag SBF: u1 = 0, /// PVDO [2:2] /// PVD Output PVDO: u1 = 0, /// unused [3:7] _unused3: u5 = 0, /// EWUP [8:8] /// Enable WKUP pin EWUP: u1 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Power control register pub const CSR = Register(CSR_val).init(base_address + 0x4); }; /// Reset and clock control pub const RCC = struct { const base_address = 0x40021000; /// CR const CR_val = packed struct { /// HSION [0:0] /// Internal High Speed clock HSION: u1 = 1, /// HSIRDY [1:1] /// Internal High Speed clock ready HSIRDY: u1 = 1, /// unused [2:2] _unused2: u1 = 0, /// HSITRIM [3:7] /// Internal High Speed clock HSITRIM: u5 = 16, /// HSICAL [8:15] /// Internal High Speed clock HSICAL: u8 = 0, /// HSEON [16:16] /// External High Speed clock HSEON: u1 = 0, /// HSERDY [17:17] /// External High Speed clock ready HSERDY: u1 = 0, /// HSEBYP [18:18] /// External High Speed clock HSEBYP: u1 = 0, /// CSSON [19:19] /// Clock Security System CSSON: u1 = 0, /// unused [20:23] _unused20: u4 = 0, /// PLLON [24:24] /// PLL enable PLLON: u1 = 0, /// PLLRDY [25:25] 
/// PLL clock ready flag PLLRDY: u1 = 0, /// unused [26:31] _unused26: u6 = 0, }; /// Clock control register pub const CR = Register(CR_val).init(base_address + 0x0); /// CFGR const CFGR_val = packed struct { /// SW [0:1] /// System clock Switch SW: u2 = 0, /// SWS [2:3] /// System Clock Switch Status SWS: u2 = 0, /// HPRE [4:7] /// AHB prescaler HPRE: u4 = 0, /// PPRE1 [8:10] /// APB Low speed prescaler PPRE1: u3 = 0, /// PPRE2 [11:13] /// APB High speed prescaler PPRE2: u3 = 0, /// ADCPRE [14:15] /// ADC prescaler ADCPRE: u2 = 0, /// PLLSRC [16:16] /// PLL entry clock source PLLSRC: u1 = 0, /// PLLXTPRE [17:17] /// HSE divider for PLL entry PLLXTPRE: u1 = 0, /// PLLMUL [18:21] /// PLL Multiplication Factor PLLMUL: u4 = 0, /// OTGFSPRE [22:22] /// USB OTG FS prescaler OTGFSPRE: u1 = 0, /// unused [23:23] _unused23: u1 = 0, /// MCO [24:26] /// Microcontroller clock MCO: u3 = 0, /// unused [27:31] _unused27: u5 = 0, }; /// Clock configuration register pub const CFGR = Register(CFGR_val).init(base_address + 0x4); /// CIR const CIR_val = packed struct { /// LSIRDYF [0:0] /// LSI Ready Interrupt flag LSIRDYF: u1 = 0, /// LSERDYF [1:1] /// LSE Ready Interrupt flag LSERDYF: u1 = 0, /// HSIRDYF [2:2] /// HSI Ready Interrupt flag HSIRDYF: u1 = 0, /// HSERDYF [3:3] /// HSE Ready Interrupt flag HSERDYF: u1 = 0, /// PLLRDYF [4:4] /// PLL Ready Interrupt flag PLLRDYF: u1 = 0, /// unused [5:6] _unused5: u2 = 0, /// CSSF [7:7] /// Clock Security System Interrupt CSSF: u1 = 0, /// LSIRDYIE [8:8] /// LSI Ready Interrupt Enable LSIRDYIE: u1 = 0, /// LSERDYIE [9:9] /// LSE Ready Interrupt Enable LSERDYIE: u1 = 0, /// HSIRDYIE [10:10] /// HSI Ready Interrupt Enable HSIRDYIE: u1 = 0, /// HSERDYIE [11:11] /// HSE Ready Interrupt Enable HSERDYIE: u1 = 0, /// PLLRDYIE [12:12] /// PLL Ready Interrupt Enable PLLRDYIE: u1 = 0, /// unused [13:15] _unused13: u3 = 0, /// LSIRDYC [16:16] /// LSI Ready Interrupt Clear LSIRDYC: u1 = 0, /// LSERDYC [17:17] /// LSE Ready Interrupt Clear LSERDYC: u1 = 0, /// HSIRDYC [18:18] /// HSI Ready Interrupt Clear HSIRDYC: u1 = 0, /// HSERDYC [19:19] /// HSE Ready Interrupt Clear HSERDYC: u1 = 0, /// PLLRDYC [20:20] /// PLL Ready Interrupt Clear PLLRDYC: u1 = 0, /// unused [21:22] _unused21: u2 = 0, /// CSSC [23:23] /// Clock security system interrupt CSSC: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// Clock interrupt register pub const CIR = Register(CIR_val).init(base_address + 0x8); /// APB2RSTR const APB2RSTR_val = packed struct { /// AFIORST [0:0] /// Alternate function I/O AFIORST: u1 = 0, /// unused [1:1] _unused1: u1 = 0, /// IOPARST [2:2] /// IO port A reset IOPARST: u1 = 0, /// IOPBRST [3:3] /// IO port B reset IOPBRST: u1 = 0, /// IOPCRST [4:4] /// IO port C reset IOPCRST: u1 = 0, /// IOPDRST [5:5] /// IO port D reset IOPDRST: u1 = 0, /// IOPERST [6:6] /// IO port E reset IOPERST: u1 = 0, /// IOPFRST [7:7] /// IO port F reset IOPFRST: u1 = 0, /// IOPGRST [8:8] /// IO port G reset IOPGRST: u1 = 0, /// ADC1RST [9:9] /// ADC 1 interface reset ADC1RST: u1 = 0, /// ADC2RST [10:10] /// ADC 2 interface reset ADC2RST: u1 = 0, /// TIM1RST [11:11] /// TIM1 timer reset TIM1RST: u1 = 0, /// SPI1RST [12:12] /// SPI 1 reset SPI1RST: u1 = 0, /// TIM8RST [13:13] /// TIM8 timer reset TIM8RST: u1 = 0, /// USART1RST [14:14] /// USART1 reset USART1RST: u1 = 0, /// ADC3RST [15:15] /// ADC 3 interface reset ADC3RST: u1 = 0, /// unused [16:18] _unused16: u3 = 0, /// TIM9RST [19:19] /// TIM9 timer reset TIM9RST: u1 = 0, /// TIM10RST [20:20] /// TIM10 timer reset TIM10RST: u1 = 0, /// 
TIM11RST [21:21] /// TIM11 timer reset TIM11RST: u1 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// APB2 peripheral reset register pub const APB2RSTR = Register(APB2RSTR_val).init(base_address + 0xc); /// APB1RSTR const APB1RSTR_val = packed struct { /// TIM2RST [0:0] /// Timer 2 reset TIM2RST: u1 = 0, /// TIM3RST [1:1] /// Timer 3 reset TIM3RST: u1 = 0, /// TIM4RST [2:2] /// Timer 4 reset TIM4RST: u1 = 0, /// TIM5RST [3:3] /// Timer 5 reset TIM5RST: u1 = 0, /// TIM6RST [4:4] /// Timer 6 reset TIM6RST: u1 = 0, /// TIM7RST [5:5] /// Timer 7 reset TIM7RST: u1 = 0, /// TIM12RST [6:6] /// Timer 12 reset TIM12RST: u1 = 0, /// TIM13RST [7:7] /// Timer 13 reset TIM13RST: u1 = 0, /// TIM14RST [8:8] /// Timer 14 reset TIM14RST: u1 = 0, /// unused [9:10] _unused9: u2 = 0, /// WWDGRST [11:11] /// Window watchdog reset WWDGRST: u1 = 0, /// unused [12:13] _unused12: u2 = 0, /// SPI2RST [14:14] /// SPI2 reset SPI2RST: u1 = 0, /// SPI3RST [15:15] /// SPI3 reset SPI3RST: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// USART2RST [17:17] /// USART 2 reset USART2RST: u1 = 0, /// USART3RST [18:18] /// USART 3 reset USART3RST: u1 = 0, /// UART4RST [19:19] /// UART 4 reset UART4RST: u1 = 0, /// UART5RST [20:20] /// UART 5 reset UART5RST: u1 = 0, /// I2C1RST [21:21] /// I2C1 reset I2C1RST: u1 = 0, /// I2C2RST [22:22] /// I2C2 reset I2C2RST: u1 = 0, /// USBRST [23:23] /// USB reset USBRST: u1 = 0, /// unused [24:24] _unused24: u1 = 0, /// CANRST [25:25] /// CAN reset CANRST: u1 = 0, /// unused [26:26] _unused26: u1 = 0, /// BKPRST [27:27] /// Backup interface reset BKPRST: u1 = 0, /// PWRRST [28:28] /// Power interface reset PWRRST: u1 = 0, /// DACRST [29:29] /// DAC interface reset DACRST: u1 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// APB1 peripheral reset register pub const APB1RSTR = Register(APB1RSTR_val).init(base_address + 0x10); /// AHBENR const AHBENR_val = packed struct { /// DMA1EN [0:0] /// DMA1 clock enable DMA1EN: u1 = 0, /// DMA2EN [1:1] /// DMA2 clock enable DMA2EN: u1 = 0, /// SRAMEN [2:2] /// SRAM interface clock SRAMEN: u1 = 1, /// unused [3:3] _unused3: u1 = 0, /// FLITFEN [4:4] /// FLITF clock enable FLITFEN: u1 = 1, /// unused [5:5] _unused5: u1 = 0, /// CRCEN [6:6] /// CRC clock enable CRCEN: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// FSMCEN [8:8] /// FSMC clock enable FSMCEN: u1 = 0, /// unused [9:9] _unused9: u1 = 0, /// SDIOEN [10:10] /// SDIO clock enable SDIOEN: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// AHB Peripheral Clock enable register pub const AHBENR = Register(AHBENR_val).init(base_address + 0x14); /// APB2ENR const APB2ENR_val = packed struct { /// AFIOEN [0:0] /// Alternate function I/O clock AFIOEN: u1 = 0, /// unused [1:1] _unused1: u1 = 0, /// IOPAEN [2:2] /// I/O port A clock enable IOPAEN: u1 = 0, /// IOPBEN [3:3] /// I/O port B clock enable IOPBEN: u1 = 0, /// IOPCEN [4:4] /// I/O port C clock enable IOPCEN: u1 = 0, /// IOPDEN [5:5] /// I/O port D clock enable IOPDEN: u1 = 0, /// IOPEEN [6:6] /// I/O port E clock enable IOPEEN: u1 = 0, /// IOPFEN [7:7] /// I/O port F clock enable IOPFEN: u1 = 0, /// IOPGEN [8:8] /// I/O port G clock enable IOPGEN: u1 = 0, /// ADC1EN [9:9] /// ADC 1 interface clock ADC1EN: u1 = 0, /// ADC2EN [10:10] /// ADC 2 interface clock ADC2EN: u1 = 0, /// TIM1EN [11:11] /// TIM1 Timer clock enable TIM1EN: u1 = 0, /// SPI1EN [12:12] /// SPI 1 clock enable SPI1EN: u1 = 0, /// TIM8EN [13:13] /// TIM8 Timer clock enable TIM8EN: u1 = 0, /// USART1EN [14:14] /// USART1 clock 
enable USART1EN: u1 = 0, /// ADC3EN [15:15] /// ADC3 interface clock ADC3EN: u1 = 0, /// unused [16:18] _unused16: u3 = 0, /// TIM9EN [19:19] /// TIM9 Timer clock enable TIM9EN: u1 = 0, /// TIM10EN [20:20] /// TIM10 Timer clock enable TIM10EN: u1 = 0, /// TIM11EN [21:21] /// TIM11 Timer clock enable TIM11EN: u1 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// APB2 peripheral clock enable register pub const APB2ENR = Register(APB2ENR_val).init(base_address + 0x18); /// APB1ENR const APB1ENR_val = packed struct { /// TIM2EN [0:0] /// Timer 2 clock enable TIM2EN: u1 = 0, /// TIM3EN [1:1] /// Timer 3 clock enable TIM3EN: u1 = 0, /// TIM4EN [2:2] /// Timer 4 clock enable TIM4EN: u1 = 0, /// TIM5EN [3:3] /// Timer 5 clock enable TIM5EN: u1 = 0, /// TIM6EN [4:4] /// Timer 6 clock enable TIM6EN: u1 = 0, /// TIM7EN [5:5] /// Timer 7 clock enable TIM7EN: u1 = 0, /// TIM12EN [6:6] /// Timer 12 clock enable TIM12EN: u1 = 0, /// TIM13EN [7:7] /// Timer 13 clock enable TIM13EN: u1 = 0, /// TIM14EN [8:8] /// Timer 14 clock enable TIM14EN: u1 = 0, /// unused [9:10] _unused9: u2 = 0, /// WWDGEN [11:11] /// Window watchdog clock WWDGEN: u1 = 0, /// unused [12:13] _unused12: u2 = 0, /// SPI2EN [14:14] /// SPI 2 clock enable SPI2EN: u1 = 0, /// SPI3EN [15:15] /// SPI 3 clock enable SPI3EN: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// USART2EN [17:17] /// USART 2 clock enable USART2EN: u1 = 0, /// USART3EN [18:18] /// USART 3 clock enable USART3EN: u1 = 0, /// UART4EN [19:19] /// UART 4 clock enable UART4EN: u1 = 0, /// UART5EN [20:20] /// UART 5 clock enable UART5EN: u1 = 0, /// I2C1EN [21:21] /// I2C 1 clock enable I2C1EN: u1 = 0, /// I2C2EN [22:22] /// I2C 2 clock enable I2C2EN: u1 = 0, /// USBEN [23:23] /// USB clock enable USBEN: u1 = 0, /// unused [24:24] _unused24: u1 = 0, /// CANEN [25:25] /// CAN clock enable CANEN: u1 = 0, /// unused [26:26] _unused26: u1 = 0, /// BKPEN [27:27] /// Backup interface clock BKPEN: u1 = 0, /// PWREN [28:28] /// Power interface clock PWREN: u1 = 0, /// DACEN [29:29] /// DAC interface clock enable DACEN: u1 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// APB1 peripheral clock enable register pub const APB1ENR = Register(APB1ENR_val).init(base_address + 0x1c); /// BDCR const BDCR_val = packed struct { /// LSEON [0:0] /// External Low Speed oscillator LSEON: u1 = 0, /// LSERDY [1:1] /// External Low Speed oscillator LSERDY: u1 = 0, /// LSEBYP [2:2] /// External Low Speed oscillator LSEBYP: u1 = 0, /// unused [3:7] _unused3: u5 = 0, /// RTCSEL [8:9] /// RTC clock source selection RTCSEL: u2 = 0, /// unused [10:14] _unused10: u5 = 0, /// RTCEN [15:15] /// RTC clock enable RTCEN: u1 = 0, /// BDRST [16:16] /// Backup domain software BDRST: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Backup domain control register pub const BDCR = Register(BDCR_val).init(base_address + 0x20); /// CSR const CSR_val = packed struct { /// LSION [0:0] /// Internal low speed oscillator LSION: u1 = 0, /// LSIRDY [1:1] /// Internal low speed oscillator LSIRDY: u1 = 0, /// unused [2:23] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, /// RMVF [24:24] /// Remove reset flag RMVF: u1 = 0, /// unused [25:25] _unused25: u1 = 0, /// PINRSTF [26:26] /// PIN reset flag PINRSTF: u1 = 1, /// PORRSTF [27:27] /// POR/PDR reset flag PORRSTF: u1 = 1, /// SFTRSTF [28:28] /// Software reset flag SFTRSTF: u1 = 0, /// IWDGRSTF [29:29] /// Independent watchdog reset IWDGRSTF: u1 = 0, /// WWDGRSTF [30:30] /// Window watchdog reset flag WWDGRSTF: u1 = 0, /// 
LPWRRSTF [31:31] /// Low-power reset flag LPWRRSTF: u1 = 0, }; /// Control/status register pub const CSR = Register(CSR_val).init(base_address + 0x24); }; /// General purpose I/O pub const GPIOA = struct { const base_address = 0x40010800; /// CRL const CRL_val = packed struct { /// MODE0 [0:1] /// Port n.0 mode bits MODE0: u2 = 0, /// CNF0 [2:3] /// Port n.0 configuration CNF0: u2 = 1, /// MODE1 [4:5] /// Port n.1 mode bits MODE1: u2 = 0, /// CNF1 [6:7] /// Port n.1 configuration CNF1: u2 = 1, /// MODE2 [8:9] /// Port n.2 mode bits MODE2: u2 = 0, /// CNF2 [10:11] /// Port n.2 configuration CNF2: u2 = 1, /// MODE3 [12:13] /// Port n.3 mode bits MODE3: u2 = 0, /// CNF3 [14:15] /// Port n.3 configuration CNF3: u2 = 1, /// MODE4 [16:17] /// Port n.4 mode bits MODE4: u2 = 0, /// CNF4 [18:19] /// Port n.4 configuration CNF4: u2 = 1, /// MODE5 [20:21] /// Port n.5 mode bits MODE5: u2 = 0, /// CNF5 [22:23] /// Port n.5 configuration CNF5: u2 = 1, /// MODE6 [24:25] /// Port n.6 mode bits MODE6: u2 = 0, /// CNF6 [26:27] /// Port n.6 configuration CNF6: u2 = 1, /// MODE7 [28:29] /// Port n.7 mode bits MODE7: u2 = 0, /// CNF7 [30:31] /// Port n.7 configuration CNF7: u2 = 1, }; /// Port configuration register low pub const CRL = Register(CRL_val).init(base_address + 0x0); /// CRH const CRH_val = packed struct { /// MODE8 [0:1] /// Port n.8 mode bits MODE8: u2 = 0, /// CNF8 [2:3] /// Port n.8 configuration CNF8: u2 = 1, /// MODE9 [4:5] /// Port n.9 mode bits MODE9: u2 = 0, /// CNF9 [6:7] /// Port n.9 configuration CNF9: u2 = 1, /// MODE10 [8:9] /// Port n.10 mode bits MODE10: u2 = 0, /// CNF10 [10:11] /// Port n.10 configuration CNF10: u2 = 1, /// MODE11 [12:13] /// Port n.11 mode bits MODE11: u2 = 0, /// CNF11 [14:15] /// Port n.11 configuration CNF11: u2 = 1, /// MODE12 [16:17] /// Port n.12 mode bits MODE12: u2 = 0, /// CNF12 [18:19] /// Port n.12 configuration CNF12: u2 = 1, /// MODE13 [20:21] /// Port n.13 mode bits MODE13: u2 = 0, /// CNF13 [22:23] /// Port n.13 configuration CNF13: u2 = 1, /// MODE14 [24:25] /// Port n.14 mode bits MODE14: u2 = 0, /// CNF14 [26:27] /// Port n.14 configuration CNF14: u2 = 1, /// MODE15 [28:29] /// Port n.15 mode bits MODE15: u2 = 0, /// CNF15 [30:31] /// Port n.15 configuration CNF15: u2 = 1, }; /// Port configuration register high pub const CRH = Register(CRH_val).init(base_address + 0x4); /// IDR const IDR_val = packed struct { /// IDR0 [0:0] /// Port input data IDR0: u1 = 0, /// IDR1 [1:1] /// Port input data IDR1: u1 = 0, /// IDR2 [2:2] /// Port input data IDR2: u1 = 0, /// IDR3 [3:3] /// Port input data IDR3: u1 = 0, /// IDR4 [4:4] /// Port input data IDR4: u1 = 0, /// IDR5 [5:5] /// Port input data IDR5: u1 = 0, /// IDR6 [6:6] /// Port input data IDR6: u1 = 0, /// IDR7 [7:7] /// Port input data IDR7: u1 = 0, /// IDR8 [8:8] /// Port input data IDR8: u1 = 0, /// IDR9 [9:9] /// Port input data IDR9: u1 = 0, /// IDR10 [10:10] /// Port input data IDR10: u1 = 0, /// IDR11 [11:11] /// Port input data IDR11: u1 = 0, /// IDR12 [12:12] /// Port input data IDR12: u1 = 0, /// IDR13 [13:13] /// Port input data IDR13: u1 = 0, /// IDR14 [14:14] /// Port input data IDR14: u1 = 0, /// IDR15 [15:15] /// Port input data IDR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port input data register pub const IDR = Register(IDR_val).init(base_address + 0x8); /// ODR const ODR_val = packed struct { /// ODR0 [0:0] /// Port output data ODR0: u1 = 0, /// ODR1 [1:1] /// Port output data ODR1: u1 = 0, /// ODR2 [2:2] /// Port output data ODR2: u1 = 0, /// ODR3 
[3:3] /// Port output data ODR3: u1 = 0, /// ODR4 [4:4] /// Port output data ODR4: u1 = 0, /// ODR5 [5:5] /// Port output data ODR5: u1 = 0, /// ODR6 [6:6] /// Port output data ODR6: u1 = 0, /// ODR7 [7:7] /// Port output data ODR7: u1 = 0, /// ODR8 [8:8] /// Port output data ODR8: u1 = 0, /// ODR9 [9:9] /// Port output data ODR9: u1 = 0, /// ODR10 [10:10] /// Port output data ODR10: u1 = 0, /// ODR11 [11:11] /// Port output data ODR11: u1 = 0, /// ODR12 [12:12] /// Port output data ODR12: u1 = 0, /// ODR13 [13:13] /// Port output data ODR13: u1 = 0, /// ODR14 [14:14] /// Port output data ODR14: u1 = 0, /// ODR15 [15:15] /// Port output data ODR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port output data register pub const ODR = Register(ODR_val).init(base_address + 0xc); /// BSRR const BSRR_val = packed struct { /// BS0 [0:0] /// Set bit 0 BS0: u1 = 0, /// BS1 [1:1] /// Set bit 1 BS1: u1 = 0, /// BS2 [2:2] /// Set bit 1 BS2: u1 = 0, /// BS3 [3:3] /// Set bit 3 BS3: u1 = 0, /// BS4 [4:4] /// Set bit 4 BS4: u1 = 0, /// BS5 [5:5] /// Set bit 5 BS5: u1 = 0, /// BS6 [6:6] /// Set bit 6 BS6: u1 = 0, /// BS7 [7:7] /// Set bit 7 BS7: u1 = 0, /// BS8 [8:8] /// Set bit 8 BS8: u1 = 0, /// BS9 [9:9] /// Set bit 9 BS9: u1 = 0, /// BS10 [10:10] /// Set bit 10 BS10: u1 = 0, /// BS11 [11:11] /// Set bit 11 BS11: u1 = 0, /// BS12 [12:12] /// Set bit 12 BS12: u1 = 0, /// BS13 [13:13] /// Set bit 13 BS13: u1 = 0, /// BS14 [14:14] /// Set bit 14 BS14: u1 = 0, /// BS15 [15:15] /// Set bit 15 BS15: u1 = 0, /// BR0 [16:16] /// Reset bit 0 BR0: u1 = 0, /// BR1 [17:17] /// Reset bit 1 BR1: u1 = 0, /// BR2 [18:18] /// Reset bit 2 BR2: u1 = 0, /// BR3 [19:19] /// Reset bit 3 BR3: u1 = 0, /// BR4 [20:20] /// Reset bit 4 BR4: u1 = 0, /// BR5 [21:21] /// Reset bit 5 BR5: u1 = 0, /// BR6 [22:22] /// Reset bit 6 BR6: u1 = 0, /// BR7 [23:23] /// Reset bit 7 BR7: u1 = 0, /// BR8 [24:24] /// Reset bit 8 BR8: u1 = 0, /// BR9 [25:25] /// Reset bit 9 BR9: u1 = 0, /// BR10 [26:26] /// Reset bit 10 BR10: u1 = 0, /// BR11 [27:27] /// Reset bit 11 BR11: u1 = 0, /// BR12 [28:28] /// Reset bit 12 BR12: u1 = 0, /// BR13 [29:29] /// Reset bit 13 BR13: u1 = 0, /// BR14 [30:30] /// Reset bit 14 BR14: u1 = 0, /// BR15 [31:31] /// Reset bit 15 BR15: u1 = 0, }; /// Port bit set/reset register pub const BSRR = Register(BSRR_val).init(base_address + 0x10); /// BRR const BRR_val = packed struct { /// BR0 [0:0] /// Reset bit 0 BR0: u1 = 0, /// BR1 [1:1] /// Reset bit 1 BR1: u1 = 0, /// BR2 [2:2] /// Reset bit 1 BR2: u1 = 0, /// BR3 [3:3] /// Reset bit 3 BR3: u1 = 0, /// BR4 [4:4] /// Reset bit 4 BR4: u1 = 0, /// BR5 [5:5] /// Reset bit 5 BR5: u1 = 0, /// BR6 [6:6] /// Reset bit 6 BR6: u1 = 0, /// BR7 [7:7] /// Reset bit 7 BR7: u1 = 0, /// BR8 [8:8] /// Reset bit 8 BR8: u1 = 0, /// BR9 [9:9] /// Reset bit 9 BR9: u1 = 0, /// BR10 [10:10] /// Reset bit 10 BR10: u1 = 0, /// BR11 [11:11] /// Reset bit 11 BR11: u1 = 0, /// BR12 [12:12] /// Reset bit 12 BR12: u1 = 0, /// BR13 [13:13] /// Reset bit 13 BR13: u1 = 0, /// BR14 [14:14] /// Reset bit 14 BR14: u1 = 0, /// BR15 [15:15] /// Reset bit 15 BR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port bit reset register pub const BRR = Register(BRR_val).init(base_address + 0x14); /// LCKR const LCKR_val = packed struct { /// LCK0 [0:0] /// Port A Lock bit 0 LCK0: u1 = 0, /// LCK1 [1:1] /// Port A Lock bit 1 LCK1: u1 = 0, /// LCK2 [2:2] /// Port A Lock bit 2 LCK2: u1 = 0, /// LCK3 [3:3] /// Port A Lock bit 3 LCK3: u1 = 0, /// LCK4 [4:4] /// 
Port A Lock bit 4 LCK4: u1 = 0, /// LCK5 [5:5] /// Port A Lock bit 5 LCK5: u1 = 0, /// LCK6 [6:6] /// Port A Lock bit 6 LCK6: u1 = 0, /// LCK7 [7:7] /// Port A Lock bit 7 LCK7: u1 = 0, /// LCK8 [8:8] /// Port A Lock bit 8 LCK8: u1 = 0, /// LCK9 [9:9] /// Port A Lock bit 9 LCK9: u1 = 0, /// LCK10 [10:10] /// Port A Lock bit 10 LCK10: u1 = 0, /// LCK11 [11:11] /// Port A Lock bit 11 LCK11: u1 = 0, /// LCK12 [12:12] /// Port A Lock bit 12 LCK12: u1 = 0, /// LCK13 [13:13] /// Port A Lock bit 13 LCK13: u1 = 0, /// LCK14 [14:14] /// Port A Lock bit 14 LCK14: u1 = 0, /// LCK15 [15:15] /// Port A Lock bit 15 LCK15: u1 = 0, /// LCKK [16:16] /// Lock key LCKK: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Port configuration lock pub const LCKR = Register(LCKR_val).init(base_address + 0x18); }; /// General purpose I/O pub const GPIOB = struct { const base_address = 0x40010c00; /// CRL const CRL_val = packed struct { /// MODE0 [0:1] /// Port n.0 mode bits MODE0: u2 = 0, /// CNF0 [2:3] /// Port n.0 configuration CNF0: u2 = 1, /// MODE1 [4:5] /// Port n.1 mode bits MODE1: u2 = 0, /// CNF1 [6:7] /// Port n.1 configuration CNF1: u2 = 1, /// MODE2 [8:9] /// Port n.2 mode bits MODE2: u2 = 0, /// CNF2 [10:11] /// Port n.2 configuration CNF2: u2 = 1, /// MODE3 [12:13] /// Port n.3 mode bits MODE3: u2 = 0, /// CNF3 [14:15] /// Port n.3 configuration CNF3: u2 = 1, /// MODE4 [16:17] /// Port n.4 mode bits MODE4: u2 = 0, /// CNF4 [18:19] /// Port n.4 configuration CNF4: u2 = 1, /// MODE5 [20:21] /// Port n.5 mode bits MODE5: u2 = 0, /// CNF5 [22:23] /// Port n.5 configuration CNF5: u2 = 1, /// MODE6 [24:25] /// Port n.6 mode bits MODE6: u2 = 0, /// CNF6 [26:27] /// Port n.6 configuration CNF6: u2 = 1, /// MODE7 [28:29] /// Port n.7 mode bits MODE7: u2 = 0, /// CNF7 [30:31] /// Port n.7 configuration CNF7: u2 = 1, }; /// Port configuration register low pub const CRL = Register(CRL_val).init(base_address + 0x0); /// CRH const CRH_val = packed struct { /// MODE8 [0:1] /// Port n.8 mode bits MODE8: u2 = 0, /// CNF8 [2:3] /// Port n.8 configuration CNF8: u2 = 1, /// MODE9 [4:5] /// Port n.9 mode bits MODE9: u2 = 0, /// CNF9 [6:7] /// Port n.9 configuration CNF9: u2 = 1, /// MODE10 [8:9] /// Port n.10 mode bits MODE10: u2 = 0, /// CNF10 [10:11] /// Port n.10 configuration CNF10: u2 = 1, /// MODE11 [12:13] /// Port n.11 mode bits MODE11: u2 = 0, /// CNF11 [14:15] /// Port n.11 configuration CNF11: u2 = 1, /// MODE12 [16:17] /// Port n.12 mode bits MODE12: u2 = 0, /// CNF12 [18:19] /// Port n.12 configuration CNF12: u2 = 1, /// MODE13 [20:21] /// Port n.13 mode bits MODE13: u2 = 0, /// CNF13 [22:23] /// Port n.13 configuration CNF13: u2 = 1, /// MODE14 [24:25] /// Port n.14 mode bits MODE14: u2 = 0, /// CNF14 [26:27] /// Port n.14 configuration CNF14: u2 = 1, /// MODE15 [28:29] /// Port n.15 mode bits MODE15: u2 = 0, /// CNF15 [30:31] /// Port n.15 configuration CNF15: u2 = 1, }; /// Port configuration register high pub const CRH = Register(CRH_val).init(base_address + 0x4); /// IDR const IDR_val = packed struct { /// IDR0 [0:0] /// Port input data IDR0: u1 = 0, /// IDR1 [1:1] /// Port input data IDR1: u1 = 0, /// IDR2 [2:2] /// Port input data IDR2: u1 = 0, /// IDR3 [3:3] /// Port input data IDR3: u1 = 0, /// IDR4 [4:4] /// Port input data IDR4: u1 = 0, /// IDR5 [5:5] /// Port input data IDR5: u1 = 0, /// IDR6 [6:6] /// Port input data IDR6: u1 = 0, /// IDR7 [7:7] /// Port input data IDR7: u1 = 0, /// IDR8 [8:8] /// Port input data IDR8: u1 = 0, /// IDR9 [9:9] /// Port input data IDR9: u1 = 0, 
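// Usage sketch (illustrative only, not part of the generated register map):
// configuring a pin through CRL (pins 0-7) / CRH (pins 8-15). Each pin has a
// 2-bit MODEy field (0b00 = input, otherwise output at 10/2/50 MHz) and a
// 2-bit CNFy field whose meaning depends on MODEy. This assumes the
// Register(...) wrapper defined earlier in this file exposes read() and
// write() helpers (adjust if its API differs), the function name is made up,
// and the port clock is already enabled through RCC.
fn examplePa5AsPushPullOutput() void {
    var crl = GPIOA.CRL.read();
    crl.MODE5 = 0b11; // output mode, max speed 50 MHz
    crl.CNF5 = 0b00; // general-purpose output, push-pull
    GPIOA.CRL.write(crl);
}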
/// IDR10 [10:10] /// Port input data IDR10: u1 = 0, /// IDR11 [11:11] /// Port input data IDR11: u1 = 0, /// IDR12 [12:12] /// Port input data IDR12: u1 = 0, /// IDR13 [13:13] /// Port input data IDR13: u1 = 0, /// IDR14 [14:14] /// Port input data IDR14: u1 = 0, /// IDR15 [15:15] /// Port input data IDR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port input data register pub const IDR = Register(IDR_val).init(base_address + 0x8); /// ODR const ODR_val = packed struct { /// ODR0 [0:0] /// Port output data ODR0: u1 = 0, /// ODR1 [1:1] /// Port output data ODR1: u1 = 0, /// ODR2 [2:2] /// Port output data ODR2: u1 = 0, /// ODR3 [3:3] /// Port output data ODR3: u1 = 0, /// ODR4 [4:4] /// Port output data ODR4: u1 = 0, /// ODR5 [5:5] /// Port output data ODR5: u1 = 0, /// ODR6 [6:6] /// Port output data ODR6: u1 = 0, /// ODR7 [7:7] /// Port output data ODR7: u1 = 0, /// ODR8 [8:8] /// Port output data ODR8: u1 = 0, /// ODR9 [9:9] /// Port output data ODR9: u1 = 0, /// ODR10 [10:10] /// Port output data ODR10: u1 = 0, /// ODR11 [11:11] /// Port output data ODR11: u1 = 0, /// ODR12 [12:12] /// Port output data ODR12: u1 = 0, /// ODR13 [13:13] /// Port output data ODR13: u1 = 0, /// ODR14 [14:14] /// Port output data ODR14: u1 = 0, /// ODR15 [15:15] /// Port output data ODR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port output data register pub const ODR = Register(ODR_val).init(base_address + 0xc); /// BSRR const BSRR_val = packed struct { /// BS0 [0:0] /// Set bit 0 BS0: u1 = 0, /// BS1 [1:1] /// Set bit 1 BS1: u1 = 0, /// BS2 [2:2] /// Set bit 1 BS2: u1 = 0, /// BS3 [3:3] /// Set bit 3 BS3: u1 = 0, /// BS4 [4:4] /// Set bit 4 BS4: u1 = 0, /// BS5 [5:5] /// Set bit 5 BS5: u1 = 0, /// BS6 [6:6] /// Set bit 6 BS6: u1 = 0, /// BS7 [7:7] /// Set bit 7 BS7: u1 = 0, /// BS8 [8:8] /// Set bit 8 BS8: u1 = 0, /// BS9 [9:9] /// Set bit 9 BS9: u1 = 0, /// BS10 [10:10] /// Set bit 10 BS10: u1 = 0, /// BS11 [11:11] /// Set bit 11 BS11: u1 = 0, /// BS12 [12:12] /// Set bit 12 BS12: u1 = 0, /// BS13 [13:13] /// Set bit 13 BS13: u1 = 0, /// BS14 [14:14] /// Set bit 14 BS14: u1 = 0, /// BS15 [15:15] /// Set bit 15 BS15: u1 = 0, /// BR0 [16:16] /// Reset bit 0 BR0: u1 = 0, /// BR1 [17:17] /// Reset bit 1 BR1: u1 = 0, /// BR2 [18:18] /// Reset bit 2 BR2: u1 = 0, /// BR3 [19:19] /// Reset bit 3 BR3: u1 = 0, /// BR4 [20:20] /// Reset bit 4 BR4: u1 = 0, /// BR5 [21:21] /// Reset bit 5 BR5: u1 = 0, /// BR6 [22:22] /// Reset bit 6 BR6: u1 = 0, /// BR7 [23:23] /// Reset bit 7 BR7: u1 = 0, /// BR8 [24:24] /// Reset bit 8 BR8: u1 = 0, /// BR9 [25:25] /// Reset bit 9 BR9: u1 = 0, /// BR10 [26:26] /// Reset bit 10 BR10: u1 = 0, /// BR11 [27:27] /// Reset bit 11 BR11: u1 = 0, /// BR12 [28:28] /// Reset bit 12 BR12: u1 = 0, /// BR13 [29:29] /// Reset bit 13 BR13: u1 = 0, /// BR14 [30:30] /// Reset bit 14 BR14: u1 = 0, /// BR15 [31:31] /// Reset bit 15 BR15: u1 = 0, }; /// Port bit set/reset register pub const BSRR = Register(BSRR_val).init(base_address + 0x10); /// BRR const BRR_val = packed struct { /// BR0 [0:0] /// Reset bit 0 BR0: u1 = 0, /// BR1 [1:1] /// Reset bit 1 BR1: u1 = 0, /// BR2 [2:2] /// Reset bit 1 BR2: u1 = 0, /// BR3 [3:3] /// Reset bit 3 BR3: u1 = 0, /// BR4 [4:4] /// Reset bit 4 BR4: u1 = 0, /// BR5 [5:5] /// Reset bit 5 BR5: u1 = 0, /// BR6 [6:6] /// Reset bit 6 BR6: u1 = 0, /// BR7 [7:7] /// Reset bit 7 BR7: u1 = 0, /// BR8 [8:8] /// Reset bit 8 BR8: u1 = 0, /// BR9 [9:9] /// Reset bit 9 BR9: u1 = 0, /// BR10 [10:10] /// Reset bit 10 BR10: 
u1 = 0, /// BR11 [11:11] /// Reset bit 11 BR11: u1 = 0, /// BR12 [12:12] /// Reset bit 12 BR12: u1 = 0, /// BR13 [13:13] /// Reset bit 13 BR13: u1 = 0, /// BR14 [14:14] /// Reset bit 14 BR14: u1 = 0, /// BR15 [15:15] /// Reset bit 15 BR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port bit reset register pub const BRR = Register(BRR_val).init(base_address + 0x14); /// LCKR const LCKR_val = packed struct { /// LCK0 [0:0] /// Port A Lock bit 0 LCK0: u1 = 0, /// LCK1 [1:1] /// Port A Lock bit 1 LCK1: u1 = 0, /// LCK2 [2:2] /// Port A Lock bit 2 LCK2: u1 = 0, /// LCK3 [3:3] /// Port A Lock bit 3 LCK3: u1 = 0, /// LCK4 [4:4] /// Port A Lock bit 4 LCK4: u1 = 0, /// LCK5 [5:5] /// Port A Lock bit 5 LCK5: u1 = 0, /// LCK6 [6:6] /// Port A Lock bit 6 LCK6: u1 = 0, /// LCK7 [7:7] /// Port A Lock bit 7 LCK7: u1 = 0, /// LCK8 [8:8] /// Port A Lock bit 8 LCK8: u1 = 0, /// LCK9 [9:9] /// Port A Lock bit 9 LCK9: u1 = 0, /// LCK10 [10:10] /// Port A Lock bit 10 LCK10: u1 = 0, /// LCK11 [11:11] /// Port A Lock bit 11 LCK11: u1 = 0, /// LCK12 [12:12] /// Port A Lock bit 12 LCK12: u1 = 0, /// LCK13 [13:13] /// Port A Lock bit 13 LCK13: u1 = 0, /// LCK14 [14:14] /// Port A Lock bit 14 LCK14: u1 = 0, /// LCK15 [15:15] /// Port A Lock bit 15 LCK15: u1 = 0, /// LCKK [16:16] /// Lock key LCKK: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Port configuration lock pub const LCKR = Register(LCKR_val).init(base_address + 0x18); }; /// General purpose I/O pub const GPIOC = struct { const base_address = 0x40011000; /// CRL const CRL_val = packed struct { /// MODE0 [0:1] /// Port n.0 mode bits MODE0: u2 = 0, /// CNF0 [2:3] /// Port n.0 configuration CNF0: u2 = 1, /// MODE1 [4:5] /// Port n.1 mode bits MODE1: u2 = 0, /// CNF1 [6:7] /// Port n.1 configuration CNF1: u2 = 1, /// MODE2 [8:9] /// Port n.2 mode bits MODE2: u2 = 0, /// CNF2 [10:11] /// Port n.2 configuration CNF2: u2 = 1, /// MODE3 [12:13] /// Port n.3 mode bits MODE3: u2 = 0, /// CNF3 [14:15] /// Port n.3 configuration CNF3: u2 = 1, /// MODE4 [16:17] /// Port n.4 mode bits MODE4: u2 = 0, /// CNF4 [18:19] /// Port n.4 configuration CNF4: u2 = 1, /// MODE5 [20:21] /// Port n.5 mode bits MODE5: u2 = 0, /// CNF5 [22:23] /// Port n.5 configuration CNF5: u2 = 1, /// MODE6 [24:25] /// Port n.6 mode bits MODE6: u2 = 0, /// CNF6 [26:27] /// Port n.6 configuration CNF6: u2 = 1, /// MODE7 [28:29] /// Port n.7 mode bits MODE7: u2 = 0, /// CNF7 [30:31] /// Port n.7 configuration CNF7: u2 = 1, }; /// Port configuration register low pub const CRL = Register(CRL_val).init(base_address + 0x0); /// CRH const CRH_val = packed struct { /// MODE8 [0:1] /// Port n.8 mode bits MODE8: u2 = 0, /// CNF8 [2:3] /// Port n.8 configuration CNF8: u2 = 1, /// MODE9 [4:5] /// Port n.9 mode bits MODE9: u2 = 0, /// CNF9 [6:7] /// Port n.9 configuration CNF9: u2 = 1, /// MODE10 [8:9] /// Port n.10 mode bits MODE10: u2 = 0, /// CNF10 [10:11] /// Port n.10 configuration CNF10: u2 = 1, /// MODE11 [12:13] /// Port n.11 mode bits MODE11: u2 = 0, /// CNF11 [14:15] /// Port n.11 configuration CNF11: u2 = 1, /// MODE12 [16:17] /// Port n.12 mode bits MODE12: u2 = 0, /// CNF12 [18:19] /// Port n.12 configuration CNF12: u2 = 1, /// MODE13 [20:21] /// Port n.13 mode bits MODE13: u2 = 0, /// CNF13 [22:23] /// Port n.13 configuration CNF13: u2 = 1, /// MODE14 [24:25] /// Port n.14 mode bits MODE14: u2 = 0, /// CNF14 [26:27] /// Port n.14 configuration CNF14: u2 = 1, /// MODE15 [28:29] /// Port n.15 mode bits MODE15: u2 = 0, /// CNF15 [30:31] /// Port 
n.15 configuration CNF15: u2 = 1, }; /// Port configuration register high pub const CRH = Register(CRH_val).init(base_address + 0x4); /// IDR const IDR_val = packed struct { /// IDR0 [0:0] /// Port input data IDR0: u1 = 0, /// IDR1 [1:1] /// Port input data IDR1: u1 = 0, /// IDR2 [2:2] /// Port input data IDR2: u1 = 0, /// IDR3 [3:3] /// Port input data IDR3: u1 = 0, /// IDR4 [4:4] /// Port input data IDR4: u1 = 0, /// IDR5 [5:5] /// Port input data IDR5: u1 = 0, /// IDR6 [6:6] /// Port input data IDR6: u1 = 0, /// IDR7 [7:7] /// Port input data IDR7: u1 = 0, /// IDR8 [8:8] /// Port input data IDR8: u1 = 0, /// IDR9 [9:9] /// Port input data IDR9: u1 = 0, /// IDR10 [10:10] /// Port input data IDR10: u1 = 0, /// IDR11 [11:11] /// Port input data IDR11: u1 = 0, /// IDR12 [12:12] /// Port input data IDR12: u1 = 0, /// IDR13 [13:13] /// Port input data IDR13: u1 = 0, /// IDR14 [14:14] /// Port input data IDR14: u1 = 0, /// IDR15 [15:15] /// Port input data IDR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port input data register pub const IDR = Register(IDR_val).init(base_address + 0x8); /// ODR const ODR_val = packed struct { /// ODR0 [0:0] /// Port output data ODR0: u1 = 0, /// ODR1 [1:1] /// Port output data ODR1: u1 = 0, /// ODR2 [2:2] /// Port output data ODR2: u1 = 0, /// ODR3 [3:3] /// Port output data ODR3: u1 = 0, /// ODR4 [4:4] /// Port output data ODR4: u1 = 0, /// ODR5 [5:5] /// Port output data ODR5: u1 = 0, /// ODR6 [6:6] /// Port output data ODR6: u1 = 0, /// ODR7 [7:7] /// Port output data ODR7: u1 = 0, /// ODR8 [8:8] /// Port output data ODR8: u1 = 0, /// ODR9 [9:9] /// Port output data ODR9: u1 = 0, /// ODR10 [10:10] /// Port output data ODR10: u1 = 0, /// ODR11 [11:11] /// Port output data ODR11: u1 = 0, /// ODR12 [12:12] /// Port output data ODR12: u1 = 0, /// ODR13 [13:13] /// Port output data ODR13: u1 = 0, /// ODR14 [14:14] /// Port output data ODR14: u1 = 0, /// ODR15 [15:15] /// Port output data ODR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port output data register pub const ODR = Register(ODR_val).init(base_address + 0xc); /// BSRR const BSRR_val = packed struct { /// BS0 [0:0] /// Set bit 0 BS0: u1 = 0, /// BS1 [1:1] /// Set bit 1 BS1: u1 = 0, /// BS2 [2:2] /// Set bit 1 BS2: u1 = 0, /// BS3 [3:3] /// Set bit 3 BS3: u1 = 0, /// BS4 [4:4] /// Set bit 4 BS4: u1 = 0, /// BS5 [5:5] /// Set bit 5 BS5: u1 = 0, /// BS6 [6:6] /// Set bit 6 BS6: u1 = 0, /// BS7 [7:7] /// Set bit 7 BS7: u1 = 0, /// BS8 [8:8] /// Set bit 8 BS8: u1 = 0, /// BS9 [9:9] /// Set bit 9 BS9: u1 = 0, /// BS10 [10:10] /// Set bit 10 BS10: u1 = 0, /// BS11 [11:11] /// Set bit 11 BS11: u1 = 0, /// BS12 [12:12] /// Set bit 12 BS12: u1 = 0, /// BS13 [13:13] /// Set bit 13 BS13: u1 = 0, /// BS14 [14:14] /// Set bit 14 BS14: u1 = 0, /// BS15 [15:15] /// Set bit 15 BS15: u1 = 0, /// BR0 [16:16] /// Reset bit 0 BR0: u1 = 0, /// BR1 [17:17] /// Reset bit 1 BR1: u1 = 0, /// BR2 [18:18] /// Reset bit 2 BR2: u1 = 0, /// BR3 [19:19] /// Reset bit 3 BR3: u1 = 0, /// BR4 [20:20] /// Reset bit 4 BR4: u1 = 0, /// BR5 [21:21] /// Reset bit 5 BR5: u1 = 0, /// BR6 [22:22] /// Reset bit 6 BR6: u1 = 0, /// BR7 [23:23] /// Reset bit 7 BR7: u1 = 0, /// BR8 [24:24] /// Reset bit 8 BR8: u1 = 0, /// BR9 [25:25] /// Reset bit 9 BR9: u1 = 0, /// BR10 [26:26] /// Reset bit 10 BR10: u1 = 0, /// BR11 [27:27] /// Reset bit 11 BR11: u1 = 0, /// BR12 [28:28] /// Reset bit 12 BR12: u1 = 0, /// BR13 [29:29] /// Reset bit 13 BR13: u1 = 0, /// BR14 [30:30] /// Reset bit 14 BR14: 
u1 = 0, /// BR15 [31:31] /// Reset bit 15 BR15: u1 = 0, }; /// Port bit set/reset register pub const BSRR = Register(BSRR_val).init(base_address + 0x10); /// BRR const BRR_val = packed struct { /// BR0 [0:0] /// Reset bit 0 BR0: u1 = 0, /// BR1 [1:1] /// Reset bit 1 BR1: u1 = 0, /// BR2 [2:2] /// Reset bit 1 BR2: u1 = 0, /// BR3 [3:3] /// Reset bit 3 BR3: u1 = 0, /// BR4 [4:4] /// Reset bit 4 BR4: u1 = 0, /// BR5 [5:5] /// Reset bit 5 BR5: u1 = 0, /// BR6 [6:6] /// Reset bit 6 BR6: u1 = 0, /// BR7 [7:7] /// Reset bit 7 BR7: u1 = 0, /// BR8 [8:8] /// Reset bit 8 BR8: u1 = 0, /// BR9 [9:9] /// Reset bit 9 BR9: u1 = 0, /// BR10 [10:10] /// Reset bit 10 BR10: u1 = 0, /// BR11 [11:11] /// Reset bit 11 BR11: u1 = 0, /// BR12 [12:12] /// Reset bit 12 BR12: u1 = 0, /// BR13 [13:13] /// Reset bit 13 BR13: u1 = 0, /// BR14 [14:14] /// Reset bit 14 BR14: u1 = 0, /// BR15 [15:15] /// Reset bit 15 BR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port bit reset register pub const BRR = Register(BRR_val).init(base_address + 0x14); /// LCKR const LCKR_val = packed struct { /// LCK0 [0:0] /// Port A Lock bit 0 LCK0: u1 = 0, /// LCK1 [1:1] /// Port A Lock bit 1 LCK1: u1 = 0, /// LCK2 [2:2] /// Port A Lock bit 2 LCK2: u1 = 0, /// LCK3 [3:3] /// Port A Lock bit 3 LCK3: u1 = 0, /// LCK4 [4:4] /// Port A Lock bit 4 LCK4: u1 = 0, /// LCK5 [5:5] /// Port A Lock bit 5 LCK5: u1 = 0, /// LCK6 [6:6] /// Port A Lock bit 6 LCK6: u1 = 0, /// LCK7 [7:7] /// Port A Lock bit 7 LCK7: u1 = 0, /// LCK8 [8:8] /// Port A Lock bit 8 LCK8: u1 = 0, /// LCK9 [9:9] /// Port A Lock bit 9 LCK9: u1 = 0, /// LCK10 [10:10] /// Port A Lock bit 10 LCK10: u1 = 0, /// LCK11 [11:11] /// Port A Lock bit 11 LCK11: u1 = 0, /// LCK12 [12:12] /// Port A Lock bit 12 LCK12: u1 = 0, /// LCK13 [13:13] /// Port A Lock bit 13 LCK13: u1 = 0, /// LCK14 [14:14] /// Port A Lock bit 14 LCK14: u1 = 0, /// LCK15 [15:15] /// Port A Lock bit 15 LCK15: u1 = 0, /// LCKK [16:16] /// Lock key LCKK: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Port configuration lock pub const LCKR = Register(LCKR_val).init(base_address + 0x18); }; /// General purpose I/O pub const GPIOD = struct { const base_address = 0x40011400; /// CRL const CRL_val = packed struct { /// MODE0 [0:1] /// Port n.0 mode bits MODE0: u2 = 0, /// CNF0 [2:3] /// Port n.0 configuration CNF0: u2 = 1, /// MODE1 [4:5] /// Port n.1 mode bits MODE1: u2 = 0, /// CNF1 [6:7] /// Port n.1 configuration CNF1: u2 = 1, /// MODE2 [8:9] /// Port n.2 mode bits MODE2: u2 = 0, /// CNF2 [10:11] /// Port n.2 configuration CNF2: u2 = 1, /// MODE3 [12:13] /// Port n.3 mode bits MODE3: u2 = 0, /// CNF3 [14:15] /// Port n.3 configuration CNF3: u2 = 1, /// MODE4 [16:17] /// Port n.4 mode bits MODE4: u2 = 0, /// CNF4 [18:19] /// Port n.4 configuration CNF4: u2 = 1, /// MODE5 [20:21] /// Port n.5 mode bits MODE5: u2 = 0, /// CNF5 [22:23] /// Port n.5 configuration CNF5: u2 = 1, /// MODE6 [24:25] /// Port n.6 mode bits MODE6: u2 = 0, /// CNF6 [26:27] /// Port n.6 configuration CNF6: u2 = 1, /// MODE7 [28:29] /// Port n.7 mode bits MODE7: u2 = 0, /// CNF7 [30:31] /// Port n.7 configuration CNF7: u2 = 1, }; /// Port configuration register low pub const CRL = Register(CRL_val).init(base_address + 0x0); /// CRH const CRH_val = packed struct { /// MODE8 [0:1] /// Port n.8 mode bits MODE8: u2 = 0, /// CNF8 [2:3] /// Port n.8 configuration CNF8: u2 = 1, /// MODE9 [4:5] /// Port n.9 mode bits MODE9: u2 = 0, /// CNF9 [6:7] /// Port n.9 configuration CNF9: u2 = 1, /// MODE10 [8:9] /// 
Port n.10 mode bits MODE10: u2 = 0, /// CNF10 [10:11] /// Port n.10 configuration CNF10: u2 = 1, /// MODE11 [12:13] /// Port n.11 mode bits MODE11: u2 = 0, /// CNF11 [14:15] /// Port n.11 configuration CNF11: u2 = 1, /// MODE12 [16:17] /// Port n.12 mode bits MODE12: u2 = 0, /// CNF12 [18:19] /// Port n.12 configuration CNF12: u2 = 1, /// MODE13 [20:21] /// Port n.13 mode bits MODE13: u2 = 0, /// CNF13 [22:23] /// Port n.13 configuration CNF13: u2 = 1, /// MODE14 [24:25] /// Port n.14 mode bits MODE14: u2 = 0, /// CNF14 [26:27] /// Port n.14 configuration CNF14: u2 = 1, /// MODE15 [28:29] /// Port n.15 mode bits MODE15: u2 = 0, /// CNF15 [30:31] /// Port n.15 configuration CNF15: u2 = 1, }; /// Port configuration register high pub const CRH = Register(CRH_val).init(base_address + 0x4); /// IDR const IDR_val = packed struct { /// IDR0 [0:0] /// Port input data IDR0: u1 = 0, /// IDR1 [1:1] /// Port input data IDR1: u1 = 0, /// IDR2 [2:2] /// Port input data IDR2: u1 = 0, /// IDR3 [3:3] /// Port input data IDR3: u1 = 0, /// IDR4 [4:4] /// Port input data IDR4: u1 = 0, /// IDR5 [5:5] /// Port input data IDR5: u1 = 0, /// IDR6 [6:6] /// Port input data IDR6: u1 = 0, /// IDR7 [7:7] /// Port input data IDR7: u1 = 0, /// IDR8 [8:8] /// Port input data IDR8: u1 = 0, /// IDR9 [9:9] /// Port input data IDR9: u1 = 0, /// IDR10 [10:10] /// Port input data IDR10: u1 = 0, /// IDR11 [11:11] /// Port input data IDR11: u1 = 0, /// IDR12 [12:12] /// Port input data IDR12: u1 = 0, /// IDR13 [13:13] /// Port input data IDR13: u1 = 0, /// IDR14 [14:14] /// Port input data IDR14: u1 = 0, /// IDR15 [15:15] /// Port input data IDR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port input data register pub const IDR = Register(IDR_val).init(base_address + 0x8); /// ODR const ODR_val = packed struct { /// ODR0 [0:0] /// Port output data ODR0: u1 = 0, /// ODR1 [1:1] /// Port output data ODR1: u1 = 0, /// ODR2 [2:2] /// Port output data ODR2: u1 = 0, /// ODR3 [3:3] /// Port output data ODR3: u1 = 0, /// ODR4 [4:4] /// Port output data ODR4: u1 = 0, /// ODR5 [5:5] /// Port output data ODR5: u1 = 0, /// ODR6 [6:6] /// Port output data ODR6: u1 = 0, /// ODR7 [7:7] /// Port output data ODR7: u1 = 0, /// ODR8 [8:8] /// Port output data ODR8: u1 = 0, /// ODR9 [9:9] /// Port output data ODR9: u1 = 0, /// ODR10 [10:10] /// Port output data ODR10: u1 = 0, /// ODR11 [11:11] /// Port output data ODR11: u1 = 0, /// ODR12 [12:12] /// Port output data ODR12: u1 = 0, /// ODR13 [13:13] /// Port output data ODR13: u1 = 0, /// ODR14 [14:14] /// Port output data ODR14: u1 = 0, /// ODR15 [15:15] /// Port output data ODR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port output data register pub const ODR = Register(ODR_val).init(base_address + 0xc); /// BSRR const BSRR_val = packed struct { /// BS0 [0:0] /// Set bit 0 BS0: u1 = 0, /// BS1 [1:1] /// Set bit 1 BS1: u1 = 0, /// BS2 [2:2] /// Set bit 1 BS2: u1 = 0, /// BS3 [3:3] /// Set bit 3 BS3: u1 = 0, /// BS4 [4:4] /// Set bit 4 BS4: u1 = 0, /// BS5 [5:5] /// Set bit 5 BS5: u1 = 0, /// BS6 [6:6] /// Set bit 6 BS6: u1 = 0, /// BS7 [7:7] /// Set bit 7 BS7: u1 = 0, /// BS8 [8:8] /// Set bit 8 BS8: u1 = 0, /// BS9 [9:9] /// Set bit 9 BS9: u1 = 0, /// BS10 [10:10] /// Set bit 10 BS10: u1 = 0, /// BS11 [11:11] /// Set bit 11 BS11: u1 = 0, /// BS12 [12:12] /// Set bit 12 BS12: u1 = 0, /// BS13 [13:13] /// Set bit 13 BS13: u1 = 0, /// BS14 [14:14] /// Set bit 14 BS14: u1 = 0, /// BS15 [15:15] /// Set bit 15 BS15: u1 = 0, /// BR0 [16:16] 
/// Reset bit 0 BR0: u1 = 0, /// BR1 [17:17] /// Reset bit 1 BR1: u1 = 0, /// BR2 [18:18] /// Reset bit 2 BR2: u1 = 0, /// BR3 [19:19] /// Reset bit 3 BR3: u1 = 0, /// BR4 [20:20] /// Reset bit 4 BR4: u1 = 0, /// BR5 [21:21] /// Reset bit 5 BR5: u1 = 0, /// BR6 [22:22] /// Reset bit 6 BR6: u1 = 0, /// BR7 [23:23] /// Reset bit 7 BR7: u1 = 0, /// BR8 [24:24] /// Reset bit 8 BR8: u1 = 0, /// BR9 [25:25] /// Reset bit 9 BR9: u1 = 0, /// BR10 [26:26] /// Reset bit 10 BR10: u1 = 0, /// BR11 [27:27] /// Reset bit 11 BR11: u1 = 0, /// BR12 [28:28] /// Reset bit 12 BR12: u1 = 0, /// BR13 [29:29] /// Reset bit 13 BR13: u1 = 0, /// BR14 [30:30] /// Reset bit 14 BR14: u1 = 0, /// BR15 [31:31] /// Reset bit 15 BR15: u1 = 0, }; /// Port bit set/reset register pub const BSRR = Register(BSRR_val).init(base_address + 0x10); /// BRR const BRR_val = packed struct { /// BR0 [0:0] /// Reset bit 0 BR0: u1 = 0, /// BR1 [1:1] /// Reset bit 1 BR1: u1 = 0, /// BR2 [2:2] /// Reset bit 1 BR2: u1 = 0, /// BR3 [3:3] /// Reset bit 3 BR3: u1 = 0, /// BR4 [4:4] /// Reset bit 4 BR4: u1 = 0, /// BR5 [5:5] /// Reset bit 5 BR5: u1 = 0, /// BR6 [6:6] /// Reset bit 6 BR6: u1 = 0, /// BR7 [7:7] /// Reset bit 7 BR7: u1 = 0, /// BR8 [8:8] /// Reset bit 8 BR8: u1 = 0, /// BR9 [9:9] /// Reset bit 9 BR9: u1 = 0, /// BR10 [10:10] /// Reset bit 10 BR10: u1 = 0, /// BR11 [11:11] /// Reset bit 11 BR11: u1 = 0, /// BR12 [12:12] /// Reset bit 12 BR12: u1 = 0, /// BR13 [13:13] /// Reset bit 13 BR13: u1 = 0, /// BR14 [14:14] /// Reset bit 14 BR14: u1 = 0, /// BR15 [15:15] /// Reset bit 15 BR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port bit reset register pub const BRR = Register(BRR_val).init(base_address + 0x14); /// LCKR const LCKR_val = packed struct { /// LCK0 [0:0] /// Port A Lock bit 0 LCK0: u1 = 0, /// LCK1 [1:1] /// Port A Lock bit 1 LCK1: u1 = 0, /// LCK2 [2:2] /// Port A Lock bit 2 LCK2: u1 = 0, /// LCK3 [3:3] /// Port A Lock bit 3 LCK3: u1 = 0, /// LCK4 [4:4] /// Port A Lock bit 4 LCK4: u1 = 0, /// LCK5 [5:5] /// Port A Lock bit 5 LCK5: u1 = 0, /// LCK6 [6:6] /// Port A Lock bit 6 LCK6: u1 = 0, /// LCK7 [7:7] /// Port A Lock bit 7 LCK7: u1 = 0, /// LCK8 [8:8] /// Port A Lock bit 8 LCK8: u1 = 0, /// LCK9 [9:9] /// Port A Lock bit 9 LCK9: u1 = 0, /// LCK10 [10:10] /// Port A Lock bit 10 LCK10: u1 = 0, /// LCK11 [11:11] /// Port A Lock bit 11 LCK11: u1 = 0, /// LCK12 [12:12] /// Port A Lock bit 12 LCK12: u1 = 0, /// LCK13 [13:13] /// Port A Lock bit 13 LCK13: u1 = 0, /// LCK14 [14:14] /// Port A Lock bit 14 LCK14: u1 = 0, /// LCK15 [15:15] /// Port A Lock bit 15 LCK15: u1 = 0, /// LCKK [16:16] /// Lock key LCKK: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Port configuration lock pub const LCKR = Register(LCKR_val).init(base_address + 0x18); }; /// General purpose I/O pub const GPIOE = struct { const base_address = 0x40011800; /// CRL const CRL_val = packed struct { /// MODE0 [0:1] /// Port n.0 mode bits MODE0: u2 = 0, /// CNF0 [2:3] /// Port n.0 configuration CNF0: u2 = 1, /// MODE1 [4:5] /// Port n.1 mode bits MODE1: u2 = 0, /// CNF1 [6:7] /// Port n.1 configuration CNF1: u2 = 1, /// MODE2 [8:9] /// Port n.2 mode bits MODE2: u2 = 0, /// CNF2 [10:11] /// Port n.2 configuration CNF2: u2 = 1, /// MODE3 [12:13] /// Port n.3 mode bits MODE3: u2 = 0, /// CNF3 [14:15] /// Port n.3 configuration CNF3: u2 = 1, /// MODE4 [16:17] /// Port n.4 mode bits MODE4: u2 = 0, /// CNF4 [18:19] /// Port n.4 configuration CNF4: u2 = 1, /// MODE5 [20:21] /// Port n.5 mode bits MODE5: u2 = 0, 
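// Usage sketch (illustrative only): driving a pin through BSRR instead of a
// read-modify-write on ODR. A BSRR/BRR write is atomic: BSy sets ODRy, BRy
// clears it, and bits left at 0 mean "no action". Assumes the Register
// wrapper's read()/write() helpers; the anonymous struct literals rely on the
// zero defaults of BSRR_val, and the function name is made up.
fn examplePa5SetAndClear() void {
    GPIOA.BSRR.write(.{ .BS5 = 1 }); // drive PA5 high
    GPIOA.BSRR.write(.{ .BR5 = 1 }); // drive PA5 low (BRR.write(.{ .BR5 = 1 }) is equivalent)
    const is_high = GPIOA.IDR.read().IDR5 == 1; // read the pin state back
    _ = is_high;
}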
/// CNF5 [22:23] /// Port n.5 configuration CNF5: u2 = 1, /// MODE6 [24:25] /// Port n.6 mode bits MODE6: u2 = 0, /// CNF6 [26:27] /// Port n.6 configuration CNF6: u2 = 1, /// MODE7 [28:29] /// Port n.7 mode bits MODE7: u2 = 0, /// CNF7 [30:31] /// Port n.7 configuration CNF7: u2 = 1, }; /// Port configuration register low pub const CRL = Register(CRL_val).init(base_address + 0x0); /// CRH const CRH_val = packed struct { /// MODE8 [0:1] /// Port n.8 mode bits MODE8: u2 = 0, /// CNF8 [2:3] /// Port n.8 configuration CNF8: u2 = 1, /// MODE9 [4:5] /// Port n.9 mode bits MODE9: u2 = 0, /// CNF9 [6:7] /// Port n.9 configuration CNF9: u2 = 1, /// MODE10 [8:9] /// Port n.10 mode bits MODE10: u2 = 0, /// CNF10 [10:11] /// Port n.10 configuration CNF10: u2 = 1, /// MODE11 [12:13] /// Port n.11 mode bits MODE11: u2 = 0, /// CNF11 [14:15] /// Port n.11 configuration CNF11: u2 = 1, /// MODE12 [16:17] /// Port n.12 mode bits MODE12: u2 = 0, /// CNF12 [18:19] /// Port n.12 configuration CNF12: u2 = 1, /// MODE13 [20:21] /// Port n.13 mode bits MODE13: u2 = 0, /// CNF13 [22:23] /// Port n.13 configuration CNF13: u2 = 1, /// MODE14 [24:25] /// Port n.14 mode bits MODE14: u2 = 0, /// CNF14 [26:27] /// Port n.14 configuration CNF14: u2 = 1, /// MODE15 [28:29] /// Port n.15 mode bits MODE15: u2 = 0, /// CNF15 [30:31] /// Port n.15 configuration CNF15: u2 = 1, }; /// Port configuration register high pub const CRH = Register(CRH_val).init(base_address + 0x4); /// IDR const IDR_val = packed struct { /// IDR0 [0:0] /// Port input data IDR0: u1 = 0, /// IDR1 [1:1] /// Port input data IDR1: u1 = 0, /// IDR2 [2:2] /// Port input data IDR2: u1 = 0, /// IDR3 [3:3] /// Port input data IDR3: u1 = 0, /// IDR4 [4:4] /// Port input data IDR4: u1 = 0, /// IDR5 [5:5] /// Port input data IDR5: u1 = 0, /// IDR6 [6:6] /// Port input data IDR6: u1 = 0, /// IDR7 [7:7] /// Port input data IDR7: u1 = 0, /// IDR8 [8:8] /// Port input data IDR8: u1 = 0, /// IDR9 [9:9] /// Port input data IDR9: u1 = 0, /// IDR10 [10:10] /// Port input data IDR10: u1 = 0, /// IDR11 [11:11] /// Port input data IDR11: u1 = 0, /// IDR12 [12:12] /// Port input data IDR12: u1 = 0, /// IDR13 [13:13] /// Port input data IDR13: u1 = 0, /// IDR14 [14:14] /// Port input data IDR14: u1 = 0, /// IDR15 [15:15] /// Port input data IDR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port input data register pub const IDR = Register(IDR_val).init(base_address + 0x8); /// ODR const ODR_val = packed struct { /// ODR0 [0:0] /// Port output data ODR0: u1 = 0, /// ODR1 [1:1] /// Port output data ODR1: u1 = 0, /// ODR2 [2:2] /// Port output data ODR2: u1 = 0, /// ODR3 [3:3] /// Port output data ODR3: u1 = 0, /// ODR4 [4:4] /// Port output data ODR4: u1 = 0, /// ODR5 [5:5] /// Port output data ODR5: u1 = 0, /// ODR6 [6:6] /// Port output data ODR6: u1 = 0, /// ODR7 [7:7] /// Port output data ODR7: u1 = 0, /// ODR8 [8:8] /// Port output data ODR8: u1 = 0, /// ODR9 [9:9] /// Port output data ODR9: u1 = 0, /// ODR10 [10:10] /// Port output data ODR10: u1 = 0, /// ODR11 [11:11] /// Port output data ODR11: u1 = 0, /// ODR12 [12:12] /// Port output data ODR12: u1 = 0, /// ODR13 [13:13] /// Port output data ODR13: u1 = 0, /// ODR14 [14:14] /// Port output data ODR14: u1 = 0, /// ODR15 [15:15] /// Port output data ODR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port output data register pub const ODR = Register(ODR_val).init(base_address + 0xc); /// BSRR const BSRR_val = packed struct { /// BS0 [0:0] /// Set bit 0 BS0: u1 = 
0, /// BS1 [1:1] /// Set bit 1 BS1: u1 = 0, /// BS2 [2:2] /// Set bit 1 BS2: u1 = 0, /// BS3 [3:3] /// Set bit 3 BS3: u1 = 0, /// BS4 [4:4] /// Set bit 4 BS4: u1 = 0, /// BS5 [5:5] /// Set bit 5 BS5: u1 = 0, /// BS6 [6:6] /// Set bit 6 BS6: u1 = 0, /// BS7 [7:7] /// Set bit 7 BS7: u1 = 0, /// BS8 [8:8] /// Set bit 8 BS8: u1 = 0, /// BS9 [9:9] /// Set bit 9 BS9: u1 = 0, /// BS10 [10:10] /// Set bit 10 BS10: u1 = 0, /// BS11 [11:11] /// Set bit 11 BS11: u1 = 0, /// BS12 [12:12] /// Set bit 12 BS12: u1 = 0, /// BS13 [13:13] /// Set bit 13 BS13: u1 = 0, /// BS14 [14:14] /// Set bit 14 BS14: u1 = 0, /// BS15 [15:15] /// Set bit 15 BS15: u1 = 0, /// BR0 [16:16] /// Reset bit 0 BR0: u1 = 0, /// BR1 [17:17] /// Reset bit 1 BR1: u1 = 0, /// BR2 [18:18] /// Reset bit 2 BR2: u1 = 0, /// BR3 [19:19] /// Reset bit 3 BR3: u1 = 0, /// BR4 [20:20] /// Reset bit 4 BR4: u1 = 0, /// BR5 [21:21] /// Reset bit 5 BR5: u1 = 0, /// BR6 [22:22] /// Reset bit 6 BR6: u1 = 0, /// BR7 [23:23] /// Reset bit 7 BR7: u1 = 0, /// BR8 [24:24] /// Reset bit 8 BR8: u1 = 0, /// BR9 [25:25] /// Reset bit 9 BR9: u1 = 0, /// BR10 [26:26] /// Reset bit 10 BR10: u1 = 0, /// BR11 [27:27] /// Reset bit 11 BR11: u1 = 0, /// BR12 [28:28] /// Reset bit 12 BR12: u1 = 0, /// BR13 [29:29] /// Reset bit 13 BR13: u1 = 0, /// BR14 [30:30] /// Reset bit 14 BR14: u1 = 0, /// BR15 [31:31] /// Reset bit 15 BR15: u1 = 0, }; /// Port bit set/reset register pub const BSRR = Register(BSRR_val).init(base_address + 0x10); /// BRR const BRR_val = packed struct { /// BR0 [0:0] /// Reset bit 0 BR0: u1 = 0, /// BR1 [1:1] /// Reset bit 1 BR1: u1 = 0, /// BR2 [2:2] /// Reset bit 1 BR2: u1 = 0, /// BR3 [3:3] /// Reset bit 3 BR3: u1 = 0, /// BR4 [4:4] /// Reset bit 4 BR4: u1 = 0, /// BR5 [5:5] /// Reset bit 5 BR5: u1 = 0, /// BR6 [6:6] /// Reset bit 6 BR6: u1 = 0, /// BR7 [7:7] /// Reset bit 7 BR7: u1 = 0, /// BR8 [8:8] /// Reset bit 8 BR8: u1 = 0, /// BR9 [9:9] /// Reset bit 9 BR9: u1 = 0, /// BR10 [10:10] /// Reset bit 10 BR10: u1 = 0, /// BR11 [11:11] /// Reset bit 11 BR11: u1 = 0, /// BR12 [12:12] /// Reset bit 12 BR12: u1 = 0, /// BR13 [13:13] /// Reset bit 13 BR13: u1 = 0, /// BR14 [14:14] /// Reset bit 14 BR14: u1 = 0, /// BR15 [15:15] /// Reset bit 15 BR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port bit reset register pub const BRR = Register(BRR_val).init(base_address + 0x14); /// LCKR const LCKR_val = packed struct { /// LCK0 [0:0] /// Port A Lock bit 0 LCK0: u1 = 0, /// LCK1 [1:1] /// Port A Lock bit 1 LCK1: u1 = 0, /// LCK2 [2:2] /// Port A Lock bit 2 LCK2: u1 = 0, /// LCK3 [3:3] /// Port A Lock bit 3 LCK3: u1 = 0, /// LCK4 [4:4] /// Port A Lock bit 4 LCK4: u1 = 0, /// LCK5 [5:5] /// Port A Lock bit 5 LCK5: u1 = 0, /// LCK6 [6:6] /// Port A Lock bit 6 LCK6: u1 = 0, /// LCK7 [7:7] /// Port A Lock bit 7 LCK7: u1 = 0, /// LCK8 [8:8] /// Port A Lock bit 8 LCK8: u1 = 0, /// LCK9 [9:9] /// Port A Lock bit 9 LCK9: u1 = 0, /// LCK10 [10:10] /// Port A Lock bit 10 LCK10: u1 = 0, /// LCK11 [11:11] /// Port A Lock bit 11 LCK11: u1 = 0, /// LCK12 [12:12] /// Port A Lock bit 12 LCK12: u1 = 0, /// LCK13 [13:13] /// Port A Lock bit 13 LCK13: u1 = 0, /// LCK14 [14:14] /// Port A Lock bit 14 LCK14: u1 = 0, /// LCK15 [15:15] /// Port A Lock bit 15 LCK15: u1 = 0, /// LCKK [16:16] /// Lock key LCKK: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Port configuration lock pub const LCKR = Register(LCKR_val).init(base_address + 0x18); }; /// General purpose I/O pub const GPIOF = struct { const base_address = 
0x40011c00; /// CRL const CRL_val = packed struct { /// MODE0 [0:1] /// Port n.0 mode bits MODE0: u2 = 0, /// CNF0 [2:3] /// Port n.0 configuration CNF0: u2 = 1, /// MODE1 [4:5] /// Port n.1 mode bits MODE1: u2 = 0, /// CNF1 [6:7] /// Port n.1 configuration CNF1: u2 = 1, /// MODE2 [8:9] /// Port n.2 mode bits MODE2: u2 = 0, /// CNF2 [10:11] /// Port n.2 configuration CNF2: u2 = 1, /// MODE3 [12:13] /// Port n.3 mode bits MODE3: u2 = 0, /// CNF3 [14:15] /// Port n.3 configuration CNF3: u2 = 1, /// MODE4 [16:17] /// Port n.4 mode bits MODE4: u2 = 0, /// CNF4 [18:19] /// Port n.4 configuration CNF4: u2 = 1, /// MODE5 [20:21] /// Port n.5 mode bits MODE5: u2 = 0, /// CNF5 [22:23] /// Port n.5 configuration CNF5: u2 = 1, /// MODE6 [24:25] /// Port n.6 mode bits MODE6: u2 = 0, /// CNF6 [26:27] /// Port n.6 configuration CNF6: u2 = 1, /// MODE7 [28:29] /// Port n.7 mode bits MODE7: u2 = 0, /// CNF7 [30:31] /// Port n.7 configuration CNF7: u2 = 1, }; /// Port configuration register low pub const CRL = Register(CRL_val).init(base_address + 0x0); /// CRH const CRH_val = packed struct { /// MODE8 [0:1] /// Port n.8 mode bits MODE8: u2 = 0, /// CNF8 [2:3] /// Port n.8 configuration CNF8: u2 = 1, /// MODE9 [4:5] /// Port n.9 mode bits MODE9: u2 = 0, /// CNF9 [6:7] /// Port n.9 configuration CNF9: u2 = 1, /// MODE10 [8:9] /// Port n.10 mode bits MODE10: u2 = 0, /// CNF10 [10:11] /// Port n.10 configuration CNF10: u2 = 1, /// MODE11 [12:13] /// Port n.11 mode bits MODE11: u2 = 0, /// CNF11 [14:15] /// Port n.11 configuration CNF11: u2 = 1, /// MODE12 [16:17] /// Port n.12 mode bits MODE12: u2 = 0, /// CNF12 [18:19] /// Port n.12 configuration CNF12: u2 = 1, /// MODE13 [20:21] /// Port n.13 mode bits MODE13: u2 = 0, /// CNF13 [22:23] /// Port n.13 configuration CNF13: u2 = 1, /// MODE14 [24:25] /// Port n.14 mode bits MODE14: u2 = 0, /// CNF14 [26:27] /// Port n.14 configuration CNF14: u2 = 1, /// MODE15 [28:29] /// Port n.15 mode bits MODE15: u2 = 0, /// CNF15 [30:31] /// Port n.15 configuration CNF15: u2 = 1, }; /// Port configuration register high pub const CRH = Register(CRH_val).init(base_address + 0x4); /// IDR const IDR_val = packed struct { /// IDR0 [0:0] /// Port input data IDR0: u1 = 0, /// IDR1 [1:1] /// Port input data IDR1: u1 = 0, /// IDR2 [2:2] /// Port input data IDR2: u1 = 0, /// IDR3 [3:3] /// Port input data IDR3: u1 = 0, /// IDR4 [4:4] /// Port input data IDR4: u1 = 0, /// IDR5 [5:5] /// Port input data IDR5: u1 = 0, /// IDR6 [6:6] /// Port input data IDR6: u1 = 0, /// IDR7 [7:7] /// Port input data IDR7: u1 = 0, /// IDR8 [8:8] /// Port input data IDR8: u1 = 0, /// IDR9 [9:9] /// Port input data IDR9: u1 = 0, /// IDR10 [10:10] /// Port input data IDR10: u1 = 0, /// IDR11 [11:11] /// Port input data IDR11: u1 = 0, /// IDR12 [12:12] /// Port input data IDR12: u1 = 0, /// IDR13 [13:13] /// Port input data IDR13: u1 = 0, /// IDR14 [14:14] /// Port input data IDR14: u1 = 0, /// IDR15 [15:15] /// Port input data IDR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port input data register pub const IDR = Register(IDR_val).init(base_address + 0x8); /// ODR const ODR_val = packed struct { /// ODR0 [0:0] /// Port output data ODR0: u1 = 0, /// ODR1 [1:1] /// Port output data ODR1: u1 = 0, /// ODR2 [2:2] /// Port output data ODR2: u1 = 0, /// ODR3 [3:3] /// Port output data ODR3: u1 = 0, /// ODR4 [4:4] /// Port output data ODR4: u1 = 0, /// ODR5 [5:5] /// Port output data ODR5: u1 = 0, /// ODR6 [6:6] /// Port output data ODR6: u1 = 0, /// ODR7 [7:7] /// Port output data 
ODR7: u1 = 0, /// ODR8 [8:8] /// Port output data ODR8: u1 = 0, /// ODR9 [9:9] /// Port output data ODR9: u1 = 0, /// ODR10 [10:10] /// Port output data ODR10: u1 = 0, /// ODR11 [11:11] /// Port output data ODR11: u1 = 0, /// ODR12 [12:12] /// Port output data ODR12: u1 = 0, /// ODR13 [13:13] /// Port output data ODR13: u1 = 0, /// ODR14 [14:14] /// Port output data ODR14: u1 = 0, /// ODR15 [15:15] /// Port output data ODR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port output data register pub const ODR = Register(ODR_val).init(base_address + 0xc); /// BSRR const BSRR_val = packed struct { /// BS0 [0:0] /// Set bit 0 BS0: u1 = 0, /// BS1 [1:1] /// Set bit 1 BS1: u1 = 0, /// BS2 [2:2] /// Set bit 1 BS2: u1 = 0, /// BS3 [3:3] /// Set bit 3 BS3: u1 = 0, /// BS4 [4:4] /// Set bit 4 BS4: u1 = 0, /// BS5 [5:5] /// Set bit 5 BS5: u1 = 0, /// BS6 [6:6] /// Set bit 6 BS6: u1 = 0, /// BS7 [7:7] /// Set bit 7 BS7: u1 = 0, /// BS8 [8:8] /// Set bit 8 BS8: u1 = 0, /// BS9 [9:9] /// Set bit 9 BS9: u1 = 0, /// BS10 [10:10] /// Set bit 10 BS10: u1 = 0, /// BS11 [11:11] /// Set bit 11 BS11: u1 = 0, /// BS12 [12:12] /// Set bit 12 BS12: u1 = 0, /// BS13 [13:13] /// Set bit 13 BS13: u1 = 0, /// BS14 [14:14] /// Set bit 14 BS14: u1 = 0, /// BS15 [15:15] /// Set bit 15 BS15: u1 = 0, /// BR0 [16:16] /// Reset bit 0 BR0: u1 = 0, /// BR1 [17:17] /// Reset bit 1 BR1: u1 = 0, /// BR2 [18:18] /// Reset bit 2 BR2: u1 = 0, /// BR3 [19:19] /// Reset bit 3 BR3: u1 = 0, /// BR4 [20:20] /// Reset bit 4 BR4: u1 = 0, /// BR5 [21:21] /// Reset bit 5 BR5: u1 = 0, /// BR6 [22:22] /// Reset bit 6 BR6: u1 = 0, /// BR7 [23:23] /// Reset bit 7 BR7: u1 = 0, /// BR8 [24:24] /// Reset bit 8 BR8: u1 = 0, /// BR9 [25:25] /// Reset bit 9 BR9: u1 = 0, /// BR10 [26:26] /// Reset bit 10 BR10: u1 = 0, /// BR11 [27:27] /// Reset bit 11 BR11: u1 = 0, /// BR12 [28:28] /// Reset bit 12 BR12: u1 = 0, /// BR13 [29:29] /// Reset bit 13 BR13: u1 = 0, /// BR14 [30:30] /// Reset bit 14 BR14: u1 = 0, /// BR15 [31:31] /// Reset bit 15 BR15: u1 = 0, }; /// Port bit set/reset register pub const BSRR = Register(BSRR_val).init(base_address + 0x10); /// BRR const BRR_val = packed struct { /// BR0 [0:0] /// Reset bit 0 BR0: u1 = 0, /// BR1 [1:1] /// Reset bit 1 BR1: u1 = 0, /// BR2 [2:2] /// Reset bit 1 BR2: u1 = 0, /// BR3 [3:3] /// Reset bit 3 BR3: u1 = 0, /// BR4 [4:4] /// Reset bit 4 BR4: u1 = 0, /// BR5 [5:5] /// Reset bit 5 BR5: u1 = 0, /// BR6 [6:6] /// Reset bit 6 BR6: u1 = 0, /// BR7 [7:7] /// Reset bit 7 BR7: u1 = 0, /// BR8 [8:8] /// Reset bit 8 BR8: u1 = 0, /// BR9 [9:9] /// Reset bit 9 BR9: u1 = 0, /// BR10 [10:10] /// Reset bit 10 BR10: u1 = 0, /// BR11 [11:11] /// Reset bit 11 BR11: u1 = 0, /// BR12 [12:12] /// Reset bit 12 BR12: u1 = 0, /// BR13 [13:13] /// Reset bit 13 BR13: u1 = 0, /// BR14 [14:14] /// Reset bit 14 BR14: u1 = 0, /// BR15 [15:15] /// Reset bit 15 BR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port bit reset register pub const BRR = Register(BRR_val).init(base_address + 0x14); /// LCKR const LCKR_val = packed struct { /// LCK0 [0:0] /// Port A Lock bit 0 LCK0: u1 = 0, /// LCK1 [1:1] /// Port A Lock bit 1 LCK1: u1 = 0, /// LCK2 [2:2] /// Port A Lock bit 2 LCK2: u1 = 0, /// LCK3 [3:3] /// Port A Lock bit 3 LCK3: u1 = 0, /// LCK4 [4:4] /// Port A Lock bit 4 LCK4: u1 = 0, /// LCK5 [5:5] /// Port A Lock bit 5 LCK5: u1 = 0, /// LCK6 [6:6] /// Port A Lock bit 6 LCK6: u1 = 0, /// LCK7 [7:7] /// Port A Lock bit 7 LCK7: u1 = 0, /// LCK8 [8:8] /// Port A Lock bit 8 LCK8: u1 
= 0, /// LCK9 [9:9] /// Port A Lock bit 9 LCK9: u1 = 0, /// LCK10 [10:10] /// Port A Lock bit 10 LCK10: u1 = 0, /// LCK11 [11:11] /// Port A Lock bit 11 LCK11: u1 = 0, /// LCK12 [12:12] /// Port A Lock bit 12 LCK12: u1 = 0, /// LCK13 [13:13] /// Port A Lock bit 13 LCK13: u1 = 0, /// LCK14 [14:14] /// Port A Lock bit 14 LCK14: u1 = 0, /// LCK15 [15:15] /// Port A Lock bit 15 LCK15: u1 = 0, /// LCKK [16:16] /// Lock key LCKK: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Port configuration lock pub const LCKR = Register(LCKR_val).init(base_address + 0x18); }; /// General purpose I/O pub const GPIOG = struct { const base_address = 0x40012000; /// CRL const CRL_val = packed struct { /// MODE0 [0:1] /// Port n.0 mode bits MODE0: u2 = 0, /// CNF0 [2:3] /// Port n.0 configuration CNF0: u2 = 1, /// MODE1 [4:5] /// Port n.1 mode bits MODE1: u2 = 0, /// CNF1 [6:7] /// Port n.1 configuration CNF1: u2 = 1, /// MODE2 [8:9] /// Port n.2 mode bits MODE2: u2 = 0, /// CNF2 [10:11] /// Port n.2 configuration CNF2: u2 = 1, /// MODE3 [12:13] /// Port n.3 mode bits MODE3: u2 = 0, /// CNF3 [14:15] /// Port n.3 configuration CNF3: u2 = 1, /// MODE4 [16:17] /// Port n.4 mode bits MODE4: u2 = 0, /// CNF4 [18:19] /// Port n.4 configuration CNF4: u2 = 1, /// MODE5 [20:21] /// Port n.5 mode bits MODE5: u2 = 0, /// CNF5 [22:23] /// Port n.5 configuration CNF5: u2 = 1, /// MODE6 [24:25] /// Port n.6 mode bits MODE6: u2 = 0, /// CNF6 [26:27] /// Port n.6 configuration CNF6: u2 = 1, /// MODE7 [28:29] /// Port n.7 mode bits MODE7: u2 = 0, /// CNF7 [30:31] /// Port n.7 configuration CNF7: u2 = 1, }; /// Port configuration register low pub const CRL = Register(CRL_val).init(base_address + 0x0); /// CRH const CRH_val = packed struct { /// MODE8 [0:1] /// Port n.8 mode bits MODE8: u2 = 0, /// CNF8 [2:3] /// Port n.8 configuration CNF8: u2 = 1, /// MODE9 [4:5] /// Port n.9 mode bits MODE9: u2 = 0, /// CNF9 [6:7] /// Port n.9 configuration CNF9: u2 = 1, /// MODE10 [8:9] /// Port n.10 mode bits MODE10: u2 = 0, /// CNF10 [10:11] /// Port n.10 configuration CNF10: u2 = 1, /// MODE11 [12:13] /// Port n.11 mode bits MODE11: u2 = 0, /// CNF11 [14:15] /// Port n.11 configuration CNF11: u2 = 1, /// MODE12 [16:17] /// Port n.12 mode bits MODE12: u2 = 0, /// CNF12 [18:19] /// Port n.12 configuration CNF12: u2 = 1, /// MODE13 [20:21] /// Port n.13 mode bits MODE13: u2 = 0, /// CNF13 [22:23] /// Port n.13 configuration CNF13: u2 = 1, /// MODE14 [24:25] /// Port n.14 mode bits MODE14: u2 = 0, /// CNF14 [26:27] /// Port n.14 configuration CNF14: u2 = 1, /// MODE15 [28:29] /// Port n.15 mode bits MODE15: u2 = 0, /// CNF15 [30:31] /// Port n.15 configuration CNF15: u2 = 1, }; /// Port configuration register high pub const CRH = Register(CRH_val).init(base_address + 0x4); /// IDR const IDR_val = packed struct { /// IDR0 [0:0] /// Port input data IDR0: u1 = 0, /// IDR1 [1:1] /// Port input data IDR1: u1 = 0, /// IDR2 [2:2] /// Port input data IDR2: u1 = 0, /// IDR3 [3:3] /// Port input data IDR3: u1 = 0, /// IDR4 [4:4] /// Port input data IDR4: u1 = 0, /// IDR5 [5:5] /// Port input data IDR5: u1 = 0, /// IDR6 [6:6] /// Port input data IDR6: u1 = 0, /// IDR7 [7:7] /// Port input data IDR7: u1 = 0, /// IDR8 [8:8] /// Port input data IDR8: u1 = 0, /// IDR9 [9:9] /// Port input data IDR9: u1 = 0, /// IDR10 [10:10] /// Port input data IDR10: u1 = 0, /// IDR11 [11:11] /// Port input data IDR11: u1 = 0, /// IDR12 [12:12] /// Port input data IDR12: u1 = 0, /// IDR13 [13:13] /// Port input data IDR13: u1 = 0, /// IDR14 [14:14] 
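// Usage sketch (illustrative only): freezing a pin's configuration with the
// LCKR lock sequence from the reference manual. The LCKy selection must stay
// identical across all three writes; afterwards LCKK reads 0 on the first
// read and 1 on the second once the lock is active (until the next reset).
// Assumes the Register wrapper's read()/write() helpers; the name is made up.
fn exampleLockPf0Configuration() bool {
    GPIOF.LCKR.write(.{ .LCK0 = 1, .LCKK = 1 });
    GPIOF.LCKR.write(.{ .LCK0 = 1, .LCKK = 0 });
    GPIOF.LCKR.write(.{ .LCK0 = 1, .LCKK = 1 });
    _ = GPIOF.LCKR.read(); // LCKK reads as 0 here
    return GPIOF.LCKR.read().LCKK == 1; // lock is now active
}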
/// Port input data IDR14: u1 = 0, /// IDR15 [15:15] /// Port input data IDR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port input data register pub const IDR = Register(IDR_val).init(base_address + 0x8); /// ODR const ODR_val = packed struct { /// ODR0 [0:0] /// Port output data ODR0: u1 = 0, /// ODR1 [1:1] /// Port output data ODR1: u1 = 0, /// ODR2 [2:2] /// Port output data ODR2: u1 = 0, /// ODR3 [3:3] /// Port output data ODR3: u1 = 0, /// ODR4 [4:4] /// Port output data ODR4: u1 = 0, /// ODR5 [5:5] /// Port output data ODR5: u1 = 0, /// ODR6 [6:6] /// Port output data ODR6: u1 = 0, /// ODR7 [7:7] /// Port output data ODR7: u1 = 0, /// ODR8 [8:8] /// Port output data ODR8: u1 = 0, /// ODR9 [9:9] /// Port output data ODR9: u1 = 0, /// ODR10 [10:10] /// Port output data ODR10: u1 = 0, /// ODR11 [11:11] /// Port output data ODR11: u1 = 0, /// ODR12 [12:12] /// Port output data ODR12: u1 = 0, /// ODR13 [13:13] /// Port output data ODR13: u1 = 0, /// ODR14 [14:14] /// Port output data ODR14: u1 = 0, /// ODR15 [15:15] /// Port output data ODR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port output data register pub const ODR = Register(ODR_val).init(base_address + 0xc); /// BSRR const BSRR_val = packed struct { /// BS0 [0:0] /// Set bit 0 BS0: u1 = 0, /// BS1 [1:1] /// Set bit 1 BS1: u1 = 0, /// BS2 [2:2] /// Set bit 1 BS2: u1 = 0, /// BS3 [3:3] /// Set bit 3 BS3: u1 = 0, /// BS4 [4:4] /// Set bit 4 BS4: u1 = 0, /// BS5 [5:5] /// Set bit 5 BS5: u1 = 0, /// BS6 [6:6] /// Set bit 6 BS6: u1 = 0, /// BS7 [7:7] /// Set bit 7 BS7: u1 = 0, /// BS8 [8:8] /// Set bit 8 BS8: u1 = 0, /// BS9 [9:9] /// Set bit 9 BS9: u1 = 0, /// BS10 [10:10] /// Set bit 10 BS10: u1 = 0, /// BS11 [11:11] /// Set bit 11 BS11: u1 = 0, /// BS12 [12:12] /// Set bit 12 BS12: u1 = 0, /// BS13 [13:13] /// Set bit 13 BS13: u1 = 0, /// BS14 [14:14] /// Set bit 14 BS14: u1 = 0, /// BS15 [15:15] /// Set bit 15 BS15: u1 = 0, /// BR0 [16:16] /// Reset bit 0 BR0: u1 = 0, /// BR1 [17:17] /// Reset bit 1 BR1: u1 = 0, /// BR2 [18:18] /// Reset bit 2 BR2: u1 = 0, /// BR3 [19:19] /// Reset bit 3 BR3: u1 = 0, /// BR4 [20:20] /// Reset bit 4 BR4: u1 = 0, /// BR5 [21:21] /// Reset bit 5 BR5: u1 = 0, /// BR6 [22:22] /// Reset bit 6 BR6: u1 = 0, /// BR7 [23:23] /// Reset bit 7 BR7: u1 = 0, /// BR8 [24:24] /// Reset bit 8 BR8: u1 = 0, /// BR9 [25:25] /// Reset bit 9 BR9: u1 = 0, /// BR10 [26:26] /// Reset bit 10 BR10: u1 = 0, /// BR11 [27:27] /// Reset bit 11 BR11: u1 = 0, /// BR12 [28:28] /// Reset bit 12 BR12: u1 = 0, /// BR13 [29:29] /// Reset bit 13 BR13: u1 = 0, /// BR14 [30:30] /// Reset bit 14 BR14: u1 = 0, /// BR15 [31:31] /// Reset bit 15 BR15: u1 = 0, }; /// Port bit set/reset register pub const BSRR = Register(BSRR_val).init(base_address + 0x10); /// BRR const BRR_val = packed struct { /// BR0 [0:0] /// Reset bit 0 BR0: u1 = 0, /// BR1 [1:1] /// Reset bit 1 BR1: u1 = 0, /// BR2 [2:2] /// Reset bit 1 BR2: u1 = 0, /// BR3 [3:3] /// Reset bit 3 BR3: u1 = 0, /// BR4 [4:4] /// Reset bit 4 BR4: u1 = 0, /// BR5 [5:5] /// Reset bit 5 BR5: u1 = 0, /// BR6 [6:6] /// Reset bit 6 BR6: u1 = 0, /// BR7 [7:7] /// Reset bit 7 BR7: u1 = 0, /// BR8 [8:8] /// Reset bit 8 BR8: u1 = 0, /// BR9 [9:9] /// Reset bit 9 BR9: u1 = 0, /// BR10 [10:10] /// Reset bit 10 BR10: u1 = 0, /// BR11 [11:11] /// Reset bit 11 BR11: u1 = 0, /// BR12 [12:12] /// Reset bit 12 BR12: u1 = 0, /// BR13 [13:13] /// Reset bit 13 BR13: u1 = 0, /// BR14 [14:14] /// Reset bit 14 BR14: u1 = 0, /// BR15 [15:15] /// Reset 
bit 15 BR15: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Port bit reset register pub const BRR = Register(BRR_val).init(base_address + 0x14); /// LCKR const LCKR_val = packed struct { /// LCK0 [0:0] /// Port A Lock bit 0 LCK0: u1 = 0, /// LCK1 [1:1] /// Port A Lock bit 1 LCK1: u1 = 0, /// LCK2 [2:2] /// Port A Lock bit 2 LCK2: u1 = 0, /// LCK3 [3:3] /// Port A Lock bit 3 LCK3: u1 = 0, /// LCK4 [4:4] /// Port A Lock bit 4 LCK4: u1 = 0, /// LCK5 [5:5] /// Port A Lock bit 5 LCK5: u1 = 0, /// LCK6 [6:6] /// Port A Lock bit 6 LCK6: u1 = 0, /// LCK7 [7:7] /// Port A Lock bit 7 LCK7: u1 = 0, /// LCK8 [8:8] /// Port A Lock bit 8 LCK8: u1 = 0, /// LCK9 [9:9] /// Port A Lock bit 9 LCK9: u1 = 0, /// LCK10 [10:10] /// Port A Lock bit 10 LCK10: u1 = 0, /// LCK11 [11:11] /// Port A Lock bit 11 LCK11: u1 = 0, /// LCK12 [12:12] /// Port A Lock bit 12 LCK12: u1 = 0, /// LCK13 [13:13] /// Port A Lock bit 13 LCK13: u1 = 0, /// LCK14 [14:14] /// Port A Lock bit 14 LCK14: u1 = 0, /// LCK15 [15:15] /// Port A Lock bit 15 LCK15: u1 = 0, /// LCKK [16:16] /// Lock key LCKK: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Port configuration lock pub const LCKR = Register(LCKR_val).init(base_address + 0x18); }; /// Alternate function I/O pub const AFIO = struct { const base_address = 0x40010000; /// EVCR const EVCR_val = packed struct { /// PIN [0:3] /// Pin selection PIN: u4 = 0, /// PORT [4:6] /// Port selection PORT: u3 = 0, /// EVOE [7:7] /// Event Output Enable EVOE: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Event Control Register pub const EVCR = Register(EVCR_val).init(base_address + 0x0); /// MAPR const MAPR_val = packed struct { /// SPI1_REMAP [0:0] /// SPI1 remapping SPI1_REMAP: u1 = 0, /// I2C1_REMAP [1:1] /// I2C1 remapping I2C1_REMAP: u1 = 0, /// USART1_REMAP [2:2] /// USART1 remapping USART1_REMAP: u1 = 0, /// USART2_REMAP [3:3] /// USART2 remapping USART2_REMAP: u1 = 0, /// USART3_REMAP [4:5] /// USART3 remapping USART3_REMAP: u2 = 0, /// TIM1_REMAP [6:7] /// TIM1 remapping TIM1_REMAP: u2 = 0, /// TIM2_REMAP [8:9] /// TIM2 remapping TIM2_REMAP: u2 = 0, /// TIM3_REMAP [10:11] /// TIM3 remapping TIM3_REMAP: u2 = 0, /// TIM4_REMAP [12:12] /// TIM4 remapping TIM4_REMAP: u1 = 0, /// CAN_REMAP [13:14] /// CAN1 remapping CAN_REMAP: u2 = 0, /// PD01_REMAP [15:15] /// Port D0/Port D1 mapping on PD01_REMAP: u1 = 0, /// TIM5CH4_IREMAP [16:16] /// Set and cleared by TIM5CH4_IREMAP: u1 = 0, /// ADC1_ETRGINJ_REMAP [17:17] /// ADC 1 External trigger injected ADC1_ETRGINJ_REMAP: u1 = 0, /// ADC1_ETRGREG_REMAP [18:18] /// ADC 1 external trigger regular ADC1_ETRGREG_REMAP: u1 = 0, /// ADC2_ETRGINJ_REMAP [19:19] /// ADC 2 external trigger injected ADC2_ETRGINJ_REMAP: u1 = 0, /// ADC2_ETRGREG_REMAP [20:20] /// ADC 2 external trigger regular ADC2_ETRGREG_REMAP: u1 = 0, /// unused [21:23] _unused21: u3 = 0, /// SWJ_CFG [24:26] /// Serial wire JTAG SWJ_CFG: u3 = 0, /// unused [27:31] _unused27: u5 = 0, }; /// AF remap and debug I/O configuration pub const MAPR = Register(MAPR_val).init(base_address + 0x4); /// EXTICR1 const EXTICR1_val = packed struct { /// EXTI0 [0:3] /// EXTI0 configuration EXTI0: u4 = 0, /// EXTI1 [4:7] /// EXTI1 configuration EXTI1: u4 = 0, /// EXTI2 [8:11] /// EXTI2 configuration EXTI2: u4 = 0, /// EXTI3 [12:15] /// EXTI3 configuration EXTI3: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// External interrupt configuration register 1 pub const EXTICR1 = 
Register(EXTICR1_val).init(base_address + 0x8); /// EXTICR2 const EXTICR2_val = packed struct { /// EXTI4 [0:3] /// EXTI4 configuration EXTI4: u4 = 0, /// EXTI5 [4:7] /// EXTI5 configuration EXTI5: u4 = 0, /// EXTI6 [8:11] /// EXTI6 configuration EXTI6: u4 = 0, /// EXTI7 [12:15] /// EXTI7 configuration EXTI7: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// External interrupt configuration register 2 pub const EXTICR2 = Register(EXTICR2_val).init(base_address + 0xc); /// EXTICR3 const EXTICR3_val = packed struct { /// EXTI8 [0:3] /// EXTI8 configuration EXTI8: u4 = 0, /// EXTI9 [4:7] /// EXTI9 configuration EXTI9: u4 = 0, /// EXTI10 [8:11] /// EXTI10 configuration EXTI10: u4 = 0, /// EXTI11 [12:15] /// EXTI11 configuration EXTI11: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// External interrupt configuration register 3 pub const EXTICR3 = Register(EXTICR3_val).init(base_address + 0x10); /// EXTICR4 const EXTICR4_val = packed struct { /// EXTI12 [0:3] /// EXTI12 configuration EXTI12: u4 = 0, /// EXTI13 [4:7] /// EXTI13 configuration EXTI13: u4 = 0, /// EXTI14 [8:11] /// EXTI14 configuration EXTI14: u4 = 0, /// EXTI15 [12:15] /// EXTI15 configuration EXTI15: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// External interrupt configuration register 4 pub const EXTICR4 = Register(EXTICR4_val).init(base_address + 0x14); /// MAPR2 const MAPR2_val = packed struct { /// unused [0:4] _unused0: u5 = 0, /// TIM9_REMAP [5:5] /// TIM9 remapping TIM9_REMAP: u1 = 0, /// TIM10_REMAP [6:6] /// TIM10 remapping TIM10_REMAP: u1 = 0, /// TIM11_REMAP [7:7] /// TIM11 remapping TIM11_REMAP: u1 = 0, /// TIM13_REMAP [8:8] /// TIM13 remapping TIM13_REMAP: u1 = 0, /// TIM14_REMAP [9:9] /// TIM14 remapping TIM14_REMAP: u1 = 0, /// FSMC_NADV [10:10] /// NADV connect/disconnect FSMC_NADV: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// AF remap and debug I/O configuration pub const MAPR2 = Register(MAPR2_val).init(base_address + 0x1c); }; /// EXTI pub const EXTI = struct { const base_address = 0x40010400; /// IMR const IMR_val = packed struct { /// MR0 [0:0] /// Interrupt Mask on line 0 MR0: u1 = 0, /// MR1 [1:1] /// Interrupt Mask on line 1 MR1: u1 = 0, /// MR2 [2:2] /// Interrupt Mask on line 2 MR2: u1 = 0, /// MR3 [3:3] /// Interrupt Mask on line 3 MR3: u1 = 0, /// MR4 [4:4] /// Interrupt Mask on line 4 MR4: u1 = 0, /// MR5 [5:5] /// Interrupt Mask on line 5 MR5: u1 = 0, /// MR6 [6:6] /// Interrupt Mask on line 6 MR6: u1 = 0, /// MR7 [7:7] /// Interrupt Mask on line 7 MR7: u1 = 0, /// MR8 [8:8] /// Interrupt Mask on line 8 MR8: u1 = 0, /// MR9 [9:9] /// Interrupt Mask on line 9 MR9: u1 = 0, /// MR10 [10:10] /// Interrupt Mask on line 10 MR10: u1 = 0, /// MR11 [11:11] /// Interrupt Mask on line 11 MR11: u1 = 0, /// MR12 [12:12] /// Interrupt Mask on line 12 MR12: u1 = 0, /// MR13 [13:13] /// Interrupt Mask on line 13 MR13: u1 = 0, /// MR14 [14:14] /// Interrupt Mask on line 14 MR14: u1 = 0, /// MR15 [15:15] /// Interrupt Mask on line 15 MR15: u1 = 0, /// MR16 [16:16] /// Interrupt Mask on line 16 MR16: u1 = 0, /// MR17 [17:17] /// Interrupt Mask on line 17 MR17: u1 = 0, /// MR18 [18:18] /// Interrupt Mask on line 18 MR18: u1 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// Interrupt mask register pub const IMR = Register(IMR_val).init(base_address + 0x0); /// EMR const EMR_val = packed struct { /// MR0 [0:0] /// Event Mask on line 0 MR0: u1 = 0, /// MR1 [1:1] /// Event Mask on line 1 
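// Usage sketch (illustrative only): AFIO.EXTICR1..4 select which GPIO port
// feeds each EXTI line, four bits per line (0b0000 = port A, 0b0001 = port B,
// 0b0010 = port C, ...). Lines 0-3 live in EXTICR1, 4-7 in EXTICR2, and so on.
// Assumes the Register wrapper's read()/write() helpers and that the AFIO
// clock is already enabled through RCC; the function name is made up.
fn exampleRoutePb3ToExtiLine3() void {
    var exticr1 = AFIO.EXTICR1.read();
    exticr1.EXTI3 = 0b0001; // EXTI line 3 driven by PB3
    AFIO.EXTICR1.write(exticr1);
}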
MR1: u1 = 0, /// MR2 [2:2] /// Event Mask on line 2 MR2: u1 = 0, /// MR3 [3:3] /// Event Mask on line 3 MR3: u1 = 0, /// MR4 [4:4] /// Event Mask on line 4 MR4: u1 = 0, /// MR5 [5:5] /// Event Mask on line 5 MR5: u1 = 0, /// MR6 [6:6] /// Event Mask on line 6 MR6: u1 = 0, /// MR7 [7:7] /// Event Mask on line 7 MR7: u1 = 0, /// MR8 [8:8] /// Event Mask on line 8 MR8: u1 = 0, /// MR9 [9:9] /// Event Mask on line 9 MR9: u1 = 0, /// MR10 [10:10] /// Event Mask on line 10 MR10: u1 = 0, /// MR11 [11:11] /// Event Mask on line 11 MR11: u1 = 0, /// MR12 [12:12] /// Event Mask on line 12 MR12: u1 = 0, /// MR13 [13:13] /// Event Mask on line 13 MR13: u1 = 0, /// MR14 [14:14] /// Event Mask on line 14 MR14: u1 = 0, /// MR15 [15:15] /// Event Mask on line 15 MR15: u1 = 0, /// MR16 [16:16] /// Event Mask on line 16 MR16: u1 = 0, /// MR17 [17:17] /// Event Mask on line 17 MR17: u1 = 0, /// MR18 [18:18] /// Event Mask on line 18 MR18: u1 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// Event mask register (EXTI_EMR) pub const EMR = Register(EMR_val).init(base_address + 0x4); /// RTSR const RTSR_val = packed struct { /// TR0 [0:0] /// Rising trigger event configuration of TR0: u1 = 0, /// TR1 [1:1] /// Rising trigger event configuration of TR1: u1 = 0, /// TR2 [2:2] /// Rising trigger event configuration of TR2: u1 = 0, /// TR3 [3:3] /// Rising trigger event configuration of TR3: u1 = 0, /// TR4 [4:4] /// Rising trigger event configuration of TR4: u1 = 0, /// TR5 [5:5] /// Rising trigger event configuration of TR5: u1 = 0, /// TR6 [6:6] /// Rising trigger event configuration of TR6: u1 = 0, /// TR7 [7:7] /// Rising trigger event configuration of TR7: u1 = 0, /// TR8 [8:8] /// Rising trigger event configuration of TR8: u1 = 0, /// TR9 [9:9] /// Rising trigger event configuration of TR9: u1 = 0, /// TR10 [10:10] /// Rising trigger event configuration of TR10: u1 = 0, /// TR11 [11:11] /// Rising trigger event configuration of TR11: u1 = 0, /// TR12 [12:12] /// Rising trigger event configuration of TR12: u1 = 0, /// TR13 [13:13] /// Rising trigger event configuration of TR13: u1 = 0, /// TR14 [14:14] /// Rising trigger event configuration of TR14: u1 = 0, /// TR15 [15:15] /// Rising trigger event configuration of TR15: u1 = 0, /// TR16 [16:16] /// Rising trigger event configuration of TR16: u1 = 0, /// TR17 [17:17] /// Rising trigger event configuration of TR17: u1 = 0, /// TR18 [18:18] /// Rising trigger event configuration of TR18: u1 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// Rising Trigger selection register pub const RTSR = Register(RTSR_val).init(base_address + 0x8); /// FTSR const FTSR_val = packed struct { /// TR0 [0:0] /// Falling trigger event configuration of TR0: u1 = 0, /// TR1 [1:1] /// Falling trigger event configuration of TR1: u1 = 0, /// TR2 [2:2] /// Falling trigger event configuration of TR2: u1 = 0, /// TR3 [3:3] /// Falling trigger event configuration of TR3: u1 = 0, /// TR4 [4:4] /// Falling trigger event configuration of TR4: u1 = 0, /// TR5 [5:5] /// Falling trigger event configuration of TR5: u1 = 0, /// TR6 [6:6] /// Falling trigger event configuration of TR6: u1 = 0, /// TR7 [7:7] /// Falling trigger event configuration of TR7: u1 = 0, /// TR8 [8:8] /// Falling trigger event configuration of TR8: u1 = 0, /// TR9 [9:9] /// Falling trigger event configuration of TR9: u1 = 0, /// TR10 [10:10] /// Falling trigger event configuration of TR10: u1 = 0, /// TR11 [11:11] /// Falling trigger event configuration of TR11: u1 = 0, /// TR12 [12:12] 
/// Falling trigger event configuration of TR12: u1 = 0, /// TR13 [13:13] /// Falling trigger event configuration of TR13: u1 = 0, /// TR14 [14:14] /// Falling trigger event configuration of TR14: u1 = 0, /// TR15 [15:15] /// Falling trigger event configuration of TR15: u1 = 0, /// TR16 [16:16] /// Falling trigger event configuration of TR16: u1 = 0, /// TR17 [17:17] /// Falling trigger event configuration of TR17: u1 = 0, /// TR18 [18:18] /// Falling trigger event configuration of TR18: u1 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// Falling Trigger selection register pub const FTSR = Register(FTSR_val).init(base_address + 0xc); /// SWIER const SWIER_val = packed struct { /// SWIER0 [0:0] /// Software Interrupt on line SWIER0: u1 = 0, /// SWIER1 [1:1] /// Software Interrupt on line SWIER1: u1 = 0, /// SWIER2 [2:2] /// Software Interrupt on line SWIER2: u1 = 0, /// SWIER3 [3:3] /// Software Interrupt on line SWIER3: u1 = 0, /// SWIER4 [4:4] /// Software Interrupt on line SWIER4: u1 = 0, /// SWIER5 [5:5] /// Software Interrupt on line SWIER5: u1 = 0, /// SWIER6 [6:6] /// Software Interrupt on line SWIER6: u1 = 0, /// SWIER7 [7:7] /// Software Interrupt on line SWIER7: u1 = 0, /// SWIER8 [8:8] /// Software Interrupt on line SWIER8: u1 = 0, /// SWIER9 [9:9] /// Software Interrupt on line SWIER9: u1 = 0, /// SWIER10 [10:10] /// Software Interrupt on line SWIER10: u1 = 0, /// SWIER11 [11:11] /// Software Interrupt on line SWIER11: u1 = 0, /// SWIER12 [12:12] /// Software Interrupt on line SWIER12: u1 = 0, /// SWIER13 [13:13] /// Software Interrupt on line SWIER13: u1 = 0, /// SWIER14 [14:14] /// Software Interrupt on line SWIER14: u1 = 0, /// SWIER15 [15:15] /// Software Interrupt on line SWIER15: u1 = 0, /// SWIER16 [16:16] /// Software Interrupt on line SWIER16: u1 = 0, /// SWIER17 [17:17] /// Software Interrupt on line SWIER17: u1 = 0, /// SWIER18 [18:18] /// Software Interrupt on line SWIER18: u1 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// Software interrupt event register pub const SWIER = Register(SWIER_val).init(base_address + 0x10); /// PR const PR_val = packed struct { /// PR0 [0:0] /// Pending bit 0 PR0: u1 = 0, /// PR1 [1:1] /// Pending bit 1 PR1: u1 = 0, /// PR2 [2:2] /// Pending bit 2 PR2: u1 = 0, /// PR3 [3:3] /// Pending bit 3 PR3: u1 = 0, /// PR4 [4:4] /// Pending bit 4 PR4: u1 = 0, /// PR5 [5:5] /// Pending bit 5 PR5: u1 = 0, /// PR6 [6:6] /// Pending bit 6 PR6: u1 = 0, /// PR7 [7:7] /// Pending bit 7 PR7: u1 = 0, /// PR8 [8:8] /// Pending bit 8 PR8: u1 = 0, /// PR9 [9:9] /// Pending bit 9 PR9: u1 = 0, /// PR10 [10:10] /// Pending bit 10 PR10: u1 = 0, /// PR11 [11:11] /// Pending bit 11 PR11: u1 = 0, /// PR12 [12:12] /// Pending bit 12 PR12: u1 = 0, /// PR13 [13:13] /// Pending bit 13 PR13: u1 = 0, /// PR14 [14:14] /// Pending bit 14 PR14: u1 = 0, /// PR15 [15:15] /// Pending bit 15 PR15: u1 = 0, /// PR16 [16:16] /// Pending bit 16 PR16: u1 = 0, /// PR17 [17:17] /// Pending bit 17 PR17: u1 = 0, /// PR18 [18:18] /// Pending bit 18 PR18: u1 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// Pending register (EXTI_PR) pub const PR = Register(PR_val).init(base_address + 0x14); }; /// DMA controller pub const DMA1 = struct { const base_address = 0x40020000; /// ISR const ISR_val = packed struct { /// GIF1 [0:0] /// Channel 1 Global interrupt GIF1: u1 = 0, /// TCIF1 [1:1] /// Channel 1 Transfer Complete TCIF1: u1 = 0, /// HTIF1 [2:2] /// Channel 1 Half Transfer Complete HTIF1: u1 = 0, /// TEIF1 [3:3] /// Channel 1 
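// Usage sketch (editorial addition, not part of the generated definitions):
// configuring EXTI line 0 as a rising-edge interrupt source using the EXTI
// registers defined above. The read()/modify() accessor names are assumed to
// be provided by the Register(...) wrapper declared earlier in this file
// (svd4zig-style); adjust them to the actual wrapper API. Routing a GPIO pin
// onto the EXTI line is done separately through the AFIO EXTICRx registers,
// and the interrupt must also be enabled in the NVIC. `regs` stands for
// whatever namespace the application imports these definitions under.
fn exampleExtiLine0RisingEdge(regs: anytype) void {
    regs.EXTI.RTSR.modify(.{ .TR0 = 1 }); // trigger on the rising edge of line 0
    regs.EXTI.FTSR.modify(.{ .TR0 = 0 }); // no falling-edge trigger
    regs.EXTI.IMR.modify(.{ .MR0 = 1 }); // unmask line 0
}
// In the interrupt handler, the pending flag is cleared by writing 1 to the
// PR0 bit of EXTI.PR (write-1-to-clear).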
Transfer Error TEIF1: u1 = 0, /// GIF2 [4:4] /// Channel 2 Global interrupt GIF2: u1 = 0, /// TCIF2 [5:5] /// Channel 2 Transfer Complete TCIF2: u1 = 0, /// HTIF2 [6:6] /// Channel 2 Half Transfer Complete HTIF2: u1 = 0, /// TEIF2 [7:7] /// Channel 2 Transfer Error TEIF2: u1 = 0, /// GIF3 [8:8] /// Channel 3 Global interrupt GIF3: u1 = 0, /// TCIF3 [9:9] /// Channel 3 Transfer Complete TCIF3: u1 = 0, /// HTIF3 [10:10] /// Channel 3 Half Transfer Complete HTIF3: u1 = 0, /// TEIF3 [11:11] /// Channel 3 Transfer Error TEIF3: u1 = 0, /// GIF4 [12:12] /// Channel 4 Global interrupt GIF4: u1 = 0, /// TCIF4 [13:13] /// Channel 4 Transfer Complete TCIF4: u1 = 0, /// HTIF4 [14:14] /// Channel 4 Half Transfer Complete HTIF4: u1 = 0, /// TEIF4 [15:15] /// Channel 4 Transfer Error TEIF4: u1 = 0, /// GIF5 [16:16] /// Channel 5 Global interrupt GIF5: u1 = 0, /// TCIF5 [17:17] /// Channel 5 Transfer Complete TCIF5: u1 = 0, /// HTIF5 [18:18] /// Channel 5 Half Transfer Complete HTIF5: u1 = 0, /// TEIF5 [19:19] /// Channel 5 Transfer Error TEIF5: u1 = 0, /// GIF6 [20:20] /// Channel 6 Global interrupt GIF6: u1 = 0, /// TCIF6 [21:21] /// Channel 6 Transfer Complete TCIF6: u1 = 0, /// HTIF6 [22:22] /// Channel 6 Half Transfer Complete HTIF6: u1 = 0, /// TEIF6 [23:23] /// Channel 6 Transfer Error TEIF6: u1 = 0, /// GIF7 [24:24] /// Channel 7 Global interrupt GIF7: u1 = 0, /// TCIF7 [25:25] /// Channel 7 Transfer Complete TCIF7: u1 = 0, /// HTIF7 [26:26] /// Channel 7 Half Transfer Complete HTIF7: u1 = 0, /// TEIF7 [27:27] /// Channel 7 Transfer Error TEIF7: u1 = 0, /// unused [28:31] _unused28: u4 = 0, }; /// DMA interrupt status register pub const ISR = Register(ISR_val).init(base_address + 0x0); /// IFCR const IFCR_val = packed struct { /// CGIF1 [0:0] /// Channel 1 Global interrupt CGIF1: u1 = 0, /// CTCIF1 [1:1] /// Channel 1 Transfer Complete CTCIF1: u1 = 0, /// CHTIF1 [2:2] /// Channel 1 Half Transfer CHTIF1: u1 = 0, /// CTEIF1 [3:3] /// Channel 1 Transfer Error CTEIF1: u1 = 0, /// CGIF2 [4:4] /// Channel 2 Global interrupt CGIF2: u1 = 0, /// CTCIF2 [5:5] /// Channel 2 Transfer Complete CTCIF2: u1 = 0, /// CHTIF2 [6:6] /// Channel 2 Half Transfer CHTIF2: u1 = 0, /// CTEIF2 [7:7] /// Channel 2 Transfer Error CTEIF2: u1 = 0, /// CGIF3 [8:8] /// Channel 3 Global interrupt CGIF3: u1 = 0, /// CTCIF3 [9:9] /// Channel 3 Transfer Complete CTCIF3: u1 = 0, /// CHTIF3 [10:10] /// Channel 3 Half Transfer CHTIF3: u1 = 0, /// CTEIF3 [11:11] /// Channel 3 Transfer Error CTEIF3: u1 = 0, /// CGIF4 [12:12] /// Channel 4 Global interrupt CGIF4: u1 = 0, /// CTCIF4 [13:13] /// Channel 4 Transfer Complete CTCIF4: u1 = 0, /// CHTIF4 [14:14] /// Channel 4 Half Transfer CHTIF4: u1 = 0, /// CTEIF4 [15:15] /// Channel 4 Transfer Error CTEIF4: u1 = 0, /// CGIF5 [16:16] /// Channel 5 Global interrupt CGIF5: u1 = 0, /// CTCIF5 [17:17] /// Channel 5 Transfer Complete CTCIF5: u1 = 0, /// CHTIF5 [18:18] /// Channel 5 Half Transfer CHTIF5: u1 = 0, /// CTEIF5 [19:19] /// Channel 5 Transfer Error CTEIF5: u1 = 0, /// CGIF6 [20:20] /// Channel 6 Global interrupt CGIF6: u1 = 0, /// CTCIF6 [21:21] /// Channel 6 Transfer Complete CTCIF6: u1 = 0, /// CHTIF6 [22:22] /// Channel 6 Half Transfer CHTIF6: u1 = 0, /// CTEIF6 [23:23] /// Channel 6 Transfer Error CTEIF6: u1 = 0, /// CGIF7 [24:24] /// Channel 7 Global interrupt CGIF7: u1 = 0, /// CTCIF7 [25:25] /// Channel 7 Transfer Complete CTCIF7: u1 = 0, /// CHTIF7 [26:26] /// Channel 7 Half Transfer CHTIF7: u1 = 0, /// CTEIF7 [27:27] /// Channel 7 Transfer Error CTEIF7: u1 = 0, /// unused [28:31] 
_unused28: u4 = 0, }; /// DMA interrupt flag clear register pub const IFCR = Register(IFCR_val).init(base_address + 0x4); /// CCR1 const CCR1_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR1 = Register(CCR1_val).init(base_address + 0x8); /// CNDTR1 const CNDTR1_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 1 number of data pub const CNDTR1 = Register(CNDTR1_val).init(base_address + 0xc); /// CPAR1 const CPAR1_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 1 peripheral address pub const CPAR1 = Register(CPAR1_val).init(base_address + 0x10); /// CMAR1 const CMAR1_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 1 memory address pub const CMAR1 = Register(CMAR1_val).init(base_address + 0x14); /// CCR2 const CCR2_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR2 = Register(CCR2_val).init(base_address + 0x1c); /// CNDTR2 const CNDTR2_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 2 number of data pub const CNDTR2 = Register(CNDTR2_val).init(base_address + 0x20); /// CPAR2 const CPAR2_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 2 peripheral address pub const CPAR2 = Register(CPAR2_val).init(base_address + 0x24); /// CMAR2 const CMAR2_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 2 memory address pub const CMAR2 = Register(CMAR2_val).init(base_address + 0x28); /// CCR3 const CCR3_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// 
PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR3 = Register(CCR3_val).init(base_address + 0x30); /// CNDTR3 const CNDTR3_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 3 number of data pub const CNDTR3 = Register(CNDTR3_val).init(base_address + 0x34); /// CPAR3 const CPAR3_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 3 peripheral address pub const CPAR3 = Register(CPAR3_val).init(base_address + 0x38); /// CMAR3 const CMAR3_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 3 memory address pub const CMAR3 = Register(CMAR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR4 = Register(CCR4_val).init(base_address + 0x44); /// CNDTR4 const CNDTR4_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 4 number of data pub const CNDTR4 = Register(CNDTR4_val).init(base_address + 0x48); /// CPAR4 const CPAR4_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 4 peripheral address pub const CPAR4 = Register(CPAR4_val).init(base_address + 0x4c); /// CMAR4 const CMAR4_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 4 memory address pub const CMAR4 = Register(CMAR4_val).init(base_address + 0x50); /// CCR5 const CCR5_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR5 = 
Register(CCR5_val).init(base_address + 0x58); /// CNDTR5 const CNDTR5_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 5 number of data pub const CNDTR5 = Register(CNDTR5_val).init(base_address + 0x5c); /// CPAR5 const CPAR5_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 5 peripheral address pub const CPAR5 = Register(CPAR5_val).init(base_address + 0x60); /// CMAR5 const CMAR5_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 5 memory address pub const CMAR5 = Register(CMAR5_val).init(base_address + 0x64); /// CCR6 const CCR6_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR6 = Register(CCR6_val).init(base_address + 0x6c); /// CNDTR6 const CNDTR6_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 6 number of data pub const CNDTR6 = Register(CNDTR6_val).init(base_address + 0x70); /// CPAR6 const CPAR6_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 6 peripheral address pub const CPAR6 = Register(CPAR6_val).init(base_address + 0x74); /// CMAR6 const CMAR6_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 6 memory address pub const CMAR6 = Register(CMAR6_val).init(base_address + 0x78); /// CCR7 const CCR7_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR7 = Register(CCR7_val).init(base_address + 0x80); /// CNDTR7 const CNDTR7_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 7 number of data pub const CNDTR7 = Register(CNDTR7_val).init(base_address + 0x84); /// CPAR7 const CPAR7_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 7 peripheral address pub const CPAR7 = 
Register(CPAR7_val).init(base_address + 0x88); /// CMAR7 const CMAR7_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 7 memory address pub const CMAR7 = Register(CMAR7_val).init(base_address + 0x8c); }; /// DMA controller pub const DMA2 = struct { const base_address = 0x40020400; /// ISR const ISR_val = packed struct { /// GIF1 [0:0] /// Channel 1 Global interrupt GIF1: u1 = 0, /// TCIF1 [1:1] /// Channel 1 Transfer Complete TCIF1: u1 = 0, /// HTIF1 [2:2] /// Channel 1 Half Transfer Complete HTIF1: u1 = 0, /// TEIF1 [3:3] /// Channel 1 Transfer Error TEIF1: u1 = 0, /// GIF2 [4:4] /// Channel 2 Global interrupt GIF2: u1 = 0, /// TCIF2 [5:5] /// Channel 2 Transfer Complete TCIF2: u1 = 0, /// HTIF2 [6:6] /// Channel 2 Half Transfer Complete HTIF2: u1 = 0, /// TEIF2 [7:7] /// Channel 2 Transfer Error TEIF2: u1 = 0, /// GIF3 [8:8] /// Channel 3 Global interrupt GIF3: u1 = 0, /// TCIF3 [9:9] /// Channel 3 Transfer Complete TCIF3: u1 = 0, /// HTIF3 [10:10] /// Channel 3 Half Transfer Complete HTIF3: u1 = 0, /// TEIF3 [11:11] /// Channel 3 Transfer Error TEIF3: u1 = 0, /// GIF4 [12:12] /// Channel 4 Global interrupt GIF4: u1 = 0, /// TCIF4 [13:13] /// Channel 4 Transfer Complete TCIF4: u1 = 0, /// HTIF4 [14:14] /// Channel 4 Half Transfer Complete HTIF4: u1 = 0, /// TEIF4 [15:15] /// Channel 4 Transfer Error TEIF4: u1 = 0, /// GIF5 [16:16] /// Channel 5 Global interrupt GIF5: u1 = 0, /// TCIF5 [17:17] /// Channel 5 Transfer Complete TCIF5: u1 = 0, /// HTIF5 [18:18] /// Channel 5 Half Transfer Complete HTIF5: u1 = 0, /// TEIF5 [19:19] /// Channel 5 Transfer Error TEIF5: u1 = 0, /// GIF6 [20:20] /// Channel 6 Global interrupt GIF6: u1 = 0, /// TCIF6 [21:21] /// Channel 6 Transfer Complete TCIF6: u1 = 0, /// HTIF6 [22:22] /// Channel 6 Half Transfer Complete HTIF6: u1 = 0, /// TEIF6 [23:23] /// Channel 6 Transfer Error TEIF6: u1 = 0, /// GIF7 [24:24] /// Channel 7 Global interrupt GIF7: u1 = 0, /// TCIF7 [25:25] /// Channel 7 Transfer Complete TCIF7: u1 = 0, /// HTIF7 [26:26] /// Channel 7 Half Transfer Complete HTIF7: u1 = 0, /// TEIF7 [27:27] /// Channel 7 Transfer Error TEIF7: u1 = 0, /// unused [28:31] _unused28: u4 = 0, }; /// DMA interrupt status register pub const ISR = Register(ISR_val).init(base_address + 0x0); /// IFCR const IFCR_val = packed struct { /// CGIF1 [0:0] /// Channel 1 Global interrupt CGIF1: u1 = 0, /// CTCIF1 [1:1] /// Channel 1 Transfer Complete CTCIF1: u1 = 0, /// CHTIF1 [2:2] /// Channel 1 Half Transfer CHTIF1: u1 = 0, /// CTEIF1 [3:3] /// Channel 1 Transfer Error CTEIF1: u1 = 0, /// CGIF2 [4:4] /// Channel 2 Global interrupt CGIF2: u1 = 0, /// CTCIF2 [5:5] /// Channel 2 Transfer Complete CTCIF2: u1 = 0, /// CHTIF2 [6:6] /// Channel 2 Half Transfer CHTIF2: u1 = 0, /// CTEIF2 [7:7] /// Channel 2 Transfer Error CTEIF2: u1 = 0, /// CGIF3 [8:8] /// Channel 3 Global interrupt CGIF3: u1 = 0, /// CTCIF3 [9:9] /// Channel 3 Transfer Complete CTCIF3: u1 = 0, /// CHTIF3 [10:10] /// Channel 3 Half Transfer CHTIF3: u1 = 0, /// CTEIF3 [11:11] /// Channel 3 Transfer Error CTEIF3: u1 = 0, /// CGIF4 [12:12] /// Channel 4 Global interrupt CGIF4: u1 = 0, /// CTCIF4 [13:13] /// Channel 4 Transfer Complete CTCIF4: u1 = 0, /// CHTIF4 [14:14] /// Channel 4 Half Transfer CHTIF4: u1 = 0, /// CTEIF4 [15:15] /// Channel 4 Transfer Error CTEIF4: u1 = 0, /// CGIF5 [16:16] /// Channel 5 Global interrupt CGIF5: u1 = 0, /// CTCIF5 [17:17] /// Channel 5 Transfer Complete CTCIF5: u1 = 0, /// CHTIF5 [18:18] /// Channel 5 Half Transfer CHTIF5: u1 = 0, /// CTEIF5 [19:19] 
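// Usage sketch (editorial addition): a software-triggered memory-to-memory
// copy on DMA1 channel 1, using the CCR1/CNDTR1/CPAR1/CMAR1/ISR/IFCR
// registers defined above. The read()/modify() accessor names are assumed
// from the Register(...) wrapper declared earlier in this file; the DMA1
// clock must already be enabled through RCC, and `regs` is whatever
// namespace the application imports these definitions under.
fn exampleDma1MemToMemCopy(regs: anytype, src: []const u32, dst: []u32) void {
    // dst.len is assumed to be >= src.len.
    regs.DMA1.CCR1.modify(.{ .EN = 0 }); // channel must be disabled while it is reconfigured
    regs.DMA1.CPAR1.modify(.{ .PA = @intCast(u32, @ptrToInt(src.ptr)) }); // "peripheral" address = source
    regs.DMA1.CMAR1.modify(.{ .MA = @intCast(u32, @ptrToInt(dst.ptr)) }); // memory address = destination
    regs.DMA1.CNDTR1.modify(.{ .NDT = @intCast(u16, src.len) }); // number of 32-bit items
    regs.DMA1.CCR1.modify(.{
        .MEM2MEM = 1, // memory-to-memory mode
        .PL = 0b10, // high priority
        .MSIZE = 0b10, // 32-bit accesses on both sides
        .PSIZE = 0b10,
        .MINC = 1, // increment both addresses
        .PINC = 1,
        .DIR = 0, // read from the "peripheral" side (the source buffer)
        .EN = 1, // start the transfer
    });
    while (regs.DMA1.ISR.read().TCIF1 == 0) {} // busy-wait for transfer complete
    regs.DMA1.IFCR.modify(.{ .CTCIF1 = 1, .CGIF1 = 1 }); // clear the channel 1 flags (write-1-to-clear)
}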
/// Channel 5 Transfer Error CTEIF5: u1 = 0, /// CGIF6 [20:20] /// Channel 6 Global interrupt CGIF6: u1 = 0, /// CTCIF6 [21:21] /// Channel 6 Transfer Complete CTCIF6: u1 = 0, /// CHTIF6 [22:22] /// Channel 6 Half Transfer CHTIF6: u1 = 0, /// CTEIF6 [23:23] /// Channel 6 Transfer Error CTEIF6: u1 = 0, /// CGIF7 [24:24] /// Channel 7 Global interrupt CGIF7: u1 = 0, /// CTCIF7 [25:25] /// Channel 7 Transfer Complete CTCIF7: u1 = 0, /// CHTIF7 [26:26] /// Channel 7 Half Transfer CHTIF7: u1 = 0, /// CTEIF7 [27:27] /// Channel 7 Transfer Error CTEIF7: u1 = 0, /// unused [28:31] _unused28: u4 = 0, }; /// DMA interrupt flag clear register pub const IFCR = Register(IFCR_val).init(base_address + 0x4); /// CCR1 const CCR1_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR1 = Register(CCR1_val).init(base_address + 0x8); /// CNDTR1 const CNDTR1_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 1 number of data pub const CNDTR1 = Register(CNDTR1_val).init(base_address + 0xc); /// CPAR1 const CPAR1_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 1 peripheral address pub const CPAR1 = Register(CPAR1_val).init(base_address + 0x10); /// CMAR1 const CMAR1_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 1 memory address pub const CMAR1 = Register(CMAR1_val).init(base_address + 0x14); /// CCR2 const CCR2_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR2 = Register(CCR2_val).init(base_address + 0x1c); /// CNDTR2 const CNDTR2_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 2 number of data pub const CNDTR2 = Register(CNDTR2_val).init(base_address + 0x20); /// CPAR2 const CPAR2_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 2 peripheral address pub const CPAR2 = 
Register(CPAR2_val).init(base_address + 0x24); /// CMAR2 const CMAR2_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 2 memory address pub const CMAR2 = Register(CMAR2_val).init(base_address + 0x28); /// CCR3 const CCR3_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR3 = Register(CCR3_val).init(base_address + 0x30); /// CNDTR3 const CNDTR3_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 3 number of data pub const CNDTR3 = Register(CNDTR3_val).init(base_address + 0x34); /// CPAR3 const CPAR3_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 3 peripheral address pub const CPAR3 = Register(CPAR3_val).init(base_address + 0x38); /// CMAR3 const CMAR3_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 3 memory address pub const CMAR3 = Register(CMAR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR4 = Register(CCR4_val).init(base_address + 0x44); /// CNDTR4 const CNDTR4_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 4 number of data pub const CNDTR4 = Register(CNDTR4_val).init(base_address + 0x48); /// CPAR4 const CPAR4_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 4 peripheral address pub const CPAR4 = Register(CPAR4_val).init(base_address + 0x4c); /// CMAR4 const CMAR4_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 4 memory address pub const CMAR4 = Register(CMAR4_val).init(base_address + 0x50); /// CCR5 const CCR5_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt 
TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR5 = Register(CCR5_val).init(base_address + 0x58); /// CNDTR5 const CNDTR5_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 5 number of data pub const CNDTR5 = Register(CNDTR5_val).init(base_address + 0x5c); /// CPAR5 const CPAR5_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 5 peripheral address pub const CPAR5 = Register(CPAR5_val).init(base_address + 0x60); /// CMAR5 const CMAR5_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 5 memory address pub const CMAR5 = Register(CMAR5_val).init(base_address + 0x64); /// CCR6 const CCR6_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR6 = Register(CCR6_val).init(base_address + 0x6c); /// CNDTR6 const CNDTR6_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 6 number of data pub const CNDTR6 = Register(CNDTR6_val).init(base_address + 0x70); /// CPAR6 const CPAR6_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 6 peripheral address pub const CPAR6 = Register(CPAR6_val).init(base_address + 0x74); /// CMAR6 const CMAR6_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 6 memory address pub const CMAR6 = Register(CMAR6_val).init(base_address + 0x78); /// CCR7 const CCR7_val = packed struct { /// EN [0:0] /// Channel enable EN: u1 = 0, /// TCIE [1:1] /// Transfer complete interrupt TCIE: u1 = 0, /// HTIE [2:2] /// Half Transfer interrupt HTIE: u1 = 0, /// TEIE [3:3] /// Transfer error interrupt TEIE: u1 = 0, /// DIR [4:4] /// Data transfer direction DIR: u1 = 0, /// CIRC [5:5] /// Circular mode CIRC: u1 = 0, /// PINC [6:6] /// Peripheral increment mode PINC: u1 = 0, /// MINC [7:7] /// Memory increment mode MINC: u1 = 0, /// PSIZE [8:9] /// Peripheral size PSIZE: u2 = 0, /// MSIZE [10:11] /// Memory size MSIZE: u2 = 0, /// PL [12:13] /// Channel Priority level PL: u2 = 0, /// MEM2MEM [14:14] /// Memory to memory mode MEM2MEM: u1 = 0, /// unused [15:31] _unused15: u1 = 0, 
_unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel configuration register pub const CCR7 = Register(CCR7_val).init(base_address + 0x80); /// CNDTR7 const CNDTR7_val = packed struct { /// NDT [0:15] /// Number of data to transfer NDT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA channel 7 number of data pub const CNDTR7 = Register(CNDTR7_val).init(base_address + 0x84); /// CPAR7 const CPAR7_val = packed struct { /// PA [0:31] /// Peripheral address PA: u32 = 0, }; /// DMA channel 7 peripheral address pub const CPAR7 = Register(CPAR7_val).init(base_address + 0x88); /// CMAR7 const CMAR7_val = packed struct { /// MA [0:31] /// Memory address MA: u32 = 0, }; /// DMA channel 7 memory address pub const CMAR7 = Register(CMAR7_val).init(base_address + 0x8c); }; /// Secure digital input/output pub const SDIO = struct { const base_address = 0x40018000; /// POWER const POWER_val = packed struct { /// PWRCTRL [0:1] /// PWRCTRL PWRCTRL: u2 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Bits 1:0 = PWRCTRL: Power supply control pub const POWER = Register(POWER_val).init(base_address + 0x0); /// CLKCR const CLKCR_val = packed struct { /// CLKDIV [0:7] /// Clock divide factor CLKDIV: u8 = 0, /// CLKEN [8:8] /// Clock enable bit CLKEN: u1 = 0, /// PWRSAV [9:9] /// Power saving configuration PWRSAV: u1 = 0, /// BYPASS [10:10] /// Clock divider bypass enable BYPASS: u1 = 0, /// WIDBUS [11:12] /// Wide bus mode enable bit WIDBUS: u2 = 0, /// NEGEDGE [13:13] /// SDIO_CK dephasing selection NEGEDGE: u1 = 0, /// HWFC_EN [14:14] /// HW Flow Control enable HWFC_EN: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// SDI clock control register pub const CLKCR = Register(CLKCR_val).init(base_address + 0x4); /// ARG const ARG_val = packed struct { /// CMDARG [0:31] /// Command argument CMDARG: u32 = 0, }; /// Bits 31:0 = : Command argument pub const ARG = Register(ARG_val).init(base_address + 0x8); /// CMD const CMD_val = packed struct { /// CMDINDEX [0:5] /// CMDINDEX CMDINDEX: u6 = 0, /// WAITRESP [6:7] /// WAITRESP WAITRESP: u2 = 0, /// WAITINT [8:8] /// WAITINT WAITINT: u1 = 0, /// WAITPEND [9:9] /// WAITPEND WAITPEND: u1 = 0, /// CPSMEN [10:10] /// CPSMEN CPSMEN: u1 = 0, /// SDIOSuspend [11:11] /// SDIOSuspend SDIOSuspend: u1 = 0, /// ENCMDcompl [12:12] /// ENCMDcompl ENCMDcompl: u1 = 0, /// nIEN [13:13] /// nIEN nIEN: u1 = 0, /// CE_ATACMD [14:14] /// CE_ATACMD CE_ATACMD: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// SDIO command register pub const CMD = Register(CMD_val).init(base_address + 0xc); /// RESPCMD const RESPCMD_val = packed struct { /// RESPCMD [0:5] /// RESPCMD RESPCMD: u6 = 0, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// SDIO command register pub const RESPCMD = Register(RESPCMD_val).init(base_address + 0x10); /// RESPI1 const RESPI1_val = packed struct { /// CARDSTATUS1 [0:31] /// CARDSTATUS1 CARDSTATUS1: u32 = 0, }; /// Bits 31:0 = CARDSTATUS1 pub const RESPI1 = Register(RESPI1_val).init(base_address + 0x14); /// RESP2 const RESP2_val = packed struct { /// CARDSTATUS2 [0:31] /// CARDSTATUS2 CARDSTATUS2: u32 = 0, }; /// Bits 31:0 = CARDSTATUS2 pub const RESP2 = Register(RESP2_val).init(base_address + 0x18); /// RESP3 const RESP3_val = packed struct { /// CARDSTATUS3 [0:31] /// CARDSTATUS3 CARDSTATUS3: u32 = 0, }; /// Bits 31:0 = CARDSTATUS3 pub const RESP3 = 
Register(RESP3_val).init(base_address + 0x1c); /// RESP4 const RESP4_val = packed struct { /// CARDSTATUS4 [0:31] /// CARDSTATUS4 CARDSTATUS4: u32 = 0, }; /// Bits 31:0 = CARDSTATUS4 pub const RESP4 = Register(RESP4_val).init(base_address + 0x20); /// DTIMER const DTIMER_val = packed struct { /// DATATIME [0:31] /// Data timeout period DATATIME: u32 = 0, }; /// Bits 31:0 = DATATIME: Data timeout pub const DTIMER = Register(DTIMER_val).init(base_address + 0x24); /// DLEN const DLEN_val = packed struct { /// DATALENGTH [0:24] /// Data length value DATALENGTH: u25 = 0, /// unused [25:31] _unused25: u7 = 0, }; /// Bits 24:0 = DATALENGTH: Data length pub const DLEN = Register(DLEN_val).init(base_address + 0x28); /// DCTRL const DCTRL_val = packed struct { /// DTEN [0:0] /// DTEN DTEN: u1 = 0, /// DTDIR [1:1] /// DTDIR DTDIR: u1 = 0, /// DTMODE [2:2] /// DTMODE DTMODE: u1 = 0, /// DMAEN [3:3] /// DMAEN DMAEN: u1 = 0, /// DBLOCKSIZE [4:7] /// DBLOCKSIZE DBLOCKSIZE: u4 = 0, /// PWSTART [8:8] /// PWSTART PWSTART: u1 = 0, /// PWSTOP [9:9] /// PWSTOP PWSTOP: u1 = 0, /// RWMOD [10:10] /// RWMOD RWMOD: u1 = 0, /// SDIOEN [11:11] /// SDIOEN SDIOEN: u1 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// SDIO data control register pub const DCTRL = Register(DCTRL_val).init(base_address + 0x2c); /// DCOUNT const DCOUNT_val = packed struct { /// DATACOUNT [0:24] /// Data count value DATACOUNT: u25 = 0, /// unused [25:31] _unused25: u7 = 0, }; /// Bits 24:0 = DATACOUNT: Data count pub const DCOUNT = Register(DCOUNT_val).init(base_address + 0x30); /// STA const STA_val = packed struct { /// CCRCFAIL [0:0] /// CCRCFAIL CCRCFAIL: u1 = 0, /// DCRCFAIL [1:1] /// DCRCFAIL DCRCFAIL: u1 = 0, /// CTIMEOUT [2:2] /// CTIMEOUT CTIMEOUT: u1 = 0, /// DTIMEOUT [3:3] /// DTIMEOUT DTIMEOUT: u1 = 0, /// TXUNDERR [4:4] /// TXUNDERR TXUNDERR: u1 = 0, /// RXOVERR [5:5] /// RXOVERR RXOVERR: u1 = 0, /// CMDREND [6:6] /// CMDREND CMDREND: u1 = 0, /// CMDSENT [7:7] /// CMDSENT CMDSENT: u1 = 0, /// DATAEND [8:8] /// DATAEND DATAEND: u1 = 0, /// STBITERR [9:9] /// STBITERR STBITERR: u1 = 0, /// DBCKEND [10:10] /// DBCKEND DBCKEND: u1 = 0, /// CMDACT [11:11] /// CMDACT CMDACT: u1 = 0, /// TXACT [12:12] /// TXACT TXACT: u1 = 0, /// RXACT [13:13] /// RXACT RXACT: u1 = 0, /// TXFIFOHE [14:14] /// TXFIFOHE TXFIFOHE: u1 = 0, /// RXFIFOHF [15:15] /// RXFIFOHF RXFIFOHF: u1 = 0, /// TXFIFOF [16:16] /// TXFIFOF TXFIFOF: u1 = 0, /// RXFIFOF [17:17] /// RXFIFOF RXFIFOF: u1 = 0, /// TXFIFOE [18:18] /// TXFIFOE TXFIFOE: u1 = 0, /// RXFIFOE [19:19] /// RXFIFOE RXFIFOE: u1 = 0, /// TXDAVL [20:20] /// TXDAVL TXDAVL: u1 = 0, /// RXDAVL [21:21] /// RXDAVL RXDAVL: u1 = 0, /// SDIOIT [22:22] /// SDIOIT SDIOIT: u1 = 0, /// CEATAEND [23:23] /// CEATAEND CEATAEND: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// SDIO status register pub const STA = Register(STA_val).init(base_address + 0x34); /// ICR const ICR_val = packed struct { /// CCRCFAILC [0:0] /// CCRCFAILC CCRCFAILC: u1 = 0, /// DCRCFAILC [1:1] /// DCRCFAILC DCRCFAILC: u1 = 0, /// CTIMEOUTC [2:2] /// CTIMEOUTC CTIMEOUTC: u1 = 0, /// DTIMEOUTC [3:3] /// DTIMEOUTC DTIMEOUTC: u1 = 0, /// TXUNDERRC [4:4] /// TXUNDERRC TXUNDERRC: u1 = 0, /// RXOVERRC [5:5] /// RXOVERRC RXOVERRC: u1 = 0, /// CMDRENDC [6:6] /// CMDRENDC CMDRENDC: u1 = 0, /// CMDSENTC [7:7] /// CMDSENTC CMDSENTC: u1 = 0, /// DATAENDC [8:8] /// DATAENDC DATAENDC: u1 = 0, /// STBITERRC [9:9] /// STBITERRC STBITERRC: u1 = 0, /// DBCKENDC [10:10] /// DBCKENDC DBCKENDC: u1 = 0, /// unused [11:21] 
_unused11: u5 = 0, _unused16: u6 = 0, /// SDIOITC [22:22] /// SDIOITC SDIOITC: u1 = 0, /// CEATAENDC [23:23] /// CEATAENDC CEATAENDC: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// SDIO interrupt clear register pub const ICR = Register(ICR_val).init(base_address + 0x38); /// MASK const MASK_val = packed struct { /// CCRCFAILIE [0:0] /// CCRCFAILIE CCRCFAILIE: u1 = 0, /// DCRCFAILIE [1:1] /// DCRCFAILIE DCRCFAILIE: u1 = 0, /// CTIMEOUTIE [2:2] /// CTIMEOUTIE CTIMEOUTIE: u1 = 0, /// DTIMEOUTIE [3:3] /// DTIMEOUTIE DTIMEOUTIE: u1 = 0, /// TXUNDERRIE [4:4] /// TXUNDERRIE TXUNDERRIE: u1 = 0, /// RXOVERRIE [5:5] /// RXOVERRIE RXOVERRIE: u1 = 0, /// CMDRENDIE [6:6] /// CMDRENDIE CMDRENDIE: u1 = 0, /// CMDSENTIE [7:7] /// CMDSENTIE CMDSENTIE: u1 = 0, /// DATAENDIE [8:8] /// DATAENDIE DATAENDIE: u1 = 0, /// STBITERRIE [9:9] /// STBITERRIE STBITERRIE: u1 = 0, /// DBACKENDIE [10:10] /// DBACKENDIE DBACKENDIE: u1 = 0, /// CMDACTIE [11:11] /// CMDACTIE CMDACTIE: u1 = 0, /// TXACTIE [12:12] /// TXACTIE TXACTIE: u1 = 0, /// RXACTIE [13:13] /// RXACTIE RXACTIE: u1 = 0, /// TXFIFOHEIE [14:14] /// TXFIFOHEIE TXFIFOHEIE: u1 = 0, /// RXFIFOHFIE [15:15] /// RXFIFOHFIE RXFIFOHFIE: u1 = 0, /// TXFIFOFIE [16:16] /// TXFIFOFIE TXFIFOFIE: u1 = 0, /// RXFIFOFIE [17:17] /// RXFIFOFIE RXFIFOFIE: u1 = 0, /// TXFIFOEIE [18:18] /// TXFIFOEIE TXFIFOEIE: u1 = 0, /// RXFIFOEIE [19:19] /// RXFIFOEIE RXFIFOEIE: u1 = 0, /// TXDAVLIE [20:20] /// TXDAVLIE TXDAVLIE: u1 = 0, /// RXDAVLIE [21:21] /// RXDAVLIE RXDAVLIE: u1 = 0, /// SDIOITIE [22:22] /// SDIOITIE SDIOITIE: u1 = 0, /// CEATENDIE [23:23] /// CEATENDIE CEATENDIE: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// SDIO mask register (SDIO_MASK) pub const MASK = Register(MASK_val).init(base_address + 0x3c); /// FIFOCNT const FIFOCNT_val = packed struct { /// FIF0COUNT [0:23] /// FIF0COUNT FIF0COUNT: u24 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// Bits 23:0 = FIFOCOUNT: Remaining number of pub const FIFOCNT = Register(FIFOCNT_val).init(base_address + 0x48); /// FIFO const FIFO_val = packed struct { /// FIFOData [0:31] /// FIFOData FIFOData: u32 = 0, }; /// bits 31:0 = FIFOData: Receive and transmit pub const FIFO = Register(FIFO_val).init(base_address + 0x80); }; /// Real time clock pub const RTC = struct { const base_address = 0x40002800; /// CRH const CRH_val = packed struct { /// SECIE [0:0] /// Second interrupt Enable SECIE: u1 = 0, /// ALRIE [1:1] /// Alarm interrupt Enable ALRIE: u1 = 0, /// OWIE [2:2] /// Overflow interrupt Enable OWIE: u1 = 0, /// unused [3:31] _unused3: u5 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Control Register High pub const CRH = Register(CRH_val).init(base_address + 0x0); /// CRL const CRL_val = packed struct { /// SECF [0:0] /// Second Flag SECF: u1 = 0, /// ALRF [1:1] /// Alarm Flag ALRF: u1 = 0, /// OWF [2:2] /// Overflow Flag OWF: u1 = 0, /// RSF [3:3] /// Registers Synchronized RSF: u1 = 0, /// CNF [4:4] /// Configuration Flag CNF: u1 = 0, /// RTOFF [5:5] /// RTC operation OFF RTOFF: u1 = 1, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Control Register Low pub const CRL = Register(CRL_val).init(base_address + 0x4); /// PRLH const PRLH_val = packed struct { /// PRLH [0:3] /// RTC Prescaler Load Register PRLH: u4 = 0, /// unused [4:31] _unused4: u4 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Prescaler Load Register pub const PRLH = Register(PRLH_val).init(base_address + 0x8); /// PRLL const PRLL_val = packed 
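// Usage sketch (editorial addition): switching the SDIO peripheral on and
// enabling the card clock at an initialization-friendly frequency, using the
// POWER and CLKCR registers defined above. The modify() accessor name is
// assumed from the Register(...) wrapper declared earlier in this file; RCC
// clock gating and GPIO alternate-function setup are not shown. SDIO_CK is
// SDIOCLK / (CLKDIV + 2), so with a 72 MHz SDIOCLK a divider of 178 gives
// the 400 kHz commonly used during card identification.
fn exampleSdioClockOn(regs: anytype) void {
    regs.SDIO.POWER.modify(.{ .PWRCTRL = 0b11 }); // power-on state
    regs.SDIO.CLKCR.modify(.{
        .CLKDIV = 178, // ~400 kHz from a 72 MHz SDIOCLK
        .WIDBUS = 0b00, // 1-bit bus during initialization
        .CLKEN = 1, // enable SDIO_CK
    });
}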
struct { /// PRLL [0:15] /// RTC Prescaler Divider Register PRLL: u16 = 32768, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Prescaler Load Register pub const PRLL = Register(PRLL_val).init(base_address + 0xc); /// DIVH const DIVH_val = packed struct { /// DIVH [0:3] /// RTC prescaler divider register DIVH: u4 = 0, /// unused [4:31] _unused4: u4 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Prescaler Divider Register pub const DIVH = Register(DIVH_val).init(base_address + 0x10); /// DIVL const DIVL_val = packed struct { /// DIVL [0:15] /// RTC prescaler divider register DIVL: u16 = 32768, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Prescaler Divider Register pub const DIVL = Register(DIVL_val).init(base_address + 0x14); /// CNTH const CNTH_val = packed struct { /// CNTH [0:15] /// RTC counter register high CNTH: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Counter Register High pub const CNTH = Register(CNTH_val).init(base_address + 0x18); /// CNTL const CNTL_val = packed struct { /// CNTL [0:15] /// RTC counter register Low CNTL: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Counter Register Low pub const CNTL = Register(CNTL_val).init(base_address + 0x1c); /// ALRH const ALRH_val = packed struct { /// ALRH [0:15] /// RTC alarm register high ALRH: u16 = 65535, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Alarm Register High pub const ALRH = Register(ALRH_val).init(base_address + 0x20); /// ALRL const ALRL_val = packed struct { /// ALRL [0:15] /// RTC alarm register low ALRL: u16 = 65535, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC Alarm Register Low pub const ALRL = Register(ALRL_val).init(base_address + 0x24); }; /// Backup registers pub const BKP = struct { const base_address = 0x40006c00; /// DR1 const DR1_val = packed struct { /// D1 [0:15] /// Backup data D1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR1 = Register(DR1_val).init(base_address + 0x0); /// DR2 const DR2_val = packed struct { /// D2 [0:15] /// Backup data D2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR2 = Register(DR2_val).init(base_address + 0x4); /// DR3 const DR3_val = packed struct { /// D3 [0:15] /// Backup data D3: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR3 = Register(DR3_val).init(base_address + 0x8); /// DR4 const DR4_val = packed struct { /// D4 [0:15] /// Backup data D4: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR4 = Register(DR4_val).init(base_address + 0xc); /// DR5 const DR5_val = packed struct { /// D5 [0:15] /// Backup data D5: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR5 = Register(DR5_val).init(base_address + 0x10); /// DR6 const DR6_val = packed struct { /// D6 [0:15] /// Backup data D6: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR6 = Register(DR6_val).init(base_address + 0x14); /// DR7 const DR7_val = packed struct { /// D7 [0:15] /// Backup data D7: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR7 = 
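// Usage sketch (editorial addition): reading the 32-bit RTC counter, which
// the registers above expose as the CNTH/CNTL halves. The high half is read
// again after the low half to guard against a rollover between the two
// accesses. read() is the assumed accessor of the Register(...) wrapper; the
// backup-domain/RTC clocks must already be running and the registers
// synchronized (RSF set).
fn exampleRtcReadCounter(regs: anytype) u32 {
    while (true) {
        const high = regs.RTC.CNTH.read().CNTH;
        const low = regs.RTC.CNTL.read().CNTL;
        if (regs.RTC.CNTH.read().CNTH == high) {
            return (@as(u32, high) << 16) | low;
        }
    }
}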
Register(DR7_val).init(base_address + 0x18); /// DR8 const DR8_val = packed struct { /// D8 [0:15] /// Backup data D8: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR8 = Register(DR8_val).init(base_address + 0x1c); /// DR9 const DR9_val = packed struct { /// D9 [0:15] /// Backup data D9: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR9 = Register(DR9_val).init(base_address + 0x20); /// DR10 const DR10_val = packed struct { /// D10 [0:15] /// Backup data D10: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR10 = Register(DR10_val).init(base_address + 0x24); /// DR11 const DR11_val = packed struct { /// DR11 [0:15] /// Backup data DR11: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR11 = Register(DR11_val).init(base_address + 0x3c); /// DR12 const DR12_val = packed struct { /// DR12 [0:15] /// Backup data DR12: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR12 = Register(DR12_val).init(base_address + 0x40); /// DR13 const DR13_val = packed struct { /// DR13 [0:15] /// Backup data DR13: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR13 = Register(DR13_val).init(base_address + 0x44); /// DR14 const DR14_val = packed struct { /// D14 [0:15] /// Backup data D14: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR14 = Register(DR14_val).init(base_address + 0x48); /// DR15 const DR15_val = packed struct { /// D15 [0:15] /// Backup data D15: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR15 = Register(DR15_val).init(base_address + 0x4c); /// DR16 const DR16_val = packed struct { /// D16 [0:15] /// Backup data D16: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR16 = Register(DR16_val).init(base_address + 0x50); /// DR17 const DR17_val = packed struct { /// D17 [0:15] /// Backup data D17: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR17 = Register(DR17_val).init(base_address + 0x54); /// DR18 const DR18_val = packed struct { /// D18 [0:15] /// Backup data D18: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR18 = Register(DR18_val).init(base_address + 0x58); /// DR19 const DR19_val = packed struct { /// D19 [0:15] /// Backup data D19: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR19 = Register(DR19_val).init(base_address + 0x5c); /// DR20 const DR20_val = packed struct { /// D20 [0:15] /// Backup data D20: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR20 = Register(DR20_val).init(base_address + 0x60); /// DR21 const DR21_val = packed struct { /// D21 [0:15] /// Backup data D21: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR21 = Register(DR21_val).init(base_address + 0x64); /// DR22 const DR22_val = packed struct { /// D22 [0:15] /// Backup data D22: u16 = 0, /// unused 
[16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR22 = Register(DR22_val).init(base_address + 0x68); /// DR23 const DR23_val = packed struct { /// D23 [0:15] /// Backup data D23: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR23 = Register(DR23_val).init(base_address + 0x6c); /// DR24 const DR24_val = packed struct { /// D24 [0:15] /// Backup data D24: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR24 = Register(DR24_val).init(base_address + 0x70); /// DR25 const DR25_val = packed struct { /// D25 [0:15] /// Backup data D25: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR25 = Register(DR25_val).init(base_address + 0x74); /// DR26 const DR26_val = packed struct { /// D26 [0:15] /// Backup data D26: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR26 = Register(DR26_val).init(base_address + 0x78); /// DR27 const DR27_val = packed struct { /// D27 [0:15] /// Backup data D27: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR27 = Register(DR27_val).init(base_address + 0x7c); /// DR28 const DR28_val = packed struct { /// D28 [0:15] /// Backup data D28: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR28 = Register(DR28_val).init(base_address + 0x80); /// DR29 const DR29_val = packed struct { /// D29 [0:15] /// Backup data D29: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR29 = Register(DR29_val).init(base_address + 0x84); /// DR30 const DR30_val = packed struct { /// D30 [0:15] /// Backup data D30: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR30 = Register(DR30_val).init(base_address + 0x88); /// DR31 const DR31_val = packed struct { /// D31 [0:15] /// Backup data D31: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR31 = Register(DR31_val).init(base_address + 0x8c); /// DR32 const DR32_val = packed struct { /// D32 [0:15] /// Backup data D32: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR32 = Register(DR32_val).init(base_address + 0x90); /// DR33 const DR33_val = packed struct { /// D33 [0:15] /// Backup data D33: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR33 = Register(DR33_val).init(base_address + 0x94); /// DR34 const DR34_val = packed struct { /// D34 [0:15] /// Backup data D34: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR34 = Register(DR34_val).init(base_address + 0x98); /// DR35 const DR35_val = packed struct { /// D35 [0:15] /// Backup data D35: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR35 = Register(DR35_val).init(base_address + 0x9c); /// DR36 const DR36_val = packed struct { /// D36 [0:15] /// Backup data D36: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR36 = Register(DR36_val).init(base_address + 
0xa0); /// DR37 const DR37_val = packed struct { /// D37 [0:15] /// Backup data D37: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR37 = Register(DR37_val).init(base_address + 0xa4); /// DR38 const DR38_val = packed struct { /// D38 [0:15] /// Backup data D38: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR38 = Register(DR38_val).init(base_address + 0xa8); /// DR39 const DR39_val = packed struct { /// D39 [0:15] /// Backup data D39: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR39 = Register(DR39_val).init(base_address + 0xac); /// DR40 const DR40_val = packed struct { /// D40 [0:15] /// Backup data D40: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR40 = Register(DR40_val).init(base_address + 0xb0); /// DR41 const DR41_val = packed struct { /// D41 [0:15] /// Backup data D41: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR41 = Register(DR41_val).init(base_address + 0xb4); /// DR42 const DR42_val = packed struct { /// D42 [0:15] /// Backup data D42: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup data register (BKP_DR) pub const DR42 = Register(DR42_val).init(base_address + 0xb8); /// RTCCR const RTCCR_val = packed struct { /// CAL [0:6] /// Calibration value CAL: u7 = 0, /// CCO [7:7] /// Calibration Clock Output CCO: u1 = 0, /// ASOE [8:8] /// Alarm or second output ASOE: u1 = 0, /// ASOS [9:9] /// Alarm or second output ASOS: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// RTC clock calibration register pub const RTCCR = Register(RTCCR_val).init(base_address + 0x28); /// CR const CR_val = packed struct { /// TPE [0:0] /// Tamper pin enable TPE: u1 = 0, /// TPAL [1:1] /// Tamper pin active level TPAL: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Backup control register pub const CR = Register(CR_val).init(base_address + 0x2c); /// CSR const CSR_val = packed struct { /// CTE [0:0] /// Clear Tamper event CTE: u1 = 0, /// CTI [1:1] /// Clear Tamper Interrupt CTI: u1 = 0, /// TPIE [2:2] /// Tamper Pin interrupt TPIE: u1 = 0, /// unused [3:7] _unused3: u5 = 0, /// TEF [8:8] /// Tamper Event Flag TEF: u1 = 0, /// TIF [9:9] /// Tamper Interrupt Flag TIF: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// BKP_CSR control/status register pub const CSR = Register(CSR_val).init(base_address + 0x30); }; /// Independent watchdog pub const IWDG = struct { const base_address = 0x40003000; /// KR const KR_val = packed struct { /// KEY [0:15] /// Key value KEY: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Key register (IWDG_KR) pub const KR = Register(KR_val).init(base_address + 0x0); /// PR const PR_val = packed struct { /// PR [0:2] /// Prescaler divider PR: u3 = 0, /// unused [3:31] _unused3: u5 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Prescaler register (IWDG_PR) pub const PR = Register(PR_val).init(base_address + 0x4); /// RLR const RLR_val = packed struct { /// RL [0:11] /// Watchdog counter reload RL: u12 = 4095, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Reload register (IWDG_RLR) pub const RLR 
= Register(RLR_val).init(base_address + 0x8);

/// SR
const SR_val = packed struct {
/// PVU [0:0]
/// Watchdog prescaler value
PVU: u1 = 0,
/// RVU [1:1]
/// Watchdog counter reload value
RVU: u1 = 0,
/// unused [2:31]
_unused2: u6 = 0,
_unused8: u8 = 0,
_unused16: u8 = 0,
_unused24: u8 = 0,
};
/// Status register (IWDG_SR)
pub const SR = Register(SR_val).init(base_address + 0xc);
};

/// Window watchdog
pub const WWDG = struct {

const base_address = 0x40002c00;
/// CR
const CR_val = packed struct {
/// T [0:6]
/// 7-bit counter (MSB to LSB)
T: u7 = 127,
/// WDGA [7:7]
/// Activation bit
WDGA: u1 = 0,
/// unused [8:31]
_unused8: u8 = 0,
_unused16: u8 = 0,
_unused24: u8 = 0,
};
/// Control register (WWDG_CR)
pub const CR = Register(CR_val).init(base_address + 0x0);

/// CFR
const CFR_val = packed struct {
/// W [0:6]
/// 7-bit window value
W: u7 = 127,
/// WDGTB [7:8]
/// Timer Base
WDGTB: u2 = 0,
/// EWI [9:9]
/// Early Wakeup Interrupt
EWI: u1 = 0,
/// unused [10:31]
_unused10: u6 = 0,
_unused16: u8 = 0,
_unused24: u8 = 0,
};
/// Configuration register
pub const CFR = Register(CFR_val).init(base_address + 0x4);

/// SR
const SR_val = packed struct {
/// EWI [0:0]
/// Early Wakeup Interrupt
EWI: u1 = 0,
/// unused [1:31]
_unused1: u7 = 0,
_unused8: u8 = 0,
_unused16: u8 = 0,
_unused24: u8 = 0,
};
/// Status register (WWDG_SR)
pub const SR = Register(SR_val).init(base_address + 0x8);
};

/// Advanced timer
pub const TIM1 = struct {

const base_address = 0x40012c00;
/// CR1
const CR1_val = packed struct {
/// CEN [0:0]
/// Counter enable
CEN: u1 = 0,
/// UDIS [1:1]
/// Update disable
UDIS: u1 = 0,
/// URS [2:2]
/// Update request source
URS: u1 = 0,
/// OPM [3:3]
/// One-pulse mode
OPM: u1 = 0,
/// DIR [4:4]
/// Direction
DIR: u1 = 0,
/// CMS [5:6]
/// Center-aligned mode
CMS: u2 = 0,
/// ARPE [7:7]
/// Auto-reload preload enable
ARPE: u1 = 0,
/// CKD [8:9]
/// Clock division
CKD: u2 = 0,
/// unused [10:31]
_unused10: u6 = 0,
_unused16: u8 = 0,
_unused24: u8 = 0,
};
/// control register 1
pub const CR1 = Register(CR1_val).init(base_address + 0x0);

/// CR2
const CR2_val = packed struct {
/// CCPC [0:0]
/// Capture/compare preloaded
CCPC: u1 = 0,
/// unused [1:1]
_unused1: u1 = 0,
/// CCUS [2:2]
/// Capture/compare control update
CCUS: u1 = 0,
/// CCDS [3:3]
/// Capture/compare DMA
CCDS: u1 = 0,
/// MMS [4:6]
/// Master mode selection
MMS: u3 = 0,
/// TI1S [7:7]
/// TI1 selection
TI1S: u1 = 0,
/// OIS1 [8:8]
/// Output Idle state 1
OIS1: u1 = 0,
/// OIS1N [9:9]
/// Output Idle state 1
OIS1N: u1 = 0,
/// OIS2 [10:10]
/// Output Idle state 2
OIS2: u1 = 0,
/// OIS2N [11:11]
/// Output Idle state 2
OIS2N: u1 = 0,
/// OIS3 [12:12]
/// Output Idle state 3
OIS3: u1 = 0,
/// OIS3N [13:13]
/// Output Idle state 3
OIS3N: u1 = 0,
/// OIS4 [14:14]
/// Output Idle state 4
OIS4: u1 = 0,
/// unused [15:31]
_unused15: u1 = 0,
_unused16: u8 = 0,
_unused24: u8 = 0,
};
/// control register 2
pub const CR2 = Register(CR2_val).init(base_address + 0x4);

/// SMCR
const SMCR_val = packed struct {
/// SMS [0:2]
/// Slave mode selection
SMS: u3 = 0,
/// unused [3:3]
_unused3: u1 = 0,
/// TS [4:6]
/// Trigger selection
TS: u3 = 0,
/// MSM [7:7]
/// Master/Slave mode
MSM: u1 = 0,
/// ETF [8:11]
/// External trigger filter
ETF: u4 = 0,
/// ETPS [12:13]
/// External trigger prescaler
ETPS: u2 = 0,
/// ECE [14:14]
/// External clock enable
ECE: u1 = 0,
/// ETP [15:15]
/// External trigger polarity
ETP: u1 = 0,
/// unused [16:31]
_unused16: u8 = 0,
_unused24: u8 = 0,
};
/// slave mode control register
pub const SMCR =
Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// CC3IE [3:3] /// Capture/Compare 3 interrupt CC3IE: u1 = 0, /// CC4IE [4:4] /// Capture/Compare 4 interrupt CC4IE: u1 = 0, /// COMIE [5:5] /// COM interrupt enable COMIE: u1 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// BIE [7:7] /// Break interrupt enable BIE: u1 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// CC1DE [9:9] /// Capture/Compare 1 DMA request CC1DE: u1 = 0, /// CC2DE [10:10] /// Capture/Compare 2 DMA request CC2DE: u1 = 0, /// CC3DE [11:11] /// Capture/Compare 3 DMA request CC3DE: u1 = 0, /// CC4DE [12:12] /// Capture/Compare 4 DMA request CC4DE: u1 = 0, /// COMDE [13:13] /// COM DMA request enable COMDE: u1 = 0, /// TDE [14:14] /// Trigger DMA request enable TDE: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// CC3IF [3:3] /// Capture/Compare 3 interrupt CC3IF: u1 = 0, /// CC4IF [4:4] /// Capture/Compare 4 interrupt CC4IF: u1 = 0, /// COMIF [5:5] /// COM interrupt flag COMIF: u1 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// BIF [7:7] /// Break interrupt flag BIF: u1 = 0, /// unused [8:8] _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// CC3OF [11:11] /// Capture/Compare 3 overcapture CC3OF: u1 = 0, /// CC4OF [12:12] /// Capture/Compare 4 overcapture CC4OF: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// CC3G [3:3] /// Capture/compare 3 CC3G: u1 = 0, /// CC4G [4:4] /// Capture/compare 4 CC4G: u1 = 0, /// COMG [5:5] /// Capture/Compare control update COMG: u1 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// BG [7:7] /// Break generation BG: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output Compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// OC1CE [7:7] /// Output Compare 1 clear OC1CE: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output Compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output Compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output Compare 2 mode OC2M: u3 = 0, /// OC2CE [15:15] /// Output Compare 2 clear OC2CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (output pub const CCMR1_Output = 
Register(CCMR1_Output_val).init(base_address + 0x18);

/// CCMR1_Input
const CCMR1_Input_val = packed struct {
    /// CC1S [0:1] Capture/Compare 1
    CC1S: u2 = 0,
    /// ICPCS [2:3] Input capture 1 prescaler
    ICPCS: u2 = 0,
    /// IC1F [4:7] Input capture 1 filter
    IC1F: u4 = 0,
    /// CC2S [8:9] Capture/Compare 2
    CC2S: u2 = 0,
    /// IC2PCS [10:11] Input capture 2 prescaler
    IC2PCS: u2 = 0,
    /// IC2F [12:15] Input capture 2 filter
    IC2F: u4 = 0,
    /// unused [16:31]
    _unused16: u8 = 0,
    _unused24: u8 = 0,
};
/// capture/compare mode register 1 (input mode)
pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18);

/// CCMR2_Output
const CCMR2_Output_val = packed struct {
    /// CC3S [0:1] Capture/Compare 3
    CC3S: u2 = 0,
    /// OC3FE [2:2] Output compare 3 fast
    OC3FE: u1 = 0,
    /// OC3PE [3:3] Output compare 3 preload
    OC3PE: u1 = 0,
    /// OC3M [4:6] Output compare 3 mode
    OC3M: u3 = 0,
    /// OC3CE [7:7] Output compare 3 clear
    OC3CE: u1 = 0,
    /// CC4S [8:9] Capture/Compare 4
    CC4S: u2 = 0,
    /// OC4FE [10:10] Output compare 4 fast
    OC4FE: u1 = 0,
    /// OC4PE [11:11] Output compare 4 preload
    OC4PE: u1 = 0,
    /// OC4M [12:14] Output compare 4 mode
    OC4M: u3 = 0,
    /// OC4CE [15:15] Output compare 4 clear
    OC4CE: u1 = 0,
    /// unused [16:31]
    _unused16: u8 = 0,
    _unused24: u8 = 0,
};
/// capture/compare mode register 2 (output mode)
pub const CCMR2_Output = Register(CCMR2_Output_val).init(base_address + 0x1c);

/// CCMR2_Input
const CCMR2_Input_val = packed struct {
    /// CC3S [0:1] Capture/compare 3
    CC3S: u2 = 0,
    /// IC3PSC [2:3] Input capture 3 prescaler
    IC3PSC: u2 = 0,
    /// IC3F [4:7] Input capture 3 filter
    IC3F: u4 = 0,
    /// CC4S [8:9] Capture/Compare 4
    CC4S: u2 = 0,
    /// IC4PSC [10:11] Input capture 4 prescaler
    IC4PSC: u2 = 0,
    /// IC4F [12:15] Input capture 4 filter
    IC4F: u4 = 0,
    /// unused [16:31]
    _unused16: u8 = 0,
    _unused24: u8 = 0,
};
/// capture/compare mode register 2 (input mode)
pub const CCMR2_Input = Register(CCMR2_Input_val).init(base_address + 0x1c);

/// CCER
const CCER_val = packed struct {
    /// CC1E [0:0] Capture/Compare 1 output
    CC1E: u1 = 0,
    /// CC1P [1:1] Capture/Compare 1 output
    CC1P: u1 = 0,
    /// CC1NE [2:2] Capture/Compare 1 complementary output
    CC1NE: u1 = 0,
    /// CC1NP [3:3] Capture/Compare 1 output
    CC1NP: u1 = 0,
    /// CC2E [4:4] Capture/Compare 2 output
    CC2E: u1 = 0,
    /// CC2P [5:5] Capture/Compare 2 output
    CC2P: u1 = 0,
    /// CC2NE [6:6] Capture/Compare 2 complementary output
    CC2NE: u1 = 0,
    /// CC2NP [7:7] Capture/Compare 2 output
    CC2NP: u1 = 0,
    /// CC3E [8:8] Capture/Compare 3 output
    CC3E: u1 = 0,
    /// CC3P [9:9] Capture/Compare 3 output
    CC3P: u1 = 0,
    /// CC3NE [10:10] Capture/Compare 3 complementary output
    CC3NE: u1 = 0,
    /// CC3NP [11:11] Capture/Compare 3 output
    CC3NP: u1 = 0,
    /// CC4E [12:12] Capture/Compare 4 output
    CC4E: u1 = 0,
    /// CC4P [13:13] Capture/Compare 4 output
    CC4P: u1 = 0,
    /// unused [14:31]
    _unused14: u2 = 0,
    _unused16: u8 = 0,
    _unused24: u8 = 0,
};
/// capture/compare enable register
pub const CCER = Register(CCER_val).init(base_address + 0x20);

/// CNT
const CNT_val = packed struct {
    /// CNT [0:15] counter value
    CNT: u16 = 0,
    /// unused [16:31]
    _unused16: u8 = 0,
    _unused24: u8 = 0,
};
/// counter
pub const CNT = Register(CNT_val).init(base_address + 0x24);

/// PSC
const PSC_val = packed struct {
    /// PSC [0:15] Prescaler value
    PSC: u16 = 0,
    /// unused [16:31]
    _unused16: u8 = 0,
    _unused24: u8 = 0,
};
/// prescaler
pub const PSC = Register(PSC_val).init(base_address + 0x28);

///
ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); /// CCR3 const CCR3_val = packed struct { /// CCR3 [0:15] /// Capture/Compare value CCR3: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 3 pub const CCR3 = Register(CCR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// CCR4 [0:15] /// Capture/Compare value CCR4: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 4 pub const CCR4 = Register(CCR4_val).init(base_address + 0x40); /// DCR const DCR_val = packed struct { /// DBA [0:4] /// DMA base address DBA: u5 = 0, /// unused [5:7] _unused5: u3 = 0, /// DBL [8:12] /// DMA burst length DBL: u5 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA control register pub const DCR = Register(DCR_val).init(base_address + 0x48); /// DMAR const DMAR_val = packed struct { /// DMAB [0:15] /// DMA register for burst DMAB: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA address for full transfer pub const DMAR = Register(DMAR_val).init(base_address + 0x4c); /// RCR const RCR_val = packed struct { /// REP [0:7] /// Repetition counter value REP: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// repetition counter register pub const RCR = Register(RCR_val).init(base_address + 0x30); /// BDTR const BDTR_val = packed struct { /// DTG [0:7] /// Dead-time generator setup DTG: u8 = 0, /// LOCK [8:9] /// Lock configuration LOCK: u2 = 0, /// OSSI [10:10] /// Off-state selection for Idle OSSI: u1 = 0, /// OSSR [11:11] /// Off-state selection for Run OSSR: u1 = 0, /// BKE [12:12] /// Break enable BKE: u1 = 0, /// BKP [13:13] /// Break polarity BKP: u1 = 0, /// AOE [14:14] /// Automatic output enable AOE: u1 = 0, /// MOE [15:15] /// Main output enable MOE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// break and dead-time register pub const BDTR = Register(BDTR_val).init(base_address + 0x44); }; /// Advanced timer pub const TIM8 = struct { const base_address = 0x40013400; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// DIR [4:4] /// Direction DIR: u1 = 0, /// CMS [5:6] /// Center-aligned mode CMS: u2 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// CCPC [0:0] /// Capture/compare preloaded CCPC: u1 = 0, /// unused [1:1] _unused1: u1 = 0, 
/// CCUS [2:2] /// Capture/compare control update CCUS: u1 = 0, /// CCDS [3:3] /// Capture/compare DMA CCDS: u1 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// TI1S [7:7] /// TI1 selection TI1S: u1 = 0, /// OIS1 [8:8] /// Output Idle state 1 OIS1: u1 = 0, /// OIS1N [9:9] /// Output Idle state 1 OIS1N: u1 = 0, /// OIS2 [10:10] /// Output Idle state 2 OIS2: u1 = 0, /// OIS2N [11:11] /// Output Idle state 2 OIS2N: u1 = 0, /// OIS3 [12:12] /// Output Idle state 3 OIS3: u1 = 0, /// OIS3N [13:13] /// Output Idle state 3 OIS3N: u1 = 0, /// OIS4 [14:14] /// Output Idle state 4 OIS4: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SMCR const SMCR_val = packed struct { /// SMS [0:2] /// Slave mode selection SMS: u3 = 0, /// unused [3:3] _unused3: u1 = 0, /// TS [4:6] /// Trigger selection TS: u3 = 0, /// MSM [7:7] /// Master/Slave mode MSM: u1 = 0, /// ETF [8:11] /// External trigger filter ETF: u4 = 0, /// ETPS [12:13] /// External trigger prescaler ETPS: u2 = 0, /// ECE [14:14] /// External clock enable ECE: u1 = 0, /// ETP [15:15] /// External trigger polarity ETP: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// slave mode control register pub const SMCR = Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// CC3IE [3:3] /// Capture/Compare 3 interrupt CC3IE: u1 = 0, /// CC4IE [4:4] /// Capture/Compare 4 interrupt CC4IE: u1 = 0, /// COMIE [5:5] /// COM interrupt enable COMIE: u1 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// BIE [7:7] /// Break interrupt enable BIE: u1 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// CC1DE [9:9] /// Capture/Compare 1 DMA request CC1DE: u1 = 0, /// CC2DE [10:10] /// Capture/Compare 2 DMA request CC2DE: u1 = 0, /// CC3DE [11:11] /// Capture/Compare 3 DMA request CC3DE: u1 = 0, /// CC4DE [12:12] /// Capture/Compare 4 DMA request CC4DE: u1 = 0, /// COMDE [13:13] /// COM DMA request enable COMDE: u1 = 0, /// TDE [14:14] /// Trigger DMA request enable TDE: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// CC3IF [3:3] /// Capture/Compare 3 interrupt CC3IF: u1 = 0, /// CC4IF [4:4] /// Capture/Compare 4 interrupt CC4IF: u1 = 0, /// COMIF [5:5] /// COM interrupt flag COMIF: u1 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// BIF [7:7] /// Break interrupt flag BIF: u1 = 0, /// unused [8:8] _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// CC3OF [11:11] /// Capture/Compare 3 overcapture CC3OF: u1 = 0, /// CC4OF [12:12] /// Capture/Compare 4 overcapture CC4OF: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 
= 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// CC3G [3:3] /// Capture/compare 3 CC3G: u1 = 0, /// CC4G [4:4] /// Capture/compare 4 CC4G: u1 = 0, /// COMG [5:5] /// Capture/Compare control update COMG: u1 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// BG [7:7] /// Break generation BG: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output Compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// OC1CE [7:7] /// Output Compare 1 clear OC1CE: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output Compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output Compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output Compare 2 mode OC2M: u3 = 0, /// OC2CE [15:15] /// Output Compare 2 clear OC2CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// ICPCS [2:3] /// Input capture 1 prescaler ICPCS: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// IC2PCS [10:11] /// Input capture 2 prescaler IC2PCS: u2 = 0, /// IC2F [12:15] /// Input capture 2 filter IC2F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCMR2_Output const CCMR2_Output_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// OC3FE [2:2] /// Output compare 3 fast OC3FE: u1 = 0, /// OC3PE [3:3] /// Output compare 3 preload OC3PE: u1 = 0, /// OC3M [4:6] /// Output compare 3 mode OC3M: u3 = 0, /// OC3CE [7:7] /// Output compare 3 clear OC3CE: u1 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// OC4FE [10:10] /// Output compare 4 fast OC4FE: u1 = 0, /// OC4PE [11:11] /// Output compare 4 preload OC4PE: u1 = 0, /// OC4M [12:14] /// Output compare 4 mode OC4M: u3 = 0, /// OC4CE [15:15] /// Output compare 4 clear OC4CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (output pub const CCMR2_Output = Register(CCMR2_Output_val).init(base_address + 0x1c); /// CCMR2_Input const CCMR2_Input_val = packed struct { /// CC3S [0:1] /// Capture/compare 3 CC3S: u2 = 0, /// IC3PSC [2:3] /// Input capture 3 prescaler IC3PSC: u2 = 0, /// IC3F [4:7] /// Input capture 3 filter IC3F: u4 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// IC4PSC [10:11] /// Input capture 4 prescaler IC4PSC: u2 = 0, /// IC4F [12:15] /// Input capture 4 filter IC4F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (input pub const CCMR2_Input = Register(CCMR2_Input_val).init(base_address + 0x1c); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// CC1NE [2:2] /// Capture/Compare 1 complementary output CC1NE: u1 = 0, /// CC1NP 
[3:3] /// Capture/Compare 1 output CC1NP: u1 = 0, /// CC2E [4:4] /// Capture/Compare 2 output CC2E: u1 = 0, /// CC2P [5:5] /// Capture/Compare 2 output CC2P: u1 = 0, /// CC2NE [6:6] /// Capture/Compare 2 complementary output CC2NE: u1 = 0, /// CC2NP [7:7] /// Capture/Compare 2 output CC2NP: u1 = 0, /// CC3E [8:8] /// Capture/Compare 3 output CC3E: u1 = 0, /// CC3P [9:9] /// Capture/Compare 3 output CC3P: u1 = 0, /// CC3NE [10:10] /// Capture/Compare 3 complementary output CC3NE: u1 = 0, /// CC3NP [11:11] /// Capture/Compare 3 output CC3NP: u1 = 0, /// CC4E [12:12] /// Capture/Compare 4 output CC4E: u1 = 0, /// CC4P [13:13] /// Capture/Compare 3 output CC4P: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); /// CCR3 const CCR3_val = packed struct { /// CCR3 [0:15] /// Capture/Compare value CCR3: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 3 pub const CCR3 = Register(CCR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// CCR4 [0:15] /// Capture/Compare value CCR4: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 4 pub const CCR4 = Register(CCR4_val).init(base_address + 0x40); /// DCR const DCR_val = packed struct { /// DBA [0:4] /// DMA base address DBA: u5 = 0, /// unused [5:7] _unused5: u3 = 0, /// DBL [8:12] /// DMA burst length DBL: u5 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA control register pub const DCR = Register(DCR_val).init(base_address + 0x48); /// DMAR const DMAR_val = packed struct { /// DMAB [0:15] /// DMA register for burst DMAB: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA address for full transfer pub const DMAR = Register(DMAR_val).init(base_address + 0x4c); /// RCR const RCR_val = packed struct { /// REP [0:7] /// Repetition counter value REP: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// repetition counter register pub const RCR = Register(RCR_val).init(base_address + 0x30); /// BDTR const BDTR_val = packed struct { /// DTG [0:7] /// Dead-time generator setup DTG: u8 = 0, /// LOCK [8:9] /// Lock configuration LOCK: u2 = 0, /// 
OSSI [10:10] /// Off-state selection for Idle OSSI: u1 = 0, /// OSSR [11:11] /// Off-state selection for Run OSSR: u1 = 0, /// BKE [12:12] /// Break enable BKE: u1 = 0, /// BKP [13:13] /// Break polarity BKP: u1 = 0, /// AOE [14:14] /// Automatic output enable AOE: u1 = 0, /// MOE [15:15] /// Main output enable MOE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// break and dead-time register pub const BDTR = Register(BDTR_val).init(base_address + 0x44); }; /// General purpose timer pub const TIM2 = struct { const base_address = 0x40000000; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// DIR [4:4] /// Direction DIR: u1 = 0, /// CMS [5:6] /// Center-aligned mode CMS: u2 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:2] _unused0: u3 = 0, /// CCDS [3:3] /// Capture/compare DMA CCDS: u1 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// TI1S [7:7] /// TI1 selection TI1S: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SMCR const SMCR_val = packed struct { /// SMS [0:2] /// Slave mode selection SMS: u3 = 0, /// unused [3:3] _unused3: u1 = 0, /// TS [4:6] /// Trigger selection TS: u3 = 0, /// MSM [7:7] /// Master/Slave mode MSM: u1 = 0, /// ETF [8:11] /// External trigger filter ETF: u4 = 0, /// ETPS [12:13] /// External trigger prescaler ETPS: u2 = 0, /// ECE [14:14] /// External clock enable ECE: u1 = 0, /// ETP [15:15] /// External trigger polarity ETP: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// slave mode control register pub const SMCR = Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// CC3IE [3:3] /// Capture/Compare 3 interrupt CC3IE: u1 = 0, /// CC4IE [4:4] /// Capture/Compare 4 interrupt CC4IE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// CC1DE [9:9] /// Capture/Compare 1 DMA request CC1DE: u1 = 0, /// CC2DE [10:10] /// Capture/Compare 2 DMA request CC2DE: u1 = 0, /// CC3DE [11:11] /// Capture/Compare 3 DMA request CC3DE: u1 = 0, /// CC4DE [12:12] /// Capture/Compare 4 DMA request CC4DE: u1 = 0, /// unused [13:13] _unused13: u1 = 0, /// TDE [14:14] /// Trigger DMA request enable TDE: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// CC3IF [3:3] /// Capture/Compare 3 interrupt CC3IF: u1 = 0, /// CC4IF [4:4] /// Capture/Compare 4 
interrupt CC4IF: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// CC3OF [11:11] /// Capture/Compare 3 overcapture CC3OF: u1 = 0, /// CC4OF [12:12] /// Capture/Compare 4 overcapture CC4OF: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// CC3G [3:3] /// Capture/compare 3 CC3G: u1 = 0, /// CC4G [4:4] /// Capture/compare 4 CC4G: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output compare 1 mode OC1M: u3 = 0, /// OC1CE [7:7] /// Output compare 1 clear OC1CE: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output compare 2 mode OC2M: u3 = 0, /// OC2CE [15:15] /// Output compare 2 clear OC2CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// CC2S [8:9] /// Capture/compare 2 CC2S: u2 = 0, /// IC2PSC [10:11] /// Input capture 2 prescaler IC2PSC: u2 = 0, /// IC2F [12:15] /// Input capture 2 filter IC2F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCMR2_Output const CCMR2_Output_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// OC3FE [2:2] /// Output compare 3 fast OC3FE: u1 = 0, /// OC3PE [3:3] /// Output compare 3 preload OC3PE: u1 = 0, /// OC3M [4:6] /// Output compare 3 mode OC3M: u3 = 0, /// OC3CE [7:7] /// Output compare 3 clear OC3CE: u1 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// OC4FE [10:10] /// Output compare 4 fast OC4FE: u1 = 0, /// OC4PE [11:11] /// Output compare 4 preload OC4PE: u1 = 0, /// OC4M [12:14] /// Output compare 4 mode OC4M: u3 = 0, /// O24CE [15:15] /// Output compare 4 clear O24CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (output pub const CCMR2_Output = Register(CCMR2_Output_val).init(base_address + 0x1c); /// CCMR2_Input const CCMR2_Input_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// IC3PSC [2:3] /// Input capture 3 prescaler IC3PSC: u2 = 0, /// IC3F [4:7] /// Input 
capture 3 filter IC3F: u4 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// IC4PSC [10:11] /// Input capture 4 prescaler IC4PSC: u2 = 0, /// IC4F [12:15] /// Input capture 4 filter IC4F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (input pub const CCMR2_Input = Register(CCMR2_Input_val).init(base_address + 0x1c); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:3] _unused2: u2 = 0, /// CC2E [4:4] /// Capture/Compare 2 output CC2E: u1 = 0, /// CC2P [5:5] /// Capture/Compare 2 output CC2P: u1 = 0, /// unused [6:7] _unused6: u2 = 0, /// CC3E [8:8] /// Capture/Compare 3 output CC3E: u1 = 0, /// CC3P [9:9] /// Capture/Compare 3 output CC3P: u1 = 0, /// unused [10:11] _unused10: u2 = 0, /// CC4E [12:12] /// Capture/Compare 4 output CC4E: u1 = 0, /// CC4P [13:13] /// Capture/Compare 3 output CC4P: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); /// CCR3 const CCR3_val = packed struct { /// CCR3 [0:15] /// Capture/Compare value CCR3: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 3 pub const CCR3 = Register(CCR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// CCR4 [0:15] /// Capture/Compare value CCR4: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 4 pub const CCR4 = Register(CCR4_val).init(base_address + 0x40); /// DCR const DCR_val = packed struct { /// DBA [0:4] /// DMA base address DBA: u5 = 0, /// unused [5:7] _unused5: u3 = 0, /// DBL [8:12] /// DMA burst length DBL: u5 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA control register pub const DCR = Register(DCR_val).init(base_address + 0x48); /// DMAR const DMAR_val = packed struct { /// DMAB [0:15] /// DMA register for burst DMAB: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA address for full transfer pub const DMAR = Register(DMAR_val).init(base_address + 0x4c); }; /// General purpose timer pub const TIM3 = struct { const base_address = 0x40000400; /// 
CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// DIR [4:4] /// Direction DIR: u1 = 0, /// CMS [5:6] /// Center-aligned mode CMS: u2 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:2] _unused0: u3 = 0, /// CCDS [3:3] /// Capture/compare DMA CCDS: u1 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// TI1S [7:7] /// TI1 selection TI1S: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SMCR const SMCR_val = packed struct { /// SMS [0:2] /// Slave mode selection SMS: u3 = 0, /// unused [3:3] _unused3: u1 = 0, /// TS [4:6] /// Trigger selection TS: u3 = 0, /// MSM [7:7] /// Master/Slave mode MSM: u1 = 0, /// ETF [8:11] /// External trigger filter ETF: u4 = 0, /// ETPS [12:13] /// External trigger prescaler ETPS: u2 = 0, /// ECE [14:14] /// External clock enable ECE: u1 = 0, /// ETP [15:15] /// External trigger polarity ETP: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// slave mode control register pub const SMCR = Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// CC3IE [3:3] /// Capture/Compare 3 interrupt CC3IE: u1 = 0, /// CC4IE [4:4] /// Capture/Compare 4 interrupt CC4IE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// CC1DE [9:9] /// Capture/Compare 1 DMA request CC1DE: u1 = 0, /// CC2DE [10:10] /// Capture/Compare 2 DMA request CC2DE: u1 = 0, /// CC3DE [11:11] /// Capture/Compare 3 DMA request CC3DE: u1 = 0, /// CC4DE [12:12] /// Capture/Compare 4 DMA request CC4DE: u1 = 0, /// unused [13:13] _unused13: u1 = 0, /// TDE [14:14] /// Trigger DMA request enable TDE: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// CC3IF [3:3] /// Capture/Compare 3 interrupt CC3IF: u1 = 0, /// CC4IF [4:4] /// Capture/Compare 4 interrupt CC4IF: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// CC3OF [11:11] /// Capture/Compare 3 overcapture CC3OF: u1 = 0, /// CC4OF [12:12] /// Capture/Compare 4 overcapture CC4OF: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = 
Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// CC3G [3:3] /// Capture/compare 3 CC3G: u1 = 0, /// CC4G [4:4] /// Capture/compare 4 CC4G: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output compare 1 mode OC1M: u3 = 0, /// OC1CE [7:7] /// Output compare 1 clear OC1CE: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output compare 2 mode OC2M: u3 = 0, /// OC2CE [15:15] /// Output compare 2 clear OC2CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// CC2S [8:9] /// Capture/compare 2 CC2S: u2 = 0, /// IC2PSC [10:11] /// Input capture 2 prescaler IC2PSC: u2 = 0, /// IC2F [12:15] /// Input capture 2 filter IC2F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCMR2_Output const CCMR2_Output_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// OC3FE [2:2] /// Output compare 3 fast OC3FE: u1 = 0, /// OC3PE [3:3] /// Output compare 3 preload OC3PE: u1 = 0, /// OC3M [4:6] /// Output compare 3 mode OC3M: u3 = 0, /// OC3CE [7:7] /// Output compare 3 clear OC3CE: u1 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// OC4FE [10:10] /// Output compare 4 fast OC4FE: u1 = 0, /// OC4PE [11:11] /// Output compare 4 preload OC4PE: u1 = 0, /// OC4M [12:14] /// Output compare 4 mode OC4M: u3 = 0, /// O24CE [15:15] /// Output compare 4 clear O24CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (output pub const CCMR2_Output = Register(CCMR2_Output_val).init(base_address + 0x1c); /// CCMR2_Input const CCMR2_Input_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// IC3PSC [2:3] /// Input capture 3 prescaler IC3PSC: u2 = 0, /// IC3F [4:7] /// Input capture 3 filter IC3F: u4 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// IC4PSC [10:11] /// Input capture 4 prescaler IC4PSC: u2 = 0, /// IC4F [12:15] /// Input capture 4 filter IC4F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (input pub const CCMR2_Input = Register(CCMR2_Input_val).init(base_address + 0x1c); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused 
[2:3] _unused2: u2 = 0, /// CC2E [4:4] /// Capture/Compare 2 output CC2E: u1 = 0, /// CC2P [5:5] /// Capture/Compare 2 output CC2P: u1 = 0, /// unused [6:7] _unused6: u2 = 0, /// CC3E [8:8] /// Capture/Compare 3 output CC3E: u1 = 0, /// CC3P [9:9] /// Capture/Compare 3 output CC3P: u1 = 0, /// unused [10:11] _unused10: u2 = 0, /// CC4E [12:12] /// Capture/Compare 4 output CC4E: u1 = 0, /// CC4P [13:13] /// Capture/Compare 3 output CC4P: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); /// CCR3 const CCR3_val = packed struct { /// CCR3 [0:15] /// Capture/Compare value CCR3: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 3 pub const CCR3 = Register(CCR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// CCR4 [0:15] /// Capture/Compare value CCR4: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 4 pub const CCR4 = Register(CCR4_val).init(base_address + 0x40); /// DCR const DCR_val = packed struct { /// DBA [0:4] /// DMA base address DBA: u5 = 0, /// unused [5:7] _unused5: u3 = 0, /// DBL [8:12] /// DMA burst length DBL: u5 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA control register pub const DCR = Register(DCR_val).init(base_address + 0x48); /// DMAR const DMAR_val = packed struct { /// DMAB [0:15] /// DMA register for burst DMAB: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA address for full transfer pub const DMAR = Register(DMAR_val).init(base_address + 0x4c); }; /// General purpose timer pub const TIM4 = struct { const base_address = 0x40000800; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// DIR [4:4] /// Direction DIR: u1 = 0, /// CMS [5:6] /// Center-aligned mode CMS: u2 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = 
Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:2] _unused0: u3 = 0, /// CCDS [3:3] /// Capture/compare DMA CCDS: u1 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// TI1S [7:7] /// TI1 selection TI1S: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SMCR const SMCR_val = packed struct { /// SMS [0:2] /// Slave mode selection SMS: u3 = 0, /// unused [3:3] _unused3: u1 = 0, /// TS [4:6] /// Trigger selection TS: u3 = 0, /// MSM [7:7] /// Master/Slave mode MSM: u1 = 0, /// ETF [8:11] /// External trigger filter ETF: u4 = 0, /// ETPS [12:13] /// External trigger prescaler ETPS: u2 = 0, /// ECE [14:14] /// External clock enable ECE: u1 = 0, /// ETP [15:15] /// External trigger polarity ETP: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// slave mode control register pub const SMCR = Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// CC3IE [3:3] /// Capture/Compare 3 interrupt CC3IE: u1 = 0, /// CC4IE [4:4] /// Capture/Compare 4 interrupt CC4IE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// CC1DE [9:9] /// Capture/Compare 1 DMA request CC1DE: u1 = 0, /// CC2DE [10:10] /// Capture/Compare 2 DMA request CC2DE: u1 = 0, /// CC3DE [11:11] /// Capture/Compare 3 DMA request CC3DE: u1 = 0, /// CC4DE [12:12] /// Capture/Compare 4 DMA request CC4DE: u1 = 0, /// unused [13:13] _unused13: u1 = 0, /// TDE [14:14] /// Trigger DMA request enable TDE: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// CC3IF [3:3] /// Capture/Compare 3 interrupt CC3IF: u1 = 0, /// CC4IF [4:4] /// Capture/Compare 4 interrupt CC4IF: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// CC3OF [11:11] /// Capture/Compare 3 overcapture CC3OF: u1 = 0, /// CC4OF [12:12] /// Capture/Compare 4 overcapture CC4OF: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// CC3G [3:3] /// Capture/compare 3 CC3G: u1 = 0, /// CC4G [4:4] /// Capture/compare 4 CC4G: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = 
Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output compare 1 mode OC1M: u3 = 0, /// OC1CE [7:7] /// Output compare 1 clear OC1CE: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output compare 2 mode OC2M: u3 = 0, /// OC2CE [15:15] /// Output compare 2 clear OC2CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// CC2S [8:9] /// Capture/compare 2 CC2S: u2 = 0, /// IC2PSC [10:11] /// Input capture 2 prescaler IC2PSC: u2 = 0, /// IC2F [12:15] /// Input capture 2 filter IC2F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCMR2_Output const CCMR2_Output_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// OC3FE [2:2] /// Output compare 3 fast OC3FE: u1 = 0, /// OC3PE [3:3] /// Output compare 3 preload OC3PE: u1 = 0, /// OC3M [4:6] /// Output compare 3 mode OC3M: u3 = 0, /// OC3CE [7:7] /// Output compare 3 clear OC3CE: u1 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// OC4FE [10:10] /// Output compare 4 fast OC4FE: u1 = 0, /// OC4PE [11:11] /// Output compare 4 preload OC4PE: u1 = 0, /// OC4M [12:14] /// Output compare 4 mode OC4M: u3 = 0, /// O24CE [15:15] /// Output compare 4 clear O24CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (output pub const CCMR2_Output = Register(CCMR2_Output_val).init(base_address + 0x1c); /// CCMR2_Input const CCMR2_Input_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// IC3PSC [2:3] /// Input capture 3 prescaler IC3PSC: u2 = 0, /// IC3F [4:7] /// Input capture 3 filter IC3F: u4 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// IC4PSC [10:11] /// Input capture 4 prescaler IC4PSC: u2 = 0, /// IC4F [12:15] /// Input capture 4 filter IC4F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (input pub const CCMR2_Input = Register(CCMR2_Input_val).init(base_address + 0x1c); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:3] _unused2: u2 = 0, /// CC2E [4:4] /// Capture/Compare 2 output CC2E: u1 = 0, /// CC2P [5:5] /// Capture/Compare 2 output CC2P: u1 = 0, /// unused [6:7] _unused6: u2 = 0, /// CC3E [8:8] /// Capture/Compare 3 output CC3E: u1 = 0, /// CC3P [9:9] /// Capture/Compare 3 output CC3P: u1 = 0, /// unused [10:11] _unused10: u2 = 0, /// CC4E [12:12] /// Capture/Compare 4 output CC4E: u1 = 0, /// CC4P [13:13] /// Capture/Compare 3 output CC4P: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub 
const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); /// CCR3 const CCR3_val = packed struct { /// CCR3 [0:15] /// Capture/Compare value CCR3: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 3 pub const CCR3 = Register(CCR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// CCR4 [0:15] /// Capture/Compare value CCR4: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 4 pub const CCR4 = Register(CCR4_val).init(base_address + 0x40); /// DCR const DCR_val = packed struct { /// DBA [0:4] /// DMA base address DBA: u5 = 0, /// unused [5:7] _unused5: u3 = 0, /// DBL [8:12] /// DMA burst length DBL: u5 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA control register pub const DCR = Register(DCR_val).init(base_address + 0x48); /// DMAR const DMAR_val = packed struct { /// DMAB [0:15] /// DMA register for burst DMAB: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA address for full transfer pub const DMAR = Register(DMAR_val).init(base_address + 0x4c); }; /// General purpose timer pub const TIM5 = struct { const base_address = 0x40000c00; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// DIR [4:4] /// Direction DIR: u1 = 0, /// CMS [5:6] /// Center-aligned mode CMS: u2 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:2] _unused0: u3 = 0, /// CCDS [3:3] /// Capture/compare DMA CCDS: u1 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// TI1S [7:7] /// TI1 selection TI1S: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SMCR const SMCR_val = packed struct { /// SMS [0:2] /// Slave mode selection SMS: u3 = 0, /// unused [3:3] _unused3: u1 = 0, /// TS 
[4:6] /// Trigger selection TS: u3 = 0, /// MSM [7:7] /// Master/Slave mode MSM: u1 = 0, /// ETF [8:11] /// External trigger filter ETF: u4 = 0, /// ETPS [12:13] /// External trigger prescaler ETPS: u2 = 0, /// ECE [14:14] /// External clock enable ECE: u1 = 0, /// ETP [15:15] /// External trigger polarity ETP: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// slave mode control register pub const SMCR = Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// CC3IE [3:3] /// Capture/Compare 3 interrupt CC3IE: u1 = 0, /// CC4IE [4:4] /// Capture/Compare 4 interrupt CC4IE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// CC1DE [9:9] /// Capture/Compare 1 DMA request CC1DE: u1 = 0, /// CC2DE [10:10] /// Capture/Compare 2 DMA request CC2DE: u1 = 0, /// CC3DE [11:11] /// Capture/Compare 3 DMA request CC3DE: u1 = 0, /// CC4DE [12:12] /// Capture/Compare 4 DMA request CC4DE: u1 = 0, /// unused [13:13] _unused13: u1 = 0, /// TDE [14:14] /// Trigger DMA request enable TDE: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// CC3IF [3:3] /// Capture/Compare 3 interrupt CC3IF: u1 = 0, /// CC4IF [4:4] /// Capture/Compare 4 interrupt CC4IF: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// CC3OF [11:11] /// Capture/Compare 3 overcapture CC3OF: u1 = 0, /// CC4OF [12:12] /// Capture/Compare 4 overcapture CC4OF: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// CC3G [3:3] /// Capture/compare 3 CC3G: u1 = 0, /// CC4G [4:4] /// Capture/compare 4 CC4G: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output compare 1 mode OC1M: u3 = 0, /// OC1CE [7:7] /// Output compare 1 clear OC1CE: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output 
compare 2 mode OC2M: u3 = 0, /// OC2CE [15:15] /// Output compare 2 clear OC2CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// CC2S [8:9] /// Capture/compare 2 CC2S: u2 = 0, /// IC2PSC [10:11] /// Input capture 2 prescaler IC2PSC: u2 = 0, /// IC2F [12:15] /// Input capture 2 filter IC2F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCMR2_Output const CCMR2_Output_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// OC3FE [2:2] /// Output compare 3 fast OC3FE: u1 = 0, /// OC3PE [3:3] /// Output compare 3 preload OC3PE: u1 = 0, /// OC3M [4:6] /// Output compare 3 mode OC3M: u3 = 0, /// OC3CE [7:7] /// Output compare 3 clear OC3CE: u1 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// OC4FE [10:10] /// Output compare 4 fast OC4FE: u1 = 0, /// OC4PE [11:11] /// Output compare 4 preload OC4PE: u1 = 0, /// OC4M [12:14] /// Output compare 4 mode OC4M: u3 = 0, /// O24CE [15:15] /// Output compare 4 clear O24CE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (output pub const CCMR2_Output = Register(CCMR2_Output_val).init(base_address + 0x1c); /// CCMR2_Input const CCMR2_Input_val = packed struct { /// CC3S [0:1] /// Capture/Compare 3 CC3S: u2 = 0, /// IC3PSC [2:3] /// Input capture 3 prescaler IC3PSC: u2 = 0, /// IC3F [4:7] /// Input capture 3 filter IC3F: u4 = 0, /// CC4S [8:9] /// Capture/Compare 4 CC4S: u2 = 0, /// IC4PSC [10:11] /// Input capture 4 prescaler IC4PSC: u2 = 0, /// IC4F [12:15] /// Input capture 4 filter IC4F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 2 (input pub const CCMR2_Input = Register(CCMR2_Input_val).init(base_address + 0x1c); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:3] _unused2: u2 = 0, /// CC2E [4:4] /// Capture/Compare 2 output CC2E: u1 = 0, /// CC2P [5:5] /// Capture/Compare 2 output CC2P: u1 = 0, /// unused [6:7] _unused6: u2 = 0, /// CC3E [8:8] /// Capture/Compare 3 output CC3E: u1 = 0, /// CC3P [9:9] /// Capture/Compare 3 output CC3P: u1 = 0, /// unused [10:11] _unused10: u2 = 0, /// CC4E [12:12] /// Capture/Compare 4 output CC4E: u1 = 0, /// CC4P [13:13] /// Capture/Compare 3 output CC4P: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value 
ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); /// CCR3 const CCR3_val = packed struct { /// CCR3 [0:15] /// Capture/Compare value CCR3: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 3 pub const CCR3 = Register(CCR3_val).init(base_address + 0x3c); /// CCR4 const CCR4_val = packed struct { /// CCR4 [0:15] /// Capture/Compare value CCR4: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 4 pub const CCR4 = Register(CCR4_val).init(base_address + 0x40); /// DCR const DCR_val = packed struct { /// DBA [0:4] /// DMA base address DBA: u5 = 0, /// unused [5:7] _unused5: u3 = 0, /// DBL [8:12] /// DMA burst length DBL: u5 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA control register pub const DCR = Register(DCR_val).init(base_address + 0x48); /// DMAR const DMAR_val = packed struct { /// DMAB [0:15] /// DMA register for burst DMAB: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA address for full transfer pub const DMAR = Register(DMAR_val).init(base_address + 0x4c); }; /// General purpose timer pub const TIM9 = struct { const base_address = 0x40014c00; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SMCR const SMCR_val = packed struct { /// SMS [0:2] /// Slave mode selection SMS: u3 = 0, /// unused [3:3] _unused3: u1 = 0, /// TS [4:6] /// Trigger selection TS: u3 = 0, /// MSM [7:7] /// Master/Slave mode MSM: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// slave mode control register pub const SMCR = Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// unused [3:5] _unused3: u3 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER 
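// Usage note: DCR/DMAR form the timer's DMA burst interface. DBA is the word
// offset of the first register to access, counted from CR1 (on this map,
// DBA = 13 reaches CCR1), and DBL is the number of transfers minus one;
// successive DMA accesses to DMAR are redirected to consecutive registers.
// This follows the usual STM32F1 timer behaviour and is a hedged summary, not
// something encoded in the generated structs above.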
= Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// unused [3:5] _unused3: u3 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// unused [3:5] _unused3: u3 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output Compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// unused [7:7] _unused7: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output Compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output Compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output Compare 2 mode OC2M: u3 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// IC2PSC [10:11] /// Input capture 2 prescaler IC2PSC: u2 = 0, /// IC2F [12:15] /// Input capture 2 filter IC2F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// CC1NP [3:3] /// Capture/Compare 1 output CC1NP: u1 = 0, /// CC2E [4:4] /// Capture/Compare 2 output CC2E: u1 = 0, /// CC2P [5:5] /// Capture/Compare 2 output CC2P: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// CC2NP [7:7] /// Capture/Compare 2 output CC2NP: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 
0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); }; /// General purpose timer pub const TIM12 = struct { const base_address = 0x40001800; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SMCR const SMCR_val = packed struct { /// SMS [0:2] /// Slave mode selection SMS: u3 = 0, /// unused [3:3] _unused3: u1 = 0, /// TS [4:6] /// Trigger selection TS: u3 = 0, /// MSM [7:7] /// Master/Slave mode MSM: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// slave mode control register pub const SMCR = Register(SMCR_val).init(base_address + 0x8); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// CC2IE [2:2] /// Capture/Compare 2 interrupt CC2IE: u1 = 0, /// unused [3:5] _unused3: u3 = 0, /// TIE [6:6] /// Trigger interrupt enable TIE: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// CC2IF [2:2] /// Capture/Compare 2 interrupt CC2IF: u1 = 0, /// unused [3:5] _unused3: u3 = 0, /// TIF [6:6] /// Trigger interrupt flag TIF: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// CC2OF [10:10] /// Capture/compare 2 overcapture CC2OF: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// CC2G [2:2] /// Capture/compare 2 CC2G: u1 = 0, /// unused [3:5] _unused3: u3 = 0, /// TG [6:6] /// Trigger generation TG: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: 
u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// OC1FE [2:2] /// Output Compare 1 fast OC1FE: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// unused [7:7] _unused7: u1 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// OC2FE [10:10] /// Output Compare 2 fast OC2FE: u1 = 0, /// OC2PE [11:11] /// Output Compare 2 preload OC2PE: u1 = 0, /// OC2M [12:14] /// Output Compare 2 mode OC2M: u3 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// CC2S [8:9] /// Capture/Compare 2 CC2S: u2 = 0, /// IC2PSC [10:11] /// Input capture 2 prescaler IC2PSC: u2 = 0, /// IC2F [12:15] /// Input capture 2 filter IC2F: u4 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register 1 (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// CC1NP [3:3] /// Capture/Compare 1 output CC1NP: u1 = 0, /// CC2E [4:4] /// Capture/Compare 2 output CC2E: u1 = 0, /// CC2P [5:5] /// Capture/Compare 2 output CC2P: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// CC2NP [7:7] /// Capture/Compare 2 output CC2NP: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); /// CCR2 const CCR2_val = packed struct { /// CCR2 [0:15] /// Capture/Compare 2 value CCR2: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 2 pub const CCR2 = Register(CCR2_val).init(base_address + 0x38); }; /// General purpose timer pub const TIM10 = struct { const base_address = 0x40015000; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source 
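// Usage sketch for the two-channel timers above: capture TI1 on channel 1,
// assuming the Register wrapper provides modify()/read() helpers (an
// assumption; that code is generated elsewhere in this file):
//   CCMR1_Input.modify(.{ .CC1S = 0b01, .IC1F = 0b0011 }); // CC1 mapped to TI1, light filter
//   CCER.modify(.{ .CC1E = 1 }); // enable the capture
//   CR1.modify(.{ .CEN = 1 }); // start the counter
//   const ticks = CCR1.read().CCR1; // counter value latched on the last edge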
URS: u1 = 0, /// unused [3:6] _unused3: u4 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// unused [2:8] _unused2: u6 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// CC1NP [3:3] /// Capture/Compare 1 output CC1NP: u1 = 0, /// unused [4:31] _unused4: u4 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR 
[0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); }; /// General purpose timer pub const TIM11 = struct { const base_address = 0x40015400; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// unused [3:6] _unused3: u4 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// unused [2:8] _unused2: u6 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// CC1NP [3:3] /// 
Capture/Compare 1 output CC1NP: u1 = 0, /// unused [4:31] _unused4: u4 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); }; /// General purpose timer pub const TIM13 = struct { const base_address = 0x40001c00; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// unused [3:6] _unused3: u4 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// unused [2:8] _unused2: u6 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (output pub 
const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// CC1NP [3:3] /// Capture/Compare 1 output CC1NP: u1 = 0, /// unused [4:31] _unused4: u4 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); }; /// General purpose timer pub const TIM14 = struct { const base_address = 0x40002000; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// unused [3:6] _unused3: u4 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// CKD [8:9] /// Clock division CKD: u2 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// CC1IE [1:1] /// Capture/Compare 1 interrupt CC1IE: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// CC1IF [1:1] /// Capture/compare 1 interrupt CC1IF: u1 = 0, /// unused [2:8] _unused2: u6 = 0, _unused8: u1 = 0, /// CC1OF [9:9] /// Capture/Compare 1 overcapture CC1OF: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const 
EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// CC1G [1:1] /// Capture/compare 1 CC1G: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CCMR1_Output const CCMR1_Output_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// OC1PE [3:3] /// Output Compare 1 preload OC1PE: u1 = 0, /// OC1M [4:6] /// Output Compare 1 mode OC1M: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (output pub const CCMR1_Output = Register(CCMR1_Output_val).init(base_address + 0x18); /// CCMR1_Input const CCMR1_Input_val = packed struct { /// CC1S [0:1] /// Capture/Compare 1 CC1S: u2 = 0, /// IC1PSC [2:3] /// Input capture 1 prescaler IC1PSC: u2 = 0, /// IC1F [4:7] /// Input capture 1 filter IC1F: u4 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare mode register (input pub const CCMR1_Input = Register(CCMR1_Input_val).init(base_address + 0x18); /// CCER const CCER_val = packed struct { /// CC1E [0:0] /// Capture/Compare 1 output CC1E: u1 = 0, /// CC1P [1:1] /// Capture/Compare 1 output CC1P: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// CC1NP [3:3] /// Capture/Compare 1 output CC1NP: u1 = 0, /// unused [4:31] _unused4: u4 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare enable pub const CCER = Register(CCER_val).init(base_address + 0x20); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); /// CCR1 const CCR1_val = packed struct { /// CCR1 [0:15] /// Capture/Compare 1 value CCR1: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// capture/compare register 1 pub const CCR1 = Register(CCR1_val).init(base_address + 0x34); }; /// Basic timer pub const TIM6 = struct { const base_address = 0x40001000; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// 
unused [1:7] _unused1: u7 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// unused [1:31] _unused1: u7 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// unused [1:31] _unused1: u7 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// Low counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Low Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); }; /// Basic timer pub const TIM7 = struct { const base_address = 0x40001400; /// CR1 const CR1_val = packed struct { /// CEN [0:0] /// Counter enable CEN: u1 = 0, /// UDIS [1:1] /// Update disable UDIS: u1 = 0, /// URS [2:2] /// Update request source URS: u1 = 0, /// OPM [3:3] /// One-pulse mode OPM: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// ARPE [7:7] /// Auto-reload preload enable ARPE: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// MMS [4:6] /// Master mode selection MMS: u3 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// DIER const DIER_val = packed struct { /// UIE [0:0] /// Update interrupt enable UIE: u1 = 0, /// unused [1:7] _unused1: u7 = 0, /// UDE [8:8] /// Update DMA request enable UDE: u1 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DMA/Interrupt enable register pub const DIER = Register(DIER_val).init(base_address + 0xc); /// SR const SR_val = packed struct { /// UIF [0:0] /// Update interrupt flag UIF: u1 = 0, /// unused [1:31] _unused1: u7 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x10); /// EGR const EGR_val = packed struct { /// UG [0:0] /// Update generation UG: u1 = 0, /// unused [1:31] _unused1: u7 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// event generation register pub const EGR = Register(EGR_val).init(base_address + 0x14); /// CNT const CNT_val = packed struct { /// CNT [0:15] /// Low counter value CNT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// counter pub const CNT = Register(CNT_val).init(base_address + 0x24); /// PSC const PSC_val = packed struct { /// PSC [0:15] /// Prescaler 
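// Usage note for the basic timers: the update event rate is
// f_update = f_timer / ((PSC + 1) * (ARR + 1)). Assuming a 72 MHz timer clock
// (board-specific, not stated in this file), PSC = 7199 and ARR = 9999 give a
// 1 Hz update; write those values to PSC and ARR, then set CR1.CEN to start
// counting and DIER.UIE if an update interrupt is wanted.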
value PSC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// prescaler pub const PSC = Register(PSC_val).init(base_address + 0x28); /// ARR const ARR_val = packed struct { /// ARR [0:15] /// Low Auto-reload value ARR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// auto-reload register pub const ARR = Register(ARR_val).init(base_address + 0x2c); }; /// Inter integrated circuit pub const I2C1 = struct { const base_address = 0x40005400; /// CR1 const CR1_val = packed struct { /// PE [0:0] /// Peripheral enable PE: u1 = 0, /// SMBUS [1:1] /// SMBus mode SMBUS: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// SMBTYPE [3:3] /// SMBus type SMBTYPE: u1 = 0, /// ENARP [4:4] /// ARP enable ENARP: u1 = 0, /// ENPEC [5:5] /// PEC enable ENPEC: u1 = 0, /// ENGC [6:6] /// General call enable ENGC: u1 = 0, /// NOSTRETCH [7:7] /// Clock stretching disable (Slave NOSTRETCH: u1 = 0, /// START [8:8] /// Start generation START: u1 = 0, /// STOP [9:9] /// Stop generation STOP: u1 = 0, /// ACK [10:10] /// Acknowledge enable ACK: u1 = 0, /// POS [11:11] /// Acknowledge/PEC Position (for data POS: u1 = 0, /// PEC [12:12] /// Packet error checking PEC: u1 = 0, /// ALERT [13:13] /// SMBus alert ALERT: u1 = 0, /// unused [14:14] _unused14: u1 = 0, /// SWRST [15:15] /// Software reset SWRST: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// FREQ [0:5] /// Peripheral clock frequency FREQ: u6 = 0, /// unused [6:7] _unused6: u2 = 0, /// ITERREN [8:8] /// Error interrupt enable ITERREN: u1 = 0, /// ITEVTEN [9:9] /// Event interrupt enable ITEVTEN: u1 = 0, /// ITBUFEN [10:10] /// Buffer interrupt enable ITBUFEN: u1 = 0, /// DMAEN [11:11] /// DMA requests enable DMAEN: u1 = 0, /// LAST [12:12] /// DMA last transfer LAST: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// OAR1 const OAR1_val = packed struct { /// ADD0 [0:0] /// Interface address ADD0: u1 = 0, /// ADD7 [1:7] /// Interface address ADD7: u7 = 0, /// ADD10 [8:9] /// Interface address ADD10: u2 = 0, /// unused [10:14] _unused10: u5 = 0, /// ADDMODE [15:15] /// Addressing mode (slave ADDMODE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Own address register 1 pub const OAR1 = Register(OAR1_val).init(base_address + 0x8); /// OAR2 const OAR2_val = packed struct { /// ENDUAL [0:0] /// Dual addressing mode ENDUAL: u1 = 0, /// ADD2 [1:7] /// Interface address ADD2: u7 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Own address register 2 pub const OAR2 = Register(OAR2_val).init(base_address + 0xc); /// DR const DR_val = packed struct { /// DR [0:7] /// 8-bit data register DR: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Data register pub const DR = Register(DR_val).init(base_address + 0x10); /// SR1 const SR1_val = packed struct { /// SB [0:0] /// Start bit (Master mode) SB: u1 = 0, /// ADDR [1:1] /// Address sent (master mode)/matched ADDR: u1 = 0, /// BTF [2:2] /// Byte transfer finished BTF: u1 = 0, /// ADD10 [3:3] /// 10-bit header sent (Master ADD10: u1 = 0, /// STOPF [4:4] /// Stop detection (slave STOPF: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// RxNE [6:6] /// Data register not empty RxNE: u1 = 0, /// TxE [7:7] /// Data register empty TxE: 
u1 = 0, /// BERR [8:8] /// Bus error BERR: u1 = 0, /// ARLO [9:9] /// Arbitration lost (master ARLO: u1 = 0, /// AF [10:10] /// Acknowledge failure AF: u1 = 0, /// OVR [11:11] /// Overrun/Underrun OVR: u1 = 0, /// PECERR [12:12] /// PEC Error in reception PECERR: u1 = 0, /// unused [13:13] _unused13: u1 = 0, /// TIMEOUT [14:14] /// Timeout or Tlow error TIMEOUT: u1 = 0, /// SMBALERT [15:15] /// SMBus alert SMBALERT: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register 1 pub const SR1 = Register(SR1_val).init(base_address + 0x14); /// SR2 const SR2_val = packed struct { /// MSL [0:0] /// Master/slave MSL: u1 = 0, /// BUSY [1:1] /// Bus busy BUSY: u1 = 0, /// TRA [2:2] /// Transmitter/receiver TRA: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// GENCALL [4:4] /// General call address (Slave GENCALL: u1 = 0, /// SMBDEFAULT [5:5] /// SMBus device default address (Slave SMBDEFAULT: u1 = 0, /// SMBHOST [6:6] /// SMBus host header (Slave SMBHOST: u1 = 0, /// DUALF [7:7] /// Dual flag (Slave mode) DUALF: u1 = 0, /// PEC [8:15] /// acket error checking PEC: u8 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register 2 pub const SR2 = Register(SR2_val).init(base_address + 0x18); /// CCR const CCR_val = packed struct { /// CCR [0:11] /// Clock control register in Fast/Standard CCR: u12 = 0, /// unused [12:13] _unused12: u2 = 0, /// DUTY [14:14] /// Fast mode duty cycle DUTY: u1 = 0, /// F_S [15:15] /// I2C master mode selection F_S: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Clock control register pub const CCR = Register(CCR_val).init(base_address + 0x1c); /// TRISE const TRISE_val = packed struct { /// TRISE [0:5] /// Maximum rise time in Fast/Standard mode TRISE: u6 = 2, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// TRISE register pub const TRISE = Register(TRISE_val).init(base_address + 0x20); }; /// Inter integrated circuit pub const I2C2 = struct { const base_address = 0x40005800; /// CR1 const CR1_val = packed struct { /// PE [0:0] /// Peripheral enable PE: u1 = 0, /// SMBUS [1:1] /// SMBus mode SMBUS: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// SMBTYPE [3:3] /// SMBus type SMBTYPE: u1 = 0, /// ENARP [4:4] /// ARP enable ENARP: u1 = 0, /// ENPEC [5:5] /// PEC enable ENPEC: u1 = 0, /// ENGC [6:6] /// General call enable ENGC: u1 = 0, /// NOSTRETCH [7:7] /// Clock stretching disable (Slave NOSTRETCH: u1 = 0, /// START [8:8] /// Start generation START: u1 = 0, /// STOP [9:9] /// Stop generation STOP: u1 = 0, /// ACK [10:10] /// Acknowledge enable ACK: u1 = 0, /// POS [11:11] /// Acknowledge/PEC Position (for data POS: u1 = 0, /// PEC [12:12] /// Packet error checking PEC: u1 = 0, /// ALERT [13:13] /// SMBus alert ALERT: u1 = 0, /// unused [14:14] _unused14: u1 = 0, /// SWRST [15:15] /// Software reset SWRST: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// FREQ [0:5] /// Peripheral clock frequency FREQ: u6 = 0, /// unused [6:7] _unused6: u2 = 0, /// ITERREN [8:8] /// Error interrupt enable ITERREN: u1 = 0, /// ITEVTEN [9:9] /// Event interrupt enable ITEVTEN: u1 = 0, /// ITBUFEN [10:10] /// Buffer interrupt enable ITBUFEN: u1 = 0, /// DMAEN [11:11] /// DMA requests enable DMAEN: u1 = 0, /// LAST [12:12] /// DMA last transfer LAST: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 
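// Usage note: several SR1 events on this I2C block are cleared by a fixed read
// sequence rather than by writing the flag: ADDR is cleared by reading SR1 and
// then SR2, and SB by reading SR1 and then writing DR. This mirrors the usual
// STM32F1 I2C behaviour and is a hedged reminder, not information carried by
// the generated structs themselves.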
= 0, }; /// Control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// OAR1 const OAR1_val = packed struct { /// ADD0 [0:0] /// Interface address ADD0: u1 = 0, /// ADD7 [1:7] /// Interface address ADD7: u7 = 0, /// ADD10 [8:9] /// Interface address ADD10: u2 = 0, /// unused [10:14] _unused10: u5 = 0, /// ADDMODE [15:15] /// Addressing mode (slave ADDMODE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Own address register 1 pub const OAR1 = Register(OAR1_val).init(base_address + 0x8); /// OAR2 const OAR2_val = packed struct { /// ENDUAL [0:0] /// Dual addressing mode ENDUAL: u1 = 0, /// ADD2 [1:7] /// Interface address ADD2: u7 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Own address register 2 pub const OAR2 = Register(OAR2_val).init(base_address + 0xc); /// DR const DR_val = packed struct { /// DR [0:7] /// 8-bit data register DR: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Data register pub const DR = Register(DR_val).init(base_address + 0x10); /// SR1 const SR1_val = packed struct { /// SB [0:0] /// Start bit (Master mode) SB: u1 = 0, /// ADDR [1:1] /// Address sent (master mode)/matched ADDR: u1 = 0, /// BTF [2:2] /// Byte transfer finished BTF: u1 = 0, /// ADD10 [3:3] /// 10-bit header sent (Master ADD10: u1 = 0, /// STOPF [4:4] /// Stop detection (slave STOPF: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// RxNE [6:6] /// Data register not empty RxNE: u1 = 0, /// TxE [7:7] /// Data register empty TxE: u1 = 0, /// BERR [8:8] /// Bus error BERR: u1 = 0, /// ARLO [9:9] /// Arbitration lost (master ARLO: u1 = 0, /// AF [10:10] /// Acknowledge failure AF: u1 = 0, /// OVR [11:11] /// Overrun/Underrun OVR: u1 = 0, /// PECERR [12:12] /// PEC Error in reception PECERR: u1 = 0, /// unused [13:13] _unused13: u1 = 0, /// TIMEOUT [14:14] /// Timeout or Tlow error TIMEOUT: u1 = 0, /// SMBALERT [15:15] /// SMBus alert SMBALERT: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register 1 pub const SR1 = Register(SR1_val).init(base_address + 0x14); /// SR2 const SR2_val = packed struct { /// MSL [0:0] /// Master/slave MSL: u1 = 0, /// BUSY [1:1] /// Bus busy BUSY: u1 = 0, /// TRA [2:2] /// Transmitter/receiver TRA: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// GENCALL [4:4] /// General call address (Slave GENCALL: u1 = 0, /// SMBDEFAULT [5:5] /// SMBus device default address (Slave SMBDEFAULT: u1 = 0, /// SMBHOST [6:6] /// SMBus host header (Slave SMBHOST: u1 = 0, /// DUALF [7:7] /// Dual flag (Slave mode) DUALF: u1 = 0, /// PEC [8:15] /// acket error checking PEC: u8 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register 2 pub const SR2 = Register(SR2_val).init(base_address + 0x18); /// CCR const CCR_val = packed struct { /// CCR [0:11] /// Clock control register in Fast/Standard CCR: u12 = 0, /// unused [12:13] _unused12: u2 = 0, /// DUTY [14:14] /// Fast mode duty cycle DUTY: u1 = 0, /// F_S [15:15] /// I2C master mode selection F_S: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Clock control register pub const CCR = Register(CCR_val).init(base_address + 0x1c); /// TRISE const TRISE_val = packed struct { /// TRISE [0:5] /// Maximum rise time in Fast/Standard mode TRISE: u6 = 2, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// TRISE register pub const TRISE = Register(TRISE_val).init(base_address + 0x20); }; /// Serial peripheral 
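// Configuration sketch: standard-mode (100 kHz) I2C timing, assuming a 36 MHz
// APB1 clock and a modify() helper on the Register wrapper (both assumptions,
// not shown in this section):
//   CR2.modify(.{ .FREQ = 36 }); // peripheral clock in MHz
//   CCR.modify(.{ .CCR = 180 }); // T_high = T_low = 180 / 36 MHz = 5 us -> 100 kHz
//   TRISE.modify(.{ .TRISE = 37 }); // 1000 ns max rise time / 27.8 ns + 1
//   CR1.modify(.{ .PE = 1 }); // enable the peripheral last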
interface pub const SPI1 = struct { const base_address = 0x40013000; /// CR1 const CR1_val = packed struct { /// CPHA [0:0] /// Clock phase CPHA: u1 = 0, /// CPOL [1:1] /// Clock polarity CPOL: u1 = 0, /// MSTR [2:2] /// Master selection MSTR: u1 = 0, /// BR [3:5] /// Baud rate control BR: u3 = 0, /// SPE [6:6] /// SPI enable SPE: u1 = 0, /// LSBFIRST [7:7] /// Frame format LSBFIRST: u1 = 0, /// SSI [8:8] /// Internal slave select SSI: u1 = 0, /// SSM [9:9] /// Software slave management SSM: u1 = 0, /// RXONLY [10:10] /// Receive only RXONLY: u1 = 0, /// DFF [11:11] /// Data frame format DFF: u1 = 0, /// CRCNEXT [12:12] /// CRC transfer next CRCNEXT: u1 = 0, /// CRCEN [13:13] /// Hardware CRC calculation CRCEN: u1 = 0, /// BIDIOE [14:14] /// Output enable in bidirectional BIDIOE: u1 = 0, /// BIDIMODE [15:15] /// Bidirectional data mode BIDIMODE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// RXDMAEN [0:0] /// Rx buffer DMA enable RXDMAEN: u1 = 0, /// TXDMAEN [1:1] /// Tx buffer DMA enable TXDMAEN: u1 = 0, /// SSOE [2:2] /// SS output enable SSOE: u1 = 0, /// unused [3:4] _unused3: u2 = 0, /// ERRIE [5:5] /// Error interrupt enable ERRIE: u1 = 0, /// RXNEIE [6:6] /// RX buffer not empty interrupt RXNEIE: u1 = 0, /// TXEIE [7:7] /// Tx buffer empty interrupt TXEIE: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SR const SR_val = packed struct { /// RXNE [0:0] /// Receive buffer not empty RXNE: u1 = 0, /// TXE [1:1] /// Transmit buffer empty TXE: u1 = 1, /// CHSIDE [2:2] /// Channel side CHSIDE: u1 = 0, /// UDR [3:3] /// Underrun flag UDR: u1 = 0, /// CRCERR [4:4] /// CRC error flag CRCERR: u1 = 0, /// MODF [5:5] /// Mode fault MODF: u1 = 0, /// OVR [6:6] /// Overrun flag OVR: u1 = 0, /// BSY [7:7] /// Busy flag BSY: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x8); /// DR const DR_val = packed struct { /// DR [0:15] /// Data register DR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// data register pub const DR = Register(DR_val).init(base_address + 0xc); /// CRCPR const CRCPR_val = packed struct { /// CRCPOLY [0:15] /// CRC polynomial register CRCPOLY: u16 = 7, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// CRC polynomial register pub const CRCPR = Register(CRCPR_val).init(base_address + 0x10); /// RXCRCR const RXCRCR_val = packed struct { /// RxCRC [0:15] /// Rx CRC register RxCRC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RX CRC register pub const RXCRCR = Register(RXCRCR_val).init(base_address + 0x14); /// TXCRCR const TXCRCR_val = packed struct { /// TxCRC [0:15] /// Tx CRC register TxCRC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// TX CRC register pub const TXCRCR = Register(TXCRCR_val).init(base_address + 0x18); /// I2SCFGR const I2SCFGR_val = packed struct { /// CHLEN [0:0] /// Channel length (number of bits per audio CHLEN: u1 = 0, /// DATLEN [1:2] /// Data length to be DATLEN: u2 = 0, /// CKPOL [3:3] /// Steady state clock CKPOL: u1 = 0, /// I2SSTD [4:5] /// I2S standard selection I2SSTD: u2 = 0, /// unused [6:6] _unused6: u1 = 0, /// PCMSYNC [7:7] /// PCM frame synchronization PCMSYNC: u1 = 0, /// 
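// Usage sketch: the SPI clock is f_PCLK / 2^(BR + 1), so BR = 0b010 divides by
// 8. A minimal software-NSS master setup, assuming a modify() helper on the
// Register wrapper (an assumption, not shown here):
//   CR1.modify(.{ .MSTR = 1, .BR = 0b010, .SSM = 1, .SSI = 1, .SPE = 1 });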
I2SCFG [8:9] /// I2S configuration mode I2SCFG: u2 = 0, /// I2SE [10:10] /// I2S Enable I2SE: u1 = 0, /// I2SMOD [11:11] /// I2S mode selection I2SMOD: u1 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// I2S configuration register pub const I2SCFGR = Register(I2SCFGR_val).init(base_address + 0x1c); /// I2SPR const I2SPR_val = packed struct { /// I2SDIV [0:7] /// I2S Linear prescaler I2SDIV: u8 = 16, /// ODD [8:8] /// Odd factor for the ODD: u1 = 0, /// MCKOE [9:9] /// Master clock output enable MCKOE: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// I2S prescaler register pub const I2SPR = Register(I2SPR_val).init(base_address + 0x20); }; /// Serial peripheral interface pub const SPI2 = struct { const base_address = 0x40003800; /// CR1 const CR1_val = packed struct { /// CPHA [0:0] /// Clock phase CPHA: u1 = 0, /// CPOL [1:1] /// Clock polarity CPOL: u1 = 0, /// MSTR [2:2] /// Master selection MSTR: u1 = 0, /// BR [3:5] /// Baud rate control BR: u3 = 0, /// SPE [6:6] /// SPI enable SPE: u1 = 0, /// LSBFIRST [7:7] /// Frame format LSBFIRST: u1 = 0, /// SSI [8:8] /// Internal slave select SSI: u1 = 0, /// SSM [9:9] /// Software slave management SSM: u1 = 0, /// RXONLY [10:10] /// Receive only RXONLY: u1 = 0, /// DFF [11:11] /// Data frame format DFF: u1 = 0, /// CRCNEXT [12:12] /// CRC transfer next CRCNEXT: u1 = 0, /// CRCEN [13:13] /// Hardware CRC calculation CRCEN: u1 = 0, /// BIDIOE [14:14] /// Output enable in bidirectional BIDIOE: u1 = 0, /// BIDIMODE [15:15] /// Bidirectional data mode BIDIMODE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// RXDMAEN [0:0] /// Rx buffer DMA enable RXDMAEN: u1 = 0, /// TXDMAEN [1:1] /// Tx buffer DMA enable TXDMAEN: u1 = 0, /// SSOE [2:2] /// SS output enable SSOE: u1 = 0, /// unused [3:4] _unused3: u2 = 0, /// ERRIE [5:5] /// Error interrupt enable ERRIE: u1 = 0, /// RXNEIE [6:6] /// RX buffer not empty interrupt RXNEIE: u1 = 0, /// TXEIE [7:7] /// Tx buffer empty interrupt TXEIE: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SR const SR_val = packed struct { /// RXNE [0:0] /// Receive buffer not empty RXNE: u1 = 0, /// TXE [1:1] /// Transmit buffer empty TXE: u1 = 1, /// CHSIDE [2:2] /// Channel side CHSIDE: u1 = 0, /// UDR [3:3] /// Underrun flag UDR: u1 = 0, /// CRCERR [4:4] /// CRC error flag CRCERR: u1 = 0, /// MODF [5:5] /// Mode fault MODF: u1 = 0, /// OVR [6:6] /// Overrun flag OVR: u1 = 0, /// BSY [7:7] /// Busy flag BSY: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x8); /// DR const DR_val = packed struct { /// DR [0:15] /// Data register DR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// data register pub const DR = Register(DR_val).init(base_address + 0xc); /// CRCPR const CRCPR_val = packed struct { /// CRCPOLY [0:15] /// CRC polynomial register CRCPOLY: u16 = 7, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// CRC polynomial register pub const CRCPR = Register(CRCPR_val).init(base_address + 0x10); /// RXCRCR const RXCRCR_val = packed struct { /// RxCRC [0:15] /// Rx CRC register RxCRC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, 
_unused24: u8 = 0, }; /// RX CRC register pub const RXCRCR = Register(RXCRCR_val).init(base_address + 0x14); /// TXCRCR const TXCRCR_val = packed struct { /// TxCRC [0:15] /// Tx CRC register TxCRC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// TX CRC register pub const TXCRCR = Register(TXCRCR_val).init(base_address + 0x18); /// I2SCFGR const I2SCFGR_val = packed struct { /// CHLEN [0:0] /// Channel length (number of bits per audio CHLEN: u1 = 0, /// DATLEN [1:2] /// Data length to be DATLEN: u2 = 0, /// CKPOL [3:3] /// Steady state clock CKPOL: u1 = 0, /// I2SSTD [4:5] /// I2S standard selection I2SSTD: u2 = 0, /// unused [6:6] _unused6: u1 = 0, /// PCMSYNC [7:7] /// PCM frame synchronization PCMSYNC: u1 = 0, /// I2SCFG [8:9] /// I2S configuration mode I2SCFG: u2 = 0, /// I2SE [10:10] /// I2S Enable I2SE: u1 = 0, /// I2SMOD [11:11] /// I2S mode selection I2SMOD: u1 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// I2S configuration register pub const I2SCFGR = Register(I2SCFGR_val).init(base_address + 0x1c); /// I2SPR const I2SPR_val = packed struct { /// I2SDIV [0:7] /// I2S Linear prescaler I2SDIV: u8 = 16, /// ODD [8:8] /// Odd factor for the ODD: u1 = 0, /// MCKOE [9:9] /// Master clock output enable MCKOE: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// I2S prescaler register pub const I2SPR = Register(I2SPR_val).init(base_address + 0x20); }; /// Serial peripheral interface pub const SPI3 = struct { const base_address = 0x40003c00; /// CR1 const CR1_val = packed struct { /// CPHA [0:0] /// Clock phase CPHA: u1 = 0, /// CPOL [1:1] /// Clock polarity CPOL: u1 = 0, /// MSTR [2:2] /// Master selection MSTR: u1 = 0, /// BR [3:5] /// Baud rate control BR: u3 = 0, /// SPE [6:6] /// SPI enable SPE: u1 = 0, /// LSBFIRST [7:7] /// Frame format LSBFIRST: u1 = 0, /// SSI [8:8] /// Internal slave select SSI: u1 = 0, /// SSM [9:9] /// Software slave management SSM: u1 = 0, /// RXONLY [10:10] /// Receive only RXONLY: u1 = 0, /// DFF [11:11] /// Data frame format DFF: u1 = 0, /// CRCNEXT [12:12] /// CRC transfer next CRCNEXT: u1 = 0, /// CRCEN [13:13] /// Hardware CRC calculation CRCEN: u1 = 0, /// BIDIOE [14:14] /// Output enable in bidirectional BIDIOE: u1 = 0, /// BIDIMODE [15:15] /// Bidirectional data mode BIDIMODE: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x0); /// CR2 const CR2_val = packed struct { /// RXDMAEN [0:0] /// Rx buffer DMA enable RXDMAEN: u1 = 0, /// TXDMAEN [1:1] /// Tx buffer DMA enable TXDMAEN: u1 = 0, /// SSOE [2:2] /// SS output enable SSOE: u1 = 0, /// unused [3:4] _unused3: u2 = 0, /// ERRIE [5:5] /// Error interrupt enable ERRIE: u1 = 0, /// RXNEIE [6:6] /// RX buffer not empty interrupt RXNEIE: u1 = 0, /// TXEIE [7:7] /// Tx buffer empty interrupt TXEIE: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x4); /// SR const SR_val = packed struct { /// RXNE [0:0] /// Receive buffer not empty RXNE: u1 = 0, /// TXE [1:1] /// Transmit buffer empty TXE: u1 = 1, /// CHSIDE [2:2] /// Channel side CHSIDE: u1 = 0, /// UDR [3:3] /// Underrun flag UDR: u1 = 0, /// CRCERR [4:4] /// CRC error flag CRCERR: u1 = 0, /// MODF [5:5] /// Mode fault MODF: u1 = 0, /// OVR [6:6] /// Overrun flag OVR: u1 = 0, /// BSY [7:7] /// Busy flag BSY: u1 = 0, /// unused [8:31] _unused8: 
u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x8); /// DR const DR_val = packed struct { /// DR [0:15] /// Data register DR: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// data register pub const DR = Register(DR_val).init(base_address + 0xc); /// CRCPR const CRCPR_val = packed struct { /// CRCPOLY [0:15] /// CRC polynomial register CRCPOLY: u16 = 7, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// CRC polynomial register pub const CRCPR = Register(CRCPR_val).init(base_address + 0x10); /// RXCRCR const RXCRCR_val = packed struct { /// RxCRC [0:15] /// Rx CRC register RxCRC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// RX CRC register pub const RXCRCR = Register(RXCRCR_val).init(base_address + 0x14); /// TXCRCR const TXCRCR_val = packed struct { /// TxCRC [0:15] /// Tx CRC register TxCRC: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// TX CRC register pub const TXCRCR = Register(TXCRCR_val).init(base_address + 0x18); /// I2SCFGR const I2SCFGR_val = packed struct { /// CHLEN [0:0] /// Channel length (number of bits per audio CHLEN: u1 = 0, /// DATLEN [1:2] /// Data length to be DATLEN: u2 = 0, /// CKPOL [3:3] /// Steady state clock CKPOL: u1 = 0, /// I2SSTD [4:5] /// I2S standard selection I2SSTD: u2 = 0, /// unused [6:6] _unused6: u1 = 0, /// PCMSYNC [7:7] /// PCM frame synchronization PCMSYNC: u1 = 0, /// I2SCFG [8:9] /// I2S configuration mode I2SCFG: u2 = 0, /// I2SE [10:10] /// I2S Enable I2SE: u1 = 0, /// I2SMOD [11:11] /// I2S mode selection I2SMOD: u1 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// I2S configuration register pub const I2SCFGR = Register(I2SCFGR_val).init(base_address + 0x1c); /// I2SPR const I2SPR_val = packed struct { /// I2SDIV [0:7] /// I2S Linear prescaler I2SDIV: u8 = 16, /// ODD [8:8] /// Odd factor for the ODD: u1 = 0, /// MCKOE [9:9] /// Master clock output enable MCKOE: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// I2S prescaler register pub const I2SPR = Register(I2SPR_val).init(base_address + 0x20); }; /// Universal synchronous asynchronous receiver pub const USART1 = struct { const base_address = 0x40013800; /// SR const SR_val = packed struct { /// PE [0:0] /// Parity error PE: u1 = 0, /// FE [1:1] /// Framing error FE: u1 = 0, /// NE [2:2] /// Noise error flag NE: u1 = 0, /// ORE [3:3] /// Overrun error ORE: u1 = 0, /// IDLE [4:4] /// IDLE line detected IDLE: u1 = 0, /// RXNE [5:5] /// Read data register not RXNE: u1 = 0, /// TC [6:6] /// Transmission complete TC: u1 = 1, /// TXE [7:7] /// Transmit data register TXE: u1 = 1, /// LBD [8:8] /// LIN break detection flag LBD: u1 = 0, /// CTS [9:9] /// CTS flag CTS: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register pub const SR = Register(SR_val).init(base_address + 0x0); /// DR const DR_val = packed struct { /// DR [0:8] /// Data value DR: u9 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Data register pub const DR = Register(DR_val).init(base_address + 0x4); /// BRR const BRR_val = packed struct { /// DIV_Fraction [0:3] /// fraction of USARTDIV DIV_Fraction: u4 = 0, /// DIV_Mantissa [4:15] /// mantissa of USARTDIV DIV_Mantissa: u12 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Baud rate register pub const BRR = 
Register(BRR_val).init(base_address + 0x8); /// CR1 const CR1_val = packed struct { /// SBK [0:0] /// Send break SBK: u1 = 0, /// RWU [1:1] /// Receiver wakeup RWU: u1 = 0, /// RE [2:2] /// Receiver enable RE: u1 = 0, /// TE [3:3] /// Transmitter enable TE: u1 = 0, /// IDLEIE [4:4] /// IDLE interrupt enable IDLEIE: u1 = 0, /// RXNEIE [5:5] /// RXNE interrupt enable RXNEIE: u1 = 0, /// TCIE [6:6] /// Transmission complete interrupt TCIE: u1 = 0, /// TXEIE [7:7] /// TXE interrupt enable TXEIE: u1 = 0, /// PEIE [8:8] /// PE interrupt enable PEIE: u1 = 0, /// PS [9:9] /// Parity selection PS: u1 = 0, /// PCE [10:10] /// Parity control enable PCE: u1 = 0, /// WAKE [11:11] /// Wakeup method WAKE: u1 = 0, /// M [12:12] /// Word length M: u1 = 0, /// UE [13:13] /// USART enable UE: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0xc); /// CR2 const CR2_val = packed struct { /// ADD [0:3] /// Address of the USART node ADD: u4 = 0, /// unused [4:4] _unused4: u1 = 0, /// LBDL [5:5] /// lin break detection length LBDL: u1 = 0, /// LBDIE [6:6] /// LIN break detection interrupt LBDIE: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// LBCL [8:8] /// Last bit clock pulse LBCL: u1 = 0, /// CPHA [9:9] /// Clock phase CPHA: u1 = 0, /// CPOL [10:10] /// Clock polarity CPOL: u1 = 0, /// CLKEN [11:11] /// Clock enable CLKEN: u1 = 0, /// STOP [12:13] /// STOP bits STOP: u2 = 0, /// LINEN [14:14] /// LIN mode enable LINEN: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x10); /// CR3 const CR3_val = packed struct { /// EIE [0:0] /// Error interrupt enable EIE: u1 = 0, /// IREN [1:1] /// IrDA mode enable IREN: u1 = 0, /// IRLP [2:2] /// IrDA low-power IRLP: u1 = 0, /// HDSEL [3:3] /// Half-duplex selection HDSEL: u1 = 0, /// NACK [4:4] /// Smartcard NACK enable NACK: u1 = 0, /// SCEN [5:5] /// Smartcard mode enable SCEN: u1 = 0, /// DMAR [6:6] /// DMA enable receiver DMAR: u1 = 0, /// DMAT [7:7] /// DMA enable transmitter DMAT: u1 = 0, /// RTSE [8:8] /// RTS enable RTSE: u1 = 0, /// CTSE [9:9] /// CTS enable CTSE: u1 = 0, /// CTSIE [10:10] /// CTS interrupt enable CTSIE: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 3 pub const CR3 = Register(CR3_val).init(base_address + 0x14); /// GTPR const GTPR_val = packed struct { /// PSC [0:7] /// Prescaler value PSC: u8 = 0, /// GT [8:15] /// Guard time value GT: u8 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Guard time and prescaler pub const GTPR = Register(GTPR_val).init(base_address + 0x18); }; /// Universal synchronous asynchronous receiver pub const USART2 = struct { const base_address = 0x40004400; /// SR const SR_val = packed struct { /// PE [0:0] /// Parity error PE: u1 = 0, /// FE [1:1] /// Framing error FE: u1 = 0, /// NE [2:2] /// Noise error flag NE: u1 = 0, /// ORE [3:3] /// Overrun error ORE: u1 = 0, /// IDLE [4:4] /// IDLE line detected IDLE: u1 = 0, /// RXNE [5:5] /// Read data register not RXNE: u1 = 0, /// TC [6:6] /// Transmission complete TC: u1 = 1, /// TXE [7:7] /// Transmit data register TXE: u1 = 1, /// LBD [8:8] /// LIN break detection flag LBD: u1 = 0, /// CTS [9:9] /// CTS flag CTS: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register pub const SR = 
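// Polled transmit sketch (applies equally to USART1..USART3), under the same
// assumption that the Register wrapper provides read()/write()/modify():
//   USART2.CR1.modify(.{ .UE = 1, .TE = 1 }); // enable USART and transmitter
//   while (USART2.SR.read().TXE == 0) {}      // wait for an empty data register
//   USART2.DR.write(.{ .DR = 'A' });
//   while (USART2.SR.read().TC == 0) {}       // wait for transmission complete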
Register(SR_val).init(base_address + 0x0); /// DR const DR_val = packed struct { /// DR [0:8] /// Data value DR: u9 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Data register pub const DR = Register(DR_val).init(base_address + 0x4); /// BRR const BRR_val = packed struct { /// DIV_Fraction [0:3] /// fraction of USARTDIV DIV_Fraction: u4 = 0, /// DIV_Mantissa [4:15] /// mantissa of USARTDIV DIV_Mantissa: u12 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Baud rate register pub const BRR = Register(BRR_val).init(base_address + 0x8); /// CR1 const CR1_val = packed struct { /// SBK [0:0] /// Send break SBK: u1 = 0, /// RWU [1:1] /// Receiver wakeup RWU: u1 = 0, /// RE [2:2] /// Receiver enable RE: u1 = 0, /// TE [3:3] /// Transmitter enable TE: u1 = 0, /// IDLEIE [4:4] /// IDLE interrupt enable IDLEIE: u1 = 0, /// RXNEIE [5:5] /// RXNE interrupt enable RXNEIE: u1 = 0, /// TCIE [6:6] /// Transmission complete interrupt TCIE: u1 = 0, /// TXEIE [7:7] /// TXE interrupt enable TXEIE: u1 = 0, /// PEIE [8:8] /// PE interrupt enable PEIE: u1 = 0, /// PS [9:9] /// Parity selection PS: u1 = 0, /// PCE [10:10] /// Parity control enable PCE: u1 = 0, /// WAKE [11:11] /// Wakeup method WAKE: u1 = 0, /// M [12:12] /// Word length M: u1 = 0, /// UE [13:13] /// USART enable UE: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0xc); /// CR2 const CR2_val = packed struct { /// ADD [0:3] /// Address of the USART node ADD: u4 = 0, /// unused [4:4] _unused4: u1 = 0, /// LBDL [5:5] /// lin break detection length LBDL: u1 = 0, /// LBDIE [6:6] /// LIN break detection interrupt LBDIE: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// LBCL [8:8] /// Last bit clock pulse LBCL: u1 = 0, /// CPHA [9:9] /// Clock phase CPHA: u1 = 0, /// CPOL [10:10] /// Clock polarity CPOL: u1 = 0, /// CLKEN [11:11] /// Clock enable CLKEN: u1 = 0, /// STOP [12:13] /// STOP bits STOP: u2 = 0, /// LINEN [14:14] /// LIN mode enable LINEN: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x10); /// CR3 const CR3_val = packed struct { /// EIE [0:0] /// Error interrupt enable EIE: u1 = 0, /// IREN [1:1] /// IrDA mode enable IREN: u1 = 0, /// IRLP [2:2] /// IrDA low-power IRLP: u1 = 0, /// HDSEL [3:3] /// Half-duplex selection HDSEL: u1 = 0, /// NACK [4:4] /// Smartcard NACK enable NACK: u1 = 0, /// SCEN [5:5] /// Smartcard mode enable SCEN: u1 = 0, /// DMAR [6:6] /// DMA enable receiver DMAR: u1 = 0, /// DMAT [7:7] /// DMA enable transmitter DMAT: u1 = 0, /// RTSE [8:8] /// RTS enable RTSE: u1 = 0, /// CTSE [9:9] /// CTS enable CTSE: u1 = 0, /// CTSIE [10:10] /// CTS interrupt enable CTSIE: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 3 pub const CR3 = Register(CR3_val).init(base_address + 0x14); /// GTPR const GTPR_val = packed struct { /// PSC [0:7] /// Prescaler value PSC: u8 = 0, /// GT [8:15] /// Guard time value GT: u8 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Guard time and prescaler pub const GTPR = Register(GTPR_val).init(base_address + 0x18); }; /// Universal synchronous asynchronous receiver pub const USART3 = struct { const base_address = 0x40004800; /// SR const SR_val = packed struct { /// PE [0:0] /// Parity error PE: u1 = 0, /// FE [1:1] /// Framing error FE: 
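// Note: the error flags of this status register (PE, FE, NE, ORE) are cleared
// by the software sequence "read SR, then read DR", which with the assumed
// wrapper interface is simply:
//   _ = USART3.SR.read();
//   _ = USART3.DR.read();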
u1 = 0, /// NE [2:2] /// Noise error flag NE: u1 = 0, /// ORE [3:3] /// Overrun error ORE: u1 = 0, /// IDLE [4:4] /// IDLE line detected IDLE: u1 = 0, /// RXNE [5:5] /// Read data register not RXNE: u1 = 0, /// TC [6:6] /// Transmission complete TC: u1 = 1, /// TXE [7:7] /// Transmit data register TXE: u1 = 1, /// LBD [8:8] /// LIN break detection flag LBD: u1 = 0, /// CTS [9:9] /// CTS flag CTS: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register pub const SR = Register(SR_val).init(base_address + 0x0); /// DR const DR_val = packed struct { /// DR [0:8] /// Data value DR: u9 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Data register pub const DR = Register(DR_val).init(base_address + 0x4); /// BRR const BRR_val = packed struct { /// DIV_Fraction [0:3] /// fraction of USARTDIV DIV_Fraction: u4 = 0, /// DIV_Mantissa [4:15] /// mantissa of USARTDIV DIV_Mantissa: u12 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Baud rate register pub const BRR = Register(BRR_val).init(base_address + 0x8); /// CR1 const CR1_val = packed struct { /// SBK [0:0] /// Send break SBK: u1 = 0, /// RWU [1:1] /// Receiver wakeup RWU: u1 = 0, /// RE [2:2] /// Receiver enable RE: u1 = 0, /// TE [3:3] /// Transmitter enable TE: u1 = 0, /// IDLEIE [4:4] /// IDLE interrupt enable IDLEIE: u1 = 0, /// RXNEIE [5:5] /// RXNE interrupt enable RXNEIE: u1 = 0, /// TCIE [6:6] /// Transmission complete interrupt TCIE: u1 = 0, /// TXEIE [7:7] /// TXE interrupt enable TXEIE: u1 = 0, /// PEIE [8:8] /// PE interrupt enable PEIE: u1 = 0, /// PS [9:9] /// Parity selection PS: u1 = 0, /// PCE [10:10] /// Parity control enable PCE: u1 = 0, /// WAKE [11:11] /// Wakeup method WAKE: u1 = 0, /// M [12:12] /// Word length M: u1 = 0, /// UE [13:13] /// USART enable UE: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0xc); /// CR2 const CR2_val = packed struct { /// ADD [0:3] /// Address of the USART node ADD: u4 = 0, /// unused [4:4] _unused4: u1 = 0, /// LBDL [5:5] /// lin break detection length LBDL: u1 = 0, /// LBDIE [6:6] /// LIN break detection interrupt LBDIE: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// LBCL [8:8] /// Last bit clock pulse LBCL: u1 = 0, /// CPHA [9:9] /// Clock phase CPHA: u1 = 0, /// CPOL [10:10] /// Clock polarity CPOL: u1 = 0, /// CLKEN [11:11] /// Clock enable CLKEN: u1 = 0, /// STOP [12:13] /// STOP bits STOP: u2 = 0, /// LINEN [14:14] /// LIN mode enable LINEN: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x10); /// CR3 const CR3_val = packed struct { /// EIE [0:0] /// Error interrupt enable EIE: u1 = 0, /// IREN [1:1] /// IrDA mode enable IREN: u1 = 0, /// IRLP [2:2] /// IrDA low-power IRLP: u1 = 0, /// HDSEL [3:3] /// Half-duplex selection HDSEL: u1 = 0, /// NACK [4:4] /// Smartcard NACK enable NACK: u1 = 0, /// SCEN [5:5] /// Smartcard mode enable SCEN: u1 = 0, /// DMAR [6:6] /// DMA enable receiver DMAR: u1 = 0, /// DMAT [7:7] /// DMA enable transmitter DMAT: u1 = 0, /// RTSE [8:8] /// RTS enable RTSE: u1 = 0, /// CTSE [9:9] /// CTS enable CTSE: u1 = 0, /// CTSIE [10:10] /// CTS interrupt enable CTSIE: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register 3 pub const CR3 = Register(CR3_val).init(base_address + 
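// Flow-control note: RTS/CTS hardware flow control is switched on with the
// RTSE and CTSE fields of CR3 above, e.g.
//   USART3.CR3.modify(.{ .RTSE = 1, .CTSE = 1 });
// CTS line changes are then reported through the CTS flag in SR, with an
// optional interrupt via CTSIE.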
0x14); /// GTPR const GTPR_val = packed struct { /// PSC [0:7] /// Prescaler value PSC: u8 = 0, /// GT [8:15] /// Guard time value GT: u8 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Guard time and prescaler pub const GTPR = Register(GTPR_val).init(base_address + 0x18); }; /// Analog to digital converter pub const ADC1 = struct { const base_address = 0x40012400; /// SR const SR_val = packed struct { /// AWD [0:0] /// Analog watchdog flag AWD: u1 = 0, /// EOC [1:1] /// Regular channel end of EOC: u1 = 0, /// JEOC [2:2] /// Injected channel end of JEOC: u1 = 0, /// JSTRT [3:3] /// Injected channel start JSTRT: u1 = 0, /// STRT [4:4] /// Regular channel start flag STRT: u1 = 0, /// unused [5:31] _unused5: u3 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x0); /// CR1 const CR1_val = packed struct { /// AWDCH [0:4] /// Analog watchdog channel select AWDCH: u5 = 0, /// EOCIE [5:5] /// Interrupt enable for EOC EOCIE: u1 = 0, /// AWDIE [6:6] /// Analog watchdog interrupt AWDIE: u1 = 0, /// JEOCIE [7:7] /// Interrupt enable for injected JEOCIE: u1 = 0, /// SCAN [8:8] /// Scan mode SCAN: u1 = 0, /// AWDSGL [9:9] /// Enable the watchdog on a single channel AWDSGL: u1 = 0, /// JAUTO [10:10] /// Automatic injected group JAUTO: u1 = 0, /// DISCEN [11:11] /// Discontinuous mode on regular DISCEN: u1 = 0, /// JDISCEN [12:12] /// Discontinuous mode on injected JDISCEN: u1 = 0, /// DISCNUM [13:15] /// Discontinuous mode channel DISCNUM: u3 = 0, /// DUALMOD [16:19] /// Dual mode selection DUALMOD: u4 = 0, /// unused [20:21] _unused20: u2 = 0, /// JAWDEN [22:22] /// Analog watchdog enable on injected JAWDEN: u1 = 0, /// AWDEN [23:23] /// Analog watchdog enable on regular AWDEN: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x4); /// CR2 const CR2_val = packed struct { /// ADON [0:0] /// A/D converter ON / OFF ADON: u1 = 0, /// CONT [1:1] /// Continuous conversion CONT: u1 = 0, /// CAL [2:2] /// A/D calibration CAL: u1 = 0, /// RSTCAL [3:3] /// Reset calibration RSTCAL: u1 = 0, /// unused [4:7] _unused4: u4 = 0, /// DMA [8:8] /// Direct memory access mode DMA: u1 = 0, /// unused [9:10] _unused9: u2 = 0, /// ALIGN [11:11] /// Data alignment ALIGN: u1 = 0, /// JEXTSEL [12:14] /// External event select for injected JEXTSEL: u3 = 0, /// JEXTTRIG [15:15] /// External trigger conversion mode for JEXTTRIG: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// EXTSEL [17:19] /// External event select for regular EXTSEL: u3 = 0, /// EXTTRIG [20:20] /// External trigger conversion mode for EXTTRIG: u1 = 0, /// JSWSTART [21:21] /// Start conversion of injected JSWSTART: u1 = 0, /// SWSTART [22:22] /// Start conversion of regular SWSTART: u1 = 0, /// TSVREFE [23:23] /// Temperature sensor and VREFINT TSVREFE: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x8); /// SMPR1 const SMPR1_val = packed struct { /// SMP10 [0:2] /// Channel 10 sample time SMP10: u3 = 0, /// SMP11 [3:5] /// Channel 11 sample time SMP11: u3 = 0, /// SMP12 [6:8] /// Channel 12 sample time SMP12: u3 = 0, /// SMP13 [9:11] /// Channel 13 sample time SMP13: u3 = 0, /// SMP14 [12:14] /// Channel 14 sample time SMP14: u3 = 0, /// SMP15 [15:17] /// Channel 15 sample time SMP15: u3 = 0, /// SMP16 [18:20] /// Channel 16 sample time SMP16: u3 = 0, /// SMP17 [21:23] /// Channel 17 sample time 
SMP17: u3 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// sample time register 1 pub const SMPR1 = Register(SMPR1_val).init(base_address + 0xc); /// SMPR2 const SMPR2_val = packed struct { /// SMP0 [0:2] /// Channel 0 sample time SMP0: u3 = 0, /// SMP1 [3:5] /// Channel 1 sample time SMP1: u3 = 0, /// SMP2 [6:8] /// Channel 2 sample time SMP2: u3 = 0, /// SMP3 [9:11] /// Channel 3 sample time SMP3: u3 = 0, /// SMP4 [12:14] /// Channel 4 sample time SMP4: u3 = 0, /// SMP5 [15:17] /// Channel 5 sample time SMP5: u3 = 0, /// SMP6 [18:20] /// Channel 6 sample time SMP6: u3 = 0, /// SMP7 [21:23] /// Channel 7 sample time SMP7: u3 = 0, /// SMP8 [24:26] /// Channel 8 sample time SMP8: u3 = 0, /// SMP9 [27:29] /// Channel 9 sample time SMP9: u3 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// sample time register 2 pub const SMPR2 = Register(SMPR2_val).init(base_address + 0x10); /// JOFR1 const JOFR1_val = packed struct { /// JOFFSET1 [0:11] /// Data offset for injected channel JOFFSET1: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR1 = Register(JOFR1_val).init(base_address + 0x14); /// JOFR2 const JOFR2_val = packed struct { /// JOFFSET2 [0:11] /// Data offset for injected channel JOFFSET2: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR2 = Register(JOFR2_val).init(base_address + 0x18); /// JOFR3 const JOFR3_val = packed struct { /// JOFFSET3 [0:11] /// Data offset for injected channel JOFFSET3: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR3 = Register(JOFR3_val).init(base_address + 0x1c); /// JOFR4 const JOFR4_val = packed struct { /// JOFFSET4 [0:11] /// Data offset for injected channel JOFFSET4: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR4 = Register(JOFR4_val).init(base_address + 0x20); /// HTR const HTR_val = packed struct { /// HT [0:11] /// Analog watchdog higher HT: u12 = 4095, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// watchdog higher threshold pub const HTR = Register(HTR_val).init(base_address + 0x24); /// LTR const LTR_val = packed struct { /// LT [0:11] /// Analog watchdog lower LT: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// watchdog lower threshold pub const LTR = Register(LTR_val).init(base_address + 0x28); /// SQR1 const SQR1_val = packed struct { /// SQ13 [0:4] /// 13th conversion in regular SQ13: u5 = 0, /// SQ14 [5:9] /// 14th conversion in regular SQ14: u5 = 0, /// SQ15 [10:14] /// 15th conversion in regular SQ15: u5 = 0, /// SQ16 [15:19] /// 16th conversion in regular SQ16: u5 = 0, /// L [20:23] /// Regular channel sequence L: u4 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// regular sequence register 1 pub const SQR1 = Register(SQR1_val).init(base_address + 0x2c); /// SQR2 const SQR2_val = packed struct { /// SQ7 [0:4] /// 7th conversion in regular SQ7: u5 = 0, /// SQ8 [5:9] /// 8th conversion in regular SQ8: u5 = 0, /// SQ9 [10:14] /// 9th conversion in regular SQ9: u5 = 0, /// SQ10 [15:19] /// 10th conversion in regular SQ10: u5 = 0, /// SQ11 [20:24] /// 11th conversion in regular SQ11: u5 = 0, /// SQ12 [25:29] /// 12th conversion in regular SQ12: u5 = 0, /// unused [30:31] _unused30: u2 = 0, 
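// Sequence note: L in SQR1 is the regular sequence length minus one, and
// SQ1..SQ16 pick the channel converted at each rank.  A single software-
// started conversion of channel 3 could look like this (same read()/write()/
// modify() wrapper assumption; the ADC is assumed already powered on and
// calibrated):
//   ADC1.SQR1.modify(.{ .L = 0 });    // one conversion in the sequence
//   ADC1.SQR3.modify(.{ .SQ1 = 3 });  // rank 1 converts channel 3
//   ADC1.CR2.modify(.{ .ADON = 1 });  // re-writing ADON starts the conversion
//   while (ADC1.SR.read().EOC == 0) {}
//   const sample = ADC1.DR.read().DATA;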
}; /// regular sequence register 2 pub const SQR2 = Register(SQR2_val).init(base_address + 0x30); /// SQR3 const SQR3_val = packed struct { /// SQ1 [0:4] /// 1st conversion in regular SQ1: u5 = 0, /// SQ2 [5:9] /// 2nd conversion in regular SQ2: u5 = 0, /// SQ3 [10:14] /// 3rd conversion in regular SQ3: u5 = 0, /// SQ4 [15:19] /// 4th conversion in regular SQ4: u5 = 0, /// SQ5 [20:24] /// 5th conversion in regular SQ5: u5 = 0, /// SQ6 [25:29] /// 6th conversion in regular SQ6: u5 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// regular sequence register 3 pub const SQR3 = Register(SQR3_val).init(base_address + 0x34); /// JSQR const JSQR_val = packed struct { /// JSQ1 [0:4] /// 1st conversion in injected JSQ1: u5 = 0, /// JSQ2 [5:9] /// 2nd conversion in injected JSQ2: u5 = 0, /// JSQ3 [10:14] /// 3rd conversion in injected JSQ3: u5 = 0, /// JSQ4 [15:19] /// 4th conversion in injected JSQ4: u5 = 0, /// JL [20:21] /// Injected sequence length JL: u2 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// injected sequence register pub const JSQR = Register(JSQR_val).init(base_address + 0x38); /// JDR1 const JDR1_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR1 = Register(JDR1_val).init(base_address + 0x3c); /// JDR2 const JDR2_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR2 = Register(JDR2_val).init(base_address + 0x40); /// JDR3 const JDR3_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR3 = Register(JDR3_val).init(base_address + 0x44); /// JDR4 const JDR4_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR4 = Register(JDR4_val).init(base_address + 0x48); /// DR const DR_val = packed struct { /// DATA [0:15] /// Regular data DATA: u16 = 0, /// ADC2DATA [16:31] /// ADC2 data ADC2DATA: u16 = 0, }; /// regular data register pub const DR = Register(DR_val).init(base_address + 0x4c); }; /// Analog to digital converter pub const ADC2 = struct { const base_address = 0x40012800; /// SR const SR_val = packed struct { /// AWD [0:0] /// Analog watchdog flag AWD: u1 = 0, /// EOC [1:1] /// Regular channel end of EOC: u1 = 0, /// JEOC [2:2] /// Injected channel end of JEOC: u1 = 0, /// JSTRT [3:3] /// Injected channel start JSTRT: u1 = 0, /// STRT [4:4] /// Regular channel start flag STRT: u1 = 0, /// unused [5:31] _unused5: u3 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x0); /// CR1 const CR1_val = packed struct { /// AWDCH [0:4] /// Analog watchdog channel select AWDCH: u5 = 0, /// EOCIE [5:5] /// Interrupt enable for EOC EOCIE: u1 = 0, /// AWDIE [6:6] /// Analog watchdog interrupt AWDIE: u1 = 0, /// JEOCIE [7:7] /// Interrupt enable for injected JEOCIE: u1 = 0, /// SCAN [8:8] /// Scan mode SCAN: u1 = 0, /// AWDSGL [9:9] /// Enable the watchdog on a single channel AWDSGL: u1 = 0, /// JAUTO [10:10] /// Automatic injected group JAUTO: u1 = 0, /// DISCEN [11:11] /// Discontinuous mode on regular DISCEN: u1 = 0, /// JDISCEN [12:12] /// Discontinuous mode on injected JDISCEN: u1 = 0, /// DISCNUM [13:15] 
/// Discontinuous mode channel DISCNUM: u3 = 0, /// unused [16:21] _unused16: u6 = 0, /// JAWDEN [22:22] /// Analog watchdog enable on injected JAWDEN: u1 = 0, /// AWDEN [23:23] /// Analog watchdog enable on regular AWDEN: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x4); /// CR2 const CR2_val = packed struct { /// ADON [0:0] /// A/D converter ON / OFF ADON: u1 = 0, /// CONT [1:1] /// Continuous conversion CONT: u1 = 0, /// CAL [2:2] /// A/D calibration CAL: u1 = 0, /// RSTCAL [3:3] /// Reset calibration RSTCAL: u1 = 0, /// unused [4:7] _unused4: u4 = 0, /// DMA [8:8] /// Direct memory access mode DMA: u1 = 0, /// unused [9:10] _unused9: u2 = 0, /// ALIGN [11:11] /// Data alignment ALIGN: u1 = 0, /// JEXTSEL [12:14] /// External event select for injected JEXTSEL: u3 = 0, /// JEXTTRIG [15:15] /// External trigger conversion mode for JEXTTRIG: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// EXTSEL [17:19] /// External event select for regular EXTSEL: u3 = 0, /// EXTTRIG [20:20] /// External trigger conversion mode for EXTTRIG: u1 = 0, /// JSWSTART [21:21] /// Start conversion of injected JSWSTART: u1 = 0, /// SWSTART [22:22] /// Start conversion of regular SWSTART: u1 = 0, /// TSVREFE [23:23] /// Temperature sensor and VREFINT TSVREFE: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x8); /// SMPR1 const SMPR1_val = packed struct { /// SMP10 [0:2] /// Channel 10 sample time SMP10: u3 = 0, /// SMP11 [3:5] /// Channel 11 sample time SMP11: u3 = 0, /// SMP12 [6:8] /// Channel 12 sample time SMP12: u3 = 0, /// SMP13 [9:11] /// Channel 13 sample time SMP13: u3 = 0, /// SMP14 [12:14] /// Channel 14 sample time SMP14: u3 = 0, /// SMP15 [15:17] /// Channel 15 sample time SMP15: u3 = 0, /// SMP16 [18:20] /// Channel 16 sample time SMP16: u3 = 0, /// SMP17 [21:23] /// Channel 17 sample time SMP17: u3 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// sample time register 1 pub const SMPR1 = Register(SMPR1_val).init(base_address + 0xc); /// SMPR2 const SMPR2_val = packed struct { /// SMP0 [0:2] /// Channel 0 sample time SMP0: u3 = 0, /// SMP1 [3:5] /// Channel 1 sample time SMP1: u3 = 0, /// SMP2 [6:8] /// Channel 2 sample time SMP2: u3 = 0, /// SMP3 [9:11] /// Channel 3 sample time SMP3: u3 = 0, /// SMP4 [12:14] /// Channel 4 sample time SMP4: u3 = 0, /// SMP5 [15:17] /// Channel 5 sample time SMP5: u3 = 0, /// SMP6 [18:20] /// Channel 6 sample time SMP6: u3 = 0, /// SMP7 [21:23] /// Channel 7 sample time SMP7: u3 = 0, /// SMP8 [24:26] /// Channel 8 sample time SMP8: u3 = 0, /// SMP9 [27:29] /// Channel 9 sample time SMP9: u3 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// sample time register 2 pub const SMPR2 = Register(SMPR2_val).init(base_address + 0x10); /// JOFR1 const JOFR1_val = packed struct { /// JOFFSET1 [0:11] /// Data offset for injected channel JOFFSET1: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR1 = Register(JOFR1_val).init(base_address + 0x14); /// JOFR2 const JOFR2_val = packed struct { /// JOFFSET2 [0:11] /// Data offset for injected channel JOFFSET2: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR2 = Register(JOFR2_val).init(base_address + 0x18); /// JOFR3 const JOFR3_val = packed struct { /// JOFFSET3 [0:11] /// Data offset for 
injected channel JOFFSET3: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR3 = Register(JOFR3_val).init(base_address + 0x1c); /// JOFR4 const JOFR4_val = packed struct { /// JOFFSET4 [0:11] /// Data offset for injected channel JOFFSET4: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR4 = Register(JOFR4_val).init(base_address + 0x20); /// HTR const HTR_val = packed struct { /// HT [0:11] /// Analog watchdog higher HT: u12 = 4095, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// watchdog higher threshold pub const HTR = Register(HTR_val).init(base_address + 0x24); /// LTR const LTR_val = packed struct { /// LT [0:11] /// Analog watchdog lower LT: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// watchdog lower threshold pub const LTR = Register(LTR_val).init(base_address + 0x28); /// SQR1 const SQR1_val = packed struct { /// SQ13 [0:4] /// 13th conversion in regular SQ13: u5 = 0, /// SQ14 [5:9] /// 14th conversion in regular SQ14: u5 = 0, /// SQ15 [10:14] /// 15th conversion in regular SQ15: u5 = 0, /// SQ16 [15:19] /// 16th conversion in regular SQ16: u5 = 0, /// L [20:23] /// Regular channel sequence L: u4 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// regular sequence register 1 pub const SQR1 = Register(SQR1_val).init(base_address + 0x2c); /// SQR2 const SQR2_val = packed struct { /// SQ7 [0:4] /// 7th conversion in regular SQ7: u5 = 0, /// SQ8 [5:9] /// 8th conversion in regular SQ8: u5 = 0, /// SQ9 [10:14] /// 9th conversion in regular SQ9: u5 = 0, /// SQ10 [15:19] /// 10th conversion in regular SQ10: u5 = 0, /// SQ11 [20:24] /// 11th conversion in regular SQ11: u5 = 0, /// SQ12 [25:29] /// 12th conversion in regular SQ12: u5 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// regular sequence register 2 pub const SQR2 = Register(SQR2_val).init(base_address + 0x30); /// SQR3 const SQR3_val = packed struct { /// SQ1 [0:4] /// 1st conversion in regular SQ1: u5 = 0, /// SQ2 [5:9] /// 2nd conversion in regular SQ2: u5 = 0, /// SQ3 [10:14] /// 3rd conversion in regular SQ3: u5 = 0, /// SQ4 [15:19] /// 4th conversion in regular SQ4: u5 = 0, /// SQ5 [20:24] /// 5th conversion in regular SQ5: u5 = 0, /// SQ6 [25:29] /// 6th conversion in regular SQ6: u5 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// regular sequence register 3 pub const SQR3 = Register(SQR3_val).init(base_address + 0x34); /// JSQR const JSQR_val = packed struct { /// JSQ1 [0:4] /// 1st conversion in injected JSQ1: u5 = 0, /// JSQ2 [5:9] /// 2nd conversion in injected JSQ2: u5 = 0, /// JSQ3 [10:14] /// 3rd conversion in injected JSQ3: u5 = 0, /// JSQ4 [15:19] /// 4th conversion in injected JSQ4: u5 = 0, /// JL [20:21] /// Injected sequence length JL: u2 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// injected sequence register pub const JSQR = Register(JSQR_val).init(base_address + 0x38); /// JDR1 const JDR1_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR1 = Register(JDR1_val).init(base_address + 0x3c); /// JDR2 const JDR2_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR2 = 
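// Injected-group note: up to four injected conversions are described by JSQR
// (JL = number of conversions minus one) and their results land in JDR1..JDR4
// once JEOC is set in SR; the JOFRx offsets above are subtracted from the raw
// data, so injected results are effectively signed.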
Register(JDR2_val).init(base_address + 0x40); /// JDR3 const JDR3_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR3 = Register(JDR3_val).init(base_address + 0x44); /// JDR4 const JDR4_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR4 = Register(JDR4_val).init(base_address + 0x48); /// DR const DR_val = packed struct { /// DATA [0:15] /// Regular data DATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// regular data register pub const DR = Register(DR_val).init(base_address + 0x4c); }; /// Analog to digital converter pub const ADC3 = struct { const base_address = 0x40013c00; /// SR const SR_val = packed struct { /// AWD [0:0] /// Analog watchdog flag AWD: u1 = 0, /// EOC [1:1] /// Regular channel end of EOC: u1 = 0, /// JEOC [2:2] /// Injected channel end of JEOC: u1 = 0, /// JSTRT [3:3] /// Injected channel start JSTRT: u1 = 0, /// STRT [4:4] /// Regular channel start flag STRT: u1 = 0, /// unused [5:31] _unused5: u3 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// status register pub const SR = Register(SR_val).init(base_address + 0x0); /// CR1 const CR1_val = packed struct { /// AWDCH [0:4] /// Analog watchdog channel select AWDCH: u5 = 0, /// EOCIE [5:5] /// Interrupt enable for EOC EOCIE: u1 = 0, /// AWDIE [6:6] /// Analog watchdog interrupt AWDIE: u1 = 0, /// JEOCIE [7:7] /// Interrupt enable for injected JEOCIE: u1 = 0, /// SCAN [8:8] /// Scan mode SCAN: u1 = 0, /// AWDSGL [9:9] /// Enable the watchdog on a single channel AWDSGL: u1 = 0, /// JAUTO [10:10] /// Automatic injected group JAUTO: u1 = 0, /// DISCEN [11:11] /// Discontinuous mode on regular DISCEN: u1 = 0, /// JDISCEN [12:12] /// Discontinuous mode on injected JDISCEN: u1 = 0, /// DISCNUM [13:15] /// Discontinuous mode channel DISCNUM: u3 = 0, /// unused [16:21] _unused16: u6 = 0, /// JAWDEN [22:22] /// Analog watchdog enable on injected JAWDEN: u1 = 0, /// AWDEN [23:23] /// Analog watchdog enable on regular AWDEN: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// control register 1 pub const CR1 = Register(CR1_val).init(base_address + 0x4); /// CR2 const CR2_val = packed struct { /// ADON [0:0] /// A/D converter ON / OFF ADON: u1 = 0, /// CONT [1:1] /// Continuous conversion CONT: u1 = 0, /// CAL [2:2] /// A/D calibration CAL: u1 = 0, /// RSTCAL [3:3] /// Reset calibration RSTCAL: u1 = 0, /// unused [4:7] _unused4: u4 = 0, /// DMA [8:8] /// Direct memory access mode DMA: u1 = 0, /// unused [9:10] _unused9: u2 = 0, /// ALIGN [11:11] /// Data alignment ALIGN: u1 = 0, /// JEXTSEL [12:14] /// External event select for injected JEXTSEL: u3 = 0, /// JEXTTRIG [15:15] /// External trigger conversion mode for JEXTTRIG: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// EXTSEL [17:19] /// External event select for regular EXTSEL: u3 = 0, /// EXTTRIG [20:20] /// External trigger conversion mode for EXTTRIG: u1 = 0, /// JSWSTART [21:21] /// Start conversion of injected JSWSTART: u1 = 0, /// SWSTART [22:22] /// Start conversion of regular SWSTART: u1 = 0, /// TSVREFE [23:23] /// Temperature sensor and VREFINT TSVREFE: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// control register 2 pub const CR2 = Register(CR2_val).init(base_address + 0x8); /// SMPR1 const SMPR1_val = packed struct { /// SMP10 [0:2] /// Channel 10 sample time SMP10: 
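// Sample-time note: each 3-bit SMPx field selects the per-channel sampling
// duration, from 1.5 ADC clock cycles (0b000) up to 239.5 cycles (0b111) on
// this family; slow or high-impedance sources such as the internal temperature
// sensor and Vrefint inputs need the longer settings.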
u3 = 0, /// SMP11 [3:5] /// Channel 11 sample time SMP11: u3 = 0, /// SMP12 [6:8] /// Channel 12 sample time SMP12: u3 = 0, /// SMP13 [9:11] /// Channel 13 sample time SMP13: u3 = 0, /// SMP14 [12:14] /// Channel 14 sample time SMP14: u3 = 0, /// SMP15 [15:17] /// Channel 15 sample time SMP15: u3 = 0, /// SMP16 [18:20] /// Channel 16 sample time SMP16: u3 = 0, /// SMP17 [21:23] /// Channel 17 sample time SMP17: u3 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// sample time register 1 pub const SMPR1 = Register(SMPR1_val).init(base_address + 0xc); /// SMPR2 const SMPR2_val = packed struct { /// SMP0 [0:2] /// Channel 0 sample time SMP0: u3 = 0, /// SMP1 [3:5] /// Channel 1 sample time SMP1: u3 = 0, /// SMP2 [6:8] /// Channel 2 sample time SMP2: u3 = 0, /// SMP3 [9:11] /// Channel 3 sample time SMP3: u3 = 0, /// SMP4 [12:14] /// Channel 4 sample time SMP4: u3 = 0, /// SMP5 [15:17] /// Channel 5 sample time SMP5: u3 = 0, /// SMP6 [18:20] /// Channel 6 sample time SMP6: u3 = 0, /// SMP7 [21:23] /// Channel 7 sample time SMP7: u3 = 0, /// SMP8 [24:26] /// Channel 8 sample time SMP8: u3 = 0, /// SMP9 [27:29] /// Channel 9 sample time SMP9: u3 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// sample time register 2 pub const SMPR2 = Register(SMPR2_val).init(base_address + 0x10); /// JOFR1 const JOFR1_val = packed struct { /// JOFFSET1 [0:11] /// Data offset for injected channel JOFFSET1: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR1 = Register(JOFR1_val).init(base_address + 0x14); /// JOFR2 const JOFR2_val = packed struct { /// JOFFSET2 [0:11] /// Data offset for injected channel JOFFSET2: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR2 = Register(JOFR2_val).init(base_address + 0x18); /// JOFR3 const JOFR3_val = packed struct { /// JOFFSET3 [0:11] /// Data offset for injected channel JOFFSET3: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR3 = Register(JOFR3_val).init(base_address + 0x1c); /// JOFR4 const JOFR4_val = packed struct { /// JOFFSET4 [0:11] /// Data offset for injected channel JOFFSET4: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected channel data offset register pub const JOFR4 = Register(JOFR4_val).init(base_address + 0x20); /// HTR const HTR_val = packed struct { /// HT [0:11] /// Analog watchdog higher HT: u12 = 4095, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// watchdog higher threshold pub const HTR = Register(HTR_val).init(base_address + 0x24); /// LTR const LTR_val = packed struct { /// LT [0:11] /// Analog watchdog lower LT: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// watchdog lower threshold pub const LTR = Register(LTR_val).init(base_address + 0x28); /// SQR1 const SQR1_val = packed struct { /// SQ13 [0:4] /// 13th conversion in regular SQ13: u5 = 0, /// SQ14 [5:9] /// 14th conversion in regular SQ14: u5 = 0, /// SQ15 [10:14] /// 15th conversion in regular SQ15: u5 = 0, /// SQ16 [15:19] /// 16th conversion in regular SQ16: u5 = 0, /// L [20:23] /// Regular channel sequence L: u4 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// regular sequence register 1 pub const SQR1 = Register(SQR1_val).init(base_address + 0x2c); /// SQR2 const SQR2_val = 
packed struct { /// SQ7 [0:4] /// 7th conversion in regular SQ7: u5 = 0, /// SQ8 [5:9] /// 8th conversion in regular SQ8: u5 = 0, /// SQ9 [10:14] /// 9th conversion in regular SQ9: u5 = 0, /// SQ10 [15:19] /// 10th conversion in regular SQ10: u5 = 0, /// SQ11 [20:24] /// 11th conversion in regular SQ11: u5 = 0, /// SQ12 [25:29] /// 12th conversion in regular SQ12: u5 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// regular sequence register 2 pub const SQR2 = Register(SQR2_val).init(base_address + 0x30); /// SQR3 const SQR3_val = packed struct { /// SQ1 [0:4] /// 1st conversion in regular SQ1: u5 = 0, /// SQ2 [5:9] /// 2nd conversion in regular SQ2: u5 = 0, /// SQ3 [10:14] /// 3rd conversion in regular SQ3: u5 = 0, /// SQ4 [15:19] /// 4th conversion in regular SQ4: u5 = 0, /// SQ5 [20:24] /// 5th conversion in regular SQ5: u5 = 0, /// SQ6 [25:29] /// 6th conversion in regular SQ6: u5 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// regular sequence register 3 pub const SQR3 = Register(SQR3_val).init(base_address + 0x34); /// JSQR const JSQR_val = packed struct { /// JSQ1 [0:4] /// 1st conversion in injected JSQ1: u5 = 0, /// JSQ2 [5:9] /// 2nd conversion in injected JSQ2: u5 = 0, /// JSQ3 [10:14] /// 3rd conversion in injected JSQ3: u5 = 0, /// JSQ4 [15:19] /// 4th conversion in injected JSQ4: u5 = 0, /// JL [20:21] /// Injected sequence length JL: u2 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// injected sequence register pub const JSQR = Register(JSQR_val).init(base_address + 0x38); /// JDR1 const JDR1_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR1 = Register(JDR1_val).init(base_address + 0x3c); /// JDR2 const JDR2_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR2 = Register(JDR2_val).init(base_address + 0x40); /// JDR3 const JDR3_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR3 = Register(JDR3_val).init(base_address + 0x44); /// JDR4 const JDR4_val = packed struct { /// JDATA [0:15] /// Injected data JDATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// injected data register x pub const JDR4 = Register(JDR4_val).init(base_address + 0x48); /// DR const DR_val = packed struct { /// DATA [0:15] /// Regular data DATA: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// regular data register pub const DR = Register(DR_val).init(base_address + 0x4c); }; /// Controller area network pub const CAN1 = struct { const base_address = 0x40006400; /// CAN_MCR const CAN_MCR_val = packed struct { /// INRQ [0:0] /// INRQ INRQ: u1 = 0, /// SLEEP [1:1] /// SLEEP SLEEP: u1 = 0, /// TXFP [2:2] /// TXFP TXFP: u1 = 0, /// RFLM [3:3] /// RFLM RFLM: u1 = 0, /// NART [4:4] /// NART NART: u1 = 0, /// AWUM [5:5] /// AWUM AWUM: u1 = 0, /// ABOM [6:6] /// ABOM ABOM: u1 = 0, /// TTCM [7:7] /// TTCM TTCM: u1 = 0, /// unused [8:14] _unused8: u7 = 0, /// RESET [15:15] /// RESET RESET: u1 = 0, /// DBF [16:16] /// DBF DBF: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// CAN_MCR pub const CAN_MCR = Register(CAN_MCR_val).init(base_address + 0x0); /// CAN_MSR const CAN_MSR_val = packed struct { /// INAK [0:0] /// INAK INAK: u1 = 0, /// SLAK [1:1] /// 
SLAK SLAK: u1 = 0, /// ERRI [2:2] /// ERRI ERRI: u1 = 0, /// WKUI [3:3] /// WKUI WKUI: u1 = 0, /// SLAKI [4:4] /// SLAKI SLAKI: u1 = 0, /// unused [5:7] _unused5: u3 = 0, /// TXM [8:8] /// TXM TXM: u1 = 0, /// RXM [9:9] /// RXM RXM: u1 = 0, /// SAMP [10:10] /// SAMP SAMP: u1 = 0, /// RX [11:11] /// RX RX: u1 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_MSR pub const CAN_MSR = Register(CAN_MSR_val).init(base_address + 0x4); /// CAN_TSR const CAN_TSR_val = packed struct { /// RQCP0 [0:0] /// RQCP0 RQCP0: u1 = 0, /// TXOK0 [1:1] /// TXOK0 TXOK0: u1 = 0, /// ALST0 [2:2] /// ALST0 ALST0: u1 = 0, /// TERR0 [3:3] /// TERR0 TERR0: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// ABRQ0 [7:7] /// ABRQ0 ABRQ0: u1 = 0, /// RQCP1 [8:8] /// RQCP1 RQCP1: u1 = 0, /// TXOK1 [9:9] /// TXOK1 TXOK1: u1 = 0, /// ALST1 [10:10] /// ALST1 ALST1: u1 = 0, /// TERR1 [11:11] /// TERR1 TERR1: u1 = 0, /// unused [12:14] _unused12: u3 = 0, /// ABRQ1 [15:15] /// ABRQ1 ABRQ1: u1 = 0, /// RQCP2 [16:16] /// RQCP2 RQCP2: u1 = 0, /// TXOK2 [17:17] /// TXOK2 TXOK2: u1 = 0, /// ALST2 [18:18] /// ALST2 ALST2: u1 = 0, /// TERR2 [19:19] /// TERR2 TERR2: u1 = 0, /// unused [20:22] _unused20: u3 = 0, /// ABRQ2 [23:23] /// ABRQ2 ABRQ2: u1 = 0, /// CODE [24:25] /// CODE CODE: u2 = 0, /// TME0 [26:26] /// Lowest priority flag for mailbox TME0: u1 = 0, /// TME1 [27:27] /// Lowest priority flag for mailbox TME1: u1 = 0, /// TME2 [28:28] /// Lowest priority flag for mailbox TME2: u1 = 0, /// LOW0 [29:29] /// Lowest priority flag for mailbox LOW0: u1 = 0, /// LOW1 [30:30] /// Lowest priority flag for mailbox LOW1: u1 = 0, /// LOW2 [31:31] /// Lowest priority flag for mailbox LOW2: u1 = 0, }; /// CAN_TSR pub const CAN_TSR = Register(CAN_TSR_val).init(base_address + 0x8); /// CAN_RF0R const CAN_RF0R_val = packed struct { /// FMP0 [0:1] /// FMP0 FMP0: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// FULL0 [3:3] /// FULL0 FULL0: u1 = 0, /// FOVR0 [4:4] /// FOVR0 FOVR0: u1 = 0, /// RFOM0 [5:5] /// RFOM0 RFOM0: u1 = 0, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_RF0R pub const CAN_RF0R = Register(CAN_RF0R_val).init(base_address + 0xc); /// CAN_RF1R const CAN_RF1R_val = packed struct { /// FMP1 [0:1] /// FMP1 FMP1: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// FULL1 [3:3] /// FULL1 FULL1: u1 = 0, /// FOVR1 [4:4] /// FOVR1 FOVR1: u1 = 0, /// RFOM1 [5:5] /// RFOM1 RFOM1: u1 = 0, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_RF1R pub const CAN_RF1R = Register(CAN_RF1R_val).init(base_address + 0x10); /// CAN_IER const CAN_IER_val = packed struct { /// TMEIE [0:0] /// TMEIE TMEIE: u1 = 0, /// FMPIE0 [1:1] /// FMPIE0 FMPIE0: u1 = 0, /// FFIE0 [2:2] /// FFIE0 FFIE0: u1 = 0, /// FOVIE0 [3:3] /// FOVIE0 FOVIE0: u1 = 0, /// FMPIE1 [4:4] /// FMPIE1 FMPIE1: u1 = 0, /// FFIE1 [5:5] /// FFIE1 FFIE1: u1 = 0, /// FOVIE1 [6:6] /// FOVIE1 FOVIE1: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// EWGIE [8:8] /// EWGIE EWGIE: u1 = 0, /// EPVIE [9:9] /// EPVIE EPVIE: u1 = 0, /// BOFIE [10:10] /// BOFIE BOFIE: u1 = 0, /// LECIE [11:11] /// LECIE LECIE: u1 = 0, /// unused [12:14] _unused12: u3 = 0, /// ERRIE [15:15] /// ERRIE ERRIE: u1 = 0, /// WKUIE [16:16] /// WKUIE WKUIE: u1 = 0, /// SLKIE [17:17] /// SLKIE SLKIE: u1 = 0, /// unused [18:31] _unused18: u6 = 0, _unused24: u8 = 0, }; /// CAN_IER pub const CAN_IER = Register(CAN_IER_val).init(base_address + 0x14); /// CAN_ESR const CAN_ESR_val = packed struct { /// 
EWGF [0:0] /// EWGF EWGF: u1 = 0, /// EPVF [1:1] /// EPVF EPVF: u1 = 0, /// BOFF [2:2] /// BOFF BOFF: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// LEC [4:6] /// LEC LEC: u3 = 0, /// unused [7:15] _unused7: u1 = 0, _unused8: u8 = 0, /// TEC [16:23] /// TEC TEC: u8 = 0, /// REC [24:31] /// REC REC: u8 = 0, }; /// CAN_ESR pub const CAN_ESR = Register(CAN_ESR_val).init(base_address + 0x18); /// CAN_BTR const CAN_BTR_val = packed struct { /// BRP [0:9] /// BRP BRP: u10 = 0, /// unused [10:15] _unused10: u6 = 0, /// TS1 [16:19] /// TS1 TS1: u4 = 0, /// TS2 [20:22] /// TS2 TS2: u3 = 0, /// unused [23:23] _unused23: u1 = 0, /// SJW [24:25] /// SJW SJW: u2 = 0, /// unused [26:29] _unused26: u4 = 0, /// LBKM [30:30] /// LBKM LBKM: u1 = 0, /// SILM [31:31] /// SILM SILM: u1 = 0, }; /// CAN_BTR pub const CAN_BTR = Register(CAN_BTR_val).init(base_address + 0x1c); /// CAN_TI0R const CAN_TI0R_val = packed struct { /// TXRQ [0:0] /// TXRQ TXRQ: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_TI0R pub const CAN_TI0R = Register(CAN_TI0R_val).init(base_address + 0x180); /// CAN_TDT0R const CAN_TDT0R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// TGT [8:8] /// TGT TGT: u1 = 0, /// unused [9:15] _unused9: u7 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_TDT0R pub const CAN_TDT0R = Register(CAN_TDT0R_val).init(base_address + 0x184); /// CAN_TDL0R const CAN_TDL0R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_TDL0R pub const CAN_TDL0R = Register(CAN_TDL0R_val).init(base_address + 0x188); /// CAN_TDH0R const CAN_TDH0R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_TDH0R pub const CAN_TDH0R = Register(CAN_TDH0R_val).init(base_address + 0x18c); /// CAN_TI1R const CAN_TI1R_val = packed struct { /// TXRQ [0:0] /// TXRQ TXRQ: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_TI1R pub const CAN_TI1R = Register(CAN_TI1R_val).init(base_address + 0x190); /// CAN_TDT1R const CAN_TDT1R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// TGT [8:8] /// TGT TGT: u1 = 0, /// unused [9:15] _unused9: u7 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_TDT1R pub const CAN_TDT1R = Register(CAN_TDT1R_val).init(base_address + 0x194); /// CAN_TDL1R const CAN_TDL1R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_TDL1R pub const CAN_TDL1R = Register(CAN_TDL1R_val).init(base_address + 0x198); /// CAN_TDH1R const CAN_TDH1R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_TDH1R pub const CAN_TDH1R = Register(CAN_TDH1R_val).init(base_address + 0x19c); /// CAN_TI2R const CAN_TI2R_val = packed struct { /// TXRQ [0:0] /// TXRQ TXRQ: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] 
/// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_TI2R pub const CAN_TI2R = Register(CAN_TI2R_val).init(base_address + 0x1a0); /// CAN_TDT2R const CAN_TDT2R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// TGT [8:8] /// TGT TGT: u1 = 0, /// unused [9:15] _unused9: u7 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_TDT2R pub const CAN_TDT2R = Register(CAN_TDT2R_val).init(base_address + 0x1a4); /// CAN_TDL2R const CAN_TDL2R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_TDL2R pub const CAN_TDL2R = Register(CAN_TDL2R_val).init(base_address + 0x1a8); /// CAN_TDH2R const CAN_TDH2R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_TDH2R pub const CAN_TDH2R = Register(CAN_TDH2R_val).init(base_address + 0x1ac); /// CAN_RI0R const CAN_RI0R_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_RI0R pub const CAN_RI0R = Register(CAN_RI0R_val).init(base_address + 0x1b0); /// CAN_RDT0R const CAN_RDT0R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// FMI [8:15] /// FMI FMI: u8 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_RDT0R pub const CAN_RDT0R = Register(CAN_RDT0R_val).init(base_address + 0x1b4); /// CAN_RDL0R const CAN_RDL0R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_RDL0R pub const CAN_RDL0R = Register(CAN_RDL0R_val).init(base_address + 0x1b8); /// CAN_RDH0R const CAN_RDH0R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_RDH0R pub const CAN_RDH0R = Register(CAN_RDH0R_val).init(base_address + 0x1bc); /// CAN_RI1R const CAN_RI1R_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_RI1R pub const CAN_RI1R = Register(CAN_RI1R_val).init(base_address + 0x1c0); /// CAN_RDT1R const CAN_RDT1R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// FMI [8:15] /// FMI FMI: u8 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_RDT1R pub const CAN_RDT1R = Register(CAN_RDT1R_val).init(base_address + 0x1c4); /// CAN_RDL1R const CAN_RDL1R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_RDL1R pub const CAN_RDL1R = Register(CAN_RDL1R_val).init(base_address + 0x1c8); /// CAN_RDH1R const CAN_RDH1R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_RDH1R pub const 
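// Receive sketch: pending frames are counted by FMP0/FMP1 in CAN_RF0R/CAN_RF1R;
// a frame is read from the FIFO 0 output mailbox registers above and then
// released by setting RFOM0 (same Register-wrapper assumption as the other
// sketches):
//   if (CAN1.CAN_RF0R.read().FMP0 != 0) {
//       const id      = CAN1.CAN_RI0R.read().STID;  // standard identifier
//       const dlc     = CAN1.CAN_RDT0R.read().DLC;  // payload length
//       const data_lo = CAN1.CAN_RDL0R.read();      // DATA0..DATA3
//       const data_hi = CAN1.CAN_RDH0R.read();      // DATA4..DATA7
//       // ... consume the frame ...
//       CAN1.CAN_RF0R.modify(.{ .RFOM0 = 1 });      // release the mailbox
//   }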
CAN_RDH1R = Register(CAN_RDH1R_val).init(base_address + 0x1cc); /// CAN_FMR const CAN_FMR_val = packed struct { /// FINIT [0:0] /// FINIT FINIT: u1 = 0, /// unused [1:31] _unused1: u7 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FMR pub const CAN_FMR = Register(CAN_FMR_val).init(base_address + 0x200); /// CAN_FM1R const CAN_FM1R_val = packed struct { /// FBM0 [0:0] /// Filter mode FBM0: u1 = 0, /// FBM1 [1:1] /// Filter mode FBM1: u1 = 0, /// FBM2 [2:2] /// Filter mode FBM2: u1 = 0, /// FBM3 [3:3] /// Filter mode FBM3: u1 = 0, /// FBM4 [4:4] /// Filter mode FBM4: u1 = 0, /// FBM5 [5:5] /// Filter mode FBM5: u1 = 0, /// FBM6 [6:6] /// Filter mode FBM6: u1 = 0, /// FBM7 [7:7] /// Filter mode FBM7: u1 = 0, /// FBM8 [8:8] /// Filter mode FBM8: u1 = 0, /// FBM9 [9:9] /// Filter mode FBM9: u1 = 0, /// FBM10 [10:10] /// Filter mode FBM10: u1 = 0, /// FBM11 [11:11] /// Filter mode FBM11: u1 = 0, /// FBM12 [12:12] /// Filter mode FBM12: u1 = 0, /// FBM13 [13:13] /// Filter mode FBM13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FM1R pub const CAN_FM1R = Register(CAN_FM1R_val).init(base_address + 0x204); /// CAN_FS1R const CAN_FS1R_val = packed struct { /// FSC0 [0:0] /// Filter scale configuration FSC0: u1 = 0, /// FSC1 [1:1] /// Filter scale configuration FSC1: u1 = 0, /// FSC2 [2:2] /// Filter scale configuration FSC2: u1 = 0, /// FSC3 [3:3] /// Filter scale configuration FSC3: u1 = 0, /// FSC4 [4:4] /// Filter scale configuration FSC4: u1 = 0, /// FSC5 [5:5] /// Filter scale configuration FSC5: u1 = 0, /// FSC6 [6:6] /// Filter scale configuration FSC6: u1 = 0, /// FSC7 [7:7] /// Filter scale configuration FSC7: u1 = 0, /// FSC8 [8:8] /// Filter scale configuration FSC8: u1 = 0, /// FSC9 [9:9] /// Filter scale configuration FSC9: u1 = 0, /// FSC10 [10:10] /// Filter scale configuration FSC10: u1 = 0, /// FSC11 [11:11] /// Filter scale configuration FSC11: u1 = 0, /// FSC12 [12:12] /// Filter scale configuration FSC12: u1 = 0, /// FSC13 [13:13] /// Filter scale configuration FSC13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FS1R pub const CAN_FS1R = Register(CAN_FS1R_val).init(base_address + 0x20c); /// CAN_FFA1R const CAN_FFA1R_val = packed struct { /// FFA0 [0:0] /// Filter FIFO assignment for filter FFA0: u1 = 0, /// FFA1 [1:1] /// Filter FIFO assignment for filter FFA1: u1 = 0, /// FFA2 [2:2] /// Filter FIFO assignment for filter FFA2: u1 = 0, /// FFA3 [3:3] /// Filter FIFO assignment for filter FFA3: u1 = 0, /// FFA4 [4:4] /// Filter FIFO assignment for filter FFA4: u1 = 0, /// FFA5 [5:5] /// Filter FIFO assignment for filter FFA5: u1 = 0, /// FFA6 [6:6] /// Filter FIFO assignment for filter FFA6: u1 = 0, /// FFA7 [7:7] /// Filter FIFO assignment for filter FFA7: u1 = 0, /// FFA8 [8:8] /// Filter FIFO assignment for filter FFA8: u1 = 0, /// FFA9 [9:9] /// Filter FIFO assignment for filter FFA9: u1 = 0, /// FFA10 [10:10] /// Filter FIFO assignment for filter FFA10: u1 = 0, /// FFA11 [11:11] /// Filter FIFO assignment for filter FFA11: u1 = 0, /// FFA12 [12:12] /// Filter FIFO assignment for filter FFA12: u1 = 0, /// FFA13 [13:13] /// Filter FIFO assignment for filter FFA13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FFA1R pub const CAN_FFA1R = Register(CAN_FFA1R_val).init(base_address + 0x214); /// CAN_FA1R const CAN_FA1R_val = packed struct { /// FACT0 [0:0] /// Filter active FACT0: u1 = 0, /// FACT1 [1:1] /// 
Filter active FACT1: u1 = 0, /// FACT2 [2:2] /// Filter active FACT2: u1 = 0, /// FACT3 [3:3] /// Filter active FACT3: u1 = 0, /// FACT4 [4:4] /// Filter active FACT4: u1 = 0, /// FACT5 [5:5] /// Filter active FACT5: u1 = 0, /// FACT6 [6:6] /// Filter active FACT6: u1 = 0, /// FACT7 [7:7] /// Filter active FACT7: u1 = 0, /// FACT8 [8:8] /// Filter active FACT8: u1 = 0, /// FACT9 [9:9] /// Filter active FACT9: u1 = 0, /// FACT10 [10:10] /// Filter active FACT10: u1 = 0, /// FACT11 [11:11] /// Filter active FACT11: u1 = 0, /// FACT12 [12:12] /// Filter active FACT12: u1 = 0, /// FACT13 [13:13] /// Filter active FACT13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FA1R pub const CAN_FA1R = Register(CAN_FA1R_val).init(base_address + 0x21c); /// F0R1 const F0R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 0 register 1 pub const F0R1 = Register(F0R1_val).init(base_address + 0x240); /// F0R2 const F0R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// 
Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 0 register 2 pub const F0R2 = Register(F0R2_val).init(base_address + 0x244); /// F1R1 const F1R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 1 register 1 pub const F1R1 = Register(F1R1_val).init(base_address + 0x248); /// F1R2 const F1R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 
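// Filter note: filter banks are edited with FINIT = 1 in CAN_FMR; CAN_FM1R and
// CAN_FS1R choose each bank's mode and scale, CAN_FFA1R picks the FIFO and
// CAN_FA1R activates the bank.  An accept-everything bank 0 in 32-bit mask mode
// could be set up as follows (same wrapper assumption):
//   CAN1.CAN_FMR.modify(.{ .FINIT = 1 });
//   CAN1.CAN_FS1R.modify(.{ .FSC0 = 1 }); // bank 0: single 32-bit filter
//   CAN1.F0R1.write(.{});                 // identifier: all zero
//   CAN1.F0R2.write(.{});                 // mask: all zero = every bit "don't care"
//   CAN1.CAN_FA1R.modify(.{ .FACT0 = 1 });
//   CAN1.CAN_FMR.modify(.{ .FINIT = 0 });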
= 0, }; /// Filter bank 1 register 2 pub const F1R2 = Register(F1R2_val).init(base_address + 0x24c); /// F2R1 const F2R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 2 register 1 pub const F2R1 = Register(F2R1_val).init(base_address + 0x250); /// F2R2 const F2R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 2 register 2 pub const F2R2 = Register(F2R2_val).init(base_address + 0x254); /// F3R1 const F3R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: 
u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 3 register 1 pub const F3R1 = Register(F3R1_val).init(base_address + 0x258); /// F3R2 const F3R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 3 register 2 pub const F3R2 = Register(F3R2_val).init(base_address + 0x25c); /// F4R1 const F4R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits 
FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 4 register 1 pub const F4R1 = Register(F4R1_val).init(base_address + 0x260); /// F4R2 const F4R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 4 register 2 pub const F4R2 = Register(F4R2_val).init(base_address + 0x264); /// F5R1 const F5R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// 
FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 5 register 1 pub const F5R1 = Register(F5R1_val).init(base_address + 0x268); /// F5R2 const F5R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 5 register 2 pub const F5R2 = Register(F5R2_val).init(base_address + 0x26c); /// F6R1 const F6R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// 
Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 6 register 1 pub const F6R1 = Register(F6R1_val).init(base_address + 0x270); /// F6R2 const F6R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 6 register 2 pub const F6R2 = Register(F6R2_val).init(base_address + 0x274); /// F7R1 const F7R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 7 register 1 pub const F7R1 = Register(F7R1_val).init(base_address + 0x278); /// F7R2 const F7R2_val = packed struct 
{ /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 7 register 2 pub const F7R2 = Register(F7R2_val).init(base_address + 0x27c); /// F8R1 const F8R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 8 register 1 pub const F8R1 = Register(F8R1_val).init(base_address + 0x280); /// F8R2 const F8R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// 
FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 8 register 2 pub const F8R2 = Register(F8R2_val).init(base_address + 0x284); /// F9R1 const F9R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 9 register 1 pub const F9R1 = Register(F9R1_val).init(base_address + 0x288); /// F9R2 const F9R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits 
FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 9 register 2 pub const F9R2 = Register(F9R2_val).init(base_address + 0x28c); /// F10R1 const F10R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 10 register 1 pub const F10R1 = Register(F10R1_val).init(base_address + 0x290); /// F10R2 const F10R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, 
/// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 10 register 2 pub const F10R2 = Register(F10R2_val).init(base_address + 0x294); /// F11R1 const F11R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 11 register 1 pub const F11R1 = Register(F11R1_val).init(base_address + 0x298); /// F11R2 const F11R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 
[30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 11 register 2 pub const F11R2 = Register(F11R2_val).init(base_address + 0x29c); /// F12R1 const F12R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 12 register 1 pub const F12R1 = Register(F12R1_val).init(base_address + 0x2a0); /// F12R2 const F12R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 12 register 2 pub const F12R2 = Register(F12R2_val).init(base_address + 0x2a4); /// F13R1 const F13R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter
bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 13 register 1 pub const F13R1 = Register(F13R1_val).init(base_address + 0x2a8); /// F13R2 const F13R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 13 register 2 pub const F13R2 = Register(F13R2_val).init(base_address + 0x2ac); }; /// Controller area network pub const CAN2 = struct { const base_address = 0x40006800; /// CAN_MCR const CAN_MCR_val = packed struct { /// INRQ [0:0] /// INRQ INRQ: u1 = 0, /// SLEEP [1:1] /// SLEEP SLEEP: u1 = 0, /// TXFP [2:2] /// TXFP TXFP: u1 = 0, /// RFLM [3:3] /// RFLM RFLM: u1 = 0, /// NART [4:4] /// NART NART: u1 = 0, /// AWUM [5:5] /// AWUM AWUM: u1 = 0, /// ABOM [6:6] /// ABOM ABOM: u1 = 0, /// TTCM [7:7] /// TTCM TTCM: u1 = 0, /// unused [8:14] _unused8: u7 = 0, /// RESET 
[15:15] /// RESET RESET: u1 = 0, /// DBF [16:16] /// DBF DBF: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// CAN_MCR pub const CAN_MCR = Register(CAN_MCR_val).init(base_address + 0x0); /// CAN_MSR const CAN_MSR_val = packed struct { /// INAK [0:0] /// INAK INAK: u1 = 0, /// SLAK [1:1] /// SLAK SLAK: u1 = 0, /// ERRI [2:2] /// ERRI ERRI: u1 = 0, /// WKUI [3:3] /// WKUI WKUI: u1 = 0, /// SLAKI [4:4] /// SLAKI SLAKI: u1 = 0, /// unused [5:7] _unused5: u3 = 0, /// TXM [8:8] /// TXM TXM: u1 = 0, /// RXM [9:9] /// RXM RXM: u1 = 0, /// SAMP [10:10] /// SAMP SAMP: u1 = 0, /// RX [11:11] /// RX RX: u1 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_MSR pub const CAN_MSR = Register(CAN_MSR_val).init(base_address + 0x4); /// CAN_TSR const CAN_TSR_val = packed struct { /// RQCP0 [0:0] /// RQCP0 RQCP0: u1 = 0, /// TXOK0 [1:1] /// TXOK0 TXOK0: u1 = 0, /// ALST0 [2:2] /// ALST0 ALST0: u1 = 0, /// TERR0 [3:3] /// TERR0 TERR0: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// ABRQ0 [7:7] /// ABRQ0 ABRQ0: u1 = 0, /// RQCP1 [8:8] /// RQCP1 RQCP1: u1 = 0, /// TXOK1 [9:9] /// TXOK1 TXOK1: u1 = 0, /// ALST1 [10:10] /// ALST1 ALST1: u1 = 0, /// TERR1 [11:11] /// TERR1 TERR1: u1 = 0, /// unused [12:14] _unused12: u3 = 0, /// ABRQ1 [15:15] /// ABRQ1 ABRQ1: u1 = 0, /// RQCP2 [16:16] /// RQCP2 RQCP2: u1 = 0, /// TXOK2 [17:17] /// TXOK2 TXOK2: u1 = 0, /// ALST2 [18:18] /// ALST2 ALST2: u1 = 0, /// TERR2 [19:19] /// TERR2 TERR2: u1 = 0, /// unused [20:22] _unused20: u3 = 0, /// ABRQ2 [23:23] /// ABRQ2 ABRQ2: u1 = 0, /// CODE [24:25] /// CODE CODE: u2 = 0, /// TME0 [26:26] /// Lowest priority flag for mailbox TME0: u1 = 0, /// TME1 [27:27] /// Lowest priority flag for mailbox TME1: u1 = 0, /// TME2 [28:28] /// Lowest priority flag for mailbox TME2: u1 = 0, /// LOW0 [29:29] /// Lowest priority flag for mailbox LOW0: u1 = 0, /// LOW1 [30:30] /// Lowest priority flag for mailbox LOW1: u1 = 0, /// LOW2 [31:31] /// Lowest priority flag for mailbox LOW2: u1 = 0, }; /// CAN_TSR pub const CAN_TSR = Register(CAN_TSR_val).init(base_address + 0x8); /// CAN_RF0R const CAN_RF0R_val = packed struct { /// FMP0 [0:1] /// FMP0 FMP0: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// FULL0 [3:3] /// FULL0 FULL0: u1 = 0, /// FOVR0 [4:4] /// FOVR0 FOVR0: u1 = 0, /// RFOM0 [5:5] /// RFOM0 RFOM0: u1 = 0, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_RF0R pub const CAN_RF0R = Register(CAN_RF0R_val).init(base_address + 0xc); /// CAN_RF1R const CAN_RF1R_val = packed struct { /// FMP1 [0:1] /// FMP1 FMP1: u2 = 0, /// unused [2:2] _unused2: u1 = 0, /// FULL1 [3:3] /// FULL1 FULL1: u1 = 0, /// FOVR1 [4:4] /// FOVR1 FOVR1: u1 = 0, /// RFOM1 [5:5] /// RFOM1 RFOM1: u1 = 0, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_RF1R pub const CAN_RF1R = Register(CAN_RF1R_val).init(base_address + 0x10); /// CAN_IER const CAN_IER_val = packed struct { /// TMEIE [0:0] /// TMEIE TMEIE: u1 = 0, /// FMPIE0 [1:1] /// FMPIE0 FMPIE0: u1 = 0, /// FFIE0 [2:2] /// FFIE0 FFIE0: u1 = 0, /// FOVIE0 [3:3] /// FOVIE0 FOVIE0: u1 = 0, /// FMPIE1 [4:4] /// FMPIE1 FMPIE1: u1 = 0, /// FFIE1 [5:5] /// FFIE1 FFIE1: u1 = 0, /// FOVIE1 [6:6] /// FOVIE1 FOVIE1: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// EWGIE [8:8] /// EWGIE EWGIE: u1 = 0, /// EPVIE [9:9] /// EPVIE EPVIE: u1 = 0, /// BOFIE [10:10] /// BOFIE BOFIE: u1 = 0, /// LECIE [11:11] /// LECIE LECIE: u1 = 0, /// unused [12:14] _unused12: u3 = 0, /// 
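// NOTE (editor sketch, not generated from the SVD): the usual bxCAN start-up sequence with the
// CAN_MCR/CAN_MSR registers above is to request initialization mode via INRQ and poll INAK
// before touching the bit-timing or filter registers. A minimal sketch, assuming the
// Register(...) wrapper used by this file exposes read()/modify() helpers (its definition is
// not shown here):
//
//     // enter initialization mode (also make sure sleep mode is left)
//     CAN2.CAN_MCR.modify(.{ .INRQ = 1, .SLEEP = 0 });
//     while (CAN2.CAN_MSR.read().INAK == 0) {}
//     // ... configure CAN_BTR and the filter banks here ...
//     // leave initialization mode and wait for synchronization with the bus
//     CAN2.CAN_MCR.modify(.{ .INRQ = 0 });
//     while (CAN2.CAN_MSR.read().INAK == 1) {}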
ERRIE [15:15] /// ERRIE ERRIE: u1 = 0, /// WKUIE [16:16] /// WKUIE WKUIE: u1 = 0, /// SLKIE [17:17] /// SLKIE SLKIE: u1 = 0, /// unused [18:31] _unused18: u6 = 0, _unused24: u8 = 0, }; /// CAN_IER pub const CAN_IER = Register(CAN_IER_val).init(base_address + 0x14); /// CAN_ESR const CAN_ESR_val = packed struct { /// EWGF [0:0] /// EWGF EWGF: u1 = 0, /// EPVF [1:1] /// EPVF EPVF: u1 = 0, /// BOFF [2:2] /// BOFF BOFF: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// LEC [4:6] /// LEC LEC: u3 = 0, /// unused [7:15] _unused7: u1 = 0, _unused8: u8 = 0, /// TEC [16:23] /// TEC TEC: u8 = 0, /// REC [24:31] /// REC REC: u8 = 0, }; /// CAN_ESR pub const CAN_ESR = Register(CAN_ESR_val).init(base_address + 0x18); /// CAN_BTR const CAN_BTR_val = packed struct { /// BRP [0:9] /// BRP BRP: u10 = 0, /// unused [10:15] _unused10: u6 = 0, /// TS1 [16:19] /// TS1 TS1: u4 = 0, /// TS2 [20:22] /// TS2 TS2: u3 = 0, /// unused [23:23] _unused23: u1 = 0, /// SJW [24:25] /// SJW SJW: u2 = 0, /// unused [26:29] _unused26: u4 = 0, /// LBKM [30:30] /// LBKM LBKM: u1 = 0, /// SILM [31:31] /// SILM SILM: u1 = 0, }; /// CAN_BTR pub const CAN_BTR = Register(CAN_BTR_val).init(base_address + 0x1c); /// CAN_TI0R const CAN_TI0R_val = packed struct { /// TXRQ [0:0] /// TXRQ TXRQ: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_TI0R pub const CAN_TI0R = Register(CAN_TI0R_val).init(base_address + 0x180); /// CAN_TDT0R const CAN_TDT0R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// TGT [8:8] /// TGT TGT: u1 = 0, /// unused [9:15] _unused9: u7 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_TDT0R pub const CAN_TDT0R = Register(CAN_TDT0R_val).init(base_address + 0x184); /// CAN_TDL0R const CAN_TDL0R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_TDL0R pub const CAN_TDL0R = Register(CAN_TDL0R_val).init(base_address + 0x188); /// CAN_TDH0R const CAN_TDH0R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_TDH0R pub const CAN_TDH0R = Register(CAN_TDH0R_val).init(base_address + 0x18c); /// CAN_TI1R const CAN_TI1R_val = packed struct { /// TXRQ [0:0] /// TXRQ TXRQ: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_TI1R pub const CAN_TI1R = Register(CAN_TI1R_val).init(base_address + 0x190); /// CAN_TDT1R const CAN_TDT1R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// TGT [8:8] /// TGT TGT: u1 = 0, /// unused [9:15] _unused9: u7 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_TDT1R pub const CAN_TDT1R = Register(CAN_TDT1R_val).init(base_address + 0x194); /// CAN_TDL1R const CAN_TDL1R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_TDL1R pub const CAN_TDL1R = Register(CAN_TDL1R_val).init(base_address + 0x198); /// CAN_TDH1R const CAN_TDH1R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 
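// NOTE (editor sketch): CAN_BTR above encodes the nominal bit timing as
//     bit_rate = f_pclk / ((BRP + 1) * (1 + (TS1 + 1) + (TS2 + 1)))
// i.e. one sync quantum plus (TS1+1) and (TS2+1) time quanta per bit. Assuming a 36 MHz APB1
// clock (a common value on these parts, but check your clock tree), 500 kbit/s with a ~72 %
// sample point works out to BRP = 3, TS1 = 11, TS2 = 4:
//     36 MHz / (4 * (1 + 12 + 5)) = 500 kbit/s
//     CAN2.CAN_BTR.modify(.{ .BRP = 3, .TS1 = 11, .TS2 = 4, .SJW = 0 });
// (again assuming the Register(...) wrapper provides a modify() helper; it must be written
// while the peripheral is in initialization mode).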
0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_TDH1R pub const CAN_TDH1R = Register(CAN_TDH1R_val).init(base_address + 0x19c); /// CAN_TI2R const CAN_TI2R_val = packed struct { /// TXRQ [0:0] /// TXRQ TXRQ: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_TI2R pub const CAN_TI2R = Register(CAN_TI2R_val).init(base_address + 0x1a0); /// CAN_TDT2R const CAN_TDT2R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// TGT [8:8] /// TGT TGT: u1 = 0, /// unused [9:15] _unused9: u7 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_TDT2R pub const CAN_TDT2R = Register(CAN_TDT2R_val).init(base_address + 0x1a4); /// CAN_TDL2R const CAN_TDL2R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_TDL2R pub const CAN_TDL2R = Register(CAN_TDL2R_val).init(base_address + 0x1a8); /// CAN_TDH2R const CAN_TDH2R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_TDH2R pub const CAN_TDH2R = Register(CAN_TDH2R_val).init(base_address + 0x1ac); /// CAN_RI0R const CAN_RI0R_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_RI0R pub const CAN_RI0R = Register(CAN_RI0R_val).init(base_address + 0x1b0); /// CAN_RDT0R const CAN_RDT0R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// FMI [8:15] /// FMI FMI: u8 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_RDT0R pub const CAN_RDT0R = Register(CAN_RDT0R_val).init(base_address + 0x1b4); /// CAN_RDL0R const CAN_RDL0R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_RDL0R pub const CAN_RDL0R = Register(CAN_RDL0R_val).init(base_address + 0x1b8); /// CAN_RDH0R const CAN_RDH0R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_RDH0R pub const CAN_RDH0R = Register(CAN_RDH0R_val).init(base_address + 0x1bc); /// CAN_RI1R const CAN_RI1R_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// RTR [1:1] /// RTR RTR: u1 = 0, /// IDE [2:2] /// IDE IDE: u1 = 0, /// EXID [3:20] /// EXID EXID: u18 = 0, /// STID [21:31] /// STID STID: u11 = 0, }; /// CAN_RI1R pub const CAN_RI1R = Register(CAN_RI1R_val).init(base_address + 0x1c0); /// CAN_RDT1R const CAN_RDT1R_val = packed struct { /// DLC [0:3] /// DLC DLC: u4 = 0, /// unused [4:7] _unused4: u4 = 0, /// FMI [8:15] /// FMI FMI: u8 = 0, /// TIME [16:31] /// TIME TIME: u16 = 0, }; /// CAN_RDT1R pub const CAN_RDT1R = Register(CAN_RDT1R_val).init(base_address + 0x1c4); /// CAN_RDL1R const CAN_RDL1R_val = packed struct { /// DATA0 [0:7] /// DATA0 DATA0: u8 = 0, /// DATA1 [8:15] /// DATA1 DATA1: u8 = 0, /// DATA2 [16:23] /// DATA2 DATA2: u8 = 0, /// DATA3 [24:31] /// DATA3 DATA3: u8 = 0, }; /// CAN_RDL1R pub 
const CAN_RDL1R = Register(CAN_RDL1R_val).init(base_address + 0x1c8); /// CAN_RDH1R const CAN_RDH1R_val = packed struct { /// DATA4 [0:7] /// DATA4 DATA4: u8 = 0, /// DATA5 [8:15] /// DATA5 DATA5: u8 = 0, /// DATA6 [16:23] /// DATA6 DATA6: u8 = 0, /// DATA7 [24:31] /// DATA7 DATA7: u8 = 0, }; /// CAN_RDH1R pub const CAN_RDH1R = Register(CAN_RDH1R_val).init(base_address + 0x1cc); /// CAN_FMR const CAN_FMR_val = packed struct { /// FINIT [0:0] /// FINIT FINIT: u1 = 0, /// unused [1:31] _unused1: u7 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FMR pub const CAN_FMR = Register(CAN_FMR_val).init(base_address + 0x200); /// CAN_FM1R const CAN_FM1R_val = packed struct { /// FBM0 [0:0] /// Filter mode FBM0: u1 = 0, /// FBM1 [1:1] /// Filter mode FBM1: u1 = 0, /// FBM2 [2:2] /// Filter mode FBM2: u1 = 0, /// FBM3 [3:3] /// Filter mode FBM3: u1 = 0, /// FBM4 [4:4] /// Filter mode FBM4: u1 = 0, /// FBM5 [5:5] /// Filter mode FBM5: u1 = 0, /// FBM6 [6:6] /// Filter mode FBM6: u1 = 0, /// FBM7 [7:7] /// Filter mode FBM7: u1 = 0, /// FBM8 [8:8] /// Filter mode FBM8: u1 = 0, /// FBM9 [9:9] /// Filter mode FBM9: u1 = 0, /// FBM10 [10:10] /// Filter mode FBM10: u1 = 0, /// FBM11 [11:11] /// Filter mode FBM11: u1 = 0, /// FBM12 [12:12] /// Filter mode FBM12: u1 = 0, /// FBM13 [13:13] /// Filter mode FBM13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FM1R pub const CAN_FM1R = Register(CAN_FM1R_val).init(base_address + 0x204); /// CAN_FS1R const CAN_FS1R_val = packed struct { /// FSC0 [0:0] /// Filter scale configuration FSC0: u1 = 0, /// FSC1 [1:1] /// Filter scale configuration FSC1: u1 = 0, /// FSC2 [2:2] /// Filter scale configuration FSC2: u1 = 0, /// FSC3 [3:3] /// Filter scale configuration FSC3: u1 = 0, /// FSC4 [4:4] /// Filter scale configuration FSC4: u1 = 0, /// FSC5 [5:5] /// Filter scale configuration FSC5: u1 = 0, /// FSC6 [6:6] /// Filter scale configuration FSC6: u1 = 0, /// FSC7 [7:7] /// Filter scale configuration FSC7: u1 = 0, /// FSC8 [8:8] /// Filter scale configuration FSC8: u1 = 0, /// FSC9 [9:9] /// Filter scale configuration FSC9: u1 = 0, /// FSC10 [10:10] /// Filter scale configuration FSC10: u1 = 0, /// FSC11 [11:11] /// Filter scale configuration FSC11: u1 = 0, /// FSC12 [12:12] /// Filter scale configuration FSC12: u1 = 0, /// FSC13 [13:13] /// Filter scale configuration FSC13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FS1R pub const CAN_FS1R = Register(CAN_FS1R_val).init(base_address + 0x20c); /// CAN_FFA1R const CAN_FFA1R_val = packed struct { /// FFA0 [0:0] /// Filter FIFO assignment for filter FFA0: u1 = 0, /// FFA1 [1:1] /// Filter FIFO assignment for filter FFA1: u1 = 0, /// FFA2 [2:2] /// Filter FIFO assignment for filter FFA2: u1 = 0, /// FFA3 [3:3] /// Filter FIFO assignment for filter FFA3: u1 = 0, /// FFA4 [4:4] /// Filter FIFO assignment for filter FFA4: u1 = 0, /// FFA5 [5:5] /// Filter FIFO assignment for filter FFA5: u1 = 0, /// FFA6 [6:6] /// Filter FIFO assignment for filter FFA6: u1 = 0, /// FFA7 [7:7] /// Filter FIFO assignment for filter FFA7: u1 = 0, /// FFA8 [8:8] /// Filter FIFO assignment for filter FFA8: u1 = 0, /// FFA9 [9:9] /// Filter FIFO assignment for filter FFA9: u1 = 0, /// FFA10 [10:10] /// Filter FIFO assignment for filter FFA10: u1 = 0, /// FFA11 [11:11] /// Filter FIFO assignment for filter FFA11: u1 = 0, /// FFA12 [12:12] /// Filter FIFO assignment for filter FFA12: u1 = 0, /// FFA13 [13:13] /// Filter FIFO 
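// NOTE (editor sketch): the filter-bank registers below are only writable while CAN_FMR.FINIT
// is set. A minimal sketch for one 32-bit mask-mode filter routed to FIFO 0, assuming the
// Register(...) wrapper provides a modify() helper (not shown in this file); on dual-bxCAN
// devices the filter banks are usually owned by the master CAN instance, so check the
// reference manual for which peripheral instance actually carries them:
//
//     CAN2.CAN_FMR.modify(.{ .FINIT = 1 });   // freeze the filters for configuration
//     CAN2.CAN_FS1R.modify(.{ .FSC0 = 1 });   // bank 0: single 32-bit scale
//     CAN2.CAN_FM1R.modify(.{ .FBM0 = 0 });   // bank 0: identifier mask mode
//     CAN2.CAN_FFA1R.modify(.{ .FFA0 = 0 });  // bank 0: route matches to FIFO 0
//     // F0R1 holds the identifier and F0R2 the mask in this mode
//     CAN2.CAN_FA1R.modify(.{ .FACT0 = 1 });  // activate bank 0
//     CAN2.CAN_FMR.modify(.{ .FINIT = 0 });   // leave filter-initialization mode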
assignment for filter FFA13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FFA1R pub const CAN_FFA1R = Register(CAN_FFA1R_val).init(base_address + 0x214); /// CAN_FA1R const CAN_FA1R_val = packed struct { /// FACT0 [0:0] /// Filter active FACT0: u1 = 0, /// FACT1 [1:1] /// Filter active FACT1: u1 = 0, /// FACT2 [2:2] /// Filter active FACT2: u1 = 0, /// FACT3 [3:3] /// Filter active FACT3: u1 = 0, /// FACT4 [4:4] /// Filter active FACT4: u1 = 0, /// FACT5 [5:5] /// Filter active FACT5: u1 = 0, /// FACT6 [6:6] /// Filter active FACT6: u1 = 0, /// FACT7 [7:7] /// Filter active FACT7: u1 = 0, /// FACT8 [8:8] /// Filter active FACT8: u1 = 0, /// FACT9 [9:9] /// Filter active FACT9: u1 = 0, /// FACT10 [10:10] /// Filter active FACT10: u1 = 0, /// FACT11 [11:11] /// Filter active FACT11: u1 = 0, /// FACT12 [12:12] /// Filter active FACT12: u1 = 0, /// FACT13 [13:13] /// Filter active FACT13: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// CAN_FA1R pub const CAN_FA1R = Register(CAN_FA1R_val).init(base_address + 0x21c); /// F0R1 const F0R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 0 register 1 pub const F0R1 = Register(F0R1_val).init(base_address + 0x240); /// F0R2 const F0R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter 
bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 0 register 2 pub const F0R2 = Register(F0R2_val).init(base_address + 0x244); /// F1R1 const F1R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 1 register 1 pub const F1R1 = Register(F1R1_val).init(base_address + 0x248); /// F1R2 const F1R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, 
/// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 1 register 2 pub const F1R2 = Register(F1R2_val).init(base_address + 0x24c); /// F2R1 const F2R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 2 register 1 pub const F2R1 = Register(F2R1_val).init(base_address + 0x250); /// F2R2 const F2R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 2 
register 2 pub const F2R2 = Register(F2R2_val).init(base_address + 0x254); /// F3R1 const F3R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 3 register 1 pub const F3R1 = Register(F3R1_val).init(base_address + 0x258); /// F3R2 const F3R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 3 register 2 pub const F3R2 = Register(F3R2_val).init(base_address + 0x25c); /// F4R1 const F4R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// 
Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 4 register 1 pub const F4R1 = Register(F4R1_val).init(base_address + 0x260); /// F4R2 const F4R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 4 register 2 pub const F4R2 = Register(F4R2_val).init(base_address + 0x264); /// F5R1 const F5R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 
[13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 5 register 1 pub const F5R1 = Register(F5R1_val).init(base_address + 0x268); /// F5R2 const F5R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 5 register 2 pub const F5R2 = Register(F5R2_val).init(base_address + 0x26c); /// F6R1 const F6R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter 
bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 6 register 1 pub const F6R1 = Register(F6R1_val).init(base_address + 0x270); /// F6R2 const F6R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 6 register 2 pub const F6R2 = Register(F6R2_val).init(base_address + 0x274); /// F7R1 const F7R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, 
/// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 7 register 1 pub const F7R1 = Register(F7R1_val).init(base_address + 0x278); /// F7R2 const F7R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 7 register 2 pub const F7R2 = Register(F7R2_val).init(base_address + 0x27c); /// F8R1 const F8R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 8 register 1 pub const F8R1 = Register(F8R1_val).init(base_address + 0x280); /// F8R2 const F8R2_val = packed struct { /// FB0 [0:0] /// Filter 
bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 8 register 2 pub const F8R2 = Register(F8R2_val).init(base_address + 0x284); /// F9R1 const F9R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 9 register 1 pub const F9R1 = Register(F9R1_val).init(base_address + 0x288); /// F9R2 const F9R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits 
FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 9 register 2 pub const F9R2 = Register(F9R2_val).init(base_address + 0x28c); /// F10R1 const F10R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 10 register 1 pub const F10R1 = Register(F10R1_val).init(base_address + 0x290); /// F10R2 const F10R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// 
FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 10 register 2 pub const F10R2 = Register(F10R2_val).init(base_address + 0x294); /// F11R1 const F11R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 11 register 1 pub const F11R1 = Register(F11R1_val).init(base_address + 0x298); /// F11R2 const F11R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 
[23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 11 register 2 pub const F11R2 = Register(F11R2_val).init(base_address + 0x29c); /// F12R1 const F12R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 12 register 1 pub const F12R1 = Register(F12R1_val).init(base_address + 0x2a0); /// F12R2 const F12R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] ///
Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 12 register 2 pub const F12R2 = Register(F12R2_val).init(base_address + 0x2a4); /// F13R1 const F13R1_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 13 register 1 pub const F13R1 = Register(F13R1_val).init(base_address + 0x2a8); /// F13R2 const F13R2_val = packed struct { /// FB0 [0:0] /// Filter bits FB0: u1 = 0, /// FB1 [1:1] /// Filter bits FB1: u1 = 0, /// FB2 [2:2] /// Filter bits FB2: u1 = 0, /// FB3 [3:3] /// Filter bits FB3: u1 = 0, /// FB4 [4:4] /// Filter bits FB4: u1 = 0, /// FB5 [5:5] /// Filter bits FB5: u1 = 0, /// FB6 [6:6] /// Filter bits FB6: u1 = 0, /// FB7 [7:7] /// Filter bits FB7: u1 = 0, /// FB8 [8:8] /// Filter bits FB8: u1 = 0, /// FB9 [9:9] /// Filter bits FB9: u1 = 0, /// FB10 [10:10] /// Filter bits FB10: u1 = 0, /// FB11 [11:11] /// Filter bits FB11: u1 = 0, /// FB12 [12:12] /// Filter bits FB12: u1 = 0, /// FB13 [13:13] /// Filter bits FB13: u1 = 0, /// FB14 [14:14] /// Filter bits FB14: u1 = 0, /// FB15 [15:15] /// Filter bits FB15: u1 = 0, /// FB16 [16:16] /// Filter bits FB16: u1 = 0, /// FB17 [17:17] /// Filter bits FB17: u1 = 0, /// FB18 [18:18] /// Filter bits FB18: u1 = 0, /// FB19 [19:19] /// Filter bits FB19: u1 = 0, /// FB20 [20:20] /// Filter bits FB20: u1 = 0, /// FB21 [21:21] /// Filter bits FB21: u1 = 0, /// FB22 [22:22] /// Filter bits FB22: u1 = 0, /// FB23 [23:23] /// Filter bits FB23: u1 = 0, /// FB24 [24:24] /// Filter bits FB24: u1 = 0, /// FB25 [25:25] /// Filter bits FB25: u1 = 0, /// FB26 [26:26] /// Filter bits FB26: u1 = 0, /// FB27 [27:27] /// Filter bits FB27: u1 = 0, /// FB28 [28:28] /// Filter bits FB28: u1 = 0, /// FB29 [29:29] /// Filter bits FB29: u1 = 0, /// FB30 [30:30] /// Filter bits FB30: u1 = 0, /// FB31 [31:31] /// Filter bits FB31: u1 = 0, }; /// Filter bank 13 register 2 pub const F13R2 = Register(F13R2_val).init(base_address + 0x2ac); }; /// Digital to analog converter pub const DAC = struct { const base_address = 0x40007400; /// CR const CR_val = packed struct { /// EN1 [0:0] /// DAC channel1 enable 
EN1: u1 = 0, /// BOFF1 [1:1] /// DAC channel1 output buffer BOFF1: u1 = 0, /// TEN1 [2:2] /// DAC channel1 trigger TEN1: u1 = 0, /// TSEL1 [3:5] /// DAC channel1 trigger TSEL1: u3 = 0, /// WAVE1 [6:7] /// DAC channel1 noise/triangle wave WAVE1: u2 = 0, /// MAMP1 [8:11] /// DAC channel1 mask/amplitude MAMP1: u4 = 0, /// DMAEN1 [12:12] /// DAC channel1 DMA enable DMAEN1: u1 = 0, /// unused [13:15] _unused13: u3 = 0, /// EN2 [16:16] /// DAC channel2 enable EN2: u1 = 0, /// BOFF2 [17:17] /// DAC channel2 output buffer BOFF2: u1 = 0, /// TEN2 [18:18] /// DAC channel2 trigger TEN2: u1 = 0, /// TSEL2 [19:21] /// DAC channel2 trigger TSEL2: u3 = 0, /// WAVE2 [22:23] /// DAC channel2 noise/triangle wave WAVE2: u2 = 0, /// MAMP2 [24:27] /// DAC channel2 mask/amplitude MAMP2: u4 = 0, /// DMAEN2 [28:28] /// DAC channel2 DMA enable DMAEN2: u1 = 0, /// unused [29:31] _unused29: u3 = 0, }; /// Control register (DAC_CR) pub const CR = Register(CR_val).init(base_address + 0x0); /// SWTRIGR const SWTRIGR_val = packed struct { /// SWTRIG1 [0:0] /// DAC channel1 software SWTRIG1: u1 = 0, /// SWTRIG2 [1:1] /// DAC channel2 software SWTRIG2: u1 = 0, /// unused [2:31] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC software trigger register pub const SWTRIGR = Register(SWTRIGR_val).init(base_address + 0x4); /// DHR12R1 const DHR12R1_val = packed struct { /// DACC1DHR [0:11] /// DAC channel1 12-bit right-aligned DACC1DHR: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel1 12-bit right-aligned data pub const DHR12R1 = Register(DHR12R1_val).init(base_address + 0x8); /// DHR12L1 const DHR12L1_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// DACC1DHR [4:15] /// DAC channel1 12-bit left-aligned DACC1DHR: u12 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel1 12-bit left aligned data pub const DHR12L1 = Register(DHR12L1_val).init(base_address + 0xc); /// DHR8R1 const DHR8R1_val = packed struct { /// DACC1DHR [0:7] /// DAC channel1 8-bit right-aligned DACC1DHR: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel1 8-bit right aligned data pub const DHR8R1 = Register(DHR8R1_val).init(base_address + 0x10); /// DHR12R2 const DHR12R2_val = packed struct { /// DACC2DHR [0:11] /// DAC channel2 12-bit right-aligned DACC2DHR: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel2 12-bit right aligned data pub const DHR12R2 = Register(DHR12R2_val).init(base_address + 0x14); /// DHR12L2 const DHR12L2_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// DACC2DHR [4:15] /// DAC channel2 12-bit left-aligned DACC2DHR: u12 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel2 12-bit left aligned data pub const DHR12L2 = Register(DHR12L2_val).init(base_address + 0x18); /// DHR8R2 const DHR8R2_val = packed struct { /// DACC2DHR [0:7] /// DAC channel2 8-bit right-aligned DACC2DHR: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel2 8-bit right-aligned data pub const DHR8R2 = Register(DHR8R2_val).init(base_address + 0x1c); /// DHR12RD const DHR12RD_val = packed struct { /// DACC1DHR [0:11] /// DAC channel1 12-bit right-aligned DACC1DHR: u12 = 0, /// unused [12:15] _unused12: u4 = 0, /// DACC2DHR [16:27] /// DAC channel2 12-bit right-aligned DACC2DHR: u12 = 0, /// unused [28:31] _unused28: u4 = 0, }; /// Dual DAC 12-bit 
right-aligned data holding pub const DHR12RD = Register(DHR12RD_val).init(base_address + 0x20); /// DHR12LD const DHR12LD_val = packed struct { /// unused [0:3] _unused0: u4 = 0, /// DACC1DHR [4:15] /// DAC channel1 12-bit left-aligned DACC1DHR: u12 = 0, /// unused [16:19] _unused16: u4 = 0, /// DACC2DHR [20:31] /// DAC channel2 12-bit right-aligned DACC2DHR: u12 = 0, }; /// DUAL DAC 12-bit left aligned data holding pub const DHR12LD = Register(DHR12LD_val).init(base_address + 0x24); /// DHR8RD const DHR8RD_val = packed struct { /// DACC1DHR [0:7] /// DAC channel1 8-bit right-aligned DACC1DHR: u8 = 0, /// DACC2DHR [8:15] /// DAC channel2 8-bit right-aligned DACC2DHR: u8 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// DUAL DAC 8-bit right aligned data holding pub const DHR8RD = Register(DHR8RD_val).init(base_address + 0x28); /// DOR1 const DOR1_val = packed struct { /// DACC1DOR [0:11] /// DAC channel1 data output DACC1DOR: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel1 data output register pub const DOR1 = Register(DOR1_val).init(base_address + 0x2c); /// DOR2 const DOR2_val = packed struct { /// DACC2DOR [0:11] /// DAC channel2 data output DACC2DOR: u12 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// DAC channel2 data output register pub const DOR2 = Register(DOR2_val).init(base_address + 0x30); }; /// Debug support pub const DBG = struct { const base_address = 0xe0042000; /// IDCODE const IDCODE_val = packed struct { /// DEV_ID [0:11] /// DEV_ID DEV_ID: u12 = 0, /// unused [12:15] _unused12: u4 = 0, /// REV_ID [16:31] /// REV_ID REV_ID: u16 = 0, }; /// DBGMCU_IDCODE pub const IDCODE = Register(IDCODE_val).init(base_address + 0x0); /// CR const CR_val = packed struct { /// DBG_SLEEP [0:0] /// DBG_SLEEP DBG_SLEEP: u1 = 0, /// DBG_STOP [1:1] /// DBG_STOP DBG_STOP: u1 = 0, /// DBG_STANDBY [2:2] /// DBG_STANDBY DBG_STANDBY: u1 = 0, /// unused [3:4] _unused3: u2 = 0, /// TRACE_IOEN [5:5] /// TRACE_IOEN TRACE_IOEN: u1 = 0, /// TRACE_MODE [6:7] /// TRACE_MODE TRACE_MODE: u2 = 0, /// DBG_IWDG_STOP [8:8] /// DBG_IWDG_STOP DBG_IWDG_STOP: u1 = 0, /// DBG_WWDG_STOP [9:9] /// DBG_WWDG_STOP DBG_WWDG_STOP: u1 = 0, /// DBG_TIM1_STOP [10:10] /// DBG_TIM1_STOP DBG_TIM1_STOP: u1 = 0, /// DBG_TIM2_STOP [11:11] /// DBG_TIM2_STOP DBG_TIM2_STOP: u1 = 0, /// DBG_TIM3_STOP [12:12] /// DBG_TIM3_STOP DBG_TIM3_STOP: u1 = 0, /// DBG_TIM4_STOP [13:13] /// DBG_TIM4_STOP DBG_TIM4_STOP: u1 = 0, /// DBG_CAN1_STOP [14:14] /// DBG_CAN1_STOP DBG_CAN1_STOP: u1 = 0, /// DBG_I2C1_SMBUS_TIMEOUT [15:15] /// DBG_I2C1_SMBUS_TIMEOUT DBG_I2C1_SMBUS_TIMEOUT: u1 = 0, /// DBG_I2C2_SMBUS_TIMEOUT [16:16] /// DBG_I2C2_SMBUS_TIMEOUT DBG_I2C2_SMBUS_TIMEOUT: u1 = 0, /// DBG_TIM8_STOP [17:17] /// DBG_TIM8_STOP DBG_TIM8_STOP: u1 = 0, /// DBG_TIM5_STOP [18:18] /// DBG_TIM5_STOP DBG_TIM5_STOP: u1 = 0, /// DBG_TIM6_STOP [19:19] /// DBG_TIM6_STOP DBG_TIM6_STOP: u1 = 0, /// DBG_TIM7_STOP [20:20] /// DBG_TIM7_STOP DBG_TIM7_STOP: u1 = 0, /// DBG_CAN2_STOP [21:21] /// DBG_CAN2_STOP DBG_CAN2_STOP: u1 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// DBGMCU_CR pub const CR = Register(CR_val).init(base_address + 0x4); }; /// Universal asynchronous receiver pub const UART4 = struct { const base_address = 0x40004c00; /// SR const SR_val = packed struct { /// PE [0:0] /// Parity error PE: u1 = 0, /// FE [1:1] /// Framing error FE: u1 = 0, /// NE [2:2] /// Noise error flag NE: u1 = 0, /// ORE [3:3] /// Overrun error ORE: u1 
= 0, /// IDLE [4:4] /// IDLE line detected IDLE: u1 = 0, /// RXNE [5:5] /// Read data register not RXNE: u1 = 0, /// TC [6:6] /// Transmission complete TC: u1 = 0, /// TXE [7:7] /// Transmit data register TXE: u1 = 0, /// LBD [8:8] /// LIN break detection flag LBD: u1 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_SR pub const SR = Register(SR_val).init(base_address + 0x0); /// DR const DR_val = packed struct { /// DR [0:8] /// DR DR: u9 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_DR pub const DR = Register(DR_val).init(base_address + 0x4); /// BRR const BRR_val = packed struct { /// DIV_Fraction [0:3] /// DIV_Fraction DIV_Fraction: u4 = 0, /// DIV_Mantissa [4:15] /// DIV_Mantissa DIV_Mantissa: u12 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_BRR pub const BRR = Register(BRR_val).init(base_address + 0x8); /// CR1 const CR1_val = packed struct { /// SBK [0:0] /// Send break SBK: u1 = 0, /// RWU [1:1] /// Receiver wakeup RWU: u1 = 0, /// RE [2:2] /// Receiver enable RE: u1 = 0, /// TE [3:3] /// Transmitter enable TE: u1 = 0, /// IDLEIE [4:4] /// IDLE interrupt enable IDLEIE: u1 = 0, /// RXNEIE [5:5] /// RXNE interrupt enable RXNEIE: u1 = 0, /// TCIE [6:6] /// Transmission complete interrupt TCIE: u1 = 0, /// TXEIE [7:7] /// TXE interrupt enable TXEIE: u1 = 0, /// PEIE [8:8] /// PE interrupt enable PEIE: u1 = 0, /// PS [9:9] /// Parity selection PS: u1 = 0, /// PCE [10:10] /// Parity control enable PCE: u1 = 0, /// WAKE [11:11] /// Wakeup method WAKE: u1 = 0, /// M [12:12] /// Word length M: u1 = 0, /// UE [13:13] /// USART enable UE: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_CR1 pub const CR1 = Register(CR1_val).init(base_address + 0xc); /// CR2 const CR2_val = packed struct { /// ADD [0:3] /// Address of the USART node ADD: u4 = 0, /// unused [4:4] _unused4: u1 = 0, /// LBDL [5:5] /// lin break detection length LBDL: u1 = 0, /// LBDIE [6:6] /// LIN break detection interrupt LBDIE: u1 = 0, /// unused [7:11] _unused7: u1 = 0, _unused8: u4 = 0, /// STOP [12:13] /// STOP bits STOP: u2 = 0, /// LINEN [14:14] /// LIN mode enable LINEN: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_CR2 pub const CR2 = Register(CR2_val).init(base_address + 0x10); /// CR3 const CR3_val = packed struct { /// EIE [0:0] /// Error interrupt enable EIE: u1 = 0, /// IREN [1:1] /// IrDA mode enable IREN: u1 = 0, /// IRLP [2:2] /// IrDA low-power IRLP: u1 = 0, /// HDSEL [3:3] /// Half-duplex selection HDSEL: u1 = 0, /// unused [4:5] _unused4: u2 = 0, /// DMAR [6:6] /// DMA enable receiver DMAR: u1 = 0, /// DMAT [7:7] /// DMA enable transmitter DMAT: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_CR3 pub const CR3 = Register(CR3_val).init(base_address + 0x14); }; /// Universal asynchronous receiver pub const UART5 = struct { const base_address = 0x40005000; /// SR const SR_val = packed struct { /// PE [0:0] /// PE PE: u1 = 0, /// FE [1:1] /// FE FE: u1 = 0, /// NE [2:2] /// NE NE: u1 = 0, /// ORE [3:3] /// ORE ORE: u1 = 0, /// IDLE [4:4] /// IDLE IDLE: u1 = 0, /// RXNE [5:5] /// RXNE RXNE: u1 = 0, /// TC [6:6] /// TC TC: u1 = 0, /// TXE [7:7] /// TXE TXE: u1 = 0, /// LBD [8:8] /// LBD LBD: u1 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_SR pub const SR = Register(SR_val).init(base_address + 0x0); /// DR 
const DR_val = packed struct { /// DR [0:8] /// DR DR: u9 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_DR pub const DR = Register(DR_val).init(base_address + 0x4); /// BRR const BRR_val = packed struct { /// DIV_Fraction [0:3] /// DIV_Fraction DIV_Fraction: u4 = 0, /// DIV_Mantissa [4:15] /// DIV_Mantissa DIV_Mantissa: u12 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_BRR pub const BRR = Register(BRR_val).init(base_address + 0x8); /// CR1 const CR1_val = packed struct { /// SBK [0:0] /// SBK SBK: u1 = 0, /// RWU [1:1] /// RWU RWU: u1 = 0, /// RE [2:2] /// RE RE: u1 = 0, /// TE [3:3] /// TE TE: u1 = 0, /// IDLEIE [4:4] /// IDLEIE IDLEIE: u1 = 0, /// RXNEIE [5:5] /// RXNEIE RXNEIE: u1 = 0, /// TCIE [6:6] /// TCIE TCIE: u1 = 0, /// TXEIE [7:7] /// TXEIE TXEIE: u1 = 0, /// PEIE [8:8] /// PEIE PEIE: u1 = 0, /// PS [9:9] /// PS PS: u1 = 0, /// PCE [10:10] /// PCE PCE: u1 = 0, /// WAKE [11:11] /// WAKE WAKE: u1 = 0, /// M [12:12] /// M M: u1 = 0, /// UE [13:13] /// UE UE: u1 = 0, /// unused [14:31] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_CR1 pub const CR1 = Register(CR1_val).init(base_address + 0xc); /// CR2 const CR2_val = packed struct { /// ADD [0:3] /// ADD ADD: u4 = 0, /// unused [4:4] _unused4: u1 = 0, /// LBDL [5:5] /// LBDL LBDL: u1 = 0, /// LBDIE [6:6] /// LBDIE LBDIE: u1 = 0, /// unused [7:11] _unused7: u1 = 0, _unused8: u4 = 0, /// STOP [12:13] /// STOP STOP: u2 = 0, /// LINEN [14:14] /// LINEN LINEN: u1 = 0, /// unused [15:31] _unused15: u1 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_CR2 pub const CR2 = Register(CR2_val).init(base_address + 0x10); /// CR3 const CR3_val = packed struct { /// EIE [0:0] /// Error interrupt enable EIE: u1 = 0, /// IREN [1:1] /// IrDA mode enable IREN: u1 = 0, /// IRLP [2:2] /// IrDA low-power IRLP: u1 = 0, /// HDSEL [3:3] /// Half-duplex selection HDSEL: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// DMAT [7:7] /// DMA enable transmitter DMAT: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// UART4_CR3 pub const CR3 = Register(CR3_val).init(base_address + 0x14); }; /// CRC calculation unit pub const CRC = struct { const base_address = 0x40023000; /// DR const DR_val = packed struct { /// DR [0:31] /// Data Register DR: u32 = 4294967295, }; /// Data register pub const DR = Register(DR_val).init(base_address + 0x0); /// IDR const IDR_val = packed struct { /// IDR [0:7] /// Independent Data register IDR: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Independent Data register pub const IDR = Register(IDR_val).init(base_address + 0x4); /// CR const CR_val = packed struct { /// RESET [0:0] /// Reset bit RESET: u1 = 0, /// unused [1:31] _unused1: u7 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register pub const CR = Register(CR_val).init(base_address + 0x8); }; /// FLASH pub const FLASH = struct { const base_address = 0x40022000; /// ACR const ACR_val = packed struct { /// LATENCY [0:2] /// Latency LATENCY: u3 = 0, /// HLFCYA [3:3] /// Flash half cycle access HLFCYA: u1 = 0, /// PRFTBE [4:4] /// Prefetch buffer enable PRFTBE: u1 = 1, /// PRFTBS [5:5] /// Prefetch buffer status PRFTBS: u1 = 1, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Flash access control register pub const ACR = Register(ACR_val).init(base_address + 0x0); /// KEYR const KEYR_val = packed struct { /// KEY [0:31] 
/// FPEC key KEY: u32 = 0, }; /// Flash key register pub const KEYR = Register(KEYR_val).init(base_address + 0x4); /// OPTKEYR const OPTKEYR_val = packed struct { /// OPTKEY [0:31] /// Option byte key OPTKEY: u32 = 0, }; /// Flash option key register pub const OPTKEYR = Register(OPTKEYR_val).init(base_address + 0x8); /// SR const SR_val = packed struct { /// BSY [0:0] /// Busy BSY: u1 = 0, /// unused [1:1] _unused1: u1 = 0, /// PGERR [2:2] /// Programming error PGERR: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// WRPRTERR [4:4] /// Write protection error WRPRTERR: u1 = 0, /// EOP [5:5] /// End of operation EOP: u1 = 0, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Status register pub const SR = Register(SR_val).init(base_address + 0xc); /// CR const CR_val = packed struct { /// PG [0:0] /// Programming PG: u1 = 0, /// PER [1:1] /// Page Erase PER: u1 = 0, /// MER [2:2] /// Mass Erase MER: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// OPTPG [4:4] /// Option byte programming OPTPG: u1 = 0, /// OPTER [5:5] /// Option byte erase OPTER: u1 = 0, /// STRT [6:6] /// Start STRT: u1 = 0, /// LOCK [7:7] /// Lock LOCK: u1 = 1, /// unused [8:8] _unused8: u1 = 0, /// OPTWRE [9:9] /// Option bytes write enable OPTWRE: u1 = 0, /// ERRIE [10:10] /// Error interrupt enable ERRIE: u1 = 0, /// unused [11:11] _unused11: u1 = 0, /// EOPIE [12:12] /// End of operation interrupt EOPIE: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Control register pub const CR = Register(CR_val).init(base_address + 0x10); /// AR const AR_val = packed struct { /// FAR [0:31] /// Flash Address FAR: u32 = 0, }; /// Flash address register pub const AR = Register(AR_val).init(base_address + 0x14); /// OBR const OBR_val = packed struct { /// OPTERR [0:0] /// Option byte error OPTERR: u1 = 0, /// RDPRT [1:1] /// Read protection RDPRT: u1 = 0, /// WDG_SW [2:2] /// WDG_SW WDG_SW: u1 = 1, /// nRST_STOP [3:3] /// nRST_STOP nRST_STOP: u1 = 1, /// nRST_STDBY [4:4] /// nRST_STDBY nRST_STDBY: u1 = 1, /// unused [5:9] _unused5: u3 = 7, _unused8: u2 = 3, /// Data0 [10:17] /// Data0 Data0: u8 = 255, /// Data1 [18:25] /// Data1 Data1: u8 = 255, /// unused [26:31] _unused26: u6 = 0, }; /// Option byte register pub const OBR = Register(OBR_val).init(base_address + 0x1c); /// WRPR const WRPR_val = packed struct { /// WRP [0:31] /// Write protect WRP: u32 = 4294967295, }; /// Write protection register pub const WRPR = Register(WRPR_val).init(base_address + 0x20); }; /// Universal serial bus full-speed device pub const USB = struct { const base_address = 0x40005c00; /// EP0R const EP0R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 0 register pub const EP0R = Register(EP0R_val).init(base_address + 0x0); /// EP1R const EP1R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status 
bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 1 register pub const EP1R = Register(EP1R_val).init(base_address + 0x4); /// EP2R const EP2R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 2 register pub const EP2R = Register(EP2R_val).init(base_address + 0x8); /// EP3R const EP3R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 3 register pub const EP3R = Register(EP3R_val).init(base_address + 0xc); /// EP4R const EP4R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 4 register pub const EP4R = Register(EP4R_val).init(base_address + 0x10); /// EP5R const EP5R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// 
DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 5 register pub const EP5R = Register(EP5R_val).init(base_address + 0x14); /// EP6R const EP6R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 6 register pub const EP6R = Register(EP6R_val).init(base_address + 0x18); /// EP7R const EP7R_val = packed struct { /// EA [0:3] /// Endpoint address EA: u4 = 0, /// STAT_TX [4:5] /// Status bits, for transmission STAT_TX: u2 = 0, /// DTOG_TX [6:6] /// Data Toggle, for transmission DTOG_TX: u1 = 0, /// CTR_TX [7:7] /// Correct Transfer for CTR_TX: u1 = 0, /// EP_KIND [8:8] /// Endpoint kind EP_KIND: u1 = 0, /// EP_TYPE [9:10] /// Endpoint type EP_TYPE: u2 = 0, /// SETUP [11:11] /// Setup transaction SETUP: u1 = 0, /// STAT_RX [12:13] /// Status bits, for reception STAT_RX: u2 = 0, /// DTOG_RX [14:14] /// Data Toggle, for reception DTOG_RX: u1 = 0, /// CTR_RX [15:15] /// Correct transfer for CTR_RX: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// endpoint 7 register pub const EP7R = Register(EP7R_val).init(base_address + 0x1c); /// CNTR const CNTR_val = packed struct { /// FRES [0:0] /// Force USB Reset FRES: u1 = 1, /// PDWN [1:1] /// Power down PDWN: u1 = 1, /// LPMODE [2:2] /// Low-power mode LPMODE: u1 = 0, /// FSUSP [3:3] /// Force suspend FSUSP: u1 = 0, /// RESUME [4:4] /// Resume request RESUME: u1 = 0, /// unused [5:7] _unused5: u3 = 0, /// ESOFM [8:8] /// Expected start of frame interrupt ESOFM: u1 = 0, /// SOFM [9:9] /// Start of frame interrupt SOFM: u1 = 0, /// RESETM [10:10] /// USB reset interrupt mask RESETM: u1 = 0, /// SUSPM [11:11] /// Suspend mode interrupt SUSPM: u1 = 0, /// WKUPM [12:12] /// Wakeup interrupt mask WKUPM: u1 = 0, /// ERRM [13:13] /// Error interrupt mask ERRM: u1 = 0, /// PMAOVRM [14:14] /// Packet memory area over / underrun PMAOVRM: u1 = 0, /// CTRM [15:15] /// Correct transfer interrupt CTRM: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// control register pub const CNTR = Register(CNTR_val).init(base_address + 0x40); /// ISTR const ISTR_val = packed struct { /// EP_ID [0:3] /// Endpoint Identifier EP_ID: u4 = 0, /// DIR [4:4] /// Direction of transaction DIR: u1 = 0, /// unused [5:7] _unused5: u3 = 0, /// ESOF [8:8] /// Expected start frame ESOF: u1 = 0, /// SOF [9:9] /// start of frame SOF: u1 = 0, /// RESET [10:10] /// reset request RESET: u1 = 0, /// SUSP [11:11] /// Suspend mode request SUSP: u1 = 0, /// WKUP [12:12] /// Wakeup WKUP: u1 = 0, /// ERR [13:13] /// Error ERR: u1 = 0, /// PMAOVR [14:14] /// Packet memory area over / PMAOVR: u1 = 0, /// CTR [15:15] /// Correct transfer CTR: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// interrupt status register pub const ISTR = Register(ISTR_val).init(base_address 
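// Usage sketch (not part of the generated register map): with the device-only
// USB peripheral, a driver typically enables the reset and correct-transfer
// interrupts in CNTR and then reads ISTR in the ISR to see which endpoint and
// direction completed. The read()/modify() accessor names below are an
// assumption about the Register wrapper defined earlier in this file, and
// handleEndpoint is a hypothetical helper; adjust to the real API.
//
//     CNTR.modify(.{ .RESETM = 1, .CTRM = 1 }); // enable RESET and CTR interrupts
//     const istr = ISTR.read();
//     if (istr.CTR == 1) handleEndpoint(istr.EP_ID, istr.DIR);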
+ 0x44); /// FNR const FNR_val = packed struct { /// FN [0:10] /// Frame number FN: u11 = 0, /// LSOF [11:12] /// Lost SOF LSOF: u2 = 0, /// LCK [13:13] /// Locked LCK: u1 = 0, /// RXDM [14:14] /// Receive data - line status RXDM: u1 = 0, /// RXDP [15:15] /// Receive data + line status RXDP: u1 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// frame number register pub const FNR = Register(FNR_val).init(base_address + 0x48); /// DADDR const DADDR_val = packed struct { /// ADD [0:6] /// Device address ADD: u7 = 0, /// EF [7:7] /// Enable function EF: u1 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device address pub const DADDR = Register(DADDR_val).init(base_address + 0x4c); /// BTABLE const BTABLE_val = packed struct { /// unused [0:2] _unused0: u3 = 0, /// BTABLE [3:15] /// Buffer table BTABLE: u13 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Buffer table address pub const BTABLE = Register(BTABLE_val).init(base_address + 0x50); }; /// USB on the go full speed pub const OTG_FS_DEVICE = struct { const base_address = 0x50000800; /// FS_DCFG const FS_DCFG_val = packed struct { /// DSPD [0:1] /// Device speed DSPD: u2 = 0, /// NZLSOHSK [2:2] /// Non-zero-length status OUT NZLSOHSK: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// DAD [4:10] /// Device address DAD: u7 = 0, /// PFIVL [11:12] /// Periodic frame interval PFIVL: u2 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 32, _unused24: u8 = 2, }; /// OTG_FS device configuration register pub const FS_DCFG = Register(FS_DCFG_val).init(base_address + 0x0); /// FS_DCTL const FS_DCTL_val = packed struct { /// RWUSIG [0:0] /// Remote wakeup signaling RWUSIG: u1 = 0, /// SDIS [1:1] /// Soft disconnect SDIS: u1 = 0, /// GINSTS [2:2] /// Global IN NAK status GINSTS: u1 = 0, /// GONSTS [3:3] /// Global OUT NAK status GONSTS: u1 = 0, /// TCTL [4:6] /// Test control TCTL: u3 = 0, /// SGINAK [7:7] /// Set global IN NAK SGINAK: u1 = 0, /// CGINAK [8:8] /// Clear global IN NAK CGINAK: u1 = 0, /// SGONAK [9:9] /// Set global OUT NAK SGONAK: u1 = 0, /// CGONAK [10:10] /// Clear global OUT NAK CGONAK: u1 = 0, /// POPRGDNE [11:11] /// Power-on programming done POPRGDNE: u1 = 0, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device control register pub const FS_DCTL = Register(FS_DCTL_val).init(base_address + 0x4); /// FS_DSTS const FS_DSTS_val = packed struct { /// SUSPSTS [0:0] /// Suspend status SUSPSTS: u1 = 0, /// ENUMSPD [1:2] /// Enumerated speed ENUMSPD: u2 = 0, /// EERR [3:3] /// Erratic error EERR: u1 = 0, /// unused [4:7] _unused4: u4 = 1, /// FNSOF [8:21] /// Frame number of the received FNSOF: u14 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// OTG_FS device status register pub const FS_DSTS = Register(FS_DSTS_val).init(base_address + 0x8); /// FS_DIEPMSK const FS_DIEPMSK_val = packed struct { /// XFRCM [0:0] /// Transfer completed interrupt XFRCM: u1 = 0, /// EPDM [1:1] /// Endpoint disabled interrupt EPDM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// TOM [3:3] /// Timeout condition mask (Non-isochronous TOM: u1 = 0, /// ITTXFEMSK [4:4] /// IN token received when TxFIFO empty ITTXFEMSK: u1 = 0, /// INEPNMM [5:5] /// IN token received with EP mismatch INEPNMM: u1 = 0, /// INEPNEM [6:6] /// IN endpoint NAK effective INEPNEM: u1 = 0, /// unused [7:31] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device IN endpoint common interrupt pub const 
FS_DIEPMSK = Register(FS_DIEPMSK_val).init(base_address + 0x10); /// FS_DOEPMSK const FS_DOEPMSK_val = packed struct { /// XFRCM [0:0] /// Transfer completed interrupt XFRCM: u1 = 0, /// EPDM [1:1] /// Endpoint disabled interrupt EPDM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STUPM [3:3] /// SETUP phase done mask STUPM: u1 = 0, /// OTEPDM [4:4] /// OUT token received when endpoint OTEPDM: u1 = 0, /// unused [5:31] _unused5: u3 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device OUT endpoint common interrupt pub const FS_DOEPMSK = Register(FS_DOEPMSK_val).init(base_address + 0x14); /// FS_DAINT const FS_DAINT_val = packed struct { /// IEPINT [0:15] /// IN endpoint interrupt bits IEPINT: u16 = 0, /// OEPINT [16:31] /// OUT endpoint interrupt OEPINT: u16 = 0, }; /// OTG_FS device all endpoints interrupt pub const FS_DAINT = Register(FS_DAINT_val).init(base_address + 0x18); /// FS_DAINTMSK const FS_DAINTMSK_val = packed struct { /// IEPM [0:15] /// IN EP interrupt mask bits IEPM: u16 = 0, /// OEPINT [16:31] /// OUT endpoint interrupt OEPINT: u16 = 0, }; /// OTG_FS all endpoints interrupt mask register pub const FS_DAINTMSK = Register(FS_DAINTMSK_val).init(base_address + 0x1c); /// DVBUSDIS const DVBUSDIS_val = packed struct { /// VBUSDT [0:15] /// Device VBUS discharge time VBUSDT: u16 = 6103, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device VBUS discharge time pub const DVBUSDIS = Register(DVBUSDIS_val).init(base_address + 0x28); /// DVBUSPULSE const DVBUSPULSE_val = packed struct { /// DVBUSP [0:11] /// Device VBUS pulsing time DVBUSP: u12 = 1464, /// unused [12:31] _unused12: u4 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device VBUS pulsing time pub const DVBUSPULSE = Register(DVBUSPULSE_val).init(base_address + 0x2c); /// DIEPEMPMSK const DIEPEMPMSK_val = packed struct { /// INEPTXFEM [0:15] /// IN EP Tx FIFO empty interrupt mask INEPTXFEM: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device IN endpoint FIFO empty pub const DIEPEMPMSK = Register(DIEPEMPMSK_val).init(base_address + 0x34); /// FS_DIEPCTL0 const FS_DIEPCTL0_val = packed struct { /// MPSIZ [0:1] /// Maximum packet size MPSIZ: u2 = 0, /// unused [2:14] _unused2: u6 = 0, _unused8: u7 = 0, /// USBAEP [15:15] /// USB active endpoint USBAEP: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// NAKSTS [17:17] /// NAK status NAKSTS: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// unused [20:20] _unused20: u1 = 0, /// STALL [21:21] /// STALL handshake STALL: u1 = 0, /// TXFNUM [22:25] /// TxFIFO number TXFNUM: u4 = 0, /// CNAK [26:26] /// Clear NAK CNAK: u1 = 0, /// SNAK [27:27] /// Set NAK SNAK: u1 = 0, /// unused [28:29] _unused28: u2 = 0, /// EPDIS [30:30] /// Endpoint disable EPDIS: u1 = 0, /// EPENA [31:31] /// Endpoint enable EPENA: u1 = 0, }; /// OTG_FS device control IN endpoint 0 control pub const FS_DIEPCTL0 = Register(FS_DIEPCTL0_val).init(base_address + 0x100); /// DIEPCTL1 const DIEPCTL1_val = packed struct { /// MPSIZ [0:10] /// MPSIZ MPSIZ: u11 = 0, /// unused [11:14] _unused11: u4 = 0, /// USBAEP [15:15] /// USBAEP USBAEP: u1 = 0, /// EONUM_DPID [16:16] /// EONUM/DPID EONUM_DPID: u1 = 0, /// NAKSTS [17:17] /// NAKSTS NAKSTS: u1 = 0, /// EPTYP [18:19] /// EPTYP EPTYP: u2 = 0, /// unused [20:20] _unused20: u1 = 0, /// Stall [21:21] /// Stall Stall: u1 = 0, /// TXFNUM [22:25] /// TXFNUM TXFNUM: u4 = 0, /// CNAK [26:26] /// CNAK CNAK: u1 = 0, /// SNAK [27:27] /// SNAK SNAK: u1 = 0, /// 
SD0PID_SEVNFRM [28:28] /// SD0PID/SEVNFRM SD0PID_SEVNFRM: u1 = 0, /// SODDFRM_SD1PID [29:29] /// SODDFRM/SD1PID SODDFRM_SD1PID: u1 = 0, /// EPDIS [30:30] /// EPDIS EPDIS: u1 = 0, /// EPENA [31:31] /// EPENA EPENA: u1 = 0, }; /// OTG device endpoint-1 control pub const DIEPCTL1 = Register(DIEPCTL1_val).init(base_address + 0x120); /// DIEPCTL2 const DIEPCTL2_val = packed struct { /// MPSIZ [0:10] /// MPSIZ MPSIZ: u11 = 0, /// unused [11:14] _unused11: u4 = 0, /// USBAEP [15:15] /// USBAEP USBAEP: u1 = 0, /// EONUM_DPID [16:16] /// EONUM/DPID EONUM_DPID: u1 = 0, /// NAKSTS [17:17] /// NAKSTS NAKSTS: u1 = 0, /// EPTYP [18:19] /// EPTYP EPTYP: u2 = 0, /// unused [20:20] _unused20: u1 = 0, /// Stall [21:21] /// Stall Stall: u1 = 0, /// TXFNUM [22:25] /// TXFNUM TXFNUM: u4 = 0, /// CNAK [26:26] /// CNAK CNAK: u1 = 0, /// SNAK [27:27] /// SNAK SNAK: u1 = 0, /// SD0PID_SEVNFRM [28:28] /// SD0PID/SEVNFRM SD0PID_SEVNFRM: u1 = 0, /// SODDFRM [29:29] /// SODDFRM SODDFRM: u1 = 0, /// EPDIS [30:30] /// EPDIS EPDIS: u1 = 0, /// EPENA [31:31] /// EPENA EPENA: u1 = 0, }; /// OTG device endpoint-2 control pub const DIEPCTL2 = Register(DIEPCTL2_val).init(base_address + 0x140); /// DIEPCTL3 const DIEPCTL3_val = packed struct { /// MPSIZ [0:10] /// MPSIZ MPSIZ: u11 = 0, /// unused [11:14] _unused11: u4 = 0, /// USBAEP [15:15] /// USBAEP USBAEP: u1 = 0, /// EONUM_DPID [16:16] /// EONUM/DPID EONUM_DPID: u1 = 0, /// NAKSTS [17:17] /// NAKSTS NAKSTS: u1 = 0, /// EPTYP [18:19] /// EPTYP EPTYP: u2 = 0, /// unused [20:20] _unused20: u1 = 0, /// Stall [21:21] /// Stall Stall: u1 = 0, /// TXFNUM [22:25] /// TXFNUM TXFNUM: u4 = 0, /// CNAK [26:26] /// CNAK CNAK: u1 = 0, /// SNAK [27:27] /// SNAK SNAK: u1 = 0, /// SD0PID_SEVNFRM [28:28] /// SD0PID/SEVNFRM SD0PID_SEVNFRM: u1 = 0, /// SODDFRM [29:29] /// SODDFRM SODDFRM: u1 = 0, /// EPDIS [30:30] /// EPDIS EPDIS: u1 = 0, /// EPENA [31:31] /// EPENA EPENA: u1 = 0, }; /// OTG device endpoint-3 control pub const DIEPCTL3 = Register(DIEPCTL3_val).init(base_address + 0x160); /// DOEPCTL0 const DOEPCTL0_val = packed struct { /// MPSIZ [0:1] /// MPSIZ MPSIZ: u2 = 0, /// unused [2:14] _unused2: u6 = 0, _unused8: u7 = 0, /// USBAEP [15:15] /// USBAEP USBAEP: u1 = 1, /// unused [16:16] _unused16: u1 = 0, /// NAKSTS [17:17] /// NAKSTS NAKSTS: u1 = 0, /// EPTYP [18:19] /// EPTYP EPTYP: u2 = 0, /// SNPM [20:20] /// SNPM SNPM: u1 = 0, /// Stall [21:21] /// Stall Stall: u1 = 0, /// unused [22:25] _unused22: u2 = 0, _unused24: u2 = 0, /// CNAK [26:26] /// CNAK CNAK: u1 = 0, /// SNAK [27:27] /// SNAK SNAK: u1 = 0, /// unused [28:29] _unused28: u2 = 0, /// EPDIS [30:30] /// EPDIS EPDIS: u1 = 0, /// EPENA [31:31] /// EPENA EPENA: u1 = 0, }; /// device endpoint-0 control pub const DOEPCTL0 = Register(DOEPCTL0_val).init(base_address + 0x300); /// DOEPCTL1 const DOEPCTL1_val = packed struct { /// MPSIZ [0:10] /// MPSIZ MPSIZ: u11 = 0, /// unused [11:14] _unused11: u4 = 0, /// USBAEP [15:15] /// USBAEP USBAEP: u1 = 0, /// EONUM_DPID [16:16] /// EONUM/DPID EONUM_DPID: u1 = 0, /// NAKSTS [17:17] /// NAKSTS NAKSTS: u1 = 0, /// EPTYP [18:19] /// EPTYP EPTYP: u2 = 0, /// SNPM [20:20] /// SNPM SNPM: u1 = 0, /// Stall [21:21] /// Stall Stall: u1 = 0, /// unused [22:25] _unused22: u2 = 0, _unused24: u2 = 0, /// CNAK [26:26] /// CNAK CNAK: u1 = 0, /// SNAK [27:27] /// SNAK SNAK: u1 = 0, /// SD0PID_SEVNFRM [28:28] /// SD0PID/SEVNFRM SD0PID_SEVNFRM: u1 = 0, /// SODDFRM [29:29] /// SODDFRM SODDFRM: u1 = 0, /// EPDIS [30:30] /// EPDIS EPDIS: u1 = 0, /// EPENA [31:31] /// EPENA EPENA: u1 = 0, }; /// device 
endpoint-1 control pub const DOEPCTL1 = Register(DOEPCTL1_val).init(base_address + 0x320); /// DOEPCTL2 const DOEPCTL2_val = packed struct { /// MPSIZ [0:10] /// MPSIZ MPSIZ: u11 = 0, /// unused [11:14] _unused11: u4 = 0, /// USBAEP [15:15] /// USBAEP USBAEP: u1 = 0, /// EONUM_DPID [16:16] /// EONUM/DPID EONUM_DPID: u1 = 0, /// NAKSTS [17:17] /// NAKSTS NAKSTS: u1 = 0, /// EPTYP [18:19] /// EPTYP EPTYP: u2 = 0, /// SNPM [20:20] /// SNPM SNPM: u1 = 0, /// Stall [21:21] /// Stall Stall: u1 = 0, /// unused [22:25] _unused22: u2 = 0, _unused24: u2 = 0, /// CNAK [26:26] /// CNAK CNAK: u1 = 0, /// SNAK [27:27] /// SNAK SNAK: u1 = 0, /// SD0PID_SEVNFRM [28:28] /// SD0PID/SEVNFRM SD0PID_SEVNFRM: u1 = 0, /// SODDFRM [29:29] /// SODDFRM SODDFRM: u1 = 0, /// EPDIS [30:30] /// EPDIS EPDIS: u1 = 0, /// EPENA [31:31] /// EPENA EPENA: u1 = 0, }; /// device endpoint-2 control pub const DOEPCTL2 = Register(DOEPCTL2_val).init(base_address + 0x340); /// DOEPCTL3 const DOEPCTL3_val = packed struct { /// MPSIZ [0:10] /// MPSIZ MPSIZ: u11 = 0, /// unused [11:14] _unused11: u4 = 0, /// USBAEP [15:15] /// USBAEP USBAEP: u1 = 0, /// EONUM_DPID [16:16] /// EONUM/DPID EONUM_DPID: u1 = 0, /// NAKSTS [17:17] /// NAKSTS NAKSTS: u1 = 0, /// EPTYP [18:19] /// EPTYP EPTYP: u2 = 0, /// SNPM [20:20] /// SNPM SNPM: u1 = 0, /// Stall [21:21] /// Stall Stall: u1 = 0, /// unused [22:25] _unused22: u2 = 0, _unused24: u2 = 0, /// CNAK [26:26] /// CNAK CNAK: u1 = 0, /// SNAK [27:27] /// SNAK SNAK: u1 = 0, /// SD0PID_SEVNFRM [28:28] /// SD0PID/SEVNFRM SD0PID_SEVNFRM: u1 = 0, /// SODDFRM [29:29] /// SODDFRM SODDFRM: u1 = 0, /// EPDIS [30:30] /// EPDIS EPDIS: u1 = 0, /// EPENA [31:31] /// EPENA EPENA: u1 = 0, }; /// device endpoint-3 control pub const DOEPCTL3 = Register(DOEPCTL3_val).init(base_address + 0x360); /// DIEPINT0 const DIEPINT0_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// TOC [3:3] /// TOC TOC: u1 = 0, /// ITTXFE [4:4] /// ITTXFE ITTXFE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// INEPNE [6:6] /// INEPNE INEPNE: u1 = 0, /// TXFE [7:7] /// TXFE TXFE: u1 = 1, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-x interrupt pub const DIEPINT0 = Register(DIEPINT0_val).init(base_address + 0x108); /// DIEPINT1 const DIEPINT1_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// TOC [3:3] /// TOC TOC: u1 = 0, /// ITTXFE [4:4] /// ITTXFE ITTXFE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// INEPNE [6:6] /// INEPNE INEPNE: u1 = 0, /// TXFE [7:7] /// TXFE TXFE: u1 = 1, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-1 interrupt pub const DIEPINT1 = Register(DIEPINT1_val).init(base_address + 0x128); /// DIEPINT2 const DIEPINT2_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// TOC [3:3] /// TOC TOC: u1 = 0, /// ITTXFE [4:4] /// ITTXFE ITTXFE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// INEPNE [6:6] /// INEPNE INEPNE: u1 = 0, /// TXFE [7:7] /// TXFE TXFE: u1 = 1, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-2 interrupt pub const DIEPINT2 = Register(DIEPINT2_val).init(base_address + 0x148); /// DIEPINT3 const DIEPINT3_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: 
u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// TOC [3:3] /// TOC TOC: u1 = 0, /// ITTXFE [4:4] /// ITTXFE ITTXFE: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// INEPNE [6:6] /// INEPNE INEPNE: u1 = 0, /// TXFE [7:7] /// TXFE TXFE: u1 = 1, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-3 interrupt pub const DIEPINT3 = Register(DIEPINT3_val).init(base_address + 0x168); /// DOEPINT0 const DOEPINT0_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STUP [3:3] /// STUP STUP: u1 = 0, /// OTEPDIS [4:4] /// OTEPDIS OTEPDIS: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// B2BSTUP [6:6] /// B2BSTUP B2BSTUP: u1 = 0, /// unused [7:31] _unused7: u1 = 1, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-0 interrupt pub const DOEPINT0 = Register(DOEPINT0_val).init(base_address + 0x308); /// DOEPINT1 const DOEPINT1_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STUP [3:3] /// STUP STUP: u1 = 0, /// OTEPDIS [4:4] /// OTEPDIS OTEPDIS: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// B2BSTUP [6:6] /// B2BSTUP B2BSTUP: u1 = 0, /// unused [7:31] _unused7: u1 = 1, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-1 interrupt pub const DOEPINT1 = Register(DOEPINT1_val).init(base_address + 0x328); /// DOEPINT2 const DOEPINT2_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STUP [3:3] /// STUP STUP: u1 = 0, /// OTEPDIS [4:4] /// OTEPDIS OTEPDIS: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// B2BSTUP [6:6] /// B2BSTUP B2BSTUP: u1 = 0, /// unused [7:31] _unused7: u1 = 1, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-2 interrupt pub const DOEPINT2 = Register(DOEPINT2_val).init(base_address + 0x348); /// DOEPINT3 const DOEPINT3_val = packed struct { /// XFRC [0:0] /// XFRC XFRC: u1 = 0, /// EPDISD [1:1] /// EPDISD EPDISD: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STUP [3:3] /// STUP STUP: u1 = 0, /// OTEPDIS [4:4] /// OTEPDIS OTEPDIS: u1 = 0, /// unused [5:5] _unused5: u1 = 0, /// B2BSTUP [6:6] /// B2BSTUP B2BSTUP: u1 = 0, /// unused [7:31] _unused7: u1 = 1, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// device endpoint-3 interrupt pub const DOEPINT3 = Register(DOEPINT3_val).init(base_address + 0x368); /// DIEPTSIZ0 const DIEPTSIZ0_val = packed struct { /// XFRSIZ [0:6] /// Transfer size XFRSIZ: u7 = 0, /// unused [7:18] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u3 = 0, /// PKTCNT [19:20] /// Packet count PKTCNT: u2 = 0, /// unused [21:31] _unused21: u3 = 0, _unused24: u8 = 0, }; /// device endpoint-0 transfer size pub const DIEPTSIZ0 = Register(DIEPTSIZ0_val).init(base_address + 0x110); /// DOEPTSIZ0 const DOEPTSIZ0_val = packed struct { /// XFRSIZ [0:6] /// Transfer size XFRSIZ: u7 = 0, /// unused [7:18] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u3 = 0, /// PKTCNT [19:19] /// Packet count PKTCNT: u1 = 0, /// unused [20:28] _unused20: u4 = 0, _unused24: u5 = 0, /// STUPCNT [29:30] /// SETUP packet count STUPCNT: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// device OUT endpoint-0 transfer size pub const DOEPTSIZ0 = Register(DOEPTSIZ0_val).init(base_address + 0x310); /// DIEPTSIZ1 const DIEPTSIZ1_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, 
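// Note (sketch, assumption): for OTG_FS device IN endpoints, XFRSIZ is the
// total byte count of the transfer and PKTCNT the number of packets it is cut
// into (roughly ceil(XFRSIZ / max packet size)); both are written before
// EPENA is set in the matching DIEPCTLx register. Accessor names below are
// placeholders for the Register wrapper used in this file.
//
//     // Queue a 64-byte, single-packet IN transfer on endpoint 1.
//     DIEPTSIZ1.modify(.{ .XFRSIZ = 64, .PKTCNT = 1 });
//     DIEPCTL1.modify(.{ .CNAK = 1, .EPENA = 1 });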
/// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// MCNT [29:30] /// Multi count MCNT: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// device endpoint-1 transfer size pub const DIEPTSIZ1 = Register(DIEPTSIZ1_val).init(base_address + 0x130); /// DIEPTSIZ2 const DIEPTSIZ2_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// MCNT [29:30] /// Multi count MCNT: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// device endpoint-2 transfer size pub const DIEPTSIZ2 = Register(DIEPTSIZ2_val).init(base_address + 0x150); /// DIEPTSIZ3 const DIEPTSIZ3_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// MCNT [29:30] /// Multi count MCNT: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// device endpoint-3 transfer size pub const DIEPTSIZ3 = Register(DIEPTSIZ3_val).init(base_address + 0x170); /// DTXFSTS0 const DTXFSTS0_val = packed struct { /// INEPTFSAV [0:15] /// IN endpoint TxFIFO space INEPTFSAV: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device IN endpoint transmit FIFO pub const DTXFSTS0 = Register(DTXFSTS0_val).init(base_address + 0x118); /// DTXFSTS1 const DTXFSTS1_val = packed struct { /// INEPTFSAV [0:15] /// IN endpoint TxFIFO space INEPTFSAV: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device IN endpoint transmit FIFO pub const DTXFSTS1 = Register(DTXFSTS1_val).init(base_address + 0x138); /// DTXFSTS2 const DTXFSTS2_val = packed struct { /// INEPTFSAV [0:15] /// IN endpoint TxFIFO space INEPTFSAV: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device IN endpoint transmit FIFO pub const DTXFSTS2 = Register(DTXFSTS2_val).init(base_address + 0x158); /// DTXFSTS3 const DTXFSTS3_val = packed struct { /// INEPTFSAV [0:15] /// IN endpoint TxFIFO space INEPTFSAV: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS device IN endpoint transmit FIFO pub const DTXFSTS3 = Register(DTXFSTS3_val).init(base_address + 0x178); /// DOEPTSIZ1 const DOEPTSIZ1_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// RXDPID_STUPCNT [29:30] /// Received data PID/SETUP packet RXDPID_STUPCNT: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// device OUT endpoint-1 transfer size pub const DOEPTSIZ1 = Register(DOEPTSIZ1_val).init(base_address + 0x330); /// DOEPTSIZ2 const DOEPTSIZ2_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// RXDPID_STUPCNT [29:30] /// Received data PID/SETUP packet RXDPID_STUPCNT: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// device OUT endpoint-2 transfer size pub const DOEPTSIZ2 = Register(DOEPTSIZ2_val).init(base_address + 0x350); /// DOEPTSIZ3 const DOEPTSIZ3_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// RXDPID_STUPCNT [29:30] /// Received data PID/SETUP packet RXDPID_STUPCNT: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// device OUT endpoint-3 transfer size pub const DOEPTSIZ3 = Register(DOEPTSIZ3_val).init(base_address + 0x370); }; /// USB on the go full speed pub const OTG_FS_GLOBAL = struct { const base_address = 0x50000000; /// FS_GOTGCTL const FS_GOTGCTL_val = packed struct { /// SRQSCS [0:0] /// Session request success SRQSCS: u1 
= 0, /// SRQ [1:1] /// Session request SRQ: u1 = 0, /// unused [2:7] _unused2: u6 = 0, /// HNGSCS [8:8] /// Host negotiation success HNGSCS: u1 = 0, /// HNPRQ [9:9] /// HNP request HNPRQ: u1 = 0, /// HSHNPEN [10:10] /// Host set HNP enable HSHNPEN: u1 = 0, /// DHNPEN [11:11] /// Device HNP enabled DHNPEN: u1 = 1, /// unused [12:15] _unused12: u4 = 0, /// CIDSTS [16:16] /// Connector ID status CIDSTS: u1 = 0, /// DBCT [17:17] /// Long/short debounce time DBCT: u1 = 0, /// ASVLD [18:18] /// A-session valid ASVLD: u1 = 0, /// BSVLD [19:19] /// B-session valid BSVLD: u1 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// OTG_FS control and status register pub const FS_GOTGCTL = Register(FS_GOTGCTL_val).init(base_address + 0x0); /// FS_GOTGINT const FS_GOTGINT_val = packed struct { /// unused [0:1] _unused0: u2 = 0, /// SEDET [2:2] /// Session end detected SEDET: u1 = 0, /// unused [3:7] _unused3: u5 = 0, /// SRSSCHG [8:8] /// Session request success status SRSSCHG: u1 = 0, /// HNSSCHG [9:9] /// Host negotiation success status HNSSCHG: u1 = 0, /// unused [10:16] _unused10: u6 = 0, _unused16: u1 = 0, /// HNGDET [17:17] /// Host negotiation detected HNGDET: u1 = 0, /// ADTOCHG [18:18] /// A-device timeout change ADTOCHG: u1 = 0, /// DBCDNE [19:19] /// Debounce done DBCDNE: u1 = 0, /// unused [20:31] _unused20: u4 = 0, _unused24: u8 = 0, }; /// OTG_FS interrupt register pub const FS_GOTGINT = Register(FS_GOTGINT_val).init(base_address + 0x4); /// FS_GAHBCFG const FS_GAHBCFG_val = packed struct { /// GINT [0:0] /// Global interrupt mask GINT: u1 = 0, /// unused [1:6] _unused1: u6 = 0, /// TXFELVL [7:7] /// TxFIFO empty level TXFELVL: u1 = 0, /// PTXFELVL [8:8] /// Periodic TxFIFO empty PTXFELVL: u1 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS AHB configuration register pub const FS_GAHBCFG = Register(FS_GAHBCFG_val).init(base_address + 0x8); /// FS_GUSBCFG const FS_GUSBCFG_val = packed struct { /// TOCAL [0:2] /// FS timeout calibration TOCAL: u3 = 0, /// unused [3:5] _unused3: u3 = 0, /// PHYSEL [6:6] /// Full Speed serial transceiver PHYSEL: u1 = 0, /// unused [7:7] _unused7: u1 = 0, /// SRPCAP [8:8] /// SRP-capable SRPCAP: u1 = 0, /// HNPCAP [9:9] /// HNP-capable HNPCAP: u1 = 1, /// TRDT [10:13] /// USB turnaround time TRDT: u4 = 2, /// unused [14:28] _unused14: u2 = 0, _unused16: u8 = 0, _unused24: u5 = 0, /// FHMOD [29:29] /// Force host mode FHMOD: u1 = 0, /// FDMOD [30:30] /// Force device mode FDMOD: u1 = 0, /// CTXPKT [31:31] /// Corrupt Tx packet CTXPKT: u1 = 0, }; /// OTG_FS USB configuration register pub const FS_GUSBCFG = Register(FS_GUSBCFG_val).init(base_address + 0xc); /// FS_GRSTCTL const FS_GRSTCTL_val = packed struct { /// CSRST [0:0] /// Core soft reset CSRST: u1 = 0, /// HSRST [1:1] /// HCLK soft reset HSRST: u1 = 0, /// FCRST [2:2] /// Host frame counter reset FCRST: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// RXFFLSH [4:4] /// RxFIFO flush RXFFLSH: u1 = 0, /// TXFFLSH [5:5] /// TxFIFO flush TXFFLSH: u1 = 0, /// TXFNUM [6:10] /// TxFIFO number TXFNUM: u5 = 0, /// unused [11:30] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u7 = 32, /// AHBIDL [31:31] /// AHB master idle AHBIDL: u1 = 0, }; /// OTG_FS reset register pub const FS_GRSTCTL = Register(FS_GRSTCTL_val).init(base_address + 0x10); /// FS_GINTSTS const FS_GINTSTS_val = packed struct { /// CMOD [0:0] /// Current mode of operation CMOD: u1 = 0, /// MMIS [1:1] /// Mode mismatch interrupt MMIS: u1 = 0, /// OTGINT [2:2] /// OTG interrupt OTGINT: u1 = 0, 
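// Interrupt-handling sketch (assumption, not generated content): only events
// whose bit is also set in FS_GINTMSK raise the OTG_FS interrupt line, and
// most FS_GINTSTS flags are cleared by writing 1 back to them (check the
// reference manual per bit). Accessor names are placeholders.
//
//     const gintsts = FS_GINTSTS.read();
//     if (gintsts.USBRST == 1) {
//         // ... handle the bus reset ...
//         FS_GINTSTS.write(.{ .USBRST = 1 }); // write-1-to-clear
//     }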
/// SOF [3:3] /// Start of frame SOF: u1 = 0, /// RXFLVL [4:4] /// RxFIFO non-empty RXFLVL: u1 = 0, /// NPTXFE [5:5] /// Non-periodic TxFIFO empty NPTXFE: u1 = 1, /// GINAKEFF [6:6] /// Global IN non-periodic NAK GINAKEFF: u1 = 0, /// GOUTNAKEFF [7:7] /// Global OUT NAK effective GOUTNAKEFF: u1 = 0, /// unused [8:9] _unused8: u2 = 0, /// ESUSP [10:10] /// Early suspend ESUSP: u1 = 0, /// USBSUSP [11:11] /// USB suspend USBSUSP: u1 = 0, /// USBRST [12:12] /// USB reset USBRST: u1 = 0, /// ENUMDNE [13:13] /// Enumeration done ENUMDNE: u1 = 0, /// ISOODRP [14:14] /// Isochronous OUT packet dropped ISOODRP: u1 = 0, /// EOPF [15:15] /// End of periodic frame EOPF: u1 = 0, /// unused [16:17] _unused16: u2 = 0, /// IEPINT [18:18] /// IN endpoint interrupt IEPINT: u1 = 0, /// OEPINT [19:19] /// OUT endpoint interrupt OEPINT: u1 = 0, /// IISOIXFR [20:20] /// Incomplete isochronous IN IISOIXFR: u1 = 0, /// IPXFR_INCOMPISOOUT [21:21] /// Incomplete periodic transfer(Host IPXFR_INCOMPISOOUT: u1 = 0, /// unused [22:23] _unused22: u2 = 0, /// HPRTINT [24:24] /// Host port interrupt HPRTINT: u1 = 0, /// HCINT [25:25] /// Host channels interrupt HCINT: u1 = 0, /// PTXFE [26:26] /// Periodic TxFIFO empty PTXFE: u1 = 1, /// unused [27:27] _unused27: u1 = 0, /// CIDSCHG [28:28] /// Connector ID status change CIDSCHG: u1 = 0, /// DISCINT [29:29] /// Disconnect detected DISCINT: u1 = 0, /// SRQINT [30:30] /// Session request/new session detected SRQINT: u1 = 0, /// WKUPINT [31:31] /// Resume/remote wakeup detected WKUPINT: u1 = 0, }; /// OTG_FS core interrupt register pub const FS_GINTSTS = Register(FS_GINTSTS_val).init(base_address + 0x14); /// FS_GINTMSK const FS_GINTMSK_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// MMISM [1:1] /// Mode mismatch interrupt MMISM: u1 = 0, /// OTGINT [2:2] /// OTG interrupt mask OTGINT: u1 = 0, /// SOFM [3:3] /// Start of frame mask SOFM: u1 = 0, /// RXFLVLM [4:4] /// Receive FIFO non-empty RXFLVLM: u1 = 0, /// NPTXFEM [5:5] /// Non-periodic TxFIFO empty NPTXFEM: u1 = 0, /// GINAKEFFM [6:6] /// Global non-periodic IN NAK effective GINAKEFFM: u1 = 0, /// GONAKEFFM [7:7] /// Global OUT NAK effective GONAKEFFM: u1 = 0, /// unused [8:9] _unused8: u2 = 0, /// ESUSPM [10:10] /// Early suspend mask ESUSPM: u1 = 0, /// USBSUSPM [11:11] /// USB suspend mask USBSUSPM: u1 = 0, /// USBRST [12:12] /// USB reset mask USBRST: u1 = 0, /// ENUMDNEM [13:13] /// Enumeration done mask ENUMDNEM: u1 = 0, /// ISOODRPM [14:14] /// Isochronous OUT packet dropped interrupt ISOODRPM: u1 = 0, /// EOPFM [15:15] /// End of periodic frame interrupt EOPFM: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// EPMISM [17:17] /// Endpoint mismatch interrupt EPMISM: u1 = 0, /// IEPINT [18:18] /// IN endpoints interrupt IEPINT: u1 = 0, /// OEPINT [19:19] /// OUT endpoints interrupt OEPINT: u1 = 0, /// IISOIXFRM [20:20] /// Incomplete isochronous IN transfer IISOIXFRM: u1 = 0, /// IPXFRM_IISOOXFRM [21:21] /// Incomplete periodic transfer mask(Host IPXFRM_IISOOXFRM: u1 = 0, /// unused [22:23] _unused22: u2 = 0, /// PRTIM [24:24] /// Host port interrupt mask PRTIM: u1 = 0, /// HCIM [25:25] /// Host channels interrupt HCIM: u1 = 0, /// PTXFEM [26:26] /// Periodic TxFIFO empty mask PTXFEM: u1 = 0, /// unused [27:27] _unused27: u1 = 0, /// CIDSCHGM [28:28] /// Connector ID status change CIDSCHGM: u1 = 0, /// DISCINT [29:29] /// Disconnect detected interrupt DISCINT: u1 = 0, /// SRQIM [30:30] /// Session request/new session detected SRQIM: u1 = 0, /// WUIM [31:31] /// Resume/remote wakeup detected interrupt 
WUIM: u1 = 0, }; /// OTG_FS interrupt mask register pub const FS_GINTMSK = Register(FS_GINTMSK_val).init(base_address + 0x18); /// FS_GRXSTSR_Device const FS_GRXSTSR_Device_val = packed struct { /// EPNUM [0:3] /// Endpoint number EPNUM: u4 = 0, /// BCNT [4:14] /// Byte count BCNT: u11 = 0, /// DPID [15:16] /// Data PID DPID: u2 = 0, /// PKTSTS [17:20] /// Packet status PKTSTS: u4 = 0, /// FRMNUM [21:24] /// Frame number FRMNUM: u4 = 0, /// unused [25:31] _unused25: u7 = 0, }; /// OTG_FS Receive status debug read(Device pub const FS_GRXSTSR_Device = Register(FS_GRXSTSR_Device_val).init(base_address + 0x1c); /// FS_GRXSTSR_Host const FS_GRXSTSR_Host_val = packed struct { /// EPNUM [0:3] /// Endpoint number EPNUM: u4 = 0, /// BCNT [4:14] /// Byte count BCNT: u11 = 0, /// DPID [15:16] /// Data PID DPID: u2 = 0, /// PKTSTS [17:20] /// Packet status PKTSTS: u4 = 0, /// FRMNUM [21:24] /// Frame number FRMNUM: u4 = 0, /// unused [25:31] _unused25: u7 = 0, }; /// OTG_FS Receive status debug read(Host pub const FS_GRXSTSR_Host = Register(FS_GRXSTSR_Host_val).init(base_address + 0x1c); /// FS_GRXFSIZ const FS_GRXFSIZ_val = packed struct { /// RXFD [0:15] /// RxFIFO depth RXFD: u16 = 512, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS Receive FIFO size register pub const FS_GRXFSIZ = Register(FS_GRXFSIZ_val).init(base_address + 0x24); /// FS_GNPTXFSIZ_Device const FS_GNPTXFSIZ_Device_val = packed struct { /// TX0FSA [0:15] /// Endpoint 0 transmit RAM start TX0FSA: u16 = 512, /// TX0FD [16:31] /// Endpoint 0 TxFIFO depth TX0FD: u16 = 0, }; /// OTG_FS non-periodic transmit FIFO size pub const FS_GNPTXFSIZ_Device = Register(FS_GNPTXFSIZ_Device_val).init(base_address + 0x28); /// FS_GNPTXFSIZ_Host const FS_GNPTXFSIZ_Host_val = packed struct { /// NPTXFSA [0:15] /// Non-periodic transmit RAM start NPTXFSA: u16 = 512, /// NPTXFD [16:31] /// Non-periodic TxFIFO depth NPTXFD: u16 = 0, }; /// OTG_FS non-periodic transmit FIFO size pub const FS_GNPTXFSIZ_Host = Register(FS_GNPTXFSIZ_Host_val).init(base_address + 0x28); /// FS_GNPTXSTS const FS_GNPTXSTS_val = packed struct { /// NPTXFSAV [0:15] /// Non-periodic TxFIFO space NPTXFSAV: u16 = 512, /// NPTQXSAV [16:23] /// Non-periodic transmit request queue NPTQXSAV: u8 = 8, /// NPTXQTOP [24:30] /// Top of the non-periodic transmit request NPTXQTOP: u7 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS non-periodic transmit FIFO/queue pub const FS_GNPTXSTS = Register(FS_GNPTXSTS_val).init(base_address + 0x2c); /// FS_GCCFG const FS_GCCFG_val = packed struct { /// unused [0:15] _unused0: u8 = 0, _unused8: u8 = 0, /// PWRDWN [16:16] /// Power down PWRDWN: u1 = 0, /// unused [17:17] _unused17: u1 = 0, /// VBUSASEN [18:18] /// Enable the VBUS sensing VBUSASEN: u1 = 0, /// VBUSBSEN [19:19] /// Enable the VBUS sensing VBUSBSEN: u1 = 0, /// SOFOUTEN [20:20] /// SOF output enable SOFOUTEN: u1 = 0, /// unused [21:31] _unused21: u3 = 0, _unused24: u8 = 0, }; /// OTG_FS general core configuration register pub const FS_GCCFG = Register(FS_GCCFG_val).init(base_address + 0x38); /// FS_CID const FS_CID_val = packed struct { /// PRODUCT_ID [0:31] /// Product ID field PRODUCT_ID: u32 = 4096, }; /// core ID register pub const FS_CID = Register(FS_CID_val).init(base_address + 0x3c); /// FS_HPTXFSIZ const FS_HPTXFSIZ_val = packed struct { /// PTXSA [0:15] /// Host periodic TxFIFO start PTXSA: u16 = 1536, /// PTXFSIZ [16:31] /// Host periodic TxFIFO depth PTXFSIZ: u16 = 512, }; /// OTG_FS Host periodic transmit FIFO size pub const FS_HPTXFSIZ = 
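// Note (sketch): FS_GRXFSIZ, FS_GNPTXFSIZ_* and the FS_DIEPTXFx registers
// below partition the shared OTG_FS data FIFO RAM into one RX FIFO plus one
// TX FIFO per IN endpoint; start addresses and depths are counted in 32-bit
// words and must not overlap. The reset values in these structs come from the
// SVD and are normally reprogrammed by the driver, for example (hypothetical
// layout and accessor names):
//
//     FS_GRXFSIZ.modify(.{ .RXFD = 128 }); // 128-word RX FIFO
//     FS_DIEPTXF1.modify(.{ .INEPTXSA = 160, .INEPTXFD = 64 });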
Register(FS_HPTXFSIZ_val).init(base_address + 0x100); /// FS_DIEPTXF1 const FS_DIEPTXF1_val = packed struct { /// INEPTXSA [0:15] /// IN endpoint FIFO2 transmit RAM start INEPTXSA: u16 = 1024, /// INEPTXFD [16:31] /// IN endpoint TxFIFO depth INEPTXFD: u16 = 512, }; /// OTG_FS device IN endpoint transmit FIFO size pub const FS_DIEPTXF1 = Register(FS_DIEPTXF1_val).init(base_address + 0x104); /// FS_DIEPTXF2 const FS_DIEPTXF2_val = packed struct { /// INEPTXSA [0:15] /// IN endpoint FIFO3 transmit RAM start INEPTXSA: u16 = 1024, /// INEPTXFD [16:31] /// IN endpoint TxFIFO depth INEPTXFD: u16 = 512, }; /// OTG_FS device IN endpoint transmit FIFO size pub const FS_DIEPTXF2 = Register(FS_DIEPTXF2_val).init(base_address + 0x108); /// FS_DIEPTXF3 const FS_DIEPTXF3_val = packed struct { /// INEPTXSA [0:15] /// IN endpoint FIFO4 transmit RAM start INEPTXSA: u16 = 1024, /// INEPTXFD [16:31] /// IN endpoint TxFIFO depth INEPTXFD: u16 = 512, }; /// OTG_FS device IN endpoint transmit FIFO size pub const FS_DIEPTXF3 = Register(FS_DIEPTXF3_val).init(base_address + 0x10c); }; /// USB on the go full speed pub const OTG_FS_HOST = struct { const base_address = 0x50000400; /// FS_HCFG const FS_HCFG_val = packed struct { /// FSLSPCS [0:1] /// FS/LS PHY clock select FSLSPCS: u2 = 0, /// FSLSS [2:2] /// FS- and LS-only support FSLSS: u1 = 0, /// unused [3:31] _unused3: u5 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host configuration register pub const FS_HCFG = Register(FS_HCFG_val).init(base_address + 0x0); /// HFIR const HFIR_val = packed struct { /// FRIVL [0:15] /// Frame interval FRIVL: u16 = 60000, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS Host frame interval pub const HFIR = Register(HFIR_val).init(base_address + 0x4); /// FS_HFNUM const FS_HFNUM_val = packed struct { /// FRNUM [0:15] /// Frame number FRNUM: u16 = 16383, /// FTREM [16:31] /// Frame time remaining FTREM: u16 = 0, }; /// OTG_FS host frame number/frame time pub const FS_HFNUM = Register(FS_HFNUM_val).init(base_address + 0x8); /// FS_HPTXSTS const FS_HPTXSTS_val = packed struct { /// PTXFSAVL [0:15] /// Periodic transmit data FIFO space PTXFSAVL: u16 = 256, /// PTXQSAV [16:23] /// Periodic transmit request queue space PTXQSAV: u8 = 8, /// PTXQTOP [24:31] /// Top of the periodic transmit request PTXQTOP: u8 = 0, }; /// OTG_FS_Host periodic transmit FIFO/queue pub const FS_HPTXSTS = Register(FS_HPTXSTS_val).init(base_address + 0x10); /// HAINT const HAINT_val = packed struct { /// HAINT [0:15] /// Channel interrupts HAINT: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS Host all channels interrupt pub const HAINT = Register(HAINT_val).init(base_address + 0x14); /// HAINTMSK const HAINTMSK_val = packed struct { /// HAINTM [0:15] /// Channel interrupt mask HAINTM: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host all channels interrupt mask pub const HAINTMSK = Register(HAINTMSK_val).init(base_address + 0x18); /// FS_HPRT const FS_HPRT_val = packed struct { /// PCSTS [0:0] /// Port connect status PCSTS: u1 = 0, /// PCDET [1:1] /// Port connect detected PCDET: u1 = 0, /// PENA [2:2] /// Port enable PENA: u1 = 0, /// PENCHNG [3:3] /// Port enable/disable change PENCHNG: u1 = 0, /// POCA [4:4] /// Port overcurrent active POCA: u1 = 0, /// POCCHNG [5:5] /// Port overcurrent change POCCHNG: u1 = 0, /// PRES [6:6] /// Port resume PRES: u1 = 0, /// PSUSP [7:7] /// Port suspend PSUSP: u1 = 0, /// PRST [8:8] /// Port reset PRST: 
u1 = 0, /// unused [9:9] _unused9: u1 = 0, /// PLSTS [10:11] /// Port line status PLSTS: u2 = 0, /// PPWR [12:12] /// Port power PPWR: u1 = 0, /// PTCTL [13:16] /// Port test control PTCTL: u4 = 0, /// PSPD [17:18] /// Port speed PSPD: u2 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// OTG_FS host port control and status register pub const FS_HPRT = Register(FS_HPRT_val).init(base_address + 0x40); /// FS_HCCHAR0 const FS_HCCHAR0_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-0 characteristics pub const FS_HCCHAR0 = Register(FS_HCCHAR0_val).init(base_address + 0x100); /// FS_HCCHAR1 const FS_HCCHAR1_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-1 characteristics pub const FS_HCCHAR1 = Register(FS_HCCHAR1_val).init(base_address + 0x120); /// FS_HCCHAR2 const FS_HCCHAR2_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-2 characteristics pub const FS_HCCHAR2 = Register(FS_HCCHAR2_val).init(base_address + 0x140); /// FS_HCCHAR3 const FS_HCCHAR3_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-3 characteristics pub const FS_HCCHAR3 = Register(FS_HCCHAR3_val).init(base_address + 0x160); /// FS_HCCHAR4 const FS_HCCHAR4_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction 
EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-4 characteristics pub const FS_HCCHAR4 = Register(FS_HCCHAR4_val).init(base_address + 0x180); /// FS_HCCHAR5 const FS_HCCHAR5_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-5 characteristics pub const FS_HCCHAR5 = Register(FS_HCCHAR5_val).init(base_address + 0x1a0); /// FS_HCCHAR6 const FS_HCCHAR6_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-6 characteristics pub const FS_HCCHAR6 = Register(FS_HCCHAR6_val).init(base_address + 0x1c0); /// FS_HCCHAR7 const FS_HCCHAR7_val = packed struct { /// MPSIZ [0:10] /// Maximum packet size MPSIZ: u11 = 0, /// EPNUM [11:14] /// Endpoint number EPNUM: u4 = 0, /// EPDIR [15:15] /// Endpoint direction EPDIR: u1 = 0, /// unused [16:16] _unused16: u1 = 0, /// LSDEV [17:17] /// Low-speed device LSDEV: u1 = 0, /// EPTYP [18:19] /// Endpoint type EPTYP: u2 = 0, /// MCNT [20:21] /// Multicount MCNT: u2 = 0, /// DAD [22:28] /// Device address DAD: u7 = 0, /// ODDFRM [29:29] /// Odd frame ODDFRM: u1 = 0, /// CHDIS [30:30] /// Channel disable CHDIS: u1 = 0, /// CHENA [31:31] /// Channel enable CHENA: u1 = 0, }; /// OTG_FS host channel-7 characteristics pub const FS_HCCHAR7 = Register(FS_HCCHAR7_val).init(base_address + 0x1e0); /// FS_HCINT0 const FS_HCINT0_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-0 interrupt register pub const FS_HCINT0 = Register(FS_HCINT0_val).init(base_address + 0x108); /// FS_HCINT1 const 
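// Host-mode sketch (assumption): each FS_HCINTx status register has a
// matching FS_HCINTMSKx mask further below, and HAINT/HAINTMSK report which
// channel is pending, so a handler usually walks HAINT first and then the
// per-channel register. Accessor names are placeholders for the Register
// wrapper.
//
//     const pending = HAINT.read().HAINT; // bit i set => channel i pending
//     if ((pending & 1) != 0) {
//         const hcint = FS_HCINT0.read();
//         if (hcint.XFRC == 1) FS_HCINT0.write(.{ .XFRC = 1 }); // write-1-to-clear
//     }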
FS_HCINT1_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-1 interrupt register pub const FS_HCINT1 = Register(FS_HCINT1_val).init(base_address + 0x128); /// FS_HCINT2 const FS_HCINT2_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-2 interrupt register pub const FS_HCINT2 = Register(FS_HCINT2_val).init(base_address + 0x148); /// FS_HCINT3 const FS_HCINT3_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-3 interrupt register pub const FS_HCINT3 = Register(FS_HCINT3_val).init(base_address + 0x168); /// FS_HCINT4 const FS_HCINT4_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-4 interrupt register pub const FS_HCINT4 = Register(FS_HCINT4_val).init(base_address + 0x188); /// FS_HCINT5 const FS_HCINT5_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused 
[6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-5 interrupt register pub const FS_HCINT5 = Register(FS_HCINT5_val).init(base_address + 0x1a8); /// FS_HCINT6 const FS_HCINT6_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-6 interrupt register pub const FS_HCINT6 = Register(FS_HCINT6_val).init(base_address + 0x1c8); /// FS_HCINT7 const FS_HCINT7_val = packed struct { /// XFRC [0:0] /// Transfer completed XFRC: u1 = 0, /// CHH [1:1] /// Channel halted CHH: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALL [3:3] /// STALL response received STALL: u1 = 0, /// NAK [4:4] /// NAK response received NAK: u1 = 0, /// ACK [5:5] /// ACK response received/transmitted ACK: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// TXERR [7:7] /// Transaction error TXERR: u1 = 0, /// BBERR [8:8] /// Babble error BBERR: u1 = 0, /// FRMOR [9:9] /// Frame overrun FRMOR: u1 = 0, /// DTERR [10:10] /// Data toggle error DTERR: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-7 interrupt register pub const FS_HCINT7 = Register(FS_HCINT7_val).init(base_address + 0x1e8); /// FS_HCINTMSK0 const FS_HCINTMSK0_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-0 mask register pub const FS_HCINTMSK0 = Register(FS_HCINTMSK0_val).init(base_address + 0x10c); /// FS_HCINTMSK1 const FS_HCINTMSK1_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask 
FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-1 mask register pub const FS_HCINTMSK1 = Register(FS_HCINTMSK1_val).init(base_address + 0x12c); /// FS_HCINTMSK2 const FS_HCINTMSK2_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-2 mask register pub const FS_HCINTMSK2 = Register(FS_HCINTMSK2_val).init(base_address + 0x14c); /// FS_HCINTMSK3 const FS_HCINTMSK3_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-3 mask register pub const FS_HCINTMSK3 = Register(FS_HCINTMSK3_val).init(base_address + 0x16c); /// FS_HCINTMSK4 const FS_HCINTMSK4_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-4 mask register pub const FS_HCINTMSK4 = Register(FS_HCINTMSK4_val).init(base_address + 0x18c); /// FS_HCINTMSK5 const FS_HCINTMSK5_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask 
BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-5 mask register pub const FS_HCINTMSK5 = Register(FS_HCINTMSK5_val).init(base_address + 0x1ac); /// FS_HCINTMSK6 const FS_HCINTMSK6_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-6 mask register pub const FS_HCINTMSK6 = Register(FS_HCINTMSK6_val).init(base_address + 0x1cc); /// FS_HCINTMSK7 const FS_HCINTMSK7_val = packed struct { /// XFRCM [0:0] /// Transfer completed mask XFRCM: u1 = 0, /// CHHM [1:1] /// Channel halted mask CHHM: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// STALLM [3:3] /// STALL response received interrupt STALLM: u1 = 0, /// NAKM [4:4] /// NAK response received interrupt NAKM: u1 = 0, /// ACKM [5:5] /// ACK response received/transmitted ACKM: u1 = 0, /// NYET [6:6] /// response received interrupt NYET: u1 = 0, /// TXERRM [7:7] /// Transaction error mask TXERRM: u1 = 0, /// BBERRM [8:8] /// Babble error mask BBERRM: u1 = 0, /// FRMORM [9:9] /// Frame overrun mask FRMORM: u1 = 0, /// DTERRM [10:10] /// Data toggle error mask DTERRM: u1 = 0, /// unused [11:31] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS host channel-7 mask register pub const FS_HCINTMSK7 = Register(FS_HCINTMSK7_val).init(base_address + 0x1ec); /// FS_HCTSIZ0 const FS_HCTSIZ0_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-0 transfer size pub const FS_HCTSIZ0 = Register(FS_HCTSIZ0_val).init(base_address + 0x110); /// FS_HCTSIZ1 const FS_HCTSIZ1_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-1 transfer size pub const FS_HCTSIZ1 = Register(FS_HCTSIZ1_val).init(base_address + 0x130); /// FS_HCTSIZ2 const FS_HCTSIZ2_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-2 transfer size pub const FS_HCTSIZ2 = Register(FS_HCTSIZ2_val).init(base_address + 0x150); /// FS_HCTSIZ3 const FS_HCTSIZ3_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-3 transfer size pub const FS_HCTSIZ3 = 
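// Note (sketch): for host channels, XFRSIZ and PKTCNT in FS_HCTSIZx describe
// the whole transfer and DPID seeds the data toggle, written before CHENA is
// set in the matching FS_HCCHARx register. Field values and accessor names
// below are illustrative assumptions only.
//
//     FS_HCTSIZ0.modify(.{ .XFRSIZ = 8, .PKTCNT = 1, .DPID = 0 }); // DATA0
//     FS_HCCHAR0.modify(.{ .EPNUM = 0, .EPDIR = 0, .CHENA = 1 });  // OUT on EP0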
Register(FS_HCTSIZ3_val).init(base_address + 0x170); /// FS_HCTSIZ4 const FS_HCTSIZ4_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-x transfer size pub const FS_HCTSIZ4 = Register(FS_HCTSIZ4_val).init(base_address + 0x190); /// FS_HCTSIZ5 const FS_HCTSIZ5_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-5 transfer size pub const FS_HCTSIZ5 = Register(FS_HCTSIZ5_val).init(base_address + 0x1b0); /// FS_HCTSIZ6 const FS_HCTSIZ6_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-6 transfer size pub const FS_HCTSIZ6 = Register(FS_HCTSIZ6_val).init(base_address + 0x1d0); /// FS_HCTSIZ7 const FS_HCTSIZ7_val = packed struct { /// XFRSIZ [0:18] /// Transfer size XFRSIZ: u19 = 0, /// PKTCNT [19:28] /// Packet count PKTCNT: u10 = 0, /// DPID [29:30] /// Data PID DPID: u2 = 0, /// unused [31:31] _unused31: u1 = 0, }; /// OTG_FS host channel-7 transfer size pub const FS_HCTSIZ7 = Register(FS_HCTSIZ7_val).init(base_address + 0x1f0); }; /// USB on the go full speed pub const OTG_FS_PWRCLK = struct { const base_address = 0x50000e00; /// FS_PCGCCTL const FS_PCGCCTL_val = packed struct { /// STPPCLK [0:0] /// Stop PHY clock STPPCLK: u1 = 0, /// GATEHCLK [1:1] /// Gate HCLK GATEHCLK: u1 = 0, /// unused [2:3] _unused2: u2 = 0, /// PHYSUSP [4:4] /// PHY Suspended PHYSUSP: u1 = 0, /// unused [5:31] _unused5: u3 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// OTG_FS power and clock gating control pub const FS_PCGCCTL = Register(FS_PCGCCTL_val).init(base_address + 0x0); }; /// Ethernet: MAC management counters pub const ETHERNET_MMC = struct { const base_address = 0x40028100; /// MMCCR const MMCCR_val = packed struct { /// CR [0:0] /// Counter reset CR: u1 = 0, /// CSR [1:1] /// Counter stop rollover CSR: u1 = 0, /// ROR [2:2] /// Reset on read ROR: u1 = 0, /// unused [3:30] _unused3: u5 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u7 = 0, /// MCF [31:31] /// MMC counter freeze MCF: u1 = 0, }; /// Ethernet MMC control register pub const MMCCR = Register(MMCCR_val).init(base_address + 0x0); /// MMCRIR const MMCRIR_val = packed struct { /// unused [0:4] _unused0: u5 = 0, /// RFCES [5:5] /// Received frames CRC error RFCES: u1 = 0, /// RFAES [6:6] /// Received frames alignment error RFAES: u1 = 0, /// unused [7:16] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u1 = 0, /// RGUFS [17:17] /// Received Good Unicast Frames RGUFS: u1 = 0, /// unused [18:31] _unused18: u6 = 0, _unused24: u8 = 0, }; /// Ethernet MMC receive interrupt register pub const MMCRIR = Register(MMCRIR_val).init(base_address + 0x4); /// MMCTIR const MMCTIR_val = packed struct { /// unused [0:13] _unused0: u8 = 0, _unused8: u6 = 0, /// TGFSCS [14:14] /// Transmitted good frames single collision TGFSCS: u1 = 0, /// TGFMSCS [15:15] /// Transmitted good frames more single TGFMSCS: u1 = 0, /// unused [16:20] _unused16: u5 = 0, /// TGFS [21:21] /// Transmitted good frames TGFS: u1 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// Ethernet MMC transmit 
interrupt register pub const MMCTIR = Register(MMCTIR_val).init(base_address + 0x8); /// MMCRIMR const MMCRIMR_val = packed struct { /// unused [0:4] _unused0: u5 = 0, /// RFCEM [5:5] /// Received frame CRC error RFCEM: u1 = 0, /// RFAEM [6:6] /// Received frames alignment error RFAEM: u1 = 0, /// unused [7:16] _unused7: u1 = 0, _unused8: u8 = 0, _unused16: u1 = 0, /// RGUFM [17:17] /// Received good unicast frames RGUFM: u1 = 0, /// unused [18:31] _unused18: u6 = 0, _unused24: u8 = 0, }; /// Ethernet MMC receive interrupt mask register pub const MMCRIMR = Register(MMCRIMR_val).init(base_address + 0xc); /// MMCTIMR const MMCTIMR_val = packed struct { /// unused [0:13] _unused0: u8 = 0, _unused8: u6 = 0, /// TGFSCM [14:14] /// Transmitted good frames single collision TGFSCM: u1 = 0, /// TGFMSCM [15:15] /// Transmitted good frames more single TGFMSCM: u1 = 0, /// unused [16:20] _unused16: u5 = 0, /// TGFM [21:21] /// Transmitted good frames TGFM: u1 = 0, /// unused [22:31] _unused22: u2 = 0, _unused24: u8 = 0, }; /// Ethernet MMC transmit interrupt mask pub const MMCTIMR = Register(MMCTIMR_val).init(base_address + 0x10); /// MMCTGFSCCR const MMCTGFSCCR_val = packed struct { /// TGFSCC [0:31] /// Transmitted good frames after a single TGFSCC: u32 = 0, }; /// Ethernet MMC transmitted good frames after a pub const MMCTGFSCCR = Register(MMCTGFSCCR_val).init(base_address + 0x4c); /// MMCTGFMSCCR const MMCTGFMSCCR_val = packed struct { /// TGFMSCC [0:31] /// Transmitted good frames after more than TGFMSCC: u32 = 0, }; /// Ethernet MMC transmitted good frames after pub const MMCTGFMSCCR = Register(MMCTGFMSCCR_val).init(base_address + 0x50); /// MMCTGFCR const MMCTGFCR_val = packed struct { /// TGFC [0:31] /// Transmitted good frames TGFC: u32 = 0, }; /// Ethernet MMC transmitted good frames counter pub const MMCTGFCR = Register(MMCTGFCR_val).init(base_address + 0x68); /// MMCRFCECR const MMCRFCECR_val = packed struct { /// RFCFC [0:31] /// Received frames with CRC error RFCFC: u32 = 0, }; /// Ethernet MMC received frames with CRC error pub const MMCRFCECR = Register(MMCRFCECR_val).init(base_address + 0x94); /// MMCRFAECR const MMCRFAECR_val = packed struct { /// RFAEC [0:31] /// Received frames with alignment error RFAEC: u32 = 0, }; /// Ethernet MMC received frames with alignment pub const MMCRFAECR = Register(MMCRFAECR_val).init(base_address + 0x98); /// MMCRGUFCR const MMCRGUFCR_val = packed struct { /// RGUFC [0:31] /// Received good unicast frames RGUFC: u32 = 0, }; /// MMC received good unicast frames counter pub const MMCRGUFCR = Register(MMCRGUFCR_val).init(base_address + 0xc4); }; /// Ethernet: media access control pub const ETHERNET_MAC = struct { const base_address = 0x40028000; /// MACCR const MACCR_val = packed struct { /// unused [0:1] _unused0: u2 = 0, /// RE [2:2] /// Receiver enable RE: u1 = 0, /// TE [3:3] /// Transmitter enable TE: u1 = 0, /// DC [4:4] /// Deferral check DC: u1 = 0, /// BL [5:6] /// Back-off limit BL: u2 = 0, /// APCS [7:7] /// Automatic pad/CRC APCS: u1 = 0, /// unused [8:8] _unused8: u1 = 0, /// RD [9:9] /// Retry disable RD: u1 = 0, /// IPCO [10:10] /// IPv4 checksum offload IPCO: u1 = 0, /// DM [11:11] /// Duplex mode DM: u1 = 0, /// LM [12:12] /// Loopback mode LM: u1 = 0, /// ROD [13:13] /// Receive own disable ROD: u1 = 0, /// FES [14:14] /// Fast Ethernet speed FES: u1 = 0, /// unused [15:15] _unused15: u1 = 1, /// CSD [16:16] /// Carrier sense disable CSD: u1 = 0, /// IFG [17:19] /// Interframe gap IFG: u3 = 0, /// unused [20:21] _unused20: u2 = 0, /// 
JD [22:22] /// Jabber disable JD: u1 = 0, /// WD [23:23] /// Watchdog disable WD: u1 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// Ethernet MAC configuration register pub const MACCR = Register(MACCR_val).init(base_address + 0x0); /// MACFFR const MACFFR_val = packed struct { /// PM [0:0] /// Promiscuous mode PM: u1 = 0, /// HU [1:1] /// Hash unicast HU: u1 = 0, /// HM [2:2] /// Hash multicast HM: u1 = 0, /// DAIF [3:3] /// Destination address inverse DAIF: u1 = 0, /// PAM [4:4] /// Pass all multicast PAM: u1 = 0, /// BFD [5:5] /// Broadcast frames disable BFD: u1 = 0, /// PCF [6:7] /// Pass control frames PCF: u2 = 0, /// SAIF [8:8] /// Source address inverse SAIF: u1 = 0, /// SAF [9:9] /// Source address filter SAF: u1 = 0, /// HPF [10:10] /// Hash or perfect filter HPF: u1 = 0, /// unused [11:30] _unused11: u5 = 0, _unused16: u8 = 0, _unused24: u7 = 0, /// RA [31:31] /// Receive all RA: u1 = 0, }; /// Ethernet MAC frame filter register pub const MACFFR = Register(MACFFR_val).init(base_address + 0x4); /// MACHTHR const MACHTHR_val = packed struct { /// HTH [0:31] /// Hash table high HTH: u32 = 0, }; /// Ethernet MAC hash table high pub const MACHTHR = Register(MACHTHR_val).init(base_address + 0x8); /// MACHTLR const MACHTLR_val = packed struct { /// HTL [0:31] /// Hash table low HTL: u32 = 0, }; /// Ethernet MAC hash table low pub const MACHTLR = Register(MACHTLR_val).init(base_address + 0xc); /// MACMIIAR const MACMIIAR_val = packed struct { /// MB [0:0] /// MII busy MB: u1 = 0, /// MW [1:1] /// MII write MW: u1 = 0, /// CR [2:4] /// Clock range CR: u3 = 0, /// unused [5:5] _unused5: u1 = 0, /// MR [6:10] /// MII register MR: u5 = 0, /// PA [11:15] /// PHY address PA: u5 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Ethernet MAC MII address register pub const MACMIIAR = Register(MACMIIAR_val).init(base_address + 0x10); /// MACMIIDR const MACMIIDR_val = packed struct { /// MD [0:15] /// MII data MD: u16 = 0, /// unused [16:31] _unused16: u8 = 0, _unused24: u8 = 0, }; /// Ethernet MAC MII data register pub const MACMIIDR = Register(MACMIIDR_val).init(base_address + 0x14); /// MACFCR const MACFCR_val = packed struct { /// FCB_BPA [0:0] /// Flow control busy/back pressure FCB_BPA: u1 = 0, /// TFCE [1:1] /// Transmit flow control TFCE: u1 = 0, /// RFCE [2:2] /// Receive flow control RFCE: u1 = 0, /// UPFD [3:3] /// Unicast pause frame detect UPFD: u1 = 0, /// PLT [4:5] /// Pause low threshold PLT: u2 = 0, /// unused [6:6] _unused6: u1 = 0, /// ZQPD [7:7] /// Zero-quanta pause disable ZQPD: u1 = 0, /// unused [8:15] _unused8: u8 = 0, /// PT [16:31] /// Pass control frames PT: u16 = 0, }; /// Ethernet MAC flow control register pub const MACFCR = Register(MACFCR_val).init(base_address + 0x18); /// MACVLANTR const MACVLANTR_val = packed struct { /// VLANTI [0:15] /// VLAN tag identifier (for receive VLANTI: u16 = 0, /// VLANTC [16:16] /// 12-bit VLAN tag comparison VLANTC: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Ethernet MAC VLAN tag register pub const MACVLANTR = Register(MACVLANTR_val).init(base_address + 0x1c); /// MACRWUFFR const MACRWUFFR_val = packed struct { /// unused [0:31] _unused0: u8 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Ethernet MAC remote wakeup frame filter pub const MACRWUFFR = Register(MACRWUFFR_val).init(base_address + 0x28); /// MACPMTCSR const MACPMTCSR_val = packed struct { /// PD [0:0] /// Power down PD: u1 = 0, /// MPE [1:1] /// Magic Packet enable MPE: u1 = 0, /// WFE [2:2] /// Wakeup 
frame enable WFE: u1 = 0, /// unused [3:4] _unused3: u2 = 0, /// MPR [5:5] /// Magic packet received MPR: u1 = 0, /// WFR [6:6] /// Wakeup frame received WFR: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// GU [9:9] /// Global unicast GU: u1 = 0, /// unused [10:30] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u7 = 0, /// WFFRPR [31:31] /// Wakeup frame filter register pointer WFFRPR: u1 = 0, }; /// Ethernet MAC PMT control and status register pub const MACPMTCSR = Register(MACPMTCSR_val).init(base_address + 0x2c); /// MACSR const MACSR_val = packed struct { /// unused [0:2] _unused0: u3 = 0, /// PMTS [3:3] /// PMT status PMTS: u1 = 0, /// MMCS [4:4] /// MMC status MMCS: u1 = 0, /// MMCRS [5:5] /// MMC receive status MMCRS: u1 = 0, /// MMCTS [6:6] /// MMC transmit status MMCTS: u1 = 0, /// unused [7:8] _unused7: u1 = 0, _unused8: u1 = 0, /// TSTS [9:9] /// Time stamp trigger status TSTS: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Ethernet MAC interrupt status register pub const MACSR = Register(MACSR_val).init(base_address + 0x38); /// MACIMR const MACIMR_val = packed struct { /// unused [0:2] _unused0: u3 = 0, /// PMTIM [3:3] /// PMT interrupt mask PMTIM: u1 = 0, /// unused [4:8] _unused4: u4 = 0, _unused8: u1 = 0, /// TSTIM [9:9] /// Time stamp trigger interrupt TSTIM: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Ethernet MAC interrupt mask register pub const MACIMR = Register(MACIMR_val).init(base_address + 0x3c); /// MACA0HR const MACA0HR_val = packed struct { /// MACA0H [0:15] /// MAC address0 high MACA0H: u16 = 65535, /// unused [16:30] _unused16: u8 = 16, _unused24: u7 = 0, /// MO [31:31] /// Always 1 MO: u1 = 0, }; /// Ethernet MAC address 0 high register pub const MACA0HR = Register(MACA0HR_val).init(base_address + 0x40); /// MACA0LR const MACA0LR_val = packed struct { /// MACA0L [0:31] /// MAC address0 low MACA0L: u32 = 4294967295, }; /// Ethernet MAC address 0 low pub const MACA0LR = Register(MACA0LR_val).init(base_address + 0x44); /// MACA1HR const MACA1HR_val = packed struct { /// MACA1H [0:15] /// MAC address1 high MACA1H: u16 = 65535, /// unused [16:23] _unused16: u8 = 0, /// MBC [24:29] /// Mask byte control MBC: u6 = 0, /// SA [30:30] /// Source address SA: u1 = 0, /// AE [31:31] /// Address enable AE: u1 = 0, }; /// Ethernet MAC address 1 high register pub const MACA1HR = Register(MACA1HR_val).init(base_address + 0x48); /// MACA1LR const MACA1LR_val = packed struct { /// MACA1L [0:31] /// MAC address1 low MACA1L: u32 = 4294967295, }; /// Ethernet MAC address1 low pub const MACA1LR = Register(MACA1LR_val).init(base_address + 0x4c); /// MACA2HR const MACA2HR_val = packed struct { /// ETH_MACA2HR [0:15] /// Ethernet MAC address 2 high ETH_MACA2HR: u16 = 80, /// unused [16:23] _unused16: u8 = 0, /// MBC [24:29] /// Mask byte control MBC: u6 = 0, /// SA [30:30] /// Source address SA: u1 = 0, /// AE [31:31] /// Address enable AE: u1 = 0, }; /// Ethernet MAC address 2 high register pub const MACA2HR = Register(MACA2HR_val).init(base_address + 0x50); /// MACA2LR const MACA2LR_val = packed struct { /// MACA2L [0:30] /// MAC address2 low MACA2L: u31 = 2147483647, /// unused [31:31] _unused31: u1 = 1, }; /// Ethernet MAC address 2 low pub const MACA2LR = Register(MACA2LR_val).init(base_address + 0x54); /// MACA3HR const MACA3HR_val = packed struct { /// MACA3H [0:15] /// MAC address3 high MACA3H: u16 = 65535, /// unused [16:23] _unused16: u8 = 0, /// MBC [24:29] /// Mask 
byte control MBC: u6 = 0, /// SA [30:30] /// Source address SA: u1 = 0, /// AE [31:31] /// Address enable AE: u1 = 0, }; /// Ethernet MAC address 3 high register pub const MACA3HR = Register(MACA3HR_val).init(base_address + 0x58); /// MACA3LR const MACA3LR_val = packed struct { /// MBCA3L [0:31] /// MAC address3 low MBCA3L: u32 = 4294967295, }; /// Ethernet MAC address 3 low pub const MACA3LR = Register(MACA3LR_val).init(base_address + 0x5c); }; /// Ethernet: Precision time protocol pub const ETHERNET_PTP = struct { const base_address = 0x40028700; /// PTPTSCR const PTPTSCR_val = packed struct { /// TSE [0:0] /// Time stamp enable TSE: u1 = 0, /// TSFCU [1:1] /// Time stamp fine or coarse TSFCU: u1 = 0, /// TSSTI [2:2] /// Time stamp system time TSSTI: u1 = 0, /// TSSTU [3:3] /// Time stamp system time TSSTU: u1 = 0, /// TSITE [4:4] /// Time stamp interrupt trigger TSITE: u1 = 0, /// TSARU [5:5] /// Time stamp addend register TSARU: u1 = 0, /// unused [6:31] _unused6: u2 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Ethernet PTP time stamp control register pub const PTPTSCR = Register(PTPTSCR_val).init(base_address + 0x0); /// PTPSSIR const PTPSSIR_val = packed struct { /// STSSI [0:7] /// System time subsecond STSSI: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Ethernet PTP subsecond increment pub const PTPSSIR = Register(PTPSSIR_val).init(base_address + 0x4); /// PTPTSHR const PTPTSHR_val = packed struct { /// STS [0:31] /// System time second STS: u32 = 0, }; /// Ethernet PTP time stamp high pub const PTPTSHR = Register(PTPTSHR_val).init(base_address + 0x8); /// PTPTSLR const PTPTSLR_val = packed struct { /// STSS [0:30] /// System time subseconds STSS: u31 = 0, /// STPNS [31:31] /// System time positive or negative STPNS: u1 = 0, }; /// Ethernet PTP time stamp low register pub const PTPTSLR = Register(PTPTSLR_val).init(base_address + 0xc); /// PTPTSHUR const PTPTSHUR_val = packed struct { /// TSUS [0:31] /// Time stamp update second TSUS: u32 = 0, }; /// Ethernet PTP time stamp high update pub const PTPTSHUR = Register(PTPTSHUR_val).init(base_address + 0x10); /// PTPTSLUR const PTPTSLUR_val = packed struct { /// TSUSS [0:30] /// Time stamp update TSUSS: u31 = 0, /// TSUPNS [31:31] /// Time stamp update positive or negative TSUPNS: u1 = 0, }; /// Ethernet PTP time stamp low update register pub const PTPTSLUR = Register(PTPTSLUR_val).init(base_address + 0x14); /// PTPTSAR const PTPTSAR_val = packed struct { /// TSA [0:31] /// Time stamp addend TSA: u32 = 0, }; /// Ethernet PTP time stamp addend pub const PTPTSAR = Register(PTPTSAR_val).init(base_address + 0x18); /// PTPTTHR const PTPTTHR_val = packed struct { /// TTSH [0:31] /// Target time stamp high TTSH: u32 = 0, }; /// Ethernet PTP target time high pub const PTPTTHR = Register(PTPTTHR_val).init(base_address + 0x1c); /// PTPTTLR const PTPTTLR_val = packed struct { /// TTSL [0:31] /// Target time stamp low TTSL: u32 = 0, }; /// Ethernet PTP target time low pub const PTPTTLR = Register(PTPTTLR_val).init(base_address + 0x20); }; /// Ethernet: DMA controller operation pub const ETHERNET_DMA = struct { const base_address = 0x40029000; /// DMABMR const DMABMR_val = packed struct { /// SR [0:0] /// Software reset SR: u1 = 1, /// DA [1:1] /// DMA Arbitration DA: u1 = 0, /// DSL [2:6] /// Descriptor skip length DSL: u5 = 0, /// unused [7:7] _unused7: u1 = 0, /// PBL [8:13] /// Programmable burst length PBL: u6 = 1, /// RTPR [14:15] /// Rx Tx priority ratio RTPR: u2 = 0, /// FB 
[16:16] /// Fixed burst FB: u1 = 0, /// RDP [17:22] /// Rx DMA PBL RDP: u6 = 1, /// USP [23:23] /// Use separate PBL USP: u1 = 0, /// FPM [24:24] /// 4xPBL mode FPM: u1 = 0, /// AAB [25:25] /// Address-aligned beats AAB: u1 = 0, /// unused [26:31] _unused26: u6 = 0, }; /// Ethernet DMA bus mode register pub const DMABMR = Register(DMABMR_val).init(base_address + 0x0); /// DMATPDR const DMATPDR_val = packed struct { /// TPD [0:31] /// Transmit poll demand TPD: u32 = 0, }; /// Ethernet DMA transmit poll demand pub const DMATPDR = Register(DMATPDR_val).init(base_address + 0x4); /// DMARPDR const DMARPDR_val = packed struct { /// RPD [0:31] /// Receive poll demand RPD: u32 = 0, }; /// EHERNET DMA receive poll demand pub const DMARPDR = Register(DMARPDR_val).init(base_address + 0x8); /// DMARDLAR const DMARDLAR_val = packed struct { /// SRL [0:31] /// Start of receive list SRL: u32 = 0, }; /// Ethernet DMA receive descriptor list address pub const DMARDLAR = Register(DMARDLAR_val).init(base_address + 0xc); /// DMATDLAR const DMATDLAR_val = packed struct { /// STL [0:31] /// Start of transmit list STL: u32 = 0, }; /// Ethernet DMA transmit descriptor list pub const DMATDLAR = Register(DMATDLAR_val).init(base_address + 0x10); /// DMASR const DMASR_val = packed struct { /// TS [0:0] /// Transmit status TS: u1 = 0, /// TPSS [1:1] /// Transmit process stopped TPSS: u1 = 0, /// TBUS [2:2] /// Transmit buffer unavailable TBUS: u1 = 0, /// TJTS [3:3] /// Transmit jabber timeout TJTS: u1 = 0, /// ROS [4:4] /// Receive overflow status ROS: u1 = 0, /// TUS [5:5] /// Transmit underflow status TUS: u1 = 0, /// RS [6:6] /// Receive status RS: u1 = 0, /// RBUS [7:7] /// Receive buffer unavailable RBUS: u1 = 0, /// RPSS [8:8] /// Receive process stopped RPSS: u1 = 0, /// PWTS [9:9] /// Receive watchdog timeout PWTS: u1 = 0, /// ETS [10:10] /// Early transmit status ETS: u1 = 0, /// unused [11:12] _unused11: u2 = 0, /// FBES [13:13] /// Fatal bus error status FBES: u1 = 0, /// ERS [14:14] /// Early receive status ERS: u1 = 0, /// AIS [15:15] /// Abnormal interrupt summary AIS: u1 = 0, /// NIS [16:16] /// Normal interrupt summary NIS: u1 = 0, /// RPS [17:19] /// Receive process state RPS: u3 = 0, /// TPS [20:22] /// Transmit process state TPS: u3 = 0, /// EBS [23:25] /// Error bits status EBS: u3 = 0, /// unused [26:26] _unused26: u1 = 0, /// MMCS [27:27] /// MMC status MMCS: u1 = 0, /// PMTS [28:28] /// PMT status PMTS: u1 = 0, /// TSTS [29:29] /// Time stamp trigger status TSTS: u1 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// Ethernet DMA status register pub const DMASR = Register(DMASR_val).init(base_address + 0x14); /// DMAOMR const DMAOMR_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// SR [1:1] /// SR SR: u1 = 0, /// OSF [2:2] /// OSF OSF: u1 = 0, /// RTC [3:4] /// RTC RTC: u2 = 0, /// unused [5:5] _unused5: u1 = 0, /// FUGF [6:6] /// FUGF FUGF: u1 = 0, /// FEF [7:7] /// FEF FEF: u1 = 0, /// unused [8:12] _unused8: u5 = 0, /// ST [13:13] /// ST ST: u1 = 0, /// TTC [14:16] /// TTC TTC: u3 = 0, /// unused [17:19] _unused17: u3 = 0, /// FTF [20:20] /// FTF FTF: u1 = 0, /// TSF [21:21] /// TSF TSF: u1 = 0, /// unused [22:23] _unused22: u2 = 0, /// DFRF [24:24] /// DFRF DFRF: u1 = 0, /// RSF [25:25] /// RSF RSF: u1 = 0, /// DTCEFD [26:26] /// DTCEFD DTCEFD: u1 = 0, /// unused [27:31] _unused27: u5 = 0, }; /// Ethernet DMA operation mode pub const DMAOMR = Register(DMAOMR_val).init(base_address + 0x18); /// DMAIER const DMAIER_val = packed struct { /// TIE [0:0] /// Transmit interrupt enable 
TIE: u1 = 0, /// TPSIE [1:1] /// Transmit process stopped interrupt TPSIE: u1 = 0, /// TBUIE [2:2] /// Transmit buffer unavailable interrupt TBUIE: u1 = 0, /// TJTIE [3:3] /// Transmit jabber timeout interrupt TJTIE: u1 = 0, /// ROIE [4:4] /// Overflow interrupt enable ROIE: u1 = 0, /// TUIE [5:5] /// Underflow interrupt enable TUIE: u1 = 0, /// RIE [6:6] /// Receive interrupt enable RIE: u1 = 0, /// RBUIE [7:7] /// Receive buffer unavailable interrupt RBUIE: u1 = 0, /// RPSIE [8:8] /// Receive process stopped interrupt RPSIE: u1 = 0, /// RWTIE [9:9] /// receive watchdog timeout interrupt RWTIE: u1 = 0, /// ETIE [10:10] /// Early transmit interrupt ETIE: u1 = 0, /// unused [11:12] _unused11: u2 = 0, /// FBEIE [13:13] /// Fatal bus error interrupt FBEIE: u1 = 0, /// ERIE [14:14] /// Early receive interrupt ERIE: u1 = 0, /// AISE [15:15] /// Abnormal interrupt summary AISE: u1 = 0, /// NISE [16:16] /// Normal interrupt summary NISE: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// Ethernet DMA interrupt enable pub const DMAIER = Register(DMAIER_val).init(base_address + 0x1c); /// DMAMFBOCR const DMAMFBOCR_val = packed struct { /// MFC [0:15] /// Missed frames by the MFC: u16 = 0, /// OMFC [16:16] /// Overflow bit for missed frame OMFC: u1 = 0, /// MFA [17:27] /// Missed frames by the MFA: u11 = 0, /// OFOC [28:28] /// Overflow bit for FIFO overflow OFOC: u1 = 0, /// unused [29:31] _unused29: u3 = 0, }; /// Ethernet DMA missed frame and buffer pub const DMAMFBOCR = Register(DMAMFBOCR_val).init(base_address + 0x20); /// DMACHTDR const DMACHTDR_val = packed struct { /// HTDAP [0:31] /// Host transmit descriptor address HTDAP: u32 = 0, }; /// Ethernet DMA current host transmit pub const DMACHTDR = Register(DMACHTDR_val).init(base_address + 0x48); /// DMACHRDR const DMACHRDR_val = packed struct { /// HRDAP [0:31] /// Host receive descriptor address HRDAP: u32 = 0, }; /// Ethernet DMA current host receive descriptor pub const DMACHRDR = Register(DMACHRDR_val).init(base_address + 0x4c); /// DMACHTBAR const DMACHTBAR_val = packed struct { /// HTBAP [0:31] /// Host transmit buffer address HTBAP: u32 = 0, }; /// Ethernet DMA current host transmit buffer pub const DMACHTBAR = Register(DMACHTBAR_val).init(base_address + 0x50); /// DMACHRBAR const DMACHRBAR_val = packed struct { /// HRBAP [0:31] /// Host receive buffer address HRBAP: u32 = 0, }; /// Ethernet DMA current host receive buffer pub const DMACHRBAR = Register(DMACHRBAR_val).init(base_address + 0x54); }; /// Nested Vectored Interrupt pub const NVIC = struct { const base_address = 0xe000e100; /// ISER0 const ISER0_val = packed struct { /// SETENA [0:31] /// SETENA SETENA: u32 = 0, }; /// Interrupt Set-Enable Register pub const ISER0 = Register(ISER0_val).init(base_address + 0x0); /// ISER1 const ISER1_val = packed struct { /// SETENA [0:31] /// SETENA SETENA: u32 = 0, }; /// Interrupt Set-Enable Register pub const ISER1 = Register(ISER1_val).init(base_address + 0x4); /// ICER0 const ICER0_val = packed struct { /// CLRENA [0:31] /// CLRENA CLRENA: u32 = 0, }; /// Interrupt Clear-Enable pub const ICER0 = Register(ICER0_val).init(base_address + 0x80); /// ICER1 const ICER1_val = packed struct { /// CLRENA [0:31] /// CLRENA CLRENA: u32 = 0, }; /// Interrupt Clear-Enable pub const ICER1 = Register(ICER1_val).init(base_address + 0x84); /// ISPR0 const ISPR0_val = packed struct { /// SETPEND [0:31] /// SETPEND SETPEND: u32 = 0, }; /// Interrupt Set-Pending Register pub const ISPR0 = Register(ISPR0_val).init(base_address + 0x100); 
/// ISPR1 const ISPR1_val = packed struct { /// SETPEND [0:31] /// SETPEND SETPEND: u32 = 0, }; /// Interrupt Set-Pending Register pub const ISPR1 = Register(ISPR1_val).init(base_address + 0x104); /// ICPR0 const ICPR0_val = packed struct { /// CLRPEND [0:31] /// CLRPEND CLRPEND: u32 = 0, }; /// Interrupt Clear-Pending pub const ICPR0 = Register(ICPR0_val).init(base_address + 0x180); /// ICPR1 const ICPR1_val = packed struct { /// CLRPEND [0:31] /// CLRPEND CLRPEND: u32 = 0, }; /// Interrupt Clear-Pending pub const ICPR1 = Register(ICPR1_val).init(base_address + 0x184); /// IABR0 const IABR0_val = packed struct { /// ACTIVE [0:31] /// ACTIVE ACTIVE: u32 = 0, }; /// Interrupt Active Bit Register pub const IABR0 = Register(IABR0_val).init(base_address + 0x200); /// IABR1 const IABR1_val = packed struct { /// ACTIVE [0:31] /// ACTIVE ACTIVE: u32 = 0, }; /// Interrupt Active Bit Register pub const IABR1 = Register(IABR1_val).init(base_address + 0x204); /// IPR0 const IPR0_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR0 = Register(IPR0_val).init(base_address + 0x300); /// IPR1 const IPR1_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR1 = Register(IPR1_val).init(base_address + 0x304); /// IPR2 const IPR2_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR2 = Register(IPR2_val).init(base_address + 0x308); /// IPR3 const IPR3_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR3 = Register(IPR3_val).init(base_address + 0x30c); /// IPR4 const IPR4_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR4 = Register(IPR4_val).init(base_address + 0x310); /// IPR5 const IPR5_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR5 = Register(IPR5_val).init(base_address + 0x314); /// IPR6 const IPR6_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR6 = Register(IPR6_val).init(base_address + 0x318); /// IPR7 const IPR7_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR7 = Register(IPR7_val).init(base_address + 0x31c); /// IPR8 const 
IPR8_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR8 = Register(IPR8_val).init(base_address + 0x320); /// IPR9 const IPR9_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR9 = Register(IPR9_val).init(base_address + 0x324); /// IPR10 const IPR10_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR10 = Register(IPR10_val).init(base_address + 0x328); /// IPR11 const IPR11_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR11 = Register(IPR11_val).init(base_address + 0x32c); /// IPR12 const IPR12_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR12 = Register(IPR12_val).init(base_address + 0x330); /// IPR13 const IPR13_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR13 = Register(IPR13_val).init(base_address + 0x334); /// IPR14 const IPR14_val = packed struct { /// IPR_N0 [0:7] /// IPR_N0 IPR_N0: u8 = 0, /// IPR_N1 [8:15] /// IPR_N1 IPR_N1: u8 = 0, /// IPR_N2 [16:23] /// IPR_N2 IPR_N2: u8 = 0, /// IPR_N3 [24:31] /// IPR_N3 IPR_N3: u8 = 0, }; /// Interrupt Priority Register pub const IPR14 = Register(IPR14_val).init(base_address + 0x338); }; /// Memory protection unit pub const MPU = struct { const base_address = 0xe000ed90; /// MPU_TYPER const MPU_TYPER_val = packed struct { /// SEPARATE [0:0] /// Separate flag SEPARATE: u1 = 0, /// unused [1:7] _unused1: u7 = 0, /// DREGION [8:15] /// Number of MPU data regions DREGION: u8 = 8, /// IREGION [16:23] /// Number of MPU instruction IREGION: u8 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// MPU type register pub const MPU_TYPER = Register(MPU_TYPER_val).init(base_address + 0x0); /// MPU_CTRL const MPU_CTRL_val = packed struct { /// ENABLE [0:0] /// Enables the MPU ENABLE: u1 = 0, /// HFNMIENA [1:1] /// Enables the operation of MPU during hard HFNMIENA: u1 = 0, /// PRIVDEFENA [2:2] /// Enable priviliged software access to PRIVDEFENA: u1 = 0, /// unused [3:31] _unused3: u5 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// MPU control register pub const MPU_CTRL = Register(MPU_CTRL_val).init(base_address + 0x4); /// MPU_RNR const MPU_RNR_val = packed struct { /// REGION [0:7] /// MPU region REGION: u8 = 0, /// unused [8:31] _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// MPU region number register pub const MPU_RNR = Register(MPU_RNR_val).init(base_address + 0x8); /// MPU_RBAR const MPU_RBAR_val = packed struct { /// REGION [0:3] 
/// MPU region field REGION: u4 = 0, /// VALID [4:4] /// MPU region number valid VALID: u1 = 0, /// ADDR [5:31] /// Region base address field ADDR: u27 = 0, }; /// MPU region base address pub const MPU_RBAR = Register(MPU_RBAR_val).init(base_address + 0xc); /// MPU_RASR const MPU_RASR_val = packed struct { /// ENABLE [0:0] /// Region enable bit. ENABLE: u1 = 0, /// SIZE [1:5] /// Size of the MPU protection SIZE: u5 = 0, /// unused [6:7] _unused6: u2 = 0, /// SRD [8:15] /// Subregion disable bits SRD: u8 = 0, /// B [16:16] /// memory attribute B: u1 = 0, /// C [17:17] /// memory attribute C: u1 = 0, /// S [18:18] /// Shareable memory attribute S: u1 = 0, /// TEX [19:21] /// memory attribute TEX: u3 = 0, /// unused [22:23] _unused22: u2 = 0, /// AP [24:26] /// Access permission AP: u3 = 0, /// unused [27:27] _unused27: u1 = 0, /// XN [28:28] /// Instruction access disable XN: u1 = 0, /// unused [29:31] _unused29: u3 = 0, }; /// MPU region attribute and size pub const MPU_RASR = Register(MPU_RASR_val).init(base_address + 0x10); }; /// System control block ACTLR pub const SCB_ACTRL = struct { const base_address = 0xe000e008; /// ACTRL const ACTRL_val = packed struct { /// unused [0:1] _unused0: u2 = 0, /// DISFOLD [2:2] /// DISFOLD DISFOLD: u1 = 0, /// unused [3:9] _unused3: u5 = 0, _unused8: u2 = 0, /// FPEXCODIS [10:10] /// FPEXCODIS FPEXCODIS: u1 = 0, /// DISRAMODE [11:11] /// DISRAMODE DISRAMODE: u1 = 0, /// DISITMATBFLUSH [12:12] /// DISITMATBFLUSH DISITMATBFLUSH: u1 = 0, /// unused [13:31] _unused13: u3 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Auxiliary control register pub const ACTRL = Register(ACTRL_val).init(base_address + 0x0); }; /// Nested vectored interrupt pub const NVIC_STIR = struct { const base_address = 0xe000ef00; /// STIR const STIR_val = packed struct { /// INTID [0:8] /// Software generated interrupt INTID: u9 = 0, /// unused [9:31] _unused9: u7 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Software trigger interrupt pub const STIR = Register(STIR_val).init(base_address + 0x0); }; /// System control block pub const SCB = struct { const base_address = 0xe000ed00; /// CPUID const CPUID_val = packed struct { /// Revision [0:3] /// Revision number Revision: u4 = 1, /// PartNo [4:15] /// Part number of the PartNo: u12 = 3108, /// Constant [16:19] /// Reads as 0xF Constant: u4 = 15, /// Variant [20:23] /// Variant number Variant: u4 = 0, /// Implementer [24:31] /// Implementer code Implementer: u8 = 65, }; /// CPUID base register pub const CPUID = Register(CPUID_val).init(base_address + 0x0); /// ICSR const ICSR_val = packed struct { /// VECTACTIVE [0:8] /// Active vector VECTACTIVE: u9 = 0, /// unused [9:10] _unused9: u2 = 0, /// RETTOBASE [11:11] /// Return to base level RETTOBASE: u1 = 0, /// VECTPENDING [12:18] /// Pending vector VECTPENDING: u7 = 0, /// unused [19:21] _unused19: u3 = 0, /// ISRPENDING [22:22] /// Interrupt pending flag ISRPENDING: u1 = 0, /// unused [23:24] _unused23: u1 = 0, _unused24: u1 = 0, /// PENDSTCLR [25:25] /// SysTick exception clear-pending PENDSTCLR: u1 = 0, /// PENDSTSET [26:26] /// SysTick exception set-pending PENDSTSET: u1 = 0, /// PENDSVCLR [27:27] /// PendSV clear-pending bit PENDSVCLR: u1 = 0, /// PENDSVSET [28:28] /// PendSV set-pending bit PENDSVSET: u1 = 0, /// unused [29:30] _unused29: u2 = 0, /// NMIPENDSET [31:31] /// NMI set-pending bit. 
NMIPENDSET: u1 = 0, }; /// Interrupt control and state pub const ICSR = Register(ICSR_val).init(base_address + 0x4); /// VTOR const VTOR_val = packed struct { /// unused [0:8] _unused0: u8 = 0, _unused8: u1 = 0, /// TBLOFF [9:29] /// Vector table base offset TBLOFF: u21 = 0, /// unused [30:31] _unused30: u2 = 0, }; /// Vector table offset register pub const VTOR = Register(VTOR_val).init(base_address + 0x8); /// AIRCR const AIRCR_val = packed struct { /// VECTRESET [0:0] /// VECTRESET VECTRESET: u1 = 0, /// VECTCLRACTIVE [1:1] /// VECTCLRACTIVE VECTCLRACTIVE: u1 = 0, /// SYSRESETREQ [2:2] /// SYSRESETREQ SYSRESETREQ: u1 = 0, /// unused [3:7] _unused3: u5 = 0, /// PRIGROUP [8:10] /// PRIGROUP PRIGROUP: u3 = 0, /// unused [11:14] _unused11: u4 = 0, /// ENDIANESS [15:15] /// ENDIANESS ENDIANESS: u1 = 0, /// VECTKEYSTAT [16:31] /// Register key VECTKEYSTAT: u16 = 0, }; /// Application interrupt and reset control pub const AIRCR = Register(AIRCR_val).init(base_address + 0xc); /// SCR const SCR_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// SLEEPONEXIT [1:1] /// SLEEPONEXIT SLEEPONEXIT: u1 = 0, /// SLEEPDEEP [2:2] /// SLEEPDEEP SLEEPDEEP: u1 = 0, /// unused [3:3] _unused3: u1 = 0, /// SEVEONPEND [4:4] /// Send Event on Pending bit SEVEONPEND: u1 = 0, /// unused [5:31] _unused5: u3 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// System control register pub const SCR = Register(SCR_val).init(base_address + 0x10); /// CCR const CCR_val = packed struct { /// NONBASETHRDENA [0:0] /// Configures how the processor enters NONBASETHRDENA: u1 = 0, /// USERSETMPEND [1:1] /// USERSETMPEND USERSETMPEND: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// UNALIGN__TRP [3:3] /// UNALIGN_ TRP UNALIGN__TRP: u1 = 0, /// DIV_0_TRP [4:4] /// DIV_0_TRP DIV_0_TRP: u1 = 0, /// unused [5:7] _unused5: u3 = 0, /// BFHFNMIGN [8:8] /// BFHFNMIGN BFHFNMIGN: u1 = 0, /// STKALIGN [9:9] /// STKALIGN STKALIGN: u1 = 0, /// unused [10:31] _unused10: u6 = 0, _unused16: u8 = 0, _unused24: u8 = 0, }; /// Configuration and control pub const CCR = Register(CCR_val).init(base_address + 0x14); /// SHPR1 const SHPR1_val = packed struct { /// PRI_4 [0:7] /// Priority of system handler PRI_4: u8 = 0, /// PRI_5 [8:15] /// Priority of system handler PRI_5: u8 = 0, /// PRI_6 [16:23] /// Priority of system handler PRI_6: u8 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// System handler priority pub const SHPR1 = Register(SHPR1_val).init(base_address + 0x18); /// SHPR2 const SHPR2_val = packed struct { /// unused [0:23] _unused0: u8 = 0, _unused8: u8 = 0, _unused16: u8 = 0, /// PRI_11 [24:31] /// Priority of system handler PRI_11: u8 = 0, }; /// System handler priority pub const SHPR2 = Register(SHPR2_val).init(base_address + 0x1c); /// SHPR3 const SHPR3_val = packed struct { /// unused [0:15] _unused0: u8 = 0, _unused8: u8 = 0, /// PRI_14 [16:23] /// Priority of system handler PRI_14: u8 = 0, /// PRI_15 [24:31] /// Priority of system handler PRI_15: u8 = 0, }; /// System handler priority pub const SHPR3 = Register(SHPR3_val).init(base_address + 0x20); /// SHCRS const SHCRS_val = packed struct { /// MEMFAULTACT [0:0] /// Memory management fault exception active MEMFAULTACT: u1 = 0, /// BUSFAULTACT [1:1] /// Bus fault exception active BUSFAULTACT: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// USGFAULTACT [3:3] /// Usage fault exception active USGFAULTACT: u1 = 0, /// unused [4:6] _unused4: u3 = 0, /// SVCALLACT [7:7] /// SVC call active bit SVCALLACT: u1 = 0, /// MONITORACT [8:8] /// Debug monitor active bit 
MONITORACT: u1 = 0, /// unused [9:9] _unused9: u1 = 0, /// PENDSVACT [10:10] /// PendSV exception active PENDSVACT: u1 = 0, /// SYSTICKACT [11:11] /// SysTick exception active SYSTICKACT: u1 = 0, /// USGFAULTPENDED [12:12] /// Usage fault exception pending USGFAULTPENDED: u1 = 0, /// MEMFAULTPENDED [13:13] /// Memory management fault exception MEMFAULTPENDED: u1 = 0, /// BUSFAULTPENDED [14:14] /// Bus fault exception pending BUSFAULTPENDED: u1 = 0, /// SVCALLPENDED [15:15] /// SVC call pending bit SVCALLPENDED: u1 = 0, /// MEMFAULTENA [16:16] /// Memory management fault enable MEMFAULTENA: u1 = 0, /// BUSFAULTENA [17:17] /// Bus fault enable bit BUSFAULTENA: u1 = 0, /// USGFAULTENA [18:18] /// Usage fault enable bit USGFAULTENA: u1 = 0, /// unused [19:31] _unused19: u5 = 0, _unused24: u8 = 0, }; /// System handler control and state pub const SHCRS = Register(SHCRS_val).init(base_address + 0x24); /// CFSR_UFSR_BFSR_MMFSR const CFSR_UFSR_BFSR_MMFSR_val = packed struct { /// IACCVIOL [0:0] /// IACCVIOL IACCVIOL: u1 = 0, /// DACCVIOL [1:1] /// DACCVIOL DACCVIOL: u1 = 0, /// unused [2:2] _unused2: u1 = 0, /// MUNSTKERR [3:3] /// MUNSTKERR MUNSTKERR: u1 = 0, /// MSTKERR [4:4] /// MSTKERR MSTKERR: u1 = 0, /// MLSPERR [5:5] /// MLSPERR MLSPERR: u1 = 0, /// unused [6:6] _unused6: u1 = 0, /// MMARVALID [7:7] /// MMARVALID MMARVALID: u1 = 0, /// IBUSERR [8:8] /// Instruction bus error IBUSERR: u1 = 0, /// PRECISERR [9:9] /// Precise data bus error PRECISERR: u1 = 0, /// IMPRECISERR [10:10] /// Imprecise data bus error IMPRECISERR: u1 = 0, /// UNSTKERR [11:11] /// Bus fault on unstacking for a return UNSTKERR: u1 = 0, /// STKERR [12:12] /// Bus fault on stacking for exception STKERR: u1 = 0, /// LSPERR [13:13] /// Bus fault on floating-point lazy state LSPERR: u1 = 0, /// unused [14:14] _unused14: u1 = 0, /// BFARVALID [15:15] /// Bus Fault Address Register (BFAR) valid BFARVALID: u1 = 0, /// UNDEFINSTR [16:16] /// Undefined instruction usage UNDEFINSTR: u1 = 0, /// INVSTATE [17:17] /// Invalid state usage fault INVSTATE: u1 = 0, /// INVPC [18:18] /// Invalid PC load usage INVPC: u1 = 0, /// NOCP [19:19] /// No coprocessor usage NOCP: u1 = 0, /// unused [20:23] _unused20: u4 = 0, /// UNALIGNED [24:24] /// Unaligned access usage UNALIGNED: u1 = 0, /// DIVBYZERO [25:25] /// Divide by zero usage fault DIVBYZERO: u1 = 0, /// unused [26:31] _unused26: u6 = 0, }; /// Configurable fault status pub const CFSR_UFSR_BFSR_MMFSR = Register(CFSR_UFSR_BFSR_MMFSR_val).init(base_address + 0x28); /// HFSR const HFSR_val = packed struct { /// unused [0:0] _unused0: u1 = 0, /// VECTTBL [1:1] /// Vector table hard fault VECTTBL: u1 = 0, /// unused [2:29] _unused2: u6 = 0, _unused8: u8 = 0, _unused16: u8 = 0, _unused24: u6 = 0, /// FORCED [30:30] /// Forced hard fault FORCED: u1 = 0, /// DEBUG_VT [31:31] /// Reserved for Debug use DEBUG_VT: u1 = 0, }; /// Hard fault status register pub const HFSR = Register(HFSR_val).init(base_address + 0x2c); /// MMFAR const MMFAR_val = packed struct { /// MMFAR [0:31] /// Memory management fault MMFAR: u32 = 0, }; /// Memory management fault address pub const MMFAR = Register(MMFAR_val).init(base_address + 0x34); /// BFAR const BFAR_val = packed struct { /// BFAR [0:31] /// Bus fault address BFAR: u32 = 0, }; /// Bus fault address register pub const BFAR = Register(BFAR_val).init(base_address + 0x38); }; /// SysTick timer pub const STK = struct { const base_address = 0xe000e010; /// CTRL const CTRL_val = packed struct { /// ENABLE [0:0] /// Counter enable ENABLE: u1 = 0, /// TICKINT 
[1:1] /// SysTick exception request TICKINT: u1 = 0, /// CLKSOURCE [2:2] /// Clock source selection CLKSOURCE: u1 = 0, /// unused [3:15] _unused3: u5 = 0, _unused8: u8 = 0, /// COUNTFLAG [16:16] /// COUNTFLAG COUNTFLAG: u1 = 0, /// unused [17:31] _unused17: u7 = 0, _unused24: u8 = 0, }; /// SysTick control and status pub const CTRL = Register(CTRL_val).init(base_address + 0x0); /// LOAD_ const LOAD__val = packed struct { /// RELOAD [0:23] /// RELOAD value RELOAD: u24 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// SysTick reload value register pub const LOAD_ = Register(LOAD__val).init(base_address + 0x4); /// VAL const VAL_val = packed struct { /// CURRENT [0:23] /// Current counter value CURRENT: u24 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// SysTick current value register pub const VAL = Register(VAL_val).init(base_address + 0x8); /// CALIB const CALIB_val = packed struct { /// TENMS [0:23] /// Calibration value TENMS: u24 = 0, /// unused [24:31] _unused24: u8 = 0, }; /// SysTick calibration value pub const CALIB = Register(CALIB_val).init(base_address + 0xc); }; pub const interrupts = struct { pub const TIM1_TRG_COM = 26; pub const TIM6 = 54; pub const CAN_SCE = 22; pub const I2C2_ER = 34; pub const DMA2_Channel1 = 56; pub const EXTI3 = 9; pub const RTCAlarm = 41; pub const TIM5 = 50; pub const SPI2 = 36; pub const USART2 = 38; pub const EXTI0 = 6; pub const I2C2_EV = 33; pub const TAMPER = 2; pub const CAN_RX1 = 21; pub const EXTI1 = 7; pub const TIM8_BRK = 43; pub const TIM2 = 28; pub const EXTI15_10 = 40; pub const RCC = 5; pub const USART1 = 37; pub const DMA1_Channel6 = 16; pub const DMA2_Channel3 = 58; pub const USB_LP_CAN_RX0 = 20; pub const TIM7 = 55; pub const DMA1_Channel3 = 13; pub const TIM1_BRK = 24; pub const DMA1_Channel1 = 11; pub const SDIO = 49; pub const ADC3 = 47; pub const DMA2_Channel4_5 = 59; pub const RTC = 3; pub const DMA1_Channel7 = 17; pub const TIM8_TRG_COM = 45; pub const SPI3 = 51; pub const EXTI9_5 = 23; pub const TIM1_CC = 27; pub const I2C1_EV = 31; pub const TIM4 = 30; pub const DMA1_Channel2 = 12; pub const WWDG = 0; pub const DMA1_Channel4 = 14; pub const EXTI2 = 8; pub const TIM8_UP = 44; pub const TIM8_CC = 46; pub const ADC1_2 = 18; pub const TIM1_UP = 25; pub const USART3 = 39; pub const UART4 = 52; pub const DMA2_Channel2 = 57; pub const I2C1_ER = 32; pub const USB_HP_CAN_TX = 19; pub const PVD = 1; pub const TIM3 = 29; pub const FLASH = 4; pub const SPI1 = 35; pub const DMA1_Channel5 = 15; pub const UART5 = 53; pub const EXTI4 = 10; pub const FSMC = 48; };
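
// Note (editorial comment, not generated from the SVD): the values in
// `interrupts` above are ARM Cortex-M NVIC IRQ numbers. IRQ n is controlled by
// bit (n % 32) of ISERx/ICERx/ISPRx/ICPRx with x = n / 32, for example:
//
//   TIM2          (28) -> ISER0, bit 28
//   DMA2_Channel1 (56) -> ISER1, bit 24
//
// The actual access goes through the `Register` wrapper used throughout this
// file; its accessor methods are defined earlier and not shown in this
// excerpt, so only the index/bit mapping is illustrated here.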
src/registers.zig
/// mandelbrot.zig
/// Module to calculate Mandelbrot sets in Zig
///
const std = @import("std");
const C32 = std.math.complex.Complex(f32);

const image = @import("./image.zig");

/// maximum pixel value
const n_max = 255;

/// RGB values for a single pixel
const RGB24 = image.RGB(u8);
const ImageRGB24 = image.Image(image.RGB, u8);

/// calculate Mandelbrot value for given (complex) coordinates
fn pixel_value(z: C32, r_max: f32) u8 {
    var n: u8 = n_max;
    var z1 = C32{ .re = 0, .im = 0 };
    while (n > 0) : (n -= 1) {
        if (std.math.complex.abs(z1) > r_max) return n;
        z1 = C32.add(C32.mul(z1, z1), z);
    }
    return 0;
}

/// convert pixel value to RGB value(s)
fn to_rgb(val: u8) RGB24 {
    return RGB24{
        .r = 5 * @mod(val, 15),
        .g = 32 * @mod(val, 7),
        .b = 8 * @mod(val, 31),
    };
}

/// creates a Mandelbrot RGB image of given size
pub fn create(allocator: *std.mem.Allocator, width: usize, height: usize, x_center: f32, y_center: f32, pixel_size: f32) !ImageRGB24 {
    var img = try ImageRGB24.init(allocator, width, height);
    const offset = C32{
        .re = x_center - 0.5 * pixel_size * @intToFloat(f32, width),
        .im = y_center + 0.5 * pixel_size * @intToFloat(f32, height),
    };
    var y: usize = 0;
    while (y < height) : (y += 1) {
        var x: usize = 0;
        while (x < width) : (x += 1) {
            const coord = C32.add(offset, C32{
                .re = @intToFloat(f32, x) * pixel_size,
                .im = -@intToFloat(f32, y) * pixel_size,
            });
            const p = to_rgb(pixel_value(coord, 2.0));
            try img.set_pixel(x, y, p);
        }
    }
    return img;
}

/// writes image as PNM file
//pub fn writePPM(allocator: *std.mem.Allocator, img: []RGB, width: usize, height: usize, file_path: []const u8) !void {
//    const file = try std.fs.cwd().createFile(file_path, .{ .truncate = true });
//    defer file.close();
//    const w = file.writer();
//
//    var line_buf = std.ArrayList(u8).init(allocator);
//    defer line_buf.deinit();
//    var buf_writer = line_buf.writer();
//
//    try w.print("P3\n", .{});
//    try w.print("{} {} {}\n", .{ width, height, 255 });
//    for (img) |pix, i| {
//        try buf_writer.print("{} {} {} ", .{ pix.r, pix.g, pix.b });
//        if (i % 8 == 13) {
//            _ = try buf_writer.write("\n");
//            _ = try w.write(line_buf.items);
//            line_buf.shrinkRetainingCapacity(0);
//        }
//    }
//    // make sure remaining pixels are written to file
//    if (line_buf.items.len > 0) {
//        _ = try buf_writer.write("\n");
//        _ = try w.write(line_buf.items);
//    }
//}

const testing = std.testing;

//test "write image as PPM" {
//    const test_allocator = std.testing.allocator;
//
//    var img = [_]RGB{
//        RGB{ .r = 0, .g = 0, .b = 0 },
//        RGB{ .r = 128, .g = 0, .b = 0 },
//        RGB{ .r = 255, .g = 0, .b = 0 },
//        RGB{ .r = 0, .g = 0, .b = 0 },
//        RGB{ .r = 0, .g = 128, .b = 0 },
//        RGB{ .r = 0, .g = 255, .b = 0 },
//        RGB{ .r = 0, .g = 0, .b = 0 },
//        RGB{ .r = 0, .g = 0, .b = 128 },
//        RGB{ .r = 0, .g = 0, .b = 255 },
//    };
//    const img_slice: []RGB = img[0..];
//
//    try writePPM(test_allocator, img_slice, 3, 3, "test_image.ppm");
//    defer std.fs.cwd().deleteFile("test_image.ppm") catch unreachable;
//
//    const file = try std.fs.cwd().openFile("test_image.ppm", .{ .read = true });
//    defer file.close();
//
//    const contents = try file.reader().readAllAlloc(
//        test_allocator,
//        120,
//    );
//    defer test_allocator.free(contents);
//
//    testing.expect(std.mem.eql(u8, contents, "P3\n3 3 255\n0 0 0 128 0 0 255 0 0 0 0 0 0 128 0 0 255 0 0 0 0 0 0 128 0 0 255 \n"));
//}
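
// Usage sketch (added for illustration, not part of the original module): how
// `create` could be exercised from a test. The image size, center point and
// pixel size below are arbitrary example values, and the test stays commented
// out like the one above because image.zig (and whatever cleanup ImageRGB24
// may require) is not shown here.
//
//test "create a small Mandelbrot image" {
//    const test_allocator = std.testing.allocator;
//    const img = try create(test_allocator, 64, 48, -0.5, 0.0, 0.05);
//    // the whole 64x48 grid has been filled via set_pixel at this point
//    _ = img;
//}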
Zig/benchmark/src/mandelbrot.zig
const builtin = @import("builtin"); const TypeInfo = builtin.TypeInfo; const TypeId = builtin.TypeId; const ErrorSet = builtin.TypeId.ErrorSet; const AtomicOrder = builtin.AtomicOrder; const AtomicRmwOp = builtin.AtomicRmwOp; const std = @import("std"); const Allocator = mem.Allocator; const ArrayList = std.ArrayList; const Timer = std.os.time.Timer; const mem = std.mem; const bufPrint = std.fmt.bufPrint; const format = std.fmt.format; const warn = std.debug.warn; const assert = std.debug.assert; const assertError = std.debug.assertError; const ns_per_s = 1000000000; /// compiler fence, request compiler to not reorder around cfence. fn cfence() void { asm volatile ("": : :"memory"); } /// mfence instruction fn mfence() void { asm volatile ("mfence": : :"memory"); } /// lfence instruction fn lfence() void { asm volatile ("lfence": : :"memory"); } /// sfence instruction fn sfence() void { asm volatile ("sfence": : :"memory"); } /// A possible API for a benchmark framework pub const Benchmark = struct { const Self = @This(); const Result = struct { run_time_ns: u64, iterations: u64, inner_iterations: u64, // Ascending compare lhs < rhs fn asc(lhs: Result, rhs: Result) bool { return lhs.run_time_ns < rhs.run_time_ns; } // Descending compare lhs > rhs fn desc(lhs: Result, rhs: Result) bool { return lhs.run_time_ns > rhs.run_time_ns; } }; pub name: []const u8, pub logl: usize, pub min_runtime_ns: u64, pub repetitions: u64, pub max_iterations: u64, pub pre_run_results: ArrayList(Result), pub results: ArrayList(Result), timer: Timer, pAllocator: *Allocator, /// Initialize benchmark framework pub fn init(name: []const u8, pAllocator: *Allocator) Self { return Self { .name = name, .logl = 0, .min_runtime_ns = ns_per_s / 2, .repetitions = 1, .max_iterations = 100000000000, .timer = undefined, .pAllocator = pAllocator, .pre_run_results = ArrayList(Result).init(pAllocator), .results = ArrayList(Result).init(pAllocator), }; } /// Create an instance of T and run it pub fn createRun(pSelf: *Self, comptime T: type) !T { if (pSelf.logl >= 1) warn("run: logl={} min_runtime_ns={} max_iterations={}\n", pSelf.logl, pSelf.min_runtime_ns, pSelf.max_iterations); // Make sure T is a struct const info = @typeInfo(T); if (TypeId(info) != TypeId.Struct) @compileError("T is not a Struct"); // Call bm.init if available var bm: T = undefined; if (comptime defExists("init", info.Struct.defs)) { if (comptime @typeOf(T.init).ReturnType == T) { bm = T.init(); } else { bm = try T.init(); } } try pSelf.run(&bm); return bm; } pub fn run(pSelf: *Self, bm: var) !void { var once = true; var iterations: u64 = 1; var rep: u64 = 0; while (rep < pSelf.repetitions) : (rep += 1) { const T = @typeOf(bm.*); // This loop increases iterations until the time is at least min_runtime_ns. // uses that iterations count for each subsequent repetition. while (iterations <= pSelf.max_iterations) { // Run the current iterations var result = try pSelf.runIterations(T, bm, iterations); // If it took >= min_runtime_ns or was very large we'll do the next repeition. 
if ((result.run_time_ns >= pSelf.min_runtime_ns) or (iterations >= pSelf.max_iterations)) { // Append the result and do the next iteration try pSelf.results.append(result); break; } else { if (pSelf.logl >= 1) { try pSelf.report(result); warn("\n"); } try pSelf.pre_run_results.append(result); // Increase iterations count var denom: u64 = undefined; var numer: u64 = undefined; if (result.run_time_ns < 1000) { numer = 1000; denom = 1; } else if (result.run_time_ns < (pSelf.min_runtime_ns / 10)) { numer = 10; denom = 1; } else { numer = 14; denom = 10; } iterations = (iterations * numer) / denom; if (iterations > pSelf.max_iterations) { iterations = pSelf.max_iterations; } if (pSelf.logl >= 2) { warn("iteratons:{} numer:{} denom:{}\n", iterations, numer, denom); } } } // Report Type header once if (once) { once = false; try leftJustified(22, "name repetitions:{}", pSelf.repetitions); try rightJustified(14, "{}", "iterations"); try rightJustified(12, "{}", "time"); try rightJustified(18, "{}", "time/operation"); warn("\n"); } // Report results try pSelf.report(pSelf.results.items[pSelf.results.len - 1]); warn("\n"); } try pSelf.reportStats(pSelf.results); } /// Run the specified number of iterations returning the time in ns fn runIterations( pSelf: *Self, comptime T: type, pBm: *T, iterations: u64, ) !Result { const info = @typeInfo(T); // Call bm.setup with try if needed if (comptime defExists("setup", info.Struct.defs)) { if (comptime @typeOf(T.setup).ReturnType == void) { pBm.setup(); } else { try pBm.setup(); } } var timer = try Timer.start(); var iter = iterations; var inner_iterations: u64 = 1; while (iter > 0) : (iter -= 1) { const args_len = comptime @typeInfo(@typeOf(T.benchmark)).Fn.args.len; switch (comptime args_len) { 0 => { switch (comptime @typeOf(T.benchmark).ReturnType) { void => T.benchmark(), error!void => try T.benchmark(), u64 => inner_iterations = T.benchmark(), error!u64 => inner_iterations = try T.benchmark(), else => @compileError("Expected return type to be void, !void, u64, !u64\n"), } }, 1 => { switch (comptime @typeOf(T.benchmark).ReturnType) { void => pBm.benchmark(), error!void => try pBm.benchmark(), u64 => inner_iterations = pBm.benchmark(), error!u64 => inner_iterations = try pBm.benchmark(), else => @compileError("Expected return type to be void, !void, u64, !u64\n"), } }, else => { @compileError("Expected T.benchmark to have 0 or 1 parameters"); }, } } var duration = timer.read(); // Call bm.tearDown with try if needed if (comptime defExists("tearDown", info.Struct.defs)) { if (comptime @typeOf(T.tearDown).ReturnType == void) { pBm.tearDown(); } else { try pBm.tearDown(); } } return Result{ .run_time_ns = duration, .iterations = iterations, .inner_iterations = inner_iterations,}; } fn defExists(name: [] const u8, comptime defs: []TypeInfo.Definition) bool { for (defs) |def| { if (std.mem.eql(u8, def.name, name)) { return true; } } return false; } fn pad(count: usize, char: u8) void { var i: u32 = 0; while (i < count) : (i += 1) { warn("{c}", char); } } fn rightJustified(width: usize, comptime fmt: []const u8, args: ...) !void { var buffer: [40]u8 = undefined; var str = try bufPrint(buffer[0..], fmt, args); if (width > str.len) { pad(width - str.len, ' '); } warn("{}", str[0..]); } fn leftJustified(width: usize, comptime fmt: []const u8, args: ...) 
!void { var buffer: [40]u8 = undefined; var str = try bufPrint(buffer[0..], fmt, args); warn("{}", str[0..]); if (width > str.len) { pad(width - str.len, ' '); } } fn report(pSelf: *Self, result: Result) !void { try leftJustified(22, "{s}", pSelf.name); try rightJustified(14, "{}", result.iterations * result.inner_iterations); try rightJustified(12, "{.3} s", @intToFloat(f64, result.run_time_ns)/@intToFloat(f64, ns_per_s)); try rightJustified(18, "{.3} ns/op", @intToFloat(f64, result.run_time_ns)/ (@intToFloat(f64, result.iterations) * @intToFloat(f64, result.inner_iterations))); } fn reportStats(pSelf: *Self, results: ArrayList(Result)) !void { // Compute sum var sum: f64 = 0; for (results.toSlice()) |result, i| { sum += @intToFloat(f64, result.run_time_ns); } try pSelf.reportStatsMean(sum, pSelf.results); warn(" mean\n"); try pSelf.reportStatsMedian(sum, pSelf.results); warn(" median\n"); try pSelf.reportStatsStdDev(sum, pSelf.results); warn(" stddev\n"); } fn reportStatsMean(pSelf: *Self, sum: f64, results: ArrayList(Result)) !void { try pSelf.report(Result { .run_time_ns = @floatToInt(u64, sum / @intToFloat(f64, results.len)), .iterations = results.items[0].iterations, .inner_iterations = results.items[0].inner_iterations, }); } fn reportStatsMedian(pSelf: *Self, sum: f64, results: ArrayList(Result)) !void { if (results.len < 3) { try pSelf.reportStatsMean(sum, results); return; } try pSelf.report(Result { .run_time_ns = @floatToInt(u64, try pSelf.statsMedian(sum, results)), .iterations = results.items[0].iterations, .inner_iterations = results.items[0].inner_iterations, }); } fn reportStatsStdDev(pSelf: *Self, sum: f64, results: ArrayList(Result)) !void { var std_dev: f64 = 0; if (results.len <= 1) { std_dev = 0; } else { std_dev = pSelf.statsStdDev(sum, results); } try pSelf.report(Result { .run_time_ns = @floatToInt(u64, std_dev), .iterations = results.items[0].iterations, .inner_iterations = results.items[0].inner_iterations, }); } fn statsMean(pSelf: *Self, sum: f64, results: ArrayList(Result)) f64 { return sum / @intToFloat(f64, results.len); } fn statsMedian(pSelf: *Self, sum: f64, results: ArrayList(Result)) !f64 { if (results.len < 3) { return pSelf.statsMean(sum, results); } // Make a copy and sort it var copy = ArrayList(Result).init(pSelf.pAllocator); for (results.toSlice()) |result| { try copy.append(result); } std.sort.sort(Result, copy.toSlice(), Result.asc); // Determine the median var center = copy.len / 2; var median: f64 = undefined; if ((copy.len & 1) == 1) { // Odd number of items, use center median = @intToFloat(f64, copy.items[center].run_time_ns); } else { // Even number of items, use average of items[center] and items[center - 1] median = @intToFloat(f64, copy.items[center-1].run_time_ns + copy.items[center].run_time_ns) / 2; } return median; } fn statsStdDev(pSelf: *Self, sum: f64, results: ArrayList(Result)) f64 { var std_dev: f64 = 0; if (results.len <= 1) { std_dev = 0; } else { var sum_of_squares: f64 = 0; var mean: f64 = pSelf.statsMean(sum, results); for (results.toSlice()) |result| { var diff = @intToFloat(f64, result.run_time_ns) - mean; var square = diff * diff; sum_of_squares += square; } std_dev = @sqrt(f64, sum_of_squares / @intToFloat(f64, results.len - 1)); } return std_dev; } }; /// Run a benchmark that needs special init handling before running it benchmark test "BmRun.1" { // Since this is a test print a \n before we run warn("\n"); const X = struct { const Self = @This(); i: u64, initial_i: u64, init_count: u64, setup_count: u64, 
benchmark_count: u64, tearDown_count: u64, fn init(initial_i: u64) Self { return Self { .i = 0, .initial_i = initial_i, .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } fn setup(pSelf: *Self) void { pSelf.i = pSelf.initial_i; pSelf.setup_count += 1; } fn benchmark(pSelf: *Self) void { var pI: *volatile u64 = &pSelf.i; pI.* += 1; pSelf.benchmark_count += 1; } fn tearDown(pSelf: *Self) void { pSelf.tearDown_count += 1; } }; // Create and initialize outside of Benchmark const initial_i: u64 = 123; var x = X.init(initial_i); // Use Benchmark.run to run it var bm = Benchmark.init("BmRun", std.debug.global_allocator); bm.repetitions = 2; try bm.run(&x); assert(x.i == (initial_i + bm.results.items[0].iterations)); assert(x.i == (initial_i + bm.results.items[1].iterations)); assert(x.init_count == 1); assert(x.setup_count - bm.pre_run_results.len == 2); assert(x.benchmark_count > 1000000); assert(x.tearDown_count - bm.pre_run_results.len == 2); } /// Run a benchmark that needs special init handling before running it benchmark test "BmRun.inner_iterations" { // Since this is a test print a \n before we run warn("\n"); const X = struct { const Self = @This(); i: u64, initial_i: u64, init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, inner_iterations: u64, fn init(initial_i: u64, inner_iterations: u64) Self { return Self { .i = 0, .initial_i = initial_i, .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, .inner_iterations = inner_iterations, }; } fn setup(pSelf: *Self) void { pSelf.i = pSelf.initial_i; pSelf.setup_count += 1; } fn benchmark(pSelf: *Self) u64 { var count = pSelf.inner_iterations; while(count > 0) : (count -= 1) { var pI: *volatile u64 = &pSelf.i; pI.* += 1; } pSelf.benchmark_count += 1; return pSelf.inner_iterations; } fn tearDown(pSelf: *Self) void { pSelf.tearDown_count += 1; } }; // Create and initialize outside of Benchmark const initial_i: u64 = 123; var x = X.init(initial_i, 100); // Use Benchmark.run to run it var bm = Benchmark.init("BmRun", std.debug.global_allocator); bm.repetitions = 2; try bm.run(&x); assert(x.i == (initial_i + (bm.results.items[0].iterations * x.inner_iterations))); assert(x.i == (initial_i + (bm.results.items[1].iterations * x.inner_iterations))); assert(x.init_count == 1); assert(x.setup_count - bm.pre_run_results.len == 2); assert(x.benchmark_count > 1000000); assert(x.tearDown_count - bm.pre_run_results.len == 2); } test "BmSimple.cfence" { // Since this is a test print a \n before we run warn("\n"); // Create an instance of Benchmark and run var bm = Benchmark.init("BmSimple.cfence", std.debug.global_allocator); _ = try bm.createRun(struct { fn benchmark() void { cfence(); } }); } test "BmSimple.lfence" { // Since this is a test print a \n before we run warn("\n"); // Create an instance of Benchmark and run var bm = Benchmark.init("BmSimple.lfence", std.debug.global_allocator); _ = try bm.createRun(struct { fn benchmark() void { lfence(); } }); } test "BmSimple.sfence" { // Since this is a test print a \n before we run warn("\n"); // Create an instance of Benchmark and run var bm = Benchmark.init("BmSimple.sfence", std.debug.global_allocator); _ = try bm.createRun(struct { fn benchmark() void { sfence(); } }); } test "BmSimple.mfence" { // Since this is a test print a \n before we run warn("\n"); // Create an instance of Benchmark and run var bm = Benchmark.init("BmSimple.mfence", std.debug.global_allocator); _ = try bm.createRun(struct { fn benchmark() void { 
mfence(); } }); } // All of the BmPoor.xxx tests endup with no loops at all and take zero time, // but are "correct" do test that combinations of init, setup and tearDown work. // // Here is a sample of the code from a release-fast build, NOTE there is no loop at all: // self.start_time = @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec); // 20d6c7: c5 f9 6f 8c 24 90 00 vmovdqa xmm1,XMMWORD PTR [rsp+0x90] // 20d6ce: 00 00 // var ts: posix.timespec = undefined; // 20d6d0: c5 f9 7f 84 24 90 00 vmovdqa XMMWORD PTR [rsp+0x90],xmm0 // 20d6d7: 00 00 test "BmPoor.init" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmPoor.init", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() Self { return Self { .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } // Called on every iteration of the benchmark, may return void or !void fn benchmark(pSelf: *Self) void { pSelf.benchmark_count += 1; } }; var bmSelf = try bm.createRun(BmSelf); assert(bmSelf.init_count == 1); assert(bmSelf.setup_count == 0); assert(bmSelf.benchmark_count > 1000000); assert(bmSelf.tearDown_count == 0); } test "BmPoor.init.setup" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmPoor.init.setup", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() Self { return Self { .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } fn setup(pSelf: *Self) void { pSelf.setup_count += 1; } fn benchmark(pSelf: *Self) void { pSelf.benchmark_count += 1; } }; bm.repetitions = 3; var bmSelf = try bm.createRun(BmSelf); assert(bmSelf.init_count == 1); assert(bmSelf.setup_count - bm.pre_run_results.len == 3); assert(bmSelf.benchmark_count > 1000000); assert(bmSelf.tearDown_count == 0); } test "BmPoor.init.setup.tearDown" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmPoor.init.setup.tearDown", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() Self { return Self { .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } fn setup(pSelf: *Self) void { pSelf.setup_count += 1; } fn benchmark(pSelf: *Self) void { pSelf.benchmark_count += 1; } fn tearDown(pSelf: *Self) void { pSelf.tearDown_count += 1; } }; bm.repetitions = 3; var bmSelf = try bm.createRun(BmSelf); assert(bmSelf.init_count == 1); assert(bmSelf.setup_count - bm.pre_run_results.len == 3); assert(bmSelf.benchmark_count > 1000000); assert(bmSelf.tearDown_count - bm.pre_run_results.len == 3); } /// The inner loop is optimized away. 
test "BmPoor.add" { // Our benchmark const BmAdd = struct { const Self = @This(); a: u64, b: u64, r: u64, // Initialize Self fn init() Self { return Self { .a = undefined, .b = undefined, .r = undefined, }; } // Optional setup prior to the first call to Self.benchmark, may return void or !void fn setup(pSelf: *Self) !void { var timer = try Timer.start(); const DefaultPrng = std.rand.DefaultPrng; var prng = DefaultPrng.init(timer.read()); pSelf.a = prng.random.scalar(u64); pSelf.b = prng.random.scalar(u64); } fn benchmark(pSelf: *Self) void { pSelf.r = (pSelf.a +% pSelf.b); } // Optional tearDown called after the last call to Self.benchmark, may return void or !void fn tearDown(pSelf: *Self) !void { if (pSelf.r != (u64(pSelf.a) +% u64(pSelf.b))) return error.Failed; } }; // Since this is a test print a \n before we run warn("\n"); // Create an instance of Benchmark, set 10 iterations and run var bm = Benchmark.init("BmAdd", std.debug.global_allocator); bm.repetitions = 10; _ = try bm.createRun(BmAdd); } // Measure @atomicRmw Add operation test "Bm.AtomicRmwOp.Add" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("Bm.AtomicRmwOp.Add", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); benchmark_count: u64, fn init() Self { return Self { .benchmark_count = 0, }; } // This measures the cost of the atomic rmw add with loop unrolling: // self.start_time = @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec); // 210811: c5 f9 6f 8c 24 90 00 vmovdqa xmm1,XMMWORD PTR [rsp+0x90] // 210818: 00 00 // while (iter > 0) : (iter -= 1) { // 21081a: 48 85 db test rbx,rbx // 21081d: 0f 84 93 00 00 00 je 2108b6 <Bm.AtomicRmwOp.Add+0x266> // _ = @atomicRmw(u64, &pSelf.benchmark_count, AtomicRmwOp.Add, 1, AtomicOrder.Release); // 210823: 48 8d 4b ff lea rcx,[rbx-0x1] // 210827: 48 89 da mov rdx,rbx // 21082a: 48 89 d8 mov rax,rbx // 21082d: 48 83 e2 07 and rdx,0x7 // 210831: 74 21 je 210854 <Bm.AtomicRmwOp.Add+0x204> // 210833: 48 f7 da neg rdx // 210836: 48 89 d8 mov rax,rbx // 210839: 0f 1f 80 00 00 00 00 nop DWORD PTR [rax+0x0] // 210840: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 210847: 00 00 00 // while (iter > 0) : (iter -= 1) { // 21084a: 48 83 c0 ff add rax,0xffffffffffffffff // 21084e: 48 83 c2 01 add rdx,0x1 // 210852: 75 ec jne 210840 <Bm.AtomicRmwOp.Add+0x1f0> // _ = @atomicRmw(u64, &pSelf.benchmark_count, AtomicRmwOp.Add, 1, AtomicOrder.Release); // 210854: 48 83 f9 07 cmp rcx,0x7 // 210858: 72 5c jb 2108b6 <Bm.AtomicRmwOp.Add+0x266> // 21085a: 66 0f 1f 44 00 00 nop WORD PTR [rax+rax*1+0x0] // 210860: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 210867: 00 00 00 // 21086a: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 210871: 00 00 00 // 210874: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 21087b: 00 00 00 // 21087e: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 210885: 00 00 00 // 210888: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 21088f: 00 00 00 // 210892: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 210899: 00 00 00 // 21089c: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 2108a3: 00 00 00 // 2108a6: f0 48 81 44 24 08 01 lock add QWORD PTR [rsp+0x8],0x1 // 2108ad: 00 00 00 // while (iter > 0) : (iter -= 1) { // 2108b0: 48 83 c0 f8 add rax,0xfffffffffffffff8 // 2108b4: 75 aa jne 210860 <Bm.AtomicRmwOp.Add+0x210> // var ts: posix.timespec = undefined; // 2108b6: c5 f9 7f 84 24 
90 00 vmovdqa XMMWORD PTR [rsp+0x90],xmm0 // 2108bd: 00 00 fn benchmark(pSelf: *Self) void { _ = @atomicRmw(u64, &pSelf.benchmark_count, AtomicRmwOp.Add, 1, AtomicOrder.Release); } }; bm.repetitions = 10; var bmSelf = try bm.createRun(BmSelf); } /// Use volatile to actually measure r = a +% b test "Bm.volatile.add" { // Our benchmark const BmAdd = struct { const Self = @This(); a: u64, b: u64, r: u64, // Initialize Self fn init() Self { return Self { .a = undefined, .b = undefined, .r = undefined, }; } // Optional setup prior to the first call to Self.benchmark, may return void or !void fn setup(pSelf: *Self) !void { var timer = try Timer.start(); const DefaultPrng = std.rand.DefaultPrng; var prng = DefaultPrng.init(timer.read()); pSelf.a = prng.random.scalar(u64); pSelf.b = prng.random.scalar(u64); } // Using volatile we actually measure the cost of loading, adding and storing: // self.start_time = @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec); // 211559: c5 f9 6f 54 24 30 vmovdqa xmm2,XMMWORD PTR [rsp+0x30] // while (iter > 0) : (iter -= 1) { // 21155f: 48 85 db test rbx,rbx // 211562: 0f 84 b6 00 00 00 je 21161e <Bm.volatile.add+0x42e> // pR.* = (pA.* +% pB.*); // 211568: 48 8d 4b ff lea rcx,[rbx-0x1] // 21156c: 48 89 da mov rdx,rbx // 21156f: 48 89 d8 mov rax,rbx // 211572: 48 83 e2 07 and rdx,0x7 // 211576: 74 21 je 211599 <Bm.volatile.add+0x3a9> // 211578: 48 f7 da neg rdx // 21157b: 48 89 d8 mov rax,rbx // 21157e: 66 90 xchg ax,ax // 211580: 48 8b 74 24 10 mov rsi,QWORD PTR [rsp+0x10] // 211585: 48 03 74 24 08 add rsi,QWORD PTR [rsp+0x8] // 21158a: 48 89 74 24 18 mov QWORD PTR [rsp+0x18],rsi // while (iter > 0) : (iter -= 1) { // 21158f: 48 83 c0 ff add rax,0xffffffffffffffff // 211593: 48 83 c2 01 add rdx,0x1 // 211597: 75 e7 jne 211580 <Bm.volatile.add+0x390> // pR.* = (pA.* +% pB.*); // 211599: 48 83 f9 07 cmp rcx,0x7 // 21159d: 72 7f jb 21161e <Bm.volatile.add+0x42e> // 21159f: 90 nop // 2115a0: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 2115a5: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // 2115aa: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // 2115af: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 2115b4: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // 2115b9: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // 2115be: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 2115c3: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // 2115c8: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // 2115cd: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 2115d2: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // 2115d7: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // 2115dc: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 2115e1: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // 2115e6: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // 2115eb: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 2115f0: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // 2115f5: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // 2115fa: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 2115ff: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // 211604: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // 211609: 48 8b 4c 24 10 mov rcx,QWORD PTR [rsp+0x10] // 21160e: 48 03 4c 24 08 add rcx,QWORD PTR [rsp+0x8] // while (iter > 0) : (iter -= 1) { // 211613: 48 83 c0 f8 add rax,0xfffffffffffffff8 // pR.* = (pA.* +% pB.*); // 211617: 48 89 4c 24 18 mov QWORD PTR [rsp+0x18],rcx // while (iter > 0) : (iter -= 1) { // 21161c: 75 82 jne 2115a0 <Bm.volatile.add+0x3b0> // var ts: posix.timespec = undefined; // 21161e: c5 f8 29 4c 24 30 vmovaps XMMWORD PTR 
[rsp+0x30],xmm1 fn benchmark(pSelf: *Self) void { var pA: *volatile u64 = &pSelf.a; var pB: *volatile u64 = &pSelf.b; var pR: *volatile u64 = &pSelf.r; pR.* = (pA.* +% pB.*); } // Optional tearDown called after the last call to Self.benchmark, may return void or !void fn tearDown(pSelf: *Self) !void { if (pSelf.r != (u64(pSelf.a) +% u64(pSelf.b))) return error.Failed; } }; // Since this is a test print a \n before we run warn("\n"); // Create an instance of Benchmark, set 10 iterations and run var bm = Benchmark.init("Bm.Add", std.debug.global_allocator); bm.repetitions = 10; _ = try bm.createRun(BmAdd); } test "BmError.benchmark" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark() can return an error var bm = Benchmark.init("BmNoSelf.error", std.debug.global_allocator); assertError(bm.createRun(struct { fn benchmark() error!void { return error.TestError; } }), error.TestError); } test "BmError.benchmark.pSelf" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmError.benchmark.pSelf", std.debug.global_allocator); assertError(bm.createRun(struct { const Self = @This(); // Called on every iteration of the benchmark, may return void or !void fn benchmark(pSelf: *Self) error!void { return error.BenchmarkError; } }), error.BenchmarkError); } test "BmError.init_error.setup.tearDown" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmError.init_error.setup.tearDown", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() !Self { return error.InitError; } fn setup(pSelf: *Self) void { pSelf.setup_count += 1; } // Called on every iteration of the benchmark, may return void or !void fn benchmark(pSelf: *Self) void { pSelf.benchmark_count += 1; } fn tearDown(pSelf: *Self) void { pSelf.tearDown_count += 1; } }; assertError(bm.createRun(BmSelf), error.InitError); } test "BmError.init.setup_error.tearDown" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmError.init.setup_error.tearDown", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() Self { return Self { .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } fn setup(pSelf: *Self) !void { pSelf.setup_count += 1; return error.SetupError; } // Called on every iteration of the benchmark, may return void or !void fn benchmark(pSelf: *Self) void { pSelf.benchmark_count += 1; } fn tearDown(pSelf: *Self) void { pSelf.tearDown_count += 1; } }; assertError(bm.createRun(BmSelf), error.SetupError); } test "BmError.init.setup.tearDown_error" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmError.init.setup.tearDown_error", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() Self { return Self { .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } fn setup(pSelf: *Self) void { pSelf.setup_count += 1; } // Called on every iteration of the benchmark, may return void or !void fn benchmark(pSelf: *Self) 
void { pSelf.benchmark_count += 1; } fn tearDown(pSelf: *Self) !void { return error.TearDownError; } }; assertError(bm.createRun(BmSelf), error.TearDownError); } test "BmError.init.setup.tearDown.benchmark_error" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmError.init.setup.tearDown.benchmark_error", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() Self { return Self { .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } fn setup(pSelf: *Self) void { pSelf.setup_count += 1; } // Called on every iteration of the benchmark, may return void or !void fn benchmark(pSelf: *Self) error!void { return error.BenchmarkError; } fn tearDown(pSelf: *Self) void { pSelf.tearDown_count += 1; } }; assertError(bm.createRun(BmSelf), error.BenchmarkError); } test "BmError.init.setup.tearDown.benchmark_error!u64" { // Since this is a test print a \n before we run warn("\n"); // Test fn benchmark(pSelf) can return an error var bm = Benchmark.init("BmError.init.setup.tearDown.benchmark_error", std.debug.global_allocator); const BmSelf = struct { const Self = @This(); init_count: u64, setup_count: u64, benchmark_count: u64, tearDown_count: u64, fn init() Self { return Self { .init_count = 1, .setup_count = 0, .benchmark_count = 0, .tearDown_count = 0, }; } fn setup(pSelf: *Self) void { pSelf.setup_count += 1; } // Called on every iteration of the benchmark, may return void or !void fn benchmark(pSelf: *Self) error!u64 { return error.BenchmarkError; } fn tearDown(pSelf: *Self) void { pSelf.tearDown_count += 1; } }; assertError(bm.createRun(BmSelf), error.BenchmarkError); }
benchmark.zig
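A minimal usage sketch for the Benchmark API in benchmark.zig above, modeled on its own BmSimple tests. It assumes benchmark.zig publicly exposes `Benchmark`; the import path and the benchmark body are illustrative, and the API style (varargs warn, std.debug.global_allocator) is tied to the old Zig version this file targets.

const std = @import("std");
const Benchmark = @import("benchmark.zig").Benchmark; // assumed import path

test "example: volatile increment" {
    var bm = Benchmark.init("example.inc", std.debug.global_allocator);
    bm.repetitions = 3;
    // A struct with only `fn benchmark()` is enough; init/setup/tearDown are optional.
    _ = try bm.createRun(struct {
        fn benchmark() void {
            var x: u64 = 0;
            var p: *volatile u64 = &x; // volatile keeps the add from being optimized away
            p.* = p.* +% 1;
        }
    });
}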
const std = @import("std"); const assert = std.debug.assert; const mem = std.mem; const ArrayList = std.ArrayList; const json = std.json; const utf8 = @import("../unicode/unicode.zig"); const log = @import("log.zig"); const Token = @import("token.zig").Token; const TokenRule = @import("token.zig").TokenRule; const TokenId = @import("token.zig").TokenId; const atxRules = @import("token_atx_heading.zig"); const inlineRules = @import("token_inline.zig"); pub const Lexer = struct { view: utf8.Utf8View, index: u32, rules: ArrayList(TokenRule), tokens: ArrayList(Token), tokenIndex: u64, lineNumber: u32, allocator: *mem.Allocator, pub fn init(allocator: *mem.Allocator, input: []const u8) !Lexer { // Skip the UTF-8 BOM if present var t = Lexer{ .view = try utf8.Utf8View.init(input), .index = 0, .allocator = allocator, .rules = ArrayList(TokenRule).init(allocator), .tokens = ArrayList(Token).init(allocator), .tokenIndex = 0, .lineNumber = 1, }; try t.registerRule(ruleWhitespace); try t.registerRule(atxRules.ruleAtxHeader); try t.registerRule(inlineRules.ruleInline); try t.registerRule(ruleEOF); return t; } pub fn deinit(l: *Lexer) void { l.rules.deinit(); l.tokens.deinit(); } pub fn registerRule(l: *Lexer, rule: TokenRule) !void { try l.rules.append(rule); } /// Get the next token from the input. pub fn next(l: *Lexer) !?Token { for (l.rules.items) |rule| { if (try rule(l)) |v| { return v; } } return null; } /// Peek at the next token. pub fn peekNext(l: *Lexer) !?Token { var indexBefore = l.index; var tokenIndexBefore = l.tokenIndex; var pNext = try l.next(); l.index = indexBefore; l.tokenIndex = tokenIndexBefore; return pNext; } /// Gets a codepoint at index from the input. Returns null if index exceeds the length of the view. pub fn getRune(l: *Lexer, index: u32) ?[]const u8 { return l.view.index(index); } pub fn debugPrintToken(l: *Lexer, msg: []const u8, token: anytype) !void { // TODO: only stringify json if debug logging var buf = std.ArrayList(u8).init(l.allocator); defer buf.deinit(); try json.stringify(token, json.StringifyOptions{ // This works differently than normal StringifyOptions for Tokens, separator does not // add \n. 
.whitespace = .{ .indent = .{ .Space = 1 }, .separator = true, }, }, buf.outStream()); log.Debugf("{}: {}\n", .{ msg, buf.items }); } pub fn emit(l: *Lexer, tok: TokenId, startOffset: u32, endOffset: u32) !?Token { // log.Debugf("start: {} end: {}\n", .{ start, end }); var str = l.view.slice(startOffset, endOffset); // check for diacritic log.Debugf("str: '{Z}'\n", .{str.bytes}); var nEndOffset: u32 = endOffset - 1; if ((endOffset - startOffset) == 1 or nEndOffset < startOffset) { nEndOffset = startOffset; } // check if token already emitted if (l.tokens.items.len > l.tokenIndex) { // try l.debugPrintToken("lexer last token", l.tokens.items[l.tokens.items.len - 1]); var lastTok = l.tokens.items[l.tokens.items.len - 1]; if (lastTok.ID == tok and lastTok.startOffset == startOffset and lastTok.endOffset == nEndOffset) { log.Debug("Token already encountered"); l.tokenIndex = l.tokens.items.len - 1; l.index = endOffset; return lastTok; } } var column: u32 = l.offsetToColumn(startOffset); if (tok == TokenId.EOF) { column = l.tokens.items[l.tokens.items.len - 1].column; l.lineNumber -= 1; } var newTok = Token{ .ID = tok, .startOffset = startOffset, .endOffset = nEndOffset, .string = str.bytes, .lineNumber = l.lineNumber, .column = column, }; try l.debugPrintToken("lexer emit", &newTok); try l.tokens.append(newTok); l.index = endOffset; l.tokenIndex = l.tokens.items.len - 1; if (mem.eql(u8, str.bytes, "\n")) { l.lineNumber += 1; } return newTok; } /// Returns the column number of offset translated from the start of the line pub fn offsetToColumn(l: *Lexer, offset: u32) u32 { var i: u32 = offset; var start: u32 = 1; var char: []const u8 = ""; var foundLastNewline: bool = false; if (offset > 0) { i = offset - 1; } // Get the last newline starting from offset while (!mem.eql(u8, char, "\n")) : (i -= 1) { if (i == 0) { break; } char = l.view.index(i).?; start = i; } if (mem.eql(u8, char, "\n")) { foundLastNewline = true; start = i + 1; } char = ""; i = offset; // Get the next newline starting from offset while (!mem.eql(u8, char, "\n")) : (i += 1) { if (i == l.view.len) { break; } char = l.view.index(i).?; } // only one line of input or on the first line of input if (!foundLastNewline) { return offset + 1; } return offset - start; } /// Checks for a single whitespace character. Returns true if char is a space character. pub fn isSpace(l: *Lexer, char: u8) bool { if (char == '\u{0020}') { return true; } return false; } /// Checks for all the whitespace characters. Returns true if the rune is a whitespace. pub fn isWhitespace(l: *Lexer, rune: []const u8) bool { // A whitespace character is a space (U+0020), tab (U+0009), newline (U+000A), line tabulation (U+000B), form feed // (U+000C), or carriage return (U+000D). const runes = &[_][]const u8{ "\u{0020}", "\u{0009}", "\u{000A}", "\u{000B}", "\u{000C}", "\u{000D}", }; for (runes) |itrune| if (mem.eql(u8, itrune, rune)) return true; return false; } pub fn isPunctuation(l: *Lexer, rune: []const u8) bool { // Check for ASCII punctuation characters... // // FIXME: Check against the unicode punctuation tables... there isn't a Zig library that does this that I have found. // // A punctuation character is an ASCII punctuation character or anything in the general Unicode categories Pc, Pd, // Pe, Pf, Pi, Po, or Ps. 
const runes = &[_][]const u8{ "!", "\"", "#", "$", "%", "&", "\'", "(", ")", "*", "+", ",", "-", ".", "/", ":", ";", "<", "=", ">", "?", "@", "[", "\\", "]", "^", "_", "`", "{", "|", "}", "~", }; for (runes) |itrune| if (mem.eql(u8, itrune, rune)) return true; return false; } pub fn isLetter(l: *Lexer, rune: []const u8) bool { // TODO: make this more robust by using unicode character sets if (!l.isPunctuation(rune) and !l.isWhitespace(rune)) { return true; } return false; } /// Get the last token emitted, exclude peek tokens pub fn lastToken(l: *Lexer) Token { return l.tokens.items[l.tokenIndex]; } /// Skip the next token pub fn skipNext(l: *Lexer) !void { _ = try l.next(); } }; /// Get all the whitespace characters greedly. pub fn ruleWhitespace(t: *Lexer) !?Token { var index: u32 = t.index; log.Debug("in ruleWhitespace"); while (t.getRune(index)) |val| { if (t.isWhitespace(val)) { index += 1; if (mem.eql(u8, "\n", val)) { break; } } else { log.Debugf("index: {}\n", .{index}); break; } } log.Debugf("t.index: {} index: {}\n", .{ t.index, index }); if (index > t.index) { return t.emit(.Whitespace, t.index, index); } return null; } /// Return EOF at the end of the input pub fn ruleEOF(t: *Lexer) !?Token { if (t.index == t.view.len) { return t.emit(.EOF, t.index, t.index); } return null; } test "lexer: peekNext " { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); const allocator = &arena.allocator; const input = "# foo"; log.Debugf("input:\n{}-- END OF TEST --\n", .{input}); var t = try Lexer.init(allocator, input); if (try t.next()) |tok| { assert(tok.ID == TokenId.AtxHeader); } // two consecutive peeks should return the same token if (try t.peekNext()) |tok| { assert(tok.ID == TokenId.Whitespace); } if (try t.peekNext()) |tok| { assert(tok.ID == TokenId.Whitespace); } // The last token does not include peek'd tokens assert(t.lastToken().ID == TokenId.AtxHeader); if (try t.next()) |tok| { assert(tok.ID == TokenId.Whitespace); } } test "lexer: offsetToColumn" { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); const allocator = &arena.allocator; const input = "foo\nbar \t\nbaz"; var t = try Lexer.init(allocator, input); _ = try t.next(); _ = try t.next(); if (try t.next()) |tok| { assert(tok.column == 1); } _ = try t.next(); _ = try t.next(); _ = try t.next(); if (try t.next()) |tok| { assert(tok.column == 1); } }
src/md/lexer.zig
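A usage sketch for the lexer above, mirroring its "lexer: peekNext" test; the import paths are assumptions about how src/md is laid out.

const std = @import("std");
const Lexer = @import("lexer.zig").Lexer; // assumed path
const TokenId = @import("token.zig").TokenId; // assumed path

test "example: lex an ATX heading" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    var t = try Lexer.init(&arena.allocator, "# foo");
    if (try t.next()) |tok| std.debug.assert(tok.ID == TokenId.AtxHeader);
    // peekNext() restores index/tokenIndex, so lastToken() is still the heading.
    if (try t.peekNext()) |tok| std.debug.assert(tok.ID == TokenId.Whitespace);
    std.debug.assert(t.lastToken().ID == TokenId.AtxHeader);
}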
const builtin = @import("builtin"); const std = @import("std"); const os = @import("windows.zig"); const HRESULT = os.HRESULT; pub const FACTORY_TYPE = extern enum { SHARED = 0, ISOLATED = 1, }; pub const FONT_WEIGHT = extern enum { THIN = 100, EXTRA_LIGHT = 200, LIGHT = 300, SEMI_LIGHT = 350, NORMAL = 400, MEDIUM = 500, SEMI_BOLD = 600, BOLD = 700, EXTRA_BOLD = 800, HEAVY = 900, EXTRA_HEAVY = 950, }; pub const FONT_STYLE = extern enum { NORMAL = 0, OBLIQUE = 1, ITALIC = 2, }; pub const FONT_STRETCH = extern enum { UNDEFINED = 0, ULTRA_CONDENSED = 1, EXTRA_CONDENSED = 2, CONDENSED = 3, SEMI_CONDENSED = 4, NORMAL = 5, MEDIUM = 5, SEMI_EXPANDED = 6, EXPANDED = 7, EXTRA_EXPANDED = 8, ULTRA_EXPANDED = 9, }; pub const TEXT_ALIGNMENT = extern enum { LEADING = 0, TRAILING = 1, CENTER = 2, JUSTIFIED = 3, }; pub const PARAGRAPH_ALIGNMENT = extern enum { NEAR = 0, FAR = 1, CENTER = 2, }; pub const IFactory = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // IDWriteFactory GetSystemFontCollection: *c_void, CreateCustomFontCollection: *c_void, RegisterFontCollectionLoader: *c_void, UnregisterFontCollectionLoader: *c_void, CreateFontFileReference: *c_void, CreateCustomFontFileReference: *c_void, CreateFontFace: *c_void, CreateRenderingParams: *c_void, CreateMonitorRenderingParams: *c_void, CreateCustomRenderingParams: *c_void, RegisterFontFileLoader: *c_void, UnregisterFontFileLoader: *c_void, CreateTextFormat: fn ( *Self, os.LPCWSTR, ?*IFontCollection, FONT_WEIGHT, FONT_STYLE, FONT_STRETCH, f32, os.LPCWSTR, **ITextFormat, ) callconv(.C) HRESULT, CreateTypography: *c_void, GetGdiInterop: *c_void, CreateTextLayout: *c_void, CreateGdiCompatibleTextLayout: *c_void, CreateEllipsisTrimmingSign: *c_void, CreateTextAnalyzer: *c_void, CreateNumberSubstitution: *c_void, CreateGlyphRunAnalysis: *c_void, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IFactory.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn CreateTextFormat( self: *T, font_family_name: os.LPCWSTR, font_collection: ?*IFontCollection, font_weight: FONT_WEIGHT, font_style: FONT_STYLE, font_stretch: FONT_STRETCH, font_size: f32, locale_name: os.LPCWSTR, text_format: **ITextFormat, ) HRESULT { return self.vtbl.CreateTextFormat( self, font_family_name, font_collection, font_weight, font_style, font_stretch, font_size, locale_name, text_format, ); } }; } }; pub const IFontCollection = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // IDWriteFontCollection GetFontFamilyCount: *c_void, GetFontFamily: *c_void, FindFamilyName: *c_void, GetFontFromFontFace: *c_void, }, usingnamespace os.IUnknown.Methods(Self); }; pub const ITextFormat = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // IDWriteTextFormat SetTextAlignment: fn (*Self, TEXT_ALIGNMENT) callconv(.C) HRESULT, SetParagraphAlignment: fn (*Self, PARAGRAPH_ALIGNMENT) callconv(.C) HRESULT, SetWordWrapping: *c_void, SetReadingDirection: *c_void, SetFlowDirection: *c_void, SetIncrementalTabStop: *c_void, SetTrimming: 
*c_void, SetLineSpacing: *c_void, GetTextAlignment: *c_void, GetParagraphAlignment: *c_void, GetWordWrapping: *c_void, GetReadingDirection: *c_void, GetFlowDirection: *c_void, GetIncrementalTabStop: *c_void, GetTrimming: *c_void, GetLineSpacing: *c_void, GetFontCollection: *c_void, GetFontFamilyNameLength: *c_void, GetFontFamilyName: *c_void, GetFontWeight: *c_void, GetFontStyle: *c_void, GetFontStretch: *c_void, GetFontSize: *c_void, GetLocaleNameLength: *c_void, GetLocaleName: *c_void, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace ITextFormat.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn SetTextAlignment(self: *T, alignment: TEXT_ALIGNMENT) HRESULT { return self.vtbl.SetTextAlignment(self, alignment); } pub inline fn SetParagraphAlignment(self: *T, alignment: PARAGRAPH_ALIGNMENT) HRESULT { return self.vtbl.SetParagraphAlignment(self, alignment); } }; } }; pub const IID_IFactory = os.GUID{ .Data1 = 0xb859ee5a, .Data2 = 0xd838, .Data3 = 0x4b5b, .Data4 = .{ 0xa2, 0xe8, 0x1a, 0xdc, 0x7d, 0x93, 0xdb, 0x48 }, }; pub var CreateFactory: fn ( FACTORY_TYPE, *const os.GUID, **c_void, ) callconv(.C) HRESULT = undefined; pub fn init() void { var dwrite_dll = os.LoadLibraryA("dwrite.dll").?; CreateFactory = @ptrCast( @TypeOf(CreateFactory), os.kernel32.GetProcAddress(dwrite_dll, "DWriteCreateFactory").?, ); }
src/windows/dwrite.zig
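A sketch of driving these bindings, assuming the file is imported as `dwrite`, that os.LPCWSTR is a sentinel-terminated u16 pointer, and that HRESULT failures may be ignored for illustration; the font name, size and locale are arbitrary.

const std = @import("std");
const dwrite = @import("dwrite.zig"); // assumed path

pub fn createExampleTextFormat() *dwrite.ITextFormat {
    dwrite.init(); // loads dwrite.dll and resolves DWriteCreateFactory
    var factory: *dwrite.IFactory = undefined;
    _ = dwrite.CreateFactory(.SHARED, &dwrite.IID_IFactory, @ptrCast(**c_void, &factory));

    var format: *dwrite.ITextFormat = undefined;
    _ = factory.CreateTextFormat(
        std.unicode.utf8ToUtf16LeStringLiteral("Segoe UI"),
        null, // system font collection
        .NORMAL,
        .NORMAL,
        .NORMAL,
        16.0,
        std.unicode.utf8ToUtf16LeStringLiteral("en-us"),
        &format,
    );
    _ = format.SetTextAlignment(.CENTER);
    return format;
}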
const imgui = @import("../../c.zig"); pub const struct_ImNodesEditorContext = opaque {}; pub const ImNodesEditorContext = struct_ImNodesEditorContext; pub const struct_ImNodesContext = opaque {}; pub const ImNodesContext = struct_ImNodesContext; pub const ImNodesStyleFlags = c_int; pub const struct_ImNodesStyle = extern struct { GridSpacing: f32, NodeCornerRounding: f32, NodePaddingHorizontal: f32, NodePaddingVertical: f32, NodeBorderThickness: f32, LinkThickness: f32, LinkLineSegmentsPerLength: f32, LinkHoverDistance: f32, PinCircleRadius: f32, PinQuadSideLength: f32, PinTriangleSideLength: f32, PinLineThickness: f32, PinHoverRadius: f32, PinOffset: f32, Flags: ImNodesStyleFlags, Colors: [26]c_uint, }; pub const ImNodesStyle = struct_ImNodesStyle; pub const struct_LinkDetachWithModifierClick = extern struct { Modifier: [*c]const bool, }; pub const LinkDetachWithModifierClick = struct_LinkDetachWithModifierClick; pub const struct_EmulateThreeButtonMouse = extern struct { Modifier: [*c]const bool, }; pub const EmulateThreeButtonMouse = struct_EmulateThreeButtonMouse; pub const struct_ImNodesIO = extern struct { EmulateThreeButtonMouse: EmulateThreeButtonMouse, LinkDetachWithModifierClick: LinkDetachWithModifierClick, AltMouseButton: c_int, }; pub const ImNodesIO = struct_ImNodesIO; pub const ImNodesCol = c_int; pub const ImNodesStyleVar = c_int; pub const ImNodesPinShape = c_int; pub const ImNodesAttributeFlags = c_int; pub const ImNodesMiniMapLocation = c_int; pub const ImNodesMiniMapNodeHoveringCallback = ?fn (c_int, ?*anyopaque) callconv(.C) void; pub const ImNodesCol_NodeBackground: c_int = 0; pub const ImNodesCol_NodeBackgroundHovered: c_int = 1; pub const ImNodesCol_NodeBackgroundSelected: c_int = 2; pub const ImNodesCol_NodeOutline: c_int = 3; pub const ImNodesCol_TitleBar: c_int = 4; pub const ImNodesCol_TitleBarHovered: c_int = 5; pub const ImNodesCol_TitleBarSelected: c_int = 6; pub const ImNodesCol_Link: c_int = 7; pub const ImNodesCol_LinkHovered: c_int = 8; pub const ImNodesCol_LinkSelected: c_int = 9; pub const ImNodesCol_Pin: c_int = 10; pub const ImNodesCol_PinHovered: c_int = 11; pub const ImNodesCol_BoxSelector: c_int = 12; pub const ImNodesCol_BoxSelectorOutline: c_int = 13; pub const ImNodesCol_GridBackground: c_int = 14; pub const ImNodesCol_GridLine: c_int = 15; pub const ImNodesCol_MiniMapBackground: c_int = 16; pub const ImNodesCol_MiniMapBackgroundHovered: c_int = 17; pub const ImNodesCol_MiniMapOutline: c_int = 18; pub const ImNodesCol_MiniMapOutlineHovered: c_int = 19; pub const ImNodesCol_MiniMapNodeBackground: c_int = 20; pub const ImNodesCol_MiniMapNodeBackgroundHovered: c_int = 21; pub const ImNodesCol_MiniMapNodeBackgroundSelected: c_int = 22; pub const ImNodesCol_MiniMapNodeOutline: c_int = 23; pub const ImNodesCol_MiniMapLink: c_int = 24; pub const ImNodesCol_MiniMapLinkSelected: c_int = 25; pub const ImNodesCol_COUNT: c_int = 26; pub const ImNodesCol_ = c_uint; pub const ImNodesStyleVar_GridSpacing: c_int = 0; pub const ImNodesStyleVar_NodeCornerRounding: c_int = 1; pub const ImNodesStyleVar_NodePaddingHorizontal: c_int = 2; pub const ImNodesStyleVar_NodePaddingVertical: c_int = 3; pub const ImNodesStyleVar_NodeBorderThickness: c_int = 4; pub const ImNodesStyleVar_LinkThickness: c_int = 5; pub const ImNodesStyleVar_LinkLineSegmentsPerLength: c_int = 6; pub const ImNodesStyleVar_LinkHoverDistance: c_int = 7; pub const ImNodesStyleVar_PinCircleRadius: c_int = 8; pub const ImNodesStyleVar_PinQuadSideLength: c_int = 9; pub const 
ImNodesStyleVar_PinTriangleSideLength: c_int = 10; pub const ImNodesStyleVar_PinLineThickness: c_int = 11; pub const ImNodesStyleVar_PinHoverRadius: c_int = 12; pub const ImNodesStyleVar_PinOffset: c_int = 13; pub const ImNodesStyleVar_ = c_uint; pub const ImNodesStyleFlags_None: c_int = 0; pub const ImNodesStyleFlags_NodeOutline: c_int = 1; pub const ImNodesStyleFlags_GridLines: c_int = 4; pub const ImNodesStyleFlags_ = c_uint; pub const ImNodesPinShape_Circle: c_int = 0; pub const ImNodesPinShape_CircleFilled: c_int = 1; pub const ImNodesPinShape_Triangle: c_int = 2; pub const ImNodesPinShape_TriangleFilled: c_int = 3; pub const ImNodesPinShape_Quad: c_int = 4; pub const ImNodesPinShape_QuadFilled: c_int = 5; pub const ImNodesPinShape_ = c_uint; pub const ImNodesAttributeFlags_None: c_int = 0; pub const ImNodesAttributeFlags_EnableLinkDetachWithDragClick: c_int = 1; pub const ImNodesAttributeFlags_EnableLinkCreationOnSnap: c_int = 2; pub const ImNodesAttributeFlags_ = c_uint; pub const ImNodesMiniMapLocation_BottomLeft: c_int = 0; pub const ImNodesMiniMapLocation_BottomRight: c_int = 1; pub const ImNodesMiniMapLocation_TopLeft: c_int = 2; pub const ImNodesMiniMapLocation_TopRight: c_int = 3; pub const ImNodesMiniMapLocation_ = c_uint; pub extern fn EmulateThreeButtonMouse_EmulateThreeButtonMouse() [*c]EmulateThreeButtonMouse; pub extern fn EmulateThreeButtonMouse_destroy(self: [*c]EmulateThreeButtonMouse) void; pub extern fn LinkDetachWithModifierClick_LinkDetachWithModifierClick() [*c]LinkDetachWithModifierClick; pub extern fn LinkDetachWithModifierClick_destroy(self: [*c]LinkDetachWithModifierClick) void; pub extern fn ImNodesIO_ImNodesIO() [*c]ImNodesIO; pub extern fn ImNodesIO_destroy(self: [*c]ImNodesIO) void; pub extern fn ImNodesStyle_ImNodesStyle() [*c]ImNodesStyle; pub extern fn ImNodesStyle_destroy(self: [*c]ImNodesStyle) void; pub extern fn imnodes_SetImGuiContext(ctx: [*c]imgui.ImGuiContext) void; pub extern fn imnodes_CreateContext() ?*ImNodesContext; pub extern fn imnodes_DestroyContext(ctx: ?*ImNodesContext) void; pub extern fn imnodes_GetCurrentContext() ?*ImNodesContext; pub extern fn imnodes_SetCurrentContext(ctx: ?*ImNodesContext) void; pub extern fn imnodes_EditorContextCreate() ?*ImNodesEditorContext; pub extern fn imnodes_EditorContextFree(noname1: ?*ImNodesEditorContext) void; pub extern fn imnodes_EditorContextSet(noname1: ?*ImNodesEditorContext) void; pub extern fn imnodes_EditorContextGetPanning(pOut: [*c]imgui.ImVec2) void; pub extern fn imnodes_EditorContextResetPanning(pos: imgui.ImVec2) void; pub extern fn imnodes_EditorContextMoveToNode(node_id: c_int) void; pub extern fn imnodes_GetIO() [*c]ImNodesIO; pub extern fn imnodes_GetStyle() [*c]ImNodesStyle; pub extern fn imnodes_StyleColorsDark() void; pub extern fn imnodes_StyleColorsClassic() void; pub extern fn imnodes_StyleColorsLight() void; pub extern fn imnodes_BeginNodeEditor() void; pub extern fn imnodes_EndNodeEditor() void; pub extern fn imnodes_MiniMap(minimap_size_fraction: f32, location: ImNodesMiniMapLocation, node_hovering_callback: ImNodesMiniMapNodeHoveringCallback, node_hovering_callback_data: ?*anyopaque) void; pub extern fn imnodes_PushColorStyle(item: ImNodesCol, color: c_uint) void; pub extern fn imnodes_PopColorStyle() void; pub extern fn imnodes_PushStyleVar(style_item: ImNodesStyleVar, value: f32) void; pub extern fn imnodes_PopStyleVar() void; pub extern fn imnodes_BeginNode(id: c_int) void; pub extern fn imnodes_EndNode() void; pub extern fn imnodes_GetNodeDimensions(pOut: 
[*c]imgui.ImVec2, id: c_int) void; pub extern fn imnodes_BeginNodeTitleBar() void; pub extern fn imnodes_EndNodeTitleBar() void; pub extern fn imnodes_BeginInputAttribute(id: c_int, shape: ImNodesPinShape) void; pub extern fn imnodes_EndInputAttribute() void; pub extern fn imnodes_BeginOutputAttribute(id: c_int, shape: ImNodesPinShape) void; pub extern fn imnodes_EndOutputAttribute() void; pub extern fn imnodes_BeginStaticAttribute(id: c_int) void; pub extern fn imnodes_EndStaticAttribute() void; pub extern fn imnodes_PushAttributeFlag(flag: ImNodesAttributeFlags) void; pub extern fn imnodes_PopAttributeFlag() void; pub extern fn imnodes_Link(id: c_int, start_attribute_id: c_int, end_attribute_id: c_int) void; pub extern fn imnodes_SetNodeDraggable(node_id: c_int, draggable: bool) void; pub extern fn imnodes_SetNodeScreenSpacePos(node_id: c_int, screen_space_pos: imgui.ImVec2) void; pub extern fn imnodes_SetNodeEditorSpacePos(node_id: c_int, editor_space_pos: imgui.ImVec2) void; pub extern fn imnodes_SetNodeGridSpacePos(node_id: c_int, grid_pos: imgui.ImVec2) void; pub extern fn imnodes_GetNodeScreenSpacePos(pOut: [*c]imgui.ImVec2, node_id: c_int) void; pub extern fn imnodes_GetNodeEditorSpacePos(pOut: [*c]imgui.ImVec2, node_id: c_int) void; pub extern fn imnodes_GetNodeGridSpacePos(pOut: [*c]imgui.ImVec2, node_id: c_int) void; pub extern fn imnodes_IsEditorHovered() bool; pub extern fn imnodes_IsNodeHovered(node_id: [*c]c_int) bool; pub extern fn imnodes_IsLinkHovered(link_id: [*c]c_int) bool; pub extern fn imnodes_IsPinHovered(attribute_id: [*c]c_int) bool; pub extern fn imnodes_NumSelectedNodes() c_int; pub extern fn imnodes_NumSelectedLinks() c_int; pub extern fn imnodes_GetSelectedNodes(node_ids: [*c]c_int) void; pub extern fn imnodes_GetSelectedLinks(link_ids: [*c]c_int) void; pub extern fn imnodes_ClearNodeSelection_Nil() void; pub extern fn imnodes_ClearLinkSelection_Nil() void; pub extern fn imnodes_SelectNode(node_id: c_int) void; pub extern fn imnodes_ClearNodeSelection_Int(node_id: c_int) void; pub extern fn imnodes_IsNodeSelected(node_id: c_int) bool; pub extern fn imnodes_SelectLink(link_id: c_int) void; pub extern fn imnodes_ClearLinkSelection_Int(link_id: c_int) void; pub extern fn imnodes_IsLinkSelected(link_id: c_int) bool; pub extern fn imnodes_IsAttributeActive() bool; pub extern fn imnodes_IsAnyAttributeActive(attribute_id: [*c]c_int) bool; pub extern fn imnodes_IsLinkStarted(started_at_attribute_id: [*c]c_int) bool; pub extern fn imnodes_IsLinkDropped(started_at_attribute_id: [*c]c_int, including_detached_links: bool) bool; pub extern fn imnodes_IsLinkCreated_BoolPtr(started_at_attribute_id: [*c]c_int, ended_at_attribute_id: [*c]c_int, created_from_snap: [*c]bool) bool; pub extern fn imnodes_IsLinkCreated_IntPtr(started_at_node_id: [*c]c_int, started_at_attribute_id: [*c]c_int, ended_at_node_id: [*c]c_int, ended_at_attribute_id: [*c]c_int, created_from_snap: [*c]bool) bool; pub extern fn imnodes_IsLinkDestroyed(link_id: [*c]c_int) bool; pub extern fn imnodes_SaveCurrentEditorStateToIniString(data_size: [*c]usize) [*c]const u8; pub extern fn imnodes_SaveEditorStateToIniString(editor: ?*const ImNodesEditorContext, data_size: [*c]usize) [*c]const u8; pub extern fn imnodes_LoadCurrentEditorStateFromIniString(data: [*c]const u8, data_size: usize) void; pub extern fn imnodes_LoadEditorStateFromIniString(editor: ?*ImNodesEditorContext, data: [*c]const u8, data_size: usize) void; pub extern fn imnodes_SaveCurrentEditorStateToIniFile(file_name: [*c]const u8) void; pub extern 
fn imnodes_SaveEditorStateToIniFile(editor: ?*const ImNodesEditorContext, file_name: [*c]const u8) void; pub extern fn imnodes_LoadCurrentEditorStateFromIniFile(file_name: [*c]const u8) void; pub extern fn imnodes_LoadEditorStateFromIniFile(editor: ?*ImNodesEditorContext, file_name: [*c]const u8) void; pub extern fn getIOKeyCtrlPtr(...) [*c]bool;
src/deps/imgui/ext/imnodes/c.zig
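A per-frame sketch of the imnodes bindings above: it assumes a context was created once with imnodes_CreateContext(), that an ImGui frame is active, and that the file is imported as `imn`; the node, pin and link ids are arbitrary.

const imn = @import("c.zig"); // assumed path

pub fn drawTinyGraph() void {
    imn.imnodes_BeginNodeEditor();

    imn.imnodes_BeginNode(1);
    imn.imnodes_BeginNodeTitleBar();
    // (the node title would be drawn here with regular ImGui text calls)
    imn.imnodes_EndNodeTitleBar();
    imn.imnodes_BeginInputAttribute(2, imn.ImNodesPinShape_CircleFilled);
    imn.imnodes_EndInputAttribute();
    imn.imnodes_BeginOutputAttribute(3, imn.ImNodesPinShape_Triangle);
    imn.imnodes_EndOutputAttribute();
    imn.imnodes_EndNode();

    imn.imnodes_Link(4, 3, 2); // link id, start attribute id, end attribute id
    imn.imnodes_EndNodeEditor();
}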
const std = @import("std"); const testing = std.testing; const c_allocator = std.heap.c_allocator; const parse = @import("Parse.zig"); const Files = @import("Files.zig"); const atoi = @import("Atoi.zig").atoi; const ArrayList = std.ArrayList; pub const Config = struct { threads: u32 = 0, // 0 = auto enable_statistics: bool = true, http_port: u16 = 80, https_port: u16 = 443, // Both are null terminated certificate_file_path: [*c]const u8 = "cert.pem", certificate_key_file_path: [*c]const u8 = "key.pem", }; var config_ = Config{}; pub fn config() Config { return config_; } var http_extra_headers: []const u8 = "Strict-Transport-Security: max-age=63072000; includeSubDomains\r\n" ++ "Content-Security-Policy: default-src 'none'; script-src 'self'; img-src 'self'; font-src 'self'; style-src 'self'; frame-ancestors 'none'\r\n" ++ "Cache-Control: public, max-age=604800, immutable\r\n" ++ "Referrer-Policy: strict-origin-when-cross-origin\r\n" ++ "X-Content-Type-Options: nosniff\r\n" ++ "X-Frame-Options: DENY\r\n" ++ "X-XSS-Protection: 1; mode=block\r\n"; pub fn httpExtraHeaders() []const u8 { return http_extra_headers; } fn clamp(i: isize, low: isize, high: isize) isize { if (i < low) { return low; } if (i > high) { return high; } return i; } fn parseLine(line: []u8) !void { if (line.len == 0 or line[0] == '#') { return; } if (parse.caseInsensitiveStartsWith(line, "threads:")) { var value = line["threads:".len..]; parse.skipWhitespace(&value) catch return; config_.threads = @intCast(u32, clamp(try atoi(value), 0, 1024)); } else if (parse.caseInsensitiveStartsWith(line, "statistics:")) { var value = line["statistics:".len..]; parse.skipWhitespace(&value) catch return; if (parse.caseInsensitiveCompareIgnoreEndWhitespace(value, "on")) { config_.enable_statistics = true; } else { config_.enable_statistics = false; } } else if (parse.caseInsensitiveStartsWith(line, "http port:")) { // TODO check http and https port numbers are not the same var value = line["http port:".len..]; parse.skipWhitespace(&value) catch return; config_.http_port = @intCast(u16, clamp(try atoi(value), 0, 65535)); } else if (parse.caseInsensitiveStartsWith(line, "https port:")) { var value = line["https port:".len..]; parse.skipWhitespace(&value) catch return; config_.https_port = @intCast(u16, clamp(try atoi(value), 0, 65535)); } else if (parse.caseInsensitiveStartsWith(line, "cert:")) { var value = line["cert:".len..]; parse.skipWhitespace(&value) catch return; const path = value[0 .. parse.getLineLen(value) catch value.len]; var i: usize = path.len - 1; while (i > 0) : (i -= 1) { if (path[i] != ' ' and path[i] != '\t') { break; } path[i] = 0; } config_.certificate_file_path = value.ptr; // Line was null terminated already } else if (parse.caseInsensitiveStartsWith(line, "cert key:")) { var value = line["cert key:".len..]; parse.skipWhitespace(&value) catch return; const path = value[0 .. 
parse.getLineLen(value) catch value.len]; var i: usize = path.len - 1; while (i > 0) : (i -= 1) { if (path[i] != ' ' and path[i] != '\t') { break; } path[i] = 0; } config_.certificate_key_file_path = value.ptr; } } fn parseConfigFile(s_: []u8) void { var s = s_; while (true) { parse.skipWhitespace(&s) catch return; const line_length = parse.getLineLen(s) catch s.len; if (line_length < 2) { break; } const line = s[0..line_length]; s[line_length] = 0; // Null terminator hack for openSSL file name strings parseLine(line) catch |e| { std.debug.warn("Error parsing configuration file: {} on line: {}\n", .{ e, line[0..std.math.min(512, line.len)], }); }; // Guaranteed to have the extra byte s = s[line_length + 1 ..]; } } fn loadConfigFile() !void { var file = try std.fs.cwd().openFile("settings.conf", std.fs.File.OpenFlags{}); defer file.close(); var size: usize = try file.getEndPos(); // File stays in memory var s: []u8 = try c_allocator.alloc(u8, size + 1); const bytesRead = try file.read(s[0..size]); if (bytesRead != size) { return error.IOError; } s[s.len - 1] = '\n'; // Extra newline needed for null terminator hack parseConfigFile(s); std.debug.warn("Config file loaded\n", .{}); } fn loadHeadersFile() !void { var file = try std.fs.cwd().openFile("extra_headers.http", std.fs.File.OpenFlags{}); defer file.close(); var size: usize = try file.getEndPos(); // File stays in memory var s: []u8 = try c_allocator.alloc(u8, size + 2); const bytesRead = try file.read(s[0..size]); if (bytesRead != size) { return error.IOError; } // Add final newline if (s[size - 1] == '\n') { s = s[0..size]; } else { s[size] = '\r'; s[size + 1] = '\n'; } http_extra_headers = s; std.debug.warn("Config file loaded\n", .{}); } fn parseFilesFile(s_: []const u8) !void { var s = s_; while (true) { parse.skipWhitespace(&s) catch return; const url = (parse.getCSVField(&s) catch return) orelse continue; const file_path = (parse.getCSVField(&s) catch return) orelse continue; const mime_type = parse.getCSVField(&s) catch return; const data = loadFile(file_path) catch |e| { std.debug.warn("Error loading file {}: {}", .{ file_path, e }); break; }; try Files.addStaticFile(url, data, mime_type); } } fn loadFile(path: []const u8) ![]const u8 { var file = try std.fs.cwd().openFile(path, std.fs.File.OpenFlags{}); defer file.close(); var size: usize = try file.getEndPos(); // File stays in memory var s: []u8 = try c_allocator.alloc(u8, size); const bytesRead = try file.read(s); if (bytesRead != size) { return error.IOError; } return s; } fn loadStaticFiles() !void { var file = try std.fs.cwd().openFile("files.csv", std.fs.File.OpenFlags{}); defer file.close(); var size: usize = try file.getEndPos(); // File stays in memory var s: []u8 = try c_allocator.alloc(u8, size + 1); const bytesRead = try file.read(s[0..size]); if (bytesRead != size) { return error.IOError; } s[size] = '\n'; try parseFilesFile(s); } pub fn init() !void { loadConfigFile() catch {}; loadHeadersFile() catch {}; try loadStaticFiles(); } test "Config" { const s = "threads: 4\n\n\nstatistics:on \nhttp port:80\nhttps port:443\ncert:a \ncert key:b \t\n#a\n"; var s2 = try testing.allocator.alloc(u8, s.len); std.mem.copy(u8, s2, s); parseConfigFile(s2); testing.expect(config().threads == 4); testing.expect(config().enable_statistics); testing.expect(config().http_port == 80); testing.expect(config().https_port == 443); testing.expect(config().certificate_file_path[0] == 'a' and config().certificate_file_path[1] == 0); testing.expect(config().certificate_key_file_path[0] == 
'b' and config().certificate_key_file_path[1] == 0); testing.allocator.free(s2); }
src/Config.zig
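For reference, a settings.conf along these lines exercises every key parseLine() above recognizes; the values are placeholders, '#' starts a comment line, and loadStaticFiles() additionally expects a files.csv whose rows give a URL, a file path and a MIME type.

# example settings.conf (values are placeholders)
threads: 4
statistics: on
http port: 8080
https port: 8443
cert: cert.pem
cert key: key.pem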
const std = @import("std"); const curl = @import("curl.zig"); const pacman = @import("pacman.zig"); const Host = "https://aur.archlinux.org/rpc/?v=5"; pub const Snapshot = "https://aur.archlinux.org/cgit/aur.git/snapshot"; pub const RPCRespV5 = struct { version: usize, type: []const u8, resultcount: usize, results: []Info, }; // TODO: Maybe some opportunity to de-dep this pub const RPCSearchRespV5 = struct { version: usize, type: []const u8, resultcount: usize, results: []Search, }; pub const Info = struct { ID: usize, Name: []const u8, PackageBaseID: usize, PackageBase: []const u8, Version: []const u8, Description: ?[]const u8 = null, URL: []const u8, NumVotes: usize, Popularity: f64, OutOfDate: ?i32 = null, // TODO: parse this unixtime Maintainer: ?[]const u8 = null, FirstSubmitted: i32, // TODO: parse this unixtime LastModified: i32, // TODO: parse this unixtime URLPath: []const u8, Depends: ?[][]const u8 = null, MakeDepends: ?[][]const u8 = null, OptDepends: ?[][]const u8 = null, CheckDepends: ?[][]const u8 = null, Conflicts: ?[][]const u8 = null, Provides: ?[][]const u8 = null, Replaces: ?[][]const u8 = null, Groups: ?[][]const u8 = null, License: ?[][]const u8 = null, Keywords: ?[][]const u8 = null, }; pub const Search = struct { ID: usize, Name: []const u8, PackageBaseID: usize, PackageBase: []const u8, Version: []const u8, Description: ?[]const u8 = null, URL: ?[]const u8 = null, NumVotes: usize, Popularity: f64, OutOfDate: ?i32 = null, // TODO: parse this unixtime Maintainer: ?[]const u8 = null, FirstSubmitted: i32, // TODO: parse this unixtime LastModified: i32, // TODO: parse this unixtime URLPath: []const u8, }; pub fn queryAll(allocator: std.mem.Allocator, pkgs: std.StringHashMap(*pacman.Package)) !RPCRespV5 { const uri = try buildInfoQuery(allocator, pkgs); var resp = try curl.get(allocator, uri); @setEvalBranchQuota(100000); var json_resp = std.json.TokenStream.init(resp.items); var result = try std.json.parse(RPCRespV5, &json_resp, std.json.ParseOptions{ .allocator = allocator }); return result; } pub fn search(allocator: std.mem.Allocator, search_name: []const u8) !RPCSearchRespV5 { var uri = std.ArrayList(u8).init(allocator); try uri.appendSlice(Host); try uri.appendSlice("&type=search&by=name&arg="); // TODO: maybe consider opening this up try uri.appendSlice(search_name); var uri_for_curl = try uri.toOwnedSliceSentinel(0); var resp = try curl.get(allocator, uri_for_curl); @setEvalBranchQuota(100000); var json_resp = std.json.TokenStream.init(resp.items); var result = try std.json.parse(RPCSearchRespV5, &json_resp, std.json.ParseOptions{ .allocator = allocator }); return result; } fn buildInfoQuery(allocator: std.mem.Allocator, pkgs: std.StringHashMap(*pacman.Package)) ![*:0]const u8 { var uri = std.ArrayList(u8).init(allocator); try uri.appendSlice(Host); try uri.appendSlice("&type=info"); var pkgs_iter = pkgs.iterator(); while (pkgs_iter.next()) |pkg| { try uri.appendSlice("&arg[]="); var copyKey = try allocator.alloc(u8, pkg.key_ptr.*.len); std.mem.copy(u8, copyKey, pkg.key_ptr.*); try uri.appendSlice(copyKey); defer allocator.free(copyKey); } return try uri.toOwnedSliceSentinel(0); }
src/aur.zig
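A small caller sketch for the AUR RPC helpers above; the import path and package name are placeholders, and errors are simply propagated.

const std = @import("std");
const aur = @import("aur.zig"); // assumed path

pub fn printMatches(allocator: std.mem.Allocator, name: []const u8) !void {
    const resp = try aur.search(allocator, name);
    for (resp.results) |pkg| {
        std.debug.print("{s} {s}\n", .{ pkg.Name, pkg.Version });
    }
}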
const std = @import("std"); const Compiler = @import("Compiler.zig"); const Context = @import("Context.zig"); const Lexer = @import("Lexer.zig"); const Node = @import("Node.zig"); const Parser = @import("Parser.zig"); const Scope = @import("Scope.zig"); const ScopeStack = @import("ScopeStack.zig"); const value = @import("value.zig"); const Value = value.Value; const Vm = @import("Vm.zig"); fn printUsage() !void { std.log.err("Usage: zed <your_program_file.zed> [<data_file_1> <data_file_2> ... <data_file_n>]\n", .{}); return error.InvalidUsage; } pub fn main() anyerror!void { // Allocation //var allocator = std.testing.allocator; const allocator = std.heap.page_allocator; var static_arena = std.heap.ArenaAllocator.init(allocator); defer static_arena.deinit(); const static_allocator = static_arena.allocator(); var tmp_arena = std.heap.ArenaAllocator.init(allocator); var need_tmp_deinit = true; defer if (need_tmp_deinit) tmp_arena.deinit(); // Command line args. var args = try std.process.argsWithAllocator(allocator); _ = args.skip(); // skip program name. // Program file. const program_filename = args.next() orelse return printUsage(); var program_file = try std.fs.cwd().openFile(program_filename, .{}); defer program_file.close(); // Context var ctx = Context{ .filename = program_filename, .src = undefined }; // Frontend var compiled: [5][]const u8 = undefined; if (std.mem.endsWith(u8, program_filename, ".zbc")) { var i: usize = 0; while (i < 5) : (i += 1) { var bytes_len_buf: [2]u8 = undefined; _ = try program_file.readAll(&bytes_len_buf); const bytes_len = std.mem.bytesAsValue(u16, &bytes_len_buf); var bytes_buf = try static_allocator.alloc(u8, bytes_len.*); _ = try program_file.readAll(bytes_buf); compiled[i] = bytes_buf; } // Need program source for error messages. var fn_buf: [1024]u8 = undefined; const idx = std.mem.lastIndexOf(u8, program_filename, ".zbc").?; const src_filename = try std.fmt.bufPrint(&fn_buf, "{s}.zed", .{program_filename[0..idx]}); var src_file = try std.fs.cwd().openFile(src_filename, .{}); defer src_file.close(); ctx.src = try src_file.readToEndAlloc(static_allocator, 1024 * 64); // 64K } else { const program_src = try program_file.readToEndAlloc(static_allocator, 1024 * 64); // 64K ctx.src = program_src; // Lex var lexer = Lexer{ .allocator = tmp_arena.allocator(), .ctx = ctx }; const tokens = try lexer.lex(); // Parse var parser = Parser{ .allocator = tmp_arena.allocator(), .ctx = ctx, .tokens = tokens, }; const program = try parser.parse(); // Backend / Compile to bytecode var compiler = try Compiler.init(tmp_arena.allocator(), ctx); compiled = try compiler.compileProgram(static_allocator, program); tmp_arena.deinit(); need_tmp_deinit = false; } // Program scope stack with global scope var scope_stack = ScopeStack.init(static_allocator); defer scope_stack.deinit(); // Output change check var output = std.ArrayList(u8).init(static_allocator); var prev_output_len: usize = 0; // onInit tmp_arena = std.heap.ArenaAllocator.init(allocator); need_tmp_deinit = true; var inits_vm = try Vm.init( tmp_arena.allocator(), compiled[0], &scope_stack, ctx, &output, ); try inits_vm.run(); tmp_arena.deinit(); need_tmp_deinit = false; // Loop over input files. 
while (args.next()) |filename| { // Filename if (filename.len < 7) { scope_stack.file = value.strToValue(filename); } else { const obj_ptr = try static_allocator.create(value.Object); obj_ptr.* = .{ .string = filename }; scope_stack.file = value.addrToValue(@ptrToInt(obj_ptr)); } // onFile tmp_arena = std.heap.ArenaAllocator.init(allocator); need_tmp_deinit = true; var files_vm = try Vm.init( tmp_arena.allocator(), compiled[1], &scope_stack, ctx, &output, ); try files_vm.run(); tmp_arena.deinit(); need_tmp_deinit = false; // Data file var data_file: std.fs.File = undefined; if (std.mem.eql(u8, filename, "-")) { data_file = std.io.getStdIn(); } else { data_file = try std.fs.cwd().openFile(filename, .{}); } defer data_file.close(); var data_reader = std.io.bufferedReader(data_file.reader()).reader(); // File record numbering scope_stack.frnum = 0; // Input record separator var str_irs = if (value.unboxStr(scope_stack.irs)) |u| std.mem.sliceTo(std.mem.asBytes(&u), 0) else value.asString(scope_stack.irs).?.string; // Loop over records. while (try data_reader.readUntilDelimiterOrEof(&scope_stack.rec_buf, str_irs[0])) |record| : ({ scope_stack.frnum += 1; scope_stack.rnum += 1; }) { // onRec tmp_arena = std.heap.ArenaAllocator.init(allocator); need_tmp_deinit = true; const tmp_allocator = tmp_arena.allocator(); if (record.len < 7) { scope_stack.record = value.strToValue(record); } else { const obj_ptr = try tmp_allocator.create(value.Object); obj_ptr.* = .{ .string = record }; scope_stack.record = value.addrToValue(@ptrToInt(obj_ptr)); } var recs_vm = try Vm.init( tmp_allocator, compiled[2], &scope_stack, ctx, &output, ); try recs_vm.run(); // Loop over fields const str_rec = if (value.unboxStr(scope_stack.record)) |u| std.mem.sliceTo(std.mem.asBytes(&u), 0) else value.asString(scope_stack.record).?.string; const str_ics = if (value.unboxStr(scope_stack.ics)) |u| std.mem.sliceTo(std.mem.asBytes(&u), 0) else value.asString(scope_stack.ics).?.string; var field_iter = std.mem.split(u8, str_rec, str_ics); // Header row? if (scope_stack.header_row != null and scope_stack.header_row.? == scope_stack.frnum) { scope_stack.headers.clearRetainingCapacity(); var field_index: usize = 0; while (field_iter.next()) |field| : (field_index += 1) { const field_copy = try scope_stack.allocator.dupe(u8, field); try scope_stack.headers.put(field_copy, field_index); } continue; // On to next row. } // New record, new fileds. var columns = std.ArrayList(Value).init(tmp_allocator); while (field_iter.next()) |field| { if (field.len < 7) { try columns.append(value.strToValue(field)); } else { const obj_ptr = try tmp_allocator.create(value.Object); obj_ptr.* = .{ .string = field }; try columns.append(value.addrToValue(@ptrToInt(obj_ptr))); } } const obj_ptr = try tmp_allocator.create(value.Object); obj_ptr.* = .{ .list = columns }; scope_stack.columns = value.addrToValue(@ptrToInt(obj_ptr)); // For each record, exec the rules. var rules_vm = try Vm.init( tmp_allocator, compiled[3], &scope_stack, ctx, &output, ); try rules_vm.run(); tmp_arena.deinit(); need_tmp_deinit = false; // Output if (output.items.len != 0 and output.items.len != prev_output_len) { const str_ors = if (value.unboxStr(scope_stack.ors)) |u| std.mem.sliceTo(std.mem.asBytes(&u), 0) else value.asString(scope_stack.ors).?.string; try output.appendSlice(str_ors); } // To know if we have new output. prev_output_len = output.items.len; // Update irs for next iteration. 
str_irs = if (value.unboxStr(scope_stack.irs)) |u| std.mem.sliceTo(std.mem.asBytes(&u), 0) else value.asString(scope_stack.irs).?.string; } } // onExit tmp_arena = std.heap.ArenaAllocator.init(allocator); need_tmp_deinit = true; var exits_vm = try Vm.init( tmp_arena.allocator(), compiled[4], &scope_stack, ctx, &output, ); try exits_vm.run(); tmp_arena.deinit(); need_tmp_deinit = false; // Print the output. _ = try std.io.getStdOut().writer().print("{s}", .{output.items}); } test { _ = @import("Lexer.zig"); _ = @import("Parser.zig"); _ = @import("Compiler.zig"); _ = @import("Vm.zig"); }
src/main.zig
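// The ".zbc" branch in main.zig above reads five length-prefixed bytecode segments
// (onInit, onFile, onRec, rules, onExit). A writer for that same layout might look
// roughly like the sketch below; `writeZbc` is a hypothetical helper name and the
// use of std.fs here is an assumption, not part of the program above.
const std = @import("std");

fn writeZbc(path: []const u8, compiled: [5][]const u8) !void {
    var file = try std.fs.cwd().createFile(path, .{});
    defer file.close();
    var i: usize = 0;
    while (i < 5) : (i += 1) {
        // The reader recovers this prefix with std.mem.bytesAsValue(u16, ...), so it
        // is written in native byte order; each segment is assumed to fit in a u16.
        const len = @intCast(u16, compiled[i].len);
        try file.writeAll(std.mem.asBytes(&len));
        try file.writeAll(compiled[i]);
    }
}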
const std = @import("std"); const builtin = @import("builtin"); pub const pkg = std.build.Pkg{ .name = "uv", .source = .{ .path = srcPath() ++ "/uv.zig" }, }; pub const Options = struct { lib_path: ?[]const u8 = null, }; pub fn addPackage(step: *std.build.LibExeObjStep) void { step.addPackage(pkg); step.addIncludeDir(srcPath() ++ "/vendor/include"); step.addIncludeDir(srcPath() ++ "/"); } pub fn create( b: *std.build.Builder, target: std.zig.CrossTarget, mode: std.builtin.Mode, ) !*std.build.LibExeObjStep { const lib = b.addStaticLibrary("uv", null); lib.setTarget(target); lib.setBuildMode(mode); const alloc = b.allocator; var c_flags = std.ArrayList([]const u8).init(alloc); // From CMakeLists.txt var c_files = std.ArrayList([]const u8).init(alloc); try c_files.appendSlice(&.{ // common "src/fs-poll.c", "src/idna.c", "src/inet.c", "src/random.c", "src/strscpy.c", "src/threadpool.c", "src/timer.c", "src/uv-common.c", "src/uv-data-getter-setters.c", "src/version.c", }); if (target.getOsTag() == .linux or target.getOsTag() == .macos) { try c_files.appendSlice(&.{ "src/unix/async.c", "src/unix/core.c", "src/unix/dl.c", "src/unix/fs.c", "src/unix/getaddrinfo.c", "src/unix/getnameinfo.c", "src/unix/loop-watcher.c", "src/unix/loop.c", "src/unix/pipe.c", "src/unix/poll.c", "src/unix/process.c", "src/unix/random-devurandom.c", "src/unix/signal.c", "src/unix/stream.c", "src/unix/tcp.c", "src/unix/thread.c", "src/unix/tty.c", "src/unix/udp.c", "src/unix/proctitle.c", }); } if (target.getOsTag() == .linux) { try c_files.appendSlice(&.{ // sys "src/unix/linux-core.c", "src/unix/linux-inotify.c", "src/unix/linux-syscalls.c", "src/unix/procfs-exepath.c", "src/unix/random-getrandom.c", "src/unix/random-sysctl-linux.c", "src/unix/epoll.c", }); try c_flags.appendSlice(&.{ "-D_GNU_SOURCE", "-D_POSIX_C_SOURCE=200112", }); } else if (target.getOsTag() == .macos) { try c_files.appendSlice(&.{ "src/unix/bsd-ifaddrs.c", "src/unix/kqueue.c", "src/unix/random-getentropy.c", "src/unix/darwin-proctitle.c", "src/unix/darwin.c", "src/unix/fsevents.c", }); try c_flags.appendSlice(&.{ "-D_DARWIN_UNLIMITED_SELECT=1", "-D_DARWIN_USE_64_BIT_INODE=1", "-D_FILE_OFFSET_BITS=64", "-D_LARGEFILE_SOURCE", }); } else if (target.getOsTag() == .windows) { try c_files.appendSlice(&.{ "src/win/loop-watcher.c", "src/win/tcp.c", "src/win/async.c", "src/win/core.c", "src/win/signal.c", "src/win/snprintf.c", "src/win/getnameinfo.c", "src/win/fs.c", "src/win/fs-event.c", "src/win/getaddrinfo.c", "src/win/handle.c", "src/win/dl.c", "src/win/udp.c", "src/win/util.c", "src/win/error.c", "src/win/winapi.c", "src/win/winsock.c", "src/win/detect-wakeup.c", "src/win/stream.c", "src/win/tty.c", "src/win/process-stdio.c", "src/win/process.c", "src/win/poll.c", "src/win/thread.c", "src/win/pipe.c", }); } for (c_files.items) |file| { const path = b.fmt("{s}/vendor/{s}", .{ srcPath(), file }); lib.addCSourceFile(path, c_flags.items); } // libuv has UB in uv__write_req_update when the last buf->base has a null ptr. lib.disable_sanitize_c = true; lib.linkLibC(); lib.addIncludeDir(fromRoot(b, "vendor/include")); lib.addIncludeDir(fromRoot(b, "vendor/src")); if (builtin.os.tag == .macos and target.getOsTag() == .macos) { if (target.isNativeOs()) { // Force using native headers or it'll compile with ___darwin_check_fd_set_overflow calls // which doesn't exist in later mac libs. 
lib.linkFramework("CoreServices"); } else { lib.setLibCFile(std.build.FileSource.relative("./lib/macos.libc")); } } return lib; } pub fn buildAndLink(step: *std.build.LibExeObjStep, opts: Options) void { if (opts.lib_path) |path| { linkLibPath(step, path); } else { const lib = create(step.builder, step.target, step.build_mode) catch unreachable; linkLib(step, lib); } } pub fn linkLib(step: *std.build.LibExeObjStep, lib: *std.build.LibExeObjStep) void { linkDeps(step); step.linkLibrary(lib); } pub fn linkLibPath(step: *std.build.LibExeObjStep, path: []const u8) void { linkDeps(step); step.addAssemblyFile(path); } fn linkDeps(step: *std.build.LibExeObjStep) void { if (step.target.getOsTag() == .windows and step.target.getAbi() == .gnu) { step.linkSystemLibrary("iphlpapi"); step.linkSystemLibrary("userenv"); } } fn srcPath() []const u8 { return std.fs.path.dirname(@src().file) orelse unreachable; } fn fromRoot(b: *std.build.Builder, rel_path: []const u8) []const u8 { return std.fs.path.resolve(b.allocator, &.{ srcPath(), rel_path }) catch unreachable; }
lib/uv/lib.zig
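// Sketch of a build.zig that could consume the helper above: it adds the "uv"
// package and builds/links the vendored libuv. The executable name, its root
// source path, and the relative @import path are illustrative assumptions.
const std = @import("std");
const uv = @import("lib/uv/lib.zig");

pub fn build(b: *std.build.Builder) void {
    const target = b.standardTargetOptions(.{});
    const mode = b.standardReleaseOptions();

    const exe = b.addExecutable("app", "src/main.zig");
    exe.setTarget(target);
    exe.setBuildMode(mode);

    // Make @import("uv") available and add the vendored include paths.
    uv.addPackage(exe);
    // Passing no lib_path builds libuv from the vendored sources and links it.
    uv.buildAndLink(exe, .{});

    exe.install();
}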
const std = @import("std"); const Container_Impl = @import("containers.zig").Container_Impl; const lasting_allocator = @import("internal.zig").lasting_allocator; const milliTimestamp = @import("internal.zig").milliTimestamp; /// Linear interpolation between a and b with factor t. fn lerp(a: anytype, b: @TypeOf(a), t: f64) @TypeOf(a) { return a * (1 - @floatCast(@TypeOf(a), t)) + b * @floatCast(@TypeOf(a), t); } pub const Easings = struct { pub fn Linear(t: f64) f64 { return t; } pub fn In(t: f64) f64 { return t * t; } pub fn Out(t: f64) f64 { return 1 - (1 - t) * (1 - t); } pub fn InOut(t: f64) f64 { return lerp(In(t), Out(t), t); } }; pub fn Animation(comptime T: type) type { return struct { start: i64, end: i64, min: T, max: T, animFn: fn (t: f64) f64, /// Get the current value from the animation pub fn get(self: *@This()) T { const maxDiff = @intToFloat(f64, self.end - self.start); const diff = @intToFloat(f64, milliTimestamp() - self.start); var t = diff / maxDiff; // Clamp t to [0, 1] t = std.math.clamp(t, 0.0, 1.0); // Transform 't' using the animation function t = self.animFn(t); const min = comptime blk: { if (std.meta.trait.isIntegral(T)) { break :blk @intToFloat(f64, self.min); } else { break :blk self.min; } }; const max = comptime blk: { if (std.meta.trait.isIntegral(T)) { break :blk @intToFloat(f64, self.max); } else { break :blk self.max; } }; // Do a linear interpolation if (comptime std.meta.trait.isNumber(T)) { const result = lerp(min, max, t); if (comptime std.meta.trait.isIntegral(T)) { return @floatToInt(T, @round(result)); } else { return result; } } else if (comptime std.meta.trait.isContainer(T) and @hasDecl(T, "lerp")) { return T.lerp(min, max, t); } else { @compileError("Type " ++ @typeName(T) ++ " does not support animations."); } } }; } const Updater = struct { /// Pointer to some function fnPtr: usize, // TODO: list of data wrappers that it called }; // Atomic stack with list of current 'updater' that are being proned // this would allow for it to work with external data wrappers, and in fact with all data wrappers // with minimal change const UpdaterQueue = std.atomic.Queue(Updater); var pronedUpdaterQueue = UpdaterQueue.init(); /// This is used for tracking whether a data wrapper's value has been accessed or not. /// This is mostly used for the 'updater' pattern to automatically detect on /// which properties an updater depends. pub fn proneUpdater(updater: anytype, root: *Container_Impl) !void { var node = try lasting_allocator.create(UpdaterQueue.Node); defer lasting_allocator.destroy(node); node.data = .{ .fnPtr = @ptrToInt(updater) }; pronedUpdaterQueue.put(node); defer _ = pronedUpdaterQueue.remove(node); _ = updater(root); } pub fn isDataWrapper(comptime T: type) bool { if (!comptime std.meta.trait.is(.Struct)(T)) return false; return @hasField(T, "bindLock"); // TODO: check all properties using comptime } pub var _animatedDataWrappers = std.ArrayList(struct { fnPtr: fn (data: *anyopaque) bool, userdata: *anyopaque }).init(lasting_allocator); pub fn DataWrapper(comptime T: type) type { return struct { value: T, lock: std.Thread.Mutex = .{}, onChange: std.ArrayList(ChangeListener), // TODO: multiple bindings and binders /// The object this wrapper is binded by binderWrapper: ?*Self = null, /// The object this wrapper is binded to bindWrapper: ?*Self = null, /// This lock is used to protect from recursive relations between wrappers /// For example if there are two two-way binded data wrappers A and B: /// When A is set, B is set too. 
Since B is set, it will set A too; since A is set, it will set B too, and so on. /// To prevent that, the bindLock is acquired when setting the value of the other. /// If the lock has already been acquired, set() returns without calling the other. For example: /// When A is set, it acquires its lock and sets B. Since B is set, it will set A too. /// A notices it already acquired the binding lock, and thus returns. bindLock: std.Thread.Mutex = .{}, allocator: ?std.mem.Allocator = null, animation: if (IsAnimable) ?Animation(T) else void = if (IsAnimable) null else {}, updater: ?fn (*Container_Impl) T = null, const Self = @This(); const IsAnimable = std.meta.trait.isNumber(T) or (std.meta.trait.isContainer(T) and @hasDecl(T, "lerp")); pub const ChangeListener = struct { function: fn (newValue: T, userdata: usize) void, userdata: usize = 0 }; pub fn of(value: T) Self { return Self{ .value = value, .onChange = std.ArrayList(ChangeListener).init(lasting_allocator) }; } /// This function updates any current animation. /// It returns true if the animation isn't done, false otherwise. pub fn update(self: *Self) bool { if (self.animation) |*anim| { self.extendedSet(anim.get(), true, false); if (milliTimestamp() >= anim.end) { self.animation = null; return false; } else { return true; } } else { return false; } } /// Returns true if there is currently an animation playing. pub fn hasAnimation(self: *Self) bool { return self.animation != null; } pub fn animate(self: *Self, anim: fn (f64) f64, target: T, duration: i64) void { if (!IsAnimable) { @compileError("animate only supported on numbers"); } const time = milliTimestamp(); self.animation = Animation(T){ .start = time, .end = time + duration, .min = self.value, .max = target, .animFn = anim }; var contains = false; for (_animatedDataWrappers.items) |item| { if (@ptrCast(*anyopaque, self) == item.userdata) { contains = true; break; } } if (!contains) { _animatedDataWrappers.append(.{ .fnPtr = @ptrCast(fn (*anyopaque) bool, Self.update), .userdata = self }) catch {}; } } pub fn addChangeListener(self: *Self, listener: ChangeListener) !usize { try self.onChange.append(listener); return self.onChange.items.len - 1; // index of the listener } /// All writes to sender will also change the value of receiver pub fn bindOneWay(sender: *Self, receiver: *Self) void { sender.bindWrapper = receiver; receiver.binderWrapper = sender; } /// All writes to one change the value of the other pub fn bind(self: *Self, other: *Self) void { self.bindOneWay(other); other.bindOneWay(self); } /// Updates binder's pointers so they point to this object. pub fn updateBinders(self: *Self) void { if (self.binderWrapper) |binder| { binder.bindWrapper = self; } } /// Thread-safe get operation. If doing a read-modify-write operation, /// manually changing the value and acquiring the lock is recommended. pub fn get(self: *Self) T { self.lock.lock(); defer self.lock.unlock(); return self.value; } /// Thread-safe set operation. If doing a read-modify-write operation, /// manually changing the value and acquiring the lock is recommended. /// This also removes any previously set animation! pub fn set(self: *Self, value: T) void { self.extendedSet(value, true, true); } /// Thread-safe set operation without calling change listeners /// This should only be used in widget implementations when calling /// change listeners would cause an infinite recursion. /// This also removes any previously set animation!
/// /// Example: A text field listens for data wrapper changes in order to /// change its text. When the user edits the text, it wants to /// change the data wrapper, but without setNoListen, it would /// cause an infinite recursion. pub fn setNoListen(self: *Self, value: T) void { self.extendedSet(value, false, true); } fn extendedSet(self: *Self, value: T, comptime callHandlers: bool, comptime resetAnimation: bool) void { if (self.bindLock.tryLock()) { defer self.bindLock.unlock(); self.lock.lock(); self.value = value; if (IsAnimable and resetAnimation) { self.animation = null; } self.lock.unlock(); if (callHandlers) { for (self.onChange.items) |listener| { listener.function(self.value, listener.userdata); } } if (self.bindWrapper) |binding| { binding.set(value); } } else { // Do nothing ... } } pub fn deinit(self: *Self) void { self.onChange.deinit(); if (self.allocator) |allocator| { allocator.destroy(self); } } }; } pub fn FormatDataWrapper(allocator: std.mem.Allocator, comptime fmt: []const u8, childs: anytype) !*StringDataWrapper { const Self = struct { wrapper: StringDataWrapper, childs: @TypeOf(childs) }; var self = try allocator.create(Self); const empty = try allocator.alloc(u8, 0); // alloc an empty string so it can be freed self.* = Self{ .wrapper = StringDataWrapper.of(empty), .childs = childs }; self.wrapper.allocator = allocator; const childTypes = comptime blk: { var types: []const type = &[_]type{}; for (std.meta.fields(@TypeOf(childs))) |field| { const T = @TypeOf(@field(childs, field.name).value); types = types ++ &[_]type{T}; } break :blk types; }; const format = struct { fn format(ptr: *Self) void { const TupleType = std.meta.Tuple(childTypes); var tuple: TupleType = undefined; inline for (std.meta.fields(@TypeOf(ptr.childs))) |childF, i| { const child = @field(ptr.childs, childF.name); tuple[i] = child.get(); } var str = std.fmt.allocPrint(ptr.wrapper.allocator.?, fmt, tuple) catch unreachable; ptr.wrapper.allocator.?.free(ptr.wrapper.get()); ptr.wrapper.set(str); } }.format; format(self); const childFs = std.meta.fields(@TypeOf(childs)); comptime var i = 0; inline while (i < childFs.len) : (i += 1) { const childF = childFs[i]; const child = @field(childs, childF.name); const T = @TypeOf(child.value); _ = try child.addChangeListener(.{ .userdata = @ptrToInt(self), .function = struct { fn callback(newValue: T, userdata: usize) void { _ = newValue; const ptr = @intToPtr(*Self, userdata); format(ptr); } }.callback }); } return &self.wrapper; } pub const StringDataWrapper = DataWrapper([]const u8); pub const FloatDataWrapper = DataWrapper(f32); pub const DoubleDataWrapper = DataWrapper(f64); /// The size expressed in display pixels. 
pub const Size = struct { width: u32, height: u32, /// Shorthand for struct initialization pub fn init(width: u32, height: u32) Size { return Size{ .width = width, .height = height }; } /// Returns the size with the least area pub fn min(a: Size, b: Size) Size { if (a.width * a.height < b.width * b.height) { return a; } else { return b; } } /// Returns the size with the most area pub fn max(a: Size, b: Size) Size { if (a.width * a.height > b.width * b.height) { return a; } else { return b; } } /// Combine two sizes by taking the largest width and the largest height pub fn combine(a: Size, b: Size) Size { return Size{ .width = std.math.max(a.width, b.width), .height = std.math.max(a.height, b.height) }; } /// Intersect two sizes by taking the lowest width and the lowest height pub fn intersect(a: Size, b: Size) Size { return Size{ .width = std.math.min(a.width, b.width), .height = std.math.min(a.height, b.height) }; } test "Size.max" { const a = Size.init(200, 10); const b = Size.init(2001, 1); try std.testing.expectEqual(b, Size.max(a, b)); try std.testing.expectEqual(b, Size.max(b, a)); } test "Size.min" { const a = Size.init(200, 10); const b = Size.init(2001, 1); try std.testing.expectEqual(a, Size.min(a, b)); try std.testing.expectEqual(a, Size.min(b, a)); } test "Size.combine" { const a = Size.init(202, 12); const b = Size.init(14, 153); const expected = Size.init(202, 153); try std.testing.expectEqual(expected, Size.combine(a, b)); try std.testing.expectEqual(expected, Size.combine(b, a)); } test "Size.intersect" { const a = Size.init(202, 12); const b = Size.init(14, 153); const expected = Size.init(14, 12); try std.testing.expectEqual(expected, Size.intersect(a, b)); try std.testing.expectEqual(expected, Size.intersect(b, a)); } }; pub const Rectangle = struct { left: u32, top: u32, right: u32, bottom: u32 }; const expectEqual = std.testing.expectEqual; test "lerp" { const floatTypes = .{ f16, f32, f64, f128 }; inline for (floatTypes) |Float| { try expectEqual(@as(Float, 0.0), lerp(@as(Float, 0), 1.0, 0.0)); // 0 |-0.0 > 1.0 = 0.0 try expectEqual(@as(Float, 0.1), lerp(@as(Float, 0), 0.2, 0.5)); // 0 |-0.5 > 0.2 = 0.1 try expectEqual(@as(Float, 0.5), lerp(@as(Float, 0), 1.0, 0.5)); // 0 |-0.5 > 1.0 = 0.5 try expectEqual(@as(Float, 1.0), lerp(@as(Float, 0), 1.0, 1.0)); // 0 |-1.0 > 1.0 = 1.0 } } test "data wrappers" { var testData = DataWrapper(i32).of(0); testData.set(5); try expectEqual(@as(i32, 5), testData.get()); } test "data wrapper change listeners" { // TODO } test "format data wrapper" { // FormatDataWrapper should be used with an arena allocator var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); const allocator = arena.allocator(); // NOT PASSING DUE TO stage1 COMPILER BUGS // var dataSource1 = DataWrapper(i32).of(5); // defer dataSource1.deinit(); // var dataSource2 = DataWrapper(f32).of(1.23); // defer dataSource2.deinit(); // // var format = try FormatDataWrapper(allocator, "{} and {d}", .{ &dataSource1, &dataSource2 }); // defer format.deinit(); // // try std.testing.expectEqualStrings("5 and 1.23", format.get()); // dataSource1.set(10); // try std.testing.expectEqualStrings("10 and 1.23", format.get()); // dataSource2.set(1456.89); // try std.testing.expectEqualStrings("10 and 1456.89", format.get()); var dataSource3 = DataWrapper(i32).of(5); defer dataSource3.deinit(); var dataSource4 = DataWrapper(i32).of(1); defer dataSource4.deinit(); var format2 = try FormatDataWrapper(allocator, "{} and {}", .{ &dataSource3, &dataSource4 }); 
defer format2.deinit(); try std.testing.expectEqualStrings("5 and 1", format2.get()); dataSource3.set(10); try std.testing.expectEqualStrings("10 and 1", format2.get()); dataSource4.set(42); try std.testing.expectEqualStrings("10 and 42", format2.get()); }
src/data.zig
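// A minimal usage sketch for the DataWrapper API defined above: two-way binding
// plus a change listener. It assumes data.zig can be imported from a sibling file
// as @import("data.zig"); the test name and the concrete values are illustrative.
const std = @import("std");
const data = @import("data.zig");

test "DataWrapper bind and listen (sketch)" {
    var a = data.DataWrapper(i32).of(1);
    defer a.deinit();
    var b = data.DataWrapper(i32).of(2);
    defer b.deinit();

    // Two-way bind: writes to either wrapper propagate to the other, and the
    // bindLock described above stops the propagation from recursing forever.
    a.bind(&b);

    var seen: i32 = 0;
    _ = try a.addChangeListener(.{
        .userdata = @ptrToInt(&seen),
        .function = struct {
            fn onChange(newValue: i32, userdata: usize) void {
                @intToPtr(*i32, userdata).* = newValue;
            }
        }.onChange,
    });

    // Setting b propagates through the binding to a, firing a's listener.
    b.set(5);
    try std.testing.expectEqual(@as(i32, 5), a.get());
    try std.testing.expectEqual(@as(i32, 5), seen);
}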
const builtin = @import("builtin"); const std = @import("std.zig"); const io = std.io; const mem = std.mem; const os = std.os; const File = std.fs.File; const ArrayList = std.ArrayList; // CoffHeader.machine values // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680313(v=vs.85).aspx const IMAGE_FILE_MACHINE_I386 = 0x014c; const IMAGE_FILE_MACHINE_IA64 = 0x0200; const IMAGE_FILE_MACHINE_AMD64 = 0x8664; pub const MachineType = enum(u16) { Unknown = 0x0, /// Matsushita AM33 AM33 = 0x1d3, /// x64 X64 = 0x8664, /// ARM little endian ARM = 0x1c0, /// ARM64 little endian ARM64 = 0xaa64, /// ARM Thumb-2 little endian ARMNT = 0x1c4, /// EFI byte code EBC = 0xebc, /// Intel 386 or later processors and compatible processors I386 = 0x14c, /// Intel Itanium processor family IA64 = 0x200, /// Mitsubishi M32R little endian M32R = 0x9041, /// MIPS16 MIPS16 = 0x266, /// MIPS with FPU MIPSFPU = 0x366, /// MIPS16 with FPU MIPSFPU16 = 0x466, /// Power PC little endian POWERPC = 0x1f0, /// Power PC with floating point support POWERPCFP = 0x1f1, /// MIPS little endian R4000 = 0x166, /// RISC-V 32-bit address space RISCV32 = 0x5032, /// RISC-V 64-bit address space RISCV64 = 0x5064, /// RISC-V 128-bit address space RISCV128 = 0x5128, /// Hitachi SH3 SH3 = 0x1a2, /// Hitachi SH3 DSP SH3DSP = 0x1a3, /// Hitachi SH4 SH4 = 0x1a6, /// Hitachi SH5 SH5 = 0x1a8, /// Thumb Thumb = 0x1c2, /// MIPS little-endian WCE v2 WCEMIPSV2 = 0x169, }; // OptionalHeader.magic values // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b; const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b; // Image Characteristics pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1; pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200; pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2; pub const IMAGE_FILE_32BIT_MACHINE = 0x100; pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20; // Section flags pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40; pub const IMAGE_SCN_MEM_READ = 0x40000000; pub const IMAGE_SCN_CNT_CODE = 0x20; pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000; pub const IMAGE_SCN_MEM_WRITE = 0x80000000; const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16; const IMAGE_DEBUG_TYPE_CODEVIEW = 2; const DEBUG_DIRECTORY = 6; pub const CoffError = error{ InvalidPEMagic, InvalidPEHeader, InvalidMachine, MissingCoffSection, }; // Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format pub const Coff = struct { in_file: File, allocator: *mem.Allocator, coff_header: CoffHeader, pe_header: OptionalHeader, sections: ArrayList(Section), guid: [16]u8, age: u32, pub fn init(allocator: *mem.Allocator, in_file: File) Coff { return Coff{ .in_file = in_file, .allocator = allocator, .coff_header = undefined, .pe_header = undefined, .sections = ArrayList(Section).init(allocator), .guid = undefined, .age = undefined, }; } pub fn loadHeader(self: *Coff) !void { const pe_pointer_offset = 0x3C; const in = self.in_file.inStream(); var magic: [2]u8 = undefined; try in.readNoEof(magic[0..]); if (!mem.eql(u8, &magic, "MZ")) return error.InvalidPEMagic; // Seek to PE File Header (coff header) try self.in_file.seekTo(pe_pointer_offset); const pe_magic_offset = try in.readIntLittle(u32); try self.in_file.seekTo(pe_magic_offset); var pe_header_magic: [4]u8 = undefined; try in.readNoEof(pe_header_magic[0..]); if (!mem.eql(u8, &pe_header_magic, &[_]u8{ 'P', 'E', 0, 0 })) return error.InvalidPEHeader; self.coff_header = CoffHeader{ .machine = try in.readIntLittle(u16), 
.number_of_sections = try in.readIntLittle(u16), .timedate_stamp = try in.readIntLittle(u32), .pointer_to_symbol_table = try in.readIntLittle(u32), .number_of_symbols = try in.readIntLittle(u32), .size_of_optional_header = try in.readIntLittle(u16), .characteristics = try in.readIntLittle(u16), }; switch (self.coff_header.machine) { IMAGE_FILE_MACHINE_I386, IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_IA64 => {}, else => return error.InvalidMachine, } try self.loadOptionalHeader(); } fn loadOptionalHeader(self: *Coff) !void { const in = self.in_file.inStream(); self.pe_header.magic = try in.readIntLittle(u16); // For now we're only interested in finding the reference to the .pdb, // so we'll skip most of this header, which size is different in 32 // 64 bits by the way. var skip_size: u16 = undefined; if (self.pe_header.magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC) { skip_size = 2 * @sizeOf(u8) + 8 * @sizeOf(u16) + 18 * @sizeOf(u32); } else if (self.pe_header.magic == IMAGE_NT_OPTIONAL_HDR64_MAGIC) { skip_size = 2 * @sizeOf(u8) + 8 * @sizeOf(u16) + 12 * @sizeOf(u32) + 5 * @sizeOf(u64); } else return error.InvalidPEMagic; try self.in_file.seekBy(skip_size); const number_of_rva_and_sizes = try in.readIntLittle(u32); if (number_of_rva_and_sizes != IMAGE_NUMBEROF_DIRECTORY_ENTRIES) return error.InvalidPEHeader; for (self.pe_header.data_directory) |*data_dir| { data_dir.* = OptionalHeader.DataDirectory{ .virtual_address = try in.readIntLittle(u32), .size = try in.readIntLittle(u32), }; } } pub fn getPdbPath(self: *Coff, buffer: []u8) !usize { try self.loadSections(); const header = blk: { if (self.getSection(".buildid")) |section| { break :blk section.header; } else if (self.getSection(".rdata")) |section| { break :blk section.header; } else { return error.MissingCoffSection; } }; const debug_dir = &self.pe_header.data_directory[DEBUG_DIRECTORY]; const file_offset = debug_dir.virtual_address - header.virtual_address + header.pointer_to_raw_data; const in = self.in_file.inStream(); try self.in_file.seekTo(file_offset); // Find the correct DebugDirectoryEntry, and where its data is stored. // It can be in any section. const debug_dir_entry_count = debug_dir.size / @sizeOf(DebugDirectoryEntry); var i: u32 = 0; blk: while (i < debug_dir_entry_count) : (i += 1) { const debug_dir_entry = try in.readStruct(DebugDirectoryEntry); if (debug_dir_entry.type == IMAGE_DEBUG_TYPE_CODEVIEW) { for (self.sections.items) |*section| { const section_start = section.header.virtual_address; const section_size = section.header.misc.virtual_size; const rva = debug_dir_entry.address_of_raw_data; const offset = rva - section_start; if (section_start <= rva and offset < section_size and debug_dir_entry.size_of_data <= section_size - offset) { try self.in_file.seekTo(section.header.pointer_to_raw_data + offset); break :blk; } } } } var cv_signature: [4]u8 = undefined; // CodeView signature try in.readNoEof(cv_signature[0..]); // 'RSDS' indicates PDB70 format, used by lld. if (!mem.eql(u8, &cv_signature, "RSDS")) return error.InvalidPEMagic; try in.readNoEof(self.guid[0..]); self.age = try in.readIntLittle(u32); // Finally read the null-terminated string. 
var byte = try in.readByte(); i = 0; while (byte != 0 and i < buffer.len) : (i += 1) { buffer[i] = byte; byte = try in.readByte(); } if (byte != 0 and i == buffer.len) return error.NameTooLong; return @as(usize, i); } pub fn loadSections(self: *Coff) !void { if (self.sections.items.len == self.coff_header.number_of_sections) return; try self.sections.ensureCapacity(self.coff_header.number_of_sections); const in = self.in_file.inStream(); var name: [8]u8 = undefined; var i: u16 = 0; while (i < self.coff_header.number_of_sections) : (i += 1) { try in.readNoEof(name[0..]); try self.sections.append(Section{ .header = SectionHeader{ .name = name, .misc = SectionHeader.Misc{ .virtual_size = try in.readIntLittle(u32) }, .virtual_address = try in.readIntLittle(u32), .size_of_raw_data = try in.readIntLittle(u32), .pointer_to_raw_data = try in.readIntLittle(u32), .pointer_to_relocations = try in.readIntLittle(u32), .pointer_to_line_numbers = try in.readIntLittle(u32), .number_of_relocations = try in.readIntLittle(u16), .number_of_line_numbers = try in.readIntLittle(u16), .characteristics = try in.readIntLittle(u32), }, }); } } pub fn getSection(self: *Coff, comptime name: []const u8) ?*Section { for (self.sections.items) |*sec| { if (mem.eql(u8, sec.header.name[0..name.len], name)) { return sec; } } return null; } }; const CoffHeader = struct { machine: u16, number_of_sections: u16, timedate_stamp: u32, pointer_to_symbol_table: u32, number_of_symbols: u32, size_of_optional_header: u16, characteristics: u16, }; const OptionalHeader = struct { const DataDirectory = struct { virtual_address: u32, size: u32, }; magic: u16, data_directory: [IMAGE_NUMBEROF_DIRECTORY_ENTRIES]DataDirectory, }; const DebugDirectoryEntry = packed struct { characteristics: u32, time_date_stamp: u32, major_version: u16, minor_version: u16, @"type": u32, size_of_data: u32, address_of_raw_data: u32, pointer_to_raw_data: u32, }; pub const Section = struct { header: SectionHeader, }; const SectionHeader = struct { const Misc = union { physical_address: u32, virtual_size: u32, }; name: [8]u8, misc: Misc, virtual_address: u32, size_of_raw_data: u32, pointer_to_raw_data: u32, pointer_to_relocations: u32, pointer_to_line_numbers: u32, number_of_relocations: u16, number_of_line_numbers: u16, characteristics: u32, };
lib/std/coff.zig
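// Usage sketch for the Coff reader above: open a PE image, parse its headers,
// and read the embedded .pdb path into a caller-provided buffer. The file name
// "app.exe", the helper name, and the relative import path are assumptions.
const std = @import("std");
const coff = @import("coff.zig");

fn readPdbPath(allocator: *std.mem.Allocator, buffer: []u8) ![]const u8 {
    var file = try std.fs.cwd().openFile("app.exe", .{});
    defer file.close();

    var image = coff.Coff.init(allocator, file);
    defer image.sections.deinit();

    // Validates the MZ / "PE\0\0" magic and reads the COFF and optional headers.
    try image.loadHeader();
    // Walks the debug data directory and copies out the null-terminated PDB name.
    const len = try image.getPdbPath(buffer);
    return buffer[0..len];
}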
const std = @import("std"); const builtin = @import("builtin"); const testing = std.testing; const default_seed: u32 = 0xc70f6907; pub const Murmur2_32 = struct { const Self = @This(); pub fn hash(str: []const u8) u32 { return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed }); } pub fn hashWithSeed(str: []const u8, seed: u32) u32 { const m: u32 = 0x5bd1e995; const len = @truncate(u32, str.len); var h1: u32 = seed ^ len; for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| { var k1: u32 = v; if (builtin.endian == .Big) k1 = @byteSwap(u32, k1); k1 *%= m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; h1 ^= k1; } const offset = len & 0xfffffffc; const rest = len & 3; if (rest >= 3) { h1 ^= @intCast(u32, str[offset + 2]) << 16; } if (rest >= 2) { h1 ^= @intCast(u32, str[offset + 1]) << 8; } if (rest >= 1) { h1 ^= @intCast(u32, str[offset + 0]); h1 *%= m; } h1 ^= h1 >> 13; h1 *%= m; h1 ^= h1 >> 15; return h1; } pub fn hashUint32(v: u32) u32 { return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed }); } pub fn hashUint32WithSeed(v: u32, seed: u32) u32 { const m: u32 = 0x5bd1e995; const len: u32 = 4; var h1: u32 = seed ^ len; var k1: u32 = undefined; k1 = v *% m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; h1 ^= k1; h1 ^= h1 >> 13; h1 *%= m; h1 ^= h1 >> 15; return h1; } pub fn hashUint64(v: u64) u32 { return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed }); } pub fn hashUint64WithSeed(v: u64, seed: u32) u32 { const m: u32 = 0x5bd1e995; const len: u32 = 8; var h1: u32 = seed ^ len; var k1: u32 = undefined; k1 = @truncate(u32, v) *% m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; h1 ^= k1; k1 = @truncate(u32, v >> 32) *% m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; h1 ^= k1; h1 ^= h1 >> 13; h1 *%= m; h1 ^= h1 >> 15; return h1; } }; pub const Murmur2_64 = struct { const Self = @This(); pub fn hash(str: []const u8) u64 { return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed }); } pub fn hashWithSeed(str: []const u8, seed: u64) u64 { const m: u64 = 0xc6a4a7935bd1e995; const len = @as(u64, str.len); var h1: u64 = seed ^ (len *% m); for (@ptrCast([*]align(1) const u64, str.ptr)[0..@intCast(usize, len >> 3)]) |v| { var k1: u64 = v; if (builtin.endian == .Big) k1 = @byteSwap(u64, k1); k1 *%= m; k1 ^= k1 >> 47; k1 *%= m; h1 ^= k1; h1 *%= m; } const rest = len & 7; const offset = len - rest; if (rest > 0) { var k1: u64 = 0; @memcpy(@ptrCast([*]u8, &k1), @ptrCast([*]const u8, &str[@intCast(usize, offset)]), @intCast(usize, rest)); if (builtin.endian == .Big) k1 = @byteSwap(u64, k1); h1 ^= k1; h1 *%= m; } h1 ^= h1 >> 47; h1 *%= m; h1 ^= h1 >> 47; return h1; } pub fn hashUint32(v: u32) u64 { return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed }); } pub fn hashUint32WithSeed(v: u32, seed: u64) u64 { const m: u64 = 0xc6a4a7935bd1e995; const len: u64 = 4; var h1: u64 = seed ^ (len *% m); var k1: u64 = v; h1 ^= k1; h1 *%= m; h1 ^= h1 >> 47; h1 *%= m; h1 ^= h1 >> 47; return h1; } pub fn hashUint64(v: u64) u64 { return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed }); } pub fn hashUint64WithSeed(v: u64, seed: u64) u64 { const m: u64 = 0xc6a4a7935bd1e995; const len: u64 = 8; var h1: u64 = seed ^ (len *% m); var k1: u64 = undefined; k1 = v *% m; k1 ^= k1 >> 47; k1 *%= m; h1 ^= k1; h1 *%= m; h1 ^= h1 >> 47; h1 *%= m; h1 ^= h1 >> 47; return h1; } }; pub const Murmur3_32 = struct { const Self = @This(); fn rotl32(x: u32, comptime r: 
u32) u32 { return (x << r) | (x >> (32 - r)); } pub fn hash(str: []const u8) u32 { return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed }); } pub fn hashWithSeed(str: []const u8, seed: u32) u32 { const c1: u32 = 0xcc9e2d51; const c2: u32 = 0x1b873593; const len = @truncate(u32, str.len); var h1: u32 = seed; for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| { var k1: u32 = v; if (builtin.endian == .Big) k1 = @byteSwap(u32, k1); k1 *%= c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 *%= 5; h1 +%= 0xe6546b64; } { var k1: u32 = 0; const offset = len & 0xfffffffc; const rest = len & 3; if (rest == 3) { k1 ^= @intCast(u32, str[offset + 2]) << 16; } if (rest >= 2) { k1 ^= @intCast(u32, str[offset + 1]) << 8; } if (rest >= 1) { k1 ^= @intCast(u32, str[offset + 0]); k1 *%= c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; } } h1 ^= len; h1 ^= h1 >> 16; h1 *%= 0x85ebca6b; h1 ^= h1 >> 13; h1 *%= 0xc2b2ae35; h1 ^= h1 >> 16; return h1; } pub fn hashUint32(v: u32) u32 { return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed }); } pub fn hashUint32WithSeed(v: u32, seed: u32) u32 { const c1: u32 = 0xcc9e2d51; const c2: u32 = 0x1b873593; const len: u32 = 4; var h1: u32 = seed; var k1: u32 = undefined; k1 = v *% c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 *%= 5; h1 +%= 0xe6546b64; h1 ^= len; h1 ^= h1 >> 16; h1 *%= 0x85ebca6b; h1 ^= h1 >> 13; h1 *%= 0xc2b2ae35; h1 ^= h1 >> 16; return h1; } pub fn hashUint64(v: u64) u32 { return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed }); } pub fn hashUint64WithSeed(v: u64, seed: u32) u32 { const c1: u32 = 0xcc9e2d51; const c2: u32 = 0x1b873593; const len: u32 = 8; var h1: u32 = seed; var k1: u32 = undefined; k1 = @truncate(u32, v) *% c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 *%= 5; h1 +%= 0xe6546b64; k1 = @truncate(u32, v >> 32) *% c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 *%= 5; h1 +%= 0xe6546b64; h1 ^= len; h1 ^= h1 >> 16; h1 *%= 0x85ebca6b; h1 ^= h1 >> 13; h1 *%= 0xc2b2ae35; h1 ^= h1 >> 16; return h1; } }; fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 { const hashbytes = hashbits / 8; var key: [256]u8 = undefined; var hashes: [hashbytes * 256]u8 = undefined; var final: [hashbytes]u8 = undefined; @memset(@ptrCast([*]u8, &key[0]), 0, @sizeOf(@TypeOf(key))); @memset(@ptrCast([*]u8, &hashes[0]), 0, @sizeOf(@TypeOf(hashes))); @memset(@ptrCast([*]u8, &final[0]), 0, @sizeOf(@TypeOf(final))); var i: u32 = 0; while (i < 256) : (i += 1) { key[i] = @truncate(u8, i); var h = hash_fn(key[0..i], 256 - i); if (builtin.endian == .Big) h = @byteSwap(@TypeOf(h), h); @memcpy(@ptrCast([*]u8, &hashes[i * hashbytes]), @ptrCast([*]u8, &h), hashbytes); } return @truncate(u32, hash_fn(&hashes, 0)); } test "murmur2_32" { testing.expectEqual(SMHasherTest(Murmur2_32.hashWithSeed, 32), 0x27864C1E); var v0: u32 = 0x12345678; var v1: u64 = 0x1234567812345678; var v0le: u32 = v0; var v1le: u64 = v1; if (builtin.endian == .Big) { v0le = @byteSwap(u32, v0le); v1le = @byteSwap(u64, v1le); } testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_32.hashUint32(v0)); testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_32.hashUint64(v1)); } test "murmur2_64" { std.testing.expectEqual(SMHasherTest(Murmur2_64.hashWithSeed, 64), 0x1F0D3804); var v0: u32 = 0x12345678; var v1: u64 = 0x1234567812345678; var v0le: u32 = v0; var 
v1le: u64 = v1; if (builtin.endian == .Big) { v0le = @byteSwap(u32, v0le); v1le = @byteSwap(u64, v1le); } testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_64.hashUint32(v0)); testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_64.hashUint64(v1)); } test "murmur3_32" { std.testing.expectEqual(SMHasherTest(Murmur3_32.hashWithSeed, 32), 0xB0F57EE3); var v0: u32 = 0x12345678; var v1: u64 = 0x1234567812345678; var v0le: u32 = v0; var v1le: u64 = v1; if (builtin.endian == .Big) { v0le = @byteSwap(u32, v0le); v1le = @byteSwap(u64, v1le); } testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur3_32.hashUint32(v0)); testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur3_32.hashUint64(v1)); }
lib/std/hash/murmur.zig
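// Usage sketch for the hashers above, following the file's own test style (its
// expectations are not wrapped in try). The relative import path is an assumption;
// the seed 0xc70f6907 is the default_seed declared at the top of the file.
const std = @import("std");
const testing = std.testing;
const murmur = @import("murmur.zig");

test "murmur usage (sketch)" {
    // hash() is just hashWithSeed() with the file's default seed.
    const h = murmur.Murmur3_32.hash("hello");
    testing.expectEqual(h, murmur.Murmur3_32.hashWithSeed("hello", 0xc70f6907));
    // The integer helpers hash the value directly; the file's own tests show the
    // result matches hashing the value's little-endian byte representation.
    _ = murmur.Murmur2_64.hashUint64(0x1234567812345678);
}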
const std = @import("std"); const mem = std.mem; const Allocator = mem.Allocator; const assert = std.debug.assert; const print = std.debug.print; const fmt = std.fmt; const util = @import("utils.zig"); const input = @embedFile("../in/day02.txt"); fn part1() void { var eligible_passwords: usize = 0; var it = InputParser(input, "- : \n"); while (it.next()) |line| { const char_count = mem.count(u8, line.password, &[_]u8{line.char}); if (char_count <= line.hi and char_count >= line.lo) { eligible_passwords += 1; } } print("part1:\n# eligible passwords: {}\n", .{eligible_passwords}); } fn part2() void { var eligible_passwords: usize = 0; var it = InputParser(input, "- : \n"); while (it.next()) |line| { const first = line.lo - 1; const second = line.hi - 1; if (util.xor(line.password[first] == line.char, line.password[second] == line.char)) { eligible_passwords += 1; } } print("part2:\n# eligible passwords: {}\n", .{eligible_passwords}); } pub fn main() !void { part1(); part2(); } fn InputParser(buffer: []const u8, delimiter_bytes: []const u8) LineIterator { return LineIterator{ .token_it = mem.TokenIterator{ .buffer = buffer, .delimiter_bytes = delimiter_bytes, .index = 0 } }; } const ConstrainedPassword = struct { lo: u8, hi: u8, char: u8, password: []const u8 }; const LineIterator = struct { token_it: mem.TokenIterator, pub fn next(self: *LineIterator) ?ConstrainedPassword { const lo = self.token_it.next() orelse return null; const hi = self.token_it.next() orelse return null; return ConstrainedPassword{ .lo = fmt.parseInt(u8, lo, 10) catch { return null; }, .hi = fmt.parseInt(u8, hi, 10) catch { return null; }, .char = (self.token_it.next().?)[0], .password = self.token_it.next().?, }; } };
src/day02.zig
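// Sketch of the line format part1/part2 above expect: "<lo>-<hi> <char>: <password>",
// split into four tokens by the "- : \n" delimiter set. InputParser is file-private,
// so a test like this would have to live inside day02.zig; the sample line is
// illustrative.
test "parse one policy line (sketch)" {
    var it = InputParser("1-3 a: abcde\n", "- : \n");
    const entry = it.next().?;
    assert(entry.lo == 1 and entry.hi == 3 and entry.char == 'a');
    assert(mem.eql(u8, entry.password, "abcde"));
}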
const std = @import("std"); const assert = std.debug.assert; const Allocator = std.mem.Allocator; /// Returns the optimal static bit set type for the specified number /// of elements. The returned type will perform no allocations, /// can be copied by value, and does not require deinitialization. /// Both possible implementations fulfill the same interface. pub fn StaticBitSet(comptime size: usize) type { if (size <= @bitSizeOf(usize)) { return IntegerBitSet(size); } else { return ArrayBitSet(usize, size); } } /// A bit set with static size, which is backed by a single integer. /// This set is good for sets with a small size, but may generate /// inefficient code for larger sets, especially in debug mode. pub fn IntegerBitSet(comptime size: u16) type { return packed struct { const Self = @This(); // TODO: Make this a comptime field once those are fixed /// The number of items in this bit set pub const bit_length: usize = size; /// The integer type used to represent a mask in this bit set pub const MaskInt = std.meta.Int(.unsigned, size); /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); /// The bit mask, as a single integer mask: MaskInt, /// Creates a bit set with no elements present. pub fn initEmpty() Self { return .{ .mask = 0 }; } /// Creates a bit set with all elements present. pub fn initFull() Self { return .{ .mask = ~@as(MaskInt, 0) }; } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { _ = self; return bit_length; } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { assert(index < bit_length); return (self.mask & maskBit(index)) != 0; } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { return @popCount(MaskInt, self.mask); } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. pub fn setValue(self: *Self, index: usize, value: bool) void { assert(index < bit_length); if (MaskInt == u0) return; const bit = maskBit(index); const new_bit = bit & std.math.boolMask(MaskInt, value); self.mask = (self.mask & ~bit) | new_bit; } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { assert(index < bit_length); self.mask |= maskBit(index); } /// Changes the value of all bits in the specified range to /// match the passed boolean. 
pub fn setRangeValue(self: *Self, range: Range, value: bool) void { assert(range.end <= bit_length); assert(range.start <= range.end); if (range.start == range.end) return; if (MaskInt == u0) return; const start_bit = @intCast(ShiftInt, range.start); var mask = std.math.boolMask(MaskInt, true) << start_bit; if (range.end != bit_length) { const end_bit = @intCast(ShiftInt, range.end); mask &= std.math.boolMask(MaskInt, true) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)); } self.mask &= ~mask; mask = std.math.boolMask(MaskInt, value) << start_bit; if (range.end != bit_length) { const end_bit = @intCast(ShiftInt, range.end); mask &= std.math.boolMask(MaskInt, value) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)); } self.mask |= mask; } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { assert(index < bit_length); // Workaround for #7953 if (MaskInt == u0) return; self.mask &= ~maskBit(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { assert(index < bit_length); self.mask ^= maskBit(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. pub fn toggleSet(self: *Self, toggles: Self) void { self.mask ^= toggles.mask; } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { self.mask = ~self.mask; } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. pub fn setUnion(self: *Self, other: Self) void { self.mask |= other.mask; } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. pub fn setIntersection(self: *Self, other: Self) void { self.mask &= other.mask; } /// Finds the index of the first set bit. /// If no bits are set, returns null. pub fn findFirstSet(self: Self) ?usize { const mask = self.mask; if (mask == 0) return null; return @ctz(MaskInt, mask); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. pub fn toggleFirstSet(self: *Self) ?usize { const mask = self.mask; if (mask == 0) return null; const index = @ctz(MaskInt, mask); self.mask = mask & (mask - 1); return index; } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { return .{ .bits_remain = switch (options.kind) { .set => self.mask, .unset => ~self.mask, }, }; } pub fn Iterator(comptime options: IteratorOptions) type { return SingleWordIterator(options.direction); } fn SingleWordIterator(comptime direction: IteratorOptions.Direction) type { return struct { const IterSelf = @This(); // all bits which have not yet been iterated over bits_remain: MaskInt, /// Returns the index of the next unvisited set bit /// in the bit set, in ascending order. 
pub fn next(self: *IterSelf) ?usize { if (self.bits_remain == 0) return null; switch (direction) { .forward => { const next_index = @ctz(MaskInt, self.bits_remain); self.bits_remain &= self.bits_remain - 1; return next_index; }, .reverse => { const leading_zeroes = @clz(MaskInt, self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1; return top_bit; }, } } }; } fn maskBit(index: usize) MaskInt { if (MaskInt == u0) return 0; return @as(MaskInt, 1) << @intCast(ShiftInt, index); } fn boolMaskBit(index: usize, value: bool) MaskInt { if (MaskInt == u0) return 0; return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index); } }; } /// A bit set with static size, which is backed by an array of usize. /// This set is good for sets with a larger size, but may use /// more bytes than necessary if your set is small. pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type { const mask_info: std.builtin.Type = @typeInfo(MaskIntType); // Make sure the mask int is indeed an int if (mask_info != .Int) @compileError("ArrayBitSet can only operate on integer masks, but was passed " ++ @typeName(MaskIntType)); // It must also be unsigned. if (mask_info.Int.signedness != .unsigned) @compileError("ArrayBitSet requires an unsigned integer mask type, but was passed " ++ @typeName(MaskIntType)); // And it must not be empty. if (MaskIntType == u0) @compileError("ArrayBitSet requires a sized integer for its mask int. u0 does not work."); const byte_size = std.mem.byte_size_in_bits; // We use shift and truncate to decompose indices into mask indices and bit indices. // This operation requires that the mask has an exact power of two number of bits. if (!std.math.isPowerOfTwo(@bitSizeOf(MaskIntType))) { var desired_bits = std.math.ceilPowerOfTwoAssert(usize, @bitSizeOf(MaskIntType)); if (desired_bits < byte_size) desired_bits = byte_size; const FixedMaskType = std.meta.Int(.unsigned, desired_bits); @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++ ", which is not a power of two. Please round this up to a power of two integer size (i.e. " ++ @typeName(FixedMaskType) ++ ")."); } // Make sure the integer has no padding bits. // Those would be wasteful here and are probably a mistake by the user. // This case may be hit with small powers of two, like u4. if (@bitSizeOf(MaskIntType) != @sizeOf(MaskIntType) * byte_size) { var desired_bits = @sizeOf(MaskIntType) * byte_size; desired_bits = std.math.ceilPowerOfTwoAssert(usize, desired_bits); const FixedMaskType = std.meta.Int(.unsigned, desired_bits); @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++ ", which contains padding bits. Please round this up to an unpadded integer size (i.e. " ++ @typeName(FixedMaskType) ++ ")."); } return extern struct { const Self = @This(); // TODO: Make this a comptime field once those are fixed /// The number of items in this bit set pub const bit_length: usize = size; /// The integer type used to represent a mask in this bit set pub const MaskInt = MaskIntType; /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); // bits in one mask const mask_len = @bitSizeOf(MaskInt); // total number of masks const num_masks = (size + mask_len - 1) / mask_len; // padding bits in the last mask (may be 0) const last_pad_bits = mask_len * num_masks - size; // Mask of valid bits in the last mask. 
// All functions will ensure that the invalid // bits in the last mask are zero. pub const last_item_mask = ~@as(MaskInt, 0) >> last_pad_bits; /// The bit masks, ordered with lower indices first. /// Padding bits at the end are undefined. masks: [num_masks]MaskInt, /// Creates a bit set with no elements present. pub fn initEmpty() Self { return .{ .masks = [_]MaskInt{0} ** num_masks }; } /// Creates a bit set with all elements present. pub fn initFull() Self { if (num_masks == 0) { return .{ .masks = .{} }; } else { return .{ .masks = [_]MaskInt{~@as(MaskInt, 0)} ** (num_masks - 1) ++ [_]MaskInt{last_item_mask} }; } } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { _ = self; return bit_length; } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { assert(index < bit_length); if (num_masks == 0) return false; // doesn't compile in this case return (self.masks[maskIndex(index)] & maskBit(index)) != 0; } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { var total: usize = 0; for (self.masks) |mask| { total += @popCount(MaskInt, mask); } return total; } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. pub fn setValue(self: *Self, index: usize, value: bool) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case const bit = maskBit(index); const mask_index = maskIndex(index); const new_bit = bit & std.math.boolMask(MaskInt, value); self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit; } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case self.masks[maskIndex(index)] |= maskBit(index); } /// Changes the value of all bits in the specified range to /// match the passed boolean. 
pub fn setRangeValue(self: *Self, range: Range, value: bool) void { assert(range.end <= bit_length); assert(range.start <= range.end); if (range.start == range.end) return; if (num_masks == 0) return; const start_mask_index = maskIndex(range.start); const start_bit = @truncate(ShiftInt, range.start); const end_mask_index = maskIndex(range.end); const end_bit = @truncate(ShiftInt, range.end); if (start_mask_index == end_mask_index) { var mask1 = std.math.boolMask(MaskInt, true) << start_bit; var mask2 = std.math.boolMask(MaskInt, true) >> (mask_len - 1) - (end_bit - 1); self.masks[start_mask_index] &= ~(mask1 & mask2); mask1 = std.math.boolMask(MaskInt, value) << start_bit; mask2 = std.math.boolMask(MaskInt, value) >> (mask_len - 1) - (end_bit - 1); self.masks[start_mask_index] |= mask1 & mask2; } else { var bulk_mask_index: usize = undefined; if (start_bit > 0) { self.masks[start_mask_index] = (self.masks[start_mask_index] & ~(std.math.boolMask(MaskInt, true) << start_bit)) | (std.math.boolMask(MaskInt, value) << start_bit); bulk_mask_index = start_mask_index + 1; } else { bulk_mask_index = start_mask_index; } while (bulk_mask_index < end_mask_index) : (bulk_mask_index += 1) { self.masks[bulk_mask_index] = std.math.boolMask(MaskInt, value); } if (end_bit > 0) { self.masks[end_mask_index] = (self.masks[end_mask_index] & (std.math.boolMask(MaskInt, true) << end_bit)) | (std.math.boolMask(MaskInt, value) >> ((@bitSizeOf(MaskInt) - 1) - (end_bit - 1))); } } } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case self.masks[maskIndex(index)] &= ~maskBit(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case self.masks[maskIndex(index)] ^= maskBit(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. pub fn toggleSet(self: *Self, toggles: Self) void { for (self.masks) |*mask, i| { mask.* ^= toggles.masks[i]; } } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { for (self.masks) |*mask| { mask.* = ~mask.*; } // Zero the padding bits if (num_masks > 0) { self.masks[num_masks - 1] &= last_item_mask; } } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. pub fn setUnion(self: *Self, other: Self) void { for (self.masks) |*mask, i| { mask.* |= other.masks[i]; } } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. pub fn setIntersection(self: *Self, other: Self) void { for (self.masks) |*mask, i| { mask.* &= other.masks[i]; } } /// Finds the index of the first set bit. /// If no bits are set, returns null. pub fn findFirstSet(self: Self) ?usize { var offset: usize = 0; const mask = for (self.masks) |mask| { if (mask != 0) break mask; offset += @bitSizeOf(MaskInt); } else return null; return offset + @ctz(MaskInt, mask); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. 
pub fn toggleFirstSet(self: *Self) ?usize { var offset: usize = 0; const mask = for (self.masks) |*mask| { if (mask.* != 0) break mask; offset += @bitSizeOf(MaskInt); } else return null; const index = @ctz(MaskInt, mask.*); mask.* &= (mask.* - 1); return offset + index; } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { return Iterator(options).init(&self.masks, last_item_mask); } pub fn Iterator(comptime options: IteratorOptions) type { return BitSetIterator(MaskInt, options); } fn maskBit(index: usize) MaskInt { return @as(MaskInt, 1) << @truncate(ShiftInt, index); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index); } }; } /// A bit set with runtime known size, backed by an allocated slice /// of usize. The allocator must be tracked externally by the user. pub const DynamicBitSetUnmanaged = struct { const Self = @This(); /// The integer type used to represent a mask in this bit set pub const MaskInt = usize; /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); /// The number of valid items in this bit set bit_length: usize = 0, /// The bit masks, ordered with lower indices first. /// Padding bits at the end must be zeroed. masks: [*]MaskInt = empty_masks_ptr, // This pointer is one usize after the actual allocation. // That slot holds the size of the true allocation, which // is needed by Zig's allocator interface in case a shrink // fails. // Don't modify this value. Ideally it would go in const data so // modifications would cause a bus error, but the only way // to discard a const qualifier is through ptrToInt, which // cannot currently round trip at comptime. var empty_masks_data = [_]MaskInt{ 0, undefined }; const empty_masks_ptr = empty_masks_data[1..2]; /// Creates a bit set with no elements present. /// If bit_length is not zero, deinit must eventually be called. pub fn initEmpty(allocator: Allocator, bit_length: usize) !Self { var self = Self{}; try self.resize(allocator, bit_length, false); return self; } /// Creates a bit set with all elements present. /// If bit_length is not zero, deinit must eventually be called. pub fn initFull(allocator: Allocator, bit_length: usize) !Self { var self = Self{}; try self.resize(allocator, bit_length, true); return self; } /// Resizes to a new bit_length. If the new length is larger /// than the old length, fills any added bits with `fill`. /// If new_len is not zero, deinit must eventually be called. pub fn resize(self: *@This(), allocator: Allocator, new_len: usize, fill: bool) !void { const old_len = self.bit_length; const old_masks = numMasks(old_len); const new_masks = numMasks(new_len); const old_allocation = (self.masks - 1)[0..(self.masks - 1)[0]]; if (new_masks == 0) { assert(new_len == 0); allocator.free(old_allocation); self.masks = empty_masks_ptr; self.bit_length = 0; return; } if (old_allocation.len != new_masks + 1) realloc: { // If realloc fails, it may mean one of two things. // If we are growing, it means we are out of memory. // If we are shrinking, it means the allocator doesn't // want to move the allocation. 
This means we need to // hold on to the extra 8 bytes required to be able to free // this allocation properly. const new_allocation = allocator.realloc(old_allocation, new_masks + 1) catch |err| { if (new_masks + 1 > old_allocation.len) return err; break :realloc; }; new_allocation[0] = new_allocation.len; self.masks = new_allocation.ptr + 1; } // If we increased in size, we need to set any new bits // to the fill value. if (new_len > old_len) { // set the padding bits in the old last item to 1 if (fill and old_masks > 0) { const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len; const old_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, old_padding_bits); self.masks[old_masks - 1] |= ~old_mask; } // fill in any new masks if (new_masks > old_masks) { const fill_value = std.math.boolMask(MaskInt, fill); std.mem.set(MaskInt, self.masks[old_masks..new_masks], fill_value); } } // Zero out the padding bits if (new_len > 0) { const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len; const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); self.masks[new_masks - 1] &= last_item_mask; } // And finally, save the new length. self.bit_length = new_len; } /// deinitializes the array and releases its memory. /// The passed allocator must be the same one used for /// init* or resize in the past. pub fn deinit(self: *Self, allocator: Allocator) void { self.resize(allocator, 0, false) catch unreachable; } /// Creates a duplicate of this bit set, using the new allocator. pub fn clone(self: *const Self, new_allocator: Allocator) !Self { const num_masks = numMasks(self.bit_length); var copy = Self{}; try copy.resize(new_allocator, self.bit_length, false); std.mem.copy(MaskInt, copy.masks[0..num_masks], self.masks[0..num_masks]); return copy; } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { return self.bit_length; } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { assert(index < self.bit_length); return (self.masks[maskIndex(index)] & maskBit(index)) != 0; } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { const num_masks = (self.bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt); var total: usize = 0; for (self.masks[0..num_masks]) |mask| { // Note: This is where we depend on padding bits being zero total += @popCount(MaskInt, mask); } return total; } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. pub fn setValue(self: *Self, index: usize, value: bool) void { assert(index < self.bit_length); const bit = maskBit(index); const mask_index = maskIndex(index); const new_bit = bit & std.math.boolMask(MaskInt, value); self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit; } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { assert(index < self.bit_length); self.masks[maskIndex(index)] |= maskBit(index); } /// Changes the value of all bits in the specified range to /// match the passed boolean. 
pub fn setRangeValue(self: *Self, range: Range, value: bool) void { assert(range.end <= self.bit_length); assert(range.start <= range.end); if (range.start == range.end) return; const start_mask_index = maskIndex(range.start); const start_bit = @truncate(ShiftInt, range.start); const end_mask_index = maskIndex(range.end); const end_bit = @truncate(ShiftInt, range.end); if (start_mask_index == end_mask_index) { var mask1 = std.math.boolMask(MaskInt, true) << start_bit; var mask2 = std.math.boolMask(MaskInt, true) >> (@bitSizeOf(MaskInt) - 1) - (end_bit - 1); self.masks[start_mask_index] &= ~(mask1 & mask2); mask1 = std.math.boolMask(MaskInt, value) << start_bit; mask2 = std.math.boolMask(MaskInt, value) >> (@bitSizeOf(MaskInt) - 1) - (end_bit - 1); self.masks[start_mask_index] |= mask1 & mask2; } else { var bulk_mask_index: usize = undefined; if (start_bit > 0) { self.masks[start_mask_index] = (self.masks[start_mask_index] & ~(std.math.boolMask(MaskInt, true) << start_bit)) | (std.math.boolMask(MaskInt, value) << start_bit); bulk_mask_index = start_mask_index + 1; } else { bulk_mask_index = start_mask_index; } while (bulk_mask_index < end_mask_index) : (bulk_mask_index += 1) { self.masks[bulk_mask_index] = std.math.boolMask(MaskInt, value); } if (end_bit > 0) { self.masks[end_mask_index] = (self.masks[end_mask_index] & (std.math.boolMask(MaskInt, true) << end_bit)) | (std.math.boolMask(MaskInt, value) >> ((@bitSizeOf(MaskInt) - 1) - (end_bit - 1))); } } } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { assert(index < self.bit_length); self.masks[maskIndex(index)] &= ~maskBit(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { assert(index < self.bit_length); self.masks[maskIndex(index)] ^= maskBit(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. Both sets must have the /// same bit_length. pub fn toggleSet(self: *Self, toggles: Self) void { assert(toggles.bit_length == self.bit_length); const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks]) |*mask, i| { mask.* ^= toggles.masks[i]; } } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { const bit_length = self.bit_length; // avoid underflow if bit_length is zero if (bit_length == 0) return; const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks]) |*mask| { mask.* = ~mask.*; } const padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length; const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); self.masks[num_masks - 1] &= last_item_mask; } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. /// The two sets must both be the same bit_length. pub fn setUnion(self: *Self, other: Self) void { assert(other.bit_length == self.bit_length); const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks]) |*mask, i| { mask.* |= other.masks[i]; } } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. /// The two sets must both be the same bit_length. 
pub fn setIntersection(self: *Self, other: Self) void { assert(other.bit_length == self.bit_length); const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks]) |*mask, i| { mask.* &= other.masks[i]; } } /// Finds the index of the first set bit. /// If no bits are set, returns null. pub fn findFirstSet(self: Self) ?usize { var offset: usize = 0; var mask = self.masks; while (offset < self.bit_length) { if (mask[0] != 0) break; mask += 1; offset += @bitSizeOf(MaskInt); } else return null; return offset + @ctz(MaskInt, mask[0]); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. pub fn toggleFirstSet(self: *Self) ?usize { var offset: usize = 0; var mask = self.masks; while (offset < self.bit_length) { if (mask[0] != 0) break; mask += 1; offset += @bitSizeOf(MaskInt); } else return null; const index = @ctz(MaskInt, mask[0]); mask[0] &= (mask[0] - 1); return offset + index; } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. Resizing the underlying /// bit set invalidates the iterator. pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { const num_masks = numMasks(self.bit_length); const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length; const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); return Iterator(options).init(self.masks[0..num_masks], last_item_mask); } pub fn Iterator(comptime options: IteratorOptions) type { return BitSetIterator(MaskInt, options); } fn maskBit(index: usize) MaskInt { return @as(MaskInt, 1) << @truncate(ShiftInt, index); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index); } fn numMasks(bit_length: usize) usize { return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt); } }; /// A bit set with runtime known size, backed by an allocated slice /// of usize. Thin wrapper around DynamicBitSetUnmanaged which keeps /// track of the allocator instance. pub const DynamicBitSet = struct { const Self = @This(); /// The integer type used to represent a mask in this bit set pub const MaskInt = usize; /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); /// The allocator used by this bit set allocator: Allocator, /// The number of valid items in this bit set unmanaged: DynamicBitSetUnmanaged = .{}, /// Creates a bit set with no elements present. pub fn initEmpty(allocator: Allocator, bit_length: usize) !Self { return Self{ .unmanaged = try DynamicBitSetUnmanaged.initEmpty(allocator, bit_length), .allocator = allocator, }; } /// Creates a bit set with all elements present. pub fn initFull(allocator: Allocator, bit_length: usize) !Self { return Self{ .unmanaged = try DynamicBitSetUnmanaged.initFull(allocator, bit_length), .allocator = allocator, }; } /// Resizes to a new length. If the new length is larger /// than the old length, fills any added bits with `fill`. pub fn resize(self: *@This(), new_len: usize, fill: bool) !void { try self.unmanaged.resize(self.allocator, new_len, fill); } /// deinitializes the array and releases its memory. /// The passed allocator must be the same one used for /// init* or resize in the past. 
pub fn deinit(self: *Self) void { self.unmanaged.deinit(self.allocator); } /// Creates a duplicate of this bit set, using the new allocator. pub fn clone(self: *const Self, new_allocator: Allocator) !Self { return Self{ .unmanaged = try self.unmanaged.clone(new_allocator), .allocator = new_allocator, }; } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { return self.unmanaged.capacity(); } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { return self.unmanaged.isSet(index); } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { return self.unmanaged.count(); } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. pub fn setValue(self: *Self, index: usize, value: bool) void { self.unmanaged.setValue(index, value); } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { self.unmanaged.set(index); } /// Changes the value of all bits in the specified range to /// match the passed boolean. pub fn setRangeValue(self: *Self, range: Range, value: bool) void { self.unmanaged.setRangeValue(range, value); } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { self.unmanaged.unset(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { self.unmanaged.toggle(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. Both sets must have the /// same bit_length. pub fn toggleSet(self: *Self, toggles: Self) void { self.unmanaged.toggleSet(toggles.unmanaged); } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { self.unmanaged.toggleAll(); } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. /// The two sets must both be the same bit_length. pub fn setUnion(self: *Self, other: Self) void { self.unmanaged.setUnion(other.unmanaged); } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. /// The two sets must both be the same bit_length. pub fn setIntersection(self: *Self, other: Self) void { self.unmanaged.setIntersection(other.unmanaged); } /// Finds the index of the first set bit. /// If no bits are set, returns null. pub fn findFirstSet(self: Self) ?usize { return self.unmanaged.findFirstSet(); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. pub fn toggleFirstSet(self: *Self) ?usize { return self.unmanaged.toggleFirstSet(); } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. Resizing the underlying /// bit set invalidates the iterator. 
pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { return self.unmanaged.iterator(options); } pub const Iterator = DynamicBitSetUnmanaged.Iterator; }; /// Options for configuring an iterator over a bit set pub const IteratorOptions = struct { /// determines which bits should be visited kind: Type = .set, /// determines the order in which bit indices should be visited direction: Direction = .forward, pub const Type = enum { /// visit indexes of set bits set, /// visit indexes of unset bits unset, }; pub const Direction = enum { /// visit indices in ascending order forward, /// visit indices in descending order. /// Note that this may be slightly more expensive than forward iteration. reverse, }; }; // The iterator is reusable between several bit set types fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) type { const ShiftInt = std.math.Log2Int(MaskInt); const kind = options.kind; const direction = options.direction; return struct { const Self = @This(); // all bits which have not yet been iterated over bits_remain: MaskInt, // all words which have not yet been iterated over words_remain: []const MaskInt, // the offset of the current word bit_offset: usize, // the mask of the last word last_word_mask: MaskInt, fn init(masks: []const MaskInt, last_word_mask: MaskInt) Self { if (masks.len == 0) { return Self{ .bits_remain = 0, .words_remain = &[_]MaskInt{}, .last_word_mask = last_word_mask, .bit_offset = 0, }; } else { var result = Self{ .bits_remain = 0, .words_remain = masks, .last_word_mask = last_word_mask, .bit_offset = if (direction == .forward) 0 else (masks.len - 1) * @bitSizeOf(MaskInt), }; result.nextWord(true); return result; } } /// Returns the index of the next unvisited set bit /// in the bit set, in ascending order. pub fn next(self: *Self) ?usize { while (self.bits_remain == 0) { if (self.words_remain.len == 0) return null; self.nextWord(false); switch (direction) { .forward => self.bit_offset += @bitSizeOf(MaskInt), .reverse => self.bit_offset -= @bitSizeOf(MaskInt), } } switch (direction) { .forward => { const next_index = @ctz(MaskInt, self.bits_remain) + self.bit_offset; self.bits_remain &= self.bits_remain - 1; return next_index; }, .reverse => { const leading_zeroes = @clz(MaskInt, self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1; self.bits_remain &= no_top_bit_mask; return top_bit + self.bit_offset; }, } } // Load the next word. Don't call this if there // isn't a next word. If the next word is the // last word, mask off the padding bits so we // don't visit them. inline fn nextWord(self: *Self, comptime is_first_word: bool) void { var word = switch (direction) { .forward => self.words_remain[0], .reverse => self.words_remain[self.words_remain.len - 1], }; switch (kind) { .set => {}, .unset => { word = ~word; if ((direction == .reverse and is_first_word) or (direction == .forward and self.words_remain.len == 1)) { word &= self.last_word_mask; } }, } switch (direction) { .forward => self.words_remain = self.words_remain[1..], .reverse => self.words_remain.len -= 1, } self.bits_remain = word; } }; } /// A range of indices within a bitset. pub const Range = struct { /// The index of the first bit of interest. start: usize, /// The index immediately after the last bit of interest. 
end: usize, }; // ---------------- Tests ----------------- const testing = std.testing; fn testBitSet(a: anytype, b: anytype, len: usize) !void { try testing.expectEqual(len, a.capacity()); try testing.expectEqual(len, b.capacity()); { var i: usize = 0; while (i < len) : (i += 1) { a.setValue(i, i & 1 == 0); b.setValue(i, i & 2 == 0); } } try testing.expectEqual((len + 1) / 2, a.count()); try testing.expectEqual((len + 3) / 4 + (len + 2) / 4, b.count()); { var iter = a.iterator(.{}); var i: usize = 0; while (i < len) : (i += 2) { try testing.expectEqual(@as(?usize, i), iter.next()); } try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); } a.toggleAll(); { var iter = a.iterator(.{}); var i: usize = 1; while (i < len) : (i += 2) { try testing.expectEqual(@as(?usize, i), iter.next()); } try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); } { var iter = b.iterator(.{ .kind = .unset }); var i: usize = 2; while (i < len) : (i += 4) { try testing.expectEqual(@as(?usize, i), iter.next()); if (i + 1 < len) { try testing.expectEqual(@as(?usize, i + 1), iter.next()); } } try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); } { var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 != 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); } } a.setUnion(b.*); { var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 != 0 or i & 2 == 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); } i = len; var set = a.iterator(.{ .direction = .reverse }); var unset = a.iterator(.{ .kind = .unset, .direction = .reverse }); while (i > 0) { i -= 1; if (i & 1 != 0 or i & 2 == 0) { try testing.expectEqual(@as(?usize, i), set.next()); } else { try testing.expectEqual(@as(?usize, i), unset.next()); } } try testing.expectEqual(@as(?usize, null), set.next()); try testing.expectEqual(@as(?usize, null), set.next()); try testing.expectEqual(@as(?usize, null), set.next()); try testing.expectEqual(@as(?usize, null), unset.next()); try testing.expectEqual(@as(?usize, null), unset.next()); try testing.expectEqual(@as(?usize, null), unset.next()); } a.toggleSet(b.*); { try testing.expectEqual(len / 4, a.count()); var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 != 0 and i & 2 != 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); if (i & 1 == 0) { a.set(i); } else { a.unset(i); } } } a.setIntersection(b.*); { try testing.expectEqual((len + 3) / 4, a.count()); var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 == 0 and i & 2 == 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); } } a.toggleSet(a.*); { var iter = a.iterator(.{}); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(usize, 0), a.count()); } { var iter = a.iterator(.{ .direction = .reverse }); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(usize, 0), a.count()); } const test_bits = 
[_]usize{ 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 22, 31, 32, 63, 64, 66, 95, 127, 160, 192, 1000, }; for (test_bits) |i| { if (i < a.capacity()) { a.set(i); } } for (test_bits) |i| { if (i < a.capacity()) { try testing.expectEqual(@as(?usize, i), a.findFirstSet()); try testing.expectEqual(@as(?usize, i), a.toggleFirstSet()); } } try testing.expectEqual(@as(?usize, null), a.findFirstSet()); try testing.expectEqual(@as(?usize, null), a.toggleFirstSet()); try testing.expectEqual(@as(?usize, null), a.findFirstSet()); try testing.expectEqual(@as(?usize, null), a.toggleFirstSet()); try testing.expectEqual(@as(usize, 0), a.count()); a.setRangeValue(.{ .start = 0, .end = len }, false); try testing.expectEqual(@as(usize, 0), a.count()); a.setRangeValue(.{ .start = 0, .end = len }, true); try testing.expectEqual(len, a.count()); a.setRangeValue(.{ .start = 0, .end = len }, false); a.setRangeValue(.{ .start = 0, .end = 0 }, true); try testing.expectEqual(@as(usize, 0), a.count()); a.setRangeValue(.{ .start = len, .end = len }, true); try testing.expectEqual(@as(usize, 0), a.count()); if (len >= 1) { a.setRangeValue(.{ .start = 0, .end = len }, false); a.setRangeValue(.{ .start = 0, .end = 1 }, true); try testing.expectEqual(@as(usize, 1), a.count()); try testing.expect(a.isSet(0)); a.setRangeValue(.{ .start = 0, .end = len }, false); a.setRangeValue(.{ .start = 0, .end = len - 1 }, true); try testing.expectEqual(len - 1, a.count()); try testing.expect(!a.isSet(len - 1)); a.setRangeValue(.{ .start = 0, .end = len }, false); a.setRangeValue(.{ .start = 1, .end = len }, true); try testing.expectEqual(@as(usize, len - 1), a.count()); try testing.expect(!a.isSet(0)); a.setRangeValue(.{ .start = 0, .end = len }, false); a.setRangeValue(.{ .start = len - 1, .end = len }, true); try testing.expectEqual(@as(usize, 1), a.count()); try testing.expect(a.isSet(len - 1)); if (len >= 4) { a.setRangeValue(.{ .start = 0, .end = len }, false); a.setRangeValue(.{ .start = 1, .end = len - 2 }, true); try testing.expectEqual(@as(usize, len - 3), a.count()); try testing.expect(!a.isSet(0)); try testing.expect(a.isSet(1)); try testing.expect(a.isSet(len - 3)); try testing.expect(!a.isSet(len - 2)); try testing.expect(!a.isSet(len - 1)); } } } fn testStaticBitSet(comptime Set: type) !void { var a = Set.initEmpty(); var b = Set.initFull(); try testing.expectEqual(@as(usize, 0), a.count()); try testing.expectEqual(@as(usize, Set.bit_length), b.count()); try testBitSet(&a, &b, Set.bit_length); } test "IntegerBitSet" { if (@import("builtin").zig_backend != .stage1) return error.SkipZigTest; // TODO try testStaticBitSet(IntegerBitSet(0)); try testStaticBitSet(IntegerBitSet(1)); try testStaticBitSet(IntegerBitSet(2)); try testStaticBitSet(IntegerBitSet(5)); try testStaticBitSet(IntegerBitSet(8)); try testStaticBitSet(IntegerBitSet(32)); try testStaticBitSet(IntegerBitSet(64)); try testStaticBitSet(IntegerBitSet(127)); } test "ArrayBitSet" { if (@import("builtin").zig_backend != .stage1) return error.SkipZigTest; // TODO if (@import("builtin").cpu.arch == .aarch64) { // https://github.com/ziglang/zig/issues/9879 return error.SkipZigTest; } inline for (.{ 0, 1, 2, 31, 32, 33, 63, 64, 65, 254, 500, 3000 }) |size| { try testStaticBitSet(ArrayBitSet(u8, size)); try testStaticBitSet(ArrayBitSet(u16, size)); try testStaticBitSet(ArrayBitSet(u32, size)); try testStaticBitSet(ArrayBitSet(u64, size)); try testStaticBitSet(ArrayBitSet(u128, size)); } } test "DynamicBitSetUnmanaged" { if (@import("builtin").zig_backend != .stage1) return 
error.SkipZigTest; // TODO const allocator = std.testing.allocator; var a = try DynamicBitSetUnmanaged.initEmpty(allocator, 300); try testing.expectEqual(@as(usize, 0), a.count()); a.deinit(allocator); a = try DynamicBitSetUnmanaged.initEmpty(allocator, 0); defer a.deinit(allocator); for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| { const old_len = a.capacity(); var tmp = try a.clone(allocator); defer tmp.deinit(allocator); try testing.expectEqual(old_len, tmp.capacity()); var i: usize = 0; while (i < old_len) : (i += 1) { try testing.expectEqual(a.isSet(i), tmp.isSet(i)); } a.toggleSet(a); // zero a tmp.toggleSet(tmp); try a.resize(allocator, size, true); try tmp.resize(allocator, size, false); if (size > old_len) { try testing.expectEqual(size - old_len, a.count()); } else { try testing.expectEqual(@as(usize, 0), a.count()); } try testing.expectEqual(@as(usize, 0), tmp.count()); var b = try DynamicBitSetUnmanaged.initFull(allocator, size); defer b.deinit(allocator); try testing.expectEqual(@as(usize, size), b.count()); try testBitSet(&a, &b, size); } } test "DynamicBitSet" { if (@import("builtin").zig_backend != .stage1) return error.SkipZigTest; // TODO const allocator = std.testing.allocator; var a = try DynamicBitSet.initEmpty(allocator, 300); try testing.expectEqual(@as(usize, 0), a.count()); a.deinit(); a = try DynamicBitSet.initEmpty(allocator, 0); defer a.deinit(); for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| { const old_len = a.capacity(); var tmp = try a.clone(allocator); defer tmp.deinit(); try testing.expectEqual(old_len, tmp.capacity()); var i: usize = 0; while (i < old_len) : (i += 1) { try testing.expectEqual(a.isSet(i), tmp.isSet(i)); } a.toggleSet(a); // zero a tmp.toggleSet(tmp); // zero tmp try a.resize(size, true); try tmp.resize(size, false); if (size > old_len) { try testing.expectEqual(size - old_len, a.count()); } else { try testing.expectEqual(@as(usize, 0), a.count()); } try testing.expectEqual(@as(usize, 0), tmp.count()); var b = try DynamicBitSet.initFull(allocator, size); defer b.deinit(); try testing.expectEqual(@as(usize, size), b.count()); try testBitSet(&a, &b, size); } } test "StaticBitSet" { try testing.expectEqual(IntegerBitSet(0), StaticBitSet(0)); try testing.expectEqual(IntegerBitSet(5), StaticBitSet(5)); try testing.expectEqual(IntegerBitSet(@bitSizeOf(usize)), StaticBitSet(@bitSizeOf(usize))); try testing.expectEqual(ArrayBitSet(usize, @bitSizeOf(usize) + 1), StaticBitSet(@bitSizeOf(usize) + 1)); try testing.expectEqual(ArrayBitSet(usize, 500), StaticBitSet(500)); }
lib/std/bit_set.zig
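A short usage sketch for the dynamic variants defined above (not part of the standard library file; it assumes it sits alongside those declarations and uses only the public API shown there):

test "DynamicBitSet usage sketch" {
    const allocator = std.testing.allocator;

    var bits = try DynamicBitSet.initEmpty(allocator, 100);
    defer bits.deinit();

    // Mark one bit and the half-open range [10, 20).
    bits.set(3);
    bits.setRangeValue(.{ .start = 10, .end = 20 }, true);

    try std.testing.expect(bits.isSet(3));
    try std.testing.expectEqual(@as(usize, 11), bits.count());

    // Default iterator options visit indices of set bits in ascending order.
    var it = bits.iterator(.{});
    try std.testing.expectEqual(@as(?usize, 3), it.next());
    try std.testing.expectEqual(@as(?usize, 10), it.next());
}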
const std = @import("std"); const c = @import("c.zig"); const intToError = @import("error.zig").intToError; const Error = @import("error.zig").Error; const Stroker = @import("Stroker.zig"); const Library = @import("freetype.zig").Library; const RenderMode = @import("freetype.zig").RenderMode; const SizeMetrics = @import("freetype.zig").SizeMetrics; const Matrix = @import("types.zig").Matrix; const BBox = @import("types.zig").BBox; const Outline = @import("image.zig").Outline; const GlyphFormat = @import("image.zig").GlyphFormat; const Vector = @import("image.zig").Vector; const Bitmap = @import("image.zig").Bitmap; const Glyph = @This(); pub const BBoxMode = enum(u2) { // https://freetype.org/freetype2/docs/reference/ft2-glyph_management.html#ft_glyph_bbox_mode // both `unscaled` and `subpixel` are set to 0 unscaled_or_subpixels = c.FT_GLYPH_BBOX_UNSCALED, gridfit = c.FT_GLYPH_BBOX_GRIDFIT, truncate = c.FT_GLYPH_BBOX_TRUNCATE, pixels = c.FT_GLYPH_BBOX_PIXELS, }; handle: c.FT_Glyph, pub fn deinit(self: Glyph) void { c.FT_Done_Glyph(self.handle); } pub fn newGlyph(library: Library, glyph_format: GlyphFormat) Glyph { var g: c.FT_Glyph = undefined; return .{ .handle = c.FT_New_Glyph(library.handle, @enumToInt(glyph_format), &g), }; } pub fn copy(self: Glyph) Error!Glyph { var g: c.FT_Glyph = undefined; try intToError(c.FT_Glyph_Copy(self.handle, &g)); return Glyph{ .handle = g }; } pub fn transform(self: Glyph, matrix: ?Matrix, delta: ?Vector) Error!void { try intToError(c.FT_Glyph_Transform(self.handle, if (matrix) |m| &m else null, if (delta) |d| &d else null)); } pub fn getCBox(self: Glyph, bbox_mode: BBoxMode) BBox { var b: BBox = undefined; c.FT_Glyph_Get_CBox(self.handle, @enumToInt(bbox_mode), &b); return b; } pub fn toBitmapGlyph(self: *Glyph, render_mode: RenderMode, origin: ?Vector) Error!BitmapGlyph { try intToError(c.FT_Glyph_To_Bitmap(&self.handle, @enumToInt(render_mode), if (origin) |o| &o else null, 1)); return BitmapGlyph{ .handle = @ptrCast(c.FT_BitmapGlyph, self.handle) }; } pub fn copyBitmapGlyph(self: *Glyph, render_mode: RenderMode, origin: ?Vector) Error!BitmapGlyph { try intToError(c.FT_Glyph_To_Bitmap(&self.handle, @enumToInt(render_mode), if (origin) |o| &o else null, 0)); return BitmapGlyph{ .handle = @ptrCast(c.FT_BitmapGlyph, self.handle) }; } pub fn castBitmapGlyph(self: Glyph) Error!BitmapGlyph { return BitmapGlyph{ .handle = @ptrCast(c.FT_BitmapGlyph, self.handle) }; } pub fn castOutlineGlyph(self: Glyph) Error!OutlineGlyph { return OutlineGlyph{ .handle = @ptrCast(c.FT_OutlineGlyph, self.handle) }; } pub fn castSvgGlyph(self: Glyph) Error!SvgGlyph { return SvgGlyph{ .handle = @ptrCast(c.FT_SvgGlyph, self.handle) }; } pub fn stroke(self: *Glyph, stroker: Stroker) Error!void { try intToError(c.FT_Glyph_Stroke(&self.handle, stroker.handle, 0)); } pub fn strokeBorder(self: *Glyph, stroker: Stroker, inside: bool) Error!void { try intToError(c.FT_Glyph_StrokeBorder(&self.handle, stroker.handle, if (inside) 1 else 0, 0)); } pub fn format(self: Glyph) GlyphFormat { return @intToEnum(GlyphFormat, self.handle.*.format); } pub fn advanceX(self: Glyph) isize { return self.handle.*.advance.x; } pub fn advanceY(self: Glyph) isize { return self.handle.*.advance.y; } const SvgGlyph = struct { handle: c.FT_SvgGlyph, pub fn deinit(self: SvgGlyph) void { c.FT_Done_Glyph(@ptrCast(c.FT_Glyph, self.handle)); } pub fn svgBuffer(self: SvgGlyph) []const u8 { return self.handle.*.svg_document[0..self.svgBufferLen()]; } pub fn svgBufferLen(self: SvgGlyph) usize { return 
self.handle.*.svg_document_length; } pub fn glyphIndex(self: SvgGlyph) usize { return self.handle.*.glyph_index; } pub fn metrics(self: SvgGlyph) SizeMetrics { return self.handle.*.metrics; } pub fn unitsPerEM(self: SvgGlyph) u16 { return self.handle.*.units_per_EM; } pub fn startGlyphID(self: SvgGlyph) u16 { return self.handle.*.start_glyph_id; } pub fn endGlyphID(self: SvgGlyph) u16 { return self.handle.*.end_glyph_id; } pub fn transform(self: SvgGlyph) Matrix { return self.handle.*.transform; } pub fn delta(self: SvgGlyph) Vector { return self.handle.*.delta; } }; pub const BitmapGlyph = struct { handle: c.FT_BitmapGlyph, pub fn deinit(self: BitmapGlyph) void { c.FT_Done_Glyph(@ptrCast(c.FT_Glyph, self.handle)); } pub fn left(self: BitmapGlyph) i32 { return self.handle.*.left; } pub fn top(self: BitmapGlyph) i32 { return self.handle.*.top; } pub fn bitmap(self: BitmapGlyph) Bitmap { return .{ .handle = self.handle.*.bitmap }; } }; pub const OutlineGlyph = struct { handle: c.FT_OutlineGlyph, pub fn deinit(self: OutlineGlyph) void { c.FT_Done_Glyph(@ptrCast(c.FT_Glyph, self.handle)); } pub fn outline(self: OutlineGlyph) Outline { return .{ .handle = &self.handle.*.outline }; } };
freetype/src/freetype/Glyph.zig
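A detail worth noting in the bindings above: toBitmapGlyph passes FreeType's destroy flag as 1, so it consumes the original glyph handle, while copyBitmapGlyph passes 0 and leaves it alive. A minimal sketch of the two call patterns (hypothetical helper functions, not part of the bindings; the glyph and render mode are assumed to come from the caller):

fn renderConsuming(glyph: *Glyph, mode: RenderMode) Error!BitmapGlyph {
    // FT_Glyph_To_Bitmap is invoked with destroy = 1, so `glyph` is consumed;
    // the caller only owns (and must deinit) the returned bitmap glyph.
    return glyph.toBitmapGlyph(mode, null);
}

fn renderKeeping(glyph: *Glyph, mode: RenderMode) Error!BitmapGlyph {
    // destroy = 0: the original `glyph` stays valid and still needs its own
    // deinit(); the returned bitmap glyph is an independent object.
    return glyph.copyBitmapGlyph(mode, null);
}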
const std = @import("std"); const SDL = @import("sdl2"); const target_os = @import("builtin").os; pub fn main() !void { try SDL.init(.{ .video = true, .events = true, .audio = true, }); defer SDL.quit(); var window = try SDL.createWindow( "SDL.zig Basic Demo", .{ .centered = {} }, .{ .centered = {} }, 640, 480, .{ .shown = true }, ); defer window.destroy(); var renderer = try SDL.createRenderer(window, null, .{ .accelerated = true }); defer renderer.destroy(); mainLoop: while (true) { while (SDL.pollEvent()) |ev| { switch (ev) { .quit => { break :mainLoop; }, .key_down => |key| { switch (key.scancode) { .escape => break :mainLoop, else => std.log.info("key pressed: {}\n", .{key.scancode}), } }, else => {}, } } try renderer.setColorRGB(0, 0, 0); try renderer.clear(); try renderer.setColor(SDL.Color.parse("#F7A41D") catch unreachable); try renderer.drawRect(SDL.Rectangle{ .x = 270, .y = 215, .width = 100, .height = 50, }); if (target_os.tag != .linux) { // Ubuntu CI doesn't have this function available yet try renderer.drawGeometry( null, &[_]SDL.Vertex{ .{ .position = .{ .x = 400, .y = 150 }, .color = SDL.Color.rgb(255, 0, 0), }, .{ .position = .{ .x = 350, .y = 200 }, .color = SDL.Color.rgb(0, 0, 255), }, .{ .position = .{ .x = 450, .y = 200 }, .color = SDL.Color.rgb(0, 255, 0), }, }, null, ); } renderer.present(); } }
examples/wrapper.zig
const std = @import("std"); const ELF = @import("../elf.zig"); const phdr = @import("../data-structures/phdr.zig"); const shdr = @import("../data-structures/shdr.zig"); const ehdr = @import("../data-structures/ehdr.zig"); const sym_n_rela = @import("../data-structures/sym-n-rela.zig"); pub fn getSyms( a: ELF.ELF, alloc: std.mem.Allocator, ) !std.ArrayList(sym_n_rela.Syms) { var symtabList = std.ArrayList(shdr.Shdr).init(alloc); var strtab: ?shdr.Shdr = null; const stream = a.file.reader(); var dynstr: ?shdr.Shdr = null; for (a.shdrs.items) |section| { if (section.shtype == .SYMTAB) { try symtabList.append(section); } if (section.shtype == .STRTAB and (std.mem.eql(u8, section.name, ".strtab"))) { strtab = section; } } var list = std.ArrayList(sym_n_rela.Syms).init(alloc); if (!a.is32) { for (symtabList.items) |section| { var total_syms = section.size / @sizeOf(std.elf.Elf64_Sym); try a.file.seekableStream().seekTo(section.offset); var sym: std.elf.Elf64_Sym = undefined; //[total_syms]elf.Elf64_Sym = undefined; var i: usize = 0; while (i <= total_syms - 1) : (i = i + 1) { var curr_offset = section.offset + (@sizeOf(std.elf.Elf64_Sym) * i); try a.file.seekableStream().seekTo(curr_offset); try stream.readNoEof(std.mem.asBytes(&sym)); var syms2: sym_n_rela.Syms = undefined; syms2.num = i; syms2.value = sym.st_value; syms2.size = sym.st_size; syms2.symtype = @intToEnum(sym_n_rela.Symtype, (ELF32_ST_TYPE(sym.st_info))); syms2.bind = @intToEnum(sym_n_rela.Bind, (ELF32_ST_BIND(sym.st_info))); syms2.visibility = @intToEnum(sym_n_rela.Visiblity, sym.st_other); syms2.index = sym.st_shndx; syms2.name = blk: { if (section.shtype == .SYMTAB) { try a.file.seekableStream().seekTo(strtab.?.offset + sym.st_name); const c = (try stream.readUntilDelimiterOrEofAlloc(alloc, '\x00', 9000000000000000)) orelse "no name"; break :blk c; } else { try a.file.seekableStream().seekTo(dynstr.?.offset + sym.st_name); const c = (try stream.readUntilDelimiterOrEofAlloc(alloc, '\x00', 9000000000000000)) orelse "no name"; break :blk c; } }; try list.append(syms2); //try std.io.getStdOut().writer().print("{s}\n", .{syms2}); } } return list; } else { for (symtabList.items) |section| { var total_syms = section.size / @sizeOf(std.elf.Elf32_Sym); try a.file.seekableStream().seekTo(section.offset); var sym: std.elf.Elf32_Sym = undefined; //[total_syms]elf.Elf64_Sym = undefined; var i: usize = 0; while (i <= total_syms - 1) : (i = i + 1) { var curr_offset = section.offset + (@sizeOf(std.elf.Elf32_Sym) * i); try a.file.seekableStream().seekTo(curr_offset); try stream.readNoEof(std.mem.asBytes(&sym)); //const sym = std.mem.bytesToValue(elf.Elf64_Sym, &section_data[curr_offset .. 
curr_offset + @sizeOf(elf.Elf64_Sym)]); var syms2: sym_n_rela.Syms = undefined; syms2.num = i; syms2.value = sym.st_value; syms2.size = sym.st_size; syms2.symtype = @intToEnum(sym_n_rela.Symtype, (ELF32_ST_TYPE(sym.st_info))); syms2.bind = @intToEnum(sym_n_rela.Bind, (ELF32_ST_BIND(sym.st_info))); syms2.visibility = @intToEnum(sym_n_rela.Visiblity, sym.st_other); syms2.index = sym.st_shndx; syms2.name = blk: { if (section.shtype == .SYMTAB) { try a.file.seekableStream().seekTo(strtab.?.offset + sym.st_name); const c = (try stream.readUntilDelimiterOrEofAlloc(alloc, '\x00', 9000000000000000)) orelse "no name"; break :blk c; } else { try a.file.seekableStream().seekTo(dynstr.?.offset + sym.st_name); const c = (try stream.readUntilDelimiterOrEofAlloc(alloc, '\x00', 9000000000000000)) orelse "no name"; break :blk c; } }; try list.append(syms2); //try std.io.getStdOut().writer().print("{s}\n", .{syms2}); } } return list; } } pub fn ELF32_ST_BIND(val: u8) u8 { return val >> 4; } pub fn ELF32_ST_TYPE(val: anytype) c_int { return (@as(c_int, val) & @as(c_int, 0xf)); } pub fn ELF32_ST_INFO(bind: anytype, type_1: anytype) c_int { return (bind << @as(c_int, 4)) + (type_1 & @as(c_int, 0xf)); } // PARSER IS 100% COMPLETE!!!! // writer needs some work // then i need to connect everything const returnSym = union { bit32: std.elf.Elf32_Sym, bit64: std.elf.Elf64_Sym, }; pub fn fixRawSyms(a: ELF.ELF, alloc: std.mem.Allocator) ![]returnSym { var shstrtab_offset: usize = undefined; for (a.shdrs.items) |s| { if (s.sh_type == .STRTAB) { shstrtab_offset = s.sh_offset; break; } } if (a.is32) { var c = std.mem.ArrayList(std.elf.Elf32_Sym).init(alloc); defer c.deinit(); var arraylist = std.mem.ArrayList(u8).init(alloc); defer arraylist.deinit(); try arraylist.append("\x00"); for (a.symbols) |sym| { if (sym.name == "") { continue; } try arraylist.appendSlice(sym.name); try arraylist.append("\x00"); } for (a.symbols) |sym| { var b: std.elf.Elf32_Sym = undefined; if (std.mem.eql(u8, sym.name, "")) { b.st_name = 0; } else { b.st_name = try std.mem.indexOf(u8, arraylist.items, sym.name); } b.st_size = @intCast(std.elf.Elf32_Word, sym.size); b.st_info = ELF32_ST_INFO(sym.bind, sym.symtype); b.st_value = @intCast(std.elf.Elf32_Addr, sym.value); b.st_other = @intCast(u8, sym.visibility); try c.append(b); } try a.file.seekableStream().seekTo(shstrtab_offset); try a.file.writer().writeAll(arraylist.items); return alloc.dupe(std.elf.Elf32_Sym, arraylist.items); } else { // fix this var c = std.mem.ArrayList(std.elf.Elf64_Sym).init(alloc); defer c.deinit(); var arraylist = std.mem.ArrayList(u8).init(alloc); defer arraylist.deinit(); try arraylist.append("\x00"); for (a.symbols) |sym| { if (sym.name == "") { continue; } try arraylist.appendSlice(sym.name); try arraylist.append("\x00"); } for (a.symbols) |sym| { var b: std.elf.Elf64_Sym = undefined; if (std.mem.eql(u8, sym.name, "")) { b.st_name = 0; } else { b.st_name = try std.mem.indexOf(u8, arraylist.items, sym.name); } b.st_size = @intCast(std.elf.Elf64_Xword, sym.size); b.st_info = ELF32_ST_INFO(sym.bind, sym.symtype); b.st_value = @intCast(std.elf.Elf64_Addr, sym.value); b.st_other = @intCast(u8, sym.visibility); try c.append(b); } try a.file.seekableStream().seekTo(shstrtab_offset); try a.file.writer().writeAll(arraylist.items); return alloc.dupe(std.elf.Elf64_Sym, arraylist.items); } } // idk how to write and i prolly wont try for now
src/functions/sym-n-rela.zig
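The st_info byte handled by the three helpers above packs a symbol's binding in the high nibble and its type in the low nibble, mirroring the classic ELF macros. A self-contained round-trip sketch (STB_GLOBAL = 1 and STT_FUNC = 2 are the standard ELF constants; nothing here depends on the parser above):

const std = @import("std");

test "st_info nibble packing round-trip" {
    const STB_GLOBAL: u8 = 1; // binding lives in the high nibble
    const STT_FUNC: u8 = 2; // type lives in the low nibble

    // Pack, as in ELF32_ST_INFO: info = (bind << 4) + (type & 0xf).
    const info: u8 = (STB_GLOBAL << 4) + (STT_FUNC & 0xf);

    // Unpack, as in ELF32_ST_BIND / ELF32_ST_TYPE.
    try std.testing.expectEqual(STB_GLOBAL, info >> 4);
    try std.testing.expectEqual(STT_FUNC, info & 0xf);
}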
usingnamespace @import("raylib"); pub fn main() anyerror!void { // Initialization //-------------------------------------------------------------------------------------- const screenWidth = 800; const screenHeight = 450; InitWindow(screenWidth, screenHeight, "raylib [models] example - models loading"); // Define the camera to look into our 3d world var camera = Camera { .position = Vector3 { .x = 50.0, .y = 50.0, .z = 50.0 }, // Camera position .target = Vector3 { .x = 0.0, .y = 10.0, .z = 0.0 }, // Camera looking at point .up = Vector3 { .x = 0.0, .y = 1.0, .z = 0.0 }, // Camera up vector (rotation towards target) .fovy = 45.0, // Camera field-of-view Y .type = CameraType.CAMERA_PERSPECTIVE // Camera mode type }; var model = LoadModel("resources/models/castle.obj"); // Load model var texture = LoadTexture("resources/models/castle_diffuse.png"); // Load model texture model.materials[0].maps[@enumToInt(MAP_DIFFUSE)].texture = texture; // Set map diffuse texture //var position = Vector3 { .x = 0.0, .y = 0.0, .z = 0.0 }; // Set model position var bounds = MeshBoundingBox(model.meshes[0]); // Set model bounds // NOTE: bounds are calculated from the original size of the model, // if model is scaled on drawing, bounds must be also scaled camera.SetMode(CameraMode.CAMERA_FREE); // Set a free camera mode var selected = false; // Selected object flag SetTargetFPS(60); // Set our game to run at 60 frames-per-second //-------------------------------------------------------------------------------------- // Main game loop while (!WindowShouldClose()) // Detect window close button or ESC key { // Update //---------------------------------------------------------------------------------- camera.Update(); // Load new models/textures on drag&drop if (IsFileDropped()) { var count: c_int = 0; var droppedFiles = GetDroppedFiles(&count); if (count == 1) // Only support one file dropped { if (IsFileExtension(droppedFiles[0], ".obj") or IsFileExtension(droppedFiles[0], ".gltf") or IsFileExtension(droppedFiles[0], ".iqm")) // Model file formats supported { UnloadModel(model); // Unload previous model model = LoadModel(droppedFiles[0]); // Load new model model.materials[0].maps[@enumToInt(MAP_DIFFUSE)].texture = texture; // Set current map diffuse texture bounds = MeshBoundingBox(model.meshes[0]); // TODO: Move camera position from target enough distance to visualize model properly } else if (IsFileExtension(droppedFiles[0], ".png")) // Texture file formats supported { // Unload current model texture and load new one UnloadTexture(texture); texture = LoadTexture(droppedFiles[0]); model.materials[0].maps[@enumToInt(MAP_DIFFUSE)].texture = texture; } } ClearDroppedFiles(); // Clear internal buffers } // Select model on mouse click if (IsMouseButtonPressed(MouseButton.MOUSE_LEFT_BUTTON)) { // Check collision between ray and box //if (CheckCollisionRayBox(GetMouseRay(GetMousePosition(), camera), bounds)) //{ // selected = !selected; //} //else //{ // selected = false; //} } //---------------------------------------------------------------------------------- // Draw //---------------------------------------------------------------------------------- BeginDrawing(); ClearBackground(RAYWHITE); camera.Begin(); //DrawModel(model, position, 1.0, WHITE); // Draw 3d model with texture DrawGrid(20, 10.0); // Draw a grid if (selected) DrawBoundingBox(bounds, GREEN); // Draw selection box camera.End(); DrawText("Drag & drop model to load mesh/texture.", 10, GetScreenHeight() - 20, 10, DARKGRAY); if (selected) DrawText("MODEL 
SELECTED", GetScreenWidth() - 110, 10, 10, GREEN); DrawText("(c) Castle 3D model by <NAME>", screenWidth - 200, screenHeight - 20, 10, GRAY); DrawFPS(10, 10); EndDrawing(); //---------------------------------------------------------------------------------- } // De-Initialization //-------------------------------------------------------------------------------------- UnloadTexture(texture); // Unload texture UnloadModel(model); // Unload model CloseWindow(); // Close window and OpenGL context //-------------------------------------------------------------------------------------- }
examples/models/models_loading.zig
const std = @import("std"); pub fn ComptimeTable(comptime K: type, comptime V: type, comptime eq: fn (K, K) bool) type { return struct { pub fn set(comptime self: *@This(), comptime key: K, comptime value: V) void { if (self.lookupNode(key)) |node| { node.value = value; } else comptime { var old_self = self.*; //comptime allocator :^) self.* = @This(){ .key = key, .value = value, .next = &old_self, }; } } pub fn init(comptime key: K, comptime value: V) @This() { return @This(){ .key = key, .value = value, .next = null, }; } pub fn lookup(comptime self: @This(), comptime key: K) ?V { if (self.lookupNodeConst(key)) |node| return node.value else return null; } fn lookupNode(comptime self: *@This(), comptime key: K) ?*@This() { var table: @TypeOf(self.next) = self; while (table) |node| : (table = node.next) { if (eq(key, node.key)) return node; } return null; } fn lookupNodeConst(comptime self: @This(), comptime key: K) ?@This() { var newself = self; var table: @TypeOf(self.next) = &newself; while (table) |node| : (table = node.next) { if (eq(key, node.key)) return node.*; } return null; } key: K, value: V, next: ?*@This(), }; } fn strEq(a: []const u8, b: []const u8) bool { return std.mem.eql(u8, a, b); } fn typeEq(comptime a: type, comptime b: type) bool { return a == b; } test "stuff" { @setEvalBranchQuota(1000000); const table = comptime blk: { var table = ComptimeTable([]const u8, type, strEq).init("hello", i32); var i: u8 = 0; while (i < 40) : (i += 1) { table.set(&[1]u8{i}, u8); } table.set("there", type); table.set("cruel", ComptimeTable(type, type, typeEq)); table.set("world", i64); while (i < 40) : (i += 1) { table.set(&[1]u8{i}, u8); } break :blk table; }; std.debug.warn("\ntable[\"hello\"] = {}\n", .{@typeName(comptime table.lookup("hello").?)}); std.debug.warn("\ntable[20] = {}\n", .{@typeName(comptime table.lookup(&[1]u8{20}).?)}); std.debug.warn("table = {}\n", .{table}); }
comptime_table.zig
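A second, smaller usage sketch for the table above (hypothetical; it only exercises the set/lookup API already shown, with u32 values instead of types, and keeps all work inside a single comptime block):

test "ComptimeTable usage sketch" {
    comptime {
        var t = ComptimeTable([]const u8, u32, strEq).init("one", 1);
        t.set("two", 2);
        t.set("one", 11); // overwriting an existing key updates it in place
        std.debug.assert(t.lookup("two").? == 2);
        std.debug.assert(t.lookup("one").? == 11);
        std.debug.assert(t.lookup("absent") == null);
    }
}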
const utils = @import("utils"); const kernel = @import("root").kernel; const print = kernel.print; const pmemory = @import("memory.zig"); const util = @import("util.zig"); const interrupts = @import("interrupts.zig"); const segments = @import("segments.zig"); const timing = @import("timing.zig"); const pci = @import("pci.zig"); const acpica = @cImport({ @cInclude("georgios_acpica_wrapper.h"); }); var page_directory: [1024]u32 = undefined; var prev_page_directory: [1024]u32 = undefined; fn check_status(comptime what: []const u8, status: acpica.Status) void { if (status != acpica.Ok) { print.format(what ++ " returned {:x}\n", .{status}); @panic(what ++ " failed"); } } pub const TableHeader = packed struct { signature: [4]u8, size: u32, revision: u8, checksum: u8, // TODO: Zig Bug, stage1 panics on [6]u8 // oem_id: [6]u8, oem_id1: [4]u8, oem_id2: [2]u8, oem_table_id: [8]u8, oem_revision: u32, creator_id: [4]u8, creator_revision: u32, }; pub const Address = packed struct { pub const Kind = enum(u8) { Memory = 0, Io = 1, _, }; kind: Kind, register_width: u8, register_offset: u8, reserved: u8, address: u64, }; fn device_callback(obj: acpica.ACPI_HANDLE, level: acpica.Uint32, context: ?*anyopaque, return_value: [*c]?*anyopaque) callconv(.C) acpica.Status { _ = level; _ = context; _ = return_value; var devinfo: [*c]acpica.ACPI_DEVICE_INFO = undefined; check_status("acpi.device_callback: AcpiGetObjectInfo", acpica.AcpiGetObjectInfo(obj, &devinfo)); const name = @ptrCast(*[4]u8, &devinfo.*.Name); print.format(" - {}\n", .{name}); if (utils.memory_compare(name, "HPET")) { print.string(" - HPET Found\n"); } acpica.AcpiOsFree(devinfo); return acpica.Ok; } pub fn init() !void { print.string(" - Initializing ACPI Subsystem\n"); _ = utils.memory_set(utils.to_bytes(page_directory[0..]), 0); try pmemory.load_page_directory(&page_directory, &prev_page_directory); check_status("acpi.init: AcpiInitializeSubsystem", acpica.AcpiInitializeSubsystem()); check_status("acpi.init: AcpiInitializeTables", acpica.AcpiInitializeTables(null, 16, 0)); check_status("acpi.init: AcpiLoadTables", acpica.AcpiLoadTables()); // check_status("acpi.init: AcpiEnableSubsystem", // acpica.AcpiEnableSubsystem(acpica.ACPI_FULL_INITIALIZATION)); // check_status("acpi.init: AcpiInitializeObjects", // acpica.AcpiInitializeObjects(acpica.ACPI_FULL_INITIALIZATION)); // var devcb_rv: ?*anyopaque = null; // check_status("acpi.init: AcpiGetDevices", acpica.AcpiGetDevices( // null, device_callback, null, &devcb_rv)); var table: [*c]acpica.ACPI_TABLE_HEADER = undefined; var hpet: [4]u8 = "HPET".*; check_status("acpi.init: AcpiGetTable", acpica.AcpiGetTable(@ptrCast([*c]u8, &hpet), 1, @ptrCast([*c][*c]acpica.ACPI_TABLE_HEADER, &table))); print.format("{}\n", .{@ptrCast(*timing.HpetTable, table).*}); try pmemory.load_page_directory(&prev_page_directory, &page_directory); } pub fn power_off() void { const power_off_state: u8 = 5; print.string("Powering Off Now\n"); // interrupts.pic.allow_irq(0, false); util.disable_interrupts(); pmemory.load_page_directory(&page_directory, null) catch @panic("acpi.power_off: load_page_directory failed"); check_status("acpi.power_off: AcpiEnterSleepStatePrep", acpica.AcpiEnterSleepStatePrep(power_off_state)); check_status("acpi.power_off: AcpiEnterSleepState", acpica.AcpiEnterSleepState(power_off_state)); @panic("acpi.power_off: reached end"); } // OS Abstraction Layer ====================================================== export fn AcpiOsInitialize() acpica.Status { return acpica.Ok; } export fn AcpiOsTerminate() 
acpica.Status { return acpica.Ok; } export fn AcpiOsGetRootPointer() acpica.PhysicalAddress { var p: acpica.PhysicalAddress = 0; _ = acpica.AcpiFindRootPointer(@ptrCast([*c]acpica.PhysicalAddress, &p)); return p; } export fn AcpiOsAllocate(size: acpica.Size) ?*anyopaque { const a = kernel.alloc.alloc_array(u8, size) catch return null; return @ptrCast(*anyopaque, a.ptr); } export fn AcpiOsFree(ptr: ?*anyopaque) void { if (ptr != null) { kernel.alloc.free_array(utils.make_const_slice(u8, @ptrCast([*]u8, ptr), 0)) catch {}; } } const Sem = kernel.sync.Semaphore(acpica.Uint32); export fn AcpiOsCreateSemaphore( max_units: acpica.Uint32, initial_units: acpica.Uint32, semaphore: **Sem) acpica.Status { _ = max_units; const sem = kernel.alloc.alloc(Sem) catch return acpica.NoMemory; sem.* = .{.value = initial_units}; sem.init(); semaphore.* = sem; return acpica.Ok; } export fn AcpiOsWaitSemaphore( semaphore: *Sem, units: acpica.Uint32, timeout: acpica.Uint16) acpica.Status { // TODO: Timeout in milliseconds _ = timeout; var got: acpica.Uint32 = 0; while (got < units) { semaphore.wait() catch continue; got += 1; } return acpica.Ok; } export fn AcpiOsSignalSemaphore(semaphore: *Sem, units: acpica.Uint32) acpica.Status { var left: acpica.Uint32 = units; while (left > 0) { semaphore.signal() catch continue; left -= 1; } return acpica.Ok; } export fn AcpiOsDeleteSemaphore(semaphore: *Sem) acpica.Status { kernel.alloc.free(semaphore) catch return acpica.BadParameter; return acpica.Ok; } const Lock = kernel.sync.Lock; export fn AcpiOsCreateLock(lock: **Lock) acpica.Status { const l = kernel.alloc.alloc(Lock) catch return acpica.NoMemory; l.* = .{}; lock.* = l; return acpica.Ok; } export fn AcpiOsAcquireLock(lock: *Lock) acpica.ACPI_CPU_FLAGS { lock.spin_lock(); return 0; } export fn AcpiOsReleaseLock(lock: *Lock, flags: acpica.ACPI_CPU_FLAGS) void { _ = flags; lock.unlock(); } export fn AcpiOsDeleteLock(lock: *Lock) acpica.Status { kernel.alloc.free(lock) catch return acpica.BadParameter; return acpica.Ok; } export fn AcpiOsGetThreadId() acpica.Uint64 { const t = kernel.threading_mgr.current_thread orelse &kernel.threading_mgr.boot_thread; return t.id; } export fn AcpiOsPredefinedOverride(predefined_object: *const acpica.ACPI_PREDEFINED_NAMES, new_value: **allowzero anyopaque) acpica.Status { _ = predefined_object; new_value.* = @intToPtr(*allowzero anyopaque, 0); return acpica.Ok; } export fn AcpiOsTableOverride(existing: [*c]acpica.ACPI_TABLE_HEADER, new: [*c][*c]acpica.ACPI_TABLE_HEADER) acpica.Status { _ = existing; new.* = null; return acpica.Ok; } export fn AcpiOsPhysicalTableOverride(existing: [*c]acpica.ACPI_TABLE_HEADER, new_addr: [*c]acpica.ACPI_PHYSICAL_ADDRESS, new_len: [*c]acpica.Uint32) acpica.Status { _ = existing; _ = new_len; new_addr.* = 0; return acpica.Ok; } export fn AcpiOsMapMemory( address: acpica.PhysicalAddress, size: acpica.Size) *allowzero anyopaque { const page = pmemory.page_size; const pmem = &kernel.memory_mgr.impl; const addr = @intCast(usize, address); const start_page = utils.align_down(addr, page); const offset = addr % page; const range = pmem.get_unused_kernel_space(size + offset) catch { print.string("AcpiOsMapMemory: get_unused_kernel_space failed\n"); return @intToPtr(*allowzero anyopaque, 0); }; pmem.map(range, start_page, false) catch { print.string("AcpiOsMapMemory: map failed\n"); return @intToPtr(*allowzero anyopaque, 0); }; return @intToPtr(*allowzero anyopaque, range.start + offset); } export fn AcpiOsUnmapMemory(address: *allowzero anyopaque, size: 
acpica.Size) void { // TODO _ = address; _ = size; } export fn AcpiOsReadPort(address: acpica.ACPI_IO_ADDRESS, value: [*c]acpica.Uint32, width: acpica.UINT32) acpica.Status { const port = @truncate(u16, address); value.* = switch (width) { 8 => util.in8(port), 16 => util.in16(port), 32 => util.in32(port), else => { print.format("AcpiOsReadPort: width is {}\n", .{width}); return acpica.AE_ERROR; }, }; return acpica.Ok; } export fn AcpiOsWritePort(address: acpica.ACPI_IO_ADDRESS, value: acpica.Uint32, width: acpica.Uint32) acpica.Status { const port = @truncate(u16, address); switch (width) { 8 => util.out8(port, @truncate(u8, value)), 16 => util.out16(port, @truncate(u16, value)), 32 => util.out32(port, value), else => { print.format("AcpiOsWritePort: width is {}\n", .{width}); return acpica.AE_ERROR; }, } return acpica.Ok; } fn convert_pci_loc(pci_loc: [*c]acpica.ACPI_PCI_ID) pci.Location { // TODO ACPI_PCI_ID has a UINT16 Segment field. This might be for PCIe? return .{ .bus = @intCast(pci.Bus, pci_loc.*.Bus), .device = @intCast(pci.Device, pci_loc.*.Device), .function = @intCast(pci.Function, pci_loc.*.Function), }; } export fn AcpiOsReadPciConfiguration(pci_loc: [*c]acpica.ACPI_PCI_ID, offset: acpica.Uint32, value: [*c]acpica.Uint64, width: acpica.Uint32) acpica.Status { // print.format("AcpiOsReadPciConfiguration: {}\n", .{pci_loc}); const off = @intCast(pci.Offset, offset); const loc = convert_pci_loc(pci_loc); value.* = switch (width) { 8 => pci.read_config(u8, loc, off), 16 => pci.read_config(u16, loc, off), 32 => pci.read_config(u32, loc, off), else => { print.format("AcpiOsReadPciConfiguration: width is {}\n", .{width}); return acpica.AE_ERROR; }, }; return acpica.Ok; } export fn AcpiOsWritePciConfiguration(pci_loc: [*c]acpica.ACPI_PCI_ID, offset: acpica.Uint32, value: acpica.Uint64, width: acpica.Uint32) acpica.Status { // print.format("AcpiOsWritePciConfiguration: {}\n", .{pci_loc}); const off = @intCast(pci.Offset, offset); const loc = convert_pci_loc(pci_loc); switch (width) { 8 => pci.write_config(u8, loc, off, @truncate(u8, value)), 16 => pci.write_config(u16, loc, off, @truncate(u16, value)), 32 => pci.write_config(u32, loc, off, @truncate(u32, value)), else => { print.format("AcpiOsWritePciConfiguration: width is {}\n", .{width}); return acpica.AE_ERROR; }, } return acpica.Ok; } var interrupt_handler: acpica.ACPI_OSD_HANDLER = null; var interrupt_context: ?*anyopaque = null; pub fn interrupt(interrupt_number: u32, interrupt_stack: *const interrupts.Stack) void { _ = interrupt_number; _ = interrupt_stack; if (interrupt_handler) |handler| { _ = handler(interrupt_context); } } export fn AcpiOsInstallInterruptHandler(number: acpica.Uint32, handler: acpica.ACPI_OSD_HANDLER, context: ?*anyopaque) acpica.Status { const fixed: u32 = 9; if (number != fixed) { print.format("AcpiOsInstallInterruptHandler: unexpected IRQ {}\n", .{number}); return acpica.AE_BAD_PARAMETER; } if (interrupt_handler != null) { print.string("AcpiOsInstallInterruptHandler: already installed one\n"); return acpica.AE_ALREADY_EXISTS; } interrupt_handler = handler; interrupt_context = context; interrupts.IrqInterruptHandler(fixed, interrupt).set( "ACPI", segments.kernel_code_selector, interrupts.kernel_flags); interrupts.load(); return acpica.Ok; } export fn AcpiOsRemoveInterruptHandler(number: acpica.Uint32, routine: acpica.ACPI_OSD_HANDLER) acpica.Status { // TODO print.format("AcpiOsRemoveInterruptHandler: TODO {}\n", .{number}); _ = number; _ = routine; return acpica.Ok; } export fn AcpiOsEnterSleep( 
sleep_state: acpica.Uint8, reg_a: acpica.Uint32, reg_b: acpica.Uint32) acpica.Status { // TODO? _ = sleep_state; _ = reg_a; _ = reg_b; return acpica.Ok; } export fn AcpiOsGetTimer() acpica.Uint64 { return 0; // @panic("AcpiOsGetTimer called"); } export fn AcpiOsSignal(function: acpica.Uint32, info: *anyopaque) acpica.Status { _ = function; _ = info; @panic("AcpiOsSignal called"); } export fn AcpiOsExecute() acpica.Status { @panic("AcpiOsExecute called"); } export fn AcpiOsWaitEventsComplete() acpica.Status { @panic("AcpiOsWaitEventsComplete called"); } export fn AcpiOsStall() acpica.Status { @panic("AcpiOsStall called"); } export fn AcpiOsSleep() acpica.Status { @panic("AcpiOsSleep called"); } export fn AcpiOsReadMemory() acpica.Status { @panic("AcpiOsReadMemory called"); } export fn AcpiOsWriteMemory() acpica.Status { @panic("AcpiOsWriteMemory called"); } export fn AcpiOsPrintf() acpica.Status { @panic("AcpiOsPrintf called"); }
kernel/platform/acpi.zig
const std = @import("std"); const objects = @import("objects.zig"); const Environment = @import("Environment.zig"); pub const TypeId = enum(u8) { void = 0, number = 1, object = 2, boolean = 3, string = 4, array = 5, enumerator = 6, }; /// A struct that represents any possible LoLa value. pub const Value = union(TypeId) { const Self = @This(); // non-allocating void: void, number: f64, object: objects.ObjectHandle, boolean: bool, // allocating string: String, array: Array, enumerator: Enumerator, pub fn initNumber(val: f64) Self { return Self{ .number = val }; } pub fn initInteger(comptime T: type, val: T) Self { comptime std.debug.assert(@typeInfo(T) == .Int); return Self{ .number = @intToFloat(f64, val) }; } pub fn initObject(id: objects.ObjectHandle) Self { return Self{ .object = id }; } pub fn initBoolean(val: bool) Self { return Self{ .boolean = val }; } /// Initializes a new value with string contents. pub fn initString(allocator: std.mem.Allocator, text: []const u8) !Self { return Self{ .string = try String.init(allocator, text) }; } /// Creates a new value that takes ownership of the passed string. /// This string must not be deinited. pub fn fromString(str: String) Self { return Self{ .string = str }; } /// Creates a new value that takes ownership of the passed array. /// This array must not be deinited. pub fn fromArray(array: Array) Self { return Self{ .array = array }; } /// Creates a new value with an enumerator. The array will be cloned /// into the enumerator and will not be owned. pub fn initEnumerator(array: Array) !Self { return Self{ .enumerator = try Enumerator.init(array) }; } /// Creates a new value that takes ownership of the passed enumerator. /// This enumerator must not be deinited. pub fn fromEnumerator(enumerator: Enumerator) Self { return Self{ .enumerator = enumerator }; } /// Duplicate this value. pub fn clone(self: Self) !Self { return switch (self) { .string => |s| Self{ .string = try s.clone() }, .array => |a| Self{ .array = try a.clone() }, .enumerator => |e| Self{ .enumerator = try e.clone() }, .void, .number, .object, .boolean => self, }; } /// Exchanges two values pub fn exchangeWith(self: *Self, other: *Self) void { const temp = self.*; self.* = other.*; other.* = temp; } /// Replaces the current instance with another instance. /// This will move the memory from the other instance into the /// current one. Calling deinit() on `other` after this function /// is an error. pub fn replaceWith(self: *Self, other: Self) void { self.deinit(); self.* = other; } /// Checks if two values are equal. 
pub fn eql(lhs: Self, rhs: Self) bool { const Tag = std.meta.Tag(Self); if (@as(Tag, lhs) != @as(Tag, rhs)) return false; return switch (lhs) { .void => true, .number => |n| n == rhs.number, .object => |o| o == rhs.object, .boolean => |b| b == rhs.boolean, .string => |s| String.eql(s, rhs.string), .array => |a| Array.eql(a, rhs.array), .enumerator => |e| Enumerator.eql(e, rhs.enumerator), }; } pub fn deinit(self: *Self) void { switch (self.*) { .array => |*a| a.deinit(), .string => |*s| s.deinit(), .enumerator => |*e| e.deinit(), else => {}, } self.* = undefined; } const ConversionError = error{ TypeMismatch, OutOfRange }; pub fn toNumber(self: Self) ConversionError!f64 { if (self != .number) return error.TypeMismatch; return self.number; } pub fn toInteger(self: Self, comptime T: type) ConversionError!T { const num = @floor(try self.toNumber()); if (num < std.math.minInt(T)) return error.OutOfRange; if (num > std.math.maxInt(T)) return error.OutOfRange; return @floatToInt(T, num); } pub fn toBoolean(self: Self) ConversionError!bool { if (self != .boolean) return error.TypeMismatch; return self.boolean; } pub fn toVoid(self: Self) ConversionError!void { if (self != .void) return error.TypeMismatch; } pub fn toObject(self: Self) ConversionError!objects.ObjectHandle { if (self != .object) return error.TypeMismatch; return self.object; } pub fn toArray(self: Self) ConversionError!Array { if (self != .array) return error.TypeMismatch; return self.array; } /// Returns either the string contents or errors with TypeMismatch pub fn toString(self: Self) ConversionError![]const u8 { if (self != .string) return error.TypeMismatch; return self.string.contents; } /// Gets the contained array or fails. pub fn getArray(self: *Self) ConversionError!*Array { if (self.* != .array) return error.TypeMismatch; return &self.array; } /// Gets the contained enumerator or fails. pub fn getEnumerator(self: *Self) ConversionError!*Enumerator { if (self.* != .enumerator) return error.TypeMismatch; return &self.enumerator; } fn formatArray(a: Array, stream: anytype) !void { try stream.writeAll("["); for (a.contents) |item, i| { if (i > 0) try stream.writeAll(","); // Workaround until #???? is fixed: // Print only the type name of the array item. // const itemType = @as(TypeId, item); // try std.fmt.format(context, Errors, output, " {}", .{@tagName(itemType)}); try stream.print(" {}", .{item}); } try stream.writeAll(" ]"); } /// Prints a LoLa value to the given stream. pub fn format(value: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, stream: anytype) !void { _ = fmt; _ = options; return switch (value) { .void => stream.writeAll("void"), .number => |n| stream.print("{d}", .{n}), .object => |o| stream.print("${d}", .{o}), .boolean => |b| if (b) stream.writeAll("true") else stream.writeAll("false"), .string => |s| stream.print("\"{s}\"", .{s.contents}), .array => |a| formatArray(a, stream), .enumerator => |e| stream.print("enumerator({}/{})", .{ e.index, e.array.contents.len }), }; } /// Serializes the value into the given `writer`. /// Note that this does not serialize object values but only references. It is required to serialize the corresponding /// object pool as well to gain restorability of objects. 
pub fn serialize(self: Self, writer: anytype) (@TypeOf(writer).Error || error{ NotSupported, ObjectTooLarge })!void { try writer.writeByte(@enumToInt(@as(TypeId, self))); switch (self) { .void => return, // void values are empty \o/ .number => |val| try writer.writeAll(std.mem.asBytes(&val)), .object => |val| try writer.writeIntLittle(u64, @enumToInt(val)), .boolean => |val| try writer.writeByte(if (val) @as(u8, 1) else 0), .string => |val| { try writer.writeIntLittle(u32, std.math.cast(u32, val.contents.len) catch return error.ObjectTooLarge); try writer.writeAll(val.contents); }, .array => |arr| { try writer.writeIntLittle(u32, std.math.cast(u32, arr.contents.len) catch return error.ObjectTooLarge); for (arr.contents) |item| { try item.serialize(writer); } }, .enumerator => |e| { try writer.writeIntLittle(u32, std.math.cast(u32, e.array.contents.len) catch return error.ObjectTooLarge); try writer.writeIntLittle(u32, std.math.cast(u32, e.index) catch return error.ObjectTooLarge); for (e.array.contents) |item| { try item.serialize(writer); } }, } } /// Deserializes a value from the `reader`, using `allocator` to allocate memory. /// Note that if objects are deserialized you need to also deserialize the corresponding object pool pub fn deserialize(reader: anytype, allocator: std.mem.Allocator) (@TypeOf(reader).Error || error{ OutOfMemory, InvalidEnumTag, EndOfStream, NotSupported })!Self { const type_id_src = try reader.readByte(); const type_id = try std.meta.intToEnum(TypeId, type_id_src); return switch (type_id) { .void => .void, .number => blk: { var buffer: [@sizeOf(f64)]u8 align(@alignOf(f64)) = undefined; try reader.readNoEof(&buffer); break :blk initNumber(@bitCast(f64, buffer)); }, .object => initObject(@intToEnum(objects.ObjectHandle, try reader.readIntLittle(std.meta.Tag(objects.ObjectHandle)))), .boolean => initBoolean((try reader.readByte()) != 0), .string => blk: { const size = try reader.readIntLittle(u32); const buffer = try allocator.alloc(u8, size); errdefer allocator.free(buffer); try reader.readNoEof(buffer); break :blk fromString(String.initFromOwned(allocator, buffer)); }, .array => blk: { const size = try reader.readIntLittle(u32); var array = try Array.init(allocator, size); errdefer array.deinit(); for (array.contents) |*item| { item.* = try deserialize(reader, allocator); } break :blk fromArray(array); }, .enumerator => blk: { const size = try reader.readIntLittle(u32); const index = try reader.readIntLittle(u32); var array = try Array.init(allocator, size); errdefer array.deinit(); for (array.contents) |*item| { item.* = try deserialize(reader, allocator); } break :blk fromEnumerator(Enumerator{ .array = array, .index = index, }); }, }; } }; test "Value.void" { var voidVal = Value{ .void = {} }; defer voidVal.deinit(); std.debug.assert(voidVal == .void); } test "Value.number" { var value = Value{ .number = 3.14 }; defer value.deinit(); std.debug.assert(value == .number); std.debug.assert(value.number == 3.14); } test "Value.boolean" { var value = Value{ .boolean = true }; defer value.deinit(); std.debug.assert(value == .boolean); std.debug.assert(value.boolean == true); } test "Value.object" { var value = Value{ .object = @intToEnum(objects.ObjectHandle, 2394) }; defer value.deinit(); std.debug.assert(value == .object); std.debug.assert(value.object == @intToEnum(objects.ObjectHandle, 2394)); } test "Value.string (move)" { var value = Value.fromString(try String.init(std.testing.allocator, "Hello")); defer value.deinit(); std.debug.assert(value == .string); 
std.debug.assert(std.mem.eql(u8, value.string.contents, "Hello")); } test "Value.string (init)" { var value = try Value.initString(std.testing.allocator, "Malloc'd"); defer value.deinit(); std.debug.assert(value == .string); std.debug.assert(std.mem.eql(u8, value.string.contents, "Malloc'd")); } test "Value.eql (void)" { var v1: Value = .void; var v2: Value = .void; std.debug.assert(v1.eql(v2)); } test "Value.eql (boolean)" { var v1 = Value.initBoolean(true); var v2 = Value.initBoolean(true); var v3 = Value.initBoolean(false); std.debug.assert(v1.eql(v2)); std.debug.assert(v2.eql(v1)); std.debug.assert(v1.eql(v3) == false); std.debug.assert(v2.eql(v3) == false); } test "Value.eql (number)" { var v1 = Value.initNumber(1.3); var v2 = Value.initNumber(1.3); var v3 = Value.initNumber(2.3); std.debug.assert(v1.eql(v2)); std.debug.assert(v2.eql(v1)); std.debug.assert(v1.eql(v3) == false); std.debug.assert(v2.eql(v3) == false); } test "Value.eql (object)" { var v1 = Value.initObject(@intToEnum(objects.ObjectHandle, 1)); var v2 = Value.initObject(@intToEnum(objects.ObjectHandle, 1)); var v3 = Value.initObject(@intToEnum(objects.ObjectHandle, 2)); std.debug.assert(v1.eql(v2)); std.debug.assert(v2.eql(v1)); std.debug.assert(v1.eql(v3) == false); std.debug.assert(v2.eql(v3) == false); } test "Value.eql (string)" { var v1 = try Value.initString(std.testing.allocator, "a"); defer v1.deinit(); var v2 = try Value.initString(std.testing.allocator, "a"); defer v2.deinit(); var v3 = try Value.initString(std.testing.allocator, "b"); defer v3.deinit(); std.debug.assert(v1.eql(v2)); std.debug.assert(v2.eql(v1)); std.debug.assert(v1.eql(v3) == false); std.debug.assert(v2.eql(v3) == false); } /// Immutable string type. /// Both pub const String = struct { const Self = @This(); allocator: std.mem.Allocator, contents: []const u8, refcount: ?*usize, /// Creates a new, uninitialized string pub fn initUninitialized(allocator: std.mem.Allocator, length: usize) !Self { const alignment = @alignOf(usize); const ptr_offset = std.mem.alignForward(length, alignment); const buffer = try allocator.allocAdvanced( u8, alignment, ptr_offset + @sizeOf(usize), .exact, ); std.mem.writeIntNative(usize, buffer[ptr_offset..][0..@sizeOf(usize)], 1); return Self{ .allocator = allocator, .contents = buffer[0..length], .refcount = @ptrCast(*usize, @alignCast(alignment, buffer.ptr + ptr_offset)), }; } /// Clones `text` with the given parameter and stores the /// duplicated value. pub fn init(allocator: std.mem.Allocator, text: []const u8) !Self { var string = try initUninitialized(allocator, text.len); std.mem.copy( u8, string.obtainMutableStorage() catch unreachable, text, ); return string; } /// Returns a string that will take ownership of the passed `text` and /// will free that with `allocator`. pub fn initFromOwned(allocator: std.mem.Allocator, text: []const u8) Self { return Self{ .allocator = allocator, .contents = text, .refcount = null, }; } /// Returns a muable slice of the string elements. /// This may fail with `error.Forbidden` when the string is referenced more than once. 
pub fn obtainMutableStorage(self: *Self) error{Forbidden}![]u8 { if (self.refcount) |rc| { std.debug.assert(rc.* > 0); if (rc.* > 1) return error.Forbidden; } // this is safe as we allocated the memory, so it is actually mutable return @intToPtr([*]u8, @ptrToInt(self.contents.ptr))[0..self.contents.len]; } pub fn clone(self: Self) error{OutOfMemory}!Self { if (self.refcount) |rc| { // we can just increase reference count here rc.* += 1; return self; } else { // otherwise, return a new copy which is now reference-counted // -> performance opt-in return try init(self.allocator, self.contents); } } pub fn eql(lhs: Self, rhs: Self) bool { return std.mem.eql(u8, lhs.contents, rhs.contents); } pub fn deinit(self: *Self) void { if (self.refcount) |rc| { std.debug.assert(rc.* > 0); rc.* -= 1; if (rc.* > 0) return; // patch-up the old length so the allocator will know what happened self.contents.len = std.mem.alignForward(self.contents.len, @alignOf(usize)) + @sizeOf(usize); } self.allocator.free(self.contents); self.* = undefined; } }; test "String" { var text = try String.init(std.testing.allocator, "Hello, World!"); std.debug.assert(std.mem.eql(u8, text.contents, "Hello, World!")); var text2 = try text.clone(); text.deinit(); std.debug.assert(std.mem.eql(u8, text2.contents, "Hello, World!")); text2.deinit(); } test "String.eql" { var str1 = try String.init(std.testing.allocator, "Hello, World!"); defer str1.deinit(); var str2 = try String.init(std.testing.allocator, "Hello, World!"); defer str2.deinit(); var str3 = try String.init(std.testing.allocator, "World, Hello!"); defer str3.deinit(); std.debug.assert(str1.eql(str2)); std.debug.assert(str2.eql(str1)); std.debug.assert(str1.eql(str3) == false); std.debug.assert(str2.eql(str3) == false); } pub const Array = struct { const Self = @This(); allocator: std.mem.Allocator, contents: []Value, pub fn init(allocator: std.mem.Allocator, size: usize) !Self { var arr = Self{ .allocator = allocator, .contents = try allocator.alloc(Value, size), }; for (arr.contents) |*item| { item.* = Value{ .void = {} }; } return arr; } pub fn clone(self: Self) error{OutOfMemory}!Self { var arr = Self{ .allocator = self.allocator, .contents = try self.allocator.alloc(Value, self.contents.len), }; errdefer arr.allocator.free(arr.contents); var index: usize = 0; // Cleanup all successfully cloned items errdefer { var i: usize = 0; while (i < index) : (i += 1) { arr.contents[i].deinit(); } } while (index < arr.contents.len) : (index += 1) { arr.contents[index] = try self.contents[index].clone(); } return arr; } pub fn eql(lhs: Self, rhs: Self) bool { if (lhs.contents.len != rhs.contents.len) return false; for (lhs.contents) |v, i| { if (!Value.eql(v, rhs.contents[i])) return false; } return true; } pub fn deinit(self: *Self) void { for (self.contents) |*item| { item.deinit(); } self.allocator.free(self.contents); self.* = undefined; } }; test "Array" { var array = try Array.init(std.testing.allocator, 3); defer array.deinit(); std.debug.assert(array.contents.len == 3); std.debug.assert(array.contents[0] == .void); std.debug.assert(array.contents[1] == .void); std.debug.assert(array.contents[2] == .void); array.contents[0].replaceWith(Value.initBoolean(true)); array.contents[1].replaceWith(try Value.initString(std.testing.allocator, "Hello")); array.contents[2].replaceWith(Value.initNumber(45.0)); std.debug.assert(array.contents[0] == .boolean); std.debug.assert(array.contents[1] == .string); std.debug.assert(array.contents[2] == .number); } test "Array.eql" { var array1 = 
try Array.init(std.testing.allocator, 2); defer array1.deinit(); array1.contents[0] = Value.initBoolean(true); array1.contents[1] = Value.initNumber(42); var array2 = try Array.init(std.testing.allocator, 2); defer array2.deinit(); array2.contents[0] = Value.initBoolean(true); array2.contents[1] = Value.initNumber(42); var array3 = try Array.init(std.testing.allocator, 2); defer array3.deinit(); array3.contents[0] = Value.initBoolean(true); array3.contents[1] = Value.initNumber(43); var array4 = try Array.init(std.testing.allocator, 3); defer array4.deinit(); std.debug.assert(array1.eql(array2)); std.debug.assert(array2.eql(array1)); std.debug.assert(array1.eql(array3) == false); std.debug.assert(array2.eql(array3) == false); std.debug.assert(array1.eql(array4) == false); std.debug.assert(array2.eql(array4) == false); std.debug.assert(array3.eql(array4) == false); } pub const Enumerator = struct { const Self = @This(); array: Array, index: usize, /// Creates a new enumerator that will clone the contained value. pub fn init(array: Array) !Self { return Self{ .array = try array.clone(), .index = 0, }; } /// Creates a new enumerator that will own the passed value. pub fn initFromOwned(array: Array) Self { return Self{ .array = array, .index = 0, }; } /// Checks if the enumerator has a next item. pub fn hasNext(self: Self) bool { return self.index < self.array.contents.len; } /// Returns either a owned value or nothing. /// Will replace the returned value in the enumerator array with `void`. /// As the enumerator can only yield values from the array and does not "store" /// them for later use, this prevents unnecessary clones. pub fn next(self: *Self) ?Value { if (self.index >= self.array.contents.len) return null; var result: Value = .void; self.array.contents[self.index].exchangeWith(&result); self.index += 1; return result; } pub fn clone(self: Self) !Self { return Self{ .array = try self.array.clone(), .index = self.index, }; } // Enumerators are never equal to each other. pub fn eql(lhs: Self, rhs: Self) bool { _ = lhs; _ = rhs; return false; } pub fn deinit(self: *Self) void { self.array.deinit(); self.* = undefined; } }; test "Enumerator" { var array = try Array.init(std.testing.allocator, 3); array.contents[0] = try Value.initString(std.testing.allocator, "a"); array.contents[1] = try Value.initString(std.testing.allocator, "b"); array.contents[2] = try Value.initString(std.testing.allocator, "c"); var enumerator = Enumerator.initFromOwned(array); defer enumerator.deinit(); std.debug.assert(enumerator.hasNext()); var a = enumerator.next() orelse return error.NotEnoughItems; defer a.deinit(); var b = enumerator.next() orelse return error.NotEnoughItems; defer b.deinit(); var c = enumerator.next() orelse return error.NotEnoughItems; defer c.deinit(); std.debug.assert(enumerator.next() == null); std.debug.assert(a == .string); std.debug.assert(b == .string); std.debug.assert(c == .string); std.debug.assert(std.mem.eql(u8, a.string.contents, "a")); std.debug.assert(std.mem.eql(u8, b.string.contents, "b")); std.debug.assert(std.mem.eql(u8, c.string.contents, "c")); } test "Enumerator.eql" { var array = try Array.init(std.testing.allocator, 0); defer array.deinit(); var enumerator1 = try Enumerator.init(array); defer enumerator1.deinit(); var enumerator2 = try Enumerator.init(array); defer enumerator2.deinit(); std.debug.assert(enumerator1.eql(enumerator2) == false); }
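// The test below is an illustrative sketch, not part of the original file: it
// exercises the serialize/deserialize round-trip described in the doc comments
// above, assuming it is compiled as an extra test inside this module. The
// 128-byte buffer size is an arbitrary choice for this small value.
test "Value round-trip through serialize/deserialize (sketch)" {
    var original = try Value.initString(std.testing.allocator, "round-trip");
    defer original.deinit();

    // Serialize into a fixed in-memory buffer.
    var buffer: [128]u8 = undefined;
    var write_stream = std.io.fixedBufferStream(&buffer);
    try original.serialize(write_stream.writer());

    // Read the value back and compare it structurally against the original.
    var read_stream = std.io.fixedBufferStream(write_stream.getWritten());
    var restored = try Value.deserialize(read_stream.reader(), std.testing.allocator);
    defer restored.deinit();
    std.debug.assert(original.eql(restored));
}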
src/library/runtime/value.zig
const std = @import("std"); pub const Tag = enum { add_with_overflow, align_cast, align_of, as, async_call, atomic_load, atomic_rmw, atomic_store, bit_cast, bit_offset_of, bool_to_int, bit_size_of, breakpoint, mul_add, byte_swap, bit_reverse, offset_of, call, c_define, c_import, c_include, clz, cmpxchg_strong, cmpxchg_weak, compile_error, compile_log, ctz, c_undef, div_exact, div_floor, div_trunc, embed_file, enum_to_int, error_name, error_return_trace, error_to_int, err_set_cast, @"export", @"extern", fence, field, field_parent_ptr, float_cast, float_to_int, frame, Frame, frame_address, frame_size, has_decl, has_field, import, int_cast, int_to_enum, int_to_error, int_to_float, int_to_ptr, memcpy, memset, wasm_memory_size, wasm_memory_grow, mod, mul_with_overflow, panic, pop_count, ptr_cast, ptr_to_int, rem, return_address, set_align_stack, set_cold, set_eval_branch_quota, set_float_mode, set_runtime_safety, shl_exact, shl_with_overflow, shr_exact, shuffle, size_of, splat, reduce, src, sqrt, sin, cos, exp, exp2, log, log2, log10, fabs, floor, ceil, trunc, round, sub_with_overflow, tag_name, This, truncate, Type, type_info, type_name, TypeOf, union_init, Vector, }; tag: Tag, /// `true` if the builtin call can take advantage of a result location pointer. needs_mem_loc: bool = false, /// `true` if the builtin call can be the left-hand side of an expression (assigned to). allows_lvalue: bool = false, /// The number of parameters to this builtin function. `null` means variable number /// of parameters. param_count: ?u8, pub const list = list: { @setEvalBranchQuota(3000); break :list std.ComptimeStringMap(@This(), .{ .{ "@addWithOverflow", .{ .tag = .add_with_overflow, .param_count = 4, }, }, .{ "@alignCast", .{ .tag = .align_cast, .param_count = 2, }, }, .{ "@alignOf", .{ .tag = .align_of, .param_count = 1, }, }, .{ "@as", .{ .tag = .as, .needs_mem_loc = true, .param_count = 2, }, }, .{ "@asyncCall", .{ .tag = .async_call, .param_count = null, }, }, .{ "@atomicLoad", .{ .tag = .atomic_load, .param_count = 3, }, }, .{ "@atomicRmw", .{ .tag = .atomic_rmw, .param_count = 5, }, }, .{ "@atomicStore", .{ .tag = .atomic_store, .param_count = 4, }, }, .{ "@bitCast", .{ .tag = .bit_cast, .needs_mem_loc = true, .param_count = 2, }, }, .{ "@bitOffsetOf", .{ .tag = .bit_offset_of, .param_count = 2, }, }, .{ "@boolToInt", .{ .tag = .bool_to_int, .param_count = 1, }, }, .{ "@bitSizeOf", .{ .tag = .bit_size_of, .param_count = 1, }, }, .{ "@breakpoint", .{ .tag = .breakpoint, .param_count = 0, }, }, .{ "@mulAdd", .{ .tag = .mul_add, .param_count = 4, }, }, .{ "@byteSwap", .{ .tag = .byte_swap, .param_count = 2, }, }, .{ "@bitReverse", .{ .tag = .bit_reverse, .param_count = 2, }, }, .{ "@offsetOf", .{ .tag = .offset_of, .param_count = 2, }, }, .{ "@call", .{ .tag = .call, .needs_mem_loc = true, .param_count = 3, }, }, .{ "@cDefine", .{ .tag = .c_define, .param_count = 2, }, }, .{ "@cImport", .{ .tag = .c_import, .param_count = 1, }, }, .{ "@cInclude", .{ .tag = .c_include, .param_count = 1, }, }, .{ "@clz", .{ .tag = .clz, .param_count = 2, }, }, .{ "@cmpxchgStrong", .{ .tag = .cmpxchg_strong, .param_count = 6, }, }, .{ "@cmpxchgWeak", .{ .tag = .cmpxchg_weak, .param_count = 6, }, }, .{ "@compileError", .{ .tag = .compile_error, .param_count = 1, }, }, .{ "@compileLog", .{ .tag = .compile_log, .param_count = null, }, }, .{ "@ctz", .{ .tag = .ctz, .param_count = 2, }, }, .{ "@cUndef", .{ .tag = .c_undef, .param_count = 1, }, }, .{ "@divExact", .{ .tag = .div_exact, .param_count = 2, }, }, .{ "@divFloor", .{ .tag 
= .div_floor, .param_count = 2, }, }, .{ "@divTrunc", .{ .tag = .div_trunc, .param_count = 2, }, }, .{ "@embedFile", .{ .tag = .embed_file, .param_count = 1, }, }, .{ "@enumToInt", .{ .tag = .enum_to_int, .param_count = 1, }, }, .{ "@errorName", .{ .tag = .error_name, .param_count = 1, }, }, .{ "@errorReturnTrace", .{ .tag = .error_return_trace, .param_count = 0, }, }, .{ "@errorToInt", .{ .tag = .error_to_int, .param_count = 1, }, }, .{ "@errSetCast", .{ .tag = .err_set_cast, .param_count = 2, }, }, .{ "@export", .{ .tag = .@"export", .param_count = 2, }, }, .{ "@extern", .{ .tag = .@"extern", .param_count = 2, }, }, .{ "@fence", .{ .tag = .fence, .param_count = 1, }, }, .{ "@field", .{ .tag = .field, .needs_mem_loc = true, .param_count = 2, .allows_lvalue = true, }, }, .{ "@fieldParentPtr", .{ .tag = .field_parent_ptr, .param_count = 3, }, }, .{ "@floatCast", .{ .tag = .float_cast, .param_count = 2, }, }, .{ "@floatToInt", .{ .tag = .float_to_int, .param_count = 2, }, }, .{ "@frame", .{ .tag = .frame, .param_count = 0, }, }, .{ "@Frame", .{ .tag = .Frame, .param_count = 1, }, }, .{ "@frameAddress", .{ .tag = .frame_address, .param_count = 0, }, }, .{ "@frameSize", .{ .tag = .frame_size, .param_count = 1, }, }, .{ "@hasDecl", .{ .tag = .has_decl, .param_count = 2, }, }, .{ "@hasField", .{ .tag = .has_field, .param_count = 2, }, }, .{ "@import", .{ .tag = .import, .param_count = 1, }, }, .{ "@intCast", .{ .tag = .int_cast, .param_count = 2, }, }, .{ "@intToEnum", .{ .tag = .int_to_enum, .param_count = 2, }, }, .{ "@intToError", .{ .tag = .int_to_error, .param_count = 1, }, }, .{ "@intToFloat", .{ .tag = .int_to_float, .param_count = 2, }, }, .{ "@intToPtr", .{ .tag = .int_to_ptr, .param_count = 2, }, }, .{ "@memcpy", .{ .tag = .memcpy, .param_count = 3, }, }, .{ "@memset", .{ .tag = .memset, .param_count = 3, }, }, .{ "@wasmMemorySize", .{ .tag = .wasm_memory_size, .param_count = 1, }, }, .{ "@wasmMemoryGrow", .{ .tag = .wasm_memory_grow, .param_count = 2, }, }, .{ "@mod", .{ .tag = .mod, .param_count = 2, }, }, .{ "@mulWithOverflow", .{ .tag = .mul_with_overflow, .param_count = 4, }, }, .{ "@panic", .{ .tag = .panic, .param_count = 1, }, }, .{ "@popCount", .{ .tag = .pop_count, .param_count = 2, }, }, .{ "@ptrCast", .{ .tag = .ptr_cast, .param_count = 2, }, }, .{ "@ptrToInt", .{ .tag = .ptr_to_int, .param_count = 1, }, }, .{ "@rem", .{ .tag = .rem, .param_count = 2, }, }, .{ "@returnAddress", .{ .tag = .return_address, .param_count = 0, }, }, .{ "@setAlignStack", .{ .tag = .set_align_stack, .param_count = 1, }, }, .{ "@setCold", .{ .tag = .set_cold, .param_count = 1, }, }, .{ "@setEvalBranchQuota", .{ .tag = .set_eval_branch_quota, .param_count = 1, }, }, .{ "@setFloatMode", .{ .tag = .set_float_mode, .param_count = 1, }, }, .{ "@setRuntimeSafety", .{ .tag = .set_runtime_safety, .param_count = 1, }, }, .{ "@shlExact", .{ .tag = .shl_exact, .param_count = 2, }, }, .{ "@shlWithOverflow", .{ .tag = .shl_with_overflow, .param_count = 4, }, }, .{ "@shrExact", .{ .tag = .shr_exact, .param_count = 2, }, }, .{ "@shuffle", .{ .tag = .shuffle, .param_count = 4, }, }, .{ "@sizeOf", .{ .tag = .size_of, .param_count = 1, }, }, .{ "@splat", .{ .tag = .splat, .needs_mem_loc = true, .param_count = 2, }, }, .{ "@reduce", .{ .tag = .reduce, .param_count = 2, }, }, .{ "@src", .{ .tag = .src, .needs_mem_loc = true, .param_count = 0, }, }, .{ "@sqrt", .{ .tag = .sqrt, .param_count = 1, }, }, .{ "@sin", .{ .tag = .sin, .param_count = 1, }, }, .{ "@cos", .{ .tag = .cos, .param_count = 1, }, }, .{ "@exp", .{ 
.tag = .exp, .param_count = 1, }, }, .{ "@exp2", .{ .tag = .exp2, .param_count = 1, }, }, .{ "@log", .{ .tag = .log, .param_count = 1, }, }, .{ "@log2", .{ .tag = .log2, .param_count = 1, }, }, .{ "@log10", .{ .tag = .log10, .param_count = 1, }, }, .{ "@fabs", .{ .tag = .fabs, .param_count = 1, }, }, .{ "@floor", .{ .tag = .floor, .param_count = 1, }, }, .{ "@ceil", .{ .tag = .ceil, .param_count = 1, }, }, .{ "@trunc", .{ .tag = .trunc, .param_count = 1, }, }, .{ "@round", .{ .tag = .round, .param_count = 1, }, }, .{ "@subWithOverflow", .{ .tag = .sub_with_overflow, .param_count = 4, }, }, .{ "@tagName", .{ .tag = .tag_name, .param_count = 1, }, }, .{ "@This", .{ .tag = .This, .param_count = 0, }, }, .{ "@truncate", .{ .tag = .truncate, .param_count = 2, }, }, .{ "@Type", .{ .tag = .Type, .param_count = 1, }, }, .{ "@typeInfo", .{ .tag = .type_info, .param_count = 1, }, }, .{ "@typeName", .{ .tag = .type_name, .param_count = 1, }, }, .{ "@TypeOf", .{ .tag = .TypeOf, .param_count = null, }, }, .{ "@unionInit", .{ .tag = .union_init, .needs_mem_loc = true, .param_count = 3, }, }, .{ "@Vector", .{ .tag = .Vector, .param_count = 2, }, }, }); };
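// Illustrative lookup sketch, not part of the original file: shows how the
// comptime `list` map above is intended to be queried, assuming this test is
// compiled inside BuiltinFn.zig itself.
test "look up a builtin function by name (sketch)" {
    const info = list.get("@sizeOf") orelse unreachable;
    std.debug.assert(info.tag == .size_of);
    std.debug.assert(info.param_count.? == 1);
    // Names that are not registered simply yield null.
    std.debug.assert(list.get("@notABuiltin") == null);
}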
src/BuiltinFn.zig
const std = @import("std"); const ZqlError = error { StatementNotFound, StatementIncorrectSyntax, CommandError, TableFull, StatementParamTooLong, StatementErrorNegativeValue, }; const COLUMN_USERNAME_SIZE = 32; const COLUMN_EMAIL_SIZE = 255; const StatementType = enum { INSERT, SELECT, }; const Row = struct { id: u32 = 0, username: [COLUMN_USERNAME_SIZE]u8 = std.mem.zeroes([COLUMN_USERNAME_SIZE]u8), email: [COLUMN_EMAIL_SIZE]u8 = std.mem.zeroes([COLUMN_EMAIL_SIZE]u8), }; const PAGE_SIZE: u32 = 4096; const ROW_SIZE: u32 = @sizeOf(Row); const ROWS_PER_PAGE: u32 = PAGE_SIZE / ROW_SIZE; const TABLE_MAX_PAGES: u32 = 100; const TABLE_MAX_ROWS: u32 = ROWS_PER_PAGE * TABLE_MAX_PAGES; //const Table = struct { // numRows: u32, // pages: [TABLE_MAX_PAGES]?[]Row, //}; const Table = struct { numRows: u32, pages: std.ArrayList(?std.ArrayList(?Row)), allocator: *std.mem.Allocator, const Self = @This(); pub fn init(allocator: *std.mem.Allocator) !Self { var self = Self { .allocator = allocator, .numRows = 0, // Starting initCapacity to start an arraylist of nulls and initialising all with nulls .pages = try std.ArrayList(?std.ArrayList(?Row)).initCapacity(allocator, TABLE_MAX_PAGES), }; // Initialise all the pages to TABLE_MAX_PAGES with null values so they can be accessed var i: usize = 0; while (i < 10) : (i += 1) { try self.pages.append(null); } return self; } pub fn deinit(self: Self) void { for (self.pages.items) |page, i| { // Only deinit pages that are not null if (page != null) { page.?.deinit(); } } self.pages.deinit(); } }; const Statement = struct { type: StatementType = StatementType.SELECT, row: Row = .{}, }; // TODO: Make customizable const MAX_INPUT_SIZE: usize = 1000; fn saveRow(source: *Row, destination: *?Row) void { destination.* = source.*; } fn loadRow(source: *?Row, destination: *Row) void { destination.* = source.*.?; } fn rowSlot(allocator: *std.mem.Allocator, table: *Table, rowNum: u32) !*?Row { const pageNum: u32 = rowNum / ROWS_PER_PAGE; // TODO: Check max age if (table.pages.items[pageNum] == null) { table.pages.items[pageNum] = try std.ArrayList(?Row).initCapacity(allocator, ROWS_PER_PAGE); // Initialise all to null so they can be accessed as required var i: usize = 0; while (i < ROWS_PER_PAGE) : (i += 1) { try table.pages.items[pageNum].?.append(null); } } const rowOffset: u32 = rowNum % ROWS_PER_PAGE; return &(table.pages.items[pageNum].?.items[rowOffset]); } fn printPrompt(stdout: anytype) !void { try stdout.print("db > ", .{}); } fn readInput(bufStream: anytype, allocator: *std.mem.Allocator) []u8 { var line: ?[]u8 = bufStream.readUntilDelimiterOrEofAlloc(allocator, '\n', MAX_INPUT_SIZE) catch |err| { return ""; }; return line.?; } fn doMetaCommand(stdout: anytype, line: []u8) !void { if (std.mem.eql(u8, line, ".exit")) { try stdout.print("Exiting ZigQL prompt.\n", .{}); std.process.exit(0); } else { // TODO: Throw and catch more specific error return ZqlError.CommandError; } } fn prepareStatement(line: []u8, statement: *Statement) !void { if (line.len < 6) { return ZqlError.StatementNotFound; } else if (std.mem.eql(u8, line[0..6], "insert")) { var tokens = std.mem.tokenize(line, " "); const initStatement = tokens.next() orelse return ZqlError.StatementIncorrectSyntax; const idRaw = tokens.next() orelse return ZqlError.StatementIncorrectSyntax; const id = std.fmt.parseInt(i32, idRaw, 10) catch |err| return ZqlError.StatementIncorrectSyntax; if (id < 0) return ZqlError.StatementErrorNegativeValue; const username = tokens.next() orelse return 
ZqlError.StatementIncorrectSyntax; if (username.len > COLUMN_USERNAME_SIZE) { return ZqlError.StatementParamTooLong; } const email = tokens.next() orelse return ZqlError.StatementIncorrectSyntax; if (email.len > COLUMN_EMAIL_SIZE) { return ZqlError.StatementParamTooLong; } // If there are more elements we can return syntax error if (tokens.next() != null) { return ZqlError.StatementIncorrectSyntax; } statement.type = StatementType.INSERT; statement.row.id = @intCast(u32, id); std.mem.copy(u8, statement.row.username[0..], username); std.mem.copy(u8, statement.row.email[0..], email); } else if (std.mem.eql(u8, line[0..6], "select")) { statement.type = StatementType.SELECT; } else { // TODO: Throw and catch more specific error return ZqlError.StatementNotFound; } } fn executeInsert(allocator: *std.mem.Allocator, statement: *Statement, table: *Table) !void { if (table.numRows >= TABLE_MAX_ROWS) { return ZqlError.TableFull; } var pFreeRow: *?Row = try rowSlot(allocator, table, table.numRows); saveRow(&statement.row, pFreeRow); table.numRows += 1; } fn printRow(stdout: anytype, row: *Row) !void { try stdout.print("Row: {d}, {s}, {s}\n", .{ row.id, row.username, row.email }); } fn executeSelect(allocator: *std.mem.Allocator, stdout: anytype, statement: *Statement, table: *Table) !void { var row: Row = .{}; var i: u32 = 0; while (i < table.numRows): (i += 1) { var pCurrRow: *?Row = try rowSlot(allocator, table, i); loadRow(pCurrRow, &row); try printRow(stdout, &row); } } fn executeStatement(allocator: *std.mem.Allocator, stdout: anytype, statement: *Statement, table: *Table) !void { switch (statement.type) { StatementType.INSERT => { try stdout.print("Executing insert.\n", .{}); try executeInsert(allocator, statement, table); }, StatementType.SELECT => { try stdout.print("Executing select.\n", .{}); try executeSelect(allocator, stdout, statement, table); }, } } fn processLine(allocator: *std.mem.Allocator, stdout: anytype, line: []u8, table: *Table) !void { if (line[0] == '.') { doMetaCommand(stdout, line) catch |err| { try stdout.print("Unrecognized command: {s}.\n", .{ line }); return; }; } else { var statement: Statement = .{}; prepareStatement(line, &statement) catch |err| { switch(err) { ZqlError.StatementNotFound => { try stdout.print("Unrecognized keyword at start of: {s}.\n", .{ line }); }, ZqlError.StatementIncorrectSyntax => { try stdout.print("Incorrect statement: {s}.\n", .{ line }); }, ZqlError.StatementParamTooLong => { try stdout.print("Parameter too long on statement: {s}.\n", .{ line }); }, ZqlError.StatementErrorNegativeValue => { try stdout.print("Incorrect statement: ID must be positive.\n", .{}); }, else => unreachable, } return; }; executeStatement(allocator, stdout, &statement, table) catch |err| { try stdout.print("Error executing command: {s}.\n", .{ line }); return; }; try stdout.print("Executed.\n", .{}); } } pub fn main() !void { var allocatorWrapper = std.heap.GeneralPurposeAllocator(.{}){}; //errdefer allocatorWrapper.deinit(); const pAllocator = &allocatorWrapper.allocator; const stdout = std.io.getStdOut().writer(); const stdin = std.io.getStdIn().reader(); try stdout.print("Welcome to the ZigQL prompt.\n", .{}); try stdout.print("Type your query or .exit when you're done.\n", .{}); // Using bufferedReader for performance var bufReader = std.io.bufferedReader(stdin); const pBufStream = &bufReader.reader(); var table: *Table = &(try Table.init(pAllocator)); defer table.deinit(); while (true) { try printPrompt(stdout); var line: []u8 = readInput(pBufStream,
pAllocator); defer pAllocator.free(line); try processLine(pAllocator, stdout, line, table); } } test "Test statement error" { const testAllocator = std.testing.allocator; var outList = std.ArrayList(u8).init(testAllocator); defer outList.deinit(); var table: *Table = &(try Table.init(testAllocator)); defer table.deinit(); var line: [11]u8 = "Hello world".*; try processLine(testAllocator, outList.writer(), &line, table); try std.testing.expect( std.mem.eql(u8, outList.items, "Unrecognized keyword at start of: Hello world.\n")); } test "Test statement error" { const testAllocator = std.testing.allocator; var outList = std.ArrayList(u8).init(testAllocator); defer outList.deinit(); var table: *Table = &(try Table.init(testAllocator)); defer table.deinit(); var line: [10]u8 = ".nocommand".*; try processLine(testAllocator, outList.writer(), &line, table); try std.testing.expect( std.mem.eql(u8, outList.items, "Unrecognized command: .nocommand.\n")); } test "Test statement select" { const testAllocator = std.testing.allocator; var outList = std.ArrayList(u8).init(testAllocator); defer outList.deinit(); var table: *Table = &(try Table.init(testAllocator)); defer table.deinit(); var line: [6]u8 = "select".*; try processLine(testAllocator, outList.writer(), &line, table); try std.testing.expect( std.mem.eql(u8, outList.items, "Executing select.\nExecuted.\n")); } test "Test statement insert error no args" { const testAllocator = std.testing.allocator; var outList = std.ArrayList(u8).init(testAllocator); defer outList.deinit(); var table: *Table = &(try Table.init(testAllocator)); defer table.deinit(); var line: [6]u8 = "insert".*; try processLine(testAllocator, outList.writer(), &line, table); try std.testing.expect( std.mem.eql(u8, outList.items, "Incorrect statement: insert.\n")); } test "Test statement insert error too many args" { const testAllocator = std.testing.allocator; var outList = std.ArrayList(u8).init(testAllocator); defer outList.deinit(); var table: *Table = &(try Table.init(testAllocator)); defer table.deinit(); var line: [22]u8 = "insert 1 one two three".*; try processLine(testAllocator, outList.writer(), &line, table); try std.testing.expect( std.mem.eql(u8, outList.items, "Incorrect statement: insert 1 one two three.\n")); } test "Test statement insert error negative ID" { const testAllocator = std.testing.allocator; var outList = std.ArrayList(u8).init(testAllocator); defer outList.deinit(); var table: *Table = &(try Table.init(testAllocator)); defer table.deinit(); var line: [23]u8 = "insert -1 one two three".*; try processLine(testAllocator, outList.writer(), &line, table); try std.testing.expect( std.mem.eql(u8, outList.items, "Incorrect statement: ID must be positive.\n")); } test "Test statement insert and select" { const testAllocator = std.testing.allocator; var outList = std.ArrayList(u8).init(testAllocator); defer outList.deinit(); var table: *Table = &(try Table.init(testAllocator)); defer table.deinit(); const testUsername: [4]u8 = "user".*; const testEmail: [17]u8 = "<EMAIL>".*; var lineInsert: []u8 = try std.fmt.allocPrint( testAllocator, "insert 1 {s} {s}", .{ &testUsername, &testEmail }); defer testAllocator.free(lineInsert); try processLine(testAllocator, outList.writer(), lineInsert, table); try processLine(testAllocator, outList.writer(), lineInsert, table); var lineSelect: [6]u8 = "select".*; try processLine(testAllocator, outList.writer(), &lineSelect, table); const lineResultTemplate: *const [118]u8 = \\Executing insert. \\Executed. \\Executing insert. 
\\Executed. \\Executing select. \\Row: 1, {s}, {s} \\Row: 1, {s}, {s} \\Executed. \\ ; var username: [COLUMN_USERNAME_SIZE]u8 = std.mem.zeroes([COLUMN_USERNAME_SIZE]u8); var email: [COLUMN_EMAIL_SIZE]u8 = std.mem.zeroes([COLUMN_EMAIL_SIZE]u8); std.mem.copy(u8, &username, &testUsername); std.mem.copy(u8, &email, &testEmail); var lineResult: []u8 = try std.fmt.allocPrint( testAllocator, lineResultTemplate, .{ username, email, username, email }); defer testAllocator.free(lineResult); try std.testing.expect( std.mem.eql(u8, outList.items, lineResult)); } // Currently needs to be extended to add the buffer // 190 333 Parameter too long on statement: insert 1 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA <EMAIL>. // Parameter too long on statement: insert 1 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA <EMAIL>. // test "Test statement insert param too long"... FAIL (TestUnexpectedResult) // test "Test statement insert param too long" { // const testAllocator = std.testing.allocator; // var outList = std.ArrayList(u8).init(testAllocator); // defer outList.deinit(); // // var table: *Table = Table.init(testAllocator); // defer table.deinit(); // // var testUsernameTooLong: [COLUMN_USERNAME_SIZE+1]u8 = // std.mem.zeroes([COLUMN_USERNAME_SIZE+1]u8); // std.mem.set(u8, &testUsernameTooLong, 'A'); // // const testEmail: [17]u8 = "<EMAIL>".*; // // var lineInsert: []u8 = try std.fmt.allocPrint( // testAllocator, // "insert 1 {s} {s}", // .{ &testUsernameTooLong, &testEmail }); // defer testAllocator.free(lineInsert); // try processLine(testAllocator, outList.writer(), lineInsert, table); // try processLine(testAllocator, outList.writer(), lineInsert, table); // const lineResultTemplate: *const [51]u8 = // "Parameter too long on statement: insert 1 {s} {s}.\n"; // var username: [COLUMN_USERNAME_SIZE+1]u8 = std.mem.zeroes([COLUMN_USERNAME_SIZE+1]u8); // var email: [COLUMN_EMAIL_SIZE]u8 = std.mem.zeroes([COLUMN_EMAIL_SIZE]u8); // std.mem.copy(u8, &username, &testUsernameTooLong); // std.mem.copy(u8, &email, &testEmail); // var lineResult: []u8 = try std.fmt.allocPrint( // testAllocator, lineResultTemplate, .{ testUsernameTooLong, email }); // defer testAllocator.free(lineResult); // std.debug.print("{d} {d} {s}", .{ outList.items.len, lineResult.len, outList.items }); // try std.testing.expect( // std.mem.eql(u8, outList.items, lineResult)); // }
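// Illustrative parsing sketch, not part of the original file: drives
// prepareStatement directly on a well-formed insert line, assuming it is
// compiled as an extra test next to the definitions above. The id, username
// and email values are made up for the example.
test "Test prepareStatement parses a valid insert (sketch)" {
    var line: [22]u8 = "insert 7 alice a@b.com".*;
    var statement: Statement = .{};
    try prepareStatement(&line, &statement);
    try std.testing.expect(statement.type == StatementType.INSERT);
    try std.testing.expect(statement.row.id == 7);
    try std.testing.expect(std.mem.eql(u8, statement.row.username[0..5], "alice"));
    try std.testing.expect(std.mem.eql(u8, statement.row.email[0..7], "a@b.com"));
}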
src/main.zig
const __fixdfsi = @import("fixdfsi.zig").__fixdfsi; const std = @import("std"); const math = std.math; const testing = std.testing; const warn = std.debug.warn; fn test__fixdfsi(a: f64, expected: i32) void { const x = __fixdfsi(a); //warn("a={}:{x} x={}:{x} expected={}:{x}:@as(u64, {x})\n", .{a, @bitCast(u64, a), x, x, expected, expected, @bitCast(u32, expected)}); testing.expect(x == expected); } test "fixdfsi" { //warn("\n", .{}); test__fixdfsi(-math.f64_max, math.minInt(i32)); test__fixdfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32)); test__fixdfsi(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000); test__fixdfsi(-0x1.0000000000000p+127, -0x80000000); test__fixdfsi(-0x1.FFFFFFFFFFFFFp+126, -0x80000000); test__fixdfsi(-0x1.FFFFFFFFFFFFEp+126, -0x80000000); test__fixdfsi(-0x1.0000000000001p+63, -0x80000000); test__fixdfsi(-0x1.0000000000000p+63, -0x80000000); test__fixdfsi(-0x1.FFFFFFFFFFFFFp+62, -0x80000000); test__fixdfsi(-0x1.FFFFFFFFFFFFEp+62, -0x80000000); test__fixdfsi(-0x1.FFFFFEp+62, -0x80000000); test__fixdfsi(-0x1.FFFFFCp+62, -0x80000000); test__fixdfsi(-0x1.000000p+31, -0x80000000); test__fixdfsi(-0x1.FFFFFFp+30, -0x7FFFFFC0); test__fixdfsi(-0x1.FFFFFEp+30, -0x7FFFFF80); test__fixdfsi(-2.01, -2); test__fixdfsi(-2.0, -2); test__fixdfsi(-1.99, -1); test__fixdfsi(-1.0, -1); test__fixdfsi(-0.99, 0); test__fixdfsi(-0.5, 0); test__fixdfsi(-math.f64_min, 0); test__fixdfsi(0.0, 0); test__fixdfsi(math.f64_min, 0); test__fixdfsi(0.5, 0); test__fixdfsi(0.99, 0); test__fixdfsi(1.0, 1); test__fixdfsi(1.5, 1); test__fixdfsi(1.99, 1); test__fixdfsi(2.0, 2); test__fixdfsi(2.01, 2); test__fixdfsi(0x1.FFFFFEp+30, 0x7FFFFF80); test__fixdfsi(0x1.FFFFFFp+30, 0x7FFFFFC0); test__fixdfsi(0x1.000000p+31, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFCp+62, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFEp+62, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFF); test__fixdfsi(0x1.0000000000000p+63, 0x7FFFFFFF); test__fixdfsi(0x1.0000000000001p+63, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFF); test__fixdfsi(0x1.0000000000000p+127, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFF); test__fixdfsi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i32)); test__fixdfsi(math.f64_max, math.maxInt(i32)); }
lib/std/special/compiler_rt/fixdfsi_test.zig
const __mulodi4 = @import("mulodi4.zig").__mulodi4; const testing = @import("std").testing; fn test__mulodi4(a: i64, b: i64, expected: i64, expected_overflow: c_int) !void { var overflow: c_int = undefined; const x = __mulodi4(a, b, &overflow); try testing.expect(overflow == expected_overflow and (expected_overflow != 0 or x == expected)); } test "mulodi4" { try test__mulodi4(0, 0, 0, 0); try test__mulodi4(0, 1, 0, 0); try test__mulodi4(1, 0, 0, 0); try test__mulodi4(0, 10, 0, 0); try test__mulodi4(10, 0, 0, 0); try test__mulodi4(0, 81985529216486895, 0, 0); try test__mulodi4(81985529216486895, 0, 0, 0); try test__mulodi4(0, -1, 0, 0); try test__mulodi4(-1, 0, 0, 0); try test__mulodi4(0, -10, 0, 0); try test__mulodi4(-10, 0, 0, 0); try test__mulodi4(0, -81985529216486895, 0, 0); try test__mulodi4(-81985529216486895, 0, 0, 0); try test__mulodi4(1, 1, 1, 0); try test__mulodi4(1, 10, 10, 0); try test__mulodi4(10, 1, 10, 0); try test__mulodi4(1, 81985529216486895, 81985529216486895, 0); try test__mulodi4(81985529216486895, 1, 81985529216486895, 0); try test__mulodi4(1, -1, -1, 0); try test__mulodi4(1, -10, -10, 0); try test__mulodi4(-10, 1, -10, 0); try test__mulodi4(1, -81985529216486895, -81985529216486895, 0); try test__mulodi4(-81985529216486895, 1, -81985529216486895, 0); try test__mulodi4(3037000499, 3037000499, 9223372030926249001, 0); try test__mulodi4(-3037000499, 3037000499, -9223372030926249001, 0); try test__mulodi4(3037000499, -3037000499, -9223372030926249001, 0); try test__mulodi4(-3037000499, -3037000499, 9223372030926249001, 0); try test__mulodi4(4398046511103, 2097152, 9223372036852678656, 0); try test__mulodi4(-4398046511103, 2097152, -9223372036852678656, 0); try test__mulodi4(4398046511103, -2097152, -9223372036852678656, 0); try test__mulodi4(-4398046511103, -2097152, 9223372036852678656, 0); try test__mulodi4(2097152, 4398046511103, 9223372036852678656, 0); try test__mulodi4(-2097152, 4398046511103, -9223372036852678656, 0); try test__mulodi4(2097152, -4398046511103, -9223372036852678656, 0); try test__mulodi4(-2097152, -4398046511103, 9223372036852678656, 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, -2, 2, 1); try test__mulodi4(-2, 0x7FFFFFFFFFFFFFFF, 2, 1); try test__mulodi4(0x7FFFFFFFFFFFFFFF, -1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); try test__mulodi4(-1, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, 0, 0, 0); try test__mulodi4(0, 0x7FFFFFFFFFFFFFFF, 0, 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, 1, 0x7FFFFFFFFFFFFFFF, 0); try test__mulodi4(1, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, 2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); try test__mulodi4(2, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -1, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0, 0); try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 1, @bitCast(i64, @as(u64, 0x8000000000000000)), 0); try test__mulodi4(1, 
@bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 0); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 1); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -1, 0x7FFFFFFFFFFFFFFF, 0); try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0x7FFFFFFFFFFFFFFF, 0); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0, 0); try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); try test__mulodi4(1, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 0); try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); }
lib/std/special/compiler_rt/mulodi4_test.zig
const std = @import("std"); const service_list = @import("models/service_manifest.zig"); const expectEqualStrings = std.testing.expectEqualStrings; pub fn Services(service_imports: anytype) type { if (service_imports.len == 0) return services; // From here, the fields of our structure can be generated at comptime... var fields: [serviceCount(service_imports)]std.builtin.TypeInfo.StructField = undefined; for (fields) |*item, i| { const import_field = @field(service_list, @tagName(service_imports[i])); item.* = .{ .name = @tagName(service_imports[i]), .field_type = @TypeOf(import_field), .default_value = import_field, .is_comptime = false, .alignment = 0, }; } // finally, generate the type return @Type(.{ .Struct = .{ .layout = .Auto, .fields = &fields, .decls = &[_]std.builtin.TypeInfo.Declaration{}, .is_tuple = false, }, }); } fn serviceCount(desired_services: anytype) usize { if (desired_services.len == 0) return @TypeOf(service_list).Struct.fields.len; return desired_services.len; } /// Using this constant may blow up build times. Recommend using the Services() /// function directly, e.g. const services = Services(.{.sts, .ec2, .s3, .ddb}){}; pub const services = service_list; test "services includes sts" { try expectEqualStrings("2011-06-15", services.sts.version); } test "sts includes get_caller_identity" { try expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name); } test "can get service and action name from request" { // get request object. This call doesn't have parameters const metadata = services.sts.get_caller_identity.Request.metaInfo(); try expectEqualStrings("2011-06-15", metadata.service_metadata.version); } test "can filter services" { const filtered_services = Services(.{ .sts, .wafv2 }){}; try expectEqualStrings("2011-06-15", filtered_services.sts.version); }
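// Illustrative sketch, not part of the original file: checks that the
// comptime-generated struct contains exactly the requested services, reusing
// the .sts and .wafv2 entries already exercised by the tests above.
test "filtered Services type has exactly the requested fields (sketch)" {
    const Filtered = Services(.{ .sts, .wafv2 });
    try std.testing.expectEqual(@as(usize, 2), @typeInfo(Filtered).Struct.fields.len);
}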
src/servicemodel.zig
const builtin = @import("builtin"); const std = @import("std"); const Arch = std.Target.Cpu.Arch; const CrossTarget = std.zig.CrossTarget; const CpuFeature = std.Target.Cpu.Feature; pub fn build(kernel: *std.build.LibExeObjStep) void { const builder = kernel.builder; var kernel_tls = builder.step("kernel", "Build kernel ELF"); const cross_target = CrossTarget{ .cpu_arch = Arch.x86_64, .cpu_model = CrossTarget.CpuModel.baseline, .cpu_features_sub = std.Target.x86.featureSet(&[_]std.Target.x86.Feature{ .cmov, .cx8, .fxsr, .macrofusion, .mmx, .nopl, .slow_3ops_lea, .slow_incdec, .sse, .sse2, .vzeroupper, .x87, }), .os_tag = std.Target.Os.Tag.freestanding, .abi = std.Target.Abi.none, }; kernel.setTarget(cross_target); kernel.code_model = std.builtin.CodeModel.kernel; kernel.want_lto = false; kernel.setLinkerScriptPath(.{ .path = "kernel/arch/x86/linker.ld" }); kernel.addAssemblyFile("kernel/arch/x86/boot.S"); kernel.setOutputDir("build/x86_64"); kernel_tls.dependOn(&kernel.step); const trampolines = builder.addAssemble("trampolines", "kernel/arch/x86/trampolines.S"); trampolines.setOutputDir("build/x86_64"); kernel.step.dependOn(&trampolines.step); var iso_tls = builder.step("iso", "Build multiboot ISO"); var iso = builder.addSystemCommand(&[_][]const u8{"scripts/mkiso.sh"}); iso.addArtifactArg(kernel); iso_tls.dependOn(&iso.step); const memory = builder.option([]const u8, "vm-memory", "VM memory e.g. 1G, 128M") orelse "1G"; const cpus = builder.option([]const u8, "vm-cpus", "number of vCPUs") orelse "1"; const display = builder.option([]const u8, "qemu-display", "type of QEMU display") orelse "none"; const use_uefi = builder.option(bool, "vm-uefi", "use UEFI") orelse true; var qemu_tls = builder.step("qemu", "Run QEMU"); var qemu = builder.addSystemCommand(&[_][]const u8{"qemu-system-x86_64"}); qemu.addArgs(&[_][]const u8{ "-enable-kvm", "-cdrom", "build/x86_64/kernel.iso", "-s", "-serial", "stdio", "-display", display, "-m", memory, "-M", "q35", "-smp", cpus, }); if (use_uefi) { qemu.addArgs(&[_][]const u8{ "-bios", "/usr/share/edk2-ovmf/x64/OVMF.fd", }); } qemu.step.dependOn(&iso.step); qemu_tls.dependOn(&qemu.step); builder.default_step = kernel_tls; }
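// Illustrative wiring sketch, not part of this file: a hypothetical top-level
// build.zig that hands its kernel artifact to the build() function above. The
// executable name "kernel.elf" and the root source "kernel/main.zig" are
// assumptions for the example, not paths taken from this repository.
//
//     const std = @import("std");
//     const x86 = @import("kernel/arch/x86/build.zig");
//
//     pub fn build(b: *std.build.Builder) void {
//         const kernel = b.addExecutable("kernel.elf", "kernel/main.zig");
//         x86.build(kernel);
//     }
//
// With that in place, the steps and options declared above are driven as:
//
//     zig build kernel                  (build the kernel ELF into build/x86_64)
//     zig build iso                     (wrap it into a bootable ISO via scripts/mkiso.sh)
//     zig build qemu -Dvm-memory=2G -Dvm-cpus=4 -Dqemu-display=gtk -Dvm-uefi=false
//                                       (boot the resulting ISO under QEMU)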
kernel/arch/x86/build.zig
const std = @import("std"); const Inon = @import("main.zig"); const Data = @import("Data.zig"); const Lib = struct { fn add(inon: *Inon, params: []Data) !Data { _ = inon; const data1 = params[0]; const data2 = params[1]; return Data{ .value = .{ .num = data1.get(.num) + data2.get(.num) } }; } fn mul(inon: *Inon, params: []Data) !Data { _ = inon; const data1 = params[0]; const data2 = params[1]; return Data{ .value = .{ .num = data1.get(.num) * data2.get(.num) } }; } fn find(inon: *Inon, params: []Data) !Data { const data1 = params[0]; const data2 = params[1]; return try data1.findEx(data2.get(.str).items).copy(inon.allocator); } fn self(inon: *Inon, params: []Data) !Data { const data1 = params[0]; return try inon.current_context.findEx(data1.get(.str).items).copy(inon.allocator); } fn index(inon: *Inon, params: []Data) !Data { _ = inon; const data1 = params[0]; const data2 = params[1]; const n = @floatToInt(usize, data2.get(.num)); return try data1.index(n); } fn eql(_: *Inon, params: []Data) !Data { const data1 = params[0]; const data2 = params[1]; if (std.meta.activeTag(data1.value) != std.meta.activeTag(data2.value)) { return Data{ .value = .{ .bool = false } }; } return Data{ .value = .{ .bool = data1.eql(&data2) } }; } }; pub fn addAll(inon: *Inon) !void { const functions: []const Inon.FuncType = &.{ .{ .name = "+", .params = &.{ .num, .num }, .run = Lib.add }, .{ .name = "*", .params = &.{ .num, .num }, .run = Lib.mul }, .{ .name = "find", .params = &.{ null, .str }, .run = Lib.find }, .{ .name = "self", .params = &.{.str}, .run = Lib.self }, .{ .name = "index", .params = &.{ null, .num }, .run = Lib.index }, .{ .name = "=", .params = &.{ null, null }, .run = Lib.eql }, }; for (functions) |f| { try inon.functions.put(inon.allocator, f.name, f); } }
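// Illustrative sketch, not part of the original file: shows the shape of the
// arguments a registered handler receives. The `+` handler above ignores its
// *Inon argument, so `undefined` is passed purely for this example; real
// callers go through inon.functions and pass the interpreter instance.
test "the '+' handler adds two numeric Data values (sketch)" {
    var params = [_]Data{
        .{ .value = .{ .num = 2 } },
        .{ .value = .{ .num = 3 } },
    };
    const result = try Lib.add(undefined, &params);
    std.debug.assert(result.get(.num) == 5);
}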
src/Stdlib.zig
const std = @import("std"); const print = std.debug.print; const data = @embedFile("../data/day01.txt"); fn is_int(c : u8) bool { return switch (c) { '0' => true, '1' => true, '2' => true, '3' => true, '4' => true, '5' => true, '6' => true, '7' => true, '8' => true, '9' => true, else => false }; } fn day01() void { var index : usize = 0; var start : usize = index; // Initialize the previous depth to the maximum size of our integer, that way // we'll never mistake it for a valid depth increase! var previous_depth : u16 = std.math.maxInt(u16); var depth_increases : u32 = 0; while (index < data.len) : (index += 1) { if (!is_int(data[index])) { if (start < index) { var depth = std.fmt.parseInt(u16, data[start..index], 10) catch |err| @panic("SHIT"); if (depth > previous_depth) { depth_increases += 1; } previous_depth = depth; } start = index + 1; } } print("🎁 Depth increases: {}\n", .{depth_increases}); } fn day02() void { var index : usize = 0; var start : usize = index; var depth_increases : u32 = 0; var window = [3]u32{0, 0, 0}; var window_index : usize = 0; // First we accumulate enough data to get us started. while (index < data.len) : (index += 1) { if (!is_int(data[index])) { if (start < index) { var depth = std.fmt.parseInt(u32, data[start..index], 10) catch |err| @panic("SHIT"); window[window_index] = depth; window_index += 1; if (window_index == window.len) { break; } } start = index + 1; } } // We've parsed the first three elements into each bit of the window, which // isn't where they need to be. We accumulate all three into the first // element, the last two into the second element, and leave the third alone. window[0] += window[1] + window[2]; window[1] += window[2]; window_index = 0; // Now we do the actual check. while (index < data.len) : (index += 1) { if (!is_int(data[index])) { if (start < index) { var depth = std.fmt.parseInt(u32, data[start..index], 10) catch |err| @panic("SHIT"); var result = window[window_index]; // Wipe out the window because we've consumed its value for comparison. window[window_index] = 0; // Wrap the window around to its new location. window_index += 1; window_index %= window.len; // Record the depth into each window location. for (window) |*item| { item.* += depth; } if (window[window_index] > result) { depth_increases += 1; } } start = index + 1; } } print("🎁 Three-window increases: {}\n", .{depth_increases}); } pub fn main() !void { var timer = try std.time.Timer.start(); day01(); var part01 = timer.lap(); print("Day 01 - part 01 took {:15}ns\n", .{part01}); timer.reset(); day02(); var part02 = timer.lap(); print("Day 01 - part 02 took {:15}ns\n", .{part02}); print("❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️\n", .{}); }
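// Illustrative sketch, not part of the original file: the windowed comparison
// in day02() reduces to comparing the measurement leaving the three-wide
// window with the one entering it, because the two overlapping measurements
// cancel out. The depths below are the example values from the puzzle
// description, whose three-measurement answer is 5.
test "three-measurement window on the sample depths (sketch)" {
    const depths = [_]u32{ 199, 200, 208, 210, 200, 207, 240, 269, 260, 263 };
    var increases: u32 = 0;
    var i: usize = 3;
    while (i < depths.len) : (i += 1) {
        // sum(depths[i-2..i+1]) > sum(depths[i-3..i]) iff depths[i] > depths[i-3]
        if (depths[i] > depths[i - 3]) increases += 1;
    }
    std.debug.assert(increases == 5);
}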
src/day01.zig
const std = @import("std"); const math = std.math; const testing = std.testing; const print = std.debug.print; const root = @import("main.zig"); const vec4 = @import("vec4.zig"); const vec3 = @import("vec3.zig"); const quat = @import("quaternion.zig"); const Vec3 = vec3.Vec3; const Vector3 = vec3.Vector3; const Vector4 = vec4.Vector4; const Quaternion = quat.Quaternion; const Quat = quat.Quat; pub const Mat4 = Mat4x4(f32); pub const Mat4_f64 = Mat4x4(f64); pub const perspective = Mat4.perspective; pub const orthographic = Mat4.orthographic; pub const lookAt = Mat4.lookAt; /// A column-major 4x4 matrix /// Note: Column-major means accessing data like m.data[COLUMN][ROW]. pub fn Mat4x4(comptime T: type) type { if (@typeInfo(T) != .Float) { @compileError("Mat4x4 not implemented for " ++ @typeName(T)); } return struct { data: [4][4]T, const Self = @This(); pub fn identity() Self { return .{ .data = .{ .{ 1, 0, 0, 0 }, .{ 0, 1, 0, 0 }, .{ 0, 0, 1, 0 }, .{ 0, 0, 0, 1 }, }, }; } /// Construct new 4x4 matrix from given slice. pub fn fromSlice(data: *const [16]T) Self { return .{ .data = .{ data[0..4].*, data[4..8].*, data[8..12].*, data[12..16].*, }, }; } /// Return a pointer to the inner data of the matrix. pub fn getData(mat: *const Self) *const T { return @ptrCast(*const T, &mat.data); } pub fn eql(left: Self, right: Self) bool { var col: usize = 0; var row: usize = 0; while (col < 4) : (col += 1) { while (row < 4) : (row += 1) { if (left.data[col][row] != right.data[col][row]) { return false; } } } return true; } pub fn multByVec4(mat: Self, v: Vector4(T)) Vector4(T) { var result: Vector4(T) = undefined; result.x = (mat.data[0][0] * v.x) + (mat.data[1][0] * v.y) + (mat.data[2][0] * v.z) + (mat.data[3][0] * v.w); result.y = (mat.data[0][1] * v.x) + (mat.data[1][1] * v.y) + (mat.data[2][1] * v.z) + (mat.data[3][1] * v.w); result.z = (mat.data[0][2] * v.x) + (mat.data[1][2] * v.y) + (mat.data[2][2] * v.z) + (mat.data[3][2] * v.w); result.w = (mat.data[0][3] * v.x) + (mat.data[1][3] * v.y) + (mat.data[2][3] * v.z) + (mat.data[3][3] * v.w); return result; } /// Construct 4x4 translation matrix by multiplying identity matrix and /// given translation vector. pub fn fromTranslate(axis: Vector3(T)) Self { var mat = Self.identity(); mat.data[3][0] = axis.x; mat.data[3][1] = axis.y; mat.data[3][2] = axis.z; return mat; } /// Make a translation between the given matrix and the given axis. pub fn translate(mat: Self, axis: Vector3(T)) Self { const trans_mat = Self.fromTranslate(axis); return Self.mult(trans_mat, mat); } /// Get translation Vec3 from current matrix. pub fn extractTranslation(self: Self) Vector3(T) { return Vector3(T).new(self.data[3][0], self.data[3][1], self.data[3][2]); } /// Construct a 4x4 matrix from given axis and angle (in degrees). 
pub fn fromRotation(angle_in_degrees: T, axis: Vector3(T)) Self { var mat = Self.identity(); const norm_axis = axis.norm(); const sin_theta = math.sin(root.toRadians(angle_in_degrees)); const cos_theta = math.cos(root.toRadians(angle_in_degrees)); const cos_value = 1.0 - cos_theta; mat.data[0][0] = (norm_axis.x * norm_axis.x * cos_value) + cos_theta; mat.data[0][1] = (norm_axis.x * norm_axis.y * cos_value) + (norm_axis.z * sin_theta); mat.data[0][2] = (norm_axis.x * norm_axis.z * cos_value) - (norm_axis.y * sin_theta); mat.data[1][0] = (norm_axis.y * norm_axis.x * cos_value) - (norm_axis.z * sin_theta); mat.data[1][1] = (norm_axis.y * norm_axis.y * cos_value) + cos_theta; mat.data[1][2] = (norm_axis.y * norm_axis.z * cos_value) + (norm_axis.x * sin_theta); mat.data[2][0] = (norm_axis.z * norm_axis.x * cos_value) + (norm_axis.y * sin_theta); mat.data[2][1] = (norm_axis.z * norm_axis.y * cos_value) - (norm_axis.x * sin_theta); mat.data[2][2] = (norm_axis.z * norm_axis.z * cos_value) + cos_theta; return mat; } pub fn rotate(mat: Self, angle_in_degrees: T, axis: Vector3(T)) Self { const rotation_mat = Self.fromRotation(angle_in_degrees, axis); return Self.mult(mat, rotation_mat); } /// Construct a rotation matrix from euler angles (X * Y * Z). /// Order matters because matrix multiplication are NOT commutative. pub fn fromEulerAngle(euler_angle: Vector3(T)) Self { const x = Self.fromRotation(euler_angle.x, Vec3.new(1, 0, 0)); const y = Self.fromRotation(euler_angle.y, Vec3.new(0, 1, 0)); const z = Self.fromRotation(euler_angle.z, Vec3.new(0, 0, 1)); return z.mult(y.mult(x)); } /// Ortho normalize given matrix. pub fn orthoNormalize(mat: Self) Self { const column_1 = Vec3.new(mat.data[0][0], mat.data[0][1], mat.data[0][2]).norm(); const column_2 = Vec3.new(mat.data[1][0], mat.data[1][1], mat.data[1][2]).norm(); const column_3 = Vec3.new(mat.data[2][0], mat.data[2][1], mat.data[2][2]).norm(); var result = mat; result.data[0][0] = column_1.x; result.data[0][1] = column_1.y; result.data[0][2] = column_1.z; result.data[1][0] = column_2.x; result.data[1][1] = column_2.y; result.data[1][2] = column_2.z; result.data[2][0] = column_3.x; result.data[2][1] = column_3.y; result.data[2][2] = column_3.z; return result; } /// Return the rotation as Euler angles in degrees. /// Taken from Mike Day at Insomniac Games (and `glm` as the same function). 
/// For more details: https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2012/07/euler-angles1.pdf pub fn extractRotation(self: Self) Vector3(T) { const m = self.orthoNormalize(); const theta_x = math.atan2(T, m.data[1][2], m.data[2][2]); const c2 = math.sqrt(math.pow(f32, m.data[0][0], 2) + math.pow(f32, m.data[0][1], 2)); const theta_y = math.atan2(T, -m.data[0][2], math.sqrt(c2)); const s1 = math.sin(theta_x); const c1 = math.cos(theta_x); const theta_z = math.atan2(T, s1 * m.data[2][0] - c1 * m.data[1][0], c1 * m.data[1][1] - s1 * m.data[2][1]); return Vec3.new(root.toDegrees(theta_x), root.toDegrees(theta_y), root.toDegrees(theta_z)); } pub fn fromScale(axis: Vector3(T)) Self { var mat = Self.identity(); mat.data[0][0] = axis.x; mat.data[1][1] = axis.y; mat.data[2][2] = axis.z; return mat; } pub fn scale(mat: Self, axis: Vector3(T)) Self { const scale_mat = Self.fromScale(axis); return Self.mult(scale_mat, mat); } pub fn extractScale(mat: Self) Vector3(T) { const scale_x = Vec3.new(mat.data[0][0], mat.data[0][1], mat.data[0][2]).length(); const scale_y = Vec3.new(mat.data[1][0], mat.data[1][1], mat.data[1][2]).length(); const scale_z = Vec3.new(mat.data[2][0], mat.data[2][1], mat.data[2][2]).length(); return Vector3(T).new(scale_x, scale_y, scale_z); } /// Construct a perspective 4x4 matrix. /// Note: Field of view is given in degrees. /// Also for more details https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluPerspective.xml. pub fn perspective(fovy_in_degrees: T, aspect_ratio: T, z_near: T, z_far: T) Self { var mat: Self = Self.identity(); const f = 1.0 / math.tan(root.toRadians(fovy_in_degrees) * 0.5); mat.data[0][0] = f / aspect_ratio; mat.data[1][1] = f; mat.data[2][2] = (z_near + z_far) / (z_near - z_far); mat.data[2][3] = -1; mat.data[3][2] = 2 * z_far * z_near / (z_near - z_far); mat.data[3][3] = 0; return mat; } /// Construct an orthographic 4x4 matrix. pub fn orthographic(left: T, right: T, bottom: T, top: T, z_near: T, z_far: T) Self { var mat: Self = undefined; mat.data[0][0] = 2.0 / (right - left); mat.data[1][1] = 2.0 / (top - bottom); mat.data[2][2] = 2.0 / (z_near - z_far); mat.data[3][3] = 1.0; mat.data[3][0] = (left + right) / (left - right); mat.data[3][1] = (bottom + top) / (bottom - top); mat.data[3][2] = (z_far + z_near) / (z_near - z_far); return mat; } /// Right-handed lookAt function. pub fn lookAt(eye: Vector3(T), target: Vector3(T), up: Vector3(T)) Self { const f = Vector3(T).norm(Vector3(T).sub(target, eye)); const s = Vector3(T).norm(Vector3(T).cross(f, up)); const u = Vector3(T).cross(s, f); var mat: Self = undefined; mat.data[0][0] = s.x; mat.data[0][1] = u.x; mat.data[0][2] = -f.x; mat.data[0][3] = 0.0; mat.data[1][0] = s.y; mat.data[1][1] = u.y; mat.data[1][2] = -f.y; mat.data[1][3] = 0.0; mat.data[2][0] = s.z; mat.data[2][1] = u.z; mat.data[2][2] = -f.z; mat.data[2][3] = 0.0; mat.data[3][0] = -Vector3(T).dot(s, eye); mat.data[3][1] = -Vector3(T).dot(u, eye); mat.data[3][2] = Vector3(T).dot(f, eye); mat.data[3][3] = 1.0; return mat; } /// Matrices multiplication. /// Produce a new matrix from given two matrices. 
pub fn mult(left: Self, right: Self) Self { var mat = Self.identity(); var columns: usize = 0; while (columns < 4) : (columns += 1) { var rows: usize = 0; while (rows < 4) : (rows += 1) { var sum: T = 0.0; var current_mat: usize = 0; while (current_mat < 4) : (current_mat += 1) { sum += left.data[current_mat][rows] * right.data[columns][current_mat]; } mat.data[columns][rows] = sum; } } return mat; } /// Construct the inverse 4x4 from the given matrix. /// Note: This is not the most efficient way to do this. /// TODO: Make it more efficient. pub fn inv(mat: Self) Self { var inv_mat: Self = undefined; var s: [6]T = undefined; var c: [6]T = undefined; s[0] = mat.data[0][0] * mat.data[1][1] - mat.data[1][0] * mat.data[0][1]; s[1] = mat.data[0][0] * mat.data[1][2] - mat.data[1][0] * mat.data[0][2]; s[2] = mat.data[0][0] * mat.data[1][3] - mat.data[1][0] * mat.data[0][3]; s[3] = mat.data[0][1] * mat.data[1][2] - mat.data[1][1] * mat.data[0][2]; s[4] = mat.data[0][1] * mat.data[1][3] - mat.data[1][1] * mat.data[0][3]; s[5] = mat.data[0][2] * mat.data[1][3] - mat.data[1][2] * mat.data[0][3]; c[0] = mat.data[2][0] * mat.data[3][1] - mat.data[3][0] * mat.data[2][1]; c[1] = mat.data[2][0] * mat.data[3][2] - mat.data[3][0] * mat.data[2][2]; c[2] = mat.data[2][0] * mat.data[3][3] - mat.data[3][0] * mat.data[2][3]; c[3] = mat.data[2][1] * mat.data[3][2] - mat.data[3][1] * mat.data[2][2]; c[4] = mat.data[2][1] * mat.data[3][3] - mat.data[3][1] * mat.data[2][3]; c[5] = mat.data[2][2] * mat.data[3][3] - mat.data[3][2] * mat.data[2][3]; const determ = 1.0 / (s[0] * c[5] - s[1] * c[4] + s[2] * c[3] + s[3] * c[2] - s[4] * c[1] + s[5] * c[0]); inv_mat.data[0][0] = (mat.data[1][1] * c[5] - mat.data[1][2] * c[4] + mat.data[1][3] * c[3]) * determ; inv_mat.data[0][1] = (-mat.data[0][1] * c[5] + mat.data[0][2] * c[4] - mat.data[0][3] * c[3]) * determ; inv_mat.data[0][2] = (mat.data[3][1] * s[5] - mat.data[3][2] * s[4] + mat.data[3][3] * s[3]) * determ; inv_mat.data[0][3] = (-mat.data[2][1] * s[5] + mat.data[2][2] * s[4] - mat.data[2][3] * s[3]) * determ; inv_mat.data[1][0] = (-mat.data[1][0] * c[5] + mat.data[1][2] * c[2] - mat.data[1][3] * c[1]) * determ; inv_mat.data[1][1] = (mat.data[0][0] * c[5] - mat.data[0][2] * c[2] + mat.data[0][3] * c[1]) * determ; inv_mat.data[1][2] = (-mat.data[3][0] * s[5] + mat.data[3][2] * s[2] - mat.data[3][3] * s[1]) * determ; inv_mat.data[1][3] = (mat.data[2][0] * s[5] - mat.data[2][2] * s[2] + mat.data[2][3] * s[1]) * determ; inv_mat.data[2][0] = (mat.data[1][0] * c[4] - mat.data[1][1] * c[2] + mat.data[1][3] * c[0]) * determ; inv_mat.data[2][1] = (-mat.data[0][0] * c[4] + mat.data[0][1] * c[2] - mat.data[0][3] * c[0]) * determ; inv_mat.data[2][2] = (mat.data[3][0] * s[4] - mat.data[3][1] * s[2] + mat.data[3][3] * s[0]) * determ; inv_mat.data[2][3] = (-mat.data[2][0] * s[4] + mat.data[2][1] * s[2] - mat.data[2][3] * s[0]) * determ; inv_mat.data[3][0] = (-mat.data[1][0] * c[3] + mat.data[1][1] * c[1] - mat.data[1][2] * c[0]) * determ; inv_mat.data[3][1] = (mat.data[0][0] * c[3] - mat.data[0][1] * c[1] + mat.data[0][2] * c[0]) * determ; inv_mat.data[3][2] = (-mat.data[3][0] * s[3] + mat.data[3][1] * s[1] - mat.data[3][2] * s[0]) * determ; inv_mat.data[3][3] = (mat.data[2][0] * s[3] - mat.data[2][1] * s[1] + mat.data[2][2] * s[0]) * determ; return inv_mat; } /// Return a 4x4 matrix from the given transform components: `translation`, `rotation` and `scale`. /// The final order is T * R * S. /// Note: `rotation` could be `Vec3` (Euler angles) or a `quat`. 
pub fn recompose(translation: Vector3(T), rotation: anytype, scaler: Vector3(T)) Self { const t = Self.fromTranslate(translation); const s = Self.fromScale(scaler); const r = switch (@TypeOf(rotation)) { Quaternion(T) => Quaternion(T).toMat4(rotation), Vector3(T) => Self.fromEulerAngle(rotation), else => @compileError("Recompose not implemented for " ++ @typeName(@TypeOf(rotation))), }; return t.mult(r.mult(s)); } /// Return the `translation`, `rotation` and `scale` components from the given matrix. /// For now, the rotation returned is a quaternion. If you want to get Euler angles /// from it, just do: `returned_quat.extractRotation()`. /// Note: We orthonormalize the given matrix before extracting the rotation. pub fn decompose(mat: Self) struct { t: Vector3(T), r: Quaternion(T), s: Vector3(T) } { const t = mat.extractTranslation(); const s = mat.extractScale(); const r = Quat.fromMat4(mat.orthoNormalize()); return .{ .t = t, .r = r, .s = s, }; } /// Print the 4x4 matrix to stderr. pub fn debugPrint(self: Self) void { const string = \\ ({d}, {d}, {d}, {d}) \\ ({d}, {d}, {d}, {d}) \\ ({d}, {d}, {d}, {d}) \\ ({d}, {d}, {d}, {d}) \\ ; print(string, .{ self.data[0][0], self.data[1][0], self.data[2][0], self.data[3][0], self.data[0][1], self.data[1][1], self.data[2][1], self.data[3][1], self.data[0][2], self.data[1][2], self.data[2][2], self.data[3][2], self.data[0][3], self.data[1][3], self.data[2][3], self.data[3][3], }); } }; } test "zalgebra.Mat4.eql" { const a = Mat4.identity(); const b = Mat4.identity(); const c = Mat4{ .data = .{ .{ 0, 0, 0, 0 }, .{ 0, 0, 0, 0 }, .{ 0, 0, 0, 0 }, .{ 0, 0, 0, 0 }, }, }; try testing.expectEqual(Mat4.eql(a, b), true); try testing.expectEqual(Mat4.eql(a, c), false); } test "zalgebra.Mat4.fromSlice" { const data = [_]f32{ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }; const result = Mat4.fromSlice(&data); try testing.expectEqual(Mat4.eql(result, Mat4.identity()), true); } test "zalgebra.Mat4.fromTranslate" { const a = Mat4.fromTranslate(Vec3.new(2, 3, 4)); try testing.expectEqual(Mat4.eql(a, Mat4{ .data = .{ .{ 1, 0, 0, 0 }, .{ 0, 1, 0, 0 }, .{ 0, 0, 1, 0 }, .{ 2, 3, 4, 1 }, }, }), true); } test "zalgebra.Mat4.translate" { const a = Mat4.fromTranslate(Vec3.new(2, 3, 2)); const result = Mat4.translate(a, Vec3.new(2, 3, 4)); try testing.expectEqual(Mat4.eql(result, Mat4{ .data = .{ .{ 1, 0, 0, 0 }, .{ 0, 1, 0, 0 }, .{ 0, 0, 1, 0 }, .{ 4, 6, 6, 1 }, }, }), true); } test "zalgebra.Mat4.fromScale" { const a = Mat4.fromScale(Vec3.new(2, 3, 4)); try testing.expectEqual(Mat4.eql(a, Mat4{ .data = .{ .{ 2, 0, 0, 0 }, .{ 0, 3, 0, 0 }, .{ 0, 0, 4, 0 }, .{ 0, 0, 0, 1 }, }, }), true); } test "zalgebra.Mat4.scale" { const a = Mat4.fromScale(Vec3.new(2, 3, 4)); const result = Mat4.scale(a, Vec3.new(2, 2, 2)); try testing.expectEqual(Mat4.eql(result, Mat4{ .data = .{ .{ 4, 0, 0, 0 }, .{ 0, 6, 0, 0 }, .{ 0, 0, 8, 0 }, .{ 0, 0, 0, 1 }, }, }), true); } test "zalgebra.Mat4.inv" { const a: Mat4 = .{ .data = .{ .{ 2, 0, 0, 4 }, .{ 0, 2, 0, 0 }, .{ 0, 0, 2, 0 }, .{ 4, 0, 0, 2 }, }, }; try testing.expectEqual(Mat4.eql(a.inv(), Mat4{ .data = .{ .{ -0.1666666716337204, 0, 0, 0.3333333432674408 }, .{ 0, 0.5, 0, 0 }, .{ 0, 0, 0.5, 0 }, .{ 0.3333333432674408, 0, 0, -0.1666666716337204 }, }, }), true); } test "zalgebra.Mat4.extractTranslation" { var a = Mat4.fromTranslate(Vec3.new(2, 3, 2)); a = a.translate(Vec3.new(2, 3, 2)); try testing.expectEqual(Vec3.eql(a.extractTranslation(), Vec3.new(4, 6, 4)), true); } test "zalgebra.Mat4.extractRotation" { const a = Mat4.fromEulerAngle(Vec3.new(45, -5, 
20)); try testing.expectEqual(Vec3.eql( a.extractRotation(), Vec3.new(45.000003814697266, -4.99052524, 19.999998092651367), ), true); } test "zalgebra.Mat4.extractScale" { var a = Mat4.fromScale(Vec3.new(2, 4, 8)); a = a.scale(Vec3.new(2, 4, 8)); try testing.expectEqual(Vec3.eql(a.extractScale(), Vec3.new(4, 16, 64)), true); } test "zalgebra.Mat4.recompose" { const result = Mat4.recompose( Vec3.new(2, 2, 2), Vec3.new(45, 5, 0), Vec3.new(1, 1, 1), ); try testing.expectEqual(Mat4.eql(result, Mat4{ .data = .{ .{ 0.9961947202682495, 0, -0.08715573698282242, 0 }, .{ 0.06162841618061066, 0.7071067690849304, 0.7044160962104797, 0 }, .{ 0.06162841245532036, -0.7071068286895752, 0.704416036605835, 0 }, .{ 2, 2, 2, 1 }, }, }), true); } test "zalgebra.Mat4.decompose" { const a = Mat4.recompose( Vec3.new(10, 5, 5), Vec3.new(45, 5, 0), Vec3.new(1, 1, 1), ); const result = a.decompose(); try testing.expectEqual(result.t.eql(Vec3.new(10, 5, 5)), true); try testing.expectEqual(result.s.eql(Vec3.new(1, 1, 1)), true); try testing.expectEqual(result.r.extractRotation().eql(Vec3.new(45, 5, -0.00000010712935250012379)), true); }
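// A couple of extra sanity checks, sketched here as a minimal illustration rather than an
// exhaustive suite: they assume the element-wise `Mat4.eql` and the right-handed `cross`
// used elsewhere in this file.
test "zalgebra.Mat4.mult with identity" {
    const a = Mat4.fromTranslate(Vec3.new(2, 3, 4));

    // Multiplying by the identity on either side must leave the matrix unchanged.
    try testing.expectEqual(Mat4.eql(Mat4.mult(a, Mat4.identity()), a), true);
    try testing.expectEqual(Mat4.eql(Mat4.mult(Mat4.identity(), a), a), true);
}

test "zalgebra.Mat4.lookAt at origin" {
    // An eye at the origin looking down -Z with +Y up yields the identity view matrix.
    const view = Mat4.lookAt(Vec3.new(0, 0, 0), Vec3.new(0, 0, -1), Vec3.new(0, 1, 0));
    try testing.expectEqual(Mat4.eql(view, Mat4.identity()), true);
}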
zalgebra/src/mat4.zig
const std = @import("std"); const mem = std.mem; const math = std.math; const ArrayList = std.ArrayList; const Module = @import("module.zig").Module; const ValueType = @import("module.zig").ValueType; const Instance = @import("instance.zig").Instance; const WasmError = @import("function.zig").WasmError; const instruction = @import("instruction.zig"); const Instruction = instruction.Instruction; const Opcode = @import("instruction.zig").Opcode; // Interpreter: // // The Interpreter interprets WebAssembly bytecode, i.e. it // is the engine of execution and the whole reason we're here. // // Whilst executing code, the Interpreter maintains three stacks. // An operand stack, a control stack and a label stack. // The WebAssembly spec models execution as a single stack where operands, // activation frames, and labels are all interleaved. Here we split // those out for convenience. // // Note: I had considered four stacks (separating out the params / locals) to // there own stack, but I don't think that's necessary. // pub const Interpreter = struct { op_stack: []u64 = undefined, op_ptr: usize = 0, frame_stack: []Frame = undefined, frame_ptr: usize = 0, label_stack: []Label = undefined, label_ptr: usize = 0, inst: *Instance = undefined, ip: usize = 0, pub fn init(op_stack: []u64, frame_stack: []Frame, label_stack: []Label, inst: *Instance) Interpreter { return Interpreter{ .op_stack = op_stack, .frame_stack = frame_stack, .label_stack = label_stack, .inst = inst, }; } fn rebug(self: *Interpreter, opcode: Instruction) void { // std.debug.warn("{}\n", .{opcode}); std.debug.warn("\n=====================================================\n", .{}); std.debug.warn("before: {}\n", .{opcode}); var i: usize = 0; while (i < self.op_ptr) : (i += 1) { std.debug.warn("stack[{}] = {}\n", .{ i, self.op_stack[i] }); } std.debug.warn("\n", .{}); i = 0; while (i < self.label_ptr) : (i += 1) { std.debug.warn("label_stack[{}] = [ret_ari: {}, ops_start: {}, break: {x}]\n", .{ i, self.label_stack[i].return_arity, self.label_stack[i].op_stack_len, self.label_stack[i].branch_target }); } std.debug.warn("\n", .{}); i = 0; while (i < self.frame_ptr) : (i += 1) { std.debug.warn("frame_stack[{}] = [ret_ari: {}, ops_start: {}, label_start: {}]\n", .{ i, self.frame_stack[i].return_arity, self.frame_stack[i].op_stack_len, self.frame_stack[i].label_stack_len }); } std.debug.warn("=====================================================\n", .{}); const stdin = std.io.getStdIn().reader(); const stdout = std.io.getStdOut().writer(); var buf: [10]u8 = undefined; // stdout.print("Continue:", .{}) catch |e| return; // _ = stdin.readUntilDelimiterOrEof(buf[0..], '\n') catch |e| return; } inline fn dispatch(self: *Interpreter, next_ip: usize, code: []Instruction, err: *?WasmError) void { const next_instr = code[next_ip]; return @call(.{ .modifier = .always_tail }, lookup[@enumToInt(next_instr)], .{ self, next_ip, code, err }); } fn impl_ni(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { std.debug.warn("not implemented: {any}\n", .{code[ip]}); err.* = error.NotImplemented; } fn @"unreachable"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { err.* = error.TrapUnreachable; } fn nop(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn block(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const meta = code[ip].block; self.pushLabel(Label{ 
.return_arity = meta.return_arity, .op_stack_len = self.op_ptr - meta.param_arity, .branch_target = meta.branch_target, }) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn loop(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const meta = code[ip].loop; self.pushLabel(Label{ // note that we use block_params rather than block_returns for return arity: .return_arity = meta.param_arity, .op_stack_len = self.op_ptr - meta.param_arity, .branch_target = meta.branch_target, }) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"if"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const meta = code[ip].@"if"; const condition = self.popOperand(u32); self.pushLabel(Label{ .return_arity = meta.return_arity, .op_stack_len = self.op_ptr - meta.param_arity, .branch_target = meta.branch_target, }) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, if (condition == 0) meta.else_ip else ip + 1, code, err }); } fn @"else"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const label = self.popLabel(); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, label.branch_target, code, err }); } fn if_no_else(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const meta = code[ip].if_no_else; const condition = self.popOperand(u32); if (condition == 0) { return @call(.{ .modifier = .always_tail }, dispatch, .{ self, meta.branch_target, code, err }); } else { // We are inside the if branch self.pushLabel(Label{ .return_arity = meta.return_arity, .op_stack_len = self.op_ptr - meta.param_arity, .branch_target = meta.branch_target, }) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } } fn end(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const label = self.popLabel(); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn br(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const next_ip = self.branch(code[ip].br); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, next_ip, code, err }); } fn br_if(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const condition = self.popOperand(u32); const next_ip = if (condition == 0) ip + 1 else self.branch(code[ip].br_if); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, next_ip, code, err }); } fn br_table(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const meta = code[ip].br_table; const i = self.popOperand(u32); const ls = self.inst.module.br_table_indices.items[meta.ls.offset .. meta.ls.offset + meta.ls.count]; const next_ip = if (i >= ls.len) self.branch(meta.ln) else self.branch(ls[i]); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, next_ip, code, err }); } fn @"return"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const frame = self.peekFrame(); const n = frame.return_arity; const label = self.label_stack[frame.label_stack_len]; // The mem copy is equivalent of popping n operands, doing everything // up to and including popFrame and then repushing the n operands var dst = self.op_stack[label.op_stack_len .. 
label.op_stack_len + n]; const src = self.op_stack[self.op_ptr - n .. self.op_ptr]; mem.copy(u64, dst, src); self.op_ptr = label.op_stack_len + n; self.label_ptr = frame.label_stack_len; _ = self.popFrame(); if (self.frame_ptr == 0) return; // If this is the last frame on the stack we're done invoking // We potentially change instance when returning from a function, so restore the inst const previous_frame = self.peekFrame(); self.inst = previous_frame.inst; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, label.branch_target, previous_frame.inst.module.parsed_code.items, err }); } fn call(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const function_index = code[ip].call; const function = self.inst.getFunc(function_index) catch |e| { err.* = e; return; }; var next_ip = ip; switch (function) { .function => |f| { // Check we have enough stack space self.checkStackSpace(f.required_stack_space + f.locals_count) catch |e| { err.* = e; return; }; // Make space for locals (again, params already on stack) self.op_ptr += f.locals_count; self.inst = self.inst.store.instance(f.instance) catch |e| { err.* = e; return; }; // Consume parameters from the stack self.pushFrame(Frame{ .op_stack_len = self.op_ptr - f.params.len - f.locals_count, .label_stack_len = self.label_ptr, .return_arity = f.results.len, .inst = self.inst, }, f.locals_count + f.params.len) catch |e| { err.* = e; return; }; // Our continuation is the code after call self.pushLabel(Label{ .return_arity = f.results.len, .op_stack_len = self.op_ptr - f.params.len - f.locals_count, .branch_target = ip + 1, }) catch |e| { err.* = e; return; }; next_ip = f.start; }, .host_function => |hf| { hf.func(self) catch |e| { err.* = e; return; }; next_ip = ip + 1; }, } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, next_ip, self.inst.module.parsed_code.items, err }); } fn call_indirect(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const call_indirect_instruction = code[ip].call_indirect; var module = self.inst.module; const op_func_type_index = call_indirect_instruction.@"type"; const table_index = call_indirect_instruction.table; // Read lookup index from stack const lookup_index = self.popOperand(u32); const table = self.inst.getTable(table_index) catch |e| { err.* = e; return; }; const function_handle = table.lookup(lookup_index) catch |e| { err.* = e; return; }; const function = self.inst.store.function(function_handle) catch |e| { err.* = e; return; }; var next_ip = ip; switch (function) { .function => |func| { // Check that signatures match const call_indirect_func_type = module.types.list.items[op_func_type_index]; if (!Module.signaturesEqual(func.params, func.results, call_indirect_func_type)) { err.* = error.IndirectCallTypeMismatch; return; } // Check we have enough stack space self.checkStackSpace(func.required_stack_space + func.locals_count) catch |e| { err.* = e; return; }; // Make space for locals (again, params already on stack) self.op_ptr += func.locals_count; self.inst = self.inst.store.instance(func.instance) catch |e| { err.* = e; return; }; // Consume parameters from the stack self.pushFrame(Frame{ .op_stack_len = self.op_ptr - func.params.len - func.locals_count, .label_stack_len = self.label_ptr, .return_arity = func.results.len, .inst = self.inst, }, func.locals_count + func.params.len) catch |e| { err.* = e; return; }; // Our continuation is the code after call self.pushLabel(Label{ .return_arity = func.results.len, .op_stack_len = 
self.op_ptr - func.params.len - func.locals_count, .branch_target = ip + 1, }) catch |e| { err.* = e; return; }; next_ip = func.start; }, .host_function => |host_func| { const call_indirect_func_type = module.types.list.items[op_func_type_index]; if (!Module.signaturesEqual(host_func.params, host_func.results, call_indirect_func_type)) { err.* = error.IndirectCallTypeMismatch; return; } host_func.func(self) catch |e| { err.* = e; return; }; next_ip = ip + 1; }, } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, next_ip, self.inst.module.parsed_code.items, err }); } fn fast_call(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const f = code[ip].fast_call; // Check we have enough stack space self.checkStackSpace(f.required_stack_space + f.locals) catch |e| { err.* = e; return; }; // Make space for locals (again, params already on stack) self.op_ptr += f.locals; // Consume parameters from the stack self.pushFrame(Frame{ .op_stack_len = self.op_ptr - f.params - f.locals, .label_stack_len = self.label_ptr, .return_arity = f.results, .inst = self.inst, }, f.locals + f.params) catch |e| { err.* = e; return; }; // Our continuation is the code after call self.pushLabel(Label{ .return_arity = f.results, .op_stack_len = self.op_ptr - f.params - f.locals, .branch_target = ip + 1, }) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, f.start, code, err }); } fn drop(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { _ = self.popAnyOperand(); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn select(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const condition = self.popOperand(u32); const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); if (condition != 0) { self.pushOperandNoCheck(u64, c1); } else { self.pushOperandNoCheck(u64, c2); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"local.get"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const local_index = code[ip].@"local.get"; const frame = self.peekFrame(); self.pushOperandNoCheck(u64, frame.locals[local_index]); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"local.set"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const local_index = code[ip].@"local.set"; const frame = self.peekFrame(); frame.locals[local_index] = self.popOperand(u64); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"local.tee"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const local_index = code[ip].@"local.tee"; const frame = self.peekFrame(); frame.locals[local_index] = self.peekOperand(); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"global.get"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const global_index = code[ip].@"global.get"; const global = self.inst.getGlobal(global_index) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u64, global.value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"global.set"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const global_index = code[ip].@"global.set"; const value = self.popAnyOperand(); const global = self.inst.getGlobal(global_index) catch |e| { err.* = 
e; return; }; global.value = value; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.load"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.load"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(u32, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u32, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.load"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.load"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(u64, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.load"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"f32.load"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(f32, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(f32, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.load"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"f64.load"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(f64, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(f64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.load8_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.load8_s"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(i8, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i32, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.load8_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.load8_u"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(u8, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u32, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.load16_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.load16_s"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(i16, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i32, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.load16_u"(self: *Interpreter, ip: 
usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.load16_u"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(u16, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u32, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.load8_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.load8_s"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(i8, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.load8_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.load8_u"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(u8, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.load16_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.load16_s"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(i16, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.load16_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.load16_u"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(u16, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.load32_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.load32_s"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(i32, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.load32_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.load32_u"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const address = self.popOperand(u32); const value = memory.read(u32, offset, address) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u64, value); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.store"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.store"; const memory = self.inst.getMemory(0) catch 
|e| { err.* = e; return; }; const offset = load_data.offset; const value = self.popOperand(u32); const address = self.popOperand(u32); memory.write(u32, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.store"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.store"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = self.popOperand(u64); const address = self.popOperand(u32); memory.write(u64, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.store"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"f32.store"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = self.popOperand(f32); const address = self.popOperand(u32); memory.write(f32, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.store"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"f64.store"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = self.popOperand(f64); const address = self.popOperand(u32); memory.write(f64, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.store8"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.store8"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = @truncate(u8, self.popOperand(u32)); const address = self.popOperand(u32); memory.write(u8, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.store16"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i32.store16"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = @truncate(u16, self.popOperand(u32)); const address = self.popOperand(u32); memory.write(u16, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.store8"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.store8"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = @truncate(u8, self.popOperand(u64)); const address = self.popOperand(u32); memory.write(u8, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.store16"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.store16"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = @truncate(u16, self.popOperand(u64)); const address = self.popOperand(u32); 
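// Note on the store handlers: the value is popped first (it sits on top of the operand
// stack) and the address second; the narrower stores truncate the value to the target
// width before writing, matching the wasm iNN.storeM semantics.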
memory.write(u16, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.store32"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const load_data = code[ip].@"i64.store32"; const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const offset = load_data.offset; const value = @truncate(u32, self.popOperand(u64)); const address = self.popOperand(u32); memory.write(u32, offset, address, value) catch |e| { err.* = e; return; }; return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"memory.size"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u32, @intCast(u32, memory.data.items.len)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"memory.grow"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const memory = self.inst.getMemory(0) catch |e| { err.* = e; return; }; const num_pages = self.popOperand(u32); if (memory.grow(num_pages)) |old_size| { self.pushOperandNoCheck(u32, @intCast(u32, old_size)); } else |_| { self.pushOperandNoCheck(i32, @as(i32, -1)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.const"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const instr = code[ip]; self.pushOperandNoCheck(i32, instr.@"i32.const"); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.const"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const instr = code[ip]; self.pushOperandNoCheck(i64, instr.@"i64.const"); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.const"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const instr = code[ip]; self.pushOperandNoCheck(f32, instr.@"f32.const"); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.const"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const instr = code[ip]; self.pushOperandNoCheck(f64, instr.@"f64.const"); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.eqz"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 == 0) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.eq"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 == c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.ne"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 != c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.lt_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 < c2) 1 else 0)); 
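// Comparison handlers push their boolean result as an integer (1 for true, 0 for false);
// the _s variants pop the operands as signed integers, the _u variants as unsigned.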
return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.lt_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 < c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.gt_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 > c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.gt_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 > c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.le_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 <= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.le_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 <= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.ge_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 >= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.ge_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 >= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.eqz"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == 0) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.eq"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.ne"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.lt_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.lt_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = 
self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.gt_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.gt_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.le_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.le_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.ge_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.ge_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.eq"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.ne"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.lt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.gt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.le"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, 
dispatch, .{ self, ip + 1, code, err }); } fn @"f32.ge"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.eq"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.ne"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.lt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.gt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.le"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.ge"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.clz"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @clz(u32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.ctz"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @ctz(u32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.popcnt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @popCount(u32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.add"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 +% c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.sub"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 -% c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.mul"(self: 
*Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 *% c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.div_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); const div = math.divTrunc(i32, c1, c2) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i32, div); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.div_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); const div = math.divTrunc(u32, c1, c2) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u32, div); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.rem_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); const abs = math.absInt(c2) catch |e| { err.* = e; return; }; const rem = math.rem(i32, c1, abs) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i32, rem); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.rem_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); const rem = math.rem(u32, c1, c2) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u32, rem); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.and"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 & c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.or"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 | c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.xor"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 ^ c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.shl"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.shl(u32, c1, c2 % 32)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.shr_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); const mod = math.mod(i32, c2, 32) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i32, math.shr(i32, c1, mod)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.shr_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.shr(u32, c1, c2 % 32)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } 
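// The shift and rotate handlers reduce the shift amount modulo the operand bit width
// (`c2 % 32` here, `c2 % 64` in the i64 variants), as required by the WebAssembly spec.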
fn @"i32.rotl"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.rotl(u32, c1, c2 % 32)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.rotr"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.rotr(u32, c1, c2 % 32)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.clz"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @clz(u64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.ctz"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @ctz(u64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.popcnt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @popCount(u64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.add"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 +% c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.sub"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 -% c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.mul"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 *% c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.div_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); const div = math.divTrunc(i64, c1, c2) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i64, div); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.div_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); const div = math.divTrunc(u64, c1, c2) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(u64, div); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.rem_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); const abs = math.absInt(c2) catch |e| { err.* = e; return; }; const rem = math.rem(i64, c1, abs) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i64, rem); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.rem_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); const rem = math.rem(u64, c1, c2) catch 
|e| { err.* = e; return; }; self.pushOperandNoCheck(u64, rem); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.and"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 & c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.or"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 | c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.xor"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 ^ c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.shl"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.shl(u64, c1, c2 % 64)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.shr_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); const mod = math.mod(i64, c2, 64) catch |e| { err.* = e; return; }; self.pushOperandNoCheck(i64, math.shr(i64, c1, mod)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.shr_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.shr(u64, c1, c2 % 64)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.rotl"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.rotl(u64, c1, c2 % 64)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.rotr"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.rotr(u64, c1, c2 % 64)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.abs"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, math.fabs(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.neg"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, -c1); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.ceil"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @ceil(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.floor"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @floor(c1)); return @call(.{ .modifier = .always_tail }, 
dispatch, .{ self, ip + 1, code, err }); } fn @"f32.trunc"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @trunc(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.nearest"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); const floor = @floor(c1); const ceil = @ceil(c1); if (ceil - c1 == c1 - floor) { if (@mod(ceil, 2) == 0) { self.pushOperandNoCheck(f32, ceil); } else { self.pushOperandNoCheck(f32, floor); } } else { self.pushOperandNoCheck(f32, @round(c1)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.sqrt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, math.sqrt(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.add"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 + c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.sub"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 - c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.mul"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 * c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.div"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 / c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.min"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); if (math.isNan(c1)) { self.pushOperandNoCheck(f32, math.nan_f32); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (math.isNan(c2)) { self.pushOperandNoCheck(f32, math.nan_f32); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (c1 == 0.0 and c2 == 0.0) { if (math.signbit(c1)) { self.pushOperandNoCheck(f32, c1); } else { self.pushOperandNoCheck(f32, c2); } } else { self.pushOperandNoCheck(f32, math.min(c1, c2)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.max"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); if (math.isNan(c1)) { self.pushOperandNoCheck(f32, math.nan_f32); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (math.isNan(c2)) { self.pushOperandNoCheck(f32, math.nan_f32); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (c1 == 0.0 and c2 == 0.0) { if (math.signbit(c1)) { self.pushOperandNoCheck(f32, c2); } else { self.pushOperandNoCheck(f32, c1); } } else { self.pushOperandNoCheck(f32, math.max(c1, c2)); } return 
@call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.copysign"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); if (math.signbit(c2)) { self.pushOperandNoCheck(f32, -math.fabs(c1)); } else { self.pushOperandNoCheck(f32, math.fabs(c1)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.abs"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, math.fabs(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.neg"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, -c1); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.ceil"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @ceil(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.floor"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @floor(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.trunc"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @trunc(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.nearest"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); const floor = @floor(c1); const ceil = @ceil(c1); if (ceil - c1 == c1 - floor) { if (@mod(ceil, 2) == 0) { self.pushOperandNoCheck(f64, ceil); } else { self.pushOperandNoCheck(f64, floor); } } else { self.pushOperandNoCheck(f64, @round(c1)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.sqrt"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, math.sqrt(c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.add"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 + c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.sub"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 - c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.mul"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 * c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.div"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 / c2); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, 
code, err }); } fn @"f64.min"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); if (math.isNan(c1)) { self.pushOperandNoCheck(f64, math.nan_f64); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (math.isNan(c2)) { self.pushOperandNoCheck(f64, math.nan_f64); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (c1 == 0.0 and c2 == 0.0) { if (math.signbit(c1)) { self.pushOperandNoCheck(f64, c1); } else { self.pushOperandNoCheck(f64, c2); } } else { self.pushOperandNoCheck(f64, math.min(c1, c2)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.max"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); if (math.isNan(c1)) { self.pushOperandNoCheck(f64, math.nan_f64); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (math.isNan(c2)) { self.pushOperandNoCheck(f64, math.nan_f64); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (c1 == 0.0 and c2 == 0.0) { if (math.signbit(c1)) { self.pushOperandNoCheck(f64, c2); } else { self.pushOperandNoCheck(f64, c1); } } else { self.pushOperandNoCheck(f64, math.max(c1, c2)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.copysign"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); if (math.signbit(c2)) { self.pushOperandNoCheck(f64, -math.fabs(c1)); } else { self.pushOperandNoCheck(f64, math.fabs(c1)); } return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.wrap_i64"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i32, @truncate(i32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.trunc_f32_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc >= @intToFloat(f32, std.math.maxInt(i32))) { err.* = error.Overflow; return; } if (trunc < @intToFloat(f32, std.math.minInt(i32))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(i32, @floatToInt(i32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.trunc_f32_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc >= @intToFloat(f32, std.math.maxInt(u32))) { err.* = error.Overflow; return; } if (trunc < @intToFloat(f32, std.math.minInt(u32))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(u32, @floatToInt(u32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.trunc_f64_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc > @intToFloat(f64, std.math.maxInt(i32))) { err.* = error.Overflow; return; } if (trunc < 
@intToFloat(f64, std.math.minInt(i32))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(i32, @floatToInt(i32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.trunc_f64_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc > @intToFloat(f64, std.math.maxInt(u32))) { err.* = error.Overflow; return; } if (trunc < @intToFloat(f64, std.math.minInt(u32))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(u32, @floatToInt(u32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.extend_i32_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @truncate(i32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.extend_i32_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @truncate(u32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.trunc_f32_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc >= @intToFloat(f32, std.math.maxInt(i64))) { err.* = error.Overflow; return; } if (trunc < @intToFloat(f32, std.math.minInt(i64))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(i64, @floatToInt(i64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.trunc_f32_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc >= @intToFloat(f32, std.math.maxInt(u64))) { err.* = error.Overflow; return; } if (trunc < @intToFloat(f32, std.math.minInt(u64))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(u64, @floatToInt(u64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.trunc_f64_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc >= @intToFloat(f64, std.math.maxInt(i64))) { err.* = error.Overflow; return; } if (trunc < @intToFloat(f64, std.math.minInt(i64))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(i64, @floatToInt(i64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.trunc_f64_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); if (math.isNan(c1)) { err.* = error.InvalidConversion; return; } const trunc = @trunc(c1); if (trunc >= @intToFloat(f64, std.math.maxInt(u64))) { err.* = error.Overflow; return; } if (trunc < @intToFloat(f64, std.math.minInt(u64))) { err.* = error.Overflow; return; } self.pushOperandNoCheck(u64, @floatToInt(u64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.convert_i32_s"(self: *Interpreter, ip: usize, code: []Instruction, err: 
*?WasmError) void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f32, @intToFloat(f32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.convert_i32_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(f32, @intToFloat(f32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.convert_i64_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f32, @intToFloat(f32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.convert_i64_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(f32, @intToFloat(f32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.demote_f64"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f32, @floatCast(f32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.convert_i32_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f64, @intToFloat(f64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.convert_i32_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(f64, @intToFloat(f64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.convert_i64_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f64, @intToFloat(f64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.convert_i64_u"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(f64, @intToFloat(f64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.promote_f32"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f64, @floatCast(f64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.reinterpret_f32"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(i32, @bitCast(i32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.reinterpret_f64"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(i64, @bitCast(i64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f32.reinterpret_i32"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f32, @bitCast(f32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"f64.reinterpret_i64"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) 
void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f64, @bitCast(f64, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.extend8_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(i32, @truncate(i8, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i32.extend16_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(i32, @truncate(i16, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.extend8_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @truncate(i8, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.extend16_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @truncate(i16, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn @"i64.extend32_s"(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @truncate(i32, c1)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } fn trunc_sat(self: *Interpreter, ip: usize, code: []Instruction, err: *?WasmError) void { const meta = code[ip].trunc_sat; switch (meta) { 0 => { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i32, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f32, std.math.maxInt(i32))) { self.pushOperandNoCheck(i32, @bitCast(i32, @as(u32, 0x7fffffff))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f32, std.math.minInt(i32))) { self.pushOperandNoCheck(i32, @bitCast(i32, @as(u32, 0x80000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(i32, @floatToInt(i32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, 1 => { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u32, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f32, std.math.maxInt(u32))) { self.pushOperandNoCheck(u32, @bitCast(u32, @as(u32, 0xffffffff))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f32, std.math.minInt(u32))) { self.pushOperandNoCheck(u32, @bitCast(u32, @as(u32, 0x00000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(u32, @floatToInt(u32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, 2 => { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i32, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f64, std.math.maxInt(i32))) { self.pushOperandNoCheck(i32, @bitCast(i32, @as(u32, 0x7fffffff))); return @call(.{ .modifier = 
.always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f64, std.math.minInt(i32))) { self.pushOperandNoCheck(i32, @bitCast(i32, @as(u32, 0x80000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(i32, @floatToInt(i32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, 3 => { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u32, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f64, std.math.maxInt(u32))) { self.pushOperandNoCheck(u32, @bitCast(u32, @as(u32, 0xffffffff))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f64, std.math.minInt(u32))) { self.pushOperandNoCheck(u32, @bitCast(u32, @as(u32, 0x00000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(u32, @floatToInt(u32, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, 4 => { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i64, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f32, std.math.maxInt(i64))) { self.pushOperandNoCheck(i64, @bitCast(i64, @as(u64, 0x7fffffffffffffff))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f32, std.math.minInt(i64))) { self.pushOperandNoCheck(i64, @bitCast(i64, @as(u64, 0x8000000000000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(i64, @floatToInt(i64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, 5 => { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u64, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f32, std.math.maxInt(u64))) { self.pushOperandNoCheck(u64, @bitCast(u64, @as(u64, 0xffffffffffffffff))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f32, std.math.minInt(u64))) { self.pushOperandNoCheck(u64, @bitCast(u64, @as(u64, 0x0000000000000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(u64, @floatToInt(u64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, 6 => { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i64, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f64, std.math.maxInt(i64))) { self.pushOperandNoCheck(i64, @bitCast(i64, @as(u64, 0x7fffffffffffffff))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f64, std.math.minInt(i64))) { self.pushOperandNoCheck(i64, @bitCast(i64, @as(u64, 0x8000000000000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(i64, @floatToInt(i64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, 7 => { const c1 = 
self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u64, 0); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc >= @intToFloat(f64, std.math.maxInt(u64))) { self.pushOperandNoCheck(u64, @bitCast(u64, @as(u64, 0xffffffffffffffff))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } if (trunc < @intToFloat(f64, std.math.minInt(u64))) { self.pushOperandNoCheck(u64, @bitCast(u64, @as(u64, 0x0000000000000000))); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); } self.pushOperandNoCheck(u64, @floatToInt(u64, trunc)); return @call(.{ .modifier = .always_tail }, dispatch, .{ self, ip + 1, code, err }); }, else => { err.* = error.Trap; return; }, } } const InstructionFunction = fn (*Interpreter, usize, []Instruction, *?WasmError) void; const lookup = [256]InstructionFunction{ @"unreachable", nop, block, loop, @"if", @"else", if_no_else, impl_ni, impl_ni, impl_ni, impl_ni, end, br, br_if, br_table, @"return", call, call_indirect, fast_call, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, drop, select, impl_ni, impl_ni, impl_ni, impl_ni, @"local.get", @"local.set", @"local.tee", @"global.get", @"global.set", impl_ni, impl_ni, impl_ni, @"i32.load", @"i64.load", @"f32.load", @"f64.load", @"i32.load8_s", @"i32.load8_u", @"i32.load16_s", @"i32.load16_u", @"i64.load8_s", @"i64.load8_u", @"i64.load16_s", @"i64.load16_u", @"i64.load32_s", @"i64.load32_u", @"i32.store", @"i64.store", @"f32.store", @"f64.store", @"i32.store8", @"i32.store16", @"i64.store8", @"i64.store16", @"i64.store32", @"memory.size", @"memory.grow", @"i32.const", @"i64.const", @"f32.const", @"f64.const", @"i32.eqz", @"i32.eq", @"i32.ne", @"i32.lt_s", @"i32.lt_u", @"i32.gt_s", @"i32.gt_u", @"i32.le_s", @"i32.le_u", @"i32.ge_s", @"i32.ge_u", @"i64.eqz", @"i64.eq", @"i64.ne", @"i64.lt_s", @"i64.lt_u", @"i64.gt_s", @"i64.gt_u", @"i64.le_s", @"i64.le_u", @"i64.ge_s", @"i64.ge_u", @"f32.eq", @"f32.ne", @"f32.lt", @"f32.gt", @"f32.le", @"f32.ge", @"f64.eq", @"f64.ne", @"f64.lt", @"f64.gt", @"f64.le", @"f64.ge", @"i32.clz", @"i32.ctz", @"i32.popcnt", @"i32.add", @"i32.sub", @"i32.mul", @"i32.div_s", @"i32.div_u", @"i32.rem_s", @"i32.rem_u", @"i32.and", @"i32.or", @"i32.xor", @"i32.shl", @"i32.shr_s", @"i32.shr_u", @"i32.rotl", @"i32.rotr", @"i64.clz", @"i64.ctz", @"i64.popcnt", @"i64.add", @"i64.sub", @"i64.mul", @"i64.div_s", @"i64.div_u", @"i64.rem_s", @"i64.rem_u", @"i64.and", @"i64.or", @"i64.xor", @"i64.shl", @"i64.shr_s", @"i64.shr_u", @"i64.rotl", @"i64.rotr", @"f32.abs", @"f32.neg", @"f32.ceil", @"f32.floor", @"f32.trunc", @"f32.nearest", @"f32.sqrt", @"f32.add", @"f32.sub", @"f32.mul", @"f32.div", @"f32.min", @"f32.max", @"f32.copysign", @"f64.abs", @"f64.neg", @"f64.ceil", @"f64.floor", @"f64.trunc", @"f64.nearest", @"f64.sqrt", @"f64.add", @"f64.sub", @"f64.mul", @"f64.div", @"f64.min", @"f64.max", @"f64.copysign", @"i32.wrap_i64", @"i32.trunc_f32_s", @"i32.trunc_f32_u", @"i32.trunc_f64_s", @"i32.trunc_f64_u", @"i64.extend_i32_s", @"i64.extend_i32_u", @"i64.trunc_f32_s", @"i64.trunc_f32_u", @"i64.trunc_f64_s", @"i64.trunc_f64_u", @"f32.convert_i32_s", @"f32.convert_i32_u", @"f32.convert_i64_s", @"f32.convert_i64_u", @"f32.demote_f64", @"f64.convert_i32_s", @"f64.convert_i32_u", @"f64.convert_i64_s", @"f64.convert_i64_u", @"f64.promote_f32", @"i32.reinterpret_f32", @"i64.reinterpret_f64", @"f32.reinterpret_i32", @"f64.reinterpret_i64", @"i32.extend8_s", 
@"i32.extend16_s", @"i64.extend8_s", @"i64.extend16_s", @"i64.extend32_s", impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, trunc_sat, impl_ni, impl_ni, impl_ni, }; pub fn invoke(self: *Interpreter, ip: usize) !void { const instr = self.inst.module.parsed_code.items[ip]; var err: ?WasmError = null; @call(.{}, lookup[@enumToInt(instr)], .{ self, ip, self.inst.module.parsed_code.items, &err }); if (err) |e| return e; } // https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-control-mathsf-br-l pub fn branch(self: *Interpreter, target: u32) usize { const label = self.peekNthLabel(target); const n = label.return_arity; // var dest = self.op_stack[label.op_stack_len .. label.op_stack_len + n]; // const src = self.op_stack[self.op_stack.len - n ..]; var i: usize = 0; while (i < n) : (i += 1) { self.op_stack[label.op_stack_len + i] = self.op_stack[self.op_ptr - n + i]; } // mem.copy(u64, dest, src); // self.op_stack = self.op_stack[0 .. label.op_stack_len + n]; self.op_ptr = label.op_stack_len + n; _ = self.popLabels(target); return label.branch_target; } pub fn pushOperand(self: *Interpreter, comptime T: type, value: T) !void { if (self.op_ptr == self.op_stack.len) return error.OperandStackOverflow; self.op_ptr += 1; self.op_stack[self.op_ptr - 1] = switch (T) { i32 => @as(u64, @bitCast(u32, value)), i64 => @bitCast(u64, value), f32 => @as(u64, @bitCast(u32, value)), f64 => @bitCast(u64, value), u32 => @as(u64, value), // TODO: figure out types u64 => value, else => |t| @compileError("Unsupported operand type: " ++ @typeName(t)), }; } pub fn checkStackSpace(self: *Interpreter, n: usize) !void { if (self.op_ptr + n > self.op_stack.len) return error.CheckStackSpace; } pub fn pushOperandNoCheck(self: *Interpreter, comptime T: type, value: T) void { self.op_ptr += 1; self.op_stack[self.op_ptr - 1] = switch (T) { i32 => @as(u64, @bitCast(u32, value)), i64 => @bitCast(u64, value), f32 => @as(u64, @bitCast(u32, value)), f64 => @bitCast(u64, value), u32 => @as(u64, value), // TODO: figure out types u64 => value, else => |t| @compileError("Unsupported operand type: " ++ @typeName(t)), }; } pub fn popOperand(self: *Interpreter, comptime T: type) T { defer self.op_ptr -= 1; const value = self.op_stack[self.op_ptr - 1]; return switch (T) { i32 => @bitCast(i32, @truncate(u32, value)), i64 => @bitCast(i64, value), f32 => @bitCast(f32, @truncate(u32, value)), f64 => @bitCast(f64, value), u32 => @truncate(u32, value), u64 => value, else => |t| @compileError("Unsupported operand type: " ++ @typeName(t)), }; } pub fn popAnyOperand(self: *Interpreter) u64 { defer self.op_ptr -= 1; return self.op_stack[self.op_ptr - 1]; } fn peekOperand(self: *Interpreter) u64 { return self.op_stack[self.op_ptr - 1]; } fn peekNthOperand(self: *Interpreter, index: u32) u64 { return self.op_stack[self.op_ptr - index - 1]; } // TODO: if the code is validated, do we need to know the params count // i.e. 
can we get rid of the dependency on params so that we don't // have to lookup a function (necessarily) pub fn pushFrame(self: *Interpreter, frame: Frame, params_and_locals_count: usize) !void { if (self.frame_ptr == self.frame_stack.len) return error.ControlStackOverflow; self.frame_ptr += 1; const current_frame = &self.frame_stack[self.frame_ptr - 1]; current_frame.* = frame; // TODO: index out of bounds (error if we've run out of operand stack space): current_frame.locals = self.op_stack[frame.op_stack_len .. frame.op_stack_len + params_and_locals_count]; } pub fn popFrame(self: *Interpreter) Frame { defer self.frame_ptr -= 1; return self.frame_stack[self.frame_ptr - 1]; } fn peekFrame(self: *Interpreter) *Frame { return &self.frame_stack[self.frame_ptr - 1]; } pub fn pushLabel(self: *Interpreter, label: Label) !void { if (self.label_ptr == self.label_stack.len) return error.LabelStackOverflow; self.label_ptr += 1; const current_label = self.peekNthLabel(0); current_label.* = label; } pub fn popLabel(self: *Interpreter) Label { defer self.label_ptr -= 1; return self.label_stack[self.label_ptr - 1]; } // peekNthLabel // // Returns nth label on the Label stack relative to the top of the stack // fn peekNthLabel(self: *Interpreter, index: u32) *Label { return &self.label_stack[self.label_ptr - index - 1]; } // popLabels // // target: branch target (relative to current scope which is 0) // // popLabels pops labels up to and including `target`. Returns the // the label at `target`. pub fn popLabels(self: *Interpreter, target: u32) Label { defer self.label_ptr = self.label_ptr - target - 1; return self.label_stack[self.label_stack.len - target - 1]; } pub const Frame = struct { locals: []u64 = undefined, // TODO: we're in trouble if we move our stacks in memory return_arity: usize = 0, op_stack_len: usize, label_stack_len: usize, inst: *Instance, }; // Label // // - code: the code we should interpret after `end` pub const Label = struct { return_arity: usize = 0, branch_target: usize = 0, op_stack_len: usize, // u32? }; }; const testing = std.testing; test "operand push / pop test" { var op_stack: [6]u64 = [_]u64{0} ** 6; var frame_stack_mem: [1024]Interpreter.Frame = [_]Interpreter.Frame{undefined} ** 1024; var label_stack_mem: [1024]Interpreter.Label = [_]Interpreter.Label{undefined} ** 1024; var inst: Instance = undefined; var i = Interpreter.init(op_stack[0..], frame_stack_mem[0..], label_stack_mem[0..], &inst); try i.pushOperand(i32, 22); try i.pushOperand(i32, -23); try i.pushOperand(i64, 44); try i.pushOperand(i64, -43); try i.pushOperand(f32, 22.07); try i.pushOperand(f64, 43.07); // stack overflow: if (i.pushOperand(i32, 0)) |_| { return error.TestExpectedError; } else |err| { if (err != error.OperandStackOverflow) return error.TestUnexpectedError; } try testing.expectEqual(@as(f64, 43.07), i.popOperand(f64)); try testing.expectEqual(@as(f32, 22.07), i.popOperand(f32)); try testing.expectEqual(@as(i64, -43), i.popOperand(i64)); try testing.expectEqual(@as(i64, 44), i.popOperand(i64)); try testing.expectEqual(@as(i32, -23), i.popOperand(i32)); try testing.expectEqual(@as(i32, 22), i.popOperand(i32)); } // TODO: reinstate this. 
I think we need to build up a valid instance with module + code // test "simple interpret tests" { // var op_stack: [6]u64 = [_]u64{0} ** 6; // var frame_stack: [1024]Interpreter.Frame = [_]Interpreter.Frame{undefined} ** 1024; // var label_stack_mem: [1024]Interpreter.Label = [_]Interpreter.Label{undefined} ** 1024; // var inst: Instance = undefined; // var i = Interpreter.init(op_stack[0..], frame_stack[0..], label_stack_mem[0..], &inst); // try i.pushOperand(i32, 22); // try i.pushOperand(i32, -23); // var code = [_]Instruction{Instruction.@"i32.add"}; // try i.invoke(0); // try testing.expectEqual(@as(i32, -1), i.popOperand(i32)); // }
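// A small extra check (added for illustration; it mirrors the setup of the
// "operand push / pop test" above): every operand type shares the same u64
// slots, so pushing i32 -1 stores the zero-extended 32-bit pattern and
// popAnyOperand returns that raw slot unchanged.
test "operand slots store raw bit patterns" {
    var op_stack: [1]u64 = [_]u64{0} ** 1;
    var frame_stack_mem: [1]Interpreter.Frame = [_]Interpreter.Frame{undefined} ** 1;
    var label_stack_mem: [1]Interpreter.Label = [_]Interpreter.Label{undefined} ** 1;
    var inst: Instance = undefined;

    var i = Interpreter.init(op_stack[0..], frame_stack_mem[0..], label_stack_mem[0..], &inst);

    try i.pushOperand(i32, -1);
    try testing.expectEqual(@as(u64, 0x0000_0000_ffff_ffff), i.popAnyOperand());
}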
src/interpreter.zig
const std = @import("std"); const builtin = @import("builtin"); const liu = @import("liu"); const assets = @import("assets").kilordle; const Spec = assets.Spec; const wasm = liu.wasm; pub const WasmCommand = void; pub usingnamespace wasm; const ArrayList = std.ArrayList; const ext = struct { extern fn setPuzzles(obj: wasm.Obj) void; extern fn setWordsLeft(count: usize) void; extern fn addChar(code: u32) void; extern fn incrementSubmissionCount() void; extern fn resetSubmission() void; fn submitWordExt(l0: u8, l1: u8, l2: u8, l3: u8, l4: u8) callconv(.C) bool { return submitWord([_]u8{ l0, l1, l2, l3, l4 }) catch @panic("submitWord failed"); } fn initExt(l_asset: wasm.Obj) callconv(.C) void { init(l_asset) catch @panic("init failed"); } }; // I think this needs to be in root. I tried moving it inside `ext` and most of // the code got deleted. comptime { @export(ext.initExt, .{ .name = "init", .linkage = .Strong }); @export(ext.submitWordExt, .{ .name = "submitWord", .linkage = .Strong }); } const Wordle = struct { text: [5]u8, matches: [5]Match, letters_found: u8, places_found: u8, }; const Puzzle = struct { solution: [5]u8, filled: [5]u8, submits: []u8, }; // The values matter here, because we use the enum value for an ordered // comparison later on in the file const MatchKind = enum(u8) { none = 0, letter = 1, exact = 2 }; const Match = union(MatchKind) { none: void, exact: void, letter: u8, }; const Keys = struct { solution: wasm.Obj, filled: wasm.Obj, submits: wasm.Obj, }; // Initialized at start of program var wordles: [5][]const u8 = undefined; var wordle_words: [5][]const u8 = undefined; var keys: Keys = undefined; var wordles_left: ArrayList(Wordle) = undefined; var submissions: ArrayList([5]u8) = undefined; fn makeKeys() Keys { return .{ .solution = wasm.make.string(.manual, "solution"), .filled = wasm.make.string(.manual, "filled"), .submits = wasm.make.string(.manual, "submits"), }; } fn setPuzzles(puzzles: []Puzzle) void { const mark = wasm.watermark(); defer wasm.setWatermark(mark); const arr = wasm.out.array(); for (puzzles) |puzzle| { const obj = wasm.out.obj(); const solution = wasm.out.string(&puzzle.solution); const filled = wasm.out.string(&puzzle.filled); const submits = wasm.out.string(puzzle.submits); obj.objSet(keys.solution, solution); obj.objSet(keys.filled, filled); obj.objSet(keys.submits, submits); arr.arrayPush(obj); } ext.setPuzzles(arr); } // wordle-words: binary search for first 2, linear for last 3 fn searchList(word: [5]u8, dict: [5][]const u8) bool { var start: u32 = 0; var end: u32 = dict[0].len; for (word) |letter, idx| { const letter_dict = dict[idx][0..end]; // find loop found: { for (letter_dict[start..]) |l, offset| { if (l == letter) { start += offset; break :found; } } return false; } if (idx == 4) return true; } return false; } // Returns array of matches. Value v at index i is a match between wordle[i] // and submission[v], or null if that match doesn't exist. 
fn matchWordle(wordle: [5]u8, submission: [5]u8) [5]Match { var text = submission; var match = [_]Match{.none} ** 5; for (wordle) |c, idx| { if (submission[idx] == c) { match[idx] = .exact; text[idx] = 0; } } for (wordle) |c, idx| { if (match[idx] == .exact) { continue; } for (text) |*slot, text_idx| { if (slot.* == c) { match[idx] = .{ .letter = @truncate(u8, text_idx) }; slot.* = 0; } } } return match; } pub fn submitWord(word: [5]u8) !bool { var mark = liu.TempMark; defer liu.TempMark = mark; // lowercase for (word) |letter| { if (letter < 'a' or letter > 'z') { wasm.post(.err, "invalid string {s}", .{word}); return false; } } const is_wordle = searchList(word, wordles); if (!is_wordle and !searchList(word, wordle_words)) { return false; } try submissions.append(word); // We use a buffer that's 1 bigger than what we'll eventually read so // that we can add to the end and then sort the whole thing. This strategy // also has the benefit that insertion sort is guaranteed linear time // over our buffer, since it does one sweep up and then one sweep down. const top_count = 32; var top_values = try std.BoundedArray(Wordle, top_count + 1).init(0); var write_head: u32 = 0; var read_head: u32 = 0; const arena_len = wordles_left.items.len; while (read_head < arena_len) : (read_head += 1) { const wordle = &wordles_left.items[read_head]; const new_matches = matchWordle(wordle.text, word); for (new_matches) |new_match, idx| { const old_match = wordle.matches[idx]; if (@enumToInt(old_match) >= @enumToInt(new_match)) continue; wordle.matches[idx] = new_match; if (old_match == .none) wordle.letters_found += 1; if (new_match == .exact) wordle.places_found += 1; } // wordle is done, so we "delete" it by not writing it back to the buffer if (wordle.places_found >= 5) { continue; } try top_values.append(wordle.*); std.sort.insertionSort(Wordle, top_values.slice(), {}, compareWordles); if (top_values.len > top_count) { _ = top_values.pop(); } // write-back would be no-op; this also guarantees that the read and // write pointers don't alias, for whatever that's worth if (read_head == write_head) { write_head += 1; continue; } wordles_left.items[write_head] = wordle.*; write_head += 1; } wordles_left.items.len = write_head; var puzzles = ArrayList(Puzzle).init(liu.Temp); for (top_values.slice()) |wordle| { var relevant_submits = ArrayList(u8).init(liu.Temp); var matches = [_]Match{.none} ** 5; // This gets displayed in the app; in debug mode, we output the lowercase // letter so we can see it in the UI to spot-check math. In release, // we don't do that, because tha'd be bad. var filled = if (builtin.mode == .Debug) wordle.text else [_]u8{' '} ** 5; var found: u32 = 0; for (wordle.matches) |match, idx| { if (match == .exact) { matches[idx] = .exact; filled[idx] = wordle.text[idx] - 'a' + 'A'; } } for (submissions.items) |submit| { if (found >= 5) { break; } const found_before = found; var submit_letters = submit; const new_matches = matchWordle(wordle.text, submit); for (matches) |*slot, idx| { switch (slot.*) { .exact => continue, .letter => continue, .none => {}, } switch (new_matches[idx]) { // if we have an exact match, it should have been handled // earlier on when we matched the remaining wordles against // the new submission .exact => unreachable, .none => continue, .letter => |submit_idx| { // Uppercase means the output text should be orange. 
submit_letters[submit_idx] = submit[submit_idx] - 'a' + 'A'; slot.* = .{ .letter = submit_idx }; found += 1; }, } } if (found_before < found) { try relevant_submits.appendSlice(&submit_letters); try relevant_submits.append(','); } } if (relevant_submits.items.len > 0) { _ = relevant_submits.pop(); } try puzzles.append(.{ .solution = wordle.text, .filled = filled, .submits = relevant_submits.items, }); } setPuzzles(puzzles.items); ext.setWordsLeft(wordles_left.items.len); ext.resetSubmission(); ext.incrementSubmissionCount(); if (builtin.mode != .Debug) return true; if (puzzles.items.len == 0) return true; for (puzzles.items[0].solution) |c| ext.addChar(c); return true; } fn compareWordles(context: void, left: Wordle, right: Wordle) bool { _ = context; if (left.places_found != right.places_found) { return left.places_found > right.places_found; } if (left.letters_found != right.letters_found) { return left.letters_found > right.letters_found; } return false; } fn initData() !void { const wordle_count = wordles[0].len; try wordles_left.ensureUnusedCapacity(wordle_count); var i: u32 = 0; while (i < wordle_count) : (i += 1) { var wordle = Wordle{ .text = undefined, .matches = .{.none} ** 5, .letters_found = 0, .places_found = 0, }; for (wordle.text) |*slot, idx| { slot.* = wordles[idx][i]; } wordles_left.appendAssumeCapacity(wordle); } ext.setWordsLeft(wordles_left.items.len); } export fn reset() void { wordles_left.items.len = 0; submissions.items.len = 0; initData() catch @panic("initData failed"); } fn initDict(dict: *[5][]const u8, data: []const u8) void { const len = data.len / 5 - 1; var start: usize = 0; var end: usize = len; for (dict) |*slot| { slot.* = data[start..end]; start += len + 1; end += len + 1; } } pub fn init(l_asset: wasm.Obj) !void { wasm.initIfNecessary(); const asset_data = try wasm.in.alignedBytes(l_asset, liu.Pages, 8); const parsed = try liu.packed_asset.parse(Spec, asset_data); wordles[0] = parsed.wordles[0].slice(); wordles[1] = parsed.wordles[1].slice(); wordles[2] = parsed.wordles[2].slice(); wordles[3] = parsed.wordles[3].slice(); wordles[4] = parsed.wordles[4].slice(); wordle_words[0] = parsed.words[0].slice(); wordle_words[1] = parsed.words[1].slice(); wordle_words[2] = parsed.words[2].slice(); wordle_words[3] = parsed.words[3].slice(); wordle_words[4] = parsed.words[4].slice(); keys = makeKeys(); wordles_left = ArrayList(Wordle).init(liu.Pages); submissions = ArrayList([5]u8).init(liu.Pages); try initData(); // std.log.info("{}", .{wordles[0].len}); std.log.info("WASM initialized!", .{}); }
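// Worked example for matchWordle above (added for illustration; the words are
// arbitrary and not from the original source): matching the wordle "crane"
// against the submission "nacre" yields
//   match[4] = .exact             ('e' is in the right position)
//   match[0] = .{ .letter = 2 }   ('c' supplied by submission[2])
//   match[1] = .{ .letter = 3 }   ('r' supplied by submission[3])
//   match[2] = .{ .letter = 1 }   ('a' supplied by submission[1])
//   match[3] = .{ .letter = 0 }   ('n' supplied by submission[0])
// i.e. `.letter` stores an index into the *submission*, which is exactly what
// the highlighting loop in submitWord relies on when it uppercases
// submit_letters[submit_idx].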
src/routes/kilordle/kilordle.zig
const std = @import("std"); const aoc = @import("aoc-lib.zig"); const Menu = struct { const Result = struct { all: []const u8, ing: []const u8 }; allergens: std.StringHashMap(std.AutoHashMap(usize, bool)), ingredients: std.StringHashMap(std.AutoHashMap(usize, bool)), possible: std.StringHashMap(std.StringHashMap(bool)), alloc: std.mem.Allocator, pub fn init(alloc: std.mem.Allocator, in: [][]const u8) !*Menu { var self = try alloc.create(Menu); self.alloc = alloc; self.allergens = std.StringHashMap(std.AutoHashMap(usize, bool)).init(alloc); self.ingredients = std.StringHashMap(std.AutoHashMap(usize, bool)).init(alloc); self.possible = std.StringHashMap(std.StringHashMap(bool)).init(alloc); for (in) |line, i| { var ls = std.mem.split(u8, line, " (contains "); const ingstr = ls.next().?; var ings = std.mem.split(u8, ingstr, " "); while (ings.next()) |ing| { var e = try self.ingredients.getOrPutValue(ing, std.AutoHashMap(usize, bool).init(alloc)); try e.value_ptr.put(i, true); } var allstr = ls.next().?; var alls = std.mem.split(u8, allstr[0 .. allstr.len - 1], ", "); while (alls.next()) |all| { var e = try self.allergens.getOrPutValue(all, std.AutoHashMap(usize, bool).init(alloc)); try e.value_ptr.put(i, true); } } var it = self.ingredients.iterator(); while (it.next()) |ingentry| { var allit = self.allergens.iterator(); while (allit.next()) |allentry| { var maybeThisAllergen = true; var lineit = allentry.value_ptr.iterator(); while (lineit.next()) |lineentry| { if (!ingentry.value_ptr.contains(lineentry.key_ptr.*)) { maybeThisAllergen = false; } } if (maybeThisAllergen) { var e = try self.possible.getOrPutValue(ingentry.key_ptr.*, std.StringHashMap(bool).init(alloc)); try e.value_ptr.put(allentry.key_ptr.*, true); } } } return self; } pub fn deinit(m: *Menu) void { var ita = m.allergens.iterator(); while (ita.next()) |e| { e.value_ptr.*.deinit(); } m.allergens.deinit(); var iti = m.ingredients.iterator(); while (iti.next()) |e| { e.value_ptr.*.deinit(); } m.ingredients.deinit(); var itp = m.possible.iterator(); while (itp.next()) |e| { e.value_ptr.*.deinit(); } m.possible.deinit(); m.alloc.destroy(m); } pub fn Part1(m: *Menu) usize { var c: usize = 0; var it = m.ingredients.iterator(); while (it.next()) |ingentry| { const ing = ingentry.key_ptr.*; if (!m.possible.contains(ing)) { c += ingentry.value_ptr.count(); } } return c; } fn resultLessThan(c: void, a: Result, b: Result) bool { return aoc.stringLessThan(c, a.all, b.all); } pub fn Part2(m: *Menu) []const u8 { var resList = std.ArrayList(Result).init(m.alloc); defer resList.deinit(); var resLength: usize = 0; while (m.possible.count() > 0) { var it = m.possible.iterator(); while (it.next()) |possentry| { var ing = possentry.key_ptr.*; var allergens = possentry.value_ptr.*; if (allergens.count() == 1) { var allit = allergens.iterator(); const all = allit.next().?.key_ptr.*; //std.debug.print("{} is {}\n", .{ ing, all }); resList.append(Result{ .all = all, .ing = ing }) catch unreachable; resLength += ing.len + 1; _ = m.possible.remove(ing); var pit = m.possible.iterator(); while (pit.next()) |pe| { _ = pe.value_ptr.remove(all); } allergens.deinit(); } } } var res = resList.items; std.sort.sort(Result, res, {}, resultLessThan); if (resLength == 0) { return ""; } var rs = m.alloc.alloc(u8, resLength) catch unreachable; var i: usize = 0; for (res) |r| { std.mem.copy(u8, rs[i..], r.ing); i += r.ing.len; rs[i] = ','; i += 1; } return rs[0 .. 
rs.len - 1]; } }; test "examples" { const test1 = aoc.readLines(aoc.talloc, aoc.test1file); defer aoc.talloc.free(test1); const inp = aoc.readLines(aoc.talloc, aoc.inputfile); defer aoc.talloc.free(inp); var mt = Menu.init(aoc.talloc, test1) catch unreachable; defer mt.deinit(); try aoc.assertEq(@as(usize, 5), mt.Part1()); var rt = "mxmxvkd,sqjhc,fvjkl"; var resTest = mt.Part2(); defer aoc.talloc.free(resTest); try aoc.assert(std.mem.eql(u8, rt, resTest)); var m = Menu.init(aoc.talloc, inp) catch unreachable; defer m.deinit(); try aoc.assertEq(@as(usize, 2874), m.Part1()); var r = "gfvrr,ndkkq,jxcxh,bthjz,sgzr,mbkbn,pkkg,mjbtz"; var res = m.Part2(); defer aoc.talloc.free(res); try aoc.assert(std.mem.eql(u8, r, res)); } fn day21(inp: []const u8, bench: bool) anyerror!void { const lines = aoc.readLines(aoc.halloc, inp); defer aoc.halloc.free(lines); var m = try Menu.init(aoc.halloc, lines); defer m.deinit(); var p1 = m.Part1(); var p2 = m.Part2(); if (!bench) { try aoc.print("Part 1: {}\nPart 2: {s}\n", .{ p1, p2 }); } } pub fn main() anyerror!void { try aoc.benchme(aoc.input(), day21); }
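// A tiny self-contained check (added for illustration; the two menu lines are
// made up and not part of the original puzzle input): with only ingredient "a"
// appearing in every line that lists "dairy", the remaining ingredients b, c
// and d cannot contain an allergen, so Part1 counts their three appearances.
test "tiny menu" {
    var lines = [_][]const u8{
        "a b c (contains dairy)",
        "a d (contains dairy)",
    };
    var m = try Menu.init(std.testing.allocator, lines[0..]);
    defer m.deinit();
    try std.testing.expectEqual(@as(usize, 3), m.Part1());
}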
2020/21/aoc.zig
const zang = @import("zang"); const note_frequencies = @import("zang-12tet"); const common = @import("common.zig"); const c = @import("common/c.zig"); pub const AUDIO_FORMAT: zang.AudioFormat = .signed16_lsb; pub const AUDIO_SAMPLE_RATE = 48000; pub const AUDIO_BUFFER_SIZE = 1024; pub const DESCRIPTION = \\example_vibrato ; const a4 = 440.0; pub const Instrument = struct { pub const num_outputs = 2; pub const num_temps = 3; pub const Params = struct { sample_rate: f32, freq: f32, note_on: bool, }; osc: zang.PulseOsc, gate: zang.Gate, vib: zang.SineOsc, pub fn init() Instrument { return .{ .osc = zang.PulseOsc.init(), .gate = zang.Gate.init(), .vib = zang.SineOsc.init(), }; } pub fn paint( self: *Instrument, span: zang.Span, outputs: [num_outputs][]f32, temps: [num_temps][]f32, note_id_changed: bool, params: Params, ) void { zang.zero(span, temps[2]); self.vib.paint(span, .{temps[2]}, .{}, note_id_changed, .{ .sample_rate = params.sample_rate, .freq = zang.constant(4.0), .phase = zang.constant(0.0), }); var i = span.start; while (i < span.end) : (i += 1) { temps[2][i] = params.freq * (1.0 + 0.02 * temps[2][i]); } zang.addInto(span, outputs[1], temps[2]); // output the frequency for syncing the oscilloscope zang.zero(span, temps[0]); self.osc.paint(span, .{temps[0]}, .{}, note_id_changed, .{ .sample_rate = params.sample_rate, .freq = zang.buffer(temps[2]), .color = 0.5, }); zang.zero(span, temps[1]); self.gate.paint(span, .{temps[1]}, .{}, note_id_changed, .{ .note_on = params.note_on, }); zang.multiply(span, outputs[0], temps[0], temps[1]); } }; pub const MainModule = struct { pub const num_outputs = 2; pub const num_temps = 3; pub const output_audio = common.AudioOut{ .mono = 0 }; pub const output_visualize = 0; pub const output_sync_oscilloscope = 1; key0: ?i32, iq0: zang.Notes(Instrument.Params).ImpulseQueue, idgen0: zang.IdGenerator, instr0: Instrument, trig0: zang.Trigger(Instrument.Params), pub fn init() MainModule { return .{ .key0 = null, .iq0 = zang.Notes(Instrument.Params).ImpulseQueue.init(), .idgen0 = zang.IdGenerator.init(), .instr0 = Instrument.init(), .trig0 = zang.Trigger(Instrument.Params).init(), }; } pub fn paint( self: *MainModule, span: zang.Span, outputs: [num_outputs][]f32, temps: [num_temps][]f32, ) void { var ctr0 = self.trig0.counter(span, self.iq0.consume()); while (self.trig0.next(&ctr0)) |result| { self.instr0.paint( result.span, outputs, temps, result.note_id_changed, result.params, ); } } pub fn keyEvent(self: *MainModule, key: i32, down: bool, impulse_frame: usize) bool { if (common.getKeyRelFreq(key)) |rel_freq| { if (down or (if (self.key0) |nh| nh == key else false)) { self.key0 = if (down) key else null; self.iq0.push(impulse_frame, self.idgen0.nextId(), .{ .sample_rate = AUDIO_SAMPLE_RATE, .freq = a4 * rel_freq, .note_on = down, }); } } return true; } };
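// Worked example of the vibrato math above (added for illustration; it assumes
// zang.SineOsc output spans -1.0..1.0): with the default a4 = 440.0, the line
// `temps[2][i] = params.freq * (1.0 + 0.02 * temps[2][i])` sweeps the pulse
// oscillator's frequency between roughly 431.2 Hz and 448.8 Hz at a 4 Hz rate,
// i.e. a +/-2% vibrato, and the same buffer is written to outputs[1] so the
// oscilloscope can sync to it.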
examples/example_vibrato.zig
const std = @import("std");
const testing = std.testing;
const stdout = std.io.getStdOut().writer();
const stderr = std.io.getStdErr().writer();

const C = @cImport({
    @cInclude("vbyte.h");
});

/// vbyte compress u32 or u64 data. if sorted, use `initial_value` for delta
/// compression. `initial_value` is only used when `sorted` is true.
pub fn compress(comptime T: type, values: std.ArrayList(T), comptime sorted: bool, out: *std.ArrayList(u8), initial_value: T) !void {
    var size_bytes: usize = 0;
    if (T == u64) {
        if (sorted) {
            const exp_size_bytes = C.vbyte_compressed_size_sorted64(&values.items[0], values.items.len, initial_value);
            try out.*.resize(@intCast(usize, exp_size_bytes));
            size_bytes = C.vbyte_compress_sorted64(&values.items[0], &out.items[0], initial_value, values.items.len);
        } else {
            const exp_size_bytes = C.vbyte_compressed_size_unsorted64(&values.items[0], values.items.len);
            try out.resize(@intCast(usize, exp_size_bytes));
            size_bytes = C.vbyte_compress_unsorted64(&values.items[0], &out.items[0], values.items.len);
        }
    } else if (T == u32) {
        if (sorted) {
            const exp_size_bytes = C.vbyte_compressed_size_sorted32(&values.items[0], values.items.len, initial_value);
            try out.*.resize(@intCast(usize, exp_size_bytes));
            size_bytes = C.vbyte_compress_sorted32(&values.items[0], &out.items[0], initial_value, values.items.len);
        } else {
            const exp_size_bytes = C.vbyte_compressed_size_unsorted32(&values.items[0], values.items.len);
            try out.resize(@intCast(usize, exp_size_bytes));
            size_bytes = C.vbyte_compress_unsorted32(&values.items[0], &out.items[0], values.items.len);
        }
    } else {
        @compileError("only u64 and u32 supported to compress");
    }
    try out.resize(size_bytes / @sizeOf(T));
}

//extern size_t
//vbyte_uncompress_sorted64(const uint8_t *in, uint64_t *out, uint64_t previous,
//  size_t length);
//extern size_t
//vbyte_uncompress_unsorted64(const uint8_t *in, uint64_t *out, size_t length);

const VByteError = error{
    UnexpectedNumberOfBytes,
};

/// out must be the correct size.
pub fn decompress(comptime T: type, compressed: [*]u8, comptime sorted: bool, out: *std.ArrayList(T), initial_value: T) !void {
    var dec_bytes: usize = 0;
    if (T == u64) {
        if (sorted) {
            dec_bytes = C.vbyte_uncompress_sorted64(compressed, &out.items[0], initial_value, out.items.len);
        } else {
            dec_bytes = C.vbyte_uncompress_unsorted64(compressed, &out.items[0], out.items.len);
        }
    } else if (T == u32) {
        if (sorted) {
            dec_bytes = C.vbyte_uncompress_sorted32(compressed, &out.items[0], initial_value, out.items.len);
        } else {
            dec_bytes = C.vbyte_uncompress_unsorted32(compressed, &out.items[0], out.items.len);
        }
    } else {
        @compileError("only u64 and u32 supported to decompress");
    }
    // NOTE it's not actually bytes as indicated in the header.
    if (dec_bytes != out.items.len) {
        if (!sorted and dec_bytes == out.items.len * 4) {
            return;
        }
        try stderr.print("dec_bytes: {d}, len:{d}\n", .{ dec_bytes, out.items.len });
        return VByteError.UnexpectedNumberOfBytes;
    }
}

test "basic compression round trip" {
    // TODO
    //vbyte_select_sorted64(const uint8_t *in, size_t size, uint64_t previous,
    //vbyte_select_unsorted64(const uint8_t *in, size_t size, size_t index);
    //vbyte_search_unsorted64(const uint8_t *in, size_t length, uint64_t value);
    //vbyte_search_lower_bound_sorted64(const uint8_t *in, size_t length,
    //vbyte_append_sorted64(uint8_t *end, uint64_t previous, uint64_t value);
    //vbyte_append_unsorted64(uint8_t *end, uint64_t value);
    const sorted = false;
    const T = u64;
    const allocator = std.testing.allocator;
    var values = std.ArrayList(T).init(allocator);
    var compressed = std.ArrayList(u8).init(allocator);
    defer values.deinit();
    defer compressed.deinit();
    values.resize(10001) catch {};
    var i: T = 1;
    values.items[0] = 14566576;
    while (i < values.items.len) {
        values.items[i] = values.items[i - 1] + 2;
        i += 1;
    }
    var out = std.ArrayList(T).init(allocator);
    defer out.deinit();
    out.resize(values.items.len) catch {};
    try compress(T, values, sorted, &compressed, values.items[0]);
    try stdout.print("compressed size bytes:{d} uncompressed size bytes: {d} ratio: {d}\n", .{ compressed.items.len, values.items.len * @sizeOf(T), (values.items.len * @sizeOf(T)) / compressed.items.len });
    try decompress(T, compressed.items.ptr, sorted, &out, values.items[0]);
    try std.testing.expect(std.mem.eql(T, out.items, values.items));
}
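// Minimal usage sketch (added for illustration; it mirrors the round-trip test
// above, and `alloc` stands for whatever allocator the caller already has): the
// caller owns both lists, and `out` must already be resized to the element
// count before calling decompress, since decompress never resizes it.
//
//   var compressed = std.ArrayList(u8).init(alloc);
//   defer compressed.deinit();
//   try compress(u64, values, false, &compressed, 0);
//
//   var out = std.ArrayList(u64).init(alloc);
//   defer out.deinit();
//   try out.resize(values.items.len);
//   try decompress(u64, compressed.items.ptr, false, &out, 0);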
src/main.zig
const std = @import("std"); const testing = std.testing; const allocator = std.testing.allocator; pub const Report = struct { const Line = struct { const MAX_SIZE = 16; bits: [MAX_SIZE]usize, alive: bool, pub fn init() Line { var self = Line{ .bits = [_]usize{0} ** MAX_SIZE, .alive = true, }; return self; } pub fn deinit(_: *Line) void {} pub fn process(self: *Line, line: []const u8) void { for (line) |c, j| { self.bits[j] += c - '0'; } } pub fn set(self: *Line, bit: usize, value: usize) void { self.bits[bit] = value; } pub fn to_decimal(self: *Line, width: usize) usize { var num: usize = 0; for (self.bits) |b, j| { if (j >= width) break; num *= 2; num += b; } // std.debug.warn("To decimal {d}: {}\n", .{ self.bits[0..width], num }); return num; } }; width: usize, lines: std.ArrayList(Line), pub fn init() Report { var self = Report{ .width = 0, .lines = std.ArrayList(Line).init(allocator), }; return self; } pub fn deinit(self: *Report) void { for (self.lines.items) |*line| { line.deinit(); } self.lines.deinit(); } pub fn process_line(self: *Report, data: []const u8) void { if (data.len == 0) return; if (self.width == 0) { self.width = data.len; } if (self.width != data.len) { unreachable; } var line = Line.init(); line.process(data); self.lines.append(line) catch unreachable; } pub fn get_power_consumption(self: *Report) usize { var gamma = Line.init(); defer gamma.deinit(); var epsilon = Line.init(); defer epsilon.deinit(); var j: usize = 0; while (j < self.width) : (j += 1) { if (self.has_more_ones_for_bit(j, self.lines.items.len)) { gamma.set(j, 1); } else { epsilon.set(j, 1); } } return gamma.to_decimal(self.width) * epsilon.to_decimal(self.width); } pub fn get_life_support_rating(self: *Report) usize { return self.get_oxygen_generator_rating() * self.get_co2_scrubber_rating(); } fn reset_alive(self: Report) void { for (self.lines.items) |*line| { line.alive = true; } } fn mark_alive_for_bit(self: Report, bit: usize, wanted: usize) usize { if (bit >= self.width) unreachable; var count: usize = 0; for (self.lines.items) |*line| { if (line.bits[bit] != wanted) { line.alive = false; } if (line.alive) { // std.debug.warn("Keeping alive line {} bit {}: {d}\n", .{ j, bit, line.bits[0..self.width] }); count += 1; } } return count; } fn count_ones_for_bit(self: Report, bit: usize) usize { if (bit >= self.width) unreachable; var count: usize = 0; for (self.lines.items) |*line| { if (!line.alive) continue; if (line.bits[bit] != 1) continue; count += 1; } return count; } fn has_more_ones_for_bit(self: *Report, bit: usize, size: usize) bool { const half = (size + 1) / 2; const count = self.count_ones_for_bit(bit); return count >= half; } fn get_rating(self: *Report, mark: usize) usize { self.reset_alive(); var j: usize = 0; var pass: usize = 0; var left: usize = self.lines.items.len; while (j < self.width) : (j += 1) { var count: usize = 0; if (self.has_more_ones_for_bit(j, left)) { count = self.mark_alive_for_bit(j, mark); } else { count = self.mark_alive_for_bit(j, 1 - mark); } if (count == 1) break; pass += 1; left = count; } for (self.lines.items) |*line| { if (!line.alive) continue; return line.to_decimal(self.width); } return 0; } fn get_oxygen_generator_rating(self: *Report) usize { return self.get_rating(1); } fn get_co2_scrubber_rating(self: *Report) usize { return self.get_rating(0); } }; test "sample part a" { const data: []const u8 = \\00100 \\11110 \\10110 \\10111 \\10101 \\01111 \\00111 \\11100 \\10000 \\11001 \\00010 \\01010 ; var report = Report.init(); defer report.deinit(); var it 
= std.mem.split(u8, data, "\n"); while (it.next()) |line| { report.process_line(line); } const pc = report.get_power_consumption(); try testing.expect(pc == 198); } test "sample part b" { const data: []const u8 = \\00100 \\11110 \\10110 \\10111 \\10101 \\01111 \\00111 \\11100 \\10000 \\11001 \\00010 \\01010 ; var report = Report.init(); defer report.deinit(); var it = std.mem.split(u8, data, "\n"); while (it.next()) |line| { report.process_line(line); } const lsr = report.get_life_support_rating(); try testing.expect(lsr == 230); }
2021/p03/report.zig
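// A small standalone check of the power-consumption arithmetic used by
// Report.get_power_consumption above, for the same 12-line sample input.
// This is an illustrative sketch, not part of the solution; it assumes a
// Zig toolchain where std.testing functions return error unions (matching
// the `try testing.expect(...)` style of the tests above).
const std = @import("std");

test "sample power consumption arithmetic" {
    // Most common bits per column of the sample are 1,0,1,1,0.
    const gamma: usize = 0b10110; // 22
    // Epsilon uses the least common bits, i.e. the 5-bit complement.
    const epsilon: usize = 0b01001; // 9
    try std.testing.expectEqual(@as(usize, 198), gamma * epsilon);
}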
const wlr = @import("../wlroots.zig"); const wayland = @import("wayland"); const wl = wayland.server.wl; pub const Presentation = extern struct { global: *wl.Global, // TODO: use std.os.clockid_t when available. clock: c_int, events: extern struct { destroy: wl.Signal(*wlr.Presentation), }, server_destroy: wl.Listener(*wl.Server), extern fn wlr_presentation_create(server: *wl.Server, backend: *wlr.Backend) ?*wlr.Presentation; pub fn create(server: *wl.Server, backend: *wlr.Backend) !*wlr.Presentation { return wlr_presentation_create(server, backend) orelse error.OutOfMemory; } extern fn wlr_presentation_surface_sampled(presentation: *wlr.Presentation, surface: *wlr.Surface) ?*wlr.PresentationFeedback; pub const surfaceSampled = wlr_presentation_surface_sampled; extern fn wlr_presentation_surface_sampled_on_output(presentation: *wlr.Presentation, surface: *wlr.Surface, output: *wlr.Output) void; pub const surfaceSampledOnOutput = wlr_presentation_surface_sampled_on_output; }; pub const PresentationFeedback = extern struct { resources: wl.list.Head(wl.Resource, null), output: ?*wlr.Output, output_committed: bool, output_commit_seq: u32, output_commit: wl.Listener(*wlr.Output.event.Commit), output_present: wl.Listener(*wlr.Output.event.Present), output_destroy: wl.Listener(*wlr.Output), extern fn wlr_presentation_feedback_send_presented(feedback: *wlr.PresentationFeedback, event: *wlr.PresentationEvent) void; pub const sendPresented = wlr_presentation_feedback_send_presented; extern fn wlr_presentation_feedback_destroy(feedback: *wlr.PresentationFeedback) void; pub const destroy = wlr_presentation_feedback_destroy; }; pub const PresentationEvent = extern struct { output: *wlr.Output, tv_sec: u64, tv_nsec: u32, refresh: u32, seq: u64, flags: u32, extern fn wlr_presentation_event_from_output(event: *wlr.PresentationEvent, output_event: *const wlr.Output.event.Present) void; pub const fromOutput = wlr_presentation_event_from_output; };
src/types/presentation_time.zig
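// Usage sketch for the Presentation binding above: create the wp_presentation
// global during compositor startup. Illustrative only; `setupPresentation` is a
// hypothetical helper, and the package names in the imports ("wlroots",
// "wayland") are assumptions about how these bindings are exposed to consumers.
const wlr = @import("wlroots");
const wl = @import("wayland").server.wl;

fn setupPresentation(server: *wl.Server, backend: *wlr.Backend) !*wlr.Presentation {
    // Presentation.create maps a null return from wlr_presentation_create
    // to error.OutOfMemory, so callers can simply `try` it.
    return wlr.Presentation.create(server, backend);
}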
const std = @import("std"); extern "kernel32" fn LoadLibraryA(name: [*c]const u8) callconv(.Stdcall) ?*c_void; extern "kernel32" fn GetProcAddress(mod: ?*c_void, nax: [*c]const u8) callconv(.Stdcall) ?*c_void; extern "kernel32" fn GetModuleFileNameA(mod: ?*c_void, name: [*c]u8, len: c_int) callconv(.Stdcall) c_int; extern "kernel32" fn ExitProcess(code: c_int) callconv(.Stdcall) noreturn; extern "user32" fn MessageBoxA(hw: ?*c_void, a: [*c]const u8, b: [*c]const u8, c: c_uint) callconv(.Stdcall) c_int; extern "msvcrt" fn malloc(s: usize) ?*c_void; extern "msvcrt" fn realloc(p: ?*c_void, s: usize) ?*c_void; extern "msvcrt" fn free(p: ?*c_void) ?*c_void; extern "msvcrt" fn strcpy(p: [*c]u8, px: [*c]const u8) ?*c_void; pub const newstate_t = ?fn (...) callconv(.C) ?*c_void; pub const openlibs_t = ?fn (?*c_void) callconv(.C) void; pub const loadfile_t = ?fn (?*c_void, [*c]const u8) callconv(.C) c_int; pub const pcall_t = ?fn (?*c_void, c_int, c_int, c_int) callconv(.C) c_int; pub export fn findDir() [*c]u8 { var pathBufferLen: c_int = 512; while (true) { var buffer: [*c]u8 = @ptrCast([*c]u8, @alignCast(@alignOf(u8), malloc(@bitCast(c_uint, pathBufferLen)))); var res: c_int = GetModuleFileNameA(null, buffer, pathBufferLen); if (res == pathBufferLen) { _ = free(buffer); pathBufferLen *= @as(c_int, 2); } else { buffer = @ptrCast([*c]u8, @alignCast(@alignOf(u8), realloc(@ptrCast(?*c_void, buffer), @bitCast(c_uint, (res + @as(c_int, 1)))))); { var i: c_int = res; while (i >= @as(c_int, 0)) : (i -= 1) { var ch: u8 = buffer[@intCast(c_uint, i)]; if ((@bitCast(c_int, @as(c_uint, ch)) == @as(c_int, '\\')) or (@bitCast(c_int, @as(c_uint, ch)) == @as(c_int, '/'))) { buffer[@intCast(c_uint, (i + @as(c_int, 1)))] = @bitCast(u8, @truncate(i8, @as(c_int, 0))); buffer = @ptrCast([*c]u8, @alignCast(@alignOf(u8), realloc(@ptrCast(?*c_void, buffer), @bitCast(c_uint, (i + @as(c_int, 2)))))); return buffer; } } } buffer = @ptrCast([*c]u8, @alignCast(@alignOf(u8), realloc(@ptrCast(?*c_void, buffer), @bitCast(c_uint, @as(c_int, 1))))); buffer[@intCast(c_uint, @as(c_int, 0))] = @bitCast(u8, @truncate(i8, @as(c_int, 0))); return buffer; } } return null; } pub export fn concat(arg_a: [*c]const u8, arg_b: [*c]const u8) [*c]u8 { var a = arg_a; var b = arg_b; var lenA: usize = std.mem.len(a); var fin: [*c]u8 = @ptrCast([*c]u8, @alignCast(@alignOf(u8), malloc(((lenA +% std.mem.len(b)) +% @bitCast(c_uint, @as(c_int, 1)))))); _ = strcpy(fin, a); _ = strcpy((fin + lenA), b); return fin; } pub export fn luaboot() void { var dll: ?*c_void = LoadLibraryA("lua51"); if (!(dll != null)) { _ = MessageBoxA(null, "moonboot failed to find LuaJIT (lua51.dll)", "moonboot diagnostic", @as(c_int, 0)); ExitProcess(@as(c_int, 1)); } var luaL_newstate: newstate_t = @ptrCast(newstate_t, GetProcAddress(dll, "luaL_newstate")); var luaL_openlibs: openlibs_t = @ptrCast(openlibs_t, GetProcAddress(dll, "luaL_openlibs")); var luaL_loadfile: loadfile_t = @ptrCast(loadfile_t, GetProcAddress(dll, "luaL_loadfile")); var lua_pcall: pcall_t = @ptrCast(pcall_t, GetProcAddress(dll, "lua_pcall")); var L: ?*c_void = luaL_newstate.?(); luaL_openlibs.?(L); var dir: [*c]u8 = findDir(); var boot: [*c]u8 = concat(dir, "moonboot/bootloader.lua"); _ = free(dir); if (luaL_loadfile.?(L, boot) != 0) { _ = MessageBoxA(null, "moonboot failed to load \'moonboot/bootloader.lua\'", "moonboot diagnostic", @as(c_int, 0)); ExitProcess(@as(c_int, 1)); } _ = free(boot); if (lua_pcall.?(L, @as(c_int, 0), -@as(c_int, 1), @as(c_int, 0)) != 0) { _ = MessageBoxA(null, "moonboot failed 
to run bootloader", "moonboot diagnostic", @as(c_int, 0)); ExitProcess(@as(c_int, 1)); } } pub export fn DllMain(arg_x: ?*c_void, arg_y: u32, arg_z: ?*c_void) callconv(.Stdcall) c_int { var x = arg_x; var y = arg_y; var z = arg_z; @"switch": { case: { switch (y) { @as(c_int, 1) => break :case, else => break :@"switch", } } luaboot(); break :@"switch"; } return 1; }
src/moonboot_dll.zig
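// Pure-Zig sketch of the path handling that findDir() above performs with
// GetModuleFileNameA and msvcrt realloc: keep everything up to and including
// the last '\' or '/'. `dirOf` is an illustrative helper, not part of moonboot.
const std = @import("std");

fn dirOf(path: []const u8) []const u8 {
    var i: usize = path.len;
    while (i > 0) : (i -= 1) {
        const ch = path[i - 1];
        if (ch == '\\' or ch == '/') return path[0..i];
    }
    return path[0..0];
}

test "dirOf keeps the trailing separator" {
    try std.testing.expectEqualStrings("C:\\games\\", dirOf("C:\\games\\game.exe"));
}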
usingnamespace @import("root").preamble; const log = lib.output.log.scoped(.{ .prefix = "VirtioGpu", .filter = .info, }).write; const virtio_pci = os.drivers.misc.virtio_pci; fn physalloc(comptime T: type) struct { ptr: *T, phys: usize } { const phys = os.memory.pmm.allocPhys(@sizeOf(T)) catch unreachable; const virt = os.platform.phys_ptr(*T).from_int(phys).get_writeback(); return .{ .ptr = virt, .phys = phys }; } /// virtio-gpu driver instance const Driver = struct { transport: virtio_pci.Driver, display_region: lib.graphics.image_region.ImageRegion = undefined, // Initialize the virtio transport, but don't change modes pub fn init(pciaddr: os.platform.pci.Addr) !Driver { var v = try virtio_pci.Driver.init(pciaddr, 0, 0); var self: Driver = .{ .transport = v }; self.transport.addIRQ(0, &interrupt, &self); return self; } fn invalidateRectFunc(region: *lib.graphics.image_region.ImageRegion, x: usize, y: usize, width: usize, height: usize) void { const self = @fieldParentPtr(Driver, "display_region", region); self.updateRect(@ptrToInt(self.display_region.subregion(x, y, width, height).bytes.ptr) - @ptrToInt(self.display_region.bytes.ptr), .{ .x = @intCast(u32, x), .y = @intCast(u32, y), .width = @intCast(u32, width), .height = @intCast(u32, height), }); } // Do a modeswitch to the described mode pub fn modeset(self: *Driver, phys: usize) void { var iter = self.transport.iter(0); { var msg: ResourceCreate2D = .{ .hdr = .{ .cmdtype = virtio_gpu_cmd_res_create_2d, .flags = 0, .fenceid = 0, .ctxid = 0, }, .resid = 1, .format = 1, .width = @intCast(u32, self.display_region.width), .height = @intCast(u32, self.display_region.height), }; var resp: ConfHdr = undefined; iter.begin(); iter.put(&msg, @sizeOf(ResourceCreate2D), virtio_pci.vring_desc_flag_next); iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write); } { var msg: ResourceAttachBacking = .{ .hdr = .{ .cmdtype = virtio_gpu_cmd_res_attach_backing, .flags = 0, .fenceid = 0, .ctxid = 0, }, .resid = 1, .entrynum = 1, }; var msg1: ResourceAttachBackingEntry = .{ .addr = phys, .len = @intCast(u32, self.display_region.width) * @intCast(u32, self.display_region.height) * 4, }; var resp: ConfHdr = undefined; iter.begin(); iter.put(&msg, @sizeOf(ResourceAttachBacking), virtio_pci.vring_desc_flag_next); iter.put(&msg1, @sizeOf(ResourceAttachBackingEntry), virtio_pci.vring_desc_flag_next); iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write); } { var msg: SetScanout = .{ .hdr = .{ .cmdtype = virtio_gpu_cmd_set_scanout, .flags = 0, .fenceid = 0, .ctxid = 0, }, .resid = 1, .scanid = 0, .rect = .{ .x = 0, .y = 0, .width = @intCast(u32, self.display_region.width), .height = @intCast(u32, self.display_region.height), }, }; var resp: ConfHdr = undefined; iter.begin(); iter.put(&msg, @sizeOf(SetScanout), virtio_pci.vring_desc_flag_next); iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write); } self.transport.start(0); self.wait(); } /// Update *only* the rectangle pub fn updateRect(self: *Driver, offset: u64, rect: Rect) void { const RectangleReq = struct { th2d: TransferHost2D, r1: ConfHdr = undefined, rflush: ResourceFlush, r2: ConfHdr = undefined }; while (self.transport.queues[0].num_unused < 2) { if (self.transport.processOne(0)) |idx| { const addr = self.transport.queues[0].desc[idx].addr; os.memory.pmm.freePhys(addr, @sizeOf(RectangleReq)); self.transport.freeChain(0, idx); } else break; if (self.transport.processOne(0)) |idx| { self.transport.freeChain(0, idx); } else break; } var iter = 
self.transport.iter(0); var msg = physalloc(RectangleReq); msg.ptr.* = .{ .th2d = .{ .resid = 1, .offset = offset, .rect = rect, }, .rflush = .{ .resid = 1, .rect = rect, } }; iter.begin(); iter.putPhys(msg.phys + @offsetOf(RectangleReq, "th2d"), @sizeOf(TransferHost2D), virtio_pci.vring_desc_flag_next); iter.putPhys(msg.phys + @offsetOf(RectangleReq, "r1"), @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write); iter.begin(); iter.putPhys(msg.phys + @offsetOf(RectangleReq, "rflush"), @sizeOf(ResourceFlush), virtio_pci.vring_desc_flag_next); iter.putPhys(msg.phys + @offsetOf(RectangleReq, "r2"), @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write); self.transport.start(0); } /// Wait for request to finish. fn wait(self: *Driver) void { while (self.transport.queues[0].num_unused != self.transport.queues[0].size) { self.transport.process(0); } } }; const ConfHdr = packed struct { cmdtype: u32 = 0, flags: u32 = 0, fenceid: u64 = 0, ctxid: u32 = 0, _: u32 = 0, }; const ResourceCreate2D = packed struct { hdr: ConfHdr, resid: u32, format: u32, width: u32, height: u32 }; const ResourceAttachBacking = packed struct { hdr: ConfHdr, resid: u32, entrynum: u32 }; const ResourceAttachBackingEntry = packed struct { addr: u64, len: u32, _: u32 = 0, }; const Rect = packed struct { x: u32, y: u32, width: u32, height: u32 }; const SetScanout = packed struct { hdr: ConfHdr, rect: Rect, scanid: u32, resid: u32, }; const TransferHost2D = packed struct { hdr: ConfHdr = .{ .cmdtype = virtio_gpu_cmd_transfer_to_host_2d }, rect: Rect, offset: u64, resid: u32, _: u32 = 0 }; const ResourceFlush = packed struct { hdr: ConfHdr = .{ .cmdtype = virtio_gpu_cmd_res_flush }, rect: Rect, resid: u32, _: u32 = 0 }; // Feature bits const virtio_feature_version_1 = 32; const virtio_feature_access_platform = 33; const virtio_feature_ring_packed = 34; const virtio_feature_order_platform = 36; const virtio_feature_sr_iov = 37; // 2D cmds const virtio_gpu_cmd_get_display_info = 0x0100; const virtio_gpu_cmd_res_create_2d = 0x101; const virtio_gpu_cmd_res_unref = 0x102; const virtio_gpu_cmd_set_scanout = 0x103; const virtio_gpu_cmd_res_flush = 0x104; const virtio_gpu_cmd_transfer_to_host_2d = 0x105; const virtio_gpu_cmd_res_attach_backing = 0x106; const virtio_gpu_cmd_res_detatch_backing = 0x107; const virtio_gpu_cmd_get_capset_info = 0x108; const virtio_gpu_cmd_get_capset = 0x109; const virtio_gpu_cmd_get_edid = 0x10A; // Cursor cmds const virtio_gpu_cmd_update_cursor = 0x0300; const virtio_gpu_cmd_move_cursor = 0x301; // Success const virtio_gpu_resp_ok_nodata = 0x1100; const virtio_gpu_resp_ok_display_info = 0x1101; const virtio_gpu_resp_ok_capset_info = 0x1102; const virtio_gpu_resp_ok_capset = 0x1103; const virtio_gpu_resp_ok_edid = 0x1104; // Error const virtio_gpu_resp_err_unspecified = 0x1200; const virtio_gpu_resp_err_out_of_mem = 0x1201; const virtio_gpu_resp_err_invalid_scanout_id = 0x1202; const virtio_gpu_resp_err_invalid_res_id = 0x1203; const virtio_gpu_resp_err_invalid_ctx_id = 0x1204; const virtio_gpu_resp_err_invalid_parameter = 0x1205; const virtio_gpu_flag_fence = (1 << 0); fn oautb(port: u16, val: u8) void { asm volatile ("outb %[val], %[port]\n\t" : : [val] "{al}" (val), [port] "N{dx}" (port) ); } /// Global rectangle update, but with a global context fn updater( bb: [*]u8, yoff_src: usize, yoff_dest: usize, ysize: usize, pitch: usize, ctx: usize, ) void { var self = @intToPtr(*Driver, ctx); self.updateRect(self.pitch * yoff_src, .{ .x = 0, .y = @truncate(u32, yoff_dest), .width = self.width, .height = 
@truncate(u32, ysize), }); } pub fn registerController(addr: os.platform.pci.Addr) void { if (comptime (!config.drivers.gpu.virtio.enable)) return; const alloc = os.memory.pmm.phys_heap; const drv = alloc.create(Driver) catch { log(.crit, "Virtio display controller: Allocation failure", .{}); return; }; errdefer alloc.destroy(drv); drv.* = Driver.init(addr) catch { log(.crit, "Virtio display controller: Init has failed!", .{}); return; }; errdefer drv.deinit(); if (comptime (!config.drivers.output.vesa_log.enable)) return; // @TODO: Get the actual screen resolution const res = config.drivers.gpu.virtio.default_resolution; const num_bytes = res.width * res.height * 4; const phys = os.memory.pmm.allocPhys(num_bytes) catch return; errdefer os.memory.pmm.freePhys(phys, num_bytes); drv.display_region = .{ .bytes = os.platform.phys_ptr([*]u8).from_int(phys).get_writeback()[0..num_bytes], .pitch = res.width * 4, .width = res.width, .height = res.height, .invalidateRectFunc = Driver.invalidateRectFunc, .pixel_format = .rgbx, }; drv.modeset(phys); os.drivers.output.vesa_log.use(&drv.display_region); } pub fn interrupt(drv: *Driver) void {}
subprojects/flork/src/drivers/gpu/virtio.zig
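// Sketch of the framebuffer offset math behind invalidateRectFunc and
// updateRect above: in a linear 32-bit-per-pixel framebuffer, the byte offset
// of a dirty rectangle's top-left pixel (x, y) is y * pitch + x * 4. This
// appears to be what the subregion pointer difference in invalidateRectFunc
// evaluates to, and updater() uses yoff_src * pitch the same way. `rectOffset`
// is illustrative and not part of the driver.
const std = @import("std");

fn rectOffset(x: usize, y: usize, pitch: usize) usize {
    return y * pitch + x * 4;
}

test "dirty-rect offset in a 1024x768 xrgb framebuffer" {
    const pitch: usize = 1024 * 4;
    try std.testing.expectEqual(@as(usize, 100 * pitch + 10 * 4), rectOffset(10, 100, pitch));
}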
const builtin = @import("builtin"); const std = @import("std"); const mem = std.mem; const sort = std.sort.sort; const comptimeutils = @import("comptimeutils.zig"); const setField = comptimeutils.setField; const append = comptimeutils.append; const empty = comptimeutils.empty; const map = comptimeutils.map; const mathutils = @import("math.zig"); const roundUpAlign = mathutils.roundUpAlign; const storageutils = @import("storage.zig"); const AlignedStorage = storageutils.AlignedStorage; const Tuple = storageutils.Tuple; // Defining classes and signatures // -------------------------------------------------------------------------- /// Define a signature. pub fn defineSig(comptime Factory: VtableFactory) Sig { // Non-comprehensive check, so easy to bypass const Marker = struct { blah: usize, }; switch (@typeInfo(Factory(Marker))) { builtin.TypeId.Struct => |info| { for (info.fields) |field| { switch (@typeInfo(field.field_type)) { builtin.TypeId.Fn => |fn_info| { if (fn_info.args.len == 0) { @compileError(field.name ++ " doesn't have a receiver parameter"); } else if (fn_info.args[0].is_generic) { @compileError(field.name ++ " has an invalid receiver type. Got: (generic)"); } else if (fn_info.args[0].arg_type.? != Marker) { @compileError(field.name ++ " has an invalid receiver type. Got: " ++ @typeName(fn_info.args[0].arg_type)); } }, else => @compileError(field.name ++ " is not a function type"), } } }, else => @compileError("must specify a struct type"), } return Sig{ .VtableFactory = Factory, }; } pub const Sig = struct { VtableFactory: VtableFactory, const Self = @This(); /// Make a vtable type for the specified receiver type. /// /// This is a user-facing API. pub fn Vtable(comptime self: Self, comptime VtSelf: type) type { return self.VtableFactory(VtSelf); } }; const Method = struct { name: []const u8, func: builtin.TypeInfo.Fn, }; /// Start defining a cell class. pub fn defineClass() ClassBuilder { return ClassBuilder{}; } /// The builder type used to define a cell class. const ClassBuilder = struct { // Compiler bug: Changing this to `void` causes assertion failure. // The root cause is not identified yet. _StateTy: type = u8, _AttrTy: type = void, built: bool = false, in_ports: []const InPortInfo = empty(InPortInfo), out_ports: []const OutPortInfo = empty(OutPortInfo), _ctor: fn (var) void = defaultCtor, _AttrInit: ValueFactory = MakeValueFactory({}), const Self = @This(); fn defaultCtor(self: var) void {} /// Set the state type of the class. It defaults to `void`. pub fn state(comptime self: Self, comptime ty: type) Self { return setField(self, "_StateTy", ty); } /// Set the attribute type of the class. It defaults to `void`. pub fn attr(comptime self: Self, comptime ty: type) Self { return setField(self, "_AttrTy", ty); } /// Set the default value of the attribute. It defaults to `{}`. pub fn attrDefault(comptime self: Self, comptime value: var) Self { return setField(self, "_AttrInit", MakeValueFactory(value)); } /// Set the constructor function of the class. /// /// Construction functions are automatically called for all cells when a /// system is instantiated. pub fn ctor(comptime self: Self, comptime impl: fn (var) void) Self { return setField(self, "_ctor", impl); } /// Define an outbound port. pub fn out(comptime self: Self, comptime name: []const u8, comptime sig: Sig) Self { return setField(self, "out_ports", append(self.out_ports, OutPortInfo{ .name = name, .sig = sig, })); } /// Define an inbound port as well as its handler functions. 
pub fn in(comptime self: Self, comptime name: []const u8, comptime sig: Sig, comptime Factory: ImplFactory) Self { return setField(self, "in_ports", append(self.in_ports, InPortInfo{ .name = name, .sig = sig, .ImplFactory = Factory, })); } /// Finalize the `ClassBuilder`. pub fn build(comptime self: Self) Class { return Class{ .StateTy = self._StateTy, .AttrTy = self._AttrTy, .built = self.built, .in_ports = self.in_ports, .out_ports = self.out_ports, .ctor = self._ctor, .DefaultAttrInit = self._AttrInit, }; } }; pub const Class = struct { StateTy: type, AttrTy: type, built: bool, in_ports: []const InPortInfo, out_ports: []const OutPortInfo, ctor: fn (var) void, DefaultAttrInit: ValueFactory, const Self = @This(); /// Get a reference to the specified outbound port. pub fn out_port_info(comptime self: Self, comptime name: []const u8) *const OutPortInfo { return &self.out_ports[self.out_port_i(name)]; } /// Get an index of the specified outbound port. pub fn out_port_i(comptime self: Self, comptime name: []const u8) usize { for (self.out_ports) |*out_port, i| { if (mem.eql(u8, out_port.name, name)) { return i; } } @compileError("unknown outbound port name: " ++ name); } }; /// A function that produces a type containing the fields for the handler /// functions of a signature. The given type is used as their receiver type. /// /// (They can be automatically generated from a prototype that doesn't /// have a receiver parameter once <https://github.com/ziglang/zig/issues/383> /// lands.) pub const VtableFactory = fn (type) type; /// A function that produces a type containing the handler functions of a /// signature. The parameter type `VtSelf` is used as their receiver type. /// /// The handler functions are provided by the type in one of the following ways: /// /// - Function declarations with corresponding method names. The functions must /// accept `self: VtSelf` as the first parameter. /// /// - A single function declaration named `__vtable__`, having type /// `fn () sig.Vtable(VtSelf)`, where `sig` represents a signature and is /// a value of type `Sig`. The return value is a raw vtable and must be /// `comptime`-known. /// /// The second usage is an advanced feature intended to be used for /// metaprogramming. For example, it can be used to automatically generate /// an implementation from a signature. pub const ImplFactory = fn (type) type; const InPortInfo = struct { name: []const u8, sig: Sig, ImplFactory: ImplFactory, }; const OutPortInfo = struct { name: []const u8, sig: Sig, }; // Value producer // -------------------------------------------------------------------------- /// A type-erased constant value producer. const ValueFactory = type; fn MakeValueFactory(value: var) ValueFactory { return struct { fn get(comptime T: type) T { if (@typeOf(value) == void and T != void) { @compileError("An attribute value of type " ++ @typeName(T) ++ " is required, but missing."); } return value; } }; } // Defining a system // -------------------------------------------------------------------------- /// Represents a reference to a cell defined in a `ComposeCtx`. pub const Cell = struct { ctx: *ComposeCtx, cell_id: usize, const Self = @This(); /// Get a reference to the specified inbound port of the cell. 
pub fn in(comptime self: Self, comptime name: []const u8) InPort { const class = &self.getInner().class; for (class.in_ports) |in_port, i| { if (mem.eql(u8, in_port.name, name)) { return InPort{ .cell_id = self.cell_id, .in_port_id = i, ._sig = in_port.sig }; } } @compileError("unknown inbound port name: " ++ name); } /// Get a reference to the specified outbound port of the cell. pub fn out(comptime self: Self, comptime name: []const u8) OutPort { const class = &self.getInner().class; for (class.out_ports) |out_port, i| { if (mem.eql(u8, out_port.name, name)) { return OutPort{ .cell_id = self.cell_id, .out_port_id = i, ._sig = out_port.sig }; } } @compileError("unknown outbound port name: " ++ name); } /// Set the attribute value of the cell. pub fn withAttr(comptime self: Self, value: self.getInner().class.AttrTy) Self { const inner = self.getInner(); inner.AttrInit = MakeValueFactory(value); return self; } fn getInner(comptime self: *const Self) *ComposeCtxCell { return self.ctx.cells[self.cell_id]; } }; /// Represents a reference to an inbound port of a cell defined in /// a `ComposeCtx`. pub const InPort = struct { cell_id: usize, in_port_id: usize, _sig: Sig, /// Get the signature of the port. pub fn PortSig(comptime self: @This()) Sig { return self._sig; } }; /// Represents a reference to an outbound port of a cell defined in /// a `ComposeCtx`. pub const OutPort = struct { cell_id: usize, out_port_id: usize, _sig: Sig, /// Get the signature of the port. pub fn PortSig(comptime self: @This()) Sig { return self._sig; } }; const ComposeCtxCell = struct { class: Class, /// The initializer for the cell's attribute. AttrInit: ValueFactory, }; const ComposeCtxConn = struct { out: OutPort, in: InPort, }; pub const ComposeCtx = struct { cells: []const *ComposeCtxCell = empty(*ComposeCtxCell), conns: []const ComposeCtxConn = empty(ComposeCtxConn), entry_port: ?InPort = null, const Self = @This(); /// Instantiate a cell of the specified class. pub fn new(comptime self: *Self, comptime class: Class) Cell { const cell_id = self.cells.len; var cell = ComposeCtxCell{ .class = class, .AttrInit = class.DefaultAttrInit, }; self.cells = append(self.cells, &cell); return Cell{ .cell_id = cell_id, .ctx = self }; } /// Create a connection between an outbound port and an inbound port. pub fn connect(comptime self: *Self, out: OutPort, in: InPort) void { self.conns = append(self.conns, ComposeCtxConn{ .out = out, .in = in, }); } /// Define the entrypoint. pub fn entry(comptime self: *Self, in: InPort) void { self.entry_port = in; } }; // Implementing a system // -------------------------------------------------------------------------- /// Construct a "system" type from a compose function. pub fn Compose(comptime desc: fn (*ComposeCtx) void) type { var ctx = ComposeCtx{}; comptime desc(&ctx); const state_layout = layoutState(ctx.cells); const StateTy = AlignedStorage(state_layout.size, state_layout.alignment); return struct { state: []align(state_layout.alignment) u8, const Self = @This(); /// Get the type for storing the state of the system. /// /// The application should create a global variable of this type, /// initialized with `undefined`. The address to the global variable /// must be explicitly supplied to `link` so that its address can be /// known. pub fn State() type { return StateTy; } /// Hoge pub fn link(state: *StateTy) Self { return Self{ .state = state.toBytes() }; } /// Invoke the constructors. 
pub fn init(comptime self: *const Self) void { inline for (ctx.cells) |cell, cell_id| { const cell_static = comptime self.cellStatic(cell_id); cell.class.ctor(cell_static); } } /// Invoke an entrypoint method. pub fn invoke(comptime self: *const Self, comptime name: []const u8, args: ...) InvokeReturnType(name) { const entry = ctx.entry_port.?; const cell = ctx.cells[entry.cell_id]; const cell_static = comptime self.cellStatic(entry.cell_id); const vtable = comptime makeVtable(cell.class, entry.in_port_id, *const CellStaticOfCell(entry.cell_id)); const func = @field(vtable, name); // Compiler bug: This crashes the compiler: // `func(self.target_self, args)` switch (args.len) { 0 => return func(cell_static), 1 => return func(cell_static, args[0]), 2 => return func(cell_static, args[0], args[1]), 3 => return func(cell_static, args[0], args[1], args[2]), 4 => return func(cell_static, args[0], args[1], args[2], args[3]), 5 => return func(cell_static, args[0], args[1], args[2], args[3], args[4]), 6 => return func(cell_static, args[0], args[1], args[2], args[3], args[4], args[5]), 7 => return func(cell_static, args[0], args[1], args[2], args[3], args[4], args[5], args[6]), else => @compileLog("Too many arguments (cases only up to 7 are implemented)"), } return 42; } // Since return type inference isn't implemented yet... // <https://github.com/ziglang/zig/issues/447> fn InvokeReturnType(comptime name: []const u8) type { const entry = ctx.entry_port.?; const cell = ctx.cells[entry.cell_id]; const vtable = comptime makeVtable(cell.class, entry.in_port_id, *const CellStaticOfCell(entry.cell_id)); return @typeOf(@field(vtable, name)).ReturnType; } fn CellStateOfCell(comptime cell_id: usize) type { return ctx.cells[cell_id].class.StateTy; } fn cellState(self: *const Self, comptime cell_id: usize) *CellStateOfCell(cell_id) { const off = comptime state_layout.cell_offs[cell_id]; const T = CellStateOfCell(cell_id); if (@sizeOf(T) == 0) { return undefined; } else { const bytes = &self.state[off]; return @ptrCast(*T, @alignCast(@alignOf(T), bytes)); } } fn CellStaticOfCell(comptime cell_id: usize) type { return CellStatic(ctx.cells[cell_id].class); } /// Get a pointer to `CellStatic(cell_id)`. fn cellStatic(self: *const Self, comptime cell_id: usize) *const CellStaticOfCell(cell_id) { // Construct a `CellStatic`. var st: CellStaticOfCell(cell_id) = undefined; const cell = ctx.cells[cell_id]; st._attr = cell.AttrInit.get(@typeOf(st._attr)); st._state = self.cellState(cell_id); inline for (cell.class.out_ports) |*out_port, out_port_id| { // Find the corresponding inbound port // TODO: This is very inefficient // TODO: improve diagnostics const conn: ?ComposeCtxConn = comptime findConn(cell_id, out_port_id); if (conn == null) { @compileError("each outbound port must have exactly one connection"); } // TODO: see if circular reference works? 
const target_cell = ctx.cells[conn.?.in.cell_id]; const target_cell_static = self.cellStatic(conn.?.in.cell_id); const target_vtable = comptime makeVtable(target_cell.class, conn.?.in.in_port_id, *const CellStaticOfCell(conn.?.in.cell_id)); // Erase `*const CellStaticOfCell` const vtable_ty = @typeOf(st._out_port_vtables.get(out_port_id).*); const target_vtable_erased = @ptrCast(vtable_ty, &target_vtable); const target_cell_static_erased = @ptrCast(CellStaticErased, target_cell_static); st._out_port_vtables.get(out_port_id).* = target_vtable_erased; st._out_port_target_selves[out_port_id] = target_cell_static_erased; } return &st; } fn findConn(cell_id: usize, out_port_id: usize) ?ComposeCtxConn { var conn: ?ComposeCtxConn = null; for (ctx.conns) |c| { if (c.out.cell_id == cell_id and c.out.out_port_id == out_port_id) { if (conn != null) { return null; } conn = c; } } return conn; } }; } // Dispatch // -------------------------------------------------------------------------- /// Create a vtable for an inbound port. /// /// The handler functions are instantiated using the receiver type `Self`. Thus, /// `Self` must be appropriate for the context of `class`. fn makeVtable(comptime class: Class, comptime in_port_id: usize, comptime Self: type) class.in_ports[in_port_id].sig.VtableFactory(Self) { const sig = class.in_ports[in_port_id].sig; const Vtable = sig.VtableFactory(Self); var vtable: Vtable = undefined; const Impl = class.in_ports[in_port_id].ImplFactory(Self); if (@hasDecl(Impl, "__vtable__")) { // Raw vtable mode - see `ImplFactory`'s documentation.' const vt = Impl.__vtable__(); if (@typeOf(vt) != Vtable) { @compileError("`__vtable__()` returned a value of type " ++ @typeName(@typeOf(vt)) ++ ", which is not a valid vtable type"); } return vt; } comptime var i = 0; inline while (i < @memberCount(Vtable)) { const name = @memberName(Vtable, i); const func = @field(Impl, name); @field(vtable, name) = func; i += 1; } return vtable; } // Cell memory layout // -------------------------------------------------------------------------- // TODO: Abstract and classify memory allocations into static and dynamic /// A type-erased `*const CellStatic`. const CellStaticErased = [*]const u8; /// A structure containing the state, attributes, and outbound port vtables of /// a cell. /// /// This type can be used as a `Self` type of `makeVtable`. fn CellStatic(comptime class: Class) type { return struct { _attr: class.AttrTy, _state: *class.StateTy, // TODO: Optimize, e.g., devirtualize _out_port_vtables: Tuple(map(type, struct { fn ___(comptime x: *const OutPortInfo) type { return *const x.sig.VtableFactory(CellStaticErased); } }.___, class.out_ports)), _out_port_target_selves: [class.out_ports.len]CellStaticErased, const Self = @This(); pub fn attr(self: *const Self) *const class.AttrTy { return &self._attr; } pub fn state(self: *const Self) *class.StateTy { return self._state; } pub fn out(self: *const Self, comptime name: []const u8) CellStaticOut(class.out_port_info(name)) { const index = comptime class.out_port_i(name); return CellStaticOut(class.out_port_info(name)){ .vtable = self._out_port_vtables.getConst(index).*, .target_self = self._out_port_target_selves[index], }; } }; } fn CellStaticOut(comptime out_port: *const OutPortInfo) type { return struct { vtable: *const out_port.sig.VtableFactory(CellStaticErased), target_self: CellStaticErased, const Self = @This(); /// Invoke a method in an outbound port. pub fn invoke(self: Self, comptime name: []const u8, args: ...) 
@typeOf(@field(self.vtable, name)).ReturnType { const func = @field(self.vtable, name); // Compiler bug: This crashes the compiler: // `func(self.target_self, args)` switch (args.len) { 0 => return func(self.target_self), 1 => return func(self.target_self, args[0]), 2 => return func(self.target_self, args[0], args[1]), 3 => return func(self.target_self, args[0], args[1], args[2]), 4 => return func(self.target_self, args[0], args[1], args[2], args[3]), 5 => return func(self.target_self, args[0], args[1], args[2], args[3], args[4]), 6 => return func(self.target_self, args[0], args[1], args[2], args[3], args[4], args[5]), 7 => return func(self.target_self, args[0], args[1], args[2], args[3], args[4], args[5], args[6]), else => @compileLog("Too many arguments (cases only up to 7 are implemented)"), } } }; } // State memory layout // -------------------------------------------------------------------------- const CellStateLayout = usize; const StateLayout = struct { cell_offs: []const usize, size: usize, alignment: u29, }; fn layoutState(comptime cells: []const *ComposeCtxCell) StateLayout { // Sort by alignment const Ent = struct { i: usize, alignment: u29, size: usize, }; var ents: [cells.len]Ent = undefined; var count: usize = 0; for (cells) |cell, i| { if (@sizeOf(cell.class.StateTy) == 0) { continue; } const ent = Ent{ .i = i, .size = @sizeOf(cell.class.StateTy), .alignment = @alignOf(cell.class.StateTy), }; ents[count] = ent; count += 1; } sort(Ent, ents[0..count], struct { fn ___(lhs: Ent, rhs: Ent) bool { return lhs.alignment > rhs.alignment; } }.___); // Pack 'em var off: usize = 0; var cell_offs = [1]usize{0} ** cells.len; for (ents[0..count]) |ent| { off = roundUpAlign(off, ent.alignment); cell_offs[ent.i] = off; off += ent.size; } return StateLayout{ .cell_offs = cell_offs, .size = off, .alignment = if (count > 0) ents[0].alignment else 1, }; }
druzhba/druzhba.zig
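// Standalone sketch of the comptime vtable-filling idea that makeVtable()
// above is built around: copy same-named function declarations from an
// implementation type into a struct of function fields. All names here are
// illustrative, and the snippet assumes a pre-0.10 Zig where plain `fn`
// types are legal struct field types (as in druzhba itself).
const std = @import("std");

const MathVtable = struct {
    add: fn (a: i32, b: i32) i32,
};

const MathImpl = struct {
    fn add(a: i32, b: i32) i32 {
        return a + b;
    }
};

fn fillVtable(comptime Vtable: type, comptime Impl: type) Vtable {
    var vt: Vtable = undefined;
    inline for (std.meta.fields(Vtable)) |field| {
        @field(vt, field.name) = @field(Impl, field.name);
    }
    return vt;
}

test "comptime vtable fill" {
    const vt = comptime fillVtable(MathVtable, MathImpl);
    try std.testing.expectEqual(@as(i32, 3), vt.add(1, 2));
}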
pub const struct_WrenVM = opaque {}; pub const WrenVM = struct_WrenVM; pub const struct_WrenHandle = opaque {}; pub const WrenHandle = struct_WrenHandle; pub const WrenReallocateFn = ?fn (?*c_void, usize, ?*c_void) callconv(.C) ?*c_void; pub const WrenForeignMethodFn = ?fn (?*WrenVM) callconv(.C) void; pub const WrenFinalizerFn = ?fn (?*c_void) callconv(.C) void; pub const WrenResolveModuleFn = ?fn (?*WrenVM, [*c]const u8, [*c]const u8) callconv(.C) [*c]const u8; pub const WrenLoadModuleCompleteFn = ?fn (?*WrenVM, [*c]const u8, struct_WrenLoadModuleResult) callconv(.C) void; pub const struct_WrenLoadModuleResult = extern struct { source: [*c]const u8, /// Called after loadModuleFn is called for module [name]. The original returned result /// is handed back to you in this callback, so that you can free memory if appropriate. onComplete: WrenLoadModuleCompleteFn, userData: ?*c_void, }; pub const WrenLoadModuleResult = struct_WrenLoadModuleResult; pub const WrenLoadModuleFn = ?fn (?*WrenVM, [*c]const u8) callconv(.C) WrenLoadModuleResult; pub const WrenBindForeignMethodFn = ?fn (?*WrenVM, [*c]const u8, [*c]const u8, bool, [*c]const u8) callconv(.C) WrenForeignMethodFn; pub const WrenWriteFn = ?fn (?*WrenVM, [*c]const u8) callconv(.C) void; pub const WREN_ERROR_COMPILE: c_int = 0; pub const WREN_ERROR_RUNTIME: c_int = 1; pub const WREN_ERROR_STACK_TRACE: c_int = 2; pub const WrenErrorType = c_uint; pub const WrenErrorFn = ?fn (?*WrenVM, WrenErrorType, [*c]const u8, c_int, [*c]const u8) callconv(.C) void; pub const WrenForeignClassMethods = extern struct { /// The callback invoked when the foreign object is created. /// This must be provided. Inside the body of this, it must call /// [setSlotNewForeign()] exactly once. allocate: WrenForeignMethodFn, /// The callback invoked when the garbage collector is about to collect a /// foreign object's memory. /// This may be `NULL` if the foreign class does not need to finalize. finalize: WrenFinalizerFn, }; pub const WrenBindForeignClassFn = ?fn (?*WrenVM, [*c]const u8, [*c]const u8) callconv(.C) WrenForeignClassMethods; pub const WrenConfiguration = extern struct { /// The callback Wren will use to allocate, reallocate, and deallocate memory. /// /// If `NULL`, defaults to a built-in function that uses `realloc` and `free`. reallocateFn: WrenReallocateFn, /// The callback Wren uses to resolve a module name. /// /// Some host applications may wish to support "relative" imports, where the /// meaning of an import string depends on the module that contains it. To /// support that without baking any policy into Wren itself, the VM gives the /// host a chance to resolve an import string. /// /// Before an import is loaded, it calls this, passing in the name of the /// module that contains the import and the import string. The host app can /// look at both of those and produce a new "canonical" string that uniquely /// identifies the module. This string is then used as the name of the module /// going forward. It is what is passed to [loadModuleFn], how duplicate /// imports of the same module are detected, and how the module is reported in /// stack traces. /// /// If you leave this function NULL, then the original import string is /// treated as the resolved string. /// /// If an import cannot be resolved by the embedder, it should return NULL and /// Wren will report that as a runtime error. 
/// /// Wren will take ownership of the string you return and free it for you, so /// it should be allocated using the same allocation function you provide /// above. resolveModuleFn: WrenResolveModuleFn, /// The callback Wren uses to load a module. /// /// Since Wren does not talk directly to the file system, it relies on the /// embedder to physically locate and read the source code for a module. The /// first time an import appears, Wren will call this and pass in the name of /// the module being imported. The method will return a result, which contains /// the source code for that module. Memory for the source is owned by the /// host application, and can be freed using the onComplete callback. /// /// This will only be called once for any given module name. Wren caches the /// result internally so subsequent imports of the same module will use the /// previous source and not call this. /// /// If a module with the given name could not be found by the embedder, it /// should return NULL and Wren will report that as a runtime error. loadModuleFn: WrenLoadModuleFn, /// The callback Wren uses to find a foreign method and bind it to a class. /// /// When a foreign method is declared in a class, this will be called with the /// foreign method's module, class, and signature when the class body is /// executed. It should return a pointer to the foreign function that will be /// bound to that method. /// /// If the foreign function could not be found, this should return NULL and /// Wren will report it as runtime error. bindForeignMethodFn: WrenBindForeignMethodFn, /// The callback Wren uses to find a foreign class and get its foreign methods. /// /// When a foreign class is declared, this will be called with the class's /// module and name when the class body is executed. It should return the /// foreign functions uses to allocate and (optionally) finalize the bytes /// stored in the foreign object when an instance is created. bindForeignClassFn: WrenBindForeignClassFn, /// The callback Wren uses to display text when `System.print()` or the other /// related functions are called. /// /// If this is `NULL`, Wren discards any printed text. writeFn: WrenWriteFn, /// The callback Wren uses to report errors. /// /// When an error occurs, this will be called with the module name, line /// number, and an error message. If this is `NULL`, Wren doesn't report any errorFn: WrenErrorFn, /// The number of bytes Wren will allocate before triggering the first garbage /// collection. /// /// If zero, defaults to 10MB. initialHeapSize: usize, /// After a collection occurs, the threshold for the next collection is /// determined based on the number of bytes remaining in use. This allows Wren /// to shrink its memory usage automatically after reclaiming a large amount /// of memory. /// /// This can be used to ensure that the heap does not get too small, which can /// in turn lead to a large number of collections afterwards as the heap grows /// back to a usable size. /// /// If zero, defaults to 1MB. minHeapSize: usize, /// Wren will resize the heap automatically as the number of bytes /// remaining in use after a collection changes. This number determines the /// amount of additional memory Wren will use after a collection, as a /// percentage of the current heap size. /// /// For example, say that this is 50. 
After a garbage collection, when there /// are 400 bytes of memory still in use, the next collection will be triggered /// after a total of 600 bytes are allocated (including the 400 already in /// use.) /// /// Setting this to a smaller number wastes less memory, but triggers more /// frequent garbage collections. /// /// If zero, defaults to 50. heapGrowthPercent: c_int, /// User-defined data associated with the VM. userData: ?*c_void, }; pub const WREN_RESULT_SUCCESS: c_int = 0; pub const WREN_RESULT_COMPILE_ERROR: c_int = 1; pub const WREN_RESULT_RUNTIME_ERROR: c_int = 2; pub const WrenInterpretResult = c_uint; pub const WREN_TYPE_BOOL: c_int = 0; pub const WREN_TYPE_NUM: c_int = 1; pub const WREN_TYPE_FOREIGN: c_int = 2; pub const WREN_TYPE_LIST: c_int = 3; pub const WREN_TYPE_MAP: c_int = 4; pub const WREN_TYPE_NULL: c_int = 5; pub const WREN_TYPE_STRING: c_int = 6; pub const WREN_TYPE_UNKNOWN: c_int = 7; pub const WrenType = c_uint; // Extern fns in externs.zig pub const WREN_VERSION_MAJOR = @as(c_int, 0); pub const WREN_VERSION_MINOR = @as(c_int, 4); pub const WREN_VERSION_PATCH = @as(c_int, 0); pub const WREN_VERSION_STRING = "0.4.0"; pub const WREN_VERSION_NUMBER = ((WREN_VERSION_MAJOR * @import("std").zig.c_translation.promoteIntLiteral(c_int, 1000000, .decimal)) + (WREN_VERSION_MINOR * @as(c_int, 1000))) + WREN_VERSION_PATCH;
src/c.zig
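// Sketch of filling the WrenConfiguration described above with only a write
// callback, leaving every other field zero/null so the documented NULL/zero
// defaults in the comments above apply. The `@import("c.zig")` path and the
// pre-0.10 coercion of a function to an optional `fn` field are assumptions;
// the wrenNewVM/wrenInterpret externs live in externs.zig per the comment
// above and are not shown here.
const std = @import("std");
const wren = @import("c.zig");

fn writeFn(vm: ?*wren.WrenVM, text: [*c]const u8) callconv(.C) void {
    _ = vm;
    // Wren hands the callback a NUL-terminated C string.
    std.debug.print("{s}", .{text[0..std.mem.len(text)]});
}

pub fn makeConfig() wren.WrenConfiguration {
    var config = std.mem.zeroes(wren.WrenConfiguration);
    config.writeFn = writeFn;
    return config;
}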
const std = @import("std"); const Context = @import("client.zig").Context; const Object = @import("client.zig").Object; // wl_display pub const wl_display_interface = struct { // core global object sync: ?fn (*Context, Object, u32) anyerror!void, get_registry: ?fn (*Context, Object, u32) anyerror!void, }; fn wl_display_sync_default(context: *Context, object: Object, callback: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_display_get_registry_default(context: *Context, object: Object, registry: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_DISPLAY = wl_display_interface{ .sync = wl_display_sync_default, .get_registry = wl_display_get_registry_default, }; pub fn new_wl_display(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_display_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_display_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // sync 0 => { var callback: u32 = try object.context.next_u32(); if (WL_DISPLAY.sync) |sync| { try sync(object.context, object, callback); } }, // get_registry 1 => { var registry: u32 = try object.context.next_u32(); if (WL_DISPLAY.get_registry) |get_registry| { try get_registry(object.context, object, registry); } }, else => {}, } } pub const wl_display_error = enum(u32) { invalid_object = 0, invalid_method = 1, no_memory = 2, implementation = 3, }; // // The error event is sent out when a fatal (non-recoverable) // error has occurred. The object_id argument is the object // where the error occurred, most often in response to a request // to that object. The code identifies the error and is defined // by the object interface. As such, each interface defines its // own set of error codes. The message is a brief description // of the error, for (debugging) convenience. // pub fn wl_display_send_error(object: Object, object_id: u32, code: u32, message: []const u8) anyerror!void { object.context.startWrite(); object.context.putU32(object_id); object.context.putU32(code); object.context.putString(message); object.context.finishWrite(object.id, 0); } // // This event is used internally by the object ID management // logic. When a client deletes an object, the server will send // this event to acknowledge that it has seen the delete request. // When the client receives this event, it will know that it can // safely reuse the object ID. 
// pub fn wl_display_send_delete_id(object: Object, id: u32) anyerror!void { object.context.startWrite(); object.context.putU32(id); object.context.finishWrite(object.id, 1); } // wl_registry pub const wl_registry_interface = struct { // global registry object bind: ?fn (*Context, Object, u32, []u8, u32, u32) anyerror!void, }; fn wl_registry_bind_default(context: *Context, object: Object, name: u32, name_string: []u8, version: u32, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_REGISTRY = wl_registry_interface{ .bind = wl_registry_bind_default, }; pub fn new_wl_registry(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_registry_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_registry_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // bind 0 => { var name: u32 = try object.context.next_u32(); var name_string: []u8 = try object.context.next_string(); var version: u32 = try object.context.next_u32(); var id: u32 = try object.context.next_u32(); if (WL_REGISTRY.bind) |bind| { try bind(object.context, object, name, name_string, version, id); } }, else => {}, } } // // Notify the client of global objects. // // The event notifies the client that a global object with // the given name is now available, and it implements the // given version of the given interface. // pub fn wl_registry_send_global(object: Object, name: u32, interface: []const u8, version: u32) anyerror!void { object.context.startWrite(); object.context.putU32(name); object.context.putString(interface); object.context.putU32(version); object.context.finishWrite(object.id, 0); } // // Notify the client of removed global objects. // // This event notifies the client that the global identified // by name is no longer available. If the client bound to // the global using the bind request, the client should now // destroy that object. // // The object remains valid and requests to the object will be // ignored until the client destroys it, to avoid races between // the global going away and a client sending a request to it. // pub fn wl_registry_send_global_remove(object: Object, name: u32) anyerror!void { object.context.startWrite(); object.context.putU32(name); object.context.finishWrite(object.id, 1); } // wl_callback pub const wl_callback_interface = struct { // callback object }; pub var WL_CALLBACK = wl_callback_interface{}; pub fn new_wl_callback(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_callback_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_callback_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { else => {}, } } // // Notify the client when the related request is done. 
// pub fn wl_callback_send_done(object: Object, callback_data: u32) anyerror!void { object.context.startWrite(); object.context.putU32(callback_data); object.context.finishWrite(object.id, 0); } // wl_compositor pub const wl_compositor_interface = struct { // the compositor singleton create_surface: ?fn (*Context, Object, u32) anyerror!void, create_region: ?fn (*Context, Object, u32) anyerror!void, }; fn wl_compositor_create_surface_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_compositor_create_region_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_COMPOSITOR = wl_compositor_interface{ .create_surface = wl_compositor_create_surface_default, .create_region = wl_compositor_create_region_default, }; pub fn new_wl_compositor(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_compositor_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_compositor_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // create_surface 0 => { var id: u32 = try object.context.next_u32(); if (WL_COMPOSITOR.create_surface) |create_surface| { try create_surface(object.context, object, id); } }, // create_region 1 => { var id: u32 = try object.context.next_u32(); if (WL_COMPOSITOR.create_region) |create_region| { try create_region(object.context, object, id); } }, else => {}, } } // wl_shm_pool pub const wl_shm_pool_interface = struct { // a shared memory pool create_buffer: ?fn (*Context, Object, u32, i32, i32, i32, i32, u32) anyerror!void, destroy: ?fn ( *Context, Object, ) anyerror!void, resize: ?fn (*Context, Object, i32) anyerror!void, }; fn wl_shm_pool_create_buffer_default(context: *Context, object: Object, id: u32, offset: i32, width: i32, height: i32, stride: i32, format: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shm_pool_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shm_pool_resize_default(context: *Context, object: Object, size: i32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SHM_POOL = wl_shm_pool_interface{ .create_buffer = wl_shm_pool_create_buffer_default, .destroy = wl_shm_pool_destroy_default, .resize = wl_shm_pool_resize_default, }; pub fn new_wl_shm_pool(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_shm_pool_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_shm_pool_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // create_buffer 0 => { var id: u32 = try object.context.next_u32(); var offset: i32 = try object.context.next_i32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); var stride: i32 = try object.context.next_i32(); var format: u32 = try object.context.next_u32(); if (WL_SHM_POOL.create_buffer) |create_buffer| { try create_buffer(object.context, object, id, offset, width, height, stride, format); } }, // destroy 1 => { if (WL_SHM_POOL.destroy) |destroy| { try destroy( object.context, object, ); } }, // resize 2 => { var size: i32 = try object.context.next_i32(); if (WL_SHM_POOL.resize) |resize| { try resize(object.context, object, size); } }, else => {}, } } // wl_shm pub const wl_shm_interface = struct { // shared memory support create_pool: ?fn (*Context, Object, u32, i32, i32) 
anyerror!void, }; fn wl_shm_create_pool_default(context: *Context, object: Object, id: u32, fd: i32, size: i32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SHM = wl_shm_interface{ .create_pool = wl_shm_create_pool_default, }; pub fn new_wl_shm(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_shm_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_shm_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // create_pool 0 => { var id: u32 = try object.context.next_u32(); var fd: i32 = try object.context.next_fd(); var size: i32 = try object.context.next_i32(); if (WL_SHM.create_pool) |create_pool| { try create_pool(object.context, object, id, fd, size); } }, else => {}, } } pub const wl_shm_error = enum(u32) { invalid_format = 0, invalid_stride = 1, invalid_fd = 2, }; pub const wl_shm_format = enum(u32) { argb8888 = 0, xrgb8888 = 1, c8 = 0x20203843, rgb332 = 0x38424752, bgr233 = 0x38524742, xrgb4444 = 0x32315258, xbgr4444 = 0x32314258, rgbx4444 = 0x32315852, bgrx4444 = 0x32315842, argb4444 = 0x32315241, abgr4444 = 0x32314241, rgba4444 = 0x32314152, bgra4444 = 0x32314142, xrgb1555 = 0x35315258, xbgr1555 = 0x35314258, rgbx5551 = 0x35315852, bgrx5551 = 0x35315842, argb1555 = 0x35315241, abgr1555 = 0x35314241, rgba5551 = 0x35314152, bgra5551 = 0x35314142, rgb565 = 0x36314752, bgr565 = 0x36314742, rgb888 = 0x34324752, bgr888 = 0x34324742, xbgr8888 = 0x34324258, rgbx8888 = 0x34325852, bgrx8888 = 0x34325842, abgr8888 = 0x34324241, rgba8888 = 0x34324152, bgra8888 = 0x34324142, xrgb2101010 = 0x30335258, xbgr2101010 = 0x30334258, rgbx1010102 = 0x30335852, bgrx1010102 = 0x30335842, argb2101010 = 0x30335241, abgr2101010 = 0x30334241, rgba1010102 = 0x30334152, bgra1010102 = 0x30334142, yuyv = 0x56595559, yvyu = 0x55595659, uyvy = 0x59565955, vyuy = 0x59555956, ayuv = 0x56555941, nv12 = 0x3231564e, nv21 = 0x3132564e, nv16 = 0x3631564e, nv61 = 0x3136564e, yuv410 = 0x39565559, yvu410 = 0x39555659, yuv411 = 0x31315559, yvu411 = 0x31315659, yuv420 = 0x32315559, yvu420 = 0x32315659, yuv422 = 0x36315559, yvu422 = 0x36315659, yuv444 = 0x34325559, yvu444 = 0x34325659, }; // // Informs the client about a valid pixel format that // can be used for buffers. Known formats include // argb8888 and xrgb8888. // pub fn wl_shm_send_format(object: Object, format: u32) anyerror!void { object.context.startWrite(); object.context.putU32(format); object.context.finishWrite(object.id, 0); } // wl_buffer pub const wl_buffer_interface = struct { // content for a wl_surface destroy: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_buffer_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_BUFFER = wl_buffer_interface{ .destroy = wl_buffer_destroy_default, }; pub fn new_wl_buffer(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_buffer_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_buffer_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (WL_BUFFER.destroy) |destroy| { try destroy( object.context, object, ); } }, else => {}, } } // // Sent when this wl_buffer is no longer used by the compositor. // The client is now free to reuse or destroy this buffer and its // backing storage. 
// // If a client receives a release event before the frame callback // requested in the same wl_surface.commit that attaches this // wl_buffer to a surface, then the client is immediately free to // reuse the buffer and its backing storage, and does not need a // second buffer for the next surface content update. Typically // this is possible, when the compositor maintains a copy of the // wl_surface contents, e.g. as a GL texture. This is an important // optimization for GL(ES) compositors with wl_shm clients. // pub fn wl_buffer_send_release(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 0); } // wl_data_offer pub const wl_data_offer_interface = struct { // offer to transfer data accept: ?fn (*Context, Object, u32, []u8) anyerror!void, receive: ?fn (*Context, Object, []u8, i32) anyerror!void, destroy: ?fn ( *Context, Object, ) anyerror!void, finish: ?fn ( *Context, Object, ) anyerror!void, set_actions: ?fn (*Context, Object, u32, u32) anyerror!void, }; fn wl_data_offer_accept_default(context: *Context, object: Object, serial: u32, mime_type: []u8) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_offer_receive_default(context: *Context, object: Object, mime_type: []u8, fd: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_offer_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_offer_finish_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_offer_set_actions_default(context: *Context, object: Object, dnd_actions: u32, preferred_action: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_DATA_OFFER = wl_data_offer_interface{ .accept = wl_data_offer_accept_default, .receive = wl_data_offer_receive_default, .destroy = wl_data_offer_destroy_default, .finish = wl_data_offer_finish_default, .set_actions = wl_data_offer_set_actions_default, }; pub fn new_wl_data_offer(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_data_offer_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_data_offer_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // accept 0 => { var serial: u32 = try object.context.next_u32(); var mime_type: []u8 = try object.context.next_string(); if (WL_DATA_OFFER.accept) |accept| { try accept(object.context, object, serial, mime_type); } }, // receive 1 => { var mime_type: []u8 = try object.context.next_string(); var fd: i32 = try object.context.next_fd(); if (WL_DATA_OFFER.receive) |receive| { try receive(object.context, object, mime_type, fd); } }, // destroy 2 => { if (WL_DATA_OFFER.destroy) |destroy| { try destroy( object.context, object, ); } }, // finish 3 => { if (WL_DATA_OFFER.finish) |finish| { try finish( object.context, object, ); } }, // set_actions 4 => { var dnd_actions: u32 = try object.context.next_u32(); var preferred_action: u32 = try object.context.next_u32(); if (WL_DATA_OFFER.set_actions) |set_actions| { try set_actions(object.context, object, dnd_actions, preferred_action); } }, else => {}, } } pub const wl_data_offer_error = enum(u32) { invalid_finish = 0, invalid_action_mask = 1, invalid_action = 2, invalid_offer = 3, }; // // Sent immediately after creating the wl_data_offer object. One // event per offered mime type. 
// pub fn wl_data_offer_send_offer(object: Object, mime_type: []const u8) anyerror!void { object.context.startWrite(); object.context.putString(mime_type); object.context.finishWrite(object.id, 0); } // // This event indicates the actions offered by the data source. It // will be sent right after wl_data_device.enter, or anytime the source // side changes its offered actions through wl_data_source.set_actions. // pub fn wl_data_offer_send_source_actions(object: Object, source_actions: u32) anyerror!void { object.context.startWrite(); object.context.putU32(source_actions); object.context.finishWrite(object.id, 1); } // // This event indicates the action selected by the compositor after // matching the source/destination side actions. Only one action (or // none) will be offered here. // // This event can be emitted multiple times during the drag-and-drop // operation in response to destination side action changes through // wl_data_offer.set_actions. // // This event will no longer be emitted after wl_data_device.drop // happened on the drag-and-drop destination, the client must // honor the last action received, or the last preferred one set // through wl_data_offer.set_actions when handling an "ask" action. // // Compositors may also change the selected action on the fly, mainly // in response to keyboard modifier changes during the drag-and-drop // operation. // // The most recent action received is always the valid one. Prior to // receiving wl_data_device.drop, the chosen action may change (e.g. // due to keyboard modifiers being pressed). At the time of receiving // wl_data_device.drop the drag-and-drop destination must honor the // last action received. // // Action changes may still happen after wl_data_device.drop, // especially on "ask" actions, where the drag-and-drop destination // may choose another action afterwards. Action changes happening // at this stage are always the result of inter-client negotiation, the // compositor shall no longer be able to induce a different action. // // Upon "ask" actions, it is expected that the drag-and-drop destination // may potentially choose a different action and/or mime type, // based on wl_data_offer.source_actions and finally chosen by the // user (e.g. popping up a menu with the available options). The // final wl_data_offer.set_actions and wl_data_offer.accept requests // must happen before the call to wl_data_offer.finish. 
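// An illustrative sketch, not generated code: after intersecting the action
// masks offered by the source and the destination, a compositor could pick a
// single action and report it to the destination. The mask-matching policy
// below (prefer copy over move) is an assumption for illustration only.
fn example_notify_chosen_action(offer: Object, source_mask: u32, dest_mask: u32) anyerror!void {
    const common = source_mask & dest_mask;
    const copy = @enumToInt(wl_data_device_manager_dnd_action.copy);
    const move = @enumToInt(wl_data_device_manager_dnd_action.move);
    // Only one action (or none) may be reported to the client.
    const chosen: u32 = if ((common & copy) != 0) copy else if ((common & move) != 0) move else @enumToInt(wl_data_device_manager_dnd_action.none);
    try wl_data_offer_send_action(offer, chosen);
}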
// pub fn wl_data_offer_send_action(object: Object, dnd_action: u32) anyerror!void { object.context.startWrite(); object.context.putU32(dnd_action); object.context.finishWrite(object.id, 2); } // wl_data_source pub const wl_data_source_interface = struct { // offer to transfer data offer: ?fn (*Context, Object, []u8) anyerror!void, destroy: ?fn ( *Context, Object, ) anyerror!void, set_actions: ?fn (*Context, Object, u32) anyerror!void, }; fn wl_data_source_offer_default(context: *Context, object: Object, mime_type: []u8) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_source_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_source_set_actions_default(context: *Context, object: Object, dnd_actions: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_DATA_SOURCE = wl_data_source_interface{ .offer = wl_data_source_offer_default, .destroy = wl_data_source_destroy_default, .set_actions = wl_data_source_set_actions_default, }; pub fn new_wl_data_source(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_data_source_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_data_source_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // offer 0 => { var mime_type: []u8 = try object.context.next_string(); if (WL_DATA_SOURCE.offer) |offer| { try offer(object.context, object, mime_type); } }, // destroy 1 => { if (WL_DATA_SOURCE.destroy) |destroy| { try destroy( object.context, object, ); } }, // set_actions 2 => { var dnd_actions: u32 = try object.context.next_u32(); if (WL_DATA_SOURCE.set_actions) |set_actions| { try set_actions(object.context, object, dnd_actions); } }, else => {}, } } pub const wl_data_source_error = enum(u32) { invalid_action_mask = 0, invalid_source = 1, }; // // Sent when a target accepts pointer_focus or motion events. If // a target does not accept any of the offered types, type is NULL. // // Used for feedback during drag-and-drop. // pub fn wl_data_source_send_target(object: Object, mime_type: []const u8) anyerror!void { object.context.startWrite(); object.context.putString(mime_type); object.context.finishWrite(object.id, 0); } // // Request for data from the client. Send the data as the // specified mime type over the passed file descriptor, then // close it. // pub fn wl_data_source_send_send(object: Object, mime_type: []const u8, fd: i32) anyerror!void { object.context.startWrite(); object.context.putString(mime_type); object.context.putFd(fd); object.context.finishWrite(object.id, 1); } // // This data source is no longer valid. There are several reasons why // this could happen: // // - The data source has been replaced by another data source. // - The drag-and-drop operation was performed, but the drop destination // did not accept any of the mime types offered through // wl_data_source.target. // - The drag-and-drop operation was performed, but the drop destination // did not select any of the actions present in the mask offered through // wl_data_source.action. // - The drag-and-drop operation was performed but didn't happen over a // surface. // - The compositor cancelled the drag-and-drop operation (e.g. compositor // dependent timeouts to avoid stale drag-and-drop transfers). // // The client should clean up and destroy this data source. 
// // For objects of version 2 or older, wl_data_source.cancelled will // only be emitted if the data source was replaced by another data // source. // pub fn wl_data_source_send_cancelled(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 2); } // // The user performed the drop action. This event does not indicate // acceptance, wl_data_source.cancelled may still be emitted afterwards // if the drop destination does not accept any mime type. // // However, this event might however not be received if the compositor // cancelled the drag-and-drop operation before this event could happen. // // Note that the data_source may still be used in the future and should // not be destroyed here. // pub fn wl_data_source_send_dnd_drop_performed(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 3); } // // The drop destination finished interoperating with this data // source, so the client is now free to destroy this data source and // free all associated data. // // If the action used to perform the operation was "move", the // source can now delete the transferred data. // pub fn wl_data_source_send_dnd_finished(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 4); } // // This event indicates the action selected by the compositor after // matching the source/destination side actions. Only one action (or // none) will be offered here. // // This event can be emitted multiple times during the drag-and-drop // operation, mainly in response to destination side changes through // wl_data_offer.set_actions, and as the data device enters/leaves // surfaces. // // It is only possible to receive this event after // wl_data_source.dnd_drop_performed if the drag-and-drop operation // ended in an "ask" action, in which case the final wl_data_source.action // event will happen immediately before wl_data_source.dnd_finished. // // Compositors may also change the selected action on the fly, mainly // in response to keyboard modifier changes during the drag-and-drop // operation. // // The most recent action received is always the valid one. The chosen // action may change alongside negotiation (e.g. an "ask" action can turn // into a "move" operation), so the effects of the final action must // always be applied in wl_data_offer.dnd_finished. // // Clients can trigger cursor surface changes from this point, so // they reflect the current action. 
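// An illustrative sketch, not generated code: when a drag that ended in an
// "ask" action is finally resolved as "move" by the destination, the
// compositor could relay the final action to the source immediately before
// dnd_finished, as described above. The sequencing is an assumption for
// illustration; a real compositor would derive it from its own drag state.
fn example_finish_ask_as_move(source: Object) anyerror!void {
    try wl_data_source_send_action(source, @enumToInt(wl_data_device_manager_dnd_action.move));
    try wl_data_source_send_dnd_finished(source);
}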
// pub fn wl_data_source_send_action(object: Object, dnd_action: u32) anyerror!void { object.context.startWrite(); object.context.putU32(dnd_action); object.context.finishWrite(object.id, 5); } // wl_data_device pub const wl_data_device_interface = struct { // data transfer device start_drag: ?fn (*Context, Object, ?Object, Object, ?Object, u32) anyerror!void, set_selection: ?fn (*Context, Object, ?Object, u32) anyerror!void, release: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_data_device_start_drag_default(context: *Context, object: Object, source: ?Object, origin: Object, icon: ?Object, serial: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_device_set_selection_default(context: *Context, object: Object, source: ?Object, serial: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_device_release_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_DATA_DEVICE = wl_data_device_interface{ .start_drag = wl_data_device_start_drag_default, .set_selection = wl_data_device_set_selection_default, .release = wl_data_device_release_default, }; pub fn new_wl_data_device(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_data_device_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_data_device_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // start_drag 0 => { var source: ?Object = object.context.objects.get(try object.context.next_u32()); if (source != null) { if (source.?.dispatch != wl_data_source_dispatch) { return error.ObjectWrongType; } } var origin: Object = object.context.objects.get(try object.context.next_u32()).?; if (origin.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } var icon: ?Object = object.context.objects.get(try object.context.next_u32()); if (icon != null) { if (icon.?.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } } var serial: u32 = try object.context.next_u32(); if (WL_DATA_DEVICE.start_drag) |start_drag| { try start_drag(object.context, object, source, origin, icon, serial); } }, // set_selection 1 => { var source: ?Object = object.context.objects.get(try object.context.next_u32()); if (source != null) { if (source.?.dispatch != wl_data_source_dispatch) { return error.ObjectWrongType; } } var serial: u32 = try object.context.next_u32(); if (WL_DATA_DEVICE.set_selection) |set_selection| { try set_selection(object.context, object, source, serial); } }, // release 2 => { if (WL_DATA_DEVICE.release) |release| { try release( object.context, object, ); } }, else => {}, } } pub const wl_data_device_error = enum(u32) { role = 0, }; // // The data_offer event introduces a new wl_data_offer object, // which will subsequently be used in either the // data_device.enter event (for drag-and-drop) or the // data_device.selection event (for selections). Immediately // following the data_device_data_offer event, the new data_offer // object will send out data_offer.offer events to describe the // mime types it offers. // pub fn wl_data_device_send_data_offer(object: Object, id: u32) anyerror!void { object.context.startWrite(); object.context.putU32(id); object.context.finishWrite(object.id, 0); } // // This event is sent when an active drag-and-drop pointer enters // a surface owned by the client. The position of the pointer at // enter time is provided by the x and y arguments, in surface-local // coordinates. 
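// An illustrative sketch, not generated code: when a drag enters a client
// surface, the compositor first introduces the wl_data_offer and then sends
// enter with surface-local coordinates. The object ids and the (0, 0)
// position are placeholders for illustration.
fn example_drag_enter(device: Object, serial: u32, surface_id: u32, offer_id: u32) anyerror!void {
    try wl_data_device_send_data_offer(device, offer_id);
    try wl_data_device_send_enter(device, serial, surface_id, 0.0, 0.0, offer_id);
}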
// pub fn wl_data_device_send_enter(object: Object, serial: u32, surface: u32, x: f32, y: f32, id: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(surface); object.context.putFixed(x); object.context.putFixed(y); object.context.putU32(id); object.context.finishWrite(object.id, 1); } // // This event is sent when the drag-and-drop pointer leaves the // surface and the session ends. The client must destroy the // wl_data_offer introduced at enter time at this point. // pub fn wl_data_device_send_leave(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 2); } // // This event is sent when the drag-and-drop pointer moves within // the currently focused surface. The new position of the pointer // is provided by the x and y arguments, in surface-local // coordinates. // pub fn wl_data_device_send_motion(object: Object, time: u32, x: f32, y: f32) anyerror!void { object.context.startWrite(); object.context.putU32(time); object.context.putFixed(x); object.context.putFixed(y); object.context.finishWrite(object.id, 3); } // // The event is sent when a drag-and-drop operation is ended // because the implicit grab is removed. // // The drag-and-drop destination is expected to honor the last action // received through wl_data_offer.action, if the resulting action is // "copy" or "move", the destination can still perform // wl_data_offer.receive requests, and is expected to end all // transfers with a wl_data_offer.finish request. // // If the resulting action is "ask", the action will not be considered // final. The drag-and-drop destination is expected to perform one last // wl_data_offer.set_actions request, or wl_data_offer.destroy in order // to cancel the operation. // pub fn wl_data_device_send_drop(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 4); } // // The selection event is sent out to notify the client of a new // wl_data_offer for the selection for this device. The // data_device.data_offer and the data_offer.offer events are // sent out immediately before this event to introduce the data // offer object. The selection event is sent to a client // immediately before receiving keyboard focus and when a new // selection is set while the client has keyboard focus. The // data_offer is valid until a new data_offer or NULL is received // or until the client loses keyboard focus. The client must // destroy the previous selection data_offer, if any, upon receiving // this event. 
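// An illustrative sketch, not generated code: immediately before giving a
// client keyboard focus, a compositor could introduce the current selection
// offer, describe its mime type, and then send selection. The offer object
// and the mime type are placeholders for illustration.
fn example_announce_selection(device: Object, offer: Object) anyerror!void {
    try wl_data_device_send_data_offer(device, offer.id);
    try wl_data_offer_send_offer(offer, "text/plain;charset=utf-8");
    try wl_data_device_send_selection(device, offer.id);
}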
// pub fn wl_data_device_send_selection(object: Object, id: u32) anyerror!void { object.context.startWrite(); object.context.putU32(id); object.context.finishWrite(object.id, 5); } // wl_data_device_manager pub const wl_data_device_manager_interface = struct { // data transfer interface create_data_source: ?fn (*Context, Object, u32) anyerror!void, get_data_device: ?fn (*Context, Object, u32, Object) anyerror!void, }; fn wl_data_device_manager_create_data_source_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_data_device_manager_get_data_device_default(context: *Context, object: Object, id: u32, seat: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_DATA_DEVICE_MANAGER = wl_data_device_manager_interface{ .create_data_source = wl_data_device_manager_create_data_source_default, .get_data_device = wl_data_device_manager_get_data_device_default, }; pub fn new_wl_data_device_manager(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_data_device_manager_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_data_device_manager_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // create_data_source 0 => { var id: u32 = try object.context.next_u32(); if (WL_DATA_DEVICE_MANAGER.create_data_source) |create_data_source| { try create_data_source(object.context, object, id); } }, // get_data_device 1 => { var id: u32 = try object.context.next_u32(); var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != wl_seat_dispatch) { return error.ObjectWrongType; } if (WL_DATA_DEVICE_MANAGER.get_data_device) |get_data_device| { try get_data_device(object.context, object, id, seat); } }, else => {}, } } pub const wl_data_device_manager_dnd_action = enum(u32) { none = 0, copy = 1, move = 2, ask = 4, }; // wl_shell pub const wl_shell_interface = struct { // create desktop-style surfaces get_shell_surface: ?fn (*Context, Object, u32, Object) anyerror!void, }; fn wl_shell_get_shell_surface_default(context: *Context, object: Object, id: u32, surface: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SHELL = wl_shell_interface{ .get_shell_surface = wl_shell_get_shell_surface_default, }; pub fn new_wl_shell(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_shell_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_shell_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // get_shell_surface 0 => { var id: u32 = try object.context.next_u32(); var surface: Object = object.context.objects.get(try object.context.next_u32()).?; if (surface.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } if (WL_SHELL.get_shell_surface) |get_shell_surface| { try get_shell_surface(object.context, object, id, surface); } }, else => {}, } } pub const wl_shell_error = enum(u32) { role = 0, }; // wl_shell_surface pub const wl_shell_surface_interface = struct { // desktop-style metadata interface pong: ?fn (*Context, Object, u32) anyerror!void, move: ?fn (*Context, Object, Object, u32) anyerror!void, resize: ?fn (*Context, Object, Object, u32, u32) anyerror!void, set_toplevel: ?fn ( *Context, Object, ) anyerror!void, set_transient: ?fn (*Context, Object, Object, i32, i32, u32) anyerror!void, set_fullscreen: ?fn (*Context, Object, u32, u32, ?Object) anyerror!void, set_popup: ?fn 
(*Context, Object, Object, u32, Object, i32, i32, u32) anyerror!void, set_maximized: ?fn (*Context, Object, ?Object) anyerror!void, set_title: ?fn (*Context, Object, []u8) anyerror!void, set_class: ?fn (*Context, Object, []u8) anyerror!void, }; fn wl_shell_surface_pong_default(context: *Context, object: Object, serial: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_move_default(context: *Context, object: Object, seat: Object, serial: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_resize_default(context: *Context, object: Object, seat: Object, serial: u32, edges: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_set_toplevel_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_set_transient_default(context: *Context, object: Object, parent: Object, x: i32, y: i32, flags: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_set_fullscreen_default(context: *Context, object: Object, method: u32, framerate: u32, output: ?Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_set_popup_default(context: *Context, object: Object, seat: Object, serial: u32, parent: Object, x: i32, y: i32, flags: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_set_maximized_default(context: *Context, object: Object, output: ?Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_set_title_default(context: *Context, object: Object, title: []u8) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_shell_surface_set_class_default(context: *Context, object: Object, class_: []u8) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SHELL_SURFACE = wl_shell_surface_interface{ .pong = wl_shell_surface_pong_default, .move = wl_shell_surface_move_default, .resize = wl_shell_surface_resize_default, .set_toplevel = wl_shell_surface_set_toplevel_default, .set_transient = wl_shell_surface_set_transient_default, .set_fullscreen = wl_shell_surface_set_fullscreen_default, .set_popup = wl_shell_surface_set_popup_default, .set_maximized = wl_shell_surface_set_maximized_default, .set_title = wl_shell_surface_set_title_default, .set_class = wl_shell_surface_set_class_default, }; pub fn new_wl_shell_surface(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_shell_surface_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_shell_surface_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // pong 0 => { var serial: u32 = try object.context.next_u32(); if (WL_SHELL_SURFACE.pong) |pong| { try pong(object.context, object, serial); } }, // move 1 => { var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != wl_seat_dispatch) { return error.ObjectWrongType; } var serial: u32 = try object.context.next_u32(); if (WL_SHELL_SURFACE.move) |move| { try move(object.context, object, seat, serial); } }, // resize 2 => { var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != wl_seat_dispatch) { return error.ObjectWrongType; } var serial: u32 = try object.context.next_u32(); var edges: u32 = try object.context.next_u32(); if (WL_SHELL_SURFACE.resize) |resize| { try resize(object.context, object, seat, serial, edges); } }, // 
set_toplevel 3 => { if (WL_SHELL_SURFACE.set_toplevel) |set_toplevel| { try set_toplevel( object.context, object, ); } }, // set_transient 4 => { var parent: Object = object.context.objects.get(try object.context.next_u32()).?; if (parent.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var flags: u32 = try object.context.next_u32(); if (WL_SHELL_SURFACE.set_transient) |set_transient| { try set_transient(object.context, object, parent, x, y, flags); } }, // set_fullscreen 5 => { var method: u32 = try object.context.next_u32(); var framerate: u32 = try object.context.next_u32(); var output: ?Object = object.context.objects.get(try object.context.next_u32()); if (output != null) { if (output.?.dispatch != wl_output_dispatch) { return error.ObjectWrongType; } } if (WL_SHELL_SURFACE.set_fullscreen) |set_fullscreen| { try set_fullscreen(object.context, object, method, framerate, output); } }, // set_popup 6 => { var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != wl_seat_dispatch) { return error.ObjectWrongType; } var serial: u32 = try object.context.next_u32(); var parent: Object = object.context.objects.get(try object.context.next_u32()).?; if (parent.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var flags: u32 = try object.context.next_u32(); if (WL_SHELL_SURFACE.set_popup) |set_popup| { try set_popup(object.context, object, seat, serial, parent, x, y, flags); } }, // set_maximized 7 => { var output: ?Object = object.context.objects.get(try object.context.next_u32()); if (output != null) { if (output.?.dispatch != wl_output_dispatch) { return error.ObjectWrongType; } } if (WL_SHELL_SURFACE.set_maximized) |set_maximized| { try set_maximized(object.context, object, output); } }, // set_title 8 => { var title: []u8 = try object.context.next_string(); if (WL_SHELL_SURFACE.set_title) |set_title| { try set_title(object.context, object, title); } }, // set_class 9 => { var class_: []u8 = try object.context.next_string(); if (WL_SHELL_SURFACE.set_class) |set_class| { try set_class(object.context, object, class_); } }, else => {}, } } pub const wl_shell_surface_resize = enum(u32) { none = 0, top = 1, bottom = 2, left = 4, top_left = 5, bottom_left = 6, right = 8, top_right = 9, bottom_right = 10, }; pub const wl_shell_surface_transient = enum(u32) { inactive = 0x1, }; pub const wl_shell_surface_fullscreen_method = enum(u32) { default = 0, scale = 1, driver = 2, fill = 3, }; // // Ping a client to check if it is receiving events and sending // requests. A client is expected to reply with a pong request. // pub fn wl_shell_surface_send_ping(object: Object, serial: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.finishWrite(object.id, 0); } // // The configure event asks the client to resize its surface. // // The size is a hint, in the sense that the client is free to // ignore it if it doesn't resize, pick a smaller size (to // satisfy aspect ratio or resize in steps of NxM pixels). // // The edges parameter provides a hint about how the surface // was resized. The client may use this information to decide // how to adjust its content to the new size (e.g. a scrolling // area might adjust its content position to leave the viewable // content unmoved). 
// // The client is free to dismiss all but the last configure // event it received. // // The width and height arguments specify the size of the window // in surface-local coordinates. // pub fn wl_shell_surface_send_configure(object: Object, edges: u32, width: i32, height: i32) anyerror!void { object.context.startWrite(); object.context.putU32(edges); object.context.putI32(width); object.context.putI32(height); object.context.finishWrite(object.id, 1); } // // The popup_done event is sent out when a popup grab is broken, // that is, when the user clicks a surface that doesn't belong // to the client owning the popup surface. // pub fn wl_shell_surface_send_popup_done(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 2); } // wl_surface pub const wl_surface_interface = struct { // an onscreen surface destroy: ?fn ( *Context, Object, ) anyerror!void, attach: ?fn (*Context, Object, ?Object, i32, i32) anyerror!void, damage: ?fn (*Context, Object, i32, i32, i32, i32) anyerror!void, frame: ?fn (*Context, Object, u32) anyerror!void, set_opaque_region: ?fn (*Context, Object, ?Object) anyerror!void, set_input_region: ?fn (*Context, Object, ?Object) anyerror!void, commit: ?fn ( *Context, Object, ) anyerror!void, set_buffer_transform: ?fn (*Context, Object, i32) anyerror!void, set_buffer_scale: ?fn (*Context, Object, i32) anyerror!void, damage_buffer: ?fn (*Context, Object, i32, i32, i32, i32) anyerror!void, }; fn wl_surface_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_attach_default(context: *Context, object: Object, buffer: ?Object, x: i32, y: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_damage_default(context: *Context, object: Object, x: i32, y: i32, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_frame_default(context: *Context, object: Object, callback: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_set_opaque_region_default(context: *Context, object: Object, region: ?Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_set_input_region_default(context: *Context, object: Object, region: ?Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_commit_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_set_buffer_transform_default(context: *Context, object: Object, transform: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_set_buffer_scale_default(context: *Context, object: Object, scale: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_surface_damage_buffer_default(context: *Context, object: Object, x: i32, y: i32, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SURFACE = wl_surface_interface{ .destroy = wl_surface_destroy_default, .attach = wl_surface_attach_default, .damage = wl_surface_damage_default, .frame = wl_surface_frame_default, .set_opaque_region = wl_surface_set_opaque_region_default, .set_input_region = wl_surface_set_input_region_default, .commit = wl_surface_commit_default, .set_buffer_transform = wl_surface_set_buffer_transform_default, .set_buffer_scale = wl_surface_set_buffer_scale_default, .damage_buffer = wl_surface_damage_buffer_default, }; pub fn new_wl_surface(id: u32, context: *Context, container: usize) Object 
{ return Object{ .id = id, .dispatch = wl_surface_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_surface_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (WL_SURFACE.destroy) |destroy| { try destroy( object.context, object, ); } }, // attach 1 => { var buffer: ?Object = object.context.objects.get(try object.context.next_u32()); if (buffer != null) { if (buffer.?.dispatch != wl_buffer_dispatch) { return error.ObjectWrongType; } } var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); if (WL_SURFACE.attach) |attach| { try attach(object.context, object, buffer, x, y); } }, // damage 2 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (WL_SURFACE.damage) |damage| { try damage(object.context, object, x, y, width, height); } }, // frame 3 => { var callback: u32 = try object.context.next_u32(); if (WL_SURFACE.frame) |frame| { try frame(object.context, object, callback); } }, // set_opaque_region 4 => { var region: ?Object = object.context.objects.get(try object.context.next_u32()); if (region != null) { if (region.?.dispatch != wl_region_dispatch) { return error.ObjectWrongType; } } if (WL_SURFACE.set_opaque_region) |set_opaque_region| { try set_opaque_region(object.context, object, region); } }, // set_input_region 5 => { var region: ?Object = object.context.objects.get(try object.context.next_u32()); if (region != null) { if (region.?.dispatch != wl_region_dispatch) { return error.ObjectWrongType; } } if (WL_SURFACE.set_input_region) |set_input_region| { try set_input_region(object.context, object, region); } }, // commit 6 => { if (WL_SURFACE.commit) |commit| { try commit( object.context, object, ); } }, // set_buffer_transform 7 => { var transform: i32 = try object.context.next_i32(); if (WL_SURFACE.set_buffer_transform) |set_buffer_transform| { try set_buffer_transform(object.context, object, transform); } }, // set_buffer_scale 8 => { var scale: i32 = try object.context.next_i32(); if (WL_SURFACE.set_buffer_scale) |set_buffer_scale| { try set_buffer_scale(object.context, object, scale); } }, // damage_buffer 9 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (WL_SURFACE.damage_buffer) |damage_buffer| { try damage_buffer(object.context, object, x, y, width, height); } }, else => {}, } } pub const wl_surface_error = enum(u32) { invalid_scale = 0, invalid_transform = 1, }; // // This is emitted whenever a surface's creation, movement, or resizing // results in some part of it being within the scanout region of an // output. // // Note that a surface may be overlapping with zero or more outputs. // pub fn wl_surface_send_enter(object: Object, output: u32) anyerror!void { object.context.startWrite(); object.context.putU32(output); object.context.finishWrite(object.id, 0); } // // This is emitted whenever a surface's creation, movement, or resizing // results in it no longer having any part of it within the scanout region // of an output. 
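// An illustrative sketch, not generated code: when a surface is moved from
// one output to another, a compositor could pair a leave event for the old
// output with an enter event for the new one. The output arguments are the
// client-side wl_output object ids and are placeholders here.
fn example_surface_changed_output(surface: Object, old_output: u32, new_output: u32) anyerror!void {
    try wl_surface_send_leave(surface, old_output);
    try wl_surface_send_enter(surface, new_output);
}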
// pub fn wl_surface_send_leave(object: Object, output: u32) anyerror!void { object.context.startWrite(); object.context.putU32(output); object.context.finishWrite(object.id, 1); } // wl_seat pub const wl_seat_interface = struct { // group of input devices get_pointer: ?fn (*Context, Object, u32) anyerror!void, get_keyboard: ?fn (*Context, Object, u32) anyerror!void, get_touch: ?fn (*Context, Object, u32) anyerror!void, release: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_seat_get_pointer_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_seat_get_keyboard_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_seat_get_touch_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_seat_release_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SEAT = wl_seat_interface{ .get_pointer = wl_seat_get_pointer_default, .get_keyboard = wl_seat_get_keyboard_default, .get_touch = wl_seat_get_touch_default, .release = wl_seat_release_default, }; pub fn new_wl_seat(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_seat_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_seat_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // get_pointer 0 => { var id: u32 = try object.context.next_u32(); if (WL_SEAT.get_pointer) |get_pointer| { try get_pointer(object.context, object, id); } }, // get_keyboard 1 => { var id: u32 = try object.context.next_u32(); if (WL_SEAT.get_keyboard) |get_keyboard| { try get_keyboard(object.context, object, id); } }, // get_touch 2 => { var id: u32 = try object.context.next_u32(); if (WL_SEAT.get_touch) |get_touch| { try get_touch(object.context, object, id); } }, // release 3 => { if (WL_SEAT.release) |release| { try release( object.context, object, ); } }, else => {}, } } pub const wl_seat_capability = enum(u32) { pointer = 1, keyboard = 2, touch = 4, }; // // This is emitted whenever a seat gains or loses the pointer, // keyboard or touch capabilities. The argument is a capability // enum containing the complete set of capabilities this seat has. // // When the pointer capability is added, a client may create a // wl_pointer object using the wl_seat.get_pointer request. This object // will receive pointer events until the capability is removed in the // future. // // When the pointer capability is removed, a client should destroy the // wl_pointer objects associated with the seat where the capability was // removed, using the wl_pointer.release request. No further pointer // events will be received on these objects. // // In some compositors, if a seat regains the pointer capability and a // client has a previously obtained wl_pointer object of version 4 or // less, that object may start sending pointer events again. This // behavior is considered a misinterpretation of the intended behavior // and must not be relied upon by the client. wl_pointer objects of // version 5 or later must not send events if created before the most // recent event notifying the client of an added pointer capability. // // The above behavior also applies to wl_keyboard and wl_touch with the // keyboard and touch capabilities, respectively. 
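// An illustrative sketch, not generated code: advertising a seat that has a
// pointer and a keyboard but no touch device, followed by a human-readable
// name. The capability bits come from wl_seat_capability above; the "seat0"
// name is a placeholder for illustration.
fn example_advertise_seat(seat: Object) anyerror!void {
    const caps = @enumToInt(wl_seat_capability.pointer) | @enumToInt(wl_seat_capability.keyboard);
    try wl_seat_send_capabilities(seat, caps);
    try wl_seat_send_name(seat, "seat0");
}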
// pub fn wl_seat_send_capabilities(object: Object, capabilities: u32) anyerror!void { object.context.startWrite(); object.context.putU32(capabilities); object.context.finishWrite(object.id, 0); } // // In a multiseat configuration this can be used by the client to help // identify which physical devices the seat represents. Based on // the seat configuration used by the compositor. // pub fn wl_seat_send_name(object: Object, name: []const u8) anyerror!void { object.context.startWrite(); object.context.putString(name); object.context.finishWrite(object.id, 1); } // wl_pointer pub const wl_pointer_interface = struct { // pointer input device set_cursor: ?fn (*Context, Object, u32, ?Object, i32, i32) anyerror!void, release: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_pointer_set_cursor_default(context: *Context, object: Object, serial: u32, surface: ?Object, hotspot_x: i32, hotspot_y: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_pointer_release_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_POINTER = wl_pointer_interface{ .set_cursor = wl_pointer_set_cursor_default, .release = wl_pointer_release_default, }; pub fn new_wl_pointer(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_pointer_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_pointer_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // set_cursor 0 => { var serial: u32 = try object.context.next_u32(); var surface: ?Object = object.context.objects.get(try object.context.next_u32()); if (surface != null) { if (surface.?.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } } var hotspot_x: i32 = try object.context.next_i32(); var hotspot_y: i32 = try object.context.next_i32(); if (WL_POINTER.set_cursor) |set_cursor| { try set_cursor(object.context, object, serial, surface, hotspot_x, hotspot_y); } }, // release 1 => { if (WL_POINTER.release) |release| { try release( object.context, object, ); } }, else => {}, } } pub const wl_pointer_error = enum(u32) { role = 0, }; pub const wl_pointer_button_state = enum(u32) { released = 0, pressed = 1, }; pub const wl_pointer_axis = enum(u32) { vertical_scroll = 0, horizontal_scroll = 1, }; pub const wl_pointer_axis_source = enum(u32) { wheel = 0, finger = 1, continuous = 2, wheel_tilt = 3, }; // // Notification that this seat's pointer is focused on a certain // surface. // // When a seat's focus enters a surface, the pointer image // is undefined and a client should respond to this event by setting // an appropriate pointer image with the set_cursor request. // pub fn wl_pointer_send_enter(object: Object, serial: u32, surface: u32, surface_x: f32, surface_y: f32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(surface); object.context.putFixed(surface_x); object.context.putFixed(surface_y); object.context.finishWrite(object.id, 0); } // // Notification that this seat's pointer is no longer focused on // a certain surface. // // The leave notification is sent before the enter notification // for the new focus. // pub fn wl_pointer_send_leave(object: Object, serial: u32, surface: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(surface); object.context.finishWrite(object.id, 1); } // // Notification of pointer location change. 
The arguments // surface_x and surface_y are the location relative to the // focused surface. // pub fn wl_pointer_send_motion(object: Object, time: u32, surface_x: f32, surface_y: f32) anyerror!void { object.context.startWrite(); object.context.putU32(time); object.context.putFixed(surface_x); object.context.putFixed(surface_y); object.context.finishWrite(object.id, 2); } // // Mouse button click and release notifications. // // The location of the click is given by the last motion or // enter event. // The time argument is a timestamp with millisecond // granularity, with an undefined base. // // The button is a button code as defined in the Linux kernel's // linux/input-event-codes.h header file, e.g. BTN_LEFT. // // Any 16-bit button code value is reserved for future additions to the // kernel's event code list. All other button codes above 0xFFFF are // currently undefined but may be used in future versions of this // protocol. // pub fn wl_pointer_send_button(object: Object, serial: u32, time: u32, button: u32, state: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(time); object.context.putU32(button); object.context.putU32(state); object.context.finishWrite(object.id, 3); } // // Scroll and other axis notifications. // // For scroll events (vertical and horizontal scroll axes), the // value parameter is the length of a vector along the specified // axis in a coordinate space identical to those of motion events, // representing a relative movement along the specified axis. // // For devices that support movements non-parallel to axes multiple // axis events will be emitted. // // When applicable, for example for touch pads, the server can // choose to emit scroll events where the motion vector is // equivalent to a motion event vector. // // When applicable, a client can transform its content relative to the // scroll distance. // pub fn wl_pointer_send_axis(object: Object, time: u32, axis: u32, value: f32) anyerror!void { object.context.startWrite(); object.context.putU32(time); object.context.putU32(axis); object.context.putFixed(value); object.context.finishWrite(object.id, 4); } // // Indicates the end of a set of events that logically belong together. // A client is expected to accumulate the data in all events within the // frame before proceeding. // // All wl_pointer events before a wl_pointer.frame event belong // logically together. For example, in a diagonal scroll motion the // compositor will send an optional wl_pointer.axis_source event, two // wl_pointer.axis events (horizontal and vertical) and finally a // wl_pointer.frame event. The client may use this information to // calculate a diagonal vector for scrolling. // // When multiple wl_pointer.axis events occur within the same frame, // the motion vector is the combined motion of all events. // When a wl_pointer.axis and a wl_pointer.axis_stop event occur within // the same frame, this indicates that axis movement in one axis has // stopped but continues in the other axis. // When multiple wl_pointer.axis_stop events occur within the same // frame, this indicates that these axes stopped in the same instance. // // A wl_pointer.frame event is sent for every logical event group, // even if the group only contains a single wl_pointer event. // Specifically, a client may get a sequence: motion, frame, button, // frame, axis, frame, axis_stop, frame. 
// // The wl_pointer.enter and wl_pointer.leave events are logical events // generated by the compositor and not the hardware. These events are // also grouped by a wl_pointer.frame. When a pointer moves from one // surface to another, a compositor should group the // wl_pointer.leave event within the same wl_pointer.frame. // However, a client must not rely on wl_pointer.leave and // wl_pointer.enter being in the same wl_pointer.frame. // Compositor-specific policies may require the wl_pointer.leave and // wl_pointer.enter event being split across multiple wl_pointer.frame // groups. // pub fn wl_pointer_send_frame(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 5); } // // Source information for scroll and other axes. // // This event does not occur on its own. It is sent before a // wl_pointer.frame event and carries the source information for // all events within that frame. // // The source specifies how this event was generated. If the source is // wl_pointer.axis_source.finger, a wl_pointer.axis_stop event will be // sent when the user lifts the finger off the device. // // If the source is wl_pointer.axis_source.wheel, // wl_pointer.axis_source.wheel_tilt or // wl_pointer.axis_source.continuous, a wl_pointer.axis_stop event may // or may not be sent. Whether a compositor sends an axis_stop event // for these sources is hardware-specific and implementation-dependent; // clients must not rely on receiving an axis_stop event for these // scroll sources and should treat scroll sequences from these scroll // sources as unterminated by default. // // This event is optional. If the source is unknown for a particular // axis event sequence, no event is sent. // Only one wl_pointer.axis_source event is permitted per frame. // // The order of wl_pointer.axis_discrete and wl_pointer.axis_source is // not guaranteed. // pub fn wl_pointer_send_axis_source(object: Object, axis_source: u32) anyerror!void { object.context.startWrite(); object.context.putU32(axis_source); object.context.finishWrite(object.id, 6); } // // Stop notification for scroll and other axes. // // For some wl_pointer.axis_source types, a wl_pointer.axis_stop event // is sent to notify a client that the axis sequence has terminated. // This enables the client to implement kinetic scrolling. // See the wl_pointer.axis_source documentation for information on when // this event may be generated. // // Any wl_pointer.axis events with the same axis_source after this // event should be considered as the start of a new axis motion. // // The timestamp is to be interpreted identical to the timestamp in the // wl_pointer.axis event. The timestamp value may be the same as a // preceding wl_pointer.axis event. // pub fn wl_pointer_send_axis_stop(object: Object, time: u32, axis: u32) anyerror!void { object.context.startWrite(); object.context.putU32(time); object.context.putU32(axis); object.context.finishWrite(object.id, 7); } // // Discrete step information for scroll and other axes. // // This event carries the axis value of the wl_pointer.axis event in // discrete steps (e.g. mouse wheel clicks). // // This event does not occur on its own, it is coupled with a // wl_pointer.axis event that represents this axis value on a // continuous scale. The protocol guarantees that each axis_discrete // event is always followed by exactly one axis event with the same // axis number within the same wl_pointer.frame. 
Note that the protocol // allows for other events to occur between the axis_discrete and // its coupled axis event, including other axis_discrete or axis // events. // // This event is optional; continuous scrolling devices // like two-finger scrolling on touchpads do not have discrete // steps and do not generate this event. // // The discrete value carries the directional information. e.g. a value // of -2 is two steps towards the negative direction of this axis. // // The axis number is identical to the axis number in the associated // axis event. // // The order of wl_pointer.axis_discrete and wl_pointer.axis_source is // not guaranteed. // pub fn wl_pointer_send_axis_discrete(object: Object, axis: u32, discrete: i32) anyerror!void { object.context.startWrite(); object.context.putU32(axis); object.context.putI32(discrete); object.context.finishWrite(object.id, 8); } // wl_keyboard pub const wl_keyboard_interface = struct { // keyboard input device release: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_keyboard_release_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_KEYBOARD = wl_keyboard_interface{ .release = wl_keyboard_release_default, }; pub fn new_wl_keyboard(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_keyboard_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_keyboard_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // release 0 => { if (WL_KEYBOARD.release) |release| { try release( object.context, object, ); } }, else => {}, } } pub const wl_keyboard_keymap_format = enum(u32) { no_keymap = 0, xkb_v1 = 1, }; pub const wl_keyboard_key_state = enum(u32) { released = 0, pressed = 1, }; // // This event provides a file descriptor to the client which can be // memory-mapped to provide a keyboard mapping description. // // From version 7 onwards, the fd must be mapped with MAP_PRIVATE by // the recipient, as MAP_SHARED may fail. // pub fn wl_keyboard_send_keymap(object: Object, format: u32, fd: i32, size: u32) anyerror!void { object.context.startWrite(); object.context.putU32(format); object.context.putFd(fd); object.context.putU32(size); object.context.finishWrite(object.id, 0); } // // Notification that this seat's keyboard focus is on a certain // surface. // pub fn wl_keyboard_send_enter(object: Object, serial: u32, surface: u32, keys: []u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(surface); object.context.putArray(keys); object.context.finishWrite(object.id, 1); } // // Notification that this seat's keyboard focus is no longer on // a certain surface. // // The leave notification is sent before the enter notification // for the new focus. // pub fn wl_keyboard_send_leave(object: Object, serial: u32, surface: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(surface); object.context.finishWrite(object.id, 2); } // // A key was pressed or released. // The time argument is a timestamp with millisecond // granularity, with an undefined base. 
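// An illustrative sketch, not generated code: forwarding a key press and its
// matching release. The serial and timestamp arithmetic is a placeholder; a
// real compositor would take both values from its own input state.
fn example_forward_key_tap(keyboard: Object, serial: u32, time: u32, key: u32) anyerror!void {
    try wl_keyboard_send_key(keyboard, serial, time, key, @enumToInt(wl_keyboard_key_state.pressed));
    try wl_keyboard_send_key(keyboard, serial + 1, time + 10, key, @enumToInt(wl_keyboard_key_state.released));
}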
// pub fn wl_keyboard_send_key(object: Object, serial: u32, time: u32, key: u32, state: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(time); object.context.putU32(key); object.context.putU32(state); object.context.finishWrite(object.id, 3); } // // Notifies clients that the modifier and/or group state has // changed, and it should update its local state. // pub fn wl_keyboard_send_modifiers(object: Object, serial: u32, mods_depressed: u32, mods_latched: u32, mods_locked: u32, group: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(mods_depressed); object.context.putU32(mods_latched); object.context.putU32(mods_locked); object.context.putU32(group); object.context.finishWrite(object.id, 4); } // // Informs the client about the keyboard's repeat rate and delay. // // This event is sent as soon as the wl_keyboard object has been created, // and is guaranteed to be received by the client before any key press // event. // // Negative values for either rate or delay are illegal. A rate of zero // will disable any repeating (regardless of the value of delay). // // This event can be sent later on as well with a new value if necessary, // so clients should continue listening for the event past the creation // of wl_keyboard. // pub fn wl_keyboard_send_repeat_info(object: Object, rate: i32, delay: i32) anyerror!void { object.context.startWrite(); object.context.putI32(rate); object.context.putI32(delay); object.context.finishWrite(object.id, 5); } // wl_touch pub const wl_touch_interface = struct { // touchscreen input device release: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_touch_release_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_TOUCH = wl_touch_interface{ .release = wl_touch_release_default, }; pub fn new_wl_touch(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_touch_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_touch_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // release 0 => { if (WL_TOUCH.release) |release| { try release( object.context, object, ); } }, else => {}, } } // // A new touch point has appeared on the surface. This touch point is // assigned a unique ID. Future events from this touch point reference // this ID. The ID ceases to be valid after a touch up event and may be // reused in the future. // pub fn wl_touch_send_down(object: Object, serial: u32, time: u32, surface: u32, id: i32, x: f32, y: f32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(time); object.context.putU32(surface); object.context.putI32(id); object.context.putFixed(x); object.context.putFixed(y); object.context.finishWrite(object.id, 0); } // // The touch point has disappeared. No further events will be sent for // this touch point and the touch point's ID is released and may be // reused in a future touch down event. // pub fn wl_touch_send_up(object: Object, serial: u32, time: u32, id: i32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.putU32(time); object.context.putI32(id); object.context.finishWrite(object.id, 1); } // // A touch point has changed coordinates. 
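// An illustrative sketch, not generated code: reporting a touch point that
// moved and closing the logical group of events with a frame, as the frame
// documentation below requires. The coordinates are whatever the compositor
// computed in surface-local space; nothing here is specific to this file.
fn example_touch_moved(touch: Object, time: u32, id: i32, x: f32, y: f32) anyerror!void {
    try wl_touch_send_motion(touch, time, id, x, y);
    try wl_touch_send_frame(touch);
}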
// pub fn wl_touch_send_motion(object: Object, time: u32, id: i32, x: f32, y: f32) anyerror!void { object.context.startWrite(); object.context.putU32(time); object.context.putI32(id); object.context.putFixed(x); object.context.putFixed(y); object.context.finishWrite(object.id, 2); } // // Indicates the end of a set of events that logically belong together. // A client is expected to accumulate the data in all events within the // frame before proceeding. // // A wl_touch.frame terminates at least one event but otherwise no // guarantee is provided about the set of events within a frame. A client // must assume that any state not updated in a frame is unchanged from the // previously known state. // pub fn wl_touch_send_frame(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 3); } // // Sent if the compositor decides the touch stream is a global // gesture. No further events are sent to the clients from that // particular gesture. Touch cancellation applies to all touch points // currently active on this client's surface. The client is // responsible for finalizing the touch points, future touch points on // this surface may reuse the touch point ID. // pub fn wl_touch_send_cancel(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 4); } // // Sent when a touchpoint has changed its shape. // // This event does not occur on its own. It is sent before a // wl_touch.frame event and carries the new shape information for // any previously reported, or new touch points of that frame. // // Other events describing the touch point such as wl_touch.down, // wl_touch.motion or wl_touch.orientation may be sent within the // same wl_touch.frame. A client should treat these events as a single // logical touch point update. The order of wl_touch.shape, // wl_touch.orientation and wl_touch.motion is not guaranteed. // A wl_touch.down event is guaranteed to occur before the first // wl_touch.shape event for this touch ID but both events may occur within // the same wl_touch.frame. // // A touchpoint shape is approximated by an ellipse through the major and // minor axis length. The major axis length describes the longer diameter // of the ellipse, while the minor axis length describes the shorter // diameter. Major and minor are orthogonal and both are specified in // surface-local coordinates. The center of the ellipse is always at the // touchpoint location as reported by wl_touch.down or wl_touch.move. // // This event is only sent by the compositor if the touch device supports // shape reports. The client has to make reasonable assumptions about the // shape if it did not receive this event. // pub fn wl_touch_send_shape(object: Object, id: i32, major: f32, minor: f32) anyerror!void { object.context.startWrite(); object.context.putI32(id); object.context.putFixed(major); object.context.putFixed(minor); object.context.finishWrite(object.id, 5); } // // Sent when a touchpoint has changed its orientation. // // This event does not occur on its own. It is sent before a // wl_touch.frame event and carries the new shape information for // any previously reported, or new touch points of that frame. // // Other events describing the touch point such as wl_touch.down, // wl_touch.motion or wl_touch.shape may be sent within the // same wl_touch.frame. A client should treat these events as a single // logical touch point update. The order of wl_touch.shape, // wl_touch.orientation and wl_touch.motion is not guaranteed. 
// A wl_touch.down event is guaranteed to occur before the first // wl_touch.orientation event for this touch ID but both events may occur // within the same wl_touch.frame. // // The orientation describes the clockwise angle of a touchpoint's major // axis to the positive surface y-axis and is normalized to the -180 to // +180 degree range. The granularity of orientation depends on the touch // device, some devices only support binary rotation values between 0 and // 90 degrees. // // This event is only sent by the compositor if the touch device supports // orientation reports. // pub fn wl_touch_send_orientation(object: Object, id: i32, orientation: f32) anyerror!void { object.context.startWrite(); object.context.putI32(id); object.context.putFixed(orientation); object.context.finishWrite(object.id, 6); } // wl_output pub const wl_output_interface = struct { // compositor output region release: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_output_release_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_OUTPUT = wl_output_interface{ .release = wl_output_release_default, }; pub fn new_wl_output(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_output_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_output_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // release 0 => { if (WL_OUTPUT.release) |release| { try release( object.context, object, ); } }, else => {}, } } pub const wl_output_subpixel = enum(u32) { unknown = 0, none = 1, horizontal_rgb = 2, horizontal_bgr = 3, vertical_rgb = 4, vertical_bgr = 5, }; pub const wl_output_transform = enum(u32) { normal = 0, @"90" = 1, @"180" = 2, @"270" = 3, flipped = 4, flipped_90 = 5, flipped_180 = 6, flipped_270 = 7, }; pub const wl_output_mode = enum(u32) { current = 0x1, preferred = 0x2, }; // // The geometry event describes geometric properties of the output. // The event is sent when binding to the output object and whenever // any of the properties change. // // The physical size can be set to zero if it doesn't make sense for this // output (e.g. for projectors or virtual outputs). // // Note: wl_output only advertises partial information about the output // position and identification. Some compositors, for instance those not // implementing a desktop-style output layout or those exposing virtual // outputs, might fake this information. Instead of using x and y, clients // should use xdg_output.logical_position. Instead of using make and model, // clients should use xdg_output.name and xdg_output.description. // pub fn wl_output_send_geometry(object: Object, x: i32, y: i32, physical_width: i32, physical_height: i32, subpixel: i32, make: []const u8, model: []const u8, transform: i32) anyerror!void { object.context.startWrite(); object.context.putI32(x); object.context.putI32(y); object.context.putI32(physical_width); object.context.putI32(physical_height); object.context.putI32(subpixel); object.context.putString(make); object.context.putString(model); object.context.putI32(transform); object.context.finishWrite(object.id, 0); } // // The mode event describes an available mode for the output. // // The event is sent when binding to the output object and there // will always be one mode, the current mode. The event is sent // again if an output changes mode, for the mode that is now // current. 
In other words, the current mode is always the last // mode that was received with the current flag set. // // The size of a mode is given in physical hardware units of // the output device. This is not necessarily the same as // the output size in the global compositor space. For instance, // the output may be scaled, as described in wl_output.scale, // or transformed, as described in wl_output.transform. Clients // willing to retrieve the output size in the global compositor // space should use xdg_output.logical_size instead. // // Clients should not use the refresh rate to schedule frames. Instead, // they should use the wl_surface.frame event or the presentation-time // protocol. // // Note: this information is not always meaningful for all outputs. Some // compositors, such as those exposing virtual outputs, might fake the // refresh rate or the size. // pub fn wl_output_send_mode(object: Object, flags: u32, width: i32, height: i32, refresh: i32) anyerror!void { object.context.startWrite(); object.context.putU32(flags); object.context.putI32(width); object.context.putI32(height); object.context.putI32(refresh); object.context.finishWrite(object.id, 1); } // // This event is sent after all other properties have been // sent after binding to the output object and after any // other property changes done after that. This allows // changes to the output properties to be seen as // atomic, even if they happen via multiple events. // pub fn wl_output_send_done(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 2); } // // This event contains scaling geometry information // that is not in the geometry event. It may be sent after // binding the output object or if the output scale changes // later. If it is not sent, the client should assume a // scale of 1. // // A scale larger than 1 means that the compositor will // automatically scale surface buffers by this amount // when rendering. This is used for very high resolution // displays where applications rendering at the native // resolution would be too small to be legible. // // It is intended that scaling aware clients track the // current output of a surface, and if it is on a scaled // output it should use wl_surface.set_buffer_scale with // the scale of the output. That way the compositor can // avoid scaling the surface, and the client can supply // a higher detail image. 
// pub fn wl_output_send_scale(object: Object, factor: i32) anyerror!void { object.context.startWrite(); object.context.putI32(factor); object.context.finishWrite(object.id, 3); } // wl_region pub const wl_region_interface = struct { // region interface destroy: ?fn ( *Context, Object, ) anyerror!void, add: ?fn (*Context, Object, i32, i32, i32, i32) anyerror!void, subtract: ?fn (*Context, Object, i32, i32, i32, i32) anyerror!void, }; fn wl_region_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_region_add_default(context: *Context, object: Object, x: i32, y: i32, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_region_subtract_default(context: *Context, object: Object, x: i32, y: i32, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_REGION = wl_region_interface{ .destroy = wl_region_destroy_default, .add = wl_region_add_default, .subtract = wl_region_subtract_default, }; pub fn new_wl_region(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_region_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_region_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (WL_REGION.destroy) |destroy| { try destroy( object.context, object, ); } }, // add 1 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (WL_REGION.add) |add| { try add(object.context, object, x, y, width, height); } }, // subtract 2 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (WL_REGION.subtract) |subtract| { try subtract(object.context, object, x, y, width, height); } }, else => {}, } } // wl_subcompositor pub const wl_subcompositor_interface = struct { // sub-surface compositing destroy: ?fn ( *Context, Object, ) anyerror!void, get_subsurface: ?fn (*Context, Object, u32, Object, Object) anyerror!void, }; fn wl_subcompositor_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_subcompositor_get_subsurface_default(context: *Context, object: Object, id: u32, surface: Object, parent: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SUBCOMPOSITOR = wl_subcompositor_interface{ .destroy = wl_subcompositor_destroy_default, .get_subsurface = wl_subcompositor_get_subsurface_default, }; pub fn new_wl_subcompositor(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_subcompositor_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_subcompositor_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (WL_SUBCOMPOSITOR.destroy) |destroy| { try destroy( object.context, object, ); } }, // get_subsurface 1 => { var id: u32 = try object.context.next_u32(); var surface: Object = object.context.objects.get(try object.context.next_u32()).?; if (surface.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } var parent: Object = object.context.objects.get(try object.context.next_u32()).?; if (parent.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } if 
(WL_SUBCOMPOSITOR.get_subsurface) |get_subsurface| { try get_subsurface(object.context, object, id, surface, parent); } }, else => {}, } } pub const wl_subcompositor_error = enum(u32) { bad_surface = 0, }; // wl_subsurface pub const wl_subsurface_interface = struct { // sub-surface interface to a wl_surface destroy: ?fn ( *Context, Object, ) anyerror!void, set_position: ?fn (*Context, Object, i32, i32) anyerror!void, place_above: ?fn (*Context, Object, Object) anyerror!void, place_below: ?fn (*Context, Object, Object) anyerror!void, set_sync: ?fn ( *Context, Object, ) anyerror!void, set_desync: ?fn ( *Context, Object, ) anyerror!void, }; fn wl_subsurface_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_subsurface_set_position_default(context: *Context, object: Object, x: i32, y: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_subsurface_place_above_default(context: *Context, object: Object, sibling: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_subsurface_place_below_default(context: *Context, object: Object, sibling: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_subsurface_set_sync_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn wl_subsurface_set_desync_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var WL_SUBSURFACE = wl_subsurface_interface{ .destroy = wl_subsurface_destroy_default, .set_position = wl_subsurface_set_position_default, .place_above = wl_subsurface_place_above_default, .place_below = wl_subsurface_place_below_default, .set_sync = wl_subsurface_set_sync_default, .set_desync = wl_subsurface_set_desync_default, }; pub fn new_wl_subsurface(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = wl_subsurface_dispatch, .context = context, .version = 0, .container = container, }; } fn wl_subsurface_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (WL_SUBSURFACE.destroy) |destroy| { try destroy( object.context, object, ); } }, // set_position 1 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); if (WL_SUBSURFACE.set_position) |set_position| { try set_position(object.context, object, x, y); } }, // place_above 2 => { var sibling: Object = object.context.objects.get(try object.context.next_u32()).?; if (sibling.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } if (WL_SUBSURFACE.place_above) |place_above| { try place_above(object.context, object, sibling); } }, // place_below 3 => { var sibling: Object = object.context.objects.get(try object.context.next_u32()).?; if (sibling.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } if (WL_SUBSURFACE.place_below) |place_below| { try place_below(object.context, object, sibling); } }, // set_sync 4 => { if (WL_SUBSURFACE.set_sync) |set_sync| { try set_sync( object.context, object, ); } }, // set_desync 5 => { if (WL_SUBSURFACE.set_desync) |set_desync| { try set_desync( object.context, object, ); } }, else => {}, } } pub const wl_subsurface_error = enum(u32) { bad_surface = 0, }; // xdg_wm_base pub const xdg_wm_base_interface = struct { // create desktop-style surfaces destroy: ?fn ( *Context, Object, ) anyerror!void, create_positioner: ?fn (*Context, Object, u32) anyerror!void, get_xdg_surface: ?fn (*Context, Object, u32, Object) 
anyerror!void, pong: ?fn (*Context, Object, u32) anyerror!void, }; fn xdg_wm_base_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_wm_base_create_positioner_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_wm_base_get_xdg_surface_default(context: *Context, object: Object, id: u32, surface: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_wm_base_pong_default(context: *Context, object: Object, serial: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var XDG_WM_BASE = xdg_wm_base_interface{ .destroy = xdg_wm_base_destroy_default, .create_positioner = xdg_wm_base_create_positioner_default, .get_xdg_surface = xdg_wm_base_get_xdg_surface_default, .pong = xdg_wm_base_pong_default, }; pub fn new_xdg_wm_base(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = xdg_wm_base_dispatch, .context = context, .version = 0, .container = container, }; } fn xdg_wm_base_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (XDG_WM_BASE.destroy) |destroy| { try destroy( object.context, object, ); } }, // create_positioner 1 => { var id: u32 = try object.context.next_u32(); if (XDG_WM_BASE.create_positioner) |create_positioner| { try create_positioner(object.context, object, id); } }, // get_xdg_surface 2 => { var id: u32 = try object.context.next_u32(); var surface: Object = object.context.objects.get(try object.context.next_u32()).?; if (surface.dispatch != wl_surface_dispatch) { return error.ObjectWrongType; } if (XDG_WM_BASE.get_xdg_surface) |get_xdg_surface| { try get_xdg_surface(object.context, object, id, surface); } }, // pong 3 => { var serial: u32 = try object.context.next_u32(); if (XDG_WM_BASE.pong) |pong| { try pong(object.context, object, serial); } }, else => {}, } } pub const xdg_wm_base_error = enum(u32) { role = 0, defunct_surfaces = 1, not_the_topmost_popup = 2, invalid_popup_parent = 3, invalid_surface_state = 4, invalid_positioner = 5, }; // // The ping event asks the client if it's still alive. Pass the // serial specified in the event back to the compositor by sending // a "pong" request back with the specified serial. See xdg_wm_base.pong. // // Compositors can use this to determine if the client is still // alive. It's unspecified what will happen if the client doesn't // respond to the ping request, or in what timeframe. Clients should // try to respond in a reasonable amount of time. // // A compositor is free to ping in any way it wants, but a client must // always respond to any xdg_wm_base object it created. 
// pub fn xdg_wm_base_send_ping(object: Object, serial: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.finishWrite(object.id, 0); } // xdg_positioner pub const xdg_positioner_interface = struct { // child surface positioner destroy: ?fn ( *Context, Object, ) anyerror!void, set_size: ?fn (*Context, Object, i32, i32) anyerror!void, set_anchor_rect: ?fn (*Context, Object, i32, i32, i32, i32) anyerror!void, set_anchor: ?fn (*Context, Object, u32) anyerror!void, set_gravity: ?fn (*Context, Object, u32) anyerror!void, set_constraint_adjustment: ?fn (*Context, Object, u32) anyerror!void, set_offset: ?fn (*Context, Object, i32, i32) anyerror!void, }; fn xdg_positioner_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_positioner_set_size_default(context: *Context, object: Object, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_positioner_set_anchor_rect_default(context: *Context, object: Object, x: i32, y: i32, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_positioner_set_anchor_default(context: *Context, object: Object, anchor: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_positioner_set_gravity_default(context: *Context, object: Object, gravity: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_positioner_set_constraint_adjustment_default(context: *Context, object: Object, constraint_adjustment: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_positioner_set_offset_default(context: *Context, object: Object, x: i32, y: i32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var XDG_POSITIONER = xdg_positioner_interface{ .destroy = xdg_positioner_destroy_default, .set_size = xdg_positioner_set_size_default, .set_anchor_rect = xdg_positioner_set_anchor_rect_default, .set_anchor = xdg_positioner_set_anchor_default, .set_gravity = xdg_positioner_set_gravity_default, .set_constraint_adjustment = xdg_positioner_set_constraint_adjustment_default, .set_offset = xdg_positioner_set_offset_default, }; pub fn new_xdg_positioner(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = xdg_positioner_dispatch, .context = context, .version = 0, .container = container, }; } fn xdg_positioner_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (XDG_POSITIONER.destroy) |destroy| { try destroy( object.context, object, ); } }, // set_size 1 => { var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (XDG_POSITIONER.set_size) |set_size| { try set_size(object.context, object, width, height); } }, // set_anchor_rect 2 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (XDG_POSITIONER.set_anchor_rect) |set_anchor_rect| { try set_anchor_rect(object.context, object, x, y, width, height); } }, // set_anchor 3 => { var anchor: u32 = try object.context.next_u32(); if (XDG_POSITIONER.set_anchor) |set_anchor| { try set_anchor(object.context, object, anchor); } }, // set_gravity 4 => { var gravity: u32 = try object.context.next_u32(); if (XDG_POSITIONER.set_gravity) |set_gravity| { try set_gravity(object.context, object, gravity); } }, // set_constraint_adjustment 5 => { var 
constraint_adjustment: u32 = try object.context.next_u32(); if (XDG_POSITIONER.set_constraint_adjustment) |set_constraint_adjustment| { try set_constraint_adjustment(object.context, object, constraint_adjustment); } }, // set_offset 6 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); if (XDG_POSITIONER.set_offset) |set_offset| { try set_offset(object.context, object, x, y); } }, else => {}, } } pub const xdg_positioner_error = enum(u32) { invalid_input = 0, }; pub const xdg_positioner_anchor = enum(u32) { none = 0, top = 1, bottom = 2, left = 3, right = 4, top_left = 5, bottom_left = 6, top_right = 7, bottom_right = 8, }; pub const xdg_positioner_gravity = enum(u32) { none = 0, top = 1, bottom = 2, left = 3, right = 4, top_left = 5, bottom_left = 6, top_right = 7, bottom_right = 8, }; pub const xdg_positioner_constraint_adjustment = enum(u32) { none = 0, slide_x = 1, slide_y = 2, flip_x = 4, flip_y = 8, resize_x = 16, resize_y = 32, }; // xdg_surface pub const xdg_surface_interface = struct { // desktop user interface surface base interface destroy: ?fn ( *Context, Object, ) anyerror!void, get_toplevel: ?fn (*Context, Object, u32) anyerror!void, get_popup: ?fn (*Context, Object, u32, ?Object, Object) anyerror!void, set_window_geometry: ?fn (*Context, Object, i32, i32, i32, i32) anyerror!void, ack_configure: ?fn (*Context, Object, u32) anyerror!void, }; fn xdg_surface_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_surface_get_toplevel_default(context: *Context, object: Object, id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_surface_get_popup_default(context: *Context, object: Object, id: u32, parent: ?Object, positioner: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_surface_set_window_geometry_default(context: *Context, object: Object, x: i32, y: i32, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_surface_ack_configure_default(context: *Context, object: Object, serial: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var XDG_SURFACE = xdg_surface_interface{ .destroy = xdg_surface_destroy_default, .get_toplevel = xdg_surface_get_toplevel_default, .get_popup = xdg_surface_get_popup_default, .set_window_geometry = xdg_surface_set_window_geometry_default, .ack_configure = xdg_surface_ack_configure_default, }; pub fn new_xdg_surface(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = xdg_surface_dispatch, .context = context, .version = 0, .container = container, }; } fn xdg_surface_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (XDG_SURFACE.destroy) |destroy| { try destroy( object.context, object, ); } }, // get_toplevel 1 => { var id: u32 = try object.context.next_u32(); if (XDG_SURFACE.get_toplevel) |get_toplevel| { try get_toplevel(object.context, object, id); } }, // get_popup 2 => { var id: u32 = try object.context.next_u32(); var parent: ?Object = object.context.objects.get(try object.context.next_u32()); if (parent != null) { if (parent.?.dispatch != xdg_surface_dispatch) { return error.ObjectWrongType; } } var positioner: Object = object.context.objects.get(try object.context.next_u32()).?; if (positioner.dispatch != xdg_positioner_dispatch) { return error.ObjectWrongType; } if (XDG_SURFACE.get_popup) |get_popup| { try get_popup(object.context, object, id, parent, 
positioner); } }, // set_window_geometry 3 => { var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (XDG_SURFACE.set_window_geometry) |set_window_geometry| { try set_window_geometry(object.context, object, x, y, width, height); } }, // ack_configure 4 => { var serial: u32 = try object.context.next_u32(); if (XDG_SURFACE.ack_configure) |ack_configure| { try ack_configure(object.context, object, serial); } }, else => {}, } } pub const xdg_surface_error = enum(u32) { not_constructed = 1, already_constructed = 2, unconfigured_buffer = 3, }; // // The configure event marks the end of a configure sequence. A configure // sequence is a set of one or more events configuring the state of the // xdg_surface, including the final xdg_surface.configure event. // // Where applicable, xdg_surface surface roles will during a configure // sequence extend this event as a latched state sent as events before the // xdg_surface.configure event. Such events should be considered to make up // a set of atomically applied configuration states, where the // xdg_surface.configure commits the accumulated state. // // Clients should arrange their surface for the new states, and then send // an ack_configure request with the serial sent in this configure event at // some point before committing the new surface. // // If the client receives multiple configure events before it can respond // to one, it is free to discard all but the last event it received. // pub fn xdg_surface_send_configure(object: Object, serial: u32) anyerror!void { object.context.startWrite(); object.context.putU32(serial); object.context.finishWrite(object.id, 0); } // xdg_toplevel pub const xdg_toplevel_interface = struct { // toplevel surface destroy: ?fn ( *Context, Object, ) anyerror!void, set_parent: ?fn (*Context, Object, ?Object) anyerror!void, set_title: ?fn (*Context, Object, []u8) anyerror!void, set_app_id: ?fn (*Context, Object, []u8) anyerror!void, show_window_menu: ?fn (*Context, Object, Object, u32, i32, i32) anyerror!void, move: ?fn (*Context, Object, Object, u32) anyerror!void, resize: ?fn (*Context, Object, Object, u32, u32) anyerror!void, set_max_size: ?fn (*Context, Object, i32, i32) anyerror!void, set_min_size: ?fn (*Context, Object, i32, i32) anyerror!void, set_maximized: ?fn ( *Context, Object, ) anyerror!void, unset_maximized: ?fn ( *Context, Object, ) anyerror!void, set_fullscreen: ?fn (*Context, Object, ?Object) anyerror!void, unset_fullscreen: ?fn ( *Context, Object, ) anyerror!void, set_minimized: ?fn ( *Context, Object, ) anyerror!void, }; fn xdg_toplevel_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_parent_default(context: *Context, object: Object, parent: ?Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_title_default(context: *Context, object: Object, title: []u8) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_app_id_default(context: *Context, object: Object, app_id: []u8) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_show_window_menu_default(context: *Context, object: Object, seat: Object, serial: u32, x: i32, y: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_move_default(context: *Context, object: Object, seat: Object, serial: u32) anyerror!void { return 
error.DebugFunctionNotImplemented; } fn xdg_toplevel_resize_default(context: *Context, object: Object, seat: Object, serial: u32, edges: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_max_size_default(context: *Context, object: Object, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_min_size_default(context: *Context, object: Object, width: i32, height: i32) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_maximized_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_unset_maximized_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_fullscreen_default(context: *Context, object: Object, output: ?Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_unset_fullscreen_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_toplevel_set_minimized_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var XDG_TOPLEVEL = xdg_toplevel_interface{ .destroy = xdg_toplevel_destroy_default, .set_parent = xdg_toplevel_set_parent_default, .set_title = xdg_toplevel_set_title_default, .set_app_id = xdg_toplevel_set_app_id_default, .show_window_menu = xdg_toplevel_show_window_menu_default, .move = xdg_toplevel_move_default, .resize = xdg_toplevel_resize_default, .set_max_size = xdg_toplevel_set_max_size_default, .set_min_size = xdg_toplevel_set_min_size_default, .set_maximized = xdg_toplevel_set_maximized_default, .unset_maximized = xdg_toplevel_unset_maximized_default, .set_fullscreen = xdg_toplevel_set_fullscreen_default, .unset_fullscreen = xdg_toplevel_unset_fullscreen_default, .set_minimized = xdg_toplevel_set_minimized_default, }; pub fn new_xdg_toplevel(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = xdg_toplevel_dispatch, .context = context, .version = 0, .container = container, }; } fn xdg_toplevel_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (XDG_TOPLEVEL.destroy) |destroy| { try destroy( object.context, object, ); } }, // set_parent 1 => { var parent: ?Object = object.context.objects.get(try object.context.next_u32()); if (parent != null) { if (parent.?.dispatch != xdg_toplevel_dispatch) { return error.ObjectWrongType; } } if (XDG_TOPLEVEL.set_parent) |set_parent| { try set_parent(object.context, object, parent); } }, // set_title 2 => { var title: []u8 = try object.context.next_string(); if (XDG_TOPLEVEL.set_title) |set_title| { try set_title(object.context, object, title); } }, // set_app_id 3 => { var app_id: []u8 = try object.context.next_string(); if (XDG_TOPLEVEL.set_app_id) |set_app_id| { try set_app_id(object.context, object, app_id); } }, // show_window_menu 4 => { var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != wl_seat_dispatch) { return error.ObjectWrongType; } var serial: u32 = try object.context.next_u32(); var x: i32 = try object.context.next_i32(); var y: i32 = try object.context.next_i32(); if (XDG_TOPLEVEL.show_window_menu) |show_window_menu| { try show_window_menu(object.context, object, seat, serial, x, y); } }, // move 5 => { var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != 
wl_seat_dispatch) { return error.ObjectWrongType; } var serial: u32 = try object.context.next_u32(); if (XDG_TOPLEVEL.move) |move| { try move(object.context, object, seat, serial); } }, // resize 6 => { var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != wl_seat_dispatch) { return error.ObjectWrongType; } var serial: u32 = try object.context.next_u32(); var edges: u32 = try object.context.next_u32(); if (XDG_TOPLEVEL.resize) |resize| { try resize(object.context, object, seat, serial, edges); } }, // set_max_size 7 => { var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (XDG_TOPLEVEL.set_max_size) |set_max_size| { try set_max_size(object.context, object, width, height); } }, // set_min_size 8 => { var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); if (XDG_TOPLEVEL.set_min_size) |set_min_size| { try set_min_size(object.context, object, width, height); } }, // set_maximized 9 => { if (XDG_TOPLEVEL.set_maximized) |set_maximized| { try set_maximized( object.context, object, ); } }, // unset_maximized 10 => { if (XDG_TOPLEVEL.unset_maximized) |unset_maximized| { try unset_maximized( object.context, object, ); } }, // set_fullscreen 11 => { var output: ?Object = object.context.objects.get(try object.context.next_u32()); if (output != null) { if (output.?.dispatch != wl_output_dispatch) { return error.ObjectWrongType; } } if (XDG_TOPLEVEL.set_fullscreen) |set_fullscreen| { try set_fullscreen(object.context, object, output); } }, // unset_fullscreen 12 => { if (XDG_TOPLEVEL.unset_fullscreen) |unset_fullscreen| { try unset_fullscreen( object.context, object, ); } }, // set_minimized 13 => { if (XDG_TOPLEVEL.set_minimized) |set_minimized| { try set_minimized( object.context, object, ); } }, else => {}, } } pub const xdg_toplevel_resize_edge = enum(u32) { none = 0, top = 1, bottom = 2, left = 4, top_left = 5, bottom_left = 6, right = 8, top_right = 9, bottom_right = 10, }; pub const xdg_toplevel_state = enum(u32) { maximized = 1, fullscreen = 2, resizing = 3, activated = 4, tiled_left = 5, tiled_right = 6, tiled_top = 7, tiled_bottom = 8, }; // // This configure event asks the client to resize its toplevel surface or // to change its state. The configured state should not be applied // immediately. See xdg_surface.configure for details. // // The width and height arguments specify a hint to the window // about how its surface should be resized in window geometry // coordinates. See set_window_geometry. // // If the width or height arguments are zero, it means the client // should decide its own window dimension. This may happen when the // compositor needs to configure the state of the surface but doesn't // have any information about any previous or expected dimension. // // The states listed in the event specify how the width/height // arguments should be interpreted, and possibly how it should be // drawn. // // Clients must send an ack_configure in response to this event. See // xdg_surface.configure and xdg_surface.ack_configure for details. // pub fn xdg_toplevel_send_configure(object: Object, width: i32, height: i32, states: []u32) anyerror!void { object.context.startWrite(); object.context.putI32(width); object.context.putI32(height); object.context.putArray(states); object.context.finishWrite(object.id, 0); } // // The close event is sent by the compositor when the user // wants the surface to be closed. 
This should be equivalent to // the user clicking the close button in client-side decorations, // if your application has any. // // This is only a request that the user intends to close the // window. The client may choose to ignore this request, or show // a dialog to ask the user to save their data, etc. // pub fn xdg_toplevel_send_close(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 1); } // xdg_popup pub const xdg_popup_interface = struct { // short-lived, popup surfaces for menus destroy: ?fn ( *Context, Object, ) anyerror!void, grab: ?fn (*Context, Object, Object, u32) anyerror!void, }; fn xdg_popup_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn xdg_popup_grab_default(context: *Context, object: Object, seat: Object, serial: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var XDG_POPUP = xdg_popup_interface{ .destroy = xdg_popup_destroy_default, .grab = xdg_popup_grab_default, }; pub fn new_xdg_popup(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = xdg_popup_dispatch, .context = context, .version = 0, .container = container, }; } fn xdg_popup_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (XDG_POPUP.destroy) |destroy| { try destroy( object.context, object, ); } }, // grab 1 => { var seat: Object = object.context.objects.get(try object.context.next_u32()).?; if (seat.dispatch != wl_seat_dispatch) { return error.ObjectWrongType; } var serial: u32 = try object.context.next_u32(); if (XDG_POPUP.grab) |grab| { try grab(object.context, object, seat, serial); } }, else => {}, } } pub const xdg_popup_error = enum(u32) { invalid_grab = 0, }; // // This event asks the popup surface to configure itself given the // configuration. The configured state should not be applied immediately. // See xdg_surface.configure for details. // // The x and y arguments represent the position the popup was placed at // given the xdg_positioner rule, relative to the upper left corner of the // window geometry of the parent surface. // pub fn xdg_popup_send_configure(object: Object, x: i32, y: i32, width: i32, height: i32) anyerror!void { object.context.startWrite(); object.context.putI32(x); object.context.putI32(y); object.context.putI32(width); object.context.putI32(height); object.context.finishWrite(object.id, 0); } // // The popup_done event is sent out when a popup is dismissed by the // compositor. The client should destroy the xdg_popup object at this // point. 
// pub fn xdg_popup_send_popup_done(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 1); } // zwp_linux_dmabuf_v1 pub const zwp_linux_dmabuf_v1_interface = struct { // factory for creating dmabuf-based wl_buffers destroy: ?fn ( *Context, Object, ) anyerror!void, create_params: ?fn (*Context, Object, u32) anyerror!void, }; fn zwp_linux_dmabuf_v1_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn zwp_linux_dmabuf_v1_create_params_default(context: *Context, object: Object, params_id: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var ZWP_LINUX_DMABUF_V1 = zwp_linux_dmabuf_v1_interface{ .destroy = zwp_linux_dmabuf_v1_destroy_default, .create_params = zwp_linux_dmabuf_v1_create_params_default, }; pub fn new_zwp_linux_dmabuf_v1(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = zwp_linux_dmabuf_v1_dispatch, .context = context, .version = 0, .container = container, }; } fn zwp_linux_dmabuf_v1_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (ZWP_LINUX_DMABUF_V1.destroy) |destroy| { try destroy( object.context, object, ); } }, // create_params 1 => { var params_id: u32 = try object.context.next_u32(); if (ZWP_LINUX_DMABUF_V1.create_params) |create_params| { try create_params(object.context, object, params_id); } }, else => {}, } } // // This event advertises one buffer format that the server supports. // All the supported formats are advertised once when the client // binds to this interface. A roundtrip after binding guarantees // that the client has received all supported formats. // // For the definition of the format codes, see the // zwp_linux_buffer_params_v1::create request. // // Warning: the 'format' event is likely to be deprecated and replaced // with the 'modifier' event introduced in zwp_linux_dmabuf_v1 // version 3, described below. Please refrain from using the information // received from this event. // pub fn zwp_linux_dmabuf_v1_send_format(object: Object, format: u32) anyerror!void { object.context.startWrite(); object.context.putU32(format); object.context.finishWrite(object.id, 0); } // // This event advertises the formats that the server supports, along with // the modifiers supported for each format. All the supported modifiers // for all the supported formats are advertised once when the client // binds to this interface. A roundtrip after binding guarantees that // the client has received all supported format-modifier pairs. // // For legacy support, DRM_FORMAT_MOD_INVALID (that is, modifier_hi == // 0x00ffffff and modifier_lo == 0xffffffff) is allowed in this event. // It indicates that the server can support the format with an implicit // modifier. When a plane has DRM_FORMAT_MOD_INVALID as its modifier, it // is as if no explicit modifier is specified. The effective modifier // will be derived from the dmabuf. // // For the definition of the format and modifier codes, see the // zwp_linux_buffer_params_v1::create and zwp_linux_buffer_params_v1::add // requests. 
// pub fn zwp_linux_dmabuf_v1_send_modifier(object: Object, format: u32, modifier_hi: u32, modifier_lo: u32) anyerror!void { object.context.startWrite(); object.context.putU32(format); object.context.putU32(modifier_hi); object.context.putU32(modifier_lo); object.context.finishWrite(object.id, 1); } // zwp_linux_buffer_params_v1 pub const zwp_linux_buffer_params_v1_interface = struct { // parameters for creating a dmabuf-based wl_buffer destroy: ?fn ( *Context, Object, ) anyerror!void, add: ?fn (*Context, Object, i32, u32, u32, u32, u32, u32) anyerror!void, create: ?fn (*Context, Object, i32, i32, u32, u32) anyerror!void, create_immed: ?fn (*Context, Object, u32, i32, i32, u32, u32) anyerror!void, }; fn zwp_linux_buffer_params_v1_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn zwp_linux_buffer_params_v1_add_default(context: *Context, object: Object, fd: i32, plane_idx: u32, offset: u32, stride: u32, modifier_hi: u32, modifier_lo: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn zwp_linux_buffer_params_v1_create_default(context: *Context, object: Object, width: i32, height: i32, format: u32, flags: u32) anyerror!void { return error.DebugFunctionNotImplemented; } fn zwp_linux_buffer_params_v1_create_immed_default(context: *Context, object: Object, buffer_id: u32, width: i32, height: i32, format: u32, flags: u32) anyerror!void { return error.DebugFunctionNotImplemented; } pub var ZWP_LINUX_BUFFER_PARAMS_V1 = zwp_linux_buffer_params_v1_interface{ .destroy = zwp_linux_buffer_params_v1_destroy_default, .add = zwp_linux_buffer_params_v1_add_default, .create = zwp_linux_buffer_params_v1_create_default, .create_immed = zwp_linux_buffer_params_v1_create_immed_default, }; pub fn new_zwp_linux_buffer_params_v1(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = zwp_linux_buffer_params_v1_dispatch, .context = context, .version = 0, .container = container, }; } fn zwp_linux_buffer_params_v1_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // destroy 0 => { if (ZWP_LINUX_BUFFER_PARAMS_V1.destroy) |destroy| { try destroy( object.context, object, ); } }, // add 1 => { var fd: i32 = try object.context.next_fd(); var plane_idx: u32 = try object.context.next_u32(); var offset: u32 = try object.context.next_u32(); var stride: u32 = try object.context.next_u32(); var modifier_hi: u32 = try object.context.next_u32(); var modifier_lo: u32 = try object.context.next_u32(); if (ZWP_LINUX_BUFFER_PARAMS_V1.add) |add| { try add(object.context, object, fd, plane_idx, offset, stride, modifier_hi, modifier_lo); } }, // create 2 => { var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); var format: u32 = try object.context.next_u32(); var flags: u32 = try object.context.next_u32(); if (ZWP_LINUX_BUFFER_PARAMS_V1.create) |create| { try create(object.context, object, width, height, format, flags); } }, // create_immed 3 => { var buffer_id: u32 = try object.context.next_u32(); var width: i32 = try object.context.next_i32(); var height: i32 = try object.context.next_i32(); var format: u32 = try object.context.next_u32(); var flags: u32 = try object.context.next_u32(); if (ZWP_LINUX_BUFFER_PARAMS_V1.create_immed) |create_immed| { try create_immed(object.context, object, buffer_id, width, height, format, flags); } }, else => {}, } } pub const zwp_linux_buffer_params_v1_error = enum(u32) { already_used = 0, plane_idx = 1, plane_set = 2, 
incomplete = 3, invalid_format = 4, invalid_dimensions = 5, out_of_bounds = 6, invalid_wl_buffer = 7, }; pub const zwp_linux_buffer_params_v1_flags = enum(u32) { y_invert = 1, interlaced = 2, bottom_first = 4, }; // // This event indicates that the attempted buffer creation was // successful. It provides the new wl_buffer referencing the dmabuf(s). // // Upon receiving this event, the client should destroy the // zlinux_dmabuf_params object. // pub fn zwp_linux_buffer_params_v1_send_created(object: Object, buffer: u32) anyerror!void { object.context.startWrite(); object.context.putU32(buffer); object.context.finishWrite(object.id, 0); } // // This event indicates that the attempted buffer creation has // failed. It usually means that one of the dmabuf constraints // has not been fulfilled. // // Upon receiving this event, the client should destroy the // zlinux_buffer_params object. // pub fn zwp_linux_buffer_params_v1_send_failed(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 1); } // fw_control pub const fw_control_interface = struct { // protocol for querying and controlling foxwhale get_clients: ?fn ( *Context, Object, ) anyerror!void, get_windows: ?fn ( *Context, Object, ) anyerror!void, get_window_trees: ?fn ( *Context, Object, ) anyerror!void, destroy: ?fn ( *Context, Object, ) anyerror!void, }; fn fw_control_get_clients_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn fw_control_get_windows_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn fw_control_get_window_trees_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } fn fw_control_destroy_default(context: *Context, object: Object) anyerror!void { return error.DebugFunctionNotImplemented; } pub var FW_CONTROL = fw_control_interface{ .get_clients = fw_control_get_clients_default, .get_windows = fw_control_get_windows_default, .get_window_trees = fw_control_get_window_trees_default, .destroy = fw_control_destroy_default, }; pub fn new_fw_control(id: u32, context: *Context, container: usize) Object { return Object{ .id = id, .dispatch = fw_control_dispatch, .context = context, .version = 0, .container = container, }; } fn fw_control_dispatch(object: Object, opcode: u16) anyerror!void { switch (opcode) { // get_clients 0 => { if (FW_CONTROL.get_clients) |get_clients| { try get_clients( object.context, object, ); } }, // get_windows 1 => { if (FW_CONTROL.get_windows) |get_windows| { try get_windows( object.context, object, ); } }, // get_window_trees 2 => { if (FW_CONTROL.get_window_trees) |get_window_trees| { try get_window_trees( object.context, object, ); } }, // destroy 3 => { if (FW_CONTROL.destroy) |destroy| { try destroy( object.context, object, ); } }, else => {}, } } pub const fw_control_surface_type = enum(u32) { wl_surface = 0, wl_subsurface = 1, xdg_toplevel = 2, xdg_popup = 3, }; pub fn fw_control_send_client(object: Object, index: u32) anyerror!void { object.context.startWrite(); object.context.putU32(index); object.context.finishWrite(object.id, 0); } pub fn fw_control_send_window(object: Object, index: u32, parent: i32, wl_surface_id: u32, surface_type: u32, x: i32, y: i32, width: i32, height: i32, sibling_prev: i32, sibling_next: i32, children_prev: i32, children_next: i32, input_region_id: u32) anyerror!void { object.context.startWrite(); object.context.putU32(index); object.context.putI32(parent); 
object.context.putU32(wl_surface_id); object.context.putU32(surface_type); object.context.putI32(x); object.context.putI32(y); object.context.putI32(width); object.context.putI32(height); object.context.putI32(sibling_prev); object.context.putI32(sibling_next); object.context.putI32(children_prev); object.context.putI32(children_next); object.context.putU32(input_region_id); object.context.finishWrite(object.id, 1); } pub fn fw_control_send_toplevel_window(object: Object, index: u32, parent: i32, wl_surface_id: u32, surface_type: u32, x: i32, y: i32, width: i32, height: i32, input_region_id: u32) anyerror!void { object.context.startWrite(); object.context.putU32(index); object.context.putI32(parent); object.context.putU32(wl_surface_id); object.context.putU32(surface_type); object.context.putI32(x); object.context.putI32(y); object.context.putI32(width); object.context.putI32(height); object.context.putU32(input_region_id); object.context.finishWrite(object.id, 2); } pub fn fw_control_send_region_rect(object: Object, index: u32, x: i32, y: i32, width: i32, height: i32, op: i32) anyerror!void { object.context.startWrite(); object.context.putU32(index); object.context.putI32(x); object.context.putI32(y); object.context.putI32(width); object.context.putI32(height); object.context.putI32(op); object.context.finishWrite(object.id, 3); } pub fn fw_control_send_done(object: Object) anyerror!void { object.context.startWrite(); object.context.finishWrite(object.id, 4); }
src/protocols.zig
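The generated tables in the file above are plain `pub var` structs of optional function pointers, so a compositor can swap in its own request handlers and call the `*_send_*` helpers to emit events. The sketch below is illustrative only: it assumes `Context` and `Object` are public in `protocols.zig`, and the `handlePong`/`pingClient`/`installHandlers` names and the serial bookkeeping are invented here, not part of the bindings.

const std = @import("std");
const prot = @import("protocols.zig");

// Hypothetical bookkeeping for one outstanding xdg_wm_base ping.
var last_ping_serial: u32 = 0;
var ping_answered: bool = true;

// Replacement for the generated default handler, which only returns
// error.DebugFunctionNotImplemented.
fn handlePong(context: *prot.Context, object: prot.Object, serial: u32) anyerror!void {
    _ = context;
    _ = object;
    if (serial == last_ping_serial) ping_answered = true;
}

pub fn installHandlers() void {
    // A handler shares the signature of the `?fn (...)` field it replaces,
    // matching the function-pointer style used by the generated tables.
    prot.XDG_WM_BASE.pong = handlePong;
}

pub fn pingClient(wm_base: prot.Object, serial: u32) anyerror!void {
    last_ping_serial = serial;
    ping_answered = false;
    // Writes opcode 0 (ping) with the serial the client must echo back
    // via the pong request.
    try prot.xdg_wm_base_send_ping(wm_base, serial);
}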
const aoc = @import("../aoc.zig"); const std = @import("std"); const ArenaAllocator = std.heap.ArenaAllocator; const RawRules = struct { const Rule = union(enum) { Literal: []const u8, Goto: [][][]const u8, }; const RuleMap = std.StringHashMap(Rule); arena: ArenaAllocator = undefined, unprocessed_rule_map: RuleMap = undefined, processed_rule_map: RuleMap = undefined, fn init(self: *RawRules, allocator: std.mem.Allocator) void { self.arena = ArenaAllocator.init(allocator); self.unprocessed_rule_map = RuleMap.init(self.arena.allocator()); } fn deinit(self: *RawRules) void { self.arena.deinit(); } fn addRule(self: *RawRules, line: []const u8) !void { var colon = std.mem.split(u8, line, ": "); const rule_idx = colon.next().?; const raw_rule = colon.next().?; const rule = blk: { if (raw_rule[0] == '"') { break :blk Rule { .Literal = raw_rule[1..2] }; } else { var goto = std.ArrayList([][]const u8).init(self.arena.allocator()); var pipe = std.mem.split(u8, raw_rule, " | "); while (pipe.next()) |chunk| { var indexes = std.ArrayList([]const u8).init(self.arena.allocator()); var tokens = std.mem.tokenize(u8, chunk, " "); while (tokens.next()) |token| { try indexes.append(token); } try goto.append(indexes.toOwnedSlice()); } break :blk Rule { .Goto = goto.toOwnedSlice() }; } }; try self.unprocessed_rule_map.put(rule_idx, rule); } fn formValidMessages(self: *RawRules) !aoc.Regex { self.processed_rule_map = try self.unprocessed_rule_map.clone(); const pattern = try self.formMessage("0"); const patternz = try std.fmt.allocPrintZ(self.arena.allocator(), "^{s}$", .{pattern}); defer self.arena.allocator().free(patternz); return aoc.Regex.compilez(patternz); } fn formMessage(self: *RawRules, rule_idx: []const u8) anyerror![]const u8 { switch (self.processed_rule_map.get(rule_idx).?) { .Literal => |l| return l, .Goto => |goto| { var pattern = try std.fmt.allocPrint( self.arena.allocator(), "({s}", .{ try self.formMessageFromSingleRule(goto[0]) } ); for (goto[1..]) |rule| { pattern = try std.fmt.allocPrint( self.arena.allocator(), "{s}|{s}", .{ pattern, try self.formMessageFromSingleRule(rule) } ); } pattern = try std.fmt.allocPrint(self.arena.allocator(), "{s})", .{pattern}); try self.processed_rule_map.put(rule_idx, .{ .Literal = pattern }); return pattern; } } } fn formMessageFromSingleRule(self: *RawRules, rules: [][]const u8) anyerror![]const u8 { var pattern: []const u8 = &[_]u8 {}; for (rules) |rule| { pattern = try std.fmt.allocPrint( self.arena.allocator(), "{s}{s}", .{ pattern, try self.formMessage(rule) } ); } return pattern; } }; pub fn run(problem: *aoc.Problem) !aoc.Solution { var raw_rules = RawRules {}; raw_rules.init(problem.allocator); defer raw_rules.deinit(); var rule_lines = std.mem.tokenize(u8, problem.group().?, "\n"); while (rule_lines.next()) |line| { try raw_rules.addRule(line); } var res1: usize = 0; var valid_messages1 = try raw_rules.formValidMessages(); defer valid_messages1.deinit(); var res2: usize = 0; try raw_rules.addRule("8: 42 | 42 42 | 42 42 42 | 42 42 42 42 | 42 42 42 42 42"); try raw_rules.addRule("11: 42 31 | 42 42 31 31 | 42 42 42 31 31 31 | 42 42 42 42 31 31 31 31 | 42 42 42 42 42 31 31 31 31 31"); var valid_messages2 = try raw_rules.formValidMessages(); defer valid_messages2.deinit(); var message_lines = std.mem.tokenize(u8, problem.group().?, "\n"); while (message_lines.next()) |line| { if (valid_messages1.matches(line)) { res1 += 1; } if (valid_messages2.matches(line)) { res2 += 1; } } return problem.solution(res1, res2); }
src/main/zig/2020/day19.zig
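The day 19 solver above compiles the rule grammar into a single anchored regular expression, with rules 8 and 11 replaced by bounded repetitions for part two. The test below is a minimal, std-only sketch of the line-parsing step used by `addRule` (split on ": ", then " | ", then spaces); it does not depend on the `aoc` helpers.

const std = @import("std");

test "a rule line splits into alternatives of sub-rule indexes" {
    const line = "0: 4 1 5 | 2 3";

    // "0" is the rule index, the remainder lists the alternatives.
    var colon = std.mem.split(u8, line, ": ");
    try std.testing.expectEqualStrings("0", colon.next().?);

    var pipe = std.mem.split(u8, colon.next().?, " | ");

    // First alternative: "4 1 5" -> three sub-rule indexes.
    var first = std.mem.tokenize(u8, pipe.next().?, " ");
    try std.testing.expectEqualStrings("4", first.next().?);
    try std.testing.expectEqualStrings("1", first.next().?);
    try std.testing.expectEqualStrings("5", first.next().?);
    try std.testing.expect(first.next() == null);

    // Second alternative: "2 3" -> two sub-rule indexes.
    var second = std.mem.tokenize(u8, pipe.next().?, " ");
    try std.testing.expectEqualStrings("2", second.next().?);
    try std.testing.expectEqualStrings("3", second.next().?);
    try std.testing.expect(pipe.next() == null);
}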
const std = @import("std"); const math = std.math; const assert = std.debug.assert; const warn = std.debug.warn; const Allocator = std.mem.Allocator; const io = std.io; const mem = std.mem; pub const max_num_lit = 286; pub const max_bits_limit = 16; const max_i32 = math.maxInt(i32); const log_window_size: usize = 15; const window_size: usize = 1 << log_window_size; const window_mask: usize = window_size - 1; // The LZ77 step produces a sequence of literal tokens and <length, offset> // pair tokens. The offset is also known as distance. The underlying wire // format limits the range of lengths and offsets. For example, there are // 256 legitimate lengths: those in the range [3, 258]. This package's // compressor uses a higher minimum match length, enabling optimizations // such as finding matches via 32-bit loads and compares. const base_match_length: usize = 3; // The smallest match length per the RFC section 3.2.5 const min_match_length: usize = 4; // The smallest match length that the compressor actually emits const max_match_length: usize = 258; // The largest match length const base_match_offset: usize = 1; // The smallest match offset const max_match_offset: usize = 1 << 15; // The largest match offset // The maximum number of tokens we put into a single flate block, just to // stop things from getting too large. const max_flate_block_tokens: usize = 1 << 14; const max_store_block_size: usize = 65535; const hash_bits: usize = 17; // After 17 performance degrades const hash_size: usize = 1 << hash_bits; const hashMask: usize = (1 << hash_bits) - 1; const maxHash_offset: usize = 1 << 24; pub const Huffman = struct { codes: [max_num_lit]Code, codes_len: usize, freq_cache: [max_num_lit]LitaralNode, bit_count: [17]i32, /// sorted by literal lns: LiteralList, ///sorted by freq lfs: LiteralList, pub const Code = struct { code: u16, len: u16, }; pub const LitaralNode = struct { literal: u16, freq: i32, pub fn max() LitaralNode { return LitaralNode{ .literal = math.maxInt(u16), .freq = math.maxInt(i32), }; } pub const SortBy = enum { Literal, Freq, }; fn sort(ls: []LitaralNode, by: SortBy) void { switch (by) { .Literal => { std.sort.sort(LitaralNode, ls, sortByLiteralFn); }, .Freq => { std.sort.sort(LitaralNode, ls, sortByFreqFn); }, } } fn sortByLiteralFn(lhs: LitaralNode, rhs: LitaralNode) bool { return lhs.literal < rhs.literal; } fn sortByFreqFn(lhs: LitaralNode, rhs: LitaralNode) bool { if (lhs.freq == rhs.freq) { return lhs.literal < rhs.literal; } return lhs.freq < rhs.freq; } }; pub const LiteralList = std.ArrayList(LitaralNode); const LevelInfo = struct { level: i32, last_freq: i32, next_char_freq: i32, next_pair_freq: i32, needed: i32, }; pub fn init(size: usize) Huffman { assert(size <= max_num_lit); var h: Huffman = undefined; h.codes_len = size; return h; } pub fn initAlloc(allocator: *Allocator, size: usize) Huffman { var h = init(size); h.lhs = LiteralList.init(a); h.rhs = LiteralList.init(a); return h; } pub fn generateFixedLiteralEncoding() Huffman { var h = init(max_num_lit); var codes = h.codes[0..h.codes_len]; var ch: u16 = 0; while (ch < max_num_lit) : (ch += 1) { var bits: u16 = 0; var size: u16 = 0; if (ch < 144) { // size 8, 000110000 .. 10111111 bits = ch + 48; size = 8; } else if (ch < 256) { // size 9, 110010000 .. 111111111 bits = ch + 400 - 144; size = 9; } else if (ch < 280) { // size 7, 0000000 .. 0010111 bits = ch - 256; size = 7; } else { // size 8, 11000000 .. 
11000111 bits = ch + 192 - 280; size = 8; } codes[@intCast(usize, ch)] = Code{ .code = reverseBits(bits, size), .len = size, }; } return h; } pub fn generateFixedOffsetEncoding() Huffman { var h = init(30); var codes = h.codes[0..h.codes_len]; var i: usize = 0; while (i < h.codes_len) : (i += 1) { codes[i] = Code{ .code = reverseBits(@intCast(u16, i), 5), .len = 5, }; } return h; } pub fn bitLength(self: *Huffman, freq: []i32) isize { var total: isize = 0; for (freq) |f, i| { if (f != 0) { total += @intCast(isize, f) + @intCast(isize, h.codes[i].len); } } return total; } pub fn bitCounts( self: *Huffman, list: LitaralNode, max_bits_arg: i32, ) []i32 { var amx_bits = max_bits_arg; assert(max_bits <= max_bits_limit); const n = @intCast(i32, list.len); var last_node = n + 1; if (max_bits > n - 1) { max_bits = n - 1; } var levels: [max_bits_limit]LevelInfo = undefined; var leaf_counts: [max_bits_limit][max_bits_limit]i32 = undefined; var level: i32 = 0; while (level <= max_bits) : (level += 1) { levels[@intCast(usize, level)] = LevelInfo{ .level = level, .last_freq = list[1].freq, .next_char_freq = list[2].freq, .next_pair_freq = list[0].freq + list[1].freq, }; leaf_counts[level][level] = 2; if (level == 1) { levels[@intCast(usize, level)].next_pair_freq = max_i32; } } levels[max_bits].needed = 2 * n - 4; level = max_bits; while (true) { var l = &levels[@intCast(usize, level)]; if (l.next_pair_freq == max_i32 and l.next_char_freq == max_i32) { l.needed = 0; levels[@intCast(usize, level + 1)].next_pair_freq = max_i32; level += 1; continue; } const prev_freq = l.last_freq; if (l.next_char_freq < l.next_pair_freq) { const nx = leaf_counts[level][level] + 1; l.last_freq = l.next_char_freq; leaf_counts[level][level] = nx; l.next_char_freq = if (nx == last_node) LitaralNode.max().freq else list[nx].freq; } else { l.last_freq = l.next_pair_freq; mem.copy(i32, leaf_counts[level][0..level], leaf_counts[level - 1][0..level]); levels[l.level - 1].needed = 2; l.needed -= 1; if (l.needed == 0) { if (l.level == max_bits) { break; } levels[l.level + 1].next_pair_freq = prev_freq + l.last_freq; level += 1; } else { while (level - 1 >= 0 and levels[level - 1].needed > 0) : (level -= 1) {} } } } if (leaf_counts[max_bits][max_bits] != n) { @panic("leaf_counts[max_bits][max_bits] != n"); } var bit_count = self.bit_count[0 .. max_bits + 1]; var bits = 1; const counts = leaf_counts[max_bits]; level = max_bits; while (level > 0) : (level -= 1) { bit_count[bits] = counts[level] - counts[level - 1]; bits += 1; } return bit_count; } /// Look at the leaves and assign them a bit count and an encoding as specified /// in RFC 1951 3.2.2 pub fn assignEncodingAndSize( self: *Huffman, bit_count: []const i32, list: []LitaralNode, ) !void { var ls = list; var code: u16 = 0; for (bit_count) |bits, n| { code = math.shl(u16, code, 1); if (n == 0 or bits == 0) { continue; } // The literals list[len(list)-bits] .. list[len(list)-bits] // are encoded using "bits" bits, and get the values // code, code + 1, .... The code values are // assigned in literal order (not frequency order). var chunk = ls[ls.len - @intCast(usize, bits) ..]; LitaralNode.sort(chunk, .Literal); try self.lhs.append(chunk); for (chunk) |node| { self.codes[@intCast(usize, node.literal)] = Code{ .code = reverseBits(code, @intCast(u16, n)), .len = @intCast(u16, n), }; } ls = ls[0 .. ls.len - @intCast(usize, bits)]; } } pub fn generate( self: *Huffman, freq: []const i32, max_bits: i32, ) !void { var list = self.freq_cache[0 .. 
            freq.len + 1];
        var count: usize = 0;
        for (freq) |f, i| {
            if (f != 0) {
                list[count] = LitaralNode{
                    .literal = @intCast(u16, i),
                    .freq = f,
                };
                count += 1;
            } else {
                list[count] = LitaralNode{
                    .literal = 0,
                    .freq = 0,
                };
                self.codes[i].len = 0;
            }
        }
        list[freq.len] = LitaralNode{
            .literal = 0,
            .freq = 0,
        };
        const ls = list[0..count];
        if (count <= 2) {
            // Handle the small cases here, because they are awkward for the general case code. With
            // two or fewer literals, everything has bit length 1.
            for (ls) |node, i| {
                var x = &self.codes[@intCast(usize, node.literal)];
                x.code = @intCast(u16, i);
                x.len = 1;
            }
            return;
        }
        LitaralNode.sort(ls, .Freq);
        try self.lfs.appendSlice(ls);
        const bit_count = self.bitCounts(ls, max_bits);
        try self.assignEncodingAndSize(bit_count, ls);
    }
};

fn reverseBits(number: u16, bit_length: u16) u16 {
    return @bitReverse(u16, math.shl(u16, number, 16 - bit_length));
}

test "huffman" {
    var h = Huffman.generateFixedOffsetEncoding();
    warn("\n");
    for (h.codes[0..h.codes_len]) |code| {
        warn("{}, {}\n", code.code, code.len);
    }
}

const offset_code_count = 30;

// The special code used to mark the end of a block.
const end_block_marker = 256;

// The first length code.
const length_codes_start = 257;

// The number of codegen codes.
const codegen_code_count = 19;

const bad_code = 255;

// buffer_flush_size indicates the buffer size
// after which bytes are flushed to the writer.
// Should preferably be a multiple of 6, since
// we accumulate 6 bytes between writes to the buffer.
const buffer_flush_size = 240;

// buffer_size is the actual output byte buffer size.
// It must have additional headroom for a flush
// which can contain up to 8 bytes.
const buffer_size = buffer_flush_size + 8;

//zig fmt: off
const length_extra_bits = [_]u8{
    0, 0, 0, //257
    0, 0, 0, 0, 0, 1, 1, 1, 1, 2, // 260
    2, 2, 2, 3, 3, 3, 3, 4, 4, 4, //270
    4, 5, 5, 5, 5, 0, //280
};
//zig fmt: on

const length_base = [_]u32{
    0,  1,  2,  3,   4,   5,   6,   7,   8,   10,
    12, 14, 16, 20,  24,  28,  32,  40,  48,  56,
    64, 80, 96, 112, 128, 160, 192, 224, 255,
};

const offset_extra_bits = [_]u8{
    0, 0, 0,  0,  1,  1,  2,  2,  3,  3,
    4, 4, 5,  5,  6,  6,  7,  7,  8,  8,
    9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
};

const offset_base = [_]u32{
    0x000000, 0x000001, 0x000002, 0x000003, 0x000004, 0x000006,
    0x000008, 0x00000c, 0x000010, 0x000018, 0x000020, 0x000030,
    0x000040, 0x000060, 0x000080, 0x0000c0, 0x000100, 0x000180,
    0x000200, 0x000300, 0x000400, 0x000600, 0x000800, 0x000c00,
    0x001000, 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
};

const codegen_order = [_]u32{ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };

const code_gen_size = max_num_lit + offset_code_count + 1;

pub fn Writer(comptime Error: type) type {
    return struct {
        const Self = @This();
        pub const Stream = io.OutStream(Error);

        stream: Stream,
        out_stream: *Stream,
        bits: u64,
        nbits: u8,
        bytes: [buffer_size]u8 = [_]u8{0} ** buffer_size,
        code_gen_freq: [codegen_code_count]u32 = [_]u32{0} ** codegen_code_count,
        nbytes: usize,
        literal_freq: [max_num_lit]i32 = [_]i32{0} ** max_num_lit,
        offset_freq: [offset_code_count]i32 = [_]i32{0} ** offset_code_count,
        code_gen: [code_gen_size]u8 = [_]u8{0} ** code_gen_size,
        literal_encoding: Huffman = Huffman.init(max_num_lit),
        offset_encoding: Huffman = Huffman.init(offset_code_count),
        code_gen_encoding: Huffman = Huffman.init(codegen_code_count),

        fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
            const self = @fieldParentPtr(Self, "stream", out_stream);
            return self.write(bytes);
        }

        fn write(self: *Self, bytes: []const u8) !void {
            try self.out_stream.write(bytes);
        }

        fn flush(self: *Self) !void {
            var n = self.nbytes;
            while (self.nbits != 0) {
                self.bytes[n] = @truncate(u8, self.bits);
                self.bits >>= 8;
                if (self.nbits > 8) {
                    self.nbits -= 8;
                } else {
                    self.nbits = 0;
                }
                n += 1;
            }
            self.bits = 0;
            try self.write(self.bytes[0..n]);
            self.nbytes = 0;
        }
    };
}

// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// * Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// * Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
const DictDecoder = struct {
    hist: [max_match_offset]u8 = [_]u8{0} ** max_match_offset,
    w_pos: usize = 0,
    r_pos: usize = 0,
    full: bool = false,

    fn init(dict: ?[]u8) DictDecoder {
        var d = DictDecoder{};
        if (dict != null and dict.?.len > 0) {
            mem.copy(u8, d.hist[0..], dict.?);
        }
        return d;
    }

    fn histSize(self: *DictDecoder) usize {
        if (self.full) return self.hist.len;
        return self.w_pos;
    }

    fn availRead(self: *DictDecoder) usize {
        return self.w_pos - self.r_pos;
    }

    fn availWrite(self: *DictDecoder) usize {
        return self.hist.len - self.w_pos;
    }
};
src/flate.zig
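// Sketch: DEFLATE writes Huffman codes LSB-first, which is why the writer above
// reverses each code's low `len` bits before packing it into the bit buffer. A
// minimal standalone illustration of that reversal, assuming the same two-argument
// @bitReverse builtin used in the file above; `reverseLowBits` is a hypothetical
// helper, not part of that file.
const std = @import("std");

/// Mirror the low `bit_length` bits of `number` (bit_length must be >= 1).
fn reverseLowBits(number: u16, bit_length: u4) u16 {
    // Move the interesting bits to the top of the word, then reverse the whole word.
    return @bitReverse(u16, std.math.shl(u16, number, 16 - @as(u16, bit_length)));
}

test "reverseLowBits mirrors a code within its own width" {
    // 0b011 over 3 bits becomes 0b110.
    try std.testing.expectEqual(@as(u16, 0b110), reverseLowBits(0b011, 3));
    // The fixed offset codes above are 5 bits wide: code 1 is emitted as 0b10000.
    try std.testing.expectEqual(@as(u16, 0b10000), reverseLowBits(0b00001, 5));
}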
nodes: std.MultiArrayList(Node).Slice, // @TODO: Would it be reasonable to change the alignment of elements? elements: []const Element, const std = @import("std"); const Allocator = std.mem.Allocator; const super = @import("!mod.zig"); const Cst = @This(); pub fn deinit(self: *Cst, allocator: Allocator) void { self.nodes.deinit(allocator); allocator.free(self.elements); } pub const Node = struct { kind: Kind, count: u24, // 4 bytes in size, no wast pub const Kind = enum(u8) { root, skipped, /// '::'? ident ( '::' ident )* path, string, /// 'fn' ident ( ';' | expr ) decl_function, literal_int, literal_float, /// 'let' ident '=' expr ';' stmt_let, /// '{' stmt* '}' expr_block, /// expr operator expr expr_infix, /// '()' expr_unit, /// '(' expr ')' expr_grouping, /// '(' expr ( ',' expr )* ','? ')' expr_tuple, // expr tuple expr_call, }; }; const NodeKind = Node.Kind; pub const Element = enum { token, node }; /// Type used by the `Parser` to construct a `Cst` pub const NodeBuilder = struct { kind: Node.Kind, elements: std.ArrayListUnmanaged(Element) = .{}, child_nodes: std.ArrayListUnmanaged(NodeBuilder) = .{}, pub fn deinit(self: *NodeBuilder, allocator: Allocator) void { self.elements.deinit(allocator); for (self.child_nodes.items) |*node| { node.deinit(allocator); } self.child_nodes.deinit(allocator); } pub fn appendToken(self: *NodeBuilder, allocator: Allocator) !void { try self.elements.append(allocator, .token); } pub fn appendNode(self: *NodeBuilder, allocator: Allocator, node: NodeBuilder) !void { try self.elements.append(allocator, .node); try self.child_nodes.append(allocator, node); } }; /// Takes a root node and produces a `Cst` pub fn buildCst(allocator: Allocator, root: NodeBuilder) !Cst { var nodes = std.MultiArrayList(Node){}; var elements = std.ArrayListUnmanaged(Element){}; try buildNode(allocator, &nodes, &elements, root); return Cst{ .nodes = nodes.toOwnedSlice(), .elements = elements.toOwnedSlice(allocator), }; } fn buildNode( allocator: Allocator, nodes: *std.MultiArrayList(Node), elements: *std.ArrayListUnmanaged(Element), node: NodeBuilder, ) anyerror!void { const node_index = nodes.len; try nodes.append(allocator, .{ .kind = node.kind, .count = undefined }); const count_start = elements.items.len; for (node.elements.items) |elem| { switch (elem) { .token => try elements.append(allocator, .token), .node => try elements.append(allocator, .node), } } const count = elements.items.len - count_start; nodes.items(.count)[node_index] = @intCast(u24, count); for (node.child_nodes.items) |n| { // recurse try buildNode(allocator, nodes, elements, n); } } /// Helper struct to print the tree structure of a `Cst` pub const Printer = struct { allocator: Allocator, cst: Cst, tokens: []const super.TokenKind, spans: []const super.SpanData, src: []const u8, node_count: usize = 1, // the first node is the root, so we ignore it token_count: usize = 0, elem_count: usize = 0, level: u8 = 0, const Self = @This(); const term = @import("../term.zig"); const debug = @import("../debug.zig"); pub fn print(self: *Self) !void { try self.printElements(0); } fn printElements(self: *Self, index: usize) anyerror!void { const node_count = self.cst.nodes.items(.count)[index]; const elem_start = self.elem_count; self.elem_count += node_count; const elems = self.cst.elements[elem_start..self.elem_count]; for (elems) |elem| { var i: usize = 0; term.print("\x1b[38;2;100;100;100m", .{}); while (i < self.level) : (i += 1) { term.print("│ ", .{}); } term.print("{}", .{term.Attr{ .reset = true }}); switch (elem) 
{ .token => { const kind = self.tokens[self.token_count]; const span = self.spans[self.token_count]; self.token_count += 1; try debug.printToken(self.allocator, kind, span, self.src); term.println("", .{}); }, .node => { const kind = self.cst.nodes.items(.kind)[self.node_count]; const name = try std.fmt.allocPrint(self.allocator, "{?}", .{kind}); defer self.allocator.free(name); term.printAttr("{s}", .{name[5..]}, .{ .col = .magenta }); term.println("", .{}); self.level += 1; self.node_count += 1; // recurse try self.printElements(self.node_count - 1); self.level -= 1; }, } } } }; // @TODO: Print specialized nodes // pub fn printNodeName( // allocator: Allocator, // node: Cst.Node, // tokens: []const parse.TokenKind, // spans: []const parse.SpanData, // src: []const u8, // ) !bool { // const name = try std.fmt.allocPrint(allocator, "{?}", .{node.kind}); // defer allocator.free(name); // term.printColor("{s}", .{name[5..]}, .Magenta); // switch (node.kind) { // .path => { // term.print("{} -> ", .{term.Color.White}); // defer term.println("{}", .{term.Color.Reset}); // for (node.children.items) |el| { // switch (el.kind) { // .token => switch (tokens[el.index]) { // .ident => try printLexeme(allocator, spans[el.index], src), // .punct_dblColon => term.print("::", .{}), // .newline => {}, // else => unreachable, // }, // .node => unreachable, // } // } // }, // .string => { // term.print("{} -> \"", .{term.Color.White}); // defer term.println("\"{}", .{term.Color.Reset}); // for (node.children.items) |el| { // switch (el.kind) { // .token => switch (tokens[el.index]) { // .string_open, .string_close, .newline => {}, // .string_literal, // .esc_quote_single, // .esc_quote_double, // .esc_ascii_newline, // .esc_ascii_carriageReturn, // .esc_ascii_tab, // .esc_ascii_backslash, // .esc_ascii_null, // .esc_ascii_escape, // => try printLexeme(allocator, spans[el.index], src), // else => unreachable, // }, // .node => unreachable, // } // } // }, // .literal_int => { // term.print("{} -> ", .{term.Color.White}); // defer term.println("{}", .{term.Color.Reset}); // for (node.children.items) |el| { // switch (el.kind) { // .token => { // const ty = switch (tokens[el.index]) { // .int_dec => "dec", // .int_hex => "hex", // .int_oct => "oct", // .int_bin => "bin", // .newline => continue, // else => unreachable, // }; // term.print("{s}: ", .{ty}); // try printLexeme(allocator, spans[el.index], src); // }, // .node => unreachable, // } // } // }, // .literal_float => { // term.print("{} -> ", .{term.Color.White}); // defer term.println("{}", .{term.Color.Reset}); // for (node.children.items) |el| { // switch (el.kind) { // .token => switch (tokens[el.index]) { // .newline => {}, // .float => try printLexeme(allocator, spans[el.index], src), // else => unreachable, // }, // .node => unreachable, // } // } // }, // else => { // term.println("", .{}); // return true; // }, // } // return false; // }
fexc/src/parse/Cst.zig
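// Sketch: `NodeBuilder` is consumed bottom-up — children are appended into their
// parent (which then owns them), and `buildCst` flattens the finished tree into the
// MultiArrayList plus the element stream. A hypothetical usage sketch, assuming the
// file above is importable as "Cst.zig" from a sibling file:
const std = @import("std");
const Cst = @import("Cst.zig");

test "build a tiny tree: root > literal_int" {
    const allocator = std.testing.allocator;

    var root = Cst.NodeBuilder{ .kind = .root };
    defer root.deinit(allocator); // also frees the child builder appended below

    var int_node = Cst.NodeBuilder{ .kind = .literal_int };
    try int_node.appendToken(allocator); // the integer token itself
    try root.appendNode(allocator, int_node); // ownership moves into `root`

    var cst = try Cst.buildCst(allocator, root);
    defer cst.deinit(allocator);

    // Two nodes in total: the root and the literal.
    try std.testing.expectEqual(@as(usize, 2), cst.nodes.len);
}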
const xcb_connection_t = opaque {}; const xcb_window_t = opaque {}; const xcb_colormap_t = opaque {}; const xcb_visualid_t = opaque {}; pub const Defines = struct { pub const CopyFromParent = 0; pub const Atom = enum(u32) { String = 31, WmName = 39, }; pub const Config = enum(u32) { WindowWidth = 4, WindowHeight = 8, }; }; pub const Connection = xcb_connection_t; pub const Setup = extern struct { status: u8, pad0: u8, protocol_major_version: u16, protocol_minor_version: u16, length: u16, release_number: u32, resource_id_base: u32, resource_id_mask: u32, motion_buffer_size: u32, vendor_len: u16, maximum_request_length: u16, roots_len: u8, pixmap_formats_len: u8, image_byte_order: u8, bitmap_format_bit_order: u8, bitmap_format_scanline_unit: u8, bitmap_format_scanline_pad: u8, min_keycode: KeyCode, max_keycode: KeyCode, pad1: [4]u8, }; pub const SetupIterator = extern struct { data: ?*Setup, rem: c_int, index: c_int, }; pub const Window = u32; pub const WindowClass = enum(u16) { CopyFromParent, InputOutput, InputOnly, }; pub const Colormap = u32; pub const VisualId = u32; pub const Screen = extern struct { root: Window, default_colormap: Colormap, white_pixel: u32, black_pixel: u32, current_input_masks: u32, width_in_pixels: u16, height_in_pixels: u16, width_in_millimeters: u16, height_in_millimeters: u16, min_installed_maps: u16, max_installed_maps: u16, root_visual: VisualId, backing_stores: u8, save_unders: u8, root_depth: u8, allowed_depths_len: u8, }; pub const ScreenIterator = extern struct { data: ?*Screen, rem: c_int, index: c_int, }; pub const VoidCookie = extern struct { sequence: c_uint, }; pub const Cw = enum(u32) { BackPixmap = 1, BackPixel = 2, BorderPixmap = 4, BorderPixel = 8, BitGravity = 16, WinGravity = 32, BackingStore = 64, BackingPlanes = 128, BackingPixel = 256, OverrideRedirect = 512, SaveUnder = 1024, EventMask = 2048, DontPropagate = 4096, Colormap = 8192, Cursor = 16384, }; pub const EventMask = enum(u32) { NoEvent = 0, KeyPress = 1, KeyRelease = 2, ButtonPress = 4, ButtonRelease = 8, EnterWindow = 16, LeaveWindow = 32, PointerMotion = 64, StructureNotify = 131072, FocusChange = 2097152, }; pub const EventType = enum(u32) { KeyPress = 2, KeyRelease = 3, ButtonPress = 4, ButtonRelease = 5, MotionNotify = 6, EnterNotify = 7, LeaveNotify = 8, FocusIn = 9, FocusOut = 10, MapNotify = 19, ReparentNotify = 21, ConfigureNotify = 22, ClientMessage = 33, }; pub const GenericEvent = extern struct { response_type: u8, pad0: u8, sequence: u16, pad: [7]u32, full_sequence: u32, }; pub const Timestamp = u32; pub const KeyCode = u8; pub const KeySym = u32; pub const KeyPressEvent = struct { response_type: u8, detail: KeyCode, sequence: u16, time: Timestamp, root: Window, event: Window, child: Window, root_x: i16, root_y: i16, event_x: i16, event_y: i16, state: u16, same_screen: u8, pad0: u8, }; pub const KeyReleaseEvent = KeyPressEvent; pub const Button = u8; pub const ButtonPressEvent = extern struct { response_type: u8, detail: Button, sequence: u16, time: Timestamp, root: Window, event: Window, child: Window, root_x: i16, root_y: i16, event_x: i16, event_y: i16, state: u16, same_screen: u8, }; pub const ButtonReleaseEvent = ButtonPressEvent; pub const MotionNotifyEvent = extern struct { response_type: u8, detail: Button, sequence: u16, time: Timestamp, root: Window, event: Window, child: Window, root_x: i16, root_y: i16, event_x: i16, event_y: i16, state: u16, same_screen: u8, pad0: u8, }; pub const ClientMessageData = extern union { data8: [20]u8, data16: [10]u16, data32: 
[5]u32, }; pub const ClientMessageEvent = extern struct { response_type: u8, format: u8, sequence: u16, window: Window, _type: Atom, data: ClientMessageData, }; pub const NotifyMode = enum(u8) { normal, grab, ungrab, while_grabbed, }; pub const FocusInEvent = extern struct { response_type: u8, detail: u8, sequence: u16, event: Window, mode: NotifyMode, pad0: [3]u8, }; pub const FocusOutEvent = FocusInEvent; pub const EnterNotifyEvent = extern struct { response_type: u8, detail: u8, sequence: u16, time: Timestamp, root: Window, event: Window, child: Window, root_x: i16, root_y: i16, event_x: i16, event_y: i16, state: u16, mode: u8, same_screen_focus: u8, }; pub const LeaveNotifyEvent = EnterNotifyEvent; pub const ConfigureNotifyEvent = struct { response_type: u8, pad0: u8, sequence: u16, event: Window, window: Window, above_sibling: Window, x: i16, y: i16, width: u16, height: u16, border_width: u16, override_redirect: u8, pad1: u8, }; pub const Atom = u32; pub const PropMode = enum(u8) { Replace, Prepend, Append, }; pub const InternAtomCookie = VoidCookie; pub const InternAtomReply = extern struct { response: u8, pad0: u8, sequence: u16, length: u32, atom: Atom, }; pub const GenericError = opaque {}; pub const QueryKeymapCookie = VoidCookie; pub const QueryKeymapReply = extern struct { response_type: u8, pad0: u8, sequence: u16, length: u32, keys: [32]u8, }; pub const KeyboardMappingCookie = VoidCookie; pub const KeyboardMappingReply = extern struct { response_type: u8, keysyms_per_keycode: u8, sequence: u16, length: u32, pad0: [24]u8, }; pub const GeometryCookie = VoidCookie; pub const GeometryReply = extern struct { response_type: u8, depth: u8, sequence: u16, length: u16, root: Window, x: i16, y: i16, width: u16, height: u16, border_width: u16, }; extern "xcb" fn xcb_connect(displayname: [*]const u8, screenp: ?*c_int) ?*Connection; pub fn connect(displayname: []const u8, screenp: ?*c_int) !*Connection { if (xcb_connect(displayname.ptr, screenp)) |connection| { return connection; } return error.CannotConnectToServer; } extern "xcb" fn xcb_disconnect(c: *Connection) void; pub fn disconnect(c: *Connection) void { return xcb_disconnect(c); } extern "xcb" fn xcb_get_setup(c: *Connection) ?*Setup; pub fn getSetup(c: *Connection) !*Setup { if (xcb_get_setup(c)) |setup| { return setup; } return error.CannotRetriveSetup; } extern "xcb" fn xcb_setup_roots_iterator(r: *const Setup) ScreenIterator; pub fn setupRootsIterator(r: *const Setup) ScreenIterator { return xcb_setup_roots_iterator(r); } extern "xcb" fn xcb_generate_id(c: *Connection) Window; pub fn generateId(c: *Connection) Window { return xcb_generate_id(c); } extern "xcb" fn xcb_create_window( c: *Connection, depth: u8, wid: Window, parent: Window, x: i16, y: i16, width: u16, height: u16, border_width: u16, _class: u16, visual: VisualId, value_mask: u32, value_list: ?[*]const u32, ) VoidCookie; pub fn createWindow( c: *Connection, depth: u8, wid: Window, parent: Window, x: i16, y: i16, width: u16, height: u16, border_width: u16, _class: WindowClass, visual: VisualId, value_mask: u32, value_list: ?[*]const u32, ) VoidCookie { return xcb_create_window( c, depth, wid, parent, x, y, width, height, border_width, @enumToInt(_class), visual, value_mask, value_list, ); } extern "xcb" fn xcb_destroy_window(c: *Connection, window: Window) void; pub fn destroyWindow(c: *Connection, window: Window) void { return xcb_destroy_window(c, window); } extern "xcb" fn xcb_change_window_attributes( c: *Connection, window: Window, value_mask: u32, value_list: 
?[*]const u32, ) VoidCookie; pub fn changeWindowAttributes(c: *Connection, window: Window, value_mask: u32, value_list: []const u32) VoidCookie { return xcb_change_window_attributes(c, window, value_mask, value_list.ptr); } extern "xcb" fn xcb_configure_window( c: *Connection, window: Window, value_mask: u16, value_list: ?*const anyopaque, ) VoidCookie; pub fn configureWindow( c: *Connection, window: Window, value_mask: u16, value_list: ?*const anyopaque, ) VoidCookie { return xcb_configure_window(c, window, value_mask, value_list); } extern "xcb" fn xcb_change_property( c: *Connection, mode: u8, window: Window, property: Atom, _type: Atom, format: u8, data_len: u32, data: ?*const anyopaque, ) VoidCookie; pub fn changeProperty( c: *Connection, mode: PropMode, window: Window, property: Atom, _type: Atom, format: u8, data_len: u32, data: ?*const anyopaque, ) VoidCookie { return xcb_change_property(c, @enumToInt(mode), window, property, _type, format, data_len, data); } extern "xcb" fn xcb_map_window(c: *Connection, window: Window) VoidCookie; pub fn mapWindow(c: *Connection, window: Window) VoidCookie { return xcb_map_window(c, window); } extern "xcb" fn xcb_flush(c: *Connection) c_int; pub fn flush(c: *Connection) i32 { return @intCast(i32, xcb_flush(c)); } extern "xcb" fn xcb_wait_for_event(c: *Connection) ?*GenericEvent; pub fn waitForEvent(c: *Connection) ?*GenericEvent { return xcb_wait_for_event(c); } extern "xcb" fn xcb_poll_for_event(c: *Connection) ?*GenericEvent; pub fn pollForEvent(c: *Connection) ?*GenericEvent { return xcb_poll_for_event(c); } pub fn eventResponse(event: *GenericEvent) EventType { return @intToEnum(EventType, event.response_type & 0x7f); } extern "xcb" fn xcb_intern_atom(c: *Connection, only_if_exists: u8, name_len: u16, name: ?[*]const u8) InternAtomCookie; pub fn internAtom(c: *Connection, only_if_exists: bool, name_len: u16, name: []const u8) InternAtomCookie { return xcb_intern_atom(c, @boolToInt(only_if_exists), name_len, name.ptr); } extern "xcb" fn xcb_intern_atom_reply(c: *Connection, cookie: InternAtomCookie, e: ?**GenericError) *InternAtomReply; pub fn internAtomReply(c: *Connection, cookie: InternAtomCookie, e: ?**GenericError) *InternAtomReply { return xcb_intern_atom_reply(c, cookie, e); } extern "xcb" fn xcb_query_keymap(c: *Connection) QueryKeymapCookie; pub fn queryKeymap(c: *Connection) QueryKeymapCookie { return xcb_query_keymap(c); } extern "xcb" fn xcb_query_keymap_reply(c: *Connection, cookie: QueryKeymapCookie, e: ?**GenericError) *QueryKeymapReply; pub fn queryKeymapReply(c: *Connection, cookie: QueryKeymapCookie, e: ?**GenericError) *QueryKeymapReply { return xcb_query_keymap_reply(c, cookie, e); } extern "xcb" fn xcb_get_keyboard_mapping(c: *Connection, first_keycode: KeyCode, count: u8) KeyboardMappingCookie; pub fn getKeyboardMapping(c: *Connection, first_keycode: KeyCode, count: u8) KeyboardMappingCookie { return xcb_get_keyboard_mapping(c, first_keycode, count); } extern "xcb" fn xcb_get_keyboard_mapping_reply(c: *Connection, cookie: KeyboardMappingCookie, e: ?**GenericError) *KeyboardMappingReply; pub fn getKeyboardMappingReply(c: *Connection, cookie: KeyboardMappingCookie, e: ?**GenericError) *KeyboardMappingReply { return xcb_get_keyboard_mapping_reply(c, cookie, e); } extern "xcb" fn xcb_get_keyboard_mapping_keysyms(r: *const KeyboardMappingReply) [*]KeySym; pub fn getKeyboardMappingKeysyms(r: *const KeyboardMappingReply) [*]KeySym { return xcb_get_keyboard_mapping_keysyms(r); } extern "xcb" fn xcb_get_geometry(c: *Connection, 
window: Window) GeometryCookie; pub fn getGeometry(c: *Connection, window: Window) GeometryCookie { return xcb_get_geometry(c, window); } extern "xcb" fn xcb_get_geometry_reply(c: *Connection, cookie: GeometryCookie, e: ?**GenericError) *GeometryReply; pub fn getGeometryReply(c: *Connection, cookie: GeometryCookie, e: ?**GenericError) *GeometryReply { return xcb_get_geometry_reply(c, cookie, e); }
src/xcb/bindings.zig
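// Sketch: the bindings above compose in the usual XCB order — connect, pick a
// screen off the setup iterator, generate an id, create and map a window, flush,
// then block on events. A hypothetical usage sketch, assuming the file above is
// importable as "bindings.zig", that libxcb is linked by the build, and that the
// display name ":0" is appropriate for the environment:
const std = @import("std");
const xcb = @import("bindings.zig");

pub fn main() !void {
    const conn = try xcb.connect(":0", null);
    defer xcb.disconnect(conn);

    const setup = try xcb.getSetup(conn);
    const screen = xcb.setupRootsIterator(setup).data orelse return error.NoScreen;

    const window = xcb.generateId(conn);
    const values = [_]u32{@enumToInt(xcb.EventMask.StructureNotify)};
    _ = xcb.createWindow(
        conn,
        xcb.Defines.CopyFromParent, // depth
        window,
        screen.root,
        0, 0, // x, y
        640, 480, // width, height
        0, // border width
        .InputOutput,
        screen.root_visual,
        @enumToInt(xcb.Cw.EventMask),
        &values,
    );
    _ = xcb.mapWindow(conn, window);
    _ = xcb.flush(conn);

    while (xcb.waitForEvent(conn)) |event| {
        // Compare the raw response type rather than using eventResponse(), since
        // event codes outside the EventType enum (e.g. errors) may still arrive.
        // (A full program would also free each returned event.)
        if (event.response_type & 0x7f == @enumToInt(xcb.EventType.ConfigureNotify)) break;
    }
}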
const std = @import("std"); const string = []const u8; const range = @import("range").range; const input = @embedFile("../input/day14.txt"); const Rule = struct { left: string, right: u8, }; const CountMap = std.AutoHashMap(u8, u64); const MemoMap = std.AutoHashMap([2]u8, []?CountMap); pub fn main() !void { // // part 1 { var iter = std.mem.split(u8, input, "\n"); var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const alloc = &arena.allocator; var all_rules = std.ArrayList(Rule).init(alloc); defer all_rules.deinit(); // ingest input var polymer = std.ArrayList(u8).init(alloc); defer polymer.deinit(); try polymer.appendSlice(iter.next().?); std.debug.assert(iter.next().?.len == 0); while (iter.next()) |line| { if (line.len == 0) continue; try all_rules.append(Rule{ .left = line[0..2], .right = line[6], }); } // run program const steps = 10; for (range(steps)) |_| { var i: usize = 0; blk: while (true) { for (all_rules.items) |item| { if (i >= polymer.items.len - 1) { break :blk; } if (std.mem.eql(u8, polymer.items[i..][0..2], item.left)) { try polymer.insert(i + 1, item.right); i += 2; } } } } // count the character occurences var counts = CountMap.init(alloc); defer counts.deinit(); for (polymer.items) |c| { const result = try counts.getOrPut(c); if (result.found_existing) { result.value_ptr.* += 1; } else { result.value_ptr.* = 0; } } // find the max and min var max: u64 = 0; var min: u64 = std.math.maxInt(u64); var iter2 = counts.iterator(); while (iter2.next()) |entry| { const value = entry.value_ptr.*; if (value > max) max = value; if (value < min) min = value; } // find the difference std.debug.print("{d}\n", .{max - min}); } // part 2 { var iter = std.mem.split(u8, input, "\n"); var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const alloc = &arena.allocator; // ingest input var all_rules = std.ArrayList(Rule).init(alloc); defer all_rules.deinit(); const start_string = iter.next().?; std.debug.assert(iter.next().?.len == 0); while (iter.next()) |line| { if (line.len == 0) continue; try all_rules.append(Rule{ .left = line[0..2], .right = line[6], }); } // setup counts map var counts = CountMap.init(alloc); defer counts.deinit(); for (all_rules.items) |item| { try counts.put(item.left[0], 0); try counts.put(item.left[1], 0); try counts.put(item.right, 0); } for (start_string) |c| { const result = counts.getEntry(c); result.?.value_ptr.* += 1; } // setup memoization var memo = MemoMap.init(alloc); defer memo.deinit(); const steps = 40; // setup memoization { var kiter1 = counts.keyIterator(); while (kiter1.next()) |c1| { var kiter2 = counts.keyIterator(); while (kiter2.next()) |c2| { var list = std.ArrayList(?CountMap).init(alloc); defer list.deinit(); for (range(steps)) |_| { try list.append(null); } try memo.put(.{ c1.*, c2.* }, list.toOwnedSlice()); } } } // run program for (range(start_string.len - 1)) |_, i| { try mergeMaps(&counts, try findCounts(&memo, alloc, all_rules.items, start_string[i..][0..2], steps)); } // find the max and min var max: u64 = 0; var min: u64 = std.math.maxInt(u64); var iter2 = counts.iterator(); while (iter2.next()) |entry| { const value = entry.value_ptr.* - 1; if (value > max) max = value; if (value < min) min = value; } // find the difference std.debug.print("{d}\n", .{max - min}); } } fn findCounts(memo: *MemoMap, alloc: *std.mem.Allocator, rules: []const Rule, slice: string, max_depth: u64) std.mem.Allocator.Error!CountMap { const memo_list = 
memo.getEntry(slice[0..2].*).?.value_ptr;
    // Depth 0 contributes nothing; check it before touching the memo slice,
    // otherwise `max_depth - 1` underflows when the recursion bottoms out.
    if (max_depth == 0) {
        return CountMap.init(alloc);
    }
    if (memo_list.*[max_depth - 1]) |val| {
        return val;
    }
    for (rules) |item| {
        if (std.mem.eql(u8, slice, item.left)) {
            var counts = CountMap.init(alloc);
            try counts.put(item.right, 1);
            try mergeMaps(&counts, try findCounts(memo, alloc, rules, &[_]u8{ slice[0], item.right }, max_depth - 1));
            try mergeMaps(&counts, try findCounts(memo, alloc, rules, &[_]u8{ item.right, slice[1] }, max_depth - 1));
            memo_list.*[max_depth - 1] = counts;
            return counts;
        }
    }
    unreachable;
}

fn mergeMaps(a: *CountMap, b: CountMap) !void {
    var iter = b.iterator();
    while (iter.next()) |entry| {
        const k = entry.key_ptr.*;
        const v = entry.value_ptr.*;
        const result = try a.getOrPut(k);
        if (result.found_existing) {
            result.value_ptr.* += v;
        } else {
            result.value_ptr.* = v;
        }
    }
}
src/day14.zig
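// Sketch: part 2 above never materialises the polymer; it only ever adds
// per-character tallies together, so the core of the algorithm is merging
// AutoHashMap(u8, u64) count maps. A minimal standalone illustration of that merge,
// mirroring `mergeMaps` above (names here are local to this sketch):
const std = @import("std");

fn addCounts(dst: *std.AutoHashMap(u8, u64), src: std.AutoHashMap(u8, u64)) !void {
    var it = src.iterator();
    while (it.next()) |entry| {
        const gop = try dst.getOrPut(entry.key_ptr.*);
        if (!gop.found_existing) gop.value_ptr.* = 0;
        gop.value_ptr.* += entry.value_ptr.*;
    }
}

test "merging two count maps adds per-character tallies" {
    var a = std.AutoHashMap(u8, u64).init(std.testing.allocator);
    defer a.deinit();
    var b = std.AutoHashMap(u8, u64).init(std.testing.allocator);
    defer b.deinit();

    try a.put('N', 2);
    try b.put('N', 3);
    try b.put('C', 1);

    try addCounts(&a, b);
    try std.testing.expectEqual(@as(u64, 5), a.get('N').?);
    try std.testing.expectEqual(@as(u64, 1), a.get('C').?);
}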
// https://news.ycombinator.com/item?id=12334270 const std = @import("std"); const print = std.debug.print; var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); var allocator = &arena.allocator; // Tree construction =========================================================== const NodeTag = enum { internal, leaf }; const Node = union(NodeTag) { internal: struct { left: *Node, right: *Node, count: u64 }, leaf: struct { byte: u8, count: u64 }, fn get_count(self: Node) u64 { return switch (self) { .leaf => self.leaf.count, .internal => self.internal.count }; } fn print(self: Node) void { if (self == .leaf) { print("Leaf node:\n Count: {}\n Byte: {} « {c} »\n\n", .{ self.leaf.count, self.leaf.byte, self.leaf.byte}); } else if (self == .internal) { print("Internal node:\n Count: {}\n Left: {}\n Right: {}\n\n", .{self.internal.count, self.internal.left, self.internal.right}); } } fn print_recursive(self: *Node) void { self.print(); if (self.* == .internal) { self.internal.left.print_recursive(); self.internal.right.print_recursive(); } } }; fn compare_nodes_desc(ctx: void, a: *Node, b: *Node) bool { return a.get_count() > b.get_count(); } /// Build Huffman (binary) tree. // todo: read files through 4K / page-sized chunks. // Would this matter? Buffer size for the compressed file would still be // large. Can't chunk compressed buffer easily since header data at the // start of the file changes at end of processing. // todo: use package-merge algorithm to ensure length-limited codes. fn tree_build(text: []const u8) !*Node { // Build ArrayList of pointers to alloc'd leaf nodes. var nodes = std.ArrayList(*Node).init(allocator); outer: for (text) |byte| { for (nodes.items) |node| { if (byte == node.leaf.byte) { node.leaf.count += 1; continue :outer; } } // Reached if byte not in `nodes`. var node: *Node = try allocator.create(Node); node.* = Node { .leaf = .{ .byte = byte, .count = 1 } }; try nodes.append(node); } for (nodes.items) |n| { if (n.* == .leaf) { print("{X} {c} = {}\n", .{n.leaf.byte, n.leaf.byte, n.get_count()}); } } // Build tree. while (nodes.items.len > 1) { std.sort.sort(*Node, nodes.items, {}, compare_nodes_desc); var child_left: *Node = nodes.pop(); var child_right: *Node = nodes.pop(); var node: *Node = try allocator.create(Node); node.* = Node { .internal = .{ .left = child_left, .right = child_right, .count = child_left.get_count() + child_right.get_count() } }; try nodes.append(node); } return nodes.pop(); // Tree root. } // BitList ===================================================================== /// An ArrayList of bytes that can have bits appended to it directly. pub const BitList = struct { bytes: std.ArrayList(u8) = undefined, bit_index: u32 = 0, pub fn init(a: *std.mem.Allocator) !BitList { return BitList {.bytes = std.ArrayList(u8).init(a)}; } /// Shift bits into list, left-to-right. Add bytes if needed. 
pub fn append_bit(self: *BitList, bit: u1) !void { if (self.bytes.items.len*8 < self.bit_index+1) { try self.bytes.append(0); } if (bit == 1) { self.bytes.items[self.bit_index/8] |= @as(u8, 128) >> self.get_trailing_bit_count(); } self.bit_index += 1; } pub fn get_trailing_bit_count(self: *BitList) u3 { return @intCast(u3, self.bit_index-(self.bit_index/8*8)); } pub fn append_code_word(self: *BitList, cw: CodeWord) !void { var i: u8 = 0; while (i < cw.used) : (i += 1) { try self.append_bit(cw.get_bit(i)); } } pub fn append_bytes(self: *BitList, bytes: []u8) !void { for (bytes) |byte| { var i: u8 = 0; while (i < 8) : (i += 1) { try self.append_bit(@intCast(u1, (byte >> @intCast(u3, 7 - i)) & 1)); } } } // pub fn append(self: *BitList, comptime T: type, data: T) !void { // var i: u32 = 0; // while (i < @sizeOf(T)) : (i += 1) { // try self.append_bit(@intCast(u1, (data >> (@sizeOf(T)-1) - i) & 1)); // } // } pub fn append_byte(self: *BitList, byte: u8) !void { var i: u8 = 0; while (i < 8) : (i += 1) { try self.append_bit(@intCast(u1, (byte >> @intCast(u3, 7 - i)) & 1)); } } pub fn append_padding(self: *BitList, count: u32) !void { var i: i32 = 0; while (i < count) : (i += 1) { try self.append_bit(0); } } }; // Table construction ========================================================== const Entry = struct { byte: u8, bit_count: u6 }; /// Sort first by bit count, then by numerical precedence. Ascending. fn compare_entries(ctx: void, a: Entry, b: Entry) bool { if (a.bit_count == b.bit_count) { return (a.byte < b.byte); } else { return (a.bit_count < b.bit_count); } } /// Fill `entries` list by walking the tree recursively. fn entry_list_build(node: *Node, bit_count: u6, entries: *std.ArrayList(Entry)) std.mem.Allocator.Error!void { if (node.* == .internal) { try entry_list_build(node.internal.left, bit_count+1, entries); try entry_list_build(node.internal.right, bit_count+1, entries); } else if (node.* == .leaf) { try entries.append(Entry{.byte = node.leaf.byte, .bit_count = bit_count}); } } /// The pattern of bits to map a byte to. Essentially just a big integer and /// the number of bits that are used. const CodeWord = struct { pattern: u64 = undefined, used: u8 = undefined, /// Get bit at index of pattern. Left to right. fn get_bit(self: CodeWord, index: u8) u1 { // (Subtract one from `.used` as indexing is zero-based.) return @intCast(u1, ((self.pattern) >> @intCast(u6, self.used-1-index)) & 1); } fn print_bits(self: CodeWord) void { var j: u8 = 0; while (j < self.used) : (j += 1) { print("{}", .{self.get_bit(j)}); } print("\n", .{}); } }; /// Although `map` is all that is needed to encode the data, `entries` provides /// the orer for the bytes, which is required to build the canonical header. const Table = struct { map: [256]?CodeWord, entries: std.ArrayList(Entry) }; /// Build and return canonical table from the Huffman tree. // todo: also return sorted entries and bit-length counts so table can be // canonically encoded in the file. fn table_build(root_node: *Node) !*Table { // Build and sort list of entries. var entries = std.ArrayList(Entry).init(allocator); try entry_list_build(root_node, 0, &entries); std.sort.sort(Entry, entries.items, {}, compare_entries); // Build canonical table from sorted entries. // First symbol: // - Pattern is the same length of its original, but all zeros. // // Subsequent symbols: // - Pattern is incremented by one mathematically. // - If original pattern is longer than previous symbol's, everything is // shifted over to the right by the difference. 
Right shift and update // `used` bits. var map = [_]?CodeWord {null} ** 256; var used: u8 = entries.items[0].bit_count; var pattern: u64 = 0; map[entries.items[0].byte] = CodeWord{ .pattern=pattern, .used=used }; for (entries.items[1..]) |entry| { pattern += 1; var bc_diff: u8 = entry.bit_count - used; if (bc_diff > 0) { pattern <<= @intCast(u6, bc_diff); used += bc_diff; } map[entry.byte] = CodeWord{.pattern=pattern, .used=used}; } var t = try allocator.create(Table); t.* = Table { .map=map, .entries=entries }; return t; } // Checksum ==================================================================== fn hash_FNV1a(data: []const u8) u32 { var hash: u32 = 2166136261; for (data) |byte| { hash = (byte ^ hash) *% 16777619; } return hash; } // ============================================================================= /// Encode entire file / stream. fn encode(text: []const u8) !void { var table = try table_build(try tree_build(text)); var i: u8 = 0; while (i < 255) : (i += 1) { if (table.map[i]) |element| { print("{} {c} = ", .{i, i}); element.print_bits(); } } var data = try BitList.init(allocator); // Write signature and make room for header fields. var signature = [_]u8{'R','Z'}; try data.append_bytes(signature[0..]); try data.append_padding(49); // // => Add count of bit count values. // // => Add bit count values. // // var bc_counts = [_]u6 {0} ** 256; // var highest_bit_count: u6 = 0; // // for (table.entries.items) |e| { // bc_counts[e.bit_count] += 1; // if (e.bit_count > highest_bit_count) { // highest_bit_count = e.bit_count; // } // } // try data.append(u6, highest_bit_count); // for (bc_counts[0..highest_bit_count]) |bc_count| { // data.append(u6, bc_count); // } // // // // Append present bytes in order. // for (table.entries.items) |e| { // try data.append_byte(e.byte); // } // // // Encode and append data. // for (text) |byte| { // try data.append_code_word(table.map[byte].?); // } // // // Calculate header fields, add to header. // const trailing_bit_count: u3 = data.get_trailing_bit_count(); // for (data.bytes.items) |item| { print("{X:0>2}", .{ item }); } print("\n", .{}); } /// Decode entire file / stream. fn decode(data: []const u8) !void { // todo: decode using u64 and bitmasks. // iterate through "table" (array) from most to least likely, ie lowest // numerical value to highest. // No need for hash table, all values will be numerically unique. // Bitmasked values will have to be shifted? // Could bitshift values in table instead. } pub fn main() !void { print("\n\n============\n----\n", .{}); try encode(@embedFile("main.zig")); arena.deinit(); }
src/main.zig
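// Sketch: the canonical numbering in `table_build` above can be checked by hand —
// with symbols sorted by (bit length, value), the first code is all zeros at its own
// length, and each following code is the previous one plus one, left-shifted
// whenever the bit length grows. A standalone sketch of just that numbering step
// (hypothetical helper, fixed inputs, bit lengths assumed already sorted):
const std = @import("std");

fn canonicalPatterns(comptime n: usize, bit_counts: [n]u6, out: *[n]u64) void {
    var pattern: u64 = 0;
    var used: u6 = bit_counts[0];
    out[0] = pattern;
    for (bit_counts[1..]) |bc, i| {
        pattern += 1;
        if (bc > used) {
            pattern <<= (bc - used);
            used = bc;
        }
        out[i + 1] = pattern;
    }
}

test "canonical codes for bit lengths 2,2,3,3" {
    var out: [4]u64 = undefined;
    canonicalPatterns(4, [4]u6{ 2, 2, 3, 3 }, &out);
    try std.testing.expectEqualSlices(u64, &[_]u64{ 0b00, 0b01, 0b100, 0b101 }, &out);
}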
const std = @import("std"); const debug = std.debug; const mem = std.mem; const fmt = std.fmt; const log = std.log; const testing = std.testing; const Allocator = mem.Allocator; const StringHashMap = std.StringHashMap; const ArrayList = std.ArrayList; const ComptimeStringMap = std.ComptimeStringMap; const intcode = @import("intcode"); const combination = @import("combination"); const IntcodeProgram = intcode.IntcodeProgram; const Empty = struct {}; const file_input = @embedFile("../input.txt"); // found manually, by playing the game const first_checkpoint = \\north \\take sand \\north \\take space heater \\east \\take semiconductor \\west \\south \\south \\east \\take ornament \\south \\take festive hat \\east \\take asterisk \\south \\west \\take food ration \\east \\east \\take cake \\west \\north \\west \\north \\west \\west \\north \\north ; const repl = false; // do not pick up any of these items const blacklist = ComptimeStringMap(Empty, .{ .{ "photons", Empty{} }, .{ "escape pod", Empty{} }, .{ "giant electromagnet", Empty{} }, .{ "molten lava", Empty{} }, .{ "infinite loop", Empty{} }, }); pub fn part1(allocator: Allocator) !i64 { var code: []const i64 = try intcode.parseInput(allocator, file_input); defer allocator.free(code); var machine = try Machine.init(allocator, code); defer machine.deinit(); var it = mem.split(u8, first_checkpoint[0..], "\n"); while (it.next()) |line| { _ = try machine.runInstruction(line); } const stdout = std.io.getStdOut().writer(); { const resp = try machine.runInstruction("crack west"); try stdout.writeAll(resp); } if (repl) { var line_buf: [128]u8 = undefined; const stdin = &std.io.getStdIn(); { const resp = try machine.runInstruction("inv"); try stdout.writeAll(resp); } while (stdin.reader().readUntilDelimiterOrEof(line_buf[0..], '\n')) |segment| { if (segment == null) break; const user_input = segment.?; const resp = try machine.runInstruction(user_input); try stdout.writeAll(resp); } else |err| return err; } return 25165890; } const Machine = struct { const Self = @This(); allocator: Allocator, arena: std.heap.ArenaAllocator, program: *IntcodeProgram, backup_program: *IntcodeProgram, items: StringHashMap(Empty), instruction_log: ArrayList(u8), input: ArrayList(u8), output: ArrayList(u8), pub fn init(allocator: Allocator, code: []const i64) !Self { var program = try allocator.create(IntcodeProgram); program.* = try IntcodeProgram.init(allocator, code); var backup_program = try allocator.create(IntcodeProgram); backup_program.* = try IntcodeProgram.init(allocator, code); var arena = std.heap.ArenaAllocator.init(allocator); var items = StringHashMap(Empty).init(allocator); var instruction_log = try ArrayList(u8).initCapacity(allocator, 64); var output = ArrayList(u8).init(allocator); var input = ArrayList(u8).init(allocator); return Self{ .allocator = allocator, .arena = arena, .program = program, .backup_program = backup_program, .items = items, .instruction_log = instruction_log, .input = input, .output = output, }; } pub fn deinit(self: *Self) void { self.program.deinit(); self.allocator.destroy(self.program); self.backup_program.deinit(); self.allocator.destroy(self.backup_program); self.arena.deinit(); self.items.deinit(); self.instruction_log.deinit(); self.output.deinit(); self.input.deinit(); } pub fn runInstruction(self: *Self, user_input: []const u8) (Allocator.Error || fmt.BufPrintError)![]const u8 { // reset output self.output.items.len = 0; // log instruction try self.instruction_log.appendSlice(user_input); try 
self.instruction_log.append('\n'); // implement our own special commands and preprocessing if (mem.startsWith(u8, user_input, "take ")) { const some_item = user_input[5..]; if (blacklist.has(some_item)) { log.warn("item '{s}' is blacklisted, not picking it up!", .{some_item}); return self.output.items; } log.debug("picking up '{s}'", .{some_item}); var copy_some_item = try self.arena.allocator().alloc(u8, some_item.len); mem.copy(u8, copy_some_item, some_item); try self.items.put(copy_some_item, Empty{}); } else if (mem.startsWith(u8, user_input, "drop ")) { const some_item = user_input[5..]; log.debug("dropping '{s}'", .{some_item}); if (self.items.fetchRemove(some_item)) |entry| { self.arena.allocator().free(entry.key); } } else if (mem.startsWith(u8, user_input, "save")) { log.debug("saving state", .{}); self.backup_program.deinit(); self.allocator.destroy(self.backup_program); self.backup_program = try self.allocator.create(IntcodeProgram); self.backup_program.* = try self.program.clone(self.allocator); return self.output.items; } else if (mem.startsWith(u8, user_input, "restore")) { log.debug("restoring state", .{}); self.program.deinit(); self.allocator.destroy(self.program); self.program = try self.allocator.create(IntcodeProgram); self.program.* = try self.backup_program.clone(self.allocator); return self.output.items; } else if (mem.startsWith(u8, user_input, "serialize")) { log.debug("dumping instruction_log", .{}); return self.instruction_log.items; } else if (mem.startsWith(u8, user_input, "crack ")) { const direction = user_input[6..]; log.debug("cracking door in direction '{s}'", .{direction}); var subArena = std.heap.ArenaAllocator.init(self.allocator); defer subArena.deinit(); var item_list = try ArrayList([]const u8).initCapacity(subArena.allocator(), self.items.count()); { var it = self.items.iterator(); while (it.next()) |entry| { debug.assert(entry.key_ptr.*.len > 0); var buf = try subArena.allocator().alloc(u8, entry.key_ptr.*.len); // we need to copy, because entry is invalidated in recursive calls mem.copy(u8, buf, entry.key_ptr.*); item_list.appendAssumeCapacity(buf); } } var crack_instructions: [32]u8 = undefined; // 1. Drop everything that we are carrying for (item_list.items) |item| { const written = try fmt.bufPrint(crack_instructions[0..], "drop {s}", .{item}); _ = try self.runInstruction(written); } var _indices: [10]u32 = undefined; const n = @intCast(u32, item_list.items.len); var k: u32 = 1; while (k <= n) : (k += 1) { { // init indices var idx: u32 = 0; while (idx < k) : (idx += 1) { _indices[idx] = idx; } } var it = combination.combinations(n, _indices[0..k]); while (it.next()) |indices| { // 2. Pick up subset for (indices) |i| { const written = try fmt.bufPrint(crack_instructions[0..], "take {s}", .{item_list.items[i]}); _ = try self.runInstruction(written); } // 3. Walk to direction const written = try fmt.bufPrint(crack_instructions[0..], "{s}", .{direction}); const resp = try self.runInstruction(written); // 4. Did not find it if (mem.indexOf(u8, resp, "you are ejected back to the checkpoint")) |_| { for (indices) |i| { const written2 = try fmt.bufPrint(crack_instructions[0..], "drop {s}", .{item_list.items[i]}); _ = try self.runInstruction(written2); } } else { log.debug("{s}", .{resp}); // 5. If ok, we are done. Report successful subset. 
log.debug("We cracked it!", .{}); for (indices) |i| { log.debug("- {s}", .{item_list.items[i]}); } return resp; } } } // fallback return self.output.items; } // reset input self.input.items.len = 0; try self.input.appendSlice(user_input); try self.input.append('\n'); log.debug("running with input: {s}", .{user_input}); _ = try self.program.run(u8, self.input.items, u8, &self.output); return self.output.items; } };
day25/src/solve.zig
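// Sketch: the `crack` command above walks all item subsets in order of increasing
// size via the external `combination` helper. The same search space can be
// illustrated with a plain bitmask walk, which is enough when the item count is
// small. This is a standalone alternative enumeration, not the package used above:
const std = @import("std");

test "enumerate all carried-item subsets with a bitmask" {
    const items = [_][]const u8{ "cake", "asterisk", "food ration" };
    const total: u32 = 1 << items.len; // 2^n subsets
    var subsets_seen: usize = 0;

    var mask: u32 = 0;
    while (mask < total) : (mask += 1) {
        subsets_seen += 1;
        var i: u5 = 0;
        while (i < items.len) : (i += 1) {
            if ((mask >> i) & 1 == 1) {
                // In the solver, a "take {s}" instruction would be issued here,
                // the door tried, and the items dropped again on failure.
                _ = items[i];
            }
        }
    }

    try std.testing.expectEqual(@as(usize, 8), subsets_seen);
}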
const std = @import("std"); const testing = std.testing; const mem = std.mem; const Allocator = mem.Allocator; pub const Semver = struct { major: u64, minor: u64, patch: u64, pub fn parse(str: []const u8) !Semver { var it = std.mem.tokenize(u8, str, "."); const semver = Semver{ .major = try std.fmt.parseInt(usize, it.next() orelse return error.MajorNotFound, 10), .minor = try std.fmt.parseInt(usize, it.next() orelse return error.MinorNotFound, 10), .patch = try std.fmt.parseInt(usize, it.next() orelse return error.PatchNotFound, 10), }; if (it.next() != null) return error.TooManyTokens; return semver; } pub fn format( self: Semver, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { _ = fmt; _ = options; try writer.print("{}.{}.{}", .{ self.major, self.minor, self.patch, }); } pub fn cmp(self: Semver, other: Semver) std.math.Order { return if (self.major != other.major) std.math.order(self.major, other.major) else if (self.minor != other.minor) std.math.order(self.minor, other.minor) else std.math.order(self.patch, other.patch); } pub fn inside(self: Semver, range: Range) bool { return self.cmp(range.min).compare(.gte) and self.cmp(range.lessThan()).compare(.lt); } }; test "empty string" { try testing.expectError(error.MajorNotFound, Semver.parse("")); } test "bad strings" { try testing.expectError(error.MinorNotFound, Semver.parse("1")); try testing.expectError(error.MinorNotFound, Semver.parse("1.")); try testing.expectError(error.PatchNotFound, Semver.parse("1.2")); try testing.expectError(error.PatchNotFound, Semver.parse("1.2.")); try testing.expectError(error.Overflow, Semver.parse("1.-2.3")); try testing.expectError(error.InvalidCharacter, Semver.parse("^1.2.3-3.4.5")); } test "semver-suffix" { try testing.expectError(error.InvalidCharacter, Semver.parse("1.2.3-dev")); } test "regular semver" { const expected = Semver{ .major = 1, .minor = 2, .patch = 3 }; try testing.expectEqual(expected, try Semver.parse("1.2.3")); } test "semver formatting" { var buf: [80]u8 = undefined; var stream = std.io.fixedBufferStream(&buf); const semver = Semver{ .major = 4, .minor = 2, .patch = 1 }; try stream.writer().print("{}", .{semver}); try testing.expectEqualStrings("4.2.1", stream.getWritten()); } test "semver contains/inside range" { const range_pre = try Range.parse("^0.4.1"); const range_post = try Range.parse("^1.4.1"); try testing.expect(!range_pre.contains(try Semver.parse("0.2.0"))); try testing.expect(!range_pre.contains(try Semver.parse("0.4.0"))); try testing.expect(!range_pre.contains(try Semver.parse("0.5.0"))); try testing.expect(range_pre.contains(try Semver.parse("0.4.2"))); try testing.expect(range_pre.contains(try Semver.parse("0.4.128"))); try testing.expect(!range_post.contains(try Semver.parse("1.2.0"))); try testing.expect(!range_post.contains(try Semver.parse("1.4.0"))); try testing.expect(!range_post.contains(try Semver.parse("2.0.0"))); try testing.expect(range_post.contains(try Semver.parse("1.5.0"))); try testing.expect(range_post.contains(try Semver.parse("1.4.2"))); try testing.expect(range_post.contains(try Semver.parse("1.4.128"))); } pub const Range = struct { min: Semver, kind: Kind, pub const Kind = enum { approx, caret, exact, }; fn lessThan(self: Range) Semver { return switch (self.kind) { .exact => Semver{ .major = self.min.major, .minor = self.min.minor, .patch = self.min.patch + 1, }, .approx => Semver{ .major = self.min.major, .minor = self.min.minor + 1, .patch = 0, }, .caret => if (self.min.major == 0) 
Semver{ .major = self.min.major, .minor = self.min.minor + 1, .patch = 0, } else Semver{ .major = self.min.major + 1, .minor = 0, .patch = 0, }, }; } pub fn parse(str: []const u8) !Range { if (str.len == 0) return error.Empty; var semver_str: []const u8 = undefined; const kind: Kind = switch (str[0]) { '^' => blk: { semver_str = str[1..]; break :blk .caret; }, '~' => blk: { semver_str = str[1..]; break :blk .approx; }, else => blk: { if (!std.ascii.isDigit(str[0])) return error.InvalidCharacter; semver_str = str; break :blk .exact; }, }; return Range{ .kind = kind, .min = try Semver.parse(semver_str), }; } pub fn format( self: Range, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { _ = fmt; _ = options; switch (self.kind) { .exact => try writer.print("{}", .{self.min}), .approx => try writer.print("~{}", .{self.min}), .caret => if (fmt.len == 1 and fmt[0] == 'u') try writer.print("%5E{}", .{self.min}) else try writer.print("^{}", .{self.min}), } } pub fn contains(self: Range, semver: Semver) bool { return semver.inside(self); } }; test "empty string" { try testing.expectError(error.Empty, Range.parse("")); } test "approximate" { const expected = Range{ .kind = .approx, .min = Semver{ .major = 1, .minor = 2, .patch = 3, }, }; try testing.expectEqual(expected, try Range.parse("~1.2.3")); } test "caret" { const expected = Range{ .kind = .caret, .min = Semver{ .major = 1, .minor = 2, .patch = 3, }, }; try testing.expectEqual(expected, try Range.parse("^1.2.3")); } test "exact range" { const expected = Range{ .kind = .exact, .min = Semver{ .major = 1, .minor = 2, .patch = 3, }, }; try testing.expectEqual(expected, try Range.parse("1.2.3")); } test "range formatting: exact" { var buf: [80]u8 = undefined; var stream = std.io.fixedBufferStream(&buf); const range = Range{ .kind = .exact, .min = Semver{ .major = 1, .minor = 2, .patch = 3, }, }; try stream.writer().print("{}", .{range}); try testing.expectEqualStrings("1.2.3", stream.getWritten()); } test "range formatting: approx" { var buf: [80]u8 = undefined; var stream = std.io.fixedBufferStream(&buf); const range = Range{ .kind = .approx, .min = Semver{ .major = 1, .minor = 2, .patch = 3, }, }; try stream.writer().print("{}", .{range}); try testing.expectEqualStrings("~1.2.3", stream.getWritten()); } test "range formatting: caret" { var buf: [80]u8 = undefined; var stream = std.io.fixedBufferStream(&buf); const range = Range{ .kind = .caret, .min = Semver{ .major = 1, .minor = 2, .patch = 3, }, }; try stream.writer().print("{}", .{range}); try testing.expectEqualStrings("^1.2.3", stream.getWritten()); }
.gyro/version-mattnite-github.com-19baf08f/pkg/src/main.zig
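// Sketch: `inside`/`contains` above treat a range as the half-open interval
// [min, lessThan(min)). A hypothetical usage sketch, assuming the file above is
// importable as "main.zig" from a sibling file in the same package:
const std = @import("std");
const version = @import("main.zig");

test "approx range is a half-open interval" {
    const range = try version.Range.parse("~1.2.3");
    try std.testing.expect(range.contains(try version.Semver.parse("1.2.3")));
    try std.testing.expect(range.contains(try version.Semver.parse("1.2.99")));
    try std.testing.expect(!range.contains(try version.Semver.parse("1.3.0")));
}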
const std = @import("std"); const mem = std.mem; const server = &@import("../main.zig").server; const util = @import("../util.zig"); const c = @import("../c.zig"); const Error = @import("../command.zig").Error; const Seat = @import("../Seat.zig"); const InputConfig = @import("../InputConfig.zig"); const InputManager = @import("../InputManager.zig"); pub fn listInputs( allocator: *mem.Allocator, seat: *Seat, args: []const [:0]const u8, out: *?[]const u8, ) Error!void { var input_list = std.ArrayList(u8).init(allocator); const writer = input_list.writer(); var prev = false; var it = server.input_manager.input_devices.first; while (it) |node| : (it = node.next) { const configured = for (server.input_manager.input_configs.items) |*input_config| { if (mem.eql(u8, input_config.identifier, mem.sliceTo(node.data.identifier, 0))) { break true; } } else false; if (prev) try input_list.appendSlice("\n"); prev = true; try writer.print("{s}\n\ttype: {s}\n\tconfigured: {s}\n", .{ node.data.identifier, @tagName(node.data.device.type), configured, }); } out.* = input_list.toOwnedSlice(); } pub fn listInputConfigs( allocator: *mem.Allocator, seat: *Seat, args: []const [:0]const u8, out: *?[]const u8, ) Error!void { var input_list = std.ArrayList(u8).init(allocator); const writer = input_list.writer(); for (server.input_manager.input_configs.items) |*input_config, i| { if (i > 0) try input_list.appendSlice("\n"); try writer.print("{s}\n", .{input_config.identifier}); if (input_config.event_state) |event_state| { try writer.print("\tevents: {s}\n", .{@tagName(event_state)}); } if (input_config.accel_profile) |accel_profile| { try writer.print("\taccel-profile: {s}\n", .{@tagName(accel_profile)}); } if (input_config.click_method) |click_method| { try writer.print("\tclick-method: {s}\n", .{@tagName(click_method)}); } if (input_config.drag_state) |drag_state| { try writer.print("\tdrag: {s}\n", .{@tagName(drag_state)}); } if (input_config.drag_lock) |drag_lock| { try writer.print("\tdrag-lock: {s}\n", .{@tagName(drag_lock)}); } if (input_config.dwt_state) |dwt_state| { try writer.print("\tdisable-while-typing: {s}\n", .{@tagName(dwt_state)}); } if (input_config.middle_emulation) |middle_emulation| { try writer.print("\tmiddle-emulation: {s}\n", .{@tagName(middle_emulation)}); } if (input_config.natural_scroll) |natural_scroll| { try writer.print("\tnatual-scroll: {s}\n", .{@tagName(natural_scroll)}); } if (input_config.left_handed) |left_handed| { try writer.print("\tleft-handed: {s}\n", .{@tagName(left_handed)}); } if (input_config.tap_state) |tap_state| { try writer.print("\ttap: {s}\n", .{@tagName(tap_state)}); } if (input_config.tap_button_map) |tap_button_map| { try writer.print("\ttap-button-map: {s}\n", .{@tagName(tap_button_map)}); } if (input_config.pointer_accel) |pointer_accel| { try writer.print("\tpointer-accel: {d}\n", .{pointer_accel.value}); } if (input_config.scroll_method) |scroll_method| { try writer.print("\tscroll-method: {s}\n", .{scroll_method}); } if (input_config.scroll_button) |scroll_button| { try writer.print("\tscroll-button: {s}\n", .{ mem.sliceTo(c.libevdev_event_code_get_name(c.EV_KEY, scroll_button.button), 0), }); } } out.* = input_list.toOwnedSlice(); } pub fn input( allocator: *mem.Allocator, seat: *Seat, args: []const [:0]const u8, out: *?[]const u8, ) Error!void { if (args.len < 4) return Error.NotEnoughArguments; if (args.len > 4) return Error.TooManyArguments; // Try to find an existing InputConfig with matching identifier, or create // a new one if none was found. 
var new = false; const input_config = for (server.input_manager.input_configs.items) |*input_config| { if (mem.eql(u8, input_config.identifier, args[1])) break input_config; } else blk: { // Use util.gpa instead of allocator to assure the identifier is // allocated by the same allocator as the ArrayList. try server.input_manager.input_configs.ensureUnusedCapacity(1); server.input_manager.input_configs.appendAssumeCapacity(.{ .identifier = try util.gpa.dupe(u8, args[1]), }); new = true; break :blk &server.input_manager.input_configs.items[server.input_manager.input_configs.items.len - 1]; }; errdefer { if (new) { var cfg = server.input_manager.input_configs.pop(); cfg.deinit(); } } if (mem.eql(u8, "events", args[2])) { input_config.event_state = std.meta.stringToEnum(InputConfig.EventState, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "accel-profile", args[2])) { input_config.accel_profile = std.meta.stringToEnum(InputConfig.AccelProfile, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "click-method", args[2])) { input_config.click_method = std.meta.stringToEnum(InputConfig.ClickMethod, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "drag", args[2])) { input_config.drag_state = std.meta.stringToEnum(InputConfig.DragState, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "drag-lock", args[2])) { input_config.drag_lock = std.meta.stringToEnum(InputConfig.DragLock, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "disable-while-typing", args[2])) { input_config.dwt_state = std.meta.stringToEnum(InputConfig.DwtState, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "middle-emulation", args[2])) { input_config.middle_emulation = std.meta.stringToEnum(InputConfig.MiddleEmulation, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "natural-scroll", args[2])) { input_config.natural_scroll = std.meta.stringToEnum(InputConfig.NaturalScroll, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "left-handed", args[2])) { input_config.left_handed = std.meta.stringToEnum(InputConfig.LeftHanded, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "tap", args[2])) { input_config.tap_state = std.meta.stringToEnum(InputConfig.TapState, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "tap-button-map", args[2])) { input_config.tap_button_map = std.meta.stringToEnum(InputConfig.TapButtonMap, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "pointer-accel", args[2])) { input_config.pointer_accel = InputConfig.PointerAccel{ .value = std.math.clamp( try std.fmt.parseFloat(f32, args[3]), @as(f32, -1.0), @as(f32, 1.0), ), }; } else if (mem.eql(u8, "scroll-method", args[2])) { input_config.scroll_method = std.meta.stringToEnum(InputConfig.ScrollMethod, args[3]) orelse return Error.UnknownOption; } else if (mem.eql(u8, "scroll-button", args[2])) { const ret = c.libevdev_event_code_from_name(c.EV_KEY, args[3]); if (ret < 1) return Error.InvalidButton; input_config.scroll_button = InputConfig.ScrollButton{ .button = @intCast(u32, ret) }; } else { return Error.UnknownCommand; } // Update matching existing input devices. var it = server.input_manager.input_devices.first; while (it) |device_node| : (it = device_node.next) { if (mem.eql(u8, device_node.data.identifier, args[1])) { input_config.apply(&device_node.data); // We don't break here because it is common to have multiple input // devices with the same identifier. } } }
source/river-0.1.0/river/command/input.zig
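// Sketch: most branches in `input` above reduce to the same pattern — take the
// user's value argument, map it onto a config enum with std.meta.stringToEnum, and
// report UnknownOption when it does not match. A minimal standalone version of that
// pattern (hypothetical enum and error, not river's InputConfig):
const std = @import("std");

const DragState = enum { enabled, disabled };

fn parseDragState(arg: []const u8) error{UnknownOption}!DragState {
    return std.meta.stringToEnum(DragState, arg) orelse return error.UnknownOption;
}

test "stringToEnum-based option parsing" {
    try std.testing.expectEqual(DragState.enabled, try parseDragState("enabled"));
    try std.testing.expectError(error.UnknownOption, parseDragState("sometimes"));
}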
const x86_64 = @import("../../../index.zig"); const bitjuggle = @import("bitjuggle"); const std = @import("std"); const paging = x86_64.structures.paging; const mapping = paging.mapping; const Mapper = mapping.Mapper; const physToVirt = struct { pub fn physToVirt(offset: x86_64.VirtAddr, phys_frame: paging.PhysFrame) *paging.PageTable { return x86_64.VirtAddr.initPanic(offset.value + phys_frame.start_address.value).toPtr(*paging.PageTable); } }.physToVirt; pub const OffsetPageTable = MappedPageTable(x86_64.VirtAddr, physToVirt); /// A Mapper implementation that relies on a x86_64.PhysAddr to x86_64.VirtAddr conversion function. pub fn MappedPageTable( comptime context_type: type, comptime frame_to_pointer: fn (context_type, phys_frame: paging.PhysFrame) *paging.PageTable, ) type { return struct { const Self = @This(); const page_table_walker = PageTableWalker(context_type, frame_to_pointer); mapper: Mapper, level_4_table: *paging.PageTable, context: context_type, pub fn init(context: context_type, level_4_table: *paging.PageTable) Self { return .{ .mapper = makeMapper(), .context = context, .level_4_table = level_4_table, }; } inline fn getSelfPtr(mapper: *Mapper) *Self { return @fieldParentPtr(Self, "mapper", mapper); } /// This does *not* deallocate the physical frames that are mapped, /// it deallocates the frames used for the page table itself pub fn deallocateAllPageTableFrames( self: *Self, frame_allocator: *paging.FrameAllocator, ) mapping.UnmapError!mapping.MapperFlushAll { for (self.level_4_table.entries) |*l4_entry| { const l4_flags = l4_entry.getFlags(); if (!l4_flags.present) continue; const p3 = page_table_walker.nextTable(self.context, l4_entry) catch unreachable; for (p3.entries) |*l3_entry| { const l3_flags = l3_entry.getFlags(); if (!l3_flags.present or l3_flags.huge) continue; const p2 = page_table_walker.nextTable(self.context, l3_entry) catch unreachable; for (p2.entries) |*l2_entry| { const l2_flags = l2_entry.getFlags(); if (!l2_flags.present or l2_flags.huge) continue; frame_allocator.deallocate4KiB(paging.PhysFrame.fromStartAddressUnchecked(l2_entry.getAddr())); } frame_allocator.deallocate4KiB(paging.PhysFrame.fromStartAddressUnchecked(l3_entry.getAddr())); } frame_allocator.deallocate4KiB(paging.PhysFrame.fromStartAddressUnchecked(l4_entry.getAddr())); } return mapping.MapperFlushAll{}; } pub fn mapTo1GiB( self: *Self, page: paging.Page1GiB, frame: paging.PhysFrame1GiB, flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush1GiB { return self.mapToWithTableFlags1GiB(page, frame, flags, flags, frame_allocator); } pub fn mapToWithTableFlags1GiB( self: *Self, page: paging.Page1GiB, frame: paging.PhysFrame1GiB, flags: paging.PageTableFlags, parent_table_flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush1GiB { const parent_flags = parent_table_flags.sanitizeForParent(); const p3 = page_table_walker.createNextTable( self.context, self.level_4_table.getAtIndex(page.p4Index()), parent_flags, frame_allocator, ) catch |err| switch (err) { PageTableCreateError.MappedToHugePage => return mapping.MapToError.ParentEntryHugePage, PageTableCreateError.FrameAllocationFailed => return mapping.MapToError.FrameAllocationFailed, }; var entry = p3.getAtIndex(page.p3Index()); if (!entry.isUnused()) return mapping.MapToError.PageAlreadyMapped; entry.setAddr(frame.start_address); var new_flags = flags; new_flags.huge = true; entry.setFlags(new_flags); return 
mapping.MapperFlush1GiB.init(page); } fn impl_mapToWithTableFlags1GiB( mapper: *Mapper, page: paging.Page1GiB, frame: paging.PhysFrame1GiB, flags: paging.PageTableFlags, parent_table_flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush1GiB { return getSelfPtr(mapper).mapToWithTableFlags1GiB(page, frame, flags, parent_table_flags, frame_allocator); } pub fn unmap1GiB(self: *Self, page: paging.Page1GiB) mapping.UnmapError!mapping.UnmapResult1GiB { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.UnmapError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.UnmapError.PageNotMapped, }; const p3_entry = p3.getAtIndex(page.p3Index()); const flags = p3_entry.getFlags(); if (!flags.present) return mapping.UnmapError.PageNotMapped; if (!flags.huge) return mapping.UnmapError.ParentEntryHugePage; const frame = paging.PhysFrame1GiB.fromStartAddress(p3_entry.getAddr()) catch return mapping.UnmapError.InvalidFrameAddress; p3_entry.setUnused(); return mapping.UnmapResult1GiB{ .frame = frame, .flush = mapping.MapperFlush1GiB.init(page), }; } fn impl_unmap1GiB(mapper: *Mapper, page: paging.Page1GiB) mapping.UnmapError!mapping.UnmapResult1GiB { return getSelfPtr(mapper).unmap1GiB(page); } pub fn updateFlags1GiB( self: *Self, page: paging.Page1GiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlush1GiB { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; var p3_entry = p3.getAtIndex(page.p3Index()); if (p3_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; var new_flags = flags; new_flags.huge = true; p3_entry.setFlags(new_flags); return mapping.MapperFlush1GiB.init(page); } fn impl_updateFlags1GiB( mapper: *Mapper, page: paging.Page1GiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlush1GiB { return getSelfPtr(mapper).updateFlags1GiB(page, flags); } pub fn setFlagsP4Entry1GiB( self: *Self, page: paging.Page1GiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { const p4_entry = self.level_4_table.getAtIndex(page.p4Index()); if (p4_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; p4_entry.setFlags(flags); return mapping.MapperFlushAll{}; } fn impl_setFlagsP4Entry1GiB( mapper: *Mapper, page: paging.Page1GiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { return getSelfPtr(mapper).setFlagsP4Entry1GiB(page, flags); } pub fn translatePage1GiB(self: *const Self, page: paging.Page1GiB) mapping.TranslateError!paging.PhysFrame1GiB { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.TranslateError.InvalidFrameAddress, PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p3_entry = p3.getAtIndex(page.p3Index()); if (p3_entry.isUnused()) return mapping.TranslateError.NotMapped; return paging.PhysFrame1GiB.fromStartAddress(p3_entry.getAddr()) catch return mapping.TranslateError.InvalidFrameAddress; } fn impl_translatePage1GiB(mapper: *Mapper, page: paging.Page1GiB) 
mapping.TranslateError!paging.PhysFrame1GiB { return getSelfPtr(mapper).translatePage1GiB(page); } pub fn mapTo2MiB( self: *Self, page: paging.Page2MiB, frame: paging.PhysFrame2MiB, flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush2MiB { return self.mapToWithTableFlags2MiB(page, frame, flags, flags, frame_allocator); } pub fn mapToWithTableFlags2MiB( self: *Self, page: paging.Page2MiB, frame: paging.PhysFrame2MiB, flags: paging.PageTableFlags, parent_table_flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush2MiB { const parent_flags = parent_table_flags.sanitizeForParent(); const p3 = page_table_walker.createNextTable( self.context, self.level_4_table.getAtIndex(page.p4Index()), parent_flags, frame_allocator, ) catch |err| switch (err) { PageTableCreateError.MappedToHugePage => return mapping.MapToError.ParentEntryHugePage, PageTableCreateError.FrameAllocationFailed => return mapping.MapToError.FrameAllocationFailed, }; const p2 = page_table_walker.createNextTable( self.context, p3.getAtIndex(page.p3Index()), parent_flags, frame_allocator, ) catch |err| switch (err) { PageTableCreateError.MappedToHugePage => return mapping.MapToError.ParentEntryHugePage, PageTableCreateError.FrameAllocationFailed => return mapping.MapToError.FrameAllocationFailed, }; var entry = p2.getAtIndex(page.p2Index()); if (!entry.isUnused()) return mapping.MapToError.PageAlreadyMapped; entry.setAddr(frame.start_address); var new_flags = flags; new_flags.huge = true; entry.setFlags(new_flags); return mapping.MapperFlush2MiB.init(page); } fn impl_mapToWithTableFlags2MiB( mapper: *Mapper, page: paging.Page2MiB, frame: paging.PhysFrame2MiB, flags: paging.PageTableFlags, parent_table_flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush2MiB { return getSelfPtr(mapper).mapToWithTableFlags2MiB(page, frame, flags, parent_table_flags, frame_allocator); } pub fn unmap2MiB(self: *Self, page: paging.Page2MiB) mapping.UnmapError!mapping.UnmapResult2MiB { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.UnmapError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.UnmapError.PageNotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(page.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.UnmapError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.UnmapError.PageNotMapped, }; const p2_entry = p2.getAtIndex(page.p2Index()); const flags = p2_entry.getFlags(); if (!flags.present) return mapping.UnmapError.PageNotMapped; if (!flags.huge) return mapping.UnmapError.ParentEntryHugePage; const frame = paging.PhysFrame2MiB.fromStartAddress(p2_entry.getAddr()) catch return mapping.UnmapError.InvalidFrameAddress; p2_entry.setUnused(); return mapping.UnmapResult2MiB{ .frame = frame, .flush = mapping.MapperFlush2MiB.init(page), }; } fn impl_unmap2MiB(mapper: *Mapper, page: paging.Page2MiB) mapping.UnmapError!mapping.UnmapResult2MiB { return getSelfPtr(mapper).unmap2MiB(page); } pub fn updateFlags2MiB( self: *Self, page: paging.Page2MiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlush2MiB { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch 
(err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(page.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; var p2_entry = p2.getAtIndex(page.p2Index()); if (p2_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; var new_flags = flags; new_flags.huge = true; p2_entry.setFlags(new_flags); return mapping.MapperFlush2MiB.init(page); } fn impl_updateFlags2MiB( mapper: *Mapper, page: paging.Page2MiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlush2MiB { return getSelfPtr(mapper).updateFlags2MiB(page, flags); } pub fn setFlagsP4Entry2MiB( self: *Self, page: paging.Page2MiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { const p4_entry = self.level_4_table.getAtIndex(page.p4Index()); if (p4_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; p4_entry.setFlags(flags); return mapping.MapperFlushAll{}; } fn impl_setFlagsP4Entry2MiB( mapper: *Mapper, page: paging.Page2MiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { return getSelfPtr(mapper).setFlagsP4Entry2MiB(page, flags); } pub fn setFlagsP3Entry2MiB( self: *Self, page: paging.Page2MiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; const p3_entry = p3.getAtIndex(page.p3Index()); if (p3_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; p3_entry.setFlags(flags); return mapping.MapperFlushAll{}; } fn impl_setFlagsP3Entry2MiB( mapper: *Mapper, page: paging.Page2MiB, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { return getSelfPtr(mapper).setFlagsP3Entry2MiB(page, flags); } pub fn translatePage2MiB(self: *const Self, page: paging.Page2MiB) mapping.TranslateError!paging.PhysFrame2MiB { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.TranslateError.InvalidFrameAddress, PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(page.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.TranslateError.InvalidFrameAddress, PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p2_entry = p2.getAtIndex(page.p2Index()); if (p2_entry.isUnused()) return mapping.TranslateError.NotMapped; return paging.PhysFrame2MiB.fromStartAddress(p2_entry.getAddr()) catch return mapping.TranslateError.InvalidFrameAddress; } fn impl_translatePage2MiB(mapper: *Mapper, page: paging.Page2MiB) mapping.TranslateError!paging.PhysFrame2MiB { return getSelfPtr(mapper).translatePage2MiB(page); } pub fn mapTo( self: *Self, page: paging.Page, frame: paging.PhysFrame, flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) 
mapping.MapToError!mapping.MapperFlush { return self.mapToWithTableFlags(page, frame, flags, flags, frame_allocator); } pub fn mapToWithTableFlags( self: *Self, page: paging.Page, frame: paging.PhysFrame, flags: paging.PageTableFlags, parent_table_flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush { const parent_flags = parent_table_flags.sanitizeForParent(); const p3 = page_table_walker.createNextTable( self.context, self.level_4_table.getAtIndex(page.p4Index()), parent_flags, frame_allocator, ) catch |err| switch (err) { PageTableCreateError.MappedToHugePage => return mapping.MapToError.ParentEntryHugePage, PageTableCreateError.FrameAllocationFailed => return mapping.MapToError.FrameAllocationFailed, }; const p2 = page_table_walker.createNextTable( self.context, p3.getAtIndex(page.p3Index()), parent_flags, frame_allocator, ) catch |err| switch (err) { PageTableCreateError.MappedToHugePage => return mapping.MapToError.ParentEntryHugePage, PageTableCreateError.FrameAllocationFailed => return mapping.MapToError.FrameAllocationFailed, }; const p1 = page_table_walker.createNextTable( self.context, p2.getAtIndex(page.p2Index()), parent_flags, frame_allocator, ) catch |err| switch (err) { PageTableCreateError.MappedToHugePage => return mapping.MapToError.ParentEntryHugePage, PageTableCreateError.FrameAllocationFailed => return mapping.MapToError.FrameAllocationFailed, }; var entry = p1.getAtIndex(page.p1Index()); if (!entry.isUnused()) return mapping.MapToError.PageAlreadyMapped; entry.setAddr(frame.start_address); entry.setFlags(flags); return mapping.MapperFlush.init(page); } fn impl_mapToWithTableFlags( mapper: *Mapper, page: paging.Page, frame: paging.PhysFrame, flags: paging.PageTableFlags, parent_table_flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) mapping.MapToError!mapping.MapperFlush { return getSelfPtr(mapper).mapToWithTableFlags(page, frame, flags, parent_table_flags, frame_allocator); } pub fn unmap(self: *Self, page: paging.Page) mapping.UnmapError!mapping.UnmapResult { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.UnmapError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.UnmapError.PageNotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(page.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.UnmapError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.UnmapError.PageNotMapped, }; const p1 = page_table_walker.nextTable(self.context, p2.getAtIndex(page.p2Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.UnmapError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.UnmapError.PageNotMapped, }; const p1_entry = p1.getAtIndex(page.p1Index()); const flags = p1_entry.getFlags(); if (!flags.present) return mapping.UnmapError.PageNotMapped; if (flags.huge) return mapping.UnmapError.ParentEntryHugePage; const frame = paging.PhysFrame.fromStartAddress(p1_entry.getAddr()) catch return mapping.UnmapError.InvalidFrameAddress; p1_entry.setUnused(); return mapping.UnmapResult{ .frame = frame, .flush = mapping.MapperFlush.init(page), }; } fn impl_unmap(mapper: *Mapper, page: paging.Page) mapping.UnmapError!mapping.UnmapResult { return getSelfPtr(mapper).unmap(page); } pub fn updateFlags( self: *Self, page: 
paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlush { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(page.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; const p1 = page_table_walker.nextTable(self.context, p2.getAtIndex(page.p2Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; var p1_entry = p1.getAtIndex(page.p1Index()); if (p1_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; p1_entry.setFlags(flags); return mapping.MapperFlush.init(page); } fn impl_updateFlags( mapper: *Mapper, page: paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlush { return getSelfPtr(mapper).updateFlags(page, flags); } pub fn setFlagsP4Entry( self: *Self, page: paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { const p4_entry = self.level_4_table.getAtIndex(page.p4Index()); if (p4_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; p4_entry.setFlags(flags); return mapping.MapperFlushAll{}; } fn impl_setFlagsP4Entry( mapper: *Mapper, page: paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { return getSelfPtr(mapper).setFlagsP4Entry(page, flags); } pub fn setFlagsP3Entry( self: *Self, page: paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; const p3_entry = p3.getAtIndex(page.p3Index()); if (p3_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; p3_entry.setFlags(flags); return mapping.MapperFlushAll{}; } fn impl_setFlagsP3Entry( mapper: *Mapper, page: paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { return getSelfPtr(mapper).setFlagsP3Entry(page, flags); } pub fn setFlagsP2Entry( self: *Self, page: paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(page.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.FlagUpdateError.ParentEntryHugePage, PageTableWalkError.NotMapped => return mapping.FlagUpdateError.PageNotMapped, }; const p2_entry = p2.getAtIndex(page.p2Index()); if (p2_entry.isUnused()) return mapping.FlagUpdateError.PageNotMapped; p2_entry.setFlags(flags); return 
mapping.MapperFlushAll{}; } fn impl_setFlagsP2Entry( mapper: *Mapper, page: paging.Page, flags: paging.PageTableFlags, ) mapping.FlagUpdateError!mapping.MapperFlushAll { return getSelfPtr(mapper).setFlagsP2Entry(page, flags); } pub fn translatePage(self: *const Self, page: paging.Page) mapping.TranslateError!paging.PhysFrame { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(page.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.TranslateError.InvalidFrameAddress, PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(page.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.TranslateError.InvalidFrameAddress, PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p1 = page_table_walker.nextTable(self.context, p2.getAtIndex(page.p2Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => return mapping.TranslateError.InvalidFrameAddress, PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p1_entry = p1.getAtIndex(page.p1Index()); if (p1_entry.isUnused()) return mapping.TranslateError.NotMapped; return paging.PhysFrame.fromStartAddress(p1_entry.getAddr()) catch return mapping.TranslateError.InvalidFrameAddress; } fn impl_translatePage(mapper: *Mapper, page: paging.Page) mapping.TranslateError!paging.PhysFrame { return getSelfPtr(mapper).translatePage(page); } pub fn translate(self: *const Self, addr: x86_64.VirtAddr) mapping.TranslateError!mapping.TranslateResult { const p3 = page_table_walker.nextTable(self.context, self.level_4_table.getAtIndex(addr.p4Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => @panic("level 4 entry has huge page bit set"), PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p2 = page_table_walker.nextTable(self.context, p3.getAtIndex(addr.p3Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => { const entry = p3.getAtIndex(addr.p3Index()); const frame = paging.PhysFrame1GiB.containingAddress(entry.getAddr()); const offset = addr.value & 0o777_777_7777; return mapping.TranslateResult{ .Frame1GiB = .{ .frame = frame, .offset = offset, .flags = entry.getFlags(), }, }; }, PageTableWalkError.NotMapped => return mapping.TranslateError.NotMapped, }; const p1 = page_table_walker.nextTable(self.context, p2.getAtIndex(addr.p2Index())) catch |err| switch (err) { PageTableWalkError.MappedToHugePage => { const entry = p2.getAtIndex(addr.p2Index()); const frame = paging.PhysFrame2MiB.containingAddress(entry.getAddr()); const offset = addr.value & 0o777_7777; return mapping.TranslateResult{ .Frame2MiB = .{ .frame = frame, .offset = offset, .flags = entry.getFlags(), }, }; }, PageTableWalkError.NotMapped => { return mapping.TranslateError.NotMapped; }, }; const p1_entry = p1.getAtIndex(addr.p1Index()); if (p1_entry.isUnused()) return mapping.TranslateError.NotMapped; const frame = paging.PhysFrame.fromStartAddress(p1_entry.getAddr()) catch return mapping.TranslateError.InvalidFrameAddress; return mapping.TranslateResult{ .Frame4KiB = .{ .frame = frame, .offset = @as(u64, addr.pageOffset().value), .flags = p1_entry.getFlags(), }, }; } fn impl_translate(mapper: *Mapper, addr: x86_64.VirtAddr) mapping.TranslateError!mapping.TranslateResult { return getSelfPtr(mapper).translate(addr); } /// Translates the given virtual address to the 
physical address that it maps to. /// /// Returns `None` if there is no valid mapping for the given address. /// /// This is a convenience method. For more information about a mapping see the /// `translate` function. pub fn translateAddr(self: *const Self, addr: x86_64.VirtAddr) ?x86_64.PhysAddr { return switch (self.translate(addr) catch return null) { .Frame4KiB => |res| x86_64.PhysAddr.initPanic(res.frame.start_address.value + res.offset), .Frame2MiB => |res| x86_64.PhysAddr.initPanic(res.frame.start_address.value + res.offset), .Frame1GiB => |res| x86_64.PhysAddr.initPanic(res.frame.start_address.value + res.offset), }; } fn makeMapper() Mapper { return .{ .z_impl_mapToWithTableFlags1GiB = impl_mapToWithTableFlags1GiB, .z_impl_unmap1GiB = impl_unmap1GiB, .z_impl_updateFlags1GiB = impl_updateFlags1GiB, .z_impl_setFlagsP4Entry1GiB = impl_setFlagsP4Entry1GiB, .z_impl_translatePage1GiB = impl_translatePage1GiB, .z_impl_mapToWithTableFlags2MiB = impl_mapToWithTableFlags2MiB, .z_impl_unmap2MiB = impl_unmap2MiB, .z_impl_updateFlags2MiB = impl_updateFlags2MiB, .z_impl_setFlagsP4Entry2MiB = impl_setFlagsP4Entry2MiB, .z_impl_setFlagsP3Entry2MiB = impl_setFlagsP3Entry2MiB, .z_impl_translatePage2MiB = impl_translatePage2MiB, .z_impl_mapToWithTableFlags = impl_mapToWithTableFlags, .z_impl_unmap = impl_unmap, .z_impl_updateFlags = impl_updateFlags, .z_impl_setFlagsP4Entry = impl_setFlagsP4Entry, .z_impl_setFlagsP3Entry = impl_setFlagsP3Entry, .z_impl_setFlagsP2Entry = impl_setFlagsP2Entry, .z_impl_translatePage = impl_translatePage, .z_impl_translate = impl_translate, }; } comptime { std.testing.refAllDecls(@This()); } }; } fn PageTableWalker( comptime context_type: type, comptime frame_to_pointer: fn (context_type, phys_frame: paging.PhysFrame) *paging.PageTable, ) type { return struct { const Self = @This(); /// Internal helper function to get a reference to the page table of the next level. pub fn nextTable(context: context_type, entry: *paging.PageTableEntry) PageTableWalkError!*paging.PageTable { return frame_to_pointer(context, entry.getFrame() catch |err| switch (err) { error.HugeFrame => return PageTableWalkError.MappedToHugePage, error.FrameNotPresent => return PageTableWalkError.NotMapped, }); } /// Internal helper function to create the page table of the next level if needed. 
pub fn createNextTable( context: context_type, entry: *paging.PageTableEntry, insert_flags: paging.PageTableFlags, frame_allocator: *paging.FrameAllocator, ) PageTableCreateError!*paging.PageTable { var created = false; if (entry.isUnused()) { if (frame_allocator.allocate4KiB()) |frame| { entry.setAddr(frame.start_address); entry.setFlags(insert_flags); created = true; } else { return PageTableCreateError.FrameAllocationFailed; } } else { const raw_insert_flags = insert_flags.toU64(); const raw_entry_flags = entry.getFlags().toU64(); const combined_raw_flags = raw_insert_flags | raw_entry_flags; if (raw_insert_flags != 0 and combined_raw_flags != raw_insert_flags) { entry.setFlags(paging.PageTableFlags.fromU64(combined_raw_flags)); } } const page_table = nextTable(context, entry) catch |err| switch (err) { error.MappedToHugePage => return PageTableCreateError.MappedToHugePage, error.NotMapped => @panic("entry should be mapped at this point"), }; if (created) page_table.zero(); return page_table; } comptime { std.testing.refAllDecls(@This()); } }; } const PageTableWalkError = error{ NotMapped, MappedToHugePage, }; const PageTableCreateError = error{ MappedToHugePage, FrameAllocationFailed, }; comptime { std.testing.refAllDecls(@This()); }
src/structures/paging/mapping/mapped_page_table.zig
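// A minimal usage sketch for the OffsetPageTable declared above, assuming the sketch
// sits next to mapped_page_table.zig and that the caller already knows the virtual
// offset at which all physical memory is mapped plus a pointer to the active level-4
// table; exampleTranslate and the sketch_* names are illustrative only.
const sketch_x86_64 = @import("../../../index.zig");
const sketch_paging = sketch_x86_64.structures.paging;
const sketch_mapper = @import("mapped_page_table.zig");

fn exampleTranslate(
    phys_offset: sketch_x86_64.VirtAddr,
    level_4_table: *sketch_paging.PageTable,
    addr: sketch_x86_64.VirtAddr,
) ?sketch_x86_64.PhysAddr {
    // Build the mapper from the offset context plus the root table, then resolve a
    // virtual address; translateAddr returns null when no mapping exists.
    var offset_table = sketch_mapper.OffsetPageTable.init(phys_offset, level_4_table);
    return offset_table.translateAddr(addr);
}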
const std = @import("std"); usingnamespace @import("zalgebra"); usingnamespace @import("utils.zig"); usingnamespace @import("shader.zig"); usingnamespace @import("../glfw_gl3.zig"); const stb = @import("c.zig").stb; usingnamespace @import("c.zig").gl; const mem = std.mem; const Allocator = mem.Allocator; const json = std.json; const print = std.debug.print; const ArrayList = std.ArrayList; const panic = std.debug.panic; const page_alloc = std.heap.page_allocator; const StringHashMap = std.StringHashMap; const ValueTree = json.ValueTree; pub const Font = struct { name: []const u8, size: f32, is_bold: bool, // It's in pixel, but we're gonna cast them anyway. atlas_width: f32, atlas_height: f32, characters: StringBufSet(CharacterInfo), texture_id: u32, /// Also in pixels. const CharacterInfo = struct { x: f32, y: f32, width: f32, height: f32, origin_x: f32, origin_y: f32, advance: f32, }; const Self = @This(); pub fn init(font_name: []const u8) !Self { var font: Self = undefined; var tree = try parse_font_descriptor(font_name); defer tree.deinit(); const root = tree.root.Object; const fonts_folder = "demo/assets/fonts"; const atlas_path = try mem.join( page_alloc, "", &[_][]const u8{ fonts_folder, "/", font_name, "/", font_name, "_atlas", ".png", }, ); defer page_alloc.free(atlas_path); font.texture_id = load_to_gpu(atlas_path); font.name = try Allocator.dupe(page_alloc, u8, root.get("name").?.String); font.size = @intToFloat(f32, root.get("size").?.Integer); font.is_bold = root.get("bold").?.Bool; font.atlas_width = @intToFloat(f32, root.get("width").?.Integer); font.atlas_height = @intToFloat(f32, root.get("height").?.Integer); font.characters = StringBufSet(CharacterInfo).init(page_alloc); var it = root.get("characters").?.Object.iterator(); while (it.next()) |entry| { const char_name = entry.key_ptr.*; const char_value = entry.value_ptr.*.Object; try font.characters.put(char_name, .{ .x = @intToFloat(f32, char_value.get("x").?.Integer), .y = @intToFloat(f32, char_value.get("y").?.Integer), .width = @intToFloat(f32, char_value.get("width").?.Integer), .height = @intToFloat(f32, char_value.get("height").?.Integer), .origin_x = @intToFloat(f32, char_value.get("originX").?.Integer), .origin_y = @intToFloat(f32, char_value.get("originY").?.Integer), .advance = @intToFloat(f32, char_value.get("advance").?.Integer), }); } print("Font '{s}' successfuly loaded.\n", .{font.name}); return font; } pub fn deinit(self: *Self) void { glDeleteTextures(1, &[_]u32{self.texture_id}); page_alloc.free(self.name); self.characters.deinit(); } }; fn load_to_gpu(font_path: []const u8) u32 { var width: i32 = undefined; var height: i32 = undefined; var channels: i32 = undefined; const should_flip = @boolToInt(false); stb.stbi_set_flip_vertically_on_load(should_flip); const data = stb.stbi_load(font_path.ptr, &width, &height, &channels, 0); defer stb.stbi_image_free(data); if (data == 0) { panic("STB crashed while loading image '{s}'!\n", .{font_path}); } var texture_id: u32 = undefined; glGenTextures(1, &texture_id); glBindTexture(GL_TEXTURE_2D, texture_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Should be always RGBA for SDF fonts. 
const format: c_int = switch (channels) { 1 => GL_RED, 3 => GL_RGB, 4 => GL_RGBA, else => panic("Provided channels currently not supported!\n", .{}), }; glTexImage2D( GL_TEXTURE_2D, 0, format, width, height, 0, @intCast(c_uint, format), GL_UNSIGNED_BYTE, data, ); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); return texture_id; } fn parse_font_descriptor(font_name: []const u8) !ValueTree { const fonts_folder = "demo/assets/fonts"; const path = try mem.join( page_alloc, "", &[_][]const u8{ fonts_folder, "/", font_name, "/", font_name, ".json" }, ); defer page_alloc.free(path); const max_bytes: usize = 20480; const buf = try std.fs.cwd().readFileAlloc(page_alloc, path, max_bytes); defer page_alloc.free(buf); var parser = json.Parser.init(page_alloc, true); defer parser.deinit(); return parser.parse(buf); } pub fn immediate_draw_text( args: struct { text: []const u8, size: f32 = 42, color: vec3 = vec3.new(1, 0, 1), pos_x: f32, pos_y: f32, }, font: *const Font, render_obj: *RenderObject ) void { const ratio_size = args.size / font.size; // Color alpha value. const a: f32 = 1; var text_vertex_buffer = ArrayList(f32).init(page_alloc); defer text_vertex_buffer.deinit(); var text_element_buffer = ArrayList(u32).init(page_alloc); defer text_element_buffer.deinit(); const color = args.color; // Render each glyph. var text_cursor: f32 = 0; var count: u32 = 0; for (args.text) |letter| { if (!font.characters.contains(&[_]u8{letter})) continue; var c = font.characters.get(&[_]u8{letter}).?; const top_left_x = c.x / font.atlas_width; const top_left_y = c.y / font.atlas_height; const top_right_x = top_left_x + (c.width / font.atlas_width); const top_right_y = top_left_y; const bottom_left_x = top_left_x; const bottom_left_y = top_left_y + (c.height / font.atlas_height); const bottom_right_x = top_right_x; const bottom_right_y = bottom_left_y; const x = args.pos_x + text_cursor - (c.origin_x * ratio_size); const y = args.pos_y + 0 - (c.height - c.origin_y) * ratio_size; const w = c.width * ratio_size; const h = c.height * ratio_size; // Quad graph, you're welcome! // // A----B // | | // | | // C----D // // const glyph_data = [_]f32{ x, y + h, top_left_x, top_left_y, color.x, color.y, color.z, a, // A x, y, bottom_left_x, bottom_left_y, color.x, color.y, color.z, a, // C x + w, y, bottom_right_x, bottom_right_y, color.x, color.y, color.z, a, // D x + w, y + h, top_right_x, top_right_y, color.x, color.y, color.z, a, // B }; text_vertex_buffer.appendSlice(&glyph_data) catch unreachable; text_element_buffer.appendSlice(&[_]u32{ 0 + count, 1 + count, 2 + count, 0 + count, 2 + count, 3 + count }) catch unreachable; text_cursor += c.advance * ratio_size; count += 4; } send_data_to_gpu( render_obj, text_vertex_buffer.items, text_element_buffer.items ); { glBindVertexArray(render_obj.vao); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, render_obj.ebo.?); glDrawElements( GL_TRIANGLES, @intCast(c_int, text_element_buffer.items.len), GL_UNSIGNED_INT, @intToPtr(*allowzero c_void, 0), ); glBindVertexArray(0); } }
demo/common/font.zig
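// A minimal usage sketch for the Font loader above, assuming it sits alongside
// font.zig (so Font, immediate_draw_text and RenderObject are in scope), a GL context
// is current, and a RenderObject (vao/ebo) has been prepared by the caller; "roboto"
// is a placeholder font name and exampleDrawHello an illustrative helper only.
fn exampleDrawHello(render_obj: *RenderObject) !void {
    // Loads demo/assets/fonts/roboto/roboto.json plus the matching _atlas.png.
    var font = try Font.init("roboto");
    defer font.deinit();

    // Coordinates are in screen pixels; glyph quads are scaled by size / font.size.
    immediate_draw_text(.{
        .text = "Hello",
        .size = 32,
        .pos_x = 20,
        .pos_y = 40,
    }, &font, render_obj);
}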
const std = @import("std"); const Fundude = @import("main.zig"); const Savestate = @This(); data: [size]u8, pub fn dumpFrom(self: *Savestate, fd: Fundude) void { var stream = std.io.fixedBufferStream(&self.data); dump(fd, stream.writer()) catch unreachable; } pub fn restoreInto(self: Savestate, fd: *Fundude) !void { var stream = std.io.fixedBufferStream(&self.data); try restore(fd, stream.reader()); } fn Serializer(comptime T: type, comptime field_names: []const []const u8) type { return struct { pub const ssize: comptime_int = blk: { var result = 0; inline for (field_names) |field_name| { var fake: T = undefined; const FieldType = @TypeOf(@field(fake, field_name)); result += @sizeOf(u32) + @sizeOf(FieldType); } break :blk result; }; fn dump(self: T, writer: anytype) !void { inline for (field_names) |field_name| { const field_ptr = &@field(self, field_name); const FieldType = @TypeOf(field_ptr.*); try writer.writeIntNative(u32, @sizeOf(FieldType)); try writer.writeAll(std.mem.asBytes(field_ptr)); } } fn validate(reader: anytype) !void { var fake: T = undefined; inline for (field_names) |field_name| { const FieldType = @TypeOf(@field(fake, field_name)); const wire_size = @sizeOf(FieldType); if (wire_size != try reader.readIntNative(u32)) { return error.SizeMismatch; } try reader.skipBytes(wire_size, .{ .buf_size = 64 }); } } fn restore(self: *T, reader: anytype) !void { inline for (field_names) |field_name| { const FieldType = @TypeOf(@field(self, field_name)); const wire_size = @sizeOf(FieldType); if (wire_size != try reader.readIntNative(u32)) { return error.SizeMismatch; } switch (@typeInfo(FieldType)) { .Bool => @field(self, field_name) = 0 != try reader.readByte(), .Int => |int_info| { const WireType = std.meta.Int(.unsigned, 8 * wire_size); const raw = try reader.readIntNative(WireType); @field(self, field_name) = @intCast(FieldType, raw); }, else => { const result_location = &@field(self, field_name); try reader.readNoEof(std.mem.asBytes(result_location)); }, } } } }; } const Foo = struct { bar: u8, baz: u16, const S = Serializer(@This(), &[_][]const u8{ "bar", "baz" }); }; test "dump" { const foo = Foo{ .bar = 0x12, .baz = 0x3456 }; var buf: [0x1000]u8 = undefined; var stream = std.io.fixedBufferStream(&buf); try Foo.S.dump(foo, stream.writer()); // Size (bar = u8) std.testing.expectEqual(@as(u8, 1), buf[0]); std.testing.expectEqual(@as(u8, 0), buf[1]); std.testing.expectEqual(@as(u8, 0), buf[2]); std.testing.expectEqual(@as(u8, 0), buf[3]); // Payload (bar = 0x12) std.testing.expectEqual(@as(u8, 0x12), buf[4]); // Size (baz = u16) std.testing.expectEqual(@as(u8, 2), buf[5]); std.testing.expectEqual(@as(u8, 0), buf[6]); std.testing.expectEqual(@as(u8, 0), buf[7]); std.testing.expectEqual(@as(u8, 0), buf[8]); // Payload (bar = 0x3456) std.testing.expectEqual(@as(u8, 0x56), buf[9]); std.testing.expectEqual(@as(u8, 0x34), buf[10]); } test "restore" { const buf = [_]u8{ 1, 0, 0, 0, 0x12, 2, 0, 0, 0, 0x56, 0x34 }; var stream = std.io.fixedBufferStream(&buf); var foo: Foo = undefined; try Foo.S.restore(&foo, stream.reader()); std.testing.expectEqual(@as(u8, 0x12), foo.bar); std.testing.expectEqual(@as(u16, 0x3456), foo.baz); } const Cpu = Serializer(Fundude.Cpu, &[_][]const u8{ "mode", "interrupt_master", "reg", "duration", "remaining", "next", }); const Mmu = Serializer(Fundude.Mmu, &[_][]const u8{ "dyn", "bank", }); const Video = Serializer(Fundude.Video, &[_][]const u8{ "clock", }); const Timer = Serializer(Fundude.Timer, &[_][]const u8{ "clock", "timer", }); pub const size = 
magic_number.len + cart_meta_len + Cpu.ssize + Mmu.ssize + Video.ssize + Timer.ssize; const version = 0x00; const magic_number = [_]u8{ 0xDC, version, 0x46, 0x44, 0x0D, 0x0A, 0x1A, 0x0A }; const cart_meta_len = 0x18; pub fn dump(fd: Fundude, writer: anytype) !void { try writer.writeAll(&magic_number); try writer.writeAll(fd.mmu.cart[0x134..][0..cart_meta_len]); try Cpu.dump(fd.cpu, writer); try Mmu.dump(fd.mmu, writer); try Timer.dump(fd.timer, writer); try Video.dump(fd.video, writer); } fn validateHeader(fd: *Fundude, reader: anytype) !void { const header = try reader.readBytesNoEof(magic_number.len); if (!std.mem.eql(u8, &header, &magic_number)) { return error.HeaderMismatch; } const cart_meta = try reader.readBytesNoEof(0x18); if (!std.mem.eql(u8, &cart_meta, fd.mmu.cart[0x134..][0..cart_meta_len])) { return error.CartMismatch; } } pub fn validate(fd: *Fundude, reader: anytype) !void { try validateHeader(fd, reader); try Cpu.validate(reader); try Mmu.validate(reader); try Timer.validate(reader); try Video.validate(reader); } pub fn restore(fd: *Fundude, reader: anytype) !void { try validateHeader(fd, reader); try Cpu.restore(&fd.cpu, reader); try Mmu.restore(&fd.mmu, reader); try Timer.restore(&fd.timer, reader); fd.video.reset(); try Video.restore(&fd.video, reader); }
src/Savestate.zig
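// A sketch of a full savestate round trip, assuming it lives in Savestate.zig (so
// Fundude and Savestate resolve) and that the Fundude instance has a cartridge loaded,
// since restore re-checks the magic number and cartridge metadata; exampleRoundTrip is
// an illustrative helper only.
fn exampleRoundTrip(fd: *Fundude) !void {
    var state: Savestate = undefined;
    // Capture the current machine state into the fixed-size buffer...
    state.dumpFrom(fd.*);
    // ...later, rewind the emulator to exactly that captured state.
    try state.restoreInto(fd);
}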
const std = @import("std"); const rl = @import("raylib"); const math = @import("utils/math.zig"); const GameSimulation = @import("GameSimulation.zig"); pub fn GameLoop() void { // The ArenaAllocator lets use free all the persistent store memory at once. var ArenaAllocator = std.heap.ArenaAllocator.init(std.heap.page_allocator); // Free all memory used by the allocator at once defer ArenaAllocator.deinit(); // Our game state var gameState = GameSimulation.GameState{.allocator = ArenaAllocator.allocator() }; gameState.Init(); // Initialize our game object gameState.physicsComponents[0].position = .{.x = 400000, .y = 200000 }; // Main game loop while (!rl.WindowShouldClose()) { // Detect window close button or ESC key // Reset input to not held down before polling gameState.inputComponents[0].inputCommand.Reset(); if(rl.IsWindowFocused() and rl.IsGamepadAvailable(0)) { if(rl.IsGamepadButtonDown(0, rl.GamepadButton.GAMEPAD_BUTTON_LEFT_FACE_UP)) { gameState.inputComponents[0].inputCommand.Up = true; } if(rl.IsGamepadButtonDown(0, rl.GamepadButton.GAMEPAD_BUTTON_LEFT_FACE_DOWN)) { gameState.inputComponents[0].inputCommand.Down = true; } if(rl.IsGamepadButtonDown(0, rl.GamepadButton.GAMEPAD_BUTTON_LEFT_FACE_LEFT)) { gameState.inputComponents[0].inputCommand.Left = true; } if(rl.IsGamepadButtonDown(0, rl.GamepadButton.GAMEPAD_BUTTON_LEFT_FACE_RIGHT)) { gameState.inputComponents[0].inputCommand.Right = true; } } // Game Simulation { GameSimulation.UpdateGame(&gameState); } // Draw rl.BeginDrawing(); rl.ClearBackground(rl.WHITE); const ScreenX = math.WorldToScreen(gameState.physicsComponents[0].position.x); const ScreenY = math.WorldToScreen(gameState.physicsComponents[0].position.y); // Reflect the position of our game object on screen. rl.DrawCircle(ScreenX, ScreenY, 50, rl.MAROON); if(gameState.gameData) | gameData | { const hitbox = gameData.HitboxGroup.Hitboxes.items[0]; rl.DrawRectangleLines(hitbox.left, hitbox.top, hitbox.right - hitbox.left, hitbox.top - hitbox.bottom, rl.RED); } rl.EndDrawing(); //---------------------------------------------------------------------------------- } }
src/Game.zig
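// The loop above stores positions in fixed-point world units (e.g. x = 400000) and
// converts them with math.WorldToScreen before drawing. The helper below is only a
// sketch of that kind of conversion, assuming 1000 world sub-units per pixel; the
// actual factor lives in utils/math.zig and may differ.
fn worldToScreenSketch(world: i32) i32 {
    const sub_units_per_pixel: i32 = 1000; // assumed scale, not taken from the project
    return @divTrunc(world, sub_units_per_pixel);
}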
const std = @import("std"); const fs = std.fs; const prot = @import("protocols.zig"); const epoll = @import("epoll"); const connections = @import("connection.zig"); const Connection = @import("connection.zig").Connection; const Context = @import("connection.zig").Context; const Object = @import("connection.zig").Object; var conn: Connection = undefined; var waiting: bool = true; const Operation = enum { Clients, Windows, WindowTrees, }; var operation: ?Operation = null; pub fn main() anyerror!void { try epoll.init(); var args_it = std.process.args(); while (args_it.nextPosix()) |arg| { if (args_it.inner.index == 2) { if (std.mem.eql(u8, arg, "clients")) { operation = .Clients; } if (std.mem.eql(u8, arg, "windows")) { operation = .Windows; } if (std.mem.eql(u8, arg, "window-tree")) { operation = .WindowTrees; } } } if (operation == null) { return error.NoValidOperationProvided; } prot.WL_DISPLAY.delete_id = delete_id; prot.WL_REGISTRY.global = global; prot.WL_CALLBACK.done = callback_done; prot.FW_CONTROL.client = client; prot.FW_CONTROL.window = window; prot.FW_CONTROL.toplevel_window = toplevel_window; prot.FW_CONTROL.region_rect = region_rect; prot.FW_CONTROL.done = done; var file = try std.net.connectUnixSocket("/run/user/1000/wayland-0"); conn.dispatchable.impl = connections.dispatch; conn.context.init(file.handle, &conn); try epoll.addFd(file.handle, &conn.dispatchable); var wl_display = prot.new_wl_display(1, &conn.context, 0); try conn.context.register(wl_display); var wl_registry = prot.new_wl_registry(2, &conn.context, 0); try conn.context.register(wl_registry); var wl_callback = prot.new_wl_callback(3, &conn.context, 0); try conn.context.register(wl_callback); try prot.wl_display_send_get_registry(wl_display, 2); try prot.wl_display_send_sync(wl_display, 3); while (waiting) { var i: usize = 0; var n = epoll.wait(-1); while (i < n) { try epoll.dispatch(i); i = i + 1; } } } fn delete_id(context: *Context, wl_display: Object, id: u32) anyerror!void { if (context.objects.get(id)) |wl_object| { try context.unregister(wl_object); } } fn global(context: *Context, wl_registry: Object, name: u32, interface: []u8, version: u32) anyerror!void { if (std.mem.eql(u8, interface, "fw_control\x00\x00")) { try prot.wl_registry_send_bind(wl_registry, name, "fw_control\x00", 1, 4); var fw_control = prot.new_fw_control(4, context, 0); try conn.context.register(fw_control); // As soon as we've bound the interface we can send our query switch (operation.?) { .Clients => try prot.fw_control_send_get_clients(fw_control), .Windows => try prot.fw_control_send_get_windows(fw_control), .WindowTrees => try prot.fw_control_send_get_window_trees(fw_control), } } } fn callback_done(context: *Context, wl_callback: Object, callback_data: u32) anyerror!void { // std.debug.warn("done!\n", .{}); } fn client(context: *Context, fw_control: Object, client_index: u32) anyerror!void { std.debug.warn("client[{}]\n", .{client_index}); } fn window(context: *Context, fw_control: Object, index: u32, parent: i32, wl_surface_id: u32, surface_type: u32, x: i32, y: i32, width: i32, height: i32, sibling_prev: i32, sibling_next: i32, children_prev: i32, children_next: i32, input_region_id: u32) anyerror!void { switch (operation.?) 
{ .Windows => windowsWindow(index, parent, wl_surface_id, surface_type, x, y, width, height, sibling_prev, sibling_next, children_prev, children_next, input_region_id), .WindowTrees => windowTressWindow(index, parent, wl_surface_id, surface_type, x, y, width, height, sibling_prev, sibling_next, children_prev, children_next, input_region_id), else => return error.WindowNotExpectedForOp, } } fn windowsWindow(index: u32, parent: i32, wl_surface_id: u32, surface_type: u32, x: i32, y: i32, width: i32, height: i32, sibling_prev: i32, sibling_next: i32, children_prev: i32, children_next: i32, input_region_id: u32) void { var st = @intToEnum(prot.fw_control_surface_type, surface_type); std.debug.warn("window[{} ^", .{index}); if (parent < 0) { std.debug.warn(" null]", .{}); } else { std.debug.warn(" {}]", .{parent}); } std.debug.warn(" @{}", .{wl_surface_id}); switch (st) { prot.fw_control_surface_type.wl_surface => std.debug.warn(" (wl_surface)", .{}), prot.fw_control_surface_type.wl_subsurface => std.debug.warn(" (wl_subsurface)", .{}), prot.fw_control_surface_type.xdg_toplevel => std.debug.warn(" (xdg_toplevel)", .{}), prot.fw_control_surface_type.xdg_popup => std.debug.warn(" (xdg_popup)", .{}), } std.debug.warn(" ({}, {}) ({}, {}) [{}, {}] [{}, {}]\n", .{ x, y, width, height, sibling_prev, sibling_next, children_prev, children_next }); if (input_region_id > 0) { std.debug.warn("\tinput_region_id: {}\n", .{input_region_id}); } } fn windowTressWindow(index: u32, parent: i32, wl_surface_id: u32, surface_type: u32, x: i32, y: i32, width: i32, height: i32, sibling_prev: i32, sibling_next: i32, children_prev: i32, children_next: i32, input_region_id: u32) void { var st = @intToEnum(prot.fw_control_surface_type, surface_type); std.debug.warn(" window[{} ^", .{index}); if (parent < 0) { std.debug.warn(" null]", .{}); } else { std.debug.warn(" {}]", .{parent}); } std.debug.warn(" @{}", .{wl_surface_id}); switch (st) { prot.fw_control_surface_type.wl_surface => std.debug.warn(" (wl_surface)", .{}), prot.fw_control_surface_type.wl_subsurface => std.debug.warn(" (wl_subsurface)", .{}), prot.fw_control_surface_type.xdg_toplevel => std.debug.warn(" (xdg_toplevel)", .{}), prot.fw_control_surface_type.xdg_popup => std.debug.warn(" (xdg_popup)", .{}), } std.debug.warn(" ({}, {}) ({}, {}) [{}, {}] [{}, {}]\n", .{ x, y, width, height, sibling_prev, sibling_next, children_prev, children_next }); } fn toplevel_window(context: *Context, fw_control: Object, index: u32, parent: i32, wl_surface_id: u32, surface_type: u32, x: i32, y: i32, width: i32, height: i32, input_region_id: u32) anyerror!void { var st = @intToEnum(prot.fw_control_surface_type, surface_type); std.debug.warn("window[{} ^", .{index}); if (parent < 0) { std.debug.warn(" null]", .{}); } else { std.debug.warn(" {}]", .{parent}); } std.debug.warn(" @{}", .{wl_surface_id}); switch (st) { prot.fw_control_surface_type.wl_surface => std.debug.warn(" (wl_surface)", .{}), prot.fw_control_surface_type.wl_subsurface => std.debug.warn(" (wl_subsurface)", .{}), prot.fw_control_surface_type.xdg_toplevel => std.debug.warn(" (xdg_toplevel)", .{}), prot.fw_control_surface_type.xdg_popup => std.debug.warn(" (xdg_popup)", .{}), } std.debug.warn(" ({}", .{x}); std.debug.warn(", {})", .{y}); std.debug.warn(" ({}", .{width}); std.debug.warn(", {}):\n", .{height}); } fn region_rect(context: *Context, fw_control: Object, index: u32, x: i32, y: i32, width: i32, height: i32, op: i32) anyerror!void { std.debug.warn("\t\trect[{}]:\n", .{index}); std.debug.warn("\t\t\tx: {}\n", 
.{x}); std.debug.warn("\t\t\ty: {}\n", .{y}); std.debug.warn("\t\t\twidth: {}\n", .{width}); std.debug.warn("\t\t\theight: {}\n", .{height}); std.debug.warn("\t\t\top: {s}\n", .{if (op == 1) "Add" else "Sub"}); } fn done(context: *Context, fw_control: Object) anyerror!void { waiting = false; }
src/foxwhalectl/main.zig
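// Example invocations of the control client above, assuming the binary is named after
// its directory (foxwhalectl) and the compositor is listening on the socket path
// hard-coded in main() (/run/user/1000/wayland-0):
//
//   foxwhalectl clients        lists connected clients
//   foxwhalectl windows        lists windows with geometry and sibling/children links
//   foxwhalectl window-tree    prints the same window data indented as a tree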
const std = @import("std"); const bpf = @import("bpf"); const mem = std.mem; usingnamespace @import("common.zig"); const os = std.os; const assert = std.debug.assert; const c = @cImport({ @cInclude("net/if.h"); }); // tell the compiler to include the multithreaded event loop pub const io_mode = .evented; // embed an external file into the .rodata section const probe = @embedFile("probe.o"); // at compile time we parse the embedded elf file, if there is no section named // 'socket1' compilation fails comptime { @setEvalBranchQuota(4000); assert(bpf.elf.has_section(probe, "socket1")); } fn consume_events(perf_buffer: *bpf.PerfBuffer) void { while (perf_buffer.running.load(.SeqCst)) { const payload = perf_buffer.get(); switch (payload.event) { .sample => |sample| { std.debug.print("cpu: {}, sample: {}\n", .{ payload.cpu, mem.bytesToValue(usize, sample.items[0..8]), }); sample.deinit(); }, .lost => |cnt| { std.debug.print("cpu: {}, lost: {}\n", .{ payload.cpu, cnt }); }, } } } pub fn main() anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; var obj = try bpf.Object.init(&gpa.allocator, probe); defer obj.deinit(); try obj.load(); defer obj.unload(); const sock_fd = try create_raw_socket("lo"); defer os.close(sock_fd); const prog = obj.find_prog("socket1") orelse unreachable; try os.setsockopt(sock_fd, c.SOL_SOCKET, c.SO_ATTACH_BPF, mem.asBytes(&prog)); const perf_event_array = try bpf.PerfEventArray.init(bpf.MapInfo{ .name = "", .fd = obj.find_map("events") orelse return error.NoEventsMap, .def = bpf.kern.PerfEventArray.init(256, 0).map.def, }); var perf_buffer = try bpf.PerfBuffer.init(&gpa.allocator, perf_event_array, 64); _ = async perf_buffer.run(); consume_events(&perf_buffer); }
src/main.zig
const std = @import("std"); const alka = @import("alka"); usingnamespace alka.math; usingnamespace alka.log; const mlog = std.log.scoped(.app); const ecs = alka.ecs; const game = @import("game.zig"); const maxent = 1024; pub const PlayerController = struct { left: *const alka.input.State = undefined, right: *const alka.input.State = undefined, up: *const alka.input.State = undefined, down: *const alka.input.State = undefined, dash: *const alka.input.State = undefined, dash_max: f32 = 0, dash_counter: f32 = 0, dash_timer_max: f32 = 0.5, dash_timer: f32 = 0, dash_start: bool = false, }; pub const Motion = struct { velocity: Vec2f = Vec2f{}, motion: Vec2f = Vec2f{}, constant: Vec2f = Vec2f{}, maxspeed: Vec2f = Vec2f{}, acc: f32 = 0, friction: f32 = 0, }; pub const Fabric = struct { maxtime: f32 = 1, ctime: f32 = 0, counter: u32 = 0, reloadtime: f32 = 0, deloadtime: f32 = 0, reloadc: f32 = 0, reloadcc: f32 = 0, state: u8 = 0, // 0: reload, 1: unload spawn: fn (self: u64) anyerror!void = undefined, }; pub const EnemyFabricController = struct { torque: f32 = 0, torquec: f32 = 0, speedup: f32 = 0, hearts: u32 = 3, }; pub const World = ecs.World(struct { plcontroller: ecs.StoreComponent("Player Controller", PlayerController, 1), motion: ecs.StoreComponent("Motion", Motion, maxent), ro: ecs.StoreComponent("Enemy Fabric Controller", EnemyFabricController, maxent), tr: ecs.StoreComponent("Transform", Transform2D, maxent), col: ecs.StoreComponent("Colour", alka.Colour, maxent), mask: ecs.StoreComponent("Collision Mask", []const u8, maxent), texturedraw: ecs.StoreComponent("Texture Draw", u64, maxent), rectdraw: ecs.StoreComponent("Rectangle Draw", i1, maxent), fab: alka.ecs.StoreComponent("Fabric", Fabric, maxent), }); pub const SpecialEntities = enum { player, wall_left, wall_right, wall_top, wall_bottom, }; pub var world = World{}; pub var is_init = false; pub var abortfunc = false; pub fn init() !void { defer is_init = true; world = try World.init(alka.getAllocator()); } pub fn deinit() void { defer is_init = false; world.deinit(); } pub fn update(dt: f32) !void { defer abortfunc = false; { comptime const comps = [_][]const u8{ "Motion", "Transform", "Player Controller", }; const mpos = alka.getMousePosition(); var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (abortfunc) break; if (entry.value) |entity| { var c = try entity.getPtr("Player Controller", PlayerController); var tr = try entity.getPtr("Transform", Transform2D); var mot = try entity.getPtr("Motion", Motion); tr.rotation = tr.getOriginated().angle(mpos) + 90; if (c.left.* == .down) { mot.motion.x = -mot.acc; } else if (c.right.* == .down) { mot.motion.x = mot.acc; } else mot.motion.x = 0; if (c.up.* == .down) { mot.motion.y = -mot.acc; } else if (c.down.* == .down) { mot.motion.y = mot.acc; } else mot.motion.y = 0; if (c.dash.* == .down and c.dash_counter <= 0) { const pos0 = tr.getOriginated(); const pos1 = alka.getMousePosition(); const angle = pos1.angleRad(pos0); const toward = Vec2f{ .x = @cos(angle), .y = @sin(angle), }; mot.motion = toward.mulValues(-1000, -1000); c.dash_counter = c.dash_max; c.dash_timer = c.dash_timer_max; c.dash_start = true; } if (c.dash_counter > 0) { c.dash_counter -= 1 * dt; } else { c.dash_counter = 0; } if (c.dash_start and c.dash_timer > 0) { c.dash_timer -= 1 * dt; } else c.dash_start = false; } } } { comptime const comps = [_][]const u8{ "Motion", }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (abortfunc) break; 
if (entry.value) |entity| { var mot = try entity.getPtr("Motion", Motion); mot.velocity.x += blk: { var res = mot.motion.x + mot.constant.x; if (mot.velocity.x < -0.1) res += mot.friction; if (mot.velocity.x > 0.1) res -= mot.friction; break :blk res * dt; }; if (mot.velocity.x > mot.maxspeed.x) { mot.velocity.x = mot.maxspeed.x; } else if (mot.velocity.x < -mot.maxspeed.x) mot.velocity.x = -mot.maxspeed.x; mot.velocity.y += blk: { var res = mot.motion.y + mot.constant.y; if (mot.velocity.y < -0.1) res += mot.friction; if (mot.velocity.y > 0.1) res -= mot.friction; break :blk res * dt; }; if (mot.velocity.y > mot.maxspeed.y) { mot.velocity.y = mot.maxspeed.y; } else if (mot.velocity.y < -mot.maxspeed.y) mot.velocity.y = -mot.maxspeed.y; if (!(mot.constant.x > 0 or mot.constant.x < 0)) { if (mot.velocity.x >= -0.1 and mot.velocity.x <= 0.1) mot.velocity.x = 0; } if (!(mot.constant.y > 0 or mot.constant.y < 0)) { if (mot.velocity.y >= -0.1 and mot.velocity.y <= 0.1) mot.velocity.y = 0; } mot.motion = Vec2f{}; } } } { comptime const comps = [_][]const u8{ "Fabric", }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (abortfunc) break; if (entry.value) |entity| { var fab = try entity.getPtr("Fabric", Fabric); if (fab.reloadc >= 0 and fab.reloadc < fab.deloadtime and fab.reloadcc >= fab.reloadtime) { if (fab.ctime <= 0) { try fab.spawn(entity.id); fab.ctime = fab.maxtime; } else fab.ctime -= 1 * dt; fab.state = 1; fab.reloadc += 1 * dt; } else { if (fab.reloadcc < fab.reloadtime) { fab.reloadcc += 1 * dt; fab.state = 0; } if (fab.reloadc >= fab.deloadtime) { fab.reloadc = 0; fab.reloadcc = 0; } } } } } { comptime const comps = [_][]const u8{ "Enemy Fabric Controller", "Fabric", }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (abortfunc) break; if (entry.value) |entity| { const fab = try entity.get("Fabric", Fabric); var fcontrol = try entity.getPtr("Enemy Fabric Controller", EnemyFabricController); if (fab.state == 0) fcontrol.torquec += fcontrol.torque * dt; if (fab.state == 1) fcontrol.torquec += fcontrol.speedup * dt; } } } } pub fn fixed(dt: f32) !void { defer abortfunc = false; { comptime const comps = [_][]const u8{ "Enemy Fabric Controller", "Transform", }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (abortfunc) break; if (entry.value) |entity| { var tr = try entity.getPtr("Transform", Transform2D); var fcontrol = try entity.getPtr("Enemy Fabric Controller", EnemyFabricController); tr.rotation += fcontrol.torquec; fcontrol.torquec = 0; } } } { comptime const comps = [_][]const u8{ "Transform", "Motion", }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (abortfunc) break; if (entry.value) |entity| { var tr = try entity.getPtr("Transform", Transform2D); const mot = try entity.get("Motion", Motion); try moveAndCollide(entity); tr.position = tr.position.add(mot.velocity); } } } } pub fn draw() !void { const asset = alka.getAssetManager(); { comptime const comps = [_][]const u8{ "Transform", "Colour", "Rectangle Draw" }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (entry.value) |entity| { const tr = try entity.get("Transform", Transform2D); const colour = try entity.get("Colour", alka.Colour); try alka.drawRectangleAdv(tr.getRectangleNoOrigin(), tr.origin, deg2radf(tr.rotation), colour); } } } { comptime const comps = [_][]const u8{ "Enemy Fabric Controller", 
"Transform", "Colour", "Texture Draw" }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (entry.value) |entity| { var tr = try entity.get("Transform", Transform2D); const colour = try entity.get("Colour", alka.Colour); const fcontrol = try entity.get("Enemy Fabric Controller", EnemyFabricController); const texture = try asset.getTexture(10); var i: u32 = 0; tr.position.y -= tr.size.y; tr.size = Vec2f{ .x = @intToFloat(f32, texture.width) * 3.5, .y = @intToFloat(f32, texture.height) * 3.5, }; tr.origin = tr.size.divValues(2, 2); const x: f32 = tr.size.x; while (i < fcontrol.hearts) : (i += 1) { var rect = tr.getRectangleNoOrigin(); rect.position.x += x * @intToFloat(f32, i) - tr.size.x; try alka.drawTextureAdv(10, rect, Rectangle{ .size = Vec2f{ .x = @intToFloat(f32, texture.width), .y = @intToFloat(f32, texture.height), }, }, tr.origin, 0, colour); } } } } { comptime const comps = [_][]const u8{ "Transform", "Colour", "Texture Draw" }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (entry.value) |entity| { const tr = try entity.get("Transform", Transform2D); const colour = try entity.get("Colour", alka.Colour); const texture_id = try entity.get("Texture Draw", u64); const texture = try asset.getTexture(texture_id); // collision box //try alka.drawRectangleLines(tr.getRectangle(), colour); try alka.drawTextureAdv(texture_id, tr.getRectangleNoOrigin(), Rectangle{ .size = Vec2f{ .x = @intToFloat(f32, texture.width), .y = @intToFloat(f32, texture.height), }, }, tr.origin, deg2radf(tr.rotation), colour); } } } } fn moveAndCollide(entity: *World.Register) !void { const tr = try entity.get("Transform", Transform2D); const mask = try entity.get("Collision Mask", []const u8); var motion = try entity.getPtr("Motion", Motion); const off: f32 = 6.5; const push: f32 = 2; var tmot = Motion{}; { comptime const comps = [_][]const u8{ "Transform", "Collision Mask", }; var it = World.iterator(comps.len, comps){ .world = &world }; while (it.next()) |entry| { if (entry.value) |oentity| { if (entity.id == oentity.id) continue; const omask = try oentity.get("Collision Mask", []const u8); const otr = try oentity.get("Transform", Transform2D); const omot = oentity.getPtr("Motion", Motion) catch &tmot; var collided = false; var destroy = false; var odestroy = false; if (std.mem.eql(u8, mask, "Enemy Fabric Controller")) { if (std.mem.eql(u8, omask, "Enemy Kamikaze")) { continue; } } else if (std.mem.eql(u8, mask, "Enemy Kamikaze")) { if (std.mem.eql(u8, omask, "Enemy Fabric Controller")) { continue; } } if (std.mem.eql(u8, mask, omask)) { continue; } if (tr.aabb(otr)) collided = true; if (motion.velocity.x <= -0.1 and tr.aabbMeeting(otr, Vec2f{ .x = -off })) { motion.velocity.x = -(motion.velocity.x - push - omot.velocity.x); omot.velocity.x = -(motion.velocity.x - push - omot.velocity.x); collided = true; } else if (motion.velocity.x >= 0.1 and tr.aabbMeeting(otr, Vec2f{ .x = off })) { motion.velocity.x = -(motion.velocity.x + push + omot.velocity.x); omot.velocity.x = -(motion.velocity.x + push + omot.velocity.x); collided = true; } if (motion.velocity.y <= -0.1 and tr.aabbMeeting(otr, Vec2f{ .y = -off })) { motion.velocity.y = -(motion.velocity.y - push - omot.velocity.y); omot.velocity.y = -(motion.velocity.y - push - omot.velocity.y); collided = true; } else if (motion.velocity.y >= 0.1 and tr.aabbMeeting(otr, Vec2f{ .y = off })) { motion.velocity.y = -(motion.velocity.y + push + omot.velocity.y); omot.velocity.y = 
-(motion.velocity.y + push + omot.velocity.y); collided = true; } if (collided) { const factor: f32 = @intToFloat(f32, game.rand.intRangeAtMost(i32, 1, 2)) * game.rand.float(f32); if (entity.id != @enumToInt(SpecialEntities.player)) { if (std.mem.eql(u8, omask, "Wall")) { destroy = true; } else if (std.mem.eql(u8, omask, "Player")) { game.scoreDecrease(game.score_decreasec_kamikaze, factor); destroy = true; } } else { if (std.mem.eql(u8, omask, "Enemy Kamikaze")) { game.scoreDecrease(game.score_decreasec_kamikaze, factor); odestroy = true; } else if (std.mem.eql(u8, omask, "Enemy Fabric Controller")) { var cnt = try entity.getPtr("Player Controller", PlayerController); if (cnt.dash_start) { game.scoreIncrease(game.score_increasec_kamikaze, factor); odestroy = try heartDecreaseEnemyFabricController(oentity); if (odestroy) game.scoreIncrease(game.score_increasec_station, factor); cnt.dash_start = false; } } } } if (odestroy) { oentity.destroy(); try world.removeRegister(oentity.id); abortfunc = true; } if (destroy) { entity.destroy(); try world.removeRegister(entity.id); abortfunc = true; } if (abortfunc) return; } } } } fn heartDecreaseEnemyFabricController(en: *World.Register) !bool { var fcontrol = try en.getPtr("Enemy Fabric Controller", EnemyFabricController); fcontrol.hearts -= 1; if (fcontrol.hearts == 0) return true; return false; }
examples/example_shooter_game/src/ecs.zig
// /*! @brief // * // * @sa @ref init_allocator // * @sa @ref glfwInitAllocator // * // * @since Added in version 3.4. // * // * @ingroup init // */ // typedef struct GLFWallocator // { // GLFWallocatefun allocate; // GLFWreallocatefun reallocate; // GLFWdeallocatefun deallocate; // void* user; // } GLFWallocator; // /*! @brief The function pointer type for memory allocation callbacks. // * // * This is the function pointer type for memory allocation callbacks. A memory // * allocation callback function has the following signature: // * @code // * void* function_name(size_t size, void* user) // * @endcode // * // * This function must return either a memory block at least `size` bytes long, // * or `NULL` if allocation failed. Note that not all parts of GLFW handle allocation // * failures gracefully yet. // * // * This function may be called during @ref glfwInit but before the library is // * flagged as initialized, as well as during @ref glfwTerminate after the // * library is no longer flagged as initialized. // * // * Any memory allocated by this function will be deallocated during library // * termination or earlier. // * // * The size will always be greater than zero. Allocations of size zero are filtered out // * before reaching the custom allocator. // * // * @param[in] size The minimum size, in bytes, of the memory block. // * @param[in] user The user-defined pointer from the allocator. // * @return The address of the newly allocated memory block, or `NULL` if an // * error occurred. // * // * @pointer_lifetime The returned memory block must be valid at least until it // * is deallocated. // * // * @reentrancy This function should not call any GLFW function. // * // * @thread_safety This function may be called from any thread that calls GLFW functions. // * // * @sa @ref init_allocator // * @sa @ref GLFWallocator // * // * @since Added in version 3.4. // * // * @ingroup init // */ // typedef void* (* GLFWallocatefun)(size_t size, void* user); // /*! @brief The function pointer type for memory reallocation callbacks. // * // * This is the function pointer type for memory reallocation callbacks. // * A memory reallocation callback function has the following signature: // * @code // * void* function_name(void* block, size_t size, void* user) // * @endcode // * // * This function must return a memory block at least `size` bytes long, or // * `NULL` if allocation failed. Note that not all parts of GLFW handle allocation // * failures gracefully yet. // * // * This function may be called during @ref glfwInit but before the library is // * flagged as initialized, as well as during @ref glfwTerminate after the // * library is no longer flagged as initialized. // * // * Any memory allocated by this function will be deallocated during library // * termination or earlier. // * // * The block address will never be `NULL` and the size will always be greater than zero. // * Reallocations of a block to size zero are converted into deallocations. Reallocations // * of `NULL` to a non-zero size are converted into regular allocations. // * // * @param[in] block The address of the memory block to reallocate. // * @param[in] size The new minimum size, in bytes, of the memory block. // * @param[in] user The user-defined pointer from the allocator. // * @return The address of the newly allocated or resized memory block, or // * `NULL` if an error occurred. // * // * @pointer_lifetime The returned memory block must be valid at least until it // * is deallocated. 
// * // * @reentrancy This function should not call any GLFW function. // * // * @thread_safety This function may be called from any thread that calls GLFW functions. // * // * @sa @ref init_allocator // * @sa @ref GLFWallocator // * // * @since Added in version 3.4. // * // * @ingroup init // */ // typedef void* (* GLFWreallocatefun)(void* block, size_t size, void* user); // /*! @brief The function pointer type for memory deallocation callbacks. // * // * This is the function pointer type for memory deallocation callbacks. // * A memory deallocation callback function has the following signature: // * @code // * void function_name(void* block, void* user) // * @endcode // * // * This function may deallocate the specified memory block. This memory block // * will have been allocated with the same allocator. // * // * This function may be called during @ref glfwInit but before the library is // * flagged as initialized, as well as during @ref glfwTerminate after the // * library is no longer flagged as initialized. // * // * The block address will never be `NULL`. Deallocations of `NULL` are filtered out // * before reaching the custom allocator. // * // * @param[in] block The address of the memory block to deallocate. // * @param[in] user The user-defined pointer from the allocator. // * // * @pointer_lifetime The specified memory block will not be accessed by GLFW // * after this function is called. // * // * @reentrancy This function should not call any GLFW function. // * // * @thread_safety This function may be called from any thread that calls GLFW functions. // * // * @sa @ref init_allocator // * @sa @ref GLFWallocator // * // * @since Added in version 3.4. // * // * @ingroup init // */ // typedef void (* GLFWdeallocatefun)(void* block, void* user);
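// ---------------------------------------------------------------------------
// Editor's note: hypothetical usage sketch, not part of the original header.
// It only shows how the three callback types documented above slot into a
// GLFWallocator; the callback signatures are taken from the @code blocks above,
// and the glfwInitAllocator call is assumed from the @sa @ref glfwInitAllocator
// references (registration must happen before the library is initialized).
//
// #include <stdlib.h>
//
// static void* my_allocate(size_t size, void* user)
// {
//     (void) user;          // user pointer unused in this sketch
//     return malloc(size);  // size is documented to always be non-zero
// }
//
// static void* my_reallocate(void* block, size_t size, void* user)
// {
//     (void) user;
//     return realloc(block, size);  // block is documented to never be NULL
// }
//
// static void my_deallocate(void* block, void* user)
// {
//     (void) user;
//     free(block);  // block is documented to never be NULL
// }
//
// const GLFWallocator allocator = { my_allocate, my_reallocate, my_deallocate, NULL };
// glfwInitAllocator(&allocator);  // assumed entry point, see @ref glfwInitAllocator
// glfwInit();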
glfw/src/allocator.zig
const std = @import("index.zig"); const io = std.io; const DefaultPrng = std.rand.DefaultPrng; const assert = std.debug.assert; const mem = std.mem; const os = std.os; const builtin = @import("builtin"); test "write a file, read it, then delete it" { var raw_bytes: [200 * 1024]u8 = undefined; var allocator = &std.heap.FixedBufferAllocator.init(raw_bytes[0..]).allocator; var data: [1024]u8 = undefined; var prng = DefaultPrng.init(1234); prng.random.bytes(data[0..]); const tmp_file_name = "temp_test_file.txt"; { var file = try os.File.openWrite(allocator, tmp_file_name); defer file.close(); var file_out_stream = io.FileOutStream.init(&file); var buf_stream = io.BufferedOutStream(io.FileOutStream.Error).init(&file_out_stream.stream); const st = &buf_stream.stream; try st.print("begin"); try st.write(data[0..]); try st.print("end"); try buf_stream.flush(); } { var file = try os.File.openRead(allocator, tmp_file_name); defer file.close(); const file_size = try file.getEndPos(); const expected_file_size = "begin".len + data.len + "end".len; assert(file_size == expected_file_size); var file_in_stream = io.FileInStream.init(&file); var buf_stream = io.BufferedInStream(io.FileInStream.Error).init(&file_in_stream.stream); const st = &buf_stream.stream; const contents = try st.readAllAlloc(allocator, 2 * 1024); defer allocator.free(contents); assert(mem.eql(u8, contents[0.."begin".len], "begin")); assert(mem.eql(u8, contents["begin".len .. contents.len - "end".len], data)); assert(mem.eql(u8, contents[contents.len - "end".len ..], "end")); } try os.deleteFile(allocator, tmp_file_name); } test "BufferOutStream" { var bytes: [100]u8 = undefined; var allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator; var buffer = try std.Buffer.initSize(allocator, 0); var buf_stream = &std.io.BufferOutStream.init(&buffer).stream; const x: i32 = 42; const y: i32 = 1234; try buf_stream.print("x: {}\ny: {}\n", x, y); assert(mem.eql(u8, buffer.toSlice(), "x: 42\ny: 1234\n")); }
std/io_test.zig
const std = @import("../std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const expect = std.testing.expect; /// Many producer, many consumer, non-allocating, thread-safe. /// Uses a mutex to protect access. pub fn Queue(comptime T: type) type { return struct { head: ?*Node, tail: ?*Node, mutex: std.Mutex, pub const Self = @This(); pub const Node = std.TailQueue(T).Node; pub fn init() Self { return Self{ .head = null, .tail = null, .mutex = std.Mutex.init(), }; } pub fn put(self: *Self, node: *Node) void { node.next = null; const held = self.mutex.acquire(); defer held.release(); node.prev = self.tail; self.tail = node; if (node.prev) |prev_tail| { prev_tail.next = node; } else { assert(self.head == null); self.head = node; } } pub fn get(self: *Self) ?*Node { const held = self.mutex.acquire(); defer held.release(); const head = self.head orelse return null; self.head = head.next; if (head.next) |new_head| { new_head.prev = null; } else { self.tail = null; } // This way, a get() and a remove() are thread-safe with each other. head.prev = null; head.next = null; return head; } pub fn unget(self: *Self, node: *Node) void { node.prev = null; const held = self.mutex.acquire(); defer held.release(); const opt_head = self.head; self.head = node; if (opt_head) |head| { head.next = node; } else { assert(self.tail == null); self.tail = node; } } /// Thread-safe with get() and remove(). Returns whether node was actually removed. pub fn remove(self: *Self, node: *Node) bool { const held = self.mutex.acquire(); defer held.release(); if (node.prev == null and node.next == null and self.head != node) { return false; } if (node.prev) |prev| { prev.next = node.next; } else { self.head = node.next; } if (node.next) |next| { next.prev = node.prev; } else { self.tail = node.prev; } node.prev = null; node.next = null; return true; } pub fn isEmpty(self: *Self) bool { const held = self.mutex.acquire(); defer held.release(); return self.head == null; } pub fn dump(self: *Self) void { self.dumpToStream(std.io.getStdErr().outStream()) catch return; } pub fn dumpToStream(self: *Self, stream: var) !void { const S = struct { fn dumpRecursive( s: var, optional_node: ?*Node, indent: usize, comptime depth: comptime_int, ) !void { try s.writeByteNTimes(' ', indent); if (optional_node) |node| { try s.print("0x{x}={}\n", .{ @ptrToInt(node), node.data }); if (depth == 0) { try s.print("(max depth)\n", .{}); return; } try dumpRecursive(s, node.next, indent + 1, depth - 1); } else { try s.print("(null)\n", .{}); } } }; const held = self.mutex.acquire(); defer held.release(); try stream.print("head: ", .{}); try S.dumpRecursive(stream, self.head, 0, 4); try stream.print("tail: ", .{}); try S.dumpRecursive(stream, self.tail, 0, 4); } }; } const Context = struct { allocator: *std.mem.Allocator, queue: *Queue(i32), put_sum: isize, get_sum: isize, get_count: usize, puts_done: bool, }; // TODO add lazy evaluated build options and then put puts_per_thread behind // some option such as: "AggressiveMultithreadedFuzzTest". 
In the AppVeyor // CI we would use a less aggressive setting since at 1 core, while we still // want this test to pass, we need a smaller value since there is so much thrashing // we would also use a less aggressive setting when running in valgrind const puts_per_thread = 500; const put_thread_count = 3; test "std.atomic.Queue" { var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024); defer std.heap.page_allocator.free(plenty_of_memory); var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); var a = &fixed_buffer_allocator.allocator; var queue = Queue(i32).init(); var context = Context{ .allocator = a, .queue = &queue, .put_sum = 0, .get_sum = 0, .puts_done = false, .get_count = 0, }; if (builtin.single_threaded) { expect(context.queue.isEmpty()); { var i: usize = 0; while (i < put_thread_count) : (i += 1) { expect(startPuts(&context) == 0); } } expect(!context.queue.isEmpty()); context.puts_done = true; { var i: usize = 0; while (i < put_thread_count) : (i += 1) { expect(startGets(&context) == 0); } } expect(context.queue.isEmpty()); } else { expect(context.queue.isEmpty()); var putters: [put_thread_count]*std.Thread = undefined; for (putters) |*t| { t.* = try std.Thread.spawn(&context, startPuts); } var getters: [put_thread_count]*std.Thread = undefined; for (getters) |*t| { t.* = try std.Thread.spawn(&context, startGets); } for (putters) |t| t.wait(); @atomicStore(bool, &context.puts_done, true, .SeqCst); for (getters) |t| t.wait(); expect(context.queue.isEmpty()); } if (context.put_sum != context.get_sum) { std.debug.panic("failure\nput_sum:{} != get_sum:{}", .{ context.put_sum, context.get_sum }); } if (context.get_count != puts_per_thread * put_thread_count) { std.debug.panic("failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}", .{ context.get_count, @as(u32, puts_per_thread), @as(u32, put_thread_count), }); } } fn startPuts(ctx: *Context) u8 { var put_count: usize = puts_per_thread; var r = std.rand.DefaultPrng.init(0xdeadbeef); while (put_count != 0) : (put_count -= 1) { std.time.sleep(1); // let the os scheduler be our fuzz const x = @bitCast(i32, r.random.int(u32)); const node = ctx.allocator.create(Queue(i32).Node) catch unreachable; node.* = .{ .prev = undefined, .next = undefined, .data = x, }; ctx.queue.put(node); _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst); } return 0; } fn startGets(ctx: *Context) u8 { while (true) { const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst); while (ctx.queue.get()) |node| { std.time.sleep(1); // let the os scheduler be our fuzz _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst); _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst); } if (last) return 0; } } test "std.atomic.Queue single-threaded" { var queue = Queue(i32).init(); expect(queue.isEmpty()); var node_0 = Queue(i32).Node{ .data = 0, .next = undefined, .prev = undefined, }; queue.put(&node_0); expect(!queue.isEmpty()); var node_1 = Queue(i32).Node{ .data = 1, .next = undefined, .prev = undefined, }; queue.put(&node_1); expect(!queue.isEmpty()); expect(queue.get().?.data == 0); expect(!queue.isEmpty()); var node_2 = Queue(i32).Node{ .data = 2, .next = undefined, .prev = undefined, }; queue.put(&node_2); expect(!queue.isEmpty()); var node_3 = Queue(i32).Node{ .data = 3, .next = undefined, .prev = undefined, }; queue.put(&node_3); expect(!queue.isEmpty()); expect(queue.get().?.data == 1); expect(!queue.isEmpty()); expect(queue.get().?.data == 2); expect(!queue.isEmpty()); var node_4 = 
Queue(i32).Node{ .data = 4, .next = undefined, .prev = undefined, }; queue.put(&node_4); expect(!queue.isEmpty()); expect(queue.get().?.data == 3); node_3.next = null; expect(!queue.isEmpty()); expect(queue.get().?.data == 4); expect(queue.isEmpty()); expect(queue.get() == null); expect(queue.isEmpty()); } test "std.atomic.Queue dump" { const mem = std.mem; var buffer: [1024]u8 = undefined; var expected_buffer: [1024]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); var queue = Queue(i32).init(); // Test empty stream fbs.reset(); try queue.dumpToStream(fbs.outStream()); expect(mem.eql(u8, buffer[0..fbs.pos], \\head: (null) \\tail: (null) \\ )); // Test a stream with one element var node_0 = Queue(i32).Node{ .data = 1, .next = undefined, .prev = undefined, }; queue.put(&node_0); fbs.reset(); try queue.dumpToStream(fbs.outStream()); var expected = try std.fmt.bufPrint(expected_buffer[0..], \\head: 0x{x}=1 \\ (null) \\tail: 0x{x}=1 \\ (null) \\ , .{ @ptrToInt(queue.head), @ptrToInt(queue.tail) }); expect(mem.eql(u8, buffer[0..fbs.pos], expected)); // Test a stream with two elements var node_1 = Queue(i32).Node{ .data = 2, .next = undefined, .prev = undefined, }; queue.put(&node_1); fbs.reset(); try queue.dumpToStream(fbs.outStream()); expected = try std.fmt.bufPrint(expected_buffer[0..], \\head: 0x{x}=1 \\ 0x{x}=2 \\ (null) \\tail: 0x{x}=2 \\ (null) \\ , .{ @ptrToInt(queue.head), @ptrToInt(queue.head.?.next), @ptrToInt(queue.tail) }); expect(mem.eql(u8, buffer[0..fbs.pos], expected)); }
lib/std/atomic/queue.zig
const zig_std = @import("std"); const Allocator = zig_std.mem.Allocator; const tokenizer = @import("tokenizer.zig"); pub const Token = tokenizer.Token; pub const Tokenizer = tokenizer.Tokenizer; pub const tokenize = tokenizer.tokenize; const parser = @import("parser.zig"); pub const Parser = parser.Parser; pub const parse = parser.parse; const ast = @import("ast.zig"); pub const Tree = ast.Tree; pub const Node = ast.Node; const compiler = @import("compiler.zig"); pub const Compiler = compiler.Compiler; pub const compile = compiler.compile; const value = @import("value.zig"); pub const Value = value.Value; pub const Type = value.Type; const vm = @import("vm.zig"); pub const Vm = vm.Vm; pub const Gc = @import("Gc.zig"); const bytecode = @import("bytecode.zig"); pub const Op = bytecode.Op; pub const Module = bytecode.Module; pub const RegRef = bytecode.RegRef; pub const Instruction = bytecode.Instruction; pub const repl = @import("repl.zig"); pub const std = @import("std.zig"); /// file extension of bog text files pub const extension = ".bog"; /// file extension of bog bytecode files, 'byte bog' pub const bytecode_extension = ".bbog"; pub const version = zig_std.builtin.Version{ .major = 0, .minor = 0, .patch = 1, }; pub const Errors = struct { list: List, const Kind = enum { err, note, trace, }; const List = zig_std.ArrayList(struct { msg: Value.String, index: u32, kind: Kind, }); pub fn init(alloc: *Allocator) Errors { return .{ .list = List.init(alloc) }; } pub fn deinit(self: *Errors) void { for (self.list.items) |*err| { err.msg.deinit(self.list.allocator); } self.list.deinit(); } pub fn add(self: *Errors, msg: Value.String, index: u32, kind: Kind) !void { try self.list.append(.{ .msg = msg, .index = index, .kind = kind, }); } pub fn render(self: *Errors, source: []const u8, writer: anytype) !void { const RED = "\x1b[31;1m"; const GREEN = "\x1b[32;1m"; const BOLD = "\x1b[0;1m"; const RESET = "\x1b[0m"; const CYAN = "\x1b[36;1m"; for (self.list.items) |*e| { switch (e.kind) { .err => try writer.writeAll(RED ++ "error: " ++ BOLD), .note => try writer.writeAll(CYAN ++ "note: " ++ BOLD), .trace => {}, } try writer.print("{s}\n" ++ RESET, .{e.msg.data}); const start = lineBegin(source, e.index); const end = zig_std.mem.indexOfScalarPos(u8, source, e.index, '\n') orelse source.len; try writer.writeAll(source[start..end]); try writer.writeAll(zig_std.cstr.line_sep); try writer.writeByteNTimes(' ', e.index - start); try writer.writeAll(GREEN ++ "^\n" ++ RESET); e.msg.deinit(self.list.allocator); } self.list.items.len = 0; } fn lineBegin(slice: []const u8, start_index: usize) usize { var i = start_index; while (i != 0) { i -= 1; if (slice[i] == '\n') return i + 1; } return 0; } };
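// Editor's note: hypothetical usage sketch, not part of the original file. The
// `msg` value is assumed to be produced elsewhere (e.g. by the tokenizer or
// compiler that reports the diagnostic); only the calls below come from the
// `Errors` API defined above.
//
//   var errors = Errors.init(allocator);
//   defer errors.deinit();
//   try errors.add(msg, byte_offset_of_error, .err);
//   try errors.render(source, zig_std.io.getStdErr().writer());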
src/bog.zig
const std = @import("std"); const gpa = std.heap.c_allocator; const u = @import("./util/index.zig"); // // pub const CollectOptions = struct { log: bool, update: bool, }; pub fn collect_deps_deep(dir: []const u8, mpath: []const u8, options: CollectOptions) !u.Module { const m = try u.ModFile.init(gpa, mpath); const moduledeps = &std.ArrayList(u.Module).init(gpa); try moduledeps.append(try collect_deps(dir, mpath, options)); for (m.devdeps) |d| { try get_module_from_dep(moduledeps, d, dir, m.name, options); } return u.Module{ .is_sys_lib = false, .id = "root", .name = "root", .main = m.main, .c_include_dirs = &.{}, .c_source_flags = &.{}, .c_source_files = &.{}, .deps = moduledeps.items, .clean_path = "", .only_os = &.{}, .except_os = &.{}, .yaml = m.yaml, }; } pub fn collect_deps(dir: []const u8, mpath: []const u8, options: CollectOptions) anyerror!u.Module { const m = try u.ModFile.init(gpa, mpath); const moduledeps = &std.ArrayList(u.Module).init(gpa); for (m.deps) |d| { try get_module_from_dep(moduledeps, d, dir, m.name, options); } return u.Module{ .is_sys_lib = false, .id = m.id, .name = m.name, .main = m.main, .c_include_dirs = m.c_include_dirs, .c_source_flags = m.c_source_flags, .c_source_files = m.c_source_files, .deps = moduledeps.items, .clean_path = "../..", .only_os = &.{}, .except_os = &.{}, .yaml = m.yaml, }; } pub fn collect_pkgs(mod: u.Module, list: *std.ArrayList(u.Module)) anyerror!void { if (u.list_contains_gen(u.Module, list, mod)) { return; } try list.append(mod); for (mod.deps) |d| { try collect_pkgs(d, list); } } fn get_moddir(basedir: []const u8, d: u.Dep, parent_name: []const u8, options: CollectOptions) ![]const u8 { const p = try std.fs.path.join(gpa, &.{ basedir, try d.clean_path() }); const pv = try std.fs.path.join(gpa, &.{ basedir, try d.clean_path_v() }); const tempdir = try std.fs.path.join(gpa, &.{ basedir, "temp" }); if (options.log) { u.print("fetch: {s}: {s}: {s}", .{ parent_name, @tagName(d.type), d.path }); } switch (d.type) { .system_lib => { // no op return ""; }, .git => { if (d.version.len > 0) { const vers = u.parse_split(u.GitVersionType, "-").do(d.version) catch |e| switch (e) { error.IterEmpty => unreachable, error.NoMemberFound => { const vtype = d.version[0..std.mem.indexOf(u8, d.version, "-").?]; u.assert(false, "fetch: git: version type '{s}' is invalid.", .{vtype}); unreachable; }, }; if (try u.does_folder_exist(pv)) { if (vers.id == .branch) { if (options.update) { try d.type.update(pv, d.path); } } return pv; } try d.type.pull(d.path, tempdir); if ((try u.run_cmd(tempdir, &.{ "git", "checkout", vers.string })) > 0) { u.assert(false, "fetch: git: {s}: {s} {s} does not exist", .{ d.path, @tagName(vers.id), vers.string }); } const td_fd = try std.fs.cwd().openDir(basedir, .{}); try std.fs.cwd().makePath(pv); try td_fd.rename("temp", try d.clean_path_v()); if (vers.id != .branch) { const pvd = try std.fs.cwd().openDir(pv, .{}); try pvd.deleteTree(".git"); } return pv; } if (!try u.does_folder_exist(p)) { try d.type.pull(d.path, p); } else { if (options.update) { try d.type.update(p, d.path); } } return p; }, .hg => { if (!try u.does_folder_exist(p)) { try d.type.pull(d.path, p); } else { if (options.update) { try d.type.update(p, d.path); } } return p; }, .http => { if (try u.does_folder_exist(pv)) { return pv; } const file_name = try u.last(try u.split(d.path, "/")); if (d.version.len > 0) { if (try u.does_folder_exist(pv)) { return pv; } const file_path = try std.fs.path.join(gpa, &.{ pv, file_name }); try d.type.pull(d.path, pv); if 
(try u.validate_hash(d.version, file_path)) { try std.fs.cwd().deleteFile(file_path); return pv; } try std.fs.cwd().deleteTree(pv); u.assert(false, "{s} does not match hash {s}", .{ d.path, d.version }); return p; } if (try u.does_folder_exist(p)) { try std.fs.cwd().deleteTree(p); } const file_path = try std.fs.path.join(gpa, &.{ p, file_name }); try d.type.pull(d.path, p); try std.fs.deleteFileAbsolute(file_path); return p; }, } } fn get_module_from_dep(list: *std.ArrayList(u.Module), d: u.Dep, dir: []const u8, parent_name: []const u8, options: CollectOptions) !void { const moddir = try get_moddir(dir, d, parent_name, options); switch (d.type) { .system_lib => { if (d.is_for_this()) try list.append(u.Module{ .is_sys_lib = true, .id = "", .name = d.path, .only_os = d.only_os, .except_os = d.except_os, .main = "", .c_include_dirs = &.{}, .c_source_flags = &.{}, .c_source_files = &.{}, .deps = &[_]u.Module{}, .clean_path = d.path, .yaml = null, }); }, else => blk: { var dd = try collect_deps(dir, try u.concat(&.{ moddir, "/zig.mod" }), options) catch |e| switch (e) { error.FileNotFound => { if (d.main.len > 0 or d.c_include_dirs.len > 0 or d.c_source_files.len > 0) { var mod_from = try u.Module.from(d); if (mod_from.id.len == 0) mod_from.id = try u.random_string(48); mod_from.clean_path = u.trim_prefix(moddir, dir)[1..]; if (mod_from.is_for_this()) try list.append(mod_from); } break :blk; }, else => e, }; dd.clean_path = u.trim_prefix(moddir, dir)[1..]; if (dd.id.len == 0) dd.id = try u.random_string(48); if (d.name.len > 0) dd.name = d.name; if (d.main.len > 0) dd.main = d.main; if (d.c_include_dirs.len > 0) dd.c_include_dirs = d.c_include_dirs; if (d.c_source_flags.len > 0) dd.c_source_flags = d.c_source_flags; if (d.c_source_files.len > 0) dd.c_source_files = d.c_source_files; if (d.only_os.len > 0) dd.only_os = d.only_os; if (d.except_os.len > 0) dd.except_os = d.except_os; if (dd.is_for_this()) try list.append(dd); }, } }
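// Editor's note: hypothetical usage sketch, not part of the original file. The
// ".zigmod/deps" cache directory and "zig.mod" manifest name are assumptions;
// the function signatures are the ones defined above.
//
//   const top = try collect_deps_deep(".zigmod/deps", "zig.mod", .{ .log = true, .update = false });
//   var flat = std.ArrayList(u.Module).init(gpa);
//   try collect_pkgs(top, &flat);
//   // `flat.items` now holds the root module plus every transitive dependency, deduplicated.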
src/common.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const odbc = @import("odbc"); const db_connection = @import("connection.zig"); const DBConnection = db_connection.DBConnection; const ConnectionInfo = db_connection.ConnectionInfo; const Row = @import("result_set.zig").Row; const OdbcTestType = struct { id: u32, name: []const u8, occupation: []const u8, age: u32, fn deinit(self: *OdbcTestType, allocator: *Allocator) void { allocator.free(self.name); allocator.free(self.occupation); } }; // const OdbcTestType = struct { // name: []const u8, // age: []const u8, // job_info: struct { // job_name: []const u8 // }, // pub fn fromRow(row: *Row, allocator: *Allocator) !OdbcTestType { // var result: OdbcTestType = undefined; // result.name = try row.get([]const u8, allocator, "name"); // const age = try row.get(u32, allocator, "age"); // result.age = try std.fmt.allocPrint(allocator, "{} years old", .{age}); // result.job_info.job_name = try row.get([]const u8, allocator, "occupation"); // return result; // } // fn deinit(self: *OdbcTestType, allocator: *Allocator) void { // allocator.free(self.name); // allocator.free(self.age); // allocator.free(self.job_info.job_name); // } // }; pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); const allocator = &gpa.allocator; var connection_info = try ConnectionInfo.initWithConfig(allocator, .{ .driver = "PostgreSQL Unicode(x64)", .dsn = "PostgreSQL35W" }); defer connection_info.deinit(); const connection_string = try connection_info.toConnectionString(allocator); defer allocator.free(connection_string); var connection = try DBConnection.initWithConnectionString(connection_string); defer connection.deinit(); try connection.setCommitMode(.manual); var cursor = try connection.getCursor(allocator); defer cursor.deinit() catch {}; // _ = try cursor.insert(OdbcTestType, "odbc_zig_test", &[_]OdbcTestType{ // .{ // .id = 7, // .name = "Greg", // .occupation = "Programmer", // .age = 35 // } // }); // try cursor.commit(); // try cursor.prepare( // .{ "Reese", 30 }, // \\SELECT * // \\FROM odbc_zig_test // \\WHERE name = ? OR age < ? // ); var result_set = try cursor.executeDirect(OdbcTestType, .{}, "select * from odbc_zig_test"); defer result_set.deinit(); const query_results = try result_set.getAllRows(); defer { for (query_results) |*q| q.deinit(allocator); allocator.free(query_results); } for (query_results) |result| { std.debug.print("Id: {}\n", .{result.id}); std.debug.print("Name: {s}\n", .{result.name}); std.debug.print("Occupation: {s}\n", .{result.occupation}); std.debug.print("Age: {}\n\n", .{result.age}); } try cursor.close(); // const tables = try cursor.tablePrivileges("zig-test", "public", "odbc_zig_test"); // defer allocator.free(tables); // for (tables) |*table| { // std.debug.print("{}\n", .{table}); // table.deinit(allocator); // } // try cursor.close(); // const table_columns = try cursor.columns("zig-test", "public", "odbc_zig_test"); // defer allocator.free(table_columns); // for (table_columns) |*column| { // std.debug.print("{}\n", .{column}); // column.deinit(allocator); // } }
src/main.zig
const std = @import("std"); const assert = std.debug.assert; const mem = std.mem; const builtin = @import("builtin"); pub fn foo() error!i32 { const x = try bar(); return x + 1; } pub fn bar() error!i32 { return 13; } pub fn baz() error!i32 { const y = foo() catch 1234; return y + 1; } test "error wrapping" { assert((baz() catch unreachable) == 15); } fn gimmeItBroke() []const u8 { return @errorName(error.ItBroke); } test "@errorName" { assert(mem.eql(u8, @errorName(error.AnError), "AnError")); assert(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName")); } test "error values" { const a = i32(error.err1); const b = i32(error.err2); assert(a != b); } test "redefinition of error values allowed" { shouldBeNotEqual(error.AnError, error.SecondError); } fn shouldBeNotEqual(a: error, b: error) void { if (a == b) unreachable; } test "error binary operator" { const a = errBinaryOperatorG(true) catch 3; const b = errBinaryOperatorG(false) catch 3; assert(a == 3); assert(b == 10); } fn errBinaryOperatorG(x: bool) error!isize { return if (x) error.ItBroke else isize(10); } test "unwrap simple value from error" { const i = unwrapSimpleValueFromErrorDo() catch unreachable; assert(i == 13); } fn unwrapSimpleValueFromErrorDo() error!isize { return 13; } test "error return in assignment" { doErrReturnInAssignment() catch unreachable; } fn doErrReturnInAssignment() error!void { var x : i32 = undefined; x = try makeANonErr(); } fn makeANonErr() error!i32 { return 1; } test "error union type " { testErrorUnionType(); comptime testErrorUnionType(); } fn testErrorUnionType() void { const x: error!i32 = 1234; if (x) |value| assert(value == 1234) else |_| unreachable; assert(@typeId(@typeOf(x)) == builtin.TypeId.ErrorUnion); assert(@typeId(@typeOf(x).ErrorSet) == builtin.TypeId.ErrorSet); assert(@typeOf(x).ErrorSet == error); } test "error set type " { testErrorSetType(); comptime testErrorSetType(); } const MyErrSet = error {OutOfMemory, FileNotFound}; fn testErrorSetType() void { assert(@memberCount(MyErrSet) == 2); const a: MyErrSet!i32 = 5678; const b: MyErrSet!i32 = MyErrSet.OutOfMemory; if (a) |value| assert(value == 5678) else |err| switch (err) { error.OutOfMemory => unreachable, error.FileNotFound => unreachable, } } test "explicit error set cast" { testExplicitErrorSetCast(Set1.A); comptime testExplicitErrorSetCast(Set1.A); } const Set1 = error{A, B}; const Set2 = error{A, C}; fn testExplicitErrorSetCast(set1: Set1) void { var x = Set2(set1); var y = Set1(x); assert(y == error.A); } test "comptime test error for empty error set" { testComptimeTestErrorEmptySet(1234); comptime testComptimeTestErrorEmptySet(1234); } const EmptyErrorSet = error {}; fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) void { if (x) |v| assert(v == 1234) else |err| @compileError("bad"); } test "syntax: nullable operator in front of error union operator" { comptime { assert(?error!i32 == ?(error!i32)); } } test "comptime err to int of error set with only 1 possible value" { testErrToIntWithOnePossibleValue(error.A, u32(error.A)); comptime testErrToIntWithOnePossibleValue(error.A, u32(error.A)); } fn testErrToIntWithOnePossibleValue(x: error{A}, comptime value: u32) void { if (u32(x) != value) { @compileError("bad"); } } test "error union peer type resolution" { testErrorUnionPeerTypeResolution(1); comptime testErrorUnionPeerTypeResolution(1); } fn testErrorUnionPeerTypeResolution(x: i32) void { const y = switch (x) { 1 => bar_1(), 2 => baz_1(), else => quux_1(), }; } fn bar_1() error { return error.A; } fn 
baz_1() !i32 { return error.B; } fn quux_1() !i32 { return error.C; }
test/cases/error.zig
const std = @import("std"); const log = @import("log.zig"); pub const Header = packed struct { magic: u32, totalsize: u32, off_dt_struct: u32, off_dt_strings: u32, off_mem_rsvmap: u32, version: u32, last_comp_version: u32, boot_cpuid_phys: u32, size_dt_strings: u32, size_dt_struct: u32, }; pub const ReserveEntry = packed struct { address: u64, size: u64, }; fn zeroTerminatedStringToSlice(p: [*]const u8) [:0]const u8 { var i: usize = 0; while (p[i] != 0) { i += 1; } return p[0..i :0]; } const Prop = struct { name: [:0]const u8, value: []const u8, next: ?*Prop, }; pub const NodeError = error{ NoSuchProp, PropTooLong, }; fn reinterpretSlice(comptime U: type, comptime T: type, slice: T) U { const ScalarU = @TypeOf(U[0]); const p = @ptrCast([*]ScalarU, @alignCast(@alignOf(ScalarU), &slice[0])); const lastIndex = slice.len / @sizeOf(ScalarU); return p[0..lastIndex]; } pub const Node = struct { name: [:0]const u8, prop: ?*Prop, parent: ?*Node, sibling: ?*Node, child: ?*Node, const NodeIterator = struct { current: ?*const Node, pub fn next(self: *NodeIterator) ?*const Node { const ret = self.current; if (self.current) |c| self.current = c.sibling; return ret; } }; const PropIterator = struct { current: ?*const Prop, pub fn next(self: *PropIterator) ?*const Prop { const ret = self.current; if (self.current) |c| self.current = c.next; return ret; } }; fn addChild(self: *Node, child: *Node) void { child.parent = self; child.prop = null; child.sibling = self.child; child.child = null; self.child = child; } fn addProp(self: *Node, prop: *Prop) void { prop.next = self.prop; self.prop = prop; } pub fn iterChildren(self: Node) NodeIterator { return NodeIterator{ .current = self.child }; } pub fn iterProps(self: Node) PropIterator { return PropIterator{ .current = self.prop }; } pub fn getChild(self: Node, name: []const u8) ?*const Node { var iter = self.iterChildren(); while (iter.next()) |node| { if (std.mem.eql(u8, name, node.name)) return node; } return null; } fn getProp(self: Node, name: []const u8) ?*const Prop { var iter = self.iterProps(); while (iter.next()) |node| { if (std.mem.eql(u8, name, node.name)) return node; } return null; } pub fn getRawProp(self: Node, name: []const u8) ?[]const u8 { return (self.getProp(name) orelse return null).value; } pub fn getU32ArrayProp(self: Node, name: []const u8, buffer: []u32) NodeError![]u32 { const bytes = self.getRawProp(name) orelse return NodeError.NoSuchProp; if (bytes.len > buffer.len * @sizeOf(@TypeOf(buffer[0]))) { return NodeError.PropTooLong; } const p = @ptrCast([*]const u32, @alignCast(@alignOf(u32), &bytes[0])); const lastIndex = bytes.len / @sizeOf(u32); const array = p[0..lastIndex]; for (array) |v, i| { buffer[i] = std.mem.bigToNative(u32, v); } return buffer[0..array.len]; } pub fn getU32Prop(self: Node, name: []const u8) NodeError!u32 { var buffer: [1]u32 = undefined; return (try self.getU32ArrayProp(name, buffer[0..]))[0]; } pub fn translateAddress(self: Node, address: usize) !usize { var curr: *const Node = &self; var result = address; var buffer: [16]u32 = undefined; while (true) { if (curr.getU32ArrayProp("ranges", buffer[0..])) |ranges| { // TODO: Assume that ranges are (src, dest, size) for now. var i: usize = 0; while (i < ranges.len) : (i += 3) { const src = ranges[i]; const dest = ranges[i + 1]; const size = ranges[i + 2]; if (result >= src and result <= src + size) { result = result + (dest -% src); break; } } } else |err| switch (err) { // No ranges property? That's fine, keep going. 
NodeError.NoSuchProp => {}, else => return err, } curr = curr.parent orelse return result; } } }; pub const Helper = struct { struc: [*]u8, strings: [*]u8, const Self = @This(); fn readString(self: Self, offset: *u32) [:0]const u8 { const name = zeroTerminatedStringToSlice(self.struc + offset.*); offset.* += (@truncate(u32, name.len) + 4) & ~@as(u32, 3); return name; } fn readU32(self: Self, offset: *u32) u32 { const p = @ptrCast(*u32, @alignCast(@alignOf(u32), self.struc + offset.*)); offset.* += @sizeOf(@TypeOf(p.*)); return @byteSwap(u32, p.*); } fn stringTableEntry(self: Self, offset: usize) [:0]const u8 { const p = @ptrCast([*]const u8, self.strings + offset); return zeroTerminatedStringToSlice(p); } pub fn init(header: *const Header) Self { return .{ .struc = @intToPtr([*]u8, @ptrToInt(header) + @byteSwap(u32, header.off_dt_struct)), .strings = @intToPtr([*]u8, @ptrToInt(header) + @byteSwap(u32, header.off_dt_strings)), }; } }; pub const ParseError = error{ InvalidToken, }; pub const DeviceTree = struct { root: *Node, const Self = @This(); pub fn getNodeByPath(self: Self, path: []const u8) ?*const Node { var current: *const Node = self.root; var it = std.mem.tokenize(u8, path, "/"); while (it.next()) |name| current = current.getChild(name) orelse return null; return current; } pub fn parse(buffer: []u8, header: *const Header) !Self { var offset: u32 = 0; var fba = std.heap.FixedBufferAllocator.init(buffer); const allocator = fba.allocator(); const helper = Helper.init(header); const root = try allocator.create(Node); root.name = ""; root.prop = null; root.parent = null; root.sibling = null; root.child = null; var current = root; parse_loop: while (true) { const token_type = helper.readU32(&offset); switch (token_type) { 1 => { const node = try allocator.create(Node); node.name = helper.readString(&offset); current.addChild(node); current = node; }, 2 => { current = @ptrCast(*Node, current.parent); }, 3 => { const prop = try allocator.create(Prop); const value_len = helper.readU32(&offset); prop.name = helper.stringTableEntry(helper.readU32(&offset)); prop.value = (helper.struc + offset)[0..value_len]; offset += (value_len + 3) & ~@as(u32, 3); current.addProp(prop); }, 4 => {}, 9 => break :parse_loop, else => return ParseError.InvalidToken, } } return Self{ .root = root.child orelse unreachable }; } };
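// Editor's note: hypothetical usage sketch, not part of the original file. The
// "/soc/uart" path and "clock-frequency" property are illustrative only, and
// `blob_address` is assumed to be the address of a flattened device tree that
// is already mapped in memory.
//
//   const header = @intToPtr(*const Header, blob_address);
//   var scratch: [16 * 1024]u8 = undefined;  // backing memory for the parsed Node/Prop graph
//   const tree = try DeviceTree.parse(scratch[0..], header);
//   if (tree.getNodeByPath("/soc/uart")) |uart| {
//       const freq = try uart.getU32Prop("clock-frequency");
//       const mapped = try uart.translateAddress(0x1000_0000);
//       _ = freq; _ = mapped;
//   }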
dtb.zig
const std = @import("std"); const assert = std.debug.assert; // TODO // Cannot use testing allocator because I was too lazy to write the deinit() // method for Node (our graph). // // const allocator = std.testing.allocator; const allocator = std.heap.page_allocator; const MAX_DEPTH = 500; pub const Map = struct { pub const Pos = struct { const OFFSET: usize = 1000; x: usize, y: usize, pub fn init(x: usize, y: usize) Pos { return Pos{ .x = x, .y = y, }; } pub fn equal(self: Pos, other: Pos) bool { return self.x == other.x and self.y == other.y; } }; pub const PortalName = struct { where: u8, label: u8, pub fn init(where: u8, label: u8) PortalName { return PortalName{ .where = where, .label = label, }; } }; cells: std.AutoHashMap(Pos, Tile), portals: std.AutoHashMap(Pos, Pos), outer: std.AutoHashMap(Pos, void), one: std.AutoHashMap(Pos, PortalName), two: std.AutoHashMap(usize, Pos), graph: std.AutoHashMap(Pos, std.AutoHashMap(Pos, usize)), py: usize, ymin: usize, ymax: usize, pmin: Pos, pmax: Pos, psrc: Pos, ptgt: Pos, pub const Dir = enum(u8) { N = 1, S = 2, W = 3, E = 4, pub fn reverse(d: Dir) Dir { return switch (d) { Dir.N => Dir.S, Dir.S => Dir.N, Dir.W => Dir.E, Dir.E => Dir.W, }; } pub fn move(p: Pos, d: Dir) Pos { var q = p; switch (d) { Dir.N => q.y -= 1, Dir.S => q.y += 1, Dir.W => q.x -= 1, Dir.E => q.x += 1, } return q; } }; pub const Tile = enum(u8) { Empty = 0, Passage = 1, Wall = 2, Portal = 3, }; pub fn init() Map { var self = Map{ .cells = std.AutoHashMap(Pos, Tile).init(allocator), .one = std.AutoHashMap(Pos, PortalName).init(allocator), .two = std.AutoHashMap(usize, Pos).init(allocator), .graph = std.AutoHashMap(Pos, std.AutoHashMap(Pos, usize)).init(allocator), .portals = std.AutoHashMap(Pos, Pos).init(allocator), .outer = std.AutoHashMap(Pos, void).init(allocator), .pmin = Pos.init(std.math.maxInt(usize), std.math.maxInt(usize)), .pmax = Pos.init(0, 0), .psrc = Pos.init(0, 0), .ptgt = Pos.init(0, 0), .py = 0, .ymin = std.math.maxInt(usize), .ymax = 0, }; return self; } pub fn deinit(self: *Map) void { self.outer.deinit(); self.portals.deinit(); self.graph.deinit(); self.two.deinit(); self.one.deinit(); self.cells.deinit(); } pub fn parse(self: *Map, line: []const u8) void { var where: u8 = 0; var x: usize = 0; while (x < line.len) : (x += 1) { if (line[x] == ' ') { if (where == 1) where = 2; if (where == 3) where = 4; continue; } var p = Pos.init(x + Pos.OFFSET, self.py + Pos.OFFSET); var t: Tile = undefined; if (line[x] == '.') t = Tile.Passage; if (line[x] == '#') { if (self.ymin > p.y) self.ymin = p.y; if (self.ymax < p.y) self.ymax = p.y; if (where == 0) where = 1; if (where == 2) where = 3; t = Tile.Wall; } if (line[x] >= 'A' and line[x] <= 'Z') { if (where == 1) where = 2; if (where == 3) where = 4; t = Tile.Portal; _ = self.one.put(p, PortalName.init(where, line[x])) catch unreachable; } self.set_pos(p, t); } self.py += 1; } pub const Label = struct { fn encode(l0: u8, l1: u8) usize { return @intCast(usize, l0) * 1000 + @intCast(usize, l1); } fn decode(l: usize) void { var v: usize = l; const l1 = @intCast(u8, v % 1000); v /= 1000; const l0 = @intCast(u8, v % 1000); std.debug.warn("LABEL: [{c}{c}]\n", .{ l0, l1 }); } }; pub fn find_portals(self: *Map) void { var seen = std.AutoHashMap(Pos, void).init(allocator); defer seen.deinit(); const lAA = Label.encode('A', 'A'); const lZZ = Label.encode('Z', 'Z'); var y: usize = self.pmin.y; while (y <= self.pmax.y) : (y += 1) { var x: usize = self.pmin.x; while (x <= self.pmax.x) : (x += 1) { var pc = Pos.init(x, y); var 
tc = self.get_pos(pc); if (tc != Tile.Portal) continue; if (seen.contains(pc)) continue; const x0 = self.one.get(pc).?; const l0 = x0.label; _ = self.one.remove(pc); // std.debug.warn("LOOKING at portal {c} {}\n",.{ l0, pc}); var k: u8 = 1; while (k <= 4) : (k += 1) { const d = @intToEnum(Dir, k); const r = Dir.reverse(d); const pn = Dir.move(pc, d); var tn = self.get_pos(pn); if (tn != Tile.Portal) continue; if (seen.contains(pn)) continue; _ = seen.put(pc, {}) catch unreachable; _ = seen.put(pn, {}) catch unreachable; const x1 = self.one.get(pn).?; const l1 = x1.label; if (x0.where != x1.where) { std.debug.warn("FUCKERS!\n", .{}); break; } _ = self.one.remove(pn); var label = Label.encode(l0, l1); // std.debug.warn("PORTAL {c}{c} in area {}\n",.{ l0, l1, x0.where}); var p0 = Dir.move(pn, d); var pt = self.get_pos(p0); if (pt != Tile.Passage) { p0 = Dir.move(pc, r); pt = self.get_pos(p0); } if (pt != Tile.Passage) { std.debug.warn("FUCK\n", .{}); } if (x0.where == 0 or x0.where == 4 or (x0.where == 1 and (p0.y == self.ymin or p0.y == self.ymax))) { _ = self.outer.put(p0, {}) catch unreachable; } if (label == lAA) { self.psrc = p0; _ = self.portals.put(p0, p0) catch unreachable; break; } if (label == lZZ) { self.ptgt = p0; _ = self.portals.put(p0, p0) catch unreachable; break; } if (self.two.contains(label)) { // found second endpoint of a portal const p1 = self.two.get(label).?; // std.debug.warn("SECOND pos for label {}: {}\n",.{ label, p0}); // std.debug.warn("PORTAL SECOND [{}] {} {}\n",.{ label, p0, p1}); _ = self.portals.put(p0, p1) catch unreachable; _ = self.portals.put(p1, p0) catch unreachable; _ = self.two.remove(label); } else { // found first endpoint of a portal // std.debug.warn("PORTAL FIRST [{}] {}\n",.{ label, p0}); _ = self.two.put(label, p0) catch unreachable; // std.debug.warn("FIRST pos for label {}: {}\n",.{ label, p0}); } break; } } } } const PosDist = struct { pos: Pos, dist: usize, pub fn init(pos: Pos, dist: usize) PosDist { return PosDist{ .pos = pos, .dist = dist, }; } fn cmp(l: PosDist, r: PosDist) std.math.Order { if (l.dist < r.dist) return std.math.Order.lt; if (l.dist > r.dist) return std.math.Order.gt; if (l.pos.x < r.pos.x) return std.math.Order.lt; if (l.pos.x > r.pos.x) return std.math.Order.gt; if (l.pos.y < r.pos.y) return std.math.Order.lt; if (l.pos.y > r.pos.y) return std.math.Order.gt; return std.math.Order.eq; } }; pub fn find_graph(self: *Map) void { self.graph.clearRetainingCapacity(); var it = self.portals.iterator(); while (it.next()) |kv| { const portal = kv.key_ptr.*; var reach = std.AutoHashMap(Pos, usize).init(allocator); var seen = std.AutoHashMap(Pos, void).init(allocator); defer seen.deinit(); const PQ = std.PriorityQueue(PosDist, PosDist.cmp); var Pend = PQ.init(allocator); defer Pend.deinit(); _ = Pend.add(PosDist.init(portal, 0)) catch unreachable; while (Pend.count() != 0) { const data = Pend.remove(); if (!data.pos.equal(portal) and self.portals.contains(data.pos)) { _ = reach.put(data.pos, data.dist) catch unreachable; } _ = seen.put(data.pos, {}) catch unreachable; const dist = data.dist + 1; var j: u8 = 1; while (j <= 4) : (j += 1) { const d = @intToEnum(Dir, j); var v = Dir.move(data.pos, d); if (!self.cells.contains(v)) continue; if (seen.contains(v)) continue; const tile = self.cells.get(v).?; if (tile != Tile.Passage) continue; _ = Pend.add(PosDist.init(v, dist)) catch unreachable; } } _ = self.graph.put(portal, reach) catch unreachable; // std.debug.warn("FROM portal {} {}:\n",.{ portal.x - 1000, portal.y - 1000}); // var 
itr = reach.iterator(); // while (itr.next()) |kvr| { // std.debug.warn("- portal {} {} dist {}:\n",.{ kvr.key.x - 1000, kvr.key.y - 1000, kvr.value}); // } } } pub const PortalInfo = struct { pos: Pos, depth: usize, pub fn init(pos: Pos, depth: usize) PortalInfo { return PortalInfo{ .pos = pos, .depth = depth, }; } pub fn equal(self: PortalInfo, other: PortalInfo) bool { return self.depth == other.depth and self.pos.equal(other.pos); } }; // Long live the master, <NAME> // https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm pub fn find_path_to_target(self: *Map, recursive: bool) usize { var Pend = std.AutoHashMap(PortalInfo, void).init(allocator); defer Pend.deinit(); var Dist = std.AutoHashMap(PortalInfo, usize).init(allocator); defer Dist.deinit(); var Path = std.AutoHashMap(PortalInfo, PortalInfo).init(allocator); defer Path.deinit(); // Fill Pend for all nodes var depth: usize = 0; while (depth < MAX_DEPTH) : (depth += 1) { var itg = self.graph.iterator(); while (itg.next()) |kvg| { const p = kvg.key_ptr.*; const pi = PortalInfo.init(p, depth); _ = Pend.put(pi, {}) catch unreachable; } if (!recursive) break; } const ps = PortalInfo.init(self.psrc, 0); var pt = PortalInfo.init(self.ptgt, 0); _ = Dist.put(ps, 0) catch unreachable; while (Pend.count() != 0) { // Search for a pending node with minimal distance // TODO: we could use a PriorityQueue here to quickly get at the // node, but we will also need to update the node's distance later, // which would mean re-shuffling the PQ; not sure how to do this. var pu: PortalInfo = undefined; var dmin: usize = std.math.maxInt(usize); var it = Pend.iterator(); while (it.next()) |v| { const p = v.key_ptr.*; if (!Dist.contains(p)) { continue; } const found = Dist.getEntry(p).?; if (dmin > found.value_ptr.*) { dmin = found.value_ptr.*; pu = found.key_ptr.*; } } var u: Pos = pu.pos; if (dmin == std.math.maxInt(usize)) { return 0; } _ = Pend.remove(pu); if (pu.equal(pt)) { // node chosen is our target, we can stop searching now break; } // update dist for all neighbours of u // add closest neighbour of u to the path const du = Dist.get(pu).?; const neighbours = self.graph.get(u).?; // std.debug.warn("CONSIDER {} {} depth {} distance {} neighbours {}\n",.{ u.x - 1000, u.y - 1000, pu.depth, du, neighbours.count(})); var itn = neighbours.iterator(); while (itn.next()) |kvn| { var dd: i32 = 0; var v = kvn.key_ptr.*; const outer = self.outer.contains(v); const IS = v.equal(self.psrc); const IT = v.equal(self.ptgt); if (recursive) { if (outer) { if (pu.depth > 0) { if (IS or IT) { continue; } else { dd = -1; } } else { if (IS or IT) {} else { continue; } } } else { if (IS) continue; if (IT) continue; dd = 1; } } const nd = @intCast(i32, pu.depth) + dd; var t = self.portals.get(v).?; var alt = du + kvn.value_ptr.*; if (t.equal(v) or IS or IT) {} else { v = t; alt += 1; } var pv = PortalInfo.init(v, @intCast(usize, nd)); var dv: usize = std.math.maxInt(usize); if (Dist.contains(pv)) dv = Dist.get(pv).?; if (alt < dv) { // std.debug.warn("UPDATE {} {} distance {}\n",.{ v.x - 1000, v.y - 1000, alt}); _ = Dist.put(pv, alt) catch unreachable; _ = Path.put(pv, pu) catch unreachable; } } } const dist = Dist.get(pt).?; return dist; } pub fn get_pos(self: *Map, pos: Pos) Tile { if (!self.cells.contains(pos)) return Tile.Empty; return self.cells.get(pos).?; } pub fn set_pos(self: *Map, pos: Pos, mark: Tile) void { _ = self.cells.put(pos, mark) catch unreachable; if (self.pmin.x > pos.x) self.pmin.x = pos.x; if (self.pmin.y > pos.y) self.pmin.y = pos.y; if 
(self.pmax.x < pos.x) self.pmax.x = pos.x; if (self.pmax.y < pos.y) self.pmax.y = pos.y; } pub fn show(self: Map) void { const sx = self.pmax.x - self.pmin.x + 1; const sy = self.pmax.y - self.pmin.y + 1; std.debug.warn("MAP: {} x {} - {} {} - {} {}\n", .{ sx, sy, self.pmin.x, self.pmin.y, self.pmax.x, self.pmax.y }); std.debug.warn("SRC: {} -- TGT {}\n", .{ self.psrc, self.ptgt }); var y: usize = self.pmin.y; while (y <= self.pmax.y) : (y += 1) { std.debug.warn("{:4} | ", .{y}); var x: usize = self.pmin.x; while (x <= self.pmax.x) : (x += 1) { const p = Pos.init(x, y); const t = self.cells.get(p); var c: u8 = ' '; if (t != null) { switch (t.?.value) { Tile.Empty => c = ' ', Tile.Passage => c = '.', Tile.Wall => c = '#', Tile.Portal => c = 'X', } } std.debug.warn("{c}", .{c}); } std.debug.warn("\n", .{}); } var it = self.portals.iterator(); while (it.next()) |kv| { std.debug.warn("Portal: {} to {}\n", .{ kv.key, kv.value }); } } }; test "small maze" { var map = Map.init(); defer map.deinit(); const data = \\ A \\ A \\ #######.######### \\ #######.........# \\ #######.#######.# \\ #######.#######.# \\ #######.#######.# \\ ##### B ###.# \\BC...## C ###.# \\ ##.## ###.# \\ ##...DE F ###.# \\ ##### G ###.# \\ #########.#####.# \\DE..#######...###.# \\ #.#########.###.# \\FG..#########.....# \\ ###########.##### \\ Z \\ Z ; var it = std.mem.split(u8, data, "\n"); while (it.next()) |line| { map.parse(line); } map.find_portals(); map.find_graph(); // map.show(); assert(map.one.count() == 0); assert(map.two.count() == 0); assert(map.psrc.equal(Map.Pos.init(9 + Map.Pos.OFFSET, 2 + Map.Pos.OFFSET))); assert(map.ptgt.equal(Map.Pos.init(13 + Map.Pos.OFFSET, 16 + Map.Pos.OFFSET))); const result = map.find_path_to_target(false); assert(result == 23); } test "medium maze" { var map = Map.init(); defer map.deinit(); const data = \\ A \\ A \\ #################.############# \\ #.#...#...................#.#.# \\ #.#.#.###.###.###.#########.#.# \\ #.#.#.......#...#.....#.#.#...# \\ #.#########.###.#####.#.#.###.# \\ #.............#.#.....#.......# \\ ###.###########.###.#####.#.#.# \\ #.....# A C #.#.#.# \\ ####### S P #####.# \\ #.#...# #......VT \\ #.#.#.# #.##### \\ #...#.# YN....#.# \\ #.###.# #####.# \\DI....#.# #.....# \\ #####.# #.###.# \\ZZ......# QG....#..AS \\ ###.### ####### \\JO..#.#.# #.....# \\ #.#.#.# ###.#.# \\ #...#..DI BU....#..LF \\ #####.# #.##### \\YN......# VT..#....QG \\ #.###.# #.###.# \\ #.#...# #.....# \\ ###.### J L J #.#.### \\ #.....# O F P #.#...# \\ #.###.#####.#.#####.#####.###.# \\ #...#.#.#...#.....#.....#.#...# \\ #.#####.###.###.#.#.#########.# \\ #...#.#.....#...#.#.#.#.....#.# \\ #.###.#####.###.###.#.#.####### \\ #.#.........#...#.............# \\ #########.###.###.############# \\ B J C \\ U P P ; var it = std.mem.split(u8, data, "\n"); while (it.next()) |line| { map.parse(line); } map.find_portals(); map.find_graph(); // map.show(); assert(map.one.count() == 0); assert(map.two.count() == 0); const result = map.find_path_to_target(false); assert(result == 58); } test "small maze recursive" { var map = Map.init(); defer map.deinit(); const data = \\ A \\ A \\ #######.######### \\ #######.........# \\ #######.#######.# \\ #######.#######.# \\ #######.#######.# \\ ##### B ###.# \\BC...## C ###.# \\ ##.## ###.# \\ ##...DE F ###.# \\ ##### G ###.# \\ #########.#####.# \\DE..#######...###.# \\ #.#########.###.# \\FG..#########.....# \\ ###########.##### \\ Z \\ Z ; var it = std.mem.split(u8, data, "\n"); while (it.next()) |line| { map.parse(line); } map.find_portals(); 
map.find_graph(); // map.show(); assert(map.one.count() == 0); assert(map.two.count() == 0); assert(map.psrc.equal(Map.Pos.init(9 + Map.Pos.OFFSET, 2 + Map.Pos.OFFSET))); assert(map.ptgt.equal(Map.Pos.init(13 + Map.Pos.OFFSET, 16 + Map.Pos.OFFSET))); const result = map.find_path_to_target(true); assert(result == 26); } test "medium maze recursive" { var map = Map.init(); defer map.deinit(); const data = \\ Z L X W C \\ Z P Q B K \\ ###########.#.#.#.#######.############### \\ #...#.......#.#.......#.#.......#.#.#...# \\ ###.#.#.#.#.#.#.#.###.#.#.#######.#.#.### \\ #.#...#.#.#...#.#.#...#...#...#.#.......# \\ #.###.#######.###.###.#.###.###.#.####### \\ #...#.......#.#...#...#.............#...# \\ #.#########.#######.#.#######.#######.### \\ #...#.# F R I Z #.#.#.# \\ #.###.# D E C H #.#.#.# \\ #.#...# #...#.# \\ #.###.# #.###.# \\ #.#....OA WB..#.#..ZH \\ #.###.# #.#.#.# \\CJ......# #.....# \\ ####### ####### \\ #.#....CK #......IC \\ #.###.# #.###.# \\ #.....# #...#.# \\ ###.### #.#.#.# \\XF....#.# RF..#.#.# \\ #####.# ####### \\ #......CJ NM..#...# \\ ###.#.# #.###.# \\RE....#.# #......RF \\ ###.### X X L #.#.#.# \\ #.....# F Q P #.#.#.# \\ ###.###########.###.#######.#########.### \\ #.....#...#.....#.......#...#.....#.#...# \\ #####.#.###.#######.#######.###.###.#.#.# \\ #.......#.......#.#.#.#.#...#...#...#.#.# \\ #####.###.#####.#.#.#.#.###.###.#.###.### \\ #.......#.....#.#...#...............#...# \\ #############.#.#.###.################### \\ A O F N \\ A A D M ; var it = std.mem.split(u8, data, "\n"); while (it.next()) |line| { map.parse(line); } map.find_portals(); map.find_graph(); // map.show(); const result = map.find_path_to_target(true); assert(result == 396); }
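// Editor's note: hypothetical sketch, not part of the original file. It answers
// the TODO inside find_path_to_target about using a PriorityQueue: a common
// workaround for the missing decrease-key operation is "lazy deletion" - push a
// fresh entry whenever a shorter distance is found and skip stale entries when
// popping.
//
//   const Entry = struct { pi: PortalInfo, dist: usize };
//   // whenever `Dist` is updated with `alt`, also do: _ = pq.add(.{ .pi = pv, .dist = alt });
//   while (pq.removeOrNull()) |e| {
//       const best = Dist.get(e.pi) orelse continue;
//       if (e.dist > best) continue; // stale entry; a shorter path was recorded later
//       // relax the neighbours of e.pi exactly as in the hash-map scan above
//   }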
2019/p20/map.zig
const std = @import("std"); const allocator = std.heap.page_allocator; const INT_MAX = 4294967295; fn charToDigit(c: u8) u32 { return switch (c) { '0'...'9' => c - '0', 'A'...'Z' => c - 'A' + 10, 'a'...'z' => c - 'a' + 10, else => 0, }; } fn readFile() anyerror!std.ArrayList(std.ArrayList(u32)) { var file = try std.fs.cwd().openFile("input.txt", .{}); defer file.close(); var buf_reader = std.io.bufferedReader(file.reader()); var in_stream = buf_reader.reader(); var buf: [1024]u8 = undefined; var lines = std.ArrayList(std.ArrayList(u32)).init(allocator); while (try in_stream.readUntilDelimiterOrEof(&buf, '\n')) |l| { var line = std.ArrayList(u32).init(allocator); for (l) |character| { try line.append(charToDigit(character)); } try lines.append(line); } return lines; } fn minDistance(dist: std.ArrayList(u32), sptSet: std.ArrayList(u32)) u32 { var min: u32 = INT_MAX; var min_index: u32 = undefined; var v: u32 = 0; while (v < dist.items.len) { if ((sptSet.items[v] == 0) and (dist.items[v] <= min)) { min = dist.items[v]; min_index = v; } v = v + 1; } return min_index; } fn dijkstra(graph: std.ArrayList(std.ArrayList(u32)), src: u32) anyerror!void { var dist = std.ArrayList(u32).init(allocator); var sptSet = std.ArrayList(u32).init(allocator); var i: u32 = 0; while (i < graph.items.len) { try dist.append(INT_MAX); try sptSet.append(0); i = i + 1; } dist.items[0] = src; var count: u32 = 0; while (count < graph.items.len - 1) { var u = minDistance(dist, sptSet); sptSet.items[u] = 1; var v: u32 = 0; while (v < graph.items.len) { if (sptSet.items[v] == 0 and dist.items[u] != INT_MAX and dist.items[u] + graph.items[u].items[v] < dist.items[v]) { dist.items[v] = dist.items[u] + graph.items[u].items[v]; } v = v + 1; } count = count + 1; } std.debug.print("{any}", .{dist.items}); } pub fn main() anyerror!void { const lines = try readFile(); defer lines.deinit(); try dijkstra(lines, 1); }
day15/main.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const List = std.ArrayList; const Map = std.AutoHashMap; const StrMap = std.StringHashMap; const BitSet = std.DynamicBitSet; const Str = []const u8; const util = @import("util.zig"); const gpa = util.gpa; const data = @embedFile("../data/day01.txt"); pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); var allocator = &arena.allocator; var numberListFromArrayList = try util.parseDay01FileString(allocator, util.ReadType.ArrayList, data); var numberListFromSlice = try util.parseDay01FileString(allocator, util.ReadType.Slice, data); var numberListFromLibrary = try util.parseDay01FileString(allocator, util.ReadType.Library, data); std.debug.print("Slice Len = {any}\n", .{numberListFromSlice.len}); std.debug.print("Library Len = {any}\n", .{numberListFromLibrary.len}); var totalIncreasesFromList = countDepthIncreases(numberListFromArrayList); std.debug.print("Increases list = {any}\n", .{totalIncreasesFromList}); var totalIncreasesFromSlice = countDepthIncreases(numberListFromSlice); std.debug.print("Increases slice = {any}\n", .{totalIncreasesFromSlice}); var totalIncreasesFromLibrary = countDepthIncreases(numberListFromLibrary); std.debug.print("Increases Library = {any}\n", .{totalIncreasesFromLibrary}); std.debug.print("Increases are same {b}\n", .{totalIncreasesFromList==totalIncreasesFromSlice}); std.debug.print("Increases are same {b}\n", .{totalIncreasesFromList==totalIncreasesFromLibrary}); var totalSlidingIncreasesList = countSlidingDepthIncreases(numberListFromArrayList); std.debug.print("Sliding increases list = {any}\n", .{totalSlidingIncreasesList}); var totalSlidingIncreasesSlice = countSlidingDepthIncreases(numberListFromSlice); std.debug.print("Sliding increases slice = {any}\n", .{totalSlidingIncreasesSlice}); std.debug.print("Sliding increases are same {b}\n", .{totalSlidingIncreasesList==totalSlidingIncreasesSlice}); } fn countDepthIncreases(depths: []u64) u64 { std.debug.print("Depths list count = {any}\n", .{depths.len}); var totalIncreases : u64 = 0; var i: usize = 1; while (i < depths.len) : (i += 1) { if (depths[i] > depths[i-1]) { totalIncreases += 1; } } return totalIncreases; } fn countSlidingDepthIncreases(depths: []u64) u64 { std.debug.print("Depths list count (sliding) = {any}\n", .{depths.len}); var totalIncreases : u64 = 0; var i: usize = 0; // +1 doing i and i+1 at the same time // +2 because the windows looks ahead 2 from the starting index while (i < depths.len - (1 + 2)) : (i += 1) { if (calculateDepthWindowAt(depths, i + 1) > calculateDepthWindowAt(depths, i)) { totalIncreases += 1; } } return totalIncreases; } fn calculateDepthWindowAt(depths: []u64, startOfWindow : usize) u64 { return depths[startOfWindow] + depths[startOfWindow + 1] + depths[startOfWindow + 2]; } // Useful stdlib functions const tokenize = std.mem.tokenize; const split = std.mem.split; const indexOf = std.mem.indexOfScalar; const indexOfAny = std.mem.indexOfAny; const indexOfStr = std.mem.indexOfPosLinear; const lastIndexOf = std.mem.lastIndexOfScalar; const lastIndexOfAny = std.mem.lastIndexOfAny; const lastIndexOfStr = std.mem.lastIndexOfLinear; const trim = std.mem.trim; const sliceMin = std.mem.min; const sliceMax = std.mem.max; const parseInt = std.fmt.parseInt; const parseFloat = std.fmt.parseFloat; const min = std.math.min; const min3 = std.math.min3; const max = std.math.max; const max3 = std.math.max3; const print = std.debug.print; const assert = 
std.debug.assert; const sort = std.sort.sort; const asc = std.sort.asc; const desc = std.sort.desc;
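// A minimal usage sketch of the two counters above. The ten depths below are an
// assumption (the worked example from the puzzle text, not data/day01.txt): they
// contain 7 single-measurement increases and 5 three-measurement sliding-window
// increases.
test "depth increase counting on sample data" {
    var sample = [_]u64{ 199, 200, 208, 210, 200, 207, 240, 269, 260, 263 };
    assert(countDepthIncreases(sample[0..]) == 7);
    assert(countSlidingDepthIncreases(sample[0..]) == 5);
}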
src/day01.zig
pub const TIMERR_NOERROR = @as(u32, 0); pub const TIMERR_NOCANDO = @as(u32, 97); pub const TIMERR_STRUCT = @as(u32, 129); pub const MAXPNAMELEN = @as(u32, 32); pub const MAXERRORLENGTH = @as(u32, 256); pub const MM_MICROSOFT = @as(u32, 1); pub const MM_MIDI_MAPPER = @as(u32, 1); pub const MM_WAVE_MAPPER = @as(u32, 2); pub const MM_SNDBLST_MIDIOUT = @as(u32, 3); pub const MM_SNDBLST_MIDIIN = @as(u32, 4); pub const MM_SNDBLST_SYNTH = @as(u32, 5); pub const MM_SNDBLST_WAVEOUT = @as(u32, 6); pub const MM_SNDBLST_WAVEIN = @as(u32, 7); pub const MM_ADLIB = @as(u32, 9); pub const MM_MPU401_MIDIOUT = @as(u32, 10); pub const MM_MPU401_MIDIIN = @as(u32, 11); pub const MM_PC_JOYSTICK = @as(u32, 12); pub const TIME_MS = @as(u32, 1); pub const TIME_SAMPLES = @as(u32, 2); pub const TIME_BYTES = @as(u32, 4); pub const TIME_SMPTE = @as(u32, 8); pub const TIME_MIDI = @as(u32, 16); pub const TIME_TICKS = @as(u32, 32); pub const MM_JOY1MOVE = @as(u32, 928); pub const MM_JOY2MOVE = @as(u32, 929); pub const MM_JOY1ZMOVE = @as(u32, 930); pub const MM_JOY2ZMOVE = @as(u32, 931); pub const MM_JOY1BUTTONDOWN = @as(u32, 949); pub const MM_JOY2BUTTONDOWN = @as(u32, 950); pub const MM_JOY1BUTTONUP = @as(u32, 951); pub const MM_JOY2BUTTONUP = @as(u32, 952); pub const MM_MCINOTIFY = @as(u32, 953); pub const MM_WOM_OPEN = @as(u32, 955); pub const MM_WOM_CLOSE = @as(u32, 956); pub const MM_WOM_DONE = @as(u32, 957); pub const MM_WIM_OPEN = @as(u32, 958); pub const MM_WIM_CLOSE = @as(u32, 959); pub const MM_WIM_DATA = @as(u32, 960); pub const MM_MIM_OPEN = @as(u32, 961); pub const MM_MIM_CLOSE = @as(u32, 962); pub const MM_MIM_DATA = @as(u32, 963); pub const MM_MIM_LONGDATA = @as(u32, 964); pub const MM_MIM_ERROR = @as(u32, 965); pub const MM_MIM_LONGERROR = @as(u32, 966); pub const MM_MOM_OPEN = @as(u32, 967); pub const MM_MOM_CLOSE = @as(u32, 968); pub const MM_MOM_DONE = @as(u32, 969); pub const MM_DRVM_OPEN = @as(u32, 976); pub const MM_DRVM_CLOSE = @as(u32, 977); pub const MM_DRVM_DATA = @as(u32, 978); pub const MM_DRVM_ERROR = @as(u32, 979); pub const MM_STREAM_OPEN = @as(u32, 980); pub const MM_STREAM_CLOSE = @as(u32, 981); pub const MM_STREAM_DONE = @as(u32, 982); pub const MM_STREAM_ERROR = @as(u32, 983); pub const MM_MOM_POSITIONCB = @as(u32, 970); pub const MM_MCISIGNAL = @as(u32, 971); pub const MM_MIM_MOREDATA = @as(u32, 972); pub const MM_MIXM_LINE_CHANGE = @as(u32, 976); pub const MM_MIXM_CONTROL_CHANGE = @as(u32, 977); pub const MMSYSERR_BASE = @as(u32, 0); pub const WAVERR_BASE = @as(u32, 32); pub const MIDIERR_BASE = @as(u32, 64); pub const TIMERR_BASE = @as(u32, 96); pub const JOYERR_BASE = @as(u32, 160); pub const MCIERR_BASE = @as(u32, 256); pub const MIXERR_BASE = @as(u32, 1024); pub const MCI_STRING_OFFSET = @as(u32, 512); pub const MCI_VD_OFFSET = @as(u32, 1024); pub const MCI_CD_OFFSET = @as(u32, 1088); pub const MCI_WAVE_OFFSET = @as(u32, 1152); pub const MCI_SEQ_OFFSET = @as(u32, 1216); pub const MMSYSERR_NOERROR = @as(u32, 0); pub const MMSYSERR_ERROR = @as(u32, 1); pub const MMSYSERR_BADDEVICEID = @as(u32, 2); pub const MMSYSERR_NOTENABLED = @as(u32, 3); pub const MMSYSERR_ALLOCATED = @as(u32, 4); pub const MMSYSERR_INVALHANDLE = @as(u32, 5); pub const MMSYSERR_NODRIVER = @as(u32, 6); pub const MMSYSERR_NOMEM = @as(u32, 7); pub const MMSYSERR_NOTSUPPORTED = @as(u32, 8); pub const MMSYSERR_BADERRNUM = @as(u32, 9); pub const MMSYSERR_INVALFLAG = @as(u32, 10); pub const MMSYSERR_INVALPARAM = @as(u32, 11); pub const MMSYSERR_HANDLEBUSY = @as(u32, 12); pub const MMSYSERR_INVALIDALIAS = @as(u32, 13); 
pub const MMSYSERR_BADDB = @as(u32, 14); pub const MMSYSERR_KEYNOTFOUND = @as(u32, 15); pub const MMSYSERR_READERROR = @as(u32, 16); pub const MMSYSERR_WRITEERROR = @as(u32, 17); pub const MMSYSERR_DELETEERROR = @as(u32, 18); pub const MMSYSERR_VALNOTFOUND = @as(u32, 19); pub const MMSYSERR_NODRIVERCB = @as(u32, 20); pub const MMSYSERR_MOREDATA = @as(u32, 21); pub const MMSYSERR_LASTERROR = @as(u32, 21); pub const TIME_ONESHOT = @as(u32, 0); pub const TIME_PERIODIC = @as(u32, 1); pub const TIME_CALLBACK_FUNCTION = @as(u32, 0); pub const TIME_CALLBACK_EVENT_SET = @as(u32, 16); pub const TIME_CALLBACK_EVENT_PULSE = @as(u32, 32); pub const TIME_KILL_SYNCHRONOUS = @as(u32, 256); //-------------------------------------------------------------------------------- // Section: Types (11) //-------------------------------------------------------------------------------- pub const TIMECODE_SAMPLE_FLAGS = enum(u32) { TIMECODE_READ = 4121, ATN_READ = 5047, RTC_READ = 5050, _, pub fn initFlags(o: struct { TIMECODE_READ: u1 = 0, ATN_READ: u1 = 0, RTC_READ: u1 = 0, }) TIMECODE_SAMPLE_FLAGS { return @intToEnum(TIMECODE_SAMPLE_FLAGS, (if (o.TIMECODE_READ == 1) @enumToInt(TIMECODE_SAMPLE_FLAGS.TIMECODE_READ) else 0) | (if (o.ATN_READ == 1) @enumToInt(TIMECODE_SAMPLE_FLAGS.ATN_READ) else 0) | (if (o.RTC_READ == 1) @enumToInt(TIMECODE_SAMPLE_FLAGS.RTC_READ) else 0) ); } }; pub const ED_DEVCAP_TIMECODE_READ = TIMECODE_SAMPLE_FLAGS.TIMECODE_READ; pub const ED_DEVCAP_ATN_READ = TIMECODE_SAMPLE_FLAGS.ATN_READ; pub const ED_DEVCAP_RTC_READ = TIMECODE_SAMPLE_FLAGS.RTC_READ; pub const HTASK = *opaque{}; pub const MMTIME = packed struct { wType: u32, u: packed union { ms: u32, sample: u32, cb: u32, ticks: u32, smpte: extern struct { hour: u8, min: u8, sec: u8, frame: u8, fps: u8, dummy: u8, pad: [2]u8, }, midi: packed struct { songptrpos: u32, }, }, }; pub const LPDRVCALLBACK = fn( hdrvr: ?HDRVR, uMsg: u32, dwUser: usize, dw1: usize, dw2: usize, ) callconv(@import("std").os.windows.WINAPI) void; pub const TIMECAPS = extern struct { wPeriodMin: u32, wPeriodMax: u32, }; pub const LPTIMECALLBACK = fn( uTimerID: u32, uMsg: u32, dwUser: usize, dw1: usize, dw2: usize, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' const IID_IReferenceClock_Value = @import("zig.zig").Guid.initString("56a86897-0ad4-11ce-b03a-0020af0ba770"); pub const IID_IReferenceClock = &IID_IReferenceClock_Value; pub const IReferenceClock = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetTime: fn( self: *const IReferenceClock, pTime: ?*i64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AdviseTime: fn( self: *const IReferenceClock, baseTime: i64, streamTime: i64, hEvent: ?HANDLE, pdwAdviseCookie: ?*usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AdvisePeriodic: fn( self: *const IReferenceClock, startTime: i64, periodTime: i64, hSemaphore: ?HANDLE, pdwAdviseCookie: ?*usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Unadvise: fn( self: *const IReferenceClock, dwAdviseCookie: usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReferenceClock_GetTime(self: *const T, pTime: ?*i64) callconv(.Inline) HRESULT { return @ptrCast(*const IReferenceClock.VTable, self.vtable).GetTime(@ptrCast(*const IReferenceClock, self), pTime); } 
// NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReferenceClock_AdviseTime(self: *const T, baseTime: i64, streamTime: i64, hEvent: ?HANDLE, pdwAdviseCookie: ?*usize) callconv(.Inline) HRESULT { return @ptrCast(*const IReferenceClock.VTable, self.vtable).AdviseTime(@ptrCast(*const IReferenceClock, self), baseTime, streamTime, hEvent, pdwAdviseCookie); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReferenceClock_AdvisePeriodic(self: *const T, startTime: i64, periodTime: i64, hSemaphore: ?HANDLE, pdwAdviseCookie: ?*usize) callconv(.Inline) HRESULT { return @ptrCast(*const IReferenceClock.VTable, self.vtable).AdvisePeriodic(@ptrCast(*const IReferenceClock, self), startTime, periodTime, hSemaphore, pdwAdviseCookie); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReferenceClock_Unadvise(self: *const T, dwAdviseCookie: usize) callconv(.Inline) HRESULT { return @ptrCast(*const IReferenceClock.VTable, self.vtable).Unadvise(@ptrCast(*const IReferenceClock, self), dwAdviseCookie); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IReferenceClockTimerControl_Value = @import("zig.zig").Guid.initString("ebec459c-2eca-4d42-a8af-30df557614b8"); pub const IID_IReferenceClockTimerControl = &IID_IReferenceClockTimerControl_Value; pub const IReferenceClockTimerControl = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, SetDefaultTimerResolution: fn( self: *const IReferenceClockTimerControl, timerResolution: i64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDefaultTimerResolution: fn( self: *const IReferenceClockTimerControl, pTimerResolution: ?*i64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReferenceClockTimerControl_SetDefaultTimerResolution(self: *const T, timerResolution: i64) callconv(.Inline) HRESULT { return @ptrCast(*const IReferenceClockTimerControl.VTable, self.vtable).SetDefaultTimerResolution(@ptrCast(*const IReferenceClockTimerControl, self), timerResolution); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IReferenceClockTimerControl_GetDefaultTimerResolution(self: *const T, pTimerResolution: ?*i64) callconv(.Inline) HRESULT { return @ptrCast(*const IReferenceClockTimerControl.VTable, self.vtable).GetDefaultTimerResolution(@ptrCast(*const IReferenceClockTimerControl, self), pTimerResolution); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IReferenceClock2_Value = @import("zig.zig").Guid.initString("36b73885-c2c8-11cf-8b46-00805f6cef60"); pub const IID_IReferenceClock2 = &IID_IReferenceClock2_Value; pub const IReferenceClock2 = extern struct { pub const VTable = extern struct { base: IReferenceClock.VTable, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IReferenceClock.MethodMixin(T); };} pub usingnamespace MethodMixin(@This()); }; pub const TIMECODE = extern union { Anonymous: extern struct { wFrameRate: u16, wFrameFract: u16, dwFrames: u32, }, qw: u64, }; pub const TIMECODE_SAMPLE = extern struct { qwTick: i64, timecode: TIMECODE, dwUser: u32, dwFlags: TIMECODE_SAMPLE_FLAGS, }; 
//-------------------------------------------------------------------------------- // Section: Functions (7) //-------------------------------------------------------------------------------- // TODO: this type is limited to platform 'windows5.0' pub extern "WINMM" fn timeGetSystemTime( // TODO: what to do with BytesParamIndex 1? pmmt: ?*MMTIME, cbmmt: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "WINMM" fn timeGetTime( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "WINMM" fn timeGetDevCaps( // TODO: what to do with BytesParamIndex 1? ptc: ?*TIMECAPS, cbtc: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "WINMM" fn timeBeginPeriod( uPeriod: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "WINMM" fn timeEndPeriod( uPeriod: u32, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "WINMM" fn timeSetEvent( uDelay: u32, uResolution: u32, fptc: ?LPTIMECALLBACK, dwUser: usize, fuEvent: u32, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "WINMM" fn timeKillEvent( uTimerID: u32, ) callconv(@import("std").os.windows.WINAPI) u32; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (4) //-------------------------------------------------------------------------------- const HANDLE = @import("foundation.zig").HANDLE; const HDRVR = @import("media/multimedia.zig").HDRVR; const HRESULT = @import("foundation.zig").HRESULT; const IUnknown = @import("system/com.zig").IUnknown; test { // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476 if (@hasDecl(@This(), "LPDRVCALLBACK")) { _ = LPDRVCALLBACK; } if (@hasDecl(@This(), "LPTIMECALLBACK")) { _ = LPTIMECALLBACK; } @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } } //-------------------------------------------------------------------------------- // Section: SubModules (13) //-------------------------------------------------------------------------------- pub const audio = @import("media/audio.zig"); pub const device_manager = @import("media/device_manager.zig"); pub const direct_show = @import("media/direct_show.zig"); pub const dx_media_objects = @import("media/dx_media_objects.zig"); pub const kernel_streaming = @import("media/kernel_streaming.zig"); pub const library_sharing_services = @import("media/library_sharing_services.zig"); pub const media_foundation = @import("media/media_foundation.zig"); pub const media_player = @import("media/media_player.zig"); pub const multimedia = @import("media/multimedia.zig"); pub const picture_acquisition = @import("media/picture_acquisition.zig"); pub const speech = @import("media/speech.zig"); pub const 
streaming = @import("media/streaming.zig"); pub const windows_media_format = @import("media/windows_media_format.zig");
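// A hypothetical usage sketch, not part of the generated bindings above
// (assumption: building for a Windows target linked against winmm). It only
// shows how the timeBeginPeriod/timeGetTime/timeEndPeriod declarations in this
// file pair up: raise the timer resolution to 1 ms, take a timestamp, restore.
pub fn exampleMillisecondTimestamp() u32 {
    // Both period calls return TIMERR_NOERROR (0) on success; the sketch
    // ignores failures.
    _ = timeBeginPeriod(1);
    defer _ = timeEndPeriod(1);
    return timeGetTime();
}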
win32/media.zig
const std = @import("std"); const expect = std.testing.expect; test "@byteSwap" { comptime testByteSwap(); testByteSwap(); } test "@byteSwap on vectors" { comptime testVectorByteSwap(); testVectorByteSwap(); } fn testByteSwap() void { expect(@byteSwap(u0, 0) == 0); expect(@byteSwap(u8, 0x12) == 0x12); expect(@byteSwap(u16, 0x1234) == 0x3412); expect(@byteSwap(u24, 0x123456) == 0x563412); expect(@byteSwap(u32, 0x12345678) == 0x78563412); expect(@byteSwap(u40, 0x123456789a) == 0x9a78563412); expect(@byteSwap(i48, 0x123456789abc) == @bitCast(i48, u48(0xbc9a78563412))); expect(@byteSwap(u56, 0x123456789abcde) == 0xdebc9a78563412); expect(@byteSwap(u64, 0x123456789abcdef1) == 0xf1debc9a78563412); expect(@byteSwap(u128, 0x123456789abcdef11121314151617181) == 0x8171615141312111f1debc9a78563412); expect(@byteSwap(u0, u0(0)) == 0); expect(@byteSwap(i8, i8(-50)) == -50); expect(@byteSwap(i16, @bitCast(i16, u16(0x1234))) == @bitCast(i16, u16(0x3412))); expect(@byteSwap(i24, @bitCast(i24, u24(0x123456))) == @bitCast(i24, u24(0x563412))); expect(@byteSwap(i32, @bitCast(i32, u32(0x12345678))) == @bitCast(i32, u32(0x78563412))); expect(@byteSwap(u40, @bitCast(i40, u40(0x123456789a))) == u40(0x9a78563412)); expect(@byteSwap(i48, @bitCast(i48, u48(0x123456789abc))) == @bitCast(i48, u48(0xbc9a78563412))); expect(@byteSwap(i56, @bitCast(i56, u56(0x123456789abcde))) == @bitCast(i56, u56(0xdebc9a78563412))); expect(@byteSwap(i64, @bitCast(i64, u64(0x123456789abcdef1))) == @bitCast(i64, u64(0xf1debc9a78563412))); expect(@byteSwap(i128, @bitCast(i128, u128(0x123456789abcdef11121314151617181))) == @bitCast(i128, u128(0x8171615141312111f1debc9a78563412))); } fn testVectorByteSwap() void { expect((@byteSwap(u8, @Vector(2, u8)([2]u8{0x12, 0x13})) == @Vector(2, u8)([2]u8{0x12, 0x13})).all); expect((@byteSwap(u16, @Vector(2, u16)([2]u16{0x1234, 0x2345})) == @Vector(2, u16)([2]u16{0x3412, 0x4523})).all); expect((@byteSwap(u24, @Vector(2, u24)([2]u24{0x123456, 0x234567})) == @Vector(2, u24)([2]u24{0x563412, 0x674523})).all); }
test/stage1/behavior/byteswap.zig
//! Similar to `StaticResetEvent` but on `set()` it also (atomically) does `reset()`. //! Unlike StaticResetEvent, `wait()` can only be called by one thread (MPSC-like). //! //! AutoResetEvent has 3 possible states: //! - UNSET: the AutoResetEvent is currently unset //! - SET: the AutoResetEvent was notified before a wait() was called //! - <StaticResetEvent pointer>: there is an active waiter waiting for a notification. //! //! When attempting to wait: //! if the event is unset, it registers a ResetEvent pointer to be notified when the event is set //! if the event is already set, then it consumes the notification and resets the event. //! //! When attempting to notify: //! if the event is unset, then we set the event //! if theres a waiting ResetEvent, then we unset the event and notify the ResetEvent //! //! This ensures that the event is automatically reset after a wait() has been issued //! and avoids the race condition when using StaticResetEvent in the following scenario: //! thread 1 | thread 2 //! StaticResetEvent.wait() | //! | StaticResetEvent.set() //! | StaticResetEvent.set() //! StaticResetEvent.reset() | //! StaticResetEvent.wait() | (missed the second .set() notification above) state: usize = UNSET, const std = @import("../std.zig"); const builtin = @import("builtin"); const testing = std.testing; const assert = std.debug.assert; const StaticResetEvent = std.Thread.StaticResetEvent; const AutoResetEvent = @This(); const UNSET = 0; const SET = 1; /// the minimum alignment for the `*StaticResetEvent` created by wait*() const event_align = std.math.max(@alignOf(StaticResetEvent), 2); pub fn wait(self: *AutoResetEvent) void { self.waitFor(null) catch unreachable; } pub fn timedWait(self: *AutoResetEvent, timeout: u64) error{TimedOut}!void { return self.waitFor(timeout); } fn waitFor(self: *AutoResetEvent, timeout: ?u64) error{TimedOut}!void { // lazily initialized StaticResetEvent var reset_event: StaticResetEvent align(event_align) = undefined; var has_reset_event = false; var state = @atomicLoad(usize, &self.state, .SeqCst); while (true) { // consume a notification if there is any if (state == SET) { @atomicStore(usize, &self.state, UNSET, .SeqCst); return; } // check if theres currently a pending ResetEvent pointer already registered if (state != UNSET) { unreachable; // multiple waiting threads on the same AutoResetEvent } // lazily initialize the ResetEvent if it hasn't been already if (!has_reset_event) { has_reset_event = true; reset_event = .{}; } // Since the AutoResetEvent currently isnt set, // try to register our ResetEvent on it to wait // for a set() call from another thread. if (@cmpxchgWeak( usize, &self.state, UNSET, @ptrToInt(&reset_event), .SeqCst, .SeqCst, )) |new_state| { state = new_state; continue; } // if no timeout was specified, then just wait forever const timeout_ns = timeout orelse { reset_event.wait(); return; }; // wait with a timeout and return if signalled via set() switch (reset_event.timedWait(timeout_ns)) { .event_set => return, .timed_out => {}, } // If we timed out, we need to transition the AutoResetEvent back to UNSET. // If we don't, then when we return, a set() thread could observe a pointer to an invalid ResetEvent. state = @cmpxchgStrong( usize, &self.state, @ptrToInt(&reset_event), UNSET, .SeqCst, .SeqCst, ) orelse return error.TimedOut; // We didn't manage to unregister ourselves from the state. 
if (state == SET) { unreachable; // AutoResetEvent notified without waking up the waiting thread } else if (state != UNSET) { unreachable; // multiple waiting threads on the same AutoResetEvent observed when timing out } // This means a set() thread saw our ResetEvent pointer, acquired it, and is trying to wake it up. // We need to wait for it to wake up our ResetEvent before we can return and invalidate it. // We don't return error.TimedOut here as it technically notified us while we were "timing out". reset_event.wait(); return; } } pub fn set(self: *AutoResetEvent) void { var state = @atomicLoad(usize, &self.state, .SeqCst); while (true) { // If the AutoResetEvent is already set, there is nothing else left to do if (state == SET) { return; } // If the AutoResetEvent isn't set, // then try to leave a notification for the wait() thread that we set() it. if (state == UNSET) { state = @cmpxchgWeak( usize, &self.state, UNSET, SET, .SeqCst, .SeqCst, ) orelse return; continue; } // There is a ResetEvent pointer registered on the AutoResetEvent that's waiting. // Try to acquire ownership of it so that we can wake it up. // This also resets the AutoResetEvent so that there is no race condition as defined above. if (@cmpxchgWeak( usize, &self.state, state, UNSET, .SeqCst, .SeqCst, )) |new_state| { state = new_state; continue; } const reset_event = @intToPtr(*align(event_align) StaticResetEvent, state); reset_event.set(); return; } } test "basic usage" { // test local code paths { var event = AutoResetEvent{}; testing.expectError(error.TimedOut, event.timedWait(1)); event.set(); event.wait(); } // test cross-thread signaling if (builtin.single_threaded) return; const Context = struct { value: u128 = 0, in: AutoResetEvent = AutoResetEvent{}, out: AutoResetEvent = AutoResetEvent{}, const Self = @This(); fn sender(self: *Self) void { testing.expect(self.value == 0); self.value = 1; self.out.set(); self.in.wait(); testing.expect(self.value == 2); self.value = 3; self.out.set(); self.in.wait(); testing.expect(self.value == 4); } fn receiver(self: *Self) void { self.out.wait(); testing.expect(self.value == 1); self.value = 2; self.in.set(); self.out.wait(); testing.expect(self.value == 3); self.value = 4; self.in.set(); } }; var context = Context{}; const send_thread = try std.Thread.spawn(Context.sender, &context); const recv_thread = try std.Thread.spawn(Context.receiver, &context); send_thread.wait(); recv_thread.wait(); }

lib/std/Thread/AutoResetEvent.zig
const std = @import("../std.zig"); const assert = std.debug.assert; pub const ParseError = error{ OutOfMemory, InvalidStringLiteral, }; pub const Result = union(enum) { success, /// Found an invalid character at this index. invalid_character: usize, /// Expected hex digits at this index. expected_hex_digits: usize, /// Invalid hex digits at this index. invalid_hex_escape: usize, /// Invalid unicode escape at this index. invalid_unicode_escape: usize, /// The left brace at this index is missing a matching right brace. missing_matching_rbrace: usize, /// Expected unicode digits at this index. expected_unicode_digits: usize, }; /// Parses `bytes` as a Zig string literal and appends the result to `buf`. /// Asserts `bytes` has '"' at beginning and end. pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory}!Result { assert(bytes.len >= 2 and bytes[0] == '"' and bytes[bytes.len - 1] == '"'); const slice = bytes[1..]; const prev_len = buf.items.len; try buf.ensureCapacity(prev_len + slice.len - 1); errdefer buf.shrinkRetainingCapacity(prev_len); const State = enum { Start, Backslash, }; var state = State.Start; var index: usize = 0; while (true) : (index += 1) { const b = slice[index]; switch (state) { State.Start => switch (b) { '\\' => state = State.Backslash, '\n' => { return Result{ .invalid_character = index }; }, '"' => return Result.success, else => try buf.append(b), }, State.Backslash => switch (b) { 'n' => { try buf.append('\n'); state = State.Start; }, 'r' => { try buf.append('\r'); state = State.Start; }, '\\' => { try buf.append('\\'); state = State.Start; }, 't' => { try buf.append('\t'); state = State.Start; }, '\'' => { try buf.append('\''); state = State.Start; }, '"' => { try buf.append('"'); state = State.Start; }, 'x' => { // TODO: add more/better/broader tests for this. const index_continue = index + 3; if (slice.len < index_continue) { return Result{ .expected_hex_digits = index }; } if (std.fmt.parseUnsigned(u8, slice[index + 1 .. index_continue], 16)) |byte| { try buf.append(byte); state = State.Start; index = index_continue - 1; // loop-header increments again } else |err| switch (err) { error.Overflow => unreachable, // 2 digits base 16 fits in a u8. error.InvalidCharacter => { return Result{ .invalid_hex_escape = index + 1 }; }, } }, 'u' => { // TODO: add more/better/broader tests for this. // TODO: we are already inside a nice, clean state machine... use it // instead of this hacky code. if (slice.len > index + 2 and slice[index + 1] == '{') { if (std.mem.indexOfScalarPos(u8, slice[0..std.math.min(index + 9, slice.len)], index + 3, '}')) |index_end| { const hex_str = slice[index + 2 .. index_end]; if (std.fmt.parseUnsigned(u32, hex_str, 16)) |uint| { if (uint <= 0x10ffff) { try buf.appendSlice(std.mem.toBytes(uint)[0..]); state = State.Start; index = index_end; // loop-header increments continue; } } else |err| switch (err) { error.Overflow => unreachable, error.InvalidCharacter => { return Result{ .invalid_unicode_escape = index + 1 }; }, } } else { return Result{ .missing_matching_rbrace = index + 1 }; } } else { return Result{ .expected_unicode_digits = index }; } }, else => { return Result{ .invalid_character = index }; }, }, } } else unreachable; // TODO should not need else unreachable on while(true) } /// Higher level API. Does not return extra info about parse errors. /// Caller owns returned memory. 
pub fn parseAlloc(allocator: *std.mem.Allocator, bytes: []const u8) ParseError![]u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); switch (try parseAppend(&buf, bytes)) { .success => return buf.toOwnedSlice(), else => return error.InvalidStringLiteral, } } test "parse" { const expect = std.testing.expect; const eql = std.mem.eql; var fixed_buf_mem: [32]u8 = undefined; var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]); var alloc = &fixed_buf_alloc.allocator; expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\""))); expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\""))); expect(eql(u8, "f💯", try parseAlloc(alloc, "\"f\u{1f4af}\""))); }
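// A minimal sketch of the lower-level parseAppend API on an assumed input
// (not taken from the test suite): unlike parseAlloc it reports *where* a bad
// escape sits, here the offset of the invalid "\xZZ" hex escape inside the
// literal.
test "parseAppend reports the offset of an invalid hex escape" {
    var fixed_buf_mem: [32]u8 = undefined;
    var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
    var buf = std.ArrayList(u8).init(&fixed_buf_alloc.allocator);
    defer buf.deinit();
    switch (try parseAppend(&buf, "\"ab\\xZZ\"")) {
        // index 4 is the first 'Z' within the literal's contents.
        .invalid_hex_escape => |index| assert(index == 4),
        else => unreachable,
    }
}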
lib/std/zig/string_literal.zig
const std = @import("std"); const builtin = @import("builtin"); const system = std.os.system; const linux = std.os.linux; const libc = @cImport({ @cInclude("stdio.h"); @cInclude("string.h"); }); const Tty = @This(); pub const COLOR_BLACK = 0; pub const COLOR_RED = 1; pub const COLOR_GREEN = 2; pub const COLOR_YELLOW = 3; pub const COLOR_BLUE = 4; pub const COLOR_MAGENTA = 5; pub const COLOR_CYAN = 6; pub const COLOR_WHITE = 7; pub const COLOR_NORMAL = 9; fdin: i32, fout: std.fs.File, buffered_writer: std.io.BufferedWriter(4096, std.fs.File.Writer), original_termios: std.os.termios, fg_color: i32 = 0, max_width: usize = 0, max_height: usize = 0, pub fn reset(self: *Tty) void { std.os.tcsetattr(self.fdin, std.os.TCSA.NOW, self.original_termios) catch { std.debug.print("Failed to reset termios attributes\n", .{}); }; } pub fn init(filename: []const u8) !Tty { const fdin = try std.os.open(filename, std.os.O.RDONLY, 0); errdefer std.os.close(fdin); var fout = try std.fs.openFileAbsolute(filename, .{ .mode = .write_only }); errdefer fout.close(); var tty = Tty{ .fdin = fdin, .fout = fout, .buffered_writer = std.io.bufferedWriter(fout.writer()), .original_termios = try std.os.tcgetattr(fdin), }; var new_termios = tty.original_termios; new_termios.iflag &= ~(@as(@TypeOf(new_termios.iflag), system.ICRNL)); new_termios.lflag &= ~(@as(@TypeOf(new_termios.lflag), (system.ICANON | system.ECHO | system.ISIG))); std.os.tcsetattr(tty.fdin, std.os.TCSA.NOW, new_termios) catch { std.debug.print("Failed to update termios attributes\n", .{}); }; tty.getWinSize(); tty.setNormal(); try std.os.sigaction(std.os.SIG.WINCH, &std.os.Sigaction{ .handler = .{ .sigaction = std.os.SIG.IGN }, .mask = std.os.empty_sigset, .flags = 0, }, null); return tty; } pub fn deinit(self: *Tty) void { self.reset(); self.fout.close(); std.os.close(self.fdin); } pub fn getWinSize(self: *Tty) void { var ws: system.winsize = undefined; if (system.ioctl(self.fout.handle, system.T.IOCGWINSZ, @ptrToInt(&ws)) == -1) { self.max_width = 80; self.max_height = 25; } else { self.max_width = ws.ws_col; self.max_height = ws.ws_row; } } pub fn printf(self: *Tty, comptime format: []const u8, args: anytype) void { self.buffered_writer.writer().print(format, args) catch unreachable; } pub fn setFg(self: *Tty, fg: i32) void { if (self.fg_color != fg) { self.sgr(30 + fg); self.fg_color = 30; } } pub fn setInvert(self: *Tty) void { self.sgr(7); } pub fn setUnderline(self: *Tty) void { self.sgr(4); } pub fn setBold(self: *Tty) void { self.sgr(1); } pub fn setNormal(self: *Tty) void { self.sgr(0); self.fg_color = COLOR_NORMAL; } pub fn setWrap(self: *Tty, wrap: bool) void { var c: u8 = if (wrap) 'h' else 'l'; self.printf("\x1b[?7{c}", .{c}); } pub fn newline(self: *Tty) void { self.printf("\x1b[K\n", .{}); } pub fn clearLine(self: *Tty) void { self.printf("\x1b[K", .{}); } pub fn setCol(self: *Tty, col: usize) void { self.printf("\x1b[{d}G", .{col + 1}); } pub fn moveUp(self: *Tty, i: usize) void { self.printf("\x1b[{d}A", .{i}); } pub fn putc(self: *Tty, c: u8) void { self.buffered_writer.writer().writeByte(c) catch unreachable; } pub fn flush(self: *Tty) void { self.buffered_writer.flush() catch unreachable; } fn sgr(self: *Tty, code: i32) void { self.printf("\x1b[{d}m", .{code}); } pub fn getChar(self: *Tty) !u8 { var c: [1]u8 = undefined; if (std.os.read(self.fdin, &c)) |bytes_read| { if (bytes_read == 0) { // EOF return error.EndOfFile; } return c[0]; } else |err| { std.log.err("error reading from tty", .{}); return err; } } pub fn inputReady(self: 
*Tty, timeout: ?i32, return_on_signal: bool) !bool { const ts = if (timeout) |t| &std.os.timespec{ .tv_sec = @divTrunc(t, 1000), .tv_nsec = @rem(t, 1000) * 1000000, } else null; switch (builtin.os.tag) { .macos, .freebsd, .netbsd, .dragonfly => { var kq = try std.os.kqueue(); defer std.os.close(kq); const chlist: [1]std.os.Kevent = .{.{ .ident = @intCast(usize, self.fdin), .filter = system.EVFILT_READ, .flags = system.EV_ADD | system.EV_ONESHOT | system.EV_CLEAR, .fflags = system.NOTE_LOWAT, .data = 1, .udata = 0, }}; var evlist: [1]std.os.Kevent = undefined; // Call kevent directly rather than using the wrapper in std.os so that we can handle // EINTR while (true) { const rc = system.kevent(kq, &chlist, 1, &evlist, 1, ts); switch (std.os.errno(rc)) { .SUCCESS => for (evlist[0..@intCast(usize, rc)]) |ev| { if (ev.filter == system.EVFILT_READ) { if (ev.flags & system.EV_ERROR != 0) { std.debug.print("kevent error: {s}\n", .{ @tagName(std.os.errno(ev.data)), }); return error.InvalidValue; } else { return true; } } } else { return false; }, .INTR => if (return_on_signal) return false else continue, // Copied from std.os.kevent .ACCES => return error.AccessDenied, .FAULT => unreachable, .BADF => unreachable, // Always a race condition. .INVAL => unreachable, .NOENT => return error.EventNotFound, .NOMEM => return error.SystemResources, .SRCH => return error.ProcessNotFound, else => unreachable, } } }, .linux => { const epfd = try std.os.epoll_create1(0); defer std.os.close(epfd); try std.os.epoll_ctl(epfd, linux.EPOLL.CTL_ADD, self.fdin, &linux.epoll_event{ .events = linux.EPOLL.IN, .data = .{ .fd = self.fdin }, }); var events: [1]linux.epoll_event = undefined; while (true) { const rc = linux.epoll_wait(epfd, &events, 1, timeout orelse -1); switch (std.os.errno(rc)) { .SUCCESS => for (events) |ev| { if (ev.data.fd == self.fdin) { return true; } } else return false, .INTR => if (return_on_signal) return false else continue, else => unreachable, } } }, .windows => { // Help wanted unreachable; }, else => unreachable, } return false; }
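// A hypothetical usage sketch (assumptions: "/dev/tty" is the terminal device
// to drive and red is just an arbitrary highlight colour). It only shows the
// intended call order: init, draw with the styling helpers, flush, deinit.
pub fn exampleBanner() !void {
    var tty = try init("/dev/tty");
    defer tty.deinit();
    tty.setFg(COLOR_RED);
    tty.printf("hello {s}", .{"world"});
    tty.setNormal();
    tty.newline();
    tty.flush();
}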
src/Tty.zig
const std = @import("std"); usingnamespace (@import("../machine.zig")); usingnamespace (@import("../util.zig")); const imm = Operand.immediate; const memory = Operand.memory; const reg = Operand.register; const regRm = Operand.registerRm; test "simple 8086 opcodes" { const m16 = Machine.init(.x86_16); const m32 = Machine.init(.x86_32); const m64 = Machine.init(.x64); debugPrint(false); { testOp0(m32, .AAA, "37"); testOp0(m32, .AAD, "D5 0A"); testOp0(m32, .AAM, "D4 0A"); testOp0(m32, .AAS, "3F"); testOp0(m64, .AAA, AsmError.InvalidOperand); testOp0(m64, .AAD, AsmError.InvalidOperand); testOp0(m64, .AAM, AsmError.InvalidOperand); testOp0(m64, .AAS, AsmError.InvalidOperand); testOp0(m32, .DAA, "27"); testOp0(m32, .DAS, "2F"); testOp0(m32, .HLT, "F4"); testOp0(m32, .LAHF, "9F"); testOp0(m32, .SAHF, "9E"); testOp0(m32, .SALC, "D6"); testOp0(m64, .SALC, AsmError.InvalidOperand); testOp0(m32, .STC, "F9"); testOp0(m32, .STD, "FD"); testOp0(m32, .STI, "FB"); testOp0(m32, .CLC, "F8"); testOp0(m32, .CLD, "FC"); testOp0(m32, .CLI, "FA"); testOp0(m32, .CMC, "F5"); testOp0(m32, .WAIT, "9B"); testOp0(m32, .FWAIT, "9B"); testOp0(m64, .CBW, "66 98"); testOp0(m64, .CWDE, "98"); testOp0(m64, .CDQE, "48 98"); { // PUSHF testOp0(m64, .PUSHF, "9C"); testOp0(m64, .PUSHFW, "66 9C"); testOp0(m64, .PUSHFD, AsmError.InvalidOperand); testOp0(m64, .PUSHFQ, "9C"); testOp0(m32, .PUSHF, "9C"); testOp0(m32, .PUSHFW, "66 9C"); testOp0(m32, .PUSHFD, "9C"); testOp0(m32, .PUSHFQ, AsmError.InvalidOperand); // POPF testOp0(m64, .POPF, "9D"); testOp0(m64, .POPFW, "66 9D"); testOp0(m64, .POPFD, AsmError.InvalidOperand); testOp0(m64, .POPFQ, "9D"); testOp0(m32, .POPF, "9D"); testOp0(m32, .POPFW, "66 9D"); testOp0(m32, .POPFD, "9D"); testOp0(m32, .POPFQ, AsmError.InvalidOperand); } { // INS testOp0(m64, .INSB, "6C"); testOp0(m64, .INSW, "66 6D"); testOp0(m64, .INSD, "6D"); testOp0(m32, .INSB, "6C"); testOp0(m32, .INSW, "66 6D"); testOp0(m32, .INSD, "6D"); // OUTS testOp0(m64, .OUTSB, "6E"); testOp0(m64, .OUTSW, "66 6F"); testOp0(m64, .OUTSD, "6F"); testOp0(m32, .OUTSB, "6E"); testOp0(m32, .OUTSW, "66 6F"); testOp0(m32, .OUTSD, "6F"); } { // MOVS testOp0(m64, .MOVSB, "A4"); testOp0(m64, .MOVSW, "66 A5"); testOp0(m64, .MOVSD, "A5"); testOp0(m64, .MOVSQ, "48 A5"); testOp0(m32, .MOVSB, "A4"); testOp0(m32, .MOVSW, "66 A5"); testOp0(m32, .MOVSD, "A5"); testOp0(m32, .MOVSQ, AsmError.InvalidOperand); // CMPS testOp0(m64, .CMPSB, "A6"); testOp0(m64, .CMPSW, "66 A7"); testOp0(m64, .CMPSD, "A7"); testOp0(m64, .CMPSQ, "48 A7"); testOp0(m32, .CMPSB, "A6"); testOp0(m32, .CMPSW, "66 A7"); testOp0(m32, .CMPSD, "A7"); testOp0(m32, .CMPSQ, AsmError.InvalidOperand); // STOS testOp0(m64, .STOSB, "AA"); testOp0(m64, .STOSW, "66 AB"); testOp0(m64, .STOSD, "AB"); testOp0(m64, .STOSQ, "48 AB"); testOp0(m32, .STOSB, "AA"); testOp0(m32, .STOSW, "66 AB"); testOp0(m32, .STOSD, "AB"); testOp0(m32, .STOSQ, AsmError.InvalidOperand); // LODS testOp0(m64, .LODSB, "AC"); testOp0(m64, .LODSW, "66 AD"); testOp0(m64, .LODSD, "AD"); testOp0(m64, .LODSQ, "48 AD"); testOp0(m32, .LODSB, "AC"); testOp0(m32, .LODSW, "66 AD"); testOp0(m32, .LODSD, "AD"); testOp0(m32, .LODSQ, AsmError.InvalidOperand); // SCAS testOp0(m64, .SCASB, "AE"); testOp0(m64, .SCASW, "66 AF"); testOp0(m64, .SCASD, "AF"); testOp0(m64, .SCASQ, "48 AF"); testOp0(m32, .SCASB, "AE"); testOp0(m32, .SCASW, "66 AF"); testOp0(m32, .SCASD, "AF"); testOp0(m32, .SCASQ, AsmError.InvalidOperand); } { // XLATB testOp0(m32, .XLATB, "D7"); } } { const op1 = Operand.immediate16(0x1100); // RET testOp0(m64, .RET, "c3"); 
testOp1(m64, .RET, imm(0x7FFF), "c2ff7f"); testOp0(m64, .RETW, "66c3"); testOp1(m64, .RETW, imm(0x7FFF), "66c2ff7f"); testOp0(m64, .RETD, AsmError.InvalidOperand); testOp1(m64, .RETD, imm(0x7FFF), AsmError.InvalidOperand); testOp0(m64, .RETQ, "c3"); testOp1(m64, .RETQ, imm(0x7FFF), "c2ff7f"); // RETF testOp0(m64, .RETF, "cb"); testOp1(m64, .RETF, imm(0x7FFF), "caff7f"); testOp0(m64, .RETFW, "66cb"); testOp1(m64, .RETFW, imm(0x7FFF), "66caff7f"); testOp0(m64, .RETFD, AsmError.InvalidOperand); testOp1(m64, .RETFD, imm(0x7FFF), AsmError.InvalidOperand); testOp0(m64, .RETFQ, "cb"); testOp1(m64, .RETFQ, imm(0x7FFF), "caff7f"); // RETN testOp0(m64, .RETN, "c3"); testOp1(m64, .RETN, imm(0x7FFF), "c2ff7f"); testOp0(m64, .RETNW, "66c3"); testOp1(m64, .RETNW, imm(0x7FFF), "66c2ff7f"); testOp0(m64, .RETND, AsmError.InvalidOperand); testOp1(m64, .RETND, imm(0x7FFF), AsmError.InvalidOperand); testOp0(m64, .RETNQ, "c3"); testOp1(m64, .RETNQ, imm(0x7FFF), "c2ff7f"); // IRET testOp0(m32, .IRET, "cf"); testOp0(m32, .IRETW, "66cf"); testOp0(m32, .IRETD, "cf"); testOp0(m32, .IRETQ, AsmError.InvalidOperand); // IRET testOp0(m16, .IRET, "cf"); testOp0(m16, .IRETW, "cf"); testOp0(m16, .IRETD, "66cf"); testOp0(m16, .IRETQ, AsmError.InvalidOperand); } { const op1 = Operand.immediate8(0x0A); testOp1(m32, .AAD, op1, "D5 0A"); testOp1(m32, .AAM, op1, "D4 0A"); testOp1(m64, .AAD, op1, AsmError.InvalidOperand); testOp1(m64, .AAM, op1, AsmError.InvalidOperand); } { const op1 = Operand.immediate8(0x0A); testOp1(m32, .INT, op1, "CD 0A"); testOp0(m32, .INT1, "F1"); testOp0(m32, .INT3, "CC"); testOp0(m32, .INTO, "CE"); testOp1(m64, .INT, op1, "CD 0A"); testOp0(m64, .INT1, "F1"); testOp0(m64, .INT3, "CC"); testOp0(m64, .INTO, AsmError.InvalidOperand); } { // IN const imm8 = Operand.immediate8(0x00); const al = Operand.register(.AL); const ax = Operand.register(.AX); const eax = Operand.register(.EAX); const dx = Operand.register(.DX); testOp2(m32, .IN, al, imm8, "E4 00"); testOp2(m32, .IN, ax, imm8, "66 E5 00"); testOp2(m32, .IN, eax, imm8, "E5 00"); testOp2(m32, .IN, al, dx, "EC"); testOp2(m32, .IN, ax, dx, "66 ED"); testOp2(m32, .IN, eax, dx, "ED"); // OUT testOp2(m32, .OUT, imm8, al, "E6 00"); testOp2(m32, .OUT, imm8, ax, "66 E7 00"); testOp2(m32, .OUT, imm8, eax, "E7 00"); testOp2(m32, .OUT, dx, al, "EE"); testOp2(m32, .OUT, dx, ax, "66 EF"); testOp2(m32, .OUT, dx, eax, "EF"); } // Unary { { testOp1(m16, .INC, regRm(.AL), "FE C0"); testOp1(m16, .DEC, regRm(.AL), "FE C8"); testOp1(m16, .NOT, regRm(.AL), "F6 D0"); testOp1(m16, .NEG, regRm(.AL), "F6 D8"); // testOp1(m32, .INC, regRm(.AL), "FE C0"); testOp1(m32, .DEC, regRm(.AL), "FE C8"); testOp1(m32, .NOT, regRm(.AL), "F6 D0"); testOp1(m32, .NEG, regRm(.AL), "F6 D8"); // testOp1(m64, .INC, regRm(.AL), "FE C0"); testOp1(m64, .DEC, regRm(.AL), "FE C8"); testOp1(m64, .NOT, regRm(.AL), "F6 D0"); testOp1(m64, .NEG, regRm(.AL), "F6 D8"); } { testOp1(m16, .INC, regRm(.AX), "FF C0"); testOp1(m16, .DEC, regRm(.AX), "FF C8"); testOp1(m16, .NOT, regRm(.AX), "F7 D0"); testOp1(m16, .NEG, regRm(.AX), "F7 D8"); // testOp1(m32, .INC, regRm(.AX), "66 FF C0"); testOp1(m32, .DEC, regRm(.AX), "66 FF C8"); testOp1(m32, .NOT, regRm(.AX), "66 F7 D0"); testOp1(m32, .NEG, regRm(.AX), "66 F7 D8"); // testOp1(m64, .INC, regRm(.AX), "66 FF C0"); testOp1(m64, .DEC, regRm(.AX), "66 FF C8"); testOp1(m64, .NOT, regRm(.AX), "66 F7 D0"); testOp1(m64, .NEG, regRm(.AX), "66 F7 D8"); } { testOp1(m16, .INC, regRm(.EAX), "66 FF C0"); testOp1(m16, .DEC, regRm(.EAX), "66 FF C8"); testOp1(m16, .NOT, regRm(.EAX), 
"66 F7 D0"); testOp1(m16, .NEG, regRm(.EAX), "66 F7 D8"); // testOp1(m32, .INC, regRm(.EAX), "FF C0"); testOp1(m32, .DEC, regRm(.EAX), "FF C8"); testOp1(m32, .NOT, regRm(.EAX), "F7 D0"); testOp1(m32, .NEG, regRm(.EAX), "F7 D8"); // testOp1(m64, .INC, regRm(.EAX), "FF C0"); testOp1(m64, .DEC, regRm(.EAX), "FF C8"); testOp1(m64, .NOT, regRm(.EAX), "F7 D0"); testOp1(m64, .NEG, regRm(.EAX), "F7 D8"); } { testOp1(m16, .INC, regRm(.RAX), AsmError.InvalidOperand); testOp1(m16, .DEC, regRm(.RAX), AsmError.InvalidOperand); testOp1(m16, .NOT, regRm(.RAX), AsmError.InvalidOperand); testOp1(m16, .NEG, regRm(.RAX), AsmError.InvalidOperand); // testOp1(m32, .INC, regRm(.RAX), AsmError.InvalidOperand); testOp1(m32, .DEC, regRm(.RAX), AsmError.InvalidOperand); testOp1(m32, .NOT, regRm(.RAX), AsmError.InvalidOperand); testOp1(m32, .NEG, regRm(.RAX), AsmError.InvalidOperand); // testOp1(m64, .INC, regRm(.RAX), "48 FF C0"); testOp1(m64, .DEC, regRm(.RAX), "48 FF C8"); testOp1(m64, .NOT, regRm(.RAX), "48 F7 D0"); testOp1(m64, .NEG, regRm(.RAX), "48 F7 D8"); } { testOp1(m16, .INC, reg(.AX), "40"); testOp1(m16, .DEC, reg(.AX), "48"); // testOp1(m32, .INC, reg(.AX), "66 40"); testOp1(m32, .DEC, reg(.AX), "66 48"); // testOp1(m64, .INC, reg(.AX), "66 FF C0"); testOp1(m64, .DEC, reg(.AX), "66 FF C8"); } { testOp1(m16, .INC, reg(.EAX), "66 40"); testOp1(m16, .DEC, reg(.EAX), "66 48"); // testOp1(m32, .INC, reg(.EAX), "40"); testOp1(m32, .DEC, reg(.EAX), "48"); // testOp1(m64, .INC, reg(.EAX), "FF C0"); testOp1(m64, .DEC, reg(.EAX), "FF C8"); } } // LEA { { const op1 = Operand.register(.EAX); const op2 = Operand.registerRm(.EAX); testOp2(m32, .LEA, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LEA, op1, op2, AsmError.InvalidOperand); } { const op1 = Operand.register(.AX); const op2 = Operand.memoryRm(.DefaultSeg, .WORD, .EAX, 0x11); testOp2(m32, .LEA, op1, op2, "66 8D 40 11"); testOp2(m64, .LEA, op1, op2, "66 67 8D 40 11"); } { const op1 = Operand.register(.EAX); const op2 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0x11); testOp2(m32, .LEA, op1, op2, "8D 40 11"); testOp2(m64, .LEA, op1, op2, "67 8D 40 11"); } { const op1 = Operand.register(.RAX); const op2 = Operand.memoryRm(.DefaultSeg, .QWORD, .EAX, 0x11); testOp2(m32, .LEA, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LEA, op1, op2, "67 48 8D 40 11"); } { const op1 = Operand.register(.EAX); const op2 = Operand.memoryRm(.DefaultSeg, .DWORD, .RAX, 0x11); testOp2(m32, .LEA, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LEA, op1, op2, "8D 40 11"); } } { { const op1 = Operand.register(.AX); const op2 = Operand.memoryRm(.DefaultSeg, .FAR_WORD, .EAX, 0x11); testOp2(m32, .LDS, op1, op2, "66 C5 40 11"); testOp2(m64, .LDS, op1, op2, AsmError.InvalidOperand); testOp2(m32, .LES, op1, op2, "66 C4 40 11"); testOp2(m64, .LES, op1, op2, AsmError.InvalidOperand); testOp2(m32, .LSS, op1, op2, "66 0F B2 40 11"); testOp2(m64, .LSS, op1, op2, "66 67 0F B2 40 11"); testOp2(m32, .LFS, op1, op2, "66 0F B4 40 11"); testOp2(m64, .LFS, op1, op2, "66 67 0F B4 40 11"); testOp2(m32, .LGS, op1, op2, "66 0F B5 40 11"); testOp2(m64, .LGS, op1, op2, "66 67 0F B5 40 11"); } { const op1 = Operand.register(.EAX); const op2 = Operand.memoryRm(.DefaultSeg, .FAR_DWORD, .EAX, 0x11); testOp2(m32, .LDS, op1, op2, "C5 40 11"); testOp2(m64, .LDS, op1, op2, AsmError.InvalidOperand); testOp2(m32, .LES, op1, op2, "C4 40 11"); testOp2(m64, .LES, op1, op2, AsmError.InvalidOperand); testOp2(m32, .LSS, op1, op2, "0F B2 40 11"); testOp2(m64, .LSS, op1, op2, "67 0F B2 40 11"); testOp2(m32, .LFS, op1, 
op2, "0F B4 40 11"); testOp2(m64, .LFS, op1, op2, "67 0F B4 40 11"); testOp2(m32, .LGS, op1, op2, "0F B5 40 11"); testOp2(m64, .LGS, op1, op2, "67 0F B5 40 11"); } { const op1 = Operand.register(.RAX); const op2 = Operand.memoryRm(.DefaultSeg, .FAR_QWORD, .EAX, 0x11); testOp2(m32, .LDS, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LDS, op1, op2, AsmError.InvalidOperand); testOp2(m32, .LES, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LES, op1, op2, AsmError.InvalidOperand); testOp2(m32, .LSS, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LSS, op1, op2, "67 48 0F B2 40 11"); testOp2(m32, .LFS, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LFS, op1, op2, "67 48 0F B4 40 11"); testOp2(m32, .LGS, op1, op2, AsmError.InvalidOperand); testOp2(m64, .LGS, op1, op2, "67 48 0F B5 40 11"); } } // LOOP { { const op1 = Operand.immediate(0x11); testOp1(m32, .LOOP, op1, "E2 11"); testOp1(m32, .LOOPE, op1, "E1 11"); testOp1(m32, .LOOPNE, op1, "E0 11"); testOp1(m64, .LOOP, op1, "E2 11"); testOp1(m64, .LOOPE, op1, "E1 11"); testOp1(m64, .LOOPNE, op1, "E0 11"); } } }
src/x86/tests/simple_8086.zig
const sf = @import("../sfml.zig"); const Font = @This(); // Constructor/destructor /// Loads a font from a file pub fn createFromFile(path: [:0]const u8) !Font { var font = sf.c.sfFont_createFromFile(path); if (font) |f| { return Font{ ._ptr = f }; } else return sf.Error.resourceLoadingError; } /// Loads a font from a file in memory pub fn createFromMemory(data: []const u8) !Font { var font = sf.c.sfFont_createFromMemory(@ptrCast(?*const anyopaque, data.ptr), data.len); if (font) |f| { return Font{ ._ptr = f }; } else return sf.Error.resourceLoadingError; } /// Destroys a font pub fn destroy(self: *Font) void { sf.c.sfFont_destroy(self._ptr); self._ptr = undefined; } /// Gets the family name of this font /// Normally, this is done through getInfo, but as info only contains this data, this makes more sense pub fn getFamily(self: Font) [*:0]const u8 { return sf.c.sfFont_getInfo(self._ptr).family; } /// Gets the kerning offset of two glyphs pub fn getKerning(self: Font, first: u32, second: u32, character_size: usize) f32 { return sf.c.sfFont_getKerning(self._ptr, first, second, @intCast(c_uint, character_size)); } /// Gets the default spacing between two lines pub fn getLineSpacing(self: Font, character_size: usize) f32 { return sf.c.sfFont_getLineSpacing(self._ptr, @intCast(c_uint, character_size)); } /// Gets the vertical offset of the underline pub fn getUnderlinePosition(self: Font, character_size: usize) f32 { return sf.c.sfFont_getUnderlinePosition(self._ptr, @intCast(c_uint, character_size)); } /// Gets the underline thickness pub fn getUnderlineThickness(self: Font, character_size: usize) f32 { return sf.c.sfFont_getUnderlineThickness(self._ptr, @intCast(c_uint, character_size)); } pub const getGlyph = @compileError("Function is not implemented yet."); pub const initFromStream = @compileError("Function is not implemented yet."); /// Pointer to the csfml font _ptr: *sf.c.sfFont, test "Font: sane getters and setters" { const std = @import("std"); const tst = std.testing; // TODO: is it a good idea to have a test rely on resources? var font = try createFromMemory(@embedFile("../../../arial.ttf")); defer font.destroy(); try tst.expect(std.mem.eql(u8, "Arial", std.mem.span(font.getFamily()))); // TODO: how to test that? _ = font.getLineSpacing(12); _ = font.getUnderlinePosition(12); _ = font.getUnderlineThickness(12); _ = font.getKerning('a', 'b', 12); }
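// A hypothetical usage sketch (assumption: "arial.ttf" sits in the working
// directory). It loads a font from disk rather than from memory as the test
// above does, and queries a couple of metrics for a 24px character size.
pub fn exampleLoadFromDisk() !f32 {
    var font = try createFromFile("arial.ttf");
    defer font.destroy();
    const spacing = font.getLineSpacing(24);
    const kerning = font.getKerning('A', 'V', 24);
    return spacing + kerning;
}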
src/sfml/graphics/Font.zig
const SFR = @import("../std/sfr.zig").SFR; const mcu = @import("../atmega328p.zig"); const UDR = SFR(0xc6, u8, packed union { RXB: u8, TXB: u8, }); const UCSRA = SFR(0xc0, u8, packed struct { MPCM: u1 = 0, // D0 U2X: u1 = 0, // D1 PE: u1 = 0, // D2 DOR: u1 = 0, // D3 FE: u1 = 0, // D4 UDRE: u1 = 0, // D5 TXC: u1 = 0, // D6 RXC: u1 = 0, // D7 }); const UCSRB = SFR(0xc1, u8, packed struct { TXB8: u1 = 0, // D0 RXB8: u1 = 0, // D1 UCSZ2: u1 = 0, // D2 TXEN: u1 = 0, // D3 RXEN: u1 = 0, // D4 UDREIE: u1 = 0, // D5 TXCIE: u1 = 0, // D6 RXCIE: u1 = 0, // D7 }); const UCSRC = SFR(0xc2, u8, packed struct { UCPOL: u1 = 0, // D0 UCSZ0: u1 = 0, // D1 1 UCSZ1: u1 = 0, // D2 1 USBS: u1 = 0, // D3 UPM0: u1 = 0, // D4 UPM1: u1 = 0, // D5 UMSEL: u1 = 0, // D6 URSEL: u1 = 0, // D7 }); const UBRRL = SFR(0xc4, u8, packed struct { USART: u8 = 0, // UBRR[7:0] }); const UBRRH = SFR(0xc5, u8, packed struct { USART: u4 = 0, // D11:D8 - UBRR[11:8] reserved: u3 = 0, // D14:D12 URSEL: u1 = 0, // D15 }); pub fn init(comptime baud: comptime_int) void { // in async double speed mode (U2X) UBRR = cpu_freq / (8 * baud) - 1; // for a 16MHz CPU and 9600 baud that gives 207 = 0b000011001111, // the low 8 bits of which go to UBRRL and the upper 4 bits to UBRRH const ubrr: u12 = mcu.cpu_freq / (8 * baud) - 1; UBRRL.write(.{ .USART = ubrr }); UBRRH.write(.{ .USART = ubrr >> 8 }); // UCSRA is written with every other bit zeroed, so we only need to enable U2X UCSRA.write(.{ .U2X = 1, }); // allow receive and transmit UCSRB.write(.{ // 0b00011000 .TXEN = 1, .RXEN = 1, }); UCSRC.write(.{ // 0b10000110 .UCSZ0 = 1, .UCSZ1 = 1, .URSEL = 1, }); } pub fn receiveChar() u8 { while (UCSRA.read().RXC != 1) {} return UDR.read().RXB; } // file-scope receive buffer so the slice returned by receiveString stays valid after the call returns var receive_buf: [0x40]u8 = undefined; pub fn receiveString() []u8 { const first_ch: u8 = receiveChar(); var next_ch: u8 = first_ch; var counter: u8 = 0; while (next_ch != '\r') { sendChar(next_ch); receive_buf[counter] = next_ch; counter += 1; next_ch = receiveChar(); } sendString("\r\n"); return receive_buf[0..counter]; } pub fn sendChar(ch: u8) void { while (UCSRA.read().UDRE != 1) {} UDR.write(.{ .TXB = ch }); } pub fn sendString(str: []const u8) void { for (str) |ch| { if (ch != 0) { sendChar(ch); } else { break; } } } pub fn sendStringLn(str: []const u8) void { sendString(str); sendString("\r\n"); }
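// A hypothetical usage sketch (assumption: called from the firmware entry
// point after reset): bring the UART up at 9600 baud and echo every received
// byte straight back to the sender.
pub fn exampleEcho() void {
    init(9600);
    sendStringLn("uart ready");
    while (true) {
        sendChar(receiveChar());
    }
}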
src/serial/uart.zig
pub const VSS_ASSOC_NO_MAX_SPACE = @as(i32, -1); pub const VSS_ASSOC_REMOVE = @as(u32, 0); //-------------------------------------------------------------------------------- // Section: Types (70) //-------------------------------------------------------------------------------- pub const IVssExamineWriterMetadata = extern struct { placeholder: usize, // TODO: why is this type empty? }; pub const VSS_OBJECT_TYPE = enum(i32) { UNKNOWN = 0, NONE = 1, SNAPSHOT_SET = 2, SNAPSHOT = 3, PROVIDER = 4, TYPE_COUNT = 5, }; pub const VSS_OBJECT_UNKNOWN = VSS_OBJECT_TYPE.UNKNOWN; pub const VSS_OBJECT_NONE = VSS_OBJECT_TYPE.NONE; pub const VSS_OBJECT_SNAPSHOT_SET = VSS_OBJECT_TYPE.SNAPSHOT_SET; pub const VSS_OBJECT_SNAPSHOT = VSS_OBJECT_TYPE.SNAPSHOT; pub const VSS_OBJECT_PROVIDER = VSS_OBJECT_TYPE.PROVIDER; pub const VSS_OBJECT_TYPE_COUNT = VSS_OBJECT_TYPE.TYPE_COUNT; pub const VSS_SNAPSHOT_STATE = enum(i32) { UNKNOWN = 0, PREPARING = 1, PROCESSING_PREPARE = 2, PREPARED = 3, PROCESSING_PRECOMMIT = 4, PRECOMMITTED = 5, PROCESSING_COMMIT = 6, COMMITTED = 7, PROCESSING_POSTCOMMIT = 8, PROCESSING_PREFINALCOMMIT = 9, PREFINALCOMMITTED = 10, PROCESSING_POSTFINALCOMMIT = 11, CREATED = 12, ABORTED = 13, DELETED = 14, POSTCOMMITTED = 15, COUNT = 16, }; pub const VSS_SS_UNKNOWN = VSS_SNAPSHOT_STATE.UNKNOWN; pub const VSS_SS_PREPARING = VSS_SNAPSHOT_STATE.PREPARING; pub const VSS_SS_PROCESSING_PREPARE = VSS_SNAPSHOT_STATE.PROCESSING_PREPARE; pub const VSS_SS_PREPARED = VSS_SNAPSHOT_STATE.PREPARED; pub const VSS_SS_PROCESSING_PRECOMMIT = VSS_SNAPSHOT_STATE.PROCESSING_PRECOMMIT; pub const VSS_SS_PRECOMMITTED = VSS_SNAPSHOT_STATE.PRECOMMITTED; pub const VSS_SS_PROCESSING_COMMIT = VSS_SNAPSHOT_STATE.PROCESSING_COMMIT; pub const VSS_SS_COMMITTED = VSS_SNAPSHOT_STATE.COMMITTED; pub const VSS_SS_PROCESSING_POSTCOMMIT = VSS_SNAPSHOT_STATE.PROCESSING_POSTCOMMIT; pub const VSS_SS_PROCESSING_PREFINALCOMMIT = VSS_SNAPSHOT_STATE.PROCESSING_PREFINALCOMMIT; pub const VSS_SS_PREFINALCOMMITTED = VSS_SNAPSHOT_STATE.PREFINALCOMMITTED; pub const VSS_SS_PROCESSING_POSTFINALCOMMIT = VSS_SNAPSHOT_STATE.PROCESSING_POSTFINALCOMMIT; pub const VSS_SS_CREATED = VSS_SNAPSHOT_STATE.CREATED; pub const VSS_SS_ABORTED = VSS_SNAPSHOT_STATE.ABORTED; pub const VSS_SS_DELETED = VSS_SNAPSHOT_STATE.DELETED; pub const VSS_SS_POSTCOMMITTED = VSS_SNAPSHOT_STATE.POSTCOMMITTED; pub const VSS_SS_COUNT = VSS_SNAPSHOT_STATE.COUNT; pub const VSS_VOLUME_SNAPSHOT_ATTRIBUTES = enum(i32) { PERSISTENT = 1, NO_AUTORECOVERY = 2, CLIENT_ACCESSIBLE = 4, NO_AUTO_RELEASE = 8, NO_WRITERS = 16, TRANSPORTABLE = 32, NOT_SURFACED = 64, NOT_TRANSACTED = 128, HARDWARE_ASSISTED = 65536, DIFFERENTIAL = 131072, PLEX = 262144, IMPORTED = 524288, EXPOSED_LOCALLY = 1048576, EXPOSED_REMOTELY = 2097152, AUTORECOVER = 4194304, ROLLBACK_RECOVERY = 8388608, DELAYED_POSTSNAPSHOT = 16777216, TXF_RECOVERY = 33554432, FILE_SHARE = 67108864, }; pub const VSS_VOLSNAP_ATTR_PERSISTENT = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.PERSISTENT; pub const VSS_VOLSNAP_ATTR_NO_AUTORECOVERY = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.NO_AUTORECOVERY; pub const VSS_VOLSNAP_ATTR_CLIENT_ACCESSIBLE = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.CLIENT_ACCESSIBLE; pub const VSS_VOLSNAP_ATTR_NO_AUTO_RELEASE = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.NO_AUTO_RELEASE; pub const VSS_VOLSNAP_ATTR_NO_WRITERS = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.NO_WRITERS; pub const VSS_VOLSNAP_ATTR_TRANSPORTABLE = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.TRANSPORTABLE; pub const VSS_VOLSNAP_ATTR_NOT_SURFACED = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.NOT_SURFACED; pub const 
VSS_VOLSNAP_ATTR_NOT_TRANSACTED = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.NOT_TRANSACTED; pub const VSS_VOLSNAP_ATTR_HARDWARE_ASSISTED = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.HARDWARE_ASSISTED; pub const VSS_VOLSNAP_ATTR_DIFFERENTIAL = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.DIFFERENTIAL; pub const VSS_VOLSNAP_ATTR_PLEX = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.PLEX; pub const VSS_VOLSNAP_ATTR_IMPORTED = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.IMPORTED; pub const VSS_VOLSNAP_ATTR_EXPOSED_LOCALLY = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.EXPOSED_LOCALLY; pub const VSS_VOLSNAP_ATTR_EXPOSED_REMOTELY = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.EXPOSED_REMOTELY; pub const VSS_VOLSNAP_ATTR_AUTORECOVER = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.AUTORECOVER; pub const VSS_VOLSNAP_ATTR_ROLLBACK_RECOVERY = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.ROLLBACK_RECOVERY; pub const VSS_VOLSNAP_ATTR_DELAYED_POSTSNAPSHOT = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.DELAYED_POSTSNAPSHOT; pub const VSS_VOLSNAP_ATTR_TXF_RECOVERY = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.TXF_RECOVERY; pub const VSS_VOLSNAP_ATTR_FILE_SHARE = VSS_VOLUME_SNAPSHOT_ATTRIBUTES.FILE_SHARE; pub const VSS_SNAPSHOT_CONTEXT = enum(i32) { BACKUP = 0, FILE_SHARE_BACKUP = 16, NAS_ROLLBACK = 25, APP_ROLLBACK = 9, CLIENT_ACCESSIBLE = 29, CLIENT_ACCESSIBLE_WRITERS = 13, ALL = -1, }; pub const VSS_CTX_BACKUP = VSS_SNAPSHOT_CONTEXT.BACKUP; pub const VSS_CTX_FILE_SHARE_BACKUP = VSS_SNAPSHOT_CONTEXT.FILE_SHARE_BACKUP; pub const VSS_CTX_NAS_ROLLBACK = VSS_SNAPSHOT_CONTEXT.NAS_ROLLBACK; pub const VSS_CTX_APP_ROLLBACK = VSS_SNAPSHOT_CONTEXT.APP_ROLLBACK; pub const VSS_CTX_CLIENT_ACCESSIBLE = VSS_SNAPSHOT_CONTEXT.CLIENT_ACCESSIBLE; pub const VSS_CTX_CLIENT_ACCESSIBLE_WRITERS = VSS_SNAPSHOT_CONTEXT.CLIENT_ACCESSIBLE_WRITERS; pub const VSS_CTX_ALL = VSS_SNAPSHOT_CONTEXT.ALL; pub const VSS_PROVIDER_CAPABILITIES = enum(i32) { LEGACY = 1, COMPLIANT = 2, LUN_REPOINT = 4, LUN_RESYNC = 8, OFFLINE_CREATION = 16, MULTIPLE_IMPORT = 32, RECYCLING = 64, PLEX = 128, DIFFERENTIAL = 256, CLUSTERED = 512, }; pub const VSS_PRV_CAPABILITY_LEGACY = VSS_PROVIDER_CAPABILITIES.LEGACY; pub const VSS_PRV_CAPABILITY_COMPLIANT = VSS_PROVIDER_CAPABILITIES.COMPLIANT; pub const VSS_PRV_CAPABILITY_LUN_REPOINT = VSS_PROVIDER_CAPABILITIES.LUN_REPOINT; pub const VSS_PRV_CAPABILITY_LUN_RESYNC = VSS_PROVIDER_CAPABILITIES.LUN_RESYNC; pub const VSS_PRV_CAPABILITY_OFFLINE_CREATION = VSS_PROVIDER_CAPABILITIES.OFFLINE_CREATION; pub const VSS_PRV_CAPABILITY_MULTIPLE_IMPORT = VSS_PROVIDER_CAPABILITIES.MULTIPLE_IMPORT; pub const VSS_PRV_CAPABILITY_RECYCLING = VSS_PROVIDER_CAPABILITIES.RECYCLING; pub const VSS_PRV_CAPABILITY_PLEX = VSS_PROVIDER_CAPABILITIES.PLEX; pub const VSS_PRV_CAPABILITY_DIFFERENTIAL = VSS_PROVIDER_CAPABILITIES.DIFFERENTIAL; pub const VSS_PRV_CAPABILITY_CLUSTERED = VSS_PROVIDER_CAPABILITIES.CLUSTERED; pub const VSS_HARDWARE_OPTIONS = enum(i32) { BREAKEX_FLAG_MASK_LUNS = 1, BREAKEX_FLAG_MAKE_READ_WRITE = 2, BREAKEX_FLAG_REVERT_IDENTITY_ALL = 4, BREAKEX_FLAG_REVERT_IDENTITY_NONE = 8, ONLUNSTATECHANGE_NOTIFY_READ_WRITE = 256, ONLUNSTATECHANGE_NOTIFY_LUN_PRE_RECOVERY = 512, ONLUNSTATECHANGE_NOTIFY_LUN_POST_RECOVERY = 1024, ONLUNSTATECHANGE_DO_MASK_LUNS = 2048, }; pub const VSS_BREAKEX_FLAG_MASK_LUNS = VSS_HARDWARE_OPTIONS.BREAKEX_FLAG_MASK_LUNS; pub const VSS_BREAKEX_FLAG_MAKE_READ_WRITE = VSS_HARDWARE_OPTIONS.BREAKEX_FLAG_MAKE_READ_WRITE; pub const VSS_BREAKEX_FLAG_REVERT_IDENTITY_ALL = VSS_HARDWARE_OPTIONS.BREAKEX_FLAG_REVERT_IDENTITY_ALL; pub const VSS_BREAKEX_FLAG_REVERT_IDENTITY_NONE = VSS_HARDWARE_OPTIONS.BREAKEX_FLAG_REVERT_IDENTITY_NONE; pub const 
VSS_ONLUNSTATECHANGE_NOTIFY_READ_WRITE = VSS_HARDWARE_OPTIONS.ONLUNSTATECHANGE_NOTIFY_READ_WRITE; pub const VSS_ONLUNSTATECHANGE_NOTIFY_LUN_PRE_RECOVERY = VSS_HARDWARE_OPTIONS.ONLUNSTATECHANGE_NOTIFY_LUN_PRE_RECOVERY; pub const VSS_ONLUNSTATECHANGE_NOTIFY_LUN_POST_RECOVERY = VSS_HARDWARE_OPTIONS.ONLUNSTATECHANGE_NOTIFY_LUN_POST_RECOVERY; pub const VSS_ONLUNSTATECHANGE_DO_MASK_LUNS = VSS_HARDWARE_OPTIONS.ONLUNSTATECHANGE_DO_MASK_LUNS; pub const VSS_RECOVERY_OPTIONS = enum(i32) { REVERT_IDENTITY_ALL = 256, NO_VOLUME_CHECK = 512, }; pub const VSS_RECOVERY_REVERT_IDENTITY_ALL = VSS_RECOVERY_OPTIONS.REVERT_IDENTITY_ALL; pub const VSS_RECOVERY_NO_VOLUME_CHECK = VSS_RECOVERY_OPTIONS.NO_VOLUME_CHECK; pub const VSS_WRITER_STATE = enum(i32) { UNKNOWN = 0, STABLE = 1, WAITING_FOR_FREEZE = 2, WAITING_FOR_THAW = 3, WAITING_FOR_POST_SNAPSHOT = 4, WAITING_FOR_BACKUP_COMPLETE = 5, FAILED_AT_IDENTIFY = 6, FAILED_AT_PREPARE_BACKUP = 7, FAILED_AT_PREPARE_SNAPSHOT = 8, FAILED_AT_FREEZE = 9, FAILED_AT_THAW = 10, FAILED_AT_POST_SNAPSHOT = 11, FAILED_AT_BACKUP_COMPLETE = 12, FAILED_AT_PRE_RESTORE = 13, FAILED_AT_POST_RESTORE = 14, FAILED_AT_BACKUPSHUTDOWN = 15, COUNT = 16, }; pub const VSS_WS_UNKNOWN = VSS_WRITER_STATE.UNKNOWN; pub const VSS_WS_STABLE = VSS_WRITER_STATE.STABLE; pub const VSS_WS_WAITING_FOR_FREEZE = VSS_WRITER_STATE.WAITING_FOR_FREEZE; pub const VSS_WS_WAITING_FOR_THAW = VSS_WRITER_STATE.WAITING_FOR_THAW; pub const VSS_WS_WAITING_FOR_POST_SNAPSHOT = VSS_WRITER_STATE.WAITING_FOR_POST_SNAPSHOT; pub const VSS_WS_WAITING_FOR_BACKUP_COMPLETE = VSS_WRITER_STATE.WAITING_FOR_BACKUP_COMPLETE; pub const VSS_WS_FAILED_AT_IDENTIFY = VSS_WRITER_STATE.FAILED_AT_IDENTIFY; pub const VSS_WS_FAILED_AT_PREPARE_BACKUP = VSS_WRITER_STATE.FAILED_AT_PREPARE_BACKUP; pub const VSS_WS_FAILED_AT_PREPARE_SNAPSHOT = VSS_WRITER_STATE.FAILED_AT_PREPARE_SNAPSHOT; pub const VSS_WS_FAILED_AT_FREEZE = VSS_WRITER_STATE.FAILED_AT_FREEZE; pub const VSS_WS_FAILED_AT_THAW = VSS_WRITER_STATE.FAILED_AT_THAW; pub const VSS_WS_FAILED_AT_POST_SNAPSHOT = VSS_WRITER_STATE.FAILED_AT_POST_SNAPSHOT; pub const VSS_WS_FAILED_AT_BACKUP_COMPLETE = VSS_WRITER_STATE.FAILED_AT_BACKUP_COMPLETE; pub const VSS_WS_FAILED_AT_PRE_RESTORE = VSS_WRITER_STATE.FAILED_AT_PRE_RESTORE; pub const VSS_WS_FAILED_AT_POST_RESTORE = VSS_WRITER_STATE.FAILED_AT_POST_RESTORE; pub const VSS_WS_FAILED_AT_BACKUPSHUTDOWN = VSS_WRITER_STATE.FAILED_AT_BACKUPSHUTDOWN; pub const VSS_WS_COUNT = VSS_WRITER_STATE.COUNT; pub const VSS_BACKUP_TYPE = enum(i32) { UNDEFINED = 0, FULL = 1, INCREMENTAL = 2, DIFFERENTIAL = 3, LOG = 4, COPY = 5, OTHER = 6, }; pub const VSS_BT_UNDEFINED = VSS_BACKUP_TYPE.UNDEFINED; pub const VSS_BT_FULL = VSS_BACKUP_TYPE.FULL; pub const VSS_BT_INCREMENTAL = VSS_BACKUP_TYPE.INCREMENTAL; pub const VSS_BT_DIFFERENTIAL = VSS_BACKUP_TYPE.DIFFERENTIAL; pub const VSS_BT_LOG = VSS_BACKUP_TYPE.LOG; pub const VSS_BT_COPY = VSS_BACKUP_TYPE.COPY; pub const VSS_BT_OTHER = VSS_BACKUP_TYPE.OTHER; pub const VSS_RESTORE_TYPE = enum(i32) { UNDEFINED = 0, BY_COPY = 1, IMPORT = 2, OTHER = 3, }; pub const VSS_RTYPE_UNDEFINED = VSS_RESTORE_TYPE.UNDEFINED; pub const VSS_RTYPE_BY_COPY = VSS_RESTORE_TYPE.BY_COPY; pub const VSS_RTYPE_IMPORT = VSS_RESTORE_TYPE.IMPORT; pub const VSS_RTYPE_OTHER = VSS_RESTORE_TYPE.OTHER; pub const VSS_ROLLFORWARD_TYPE = enum(i32) { UNDEFINED = 0, NONE = 1, ALL = 2, PARTIAL = 3, }; pub const VSS_RF_UNDEFINED = VSS_ROLLFORWARD_TYPE.UNDEFINED; pub const VSS_RF_NONE = VSS_ROLLFORWARD_TYPE.NONE; pub const VSS_RF_ALL = VSS_ROLLFORWARD_TYPE.ALL; pub const 
pub const VSS_BACKUP_TYPE = enum(i32) {
    UNDEFINED = 0,
    FULL = 1,
    INCREMENTAL = 2,
    DIFFERENTIAL = 3,
    LOG = 4,
    COPY = 5,
    OTHER = 6,
};
pub const VSS_BT_UNDEFINED = VSS_BACKUP_TYPE.UNDEFINED;
pub const VSS_BT_FULL = VSS_BACKUP_TYPE.FULL;
pub const VSS_BT_INCREMENTAL = VSS_BACKUP_TYPE.INCREMENTAL;
pub const VSS_BT_DIFFERENTIAL = VSS_BACKUP_TYPE.DIFFERENTIAL;
pub const VSS_BT_LOG = VSS_BACKUP_TYPE.LOG;
pub const VSS_BT_COPY = VSS_BACKUP_TYPE.COPY;
pub const VSS_BT_OTHER = VSS_BACKUP_TYPE.OTHER;

pub const VSS_RESTORE_TYPE = enum(i32) {
    UNDEFINED = 0,
    BY_COPY = 1,
    IMPORT = 2,
    OTHER = 3,
};
pub const VSS_RTYPE_UNDEFINED = VSS_RESTORE_TYPE.UNDEFINED;
pub const VSS_RTYPE_BY_COPY = VSS_RESTORE_TYPE.BY_COPY;
pub const VSS_RTYPE_IMPORT = VSS_RESTORE_TYPE.IMPORT;
pub const VSS_RTYPE_OTHER = VSS_RESTORE_TYPE.OTHER;

pub const VSS_ROLLFORWARD_TYPE = enum(i32) {
    UNDEFINED = 0,
    NONE = 1,
    ALL = 2,
    PARTIAL = 3,
};
pub const VSS_RF_UNDEFINED = VSS_ROLLFORWARD_TYPE.UNDEFINED;
pub const VSS_RF_NONE = VSS_ROLLFORWARD_TYPE.NONE;
pub const VSS_RF_ALL = VSS_ROLLFORWARD_TYPE.ALL;
pub const VSS_RF_PARTIAL = VSS_ROLLFORWARD_TYPE.PARTIAL;

pub const VSS_PROVIDER_TYPE = enum(i32) {
    UNKNOWN = 0,
    SYSTEM = 1,
    SOFTWARE = 2,
    HARDWARE = 3,
    FILESHARE = 4,
};
pub const VSS_PROV_UNKNOWN = VSS_PROVIDER_TYPE.UNKNOWN;
pub const VSS_PROV_SYSTEM = VSS_PROVIDER_TYPE.SYSTEM;
pub const VSS_PROV_SOFTWARE = VSS_PROVIDER_TYPE.SOFTWARE;
pub const VSS_PROV_HARDWARE = VSS_PROVIDER_TYPE.HARDWARE;
pub const VSS_PROV_FILESHARE = VSS_PROVIDER_TYPE.FILESHARE;

pub const VSS_APPLICATION_LEVEL = enum(i32) {
    UNKNOWN = 0,
    SYSTEM = 1,
    BACK_END = 2,
    FRONT_END = 3,
    SYSTEM_RM = 4,
    AUTO = -1,
};
pub const VSS_APP_UNKNOWN = VSS_APPLICATION_LEVEL.UNKNOWN;
pub const VSS_APP_SYSTEM = VSS_APPLICATION_LEVEL.SYSTEM;
pub const VSS_APP_BACK_END = VSS_APPLICATION_LEVEL.BACK_END;
pub const VSS_APP_FRONT_END = VSS_APPLICATION_LEVEL.FRONT_END;
pub const VSS_APP_SYSTEM_RM = VSS_APPLICATION_LEVEL.SYSTEM_RM;
pub const VSS_APP_AUTO = VSS_APPLICATION_LEVEL.AUTO;

pub const VSS_SNAPSHOT_COMPATIBILITY = enum(i32) {
    DEFRAG = 1,
    CONTENTINDEX = 2,
};
pub const VSS_SC_DISABLE_DEFRAG = VSS_SNAPSHOT_COMPATIBILITY.DEFRAG;
pub const VSS_SC_DISABLE_CONTENTINDEX = VSS_SNAPSHOT_COMPATIBILITY.CONTENTINDEX;

pub const VSS_SNAPSHOT_PROPERTY_ID = enum(i32) {
    UNKNOWN = 0,
    SNAPSHOT_ID = 1,
    SNAPSHOT_SET_ID = 2,
    SNAPSHOTS_COUNT = 3,
    SNAPSHOT_DEVICE = 4,
    ORIGINAL_VOLUME = 5,
    ORIGINATING_MACHINE = 6,
    SERVICE_MACHINE = 7,
    EXPOSED_NAME = 8,
    EXPOSED_PATH = 9,
    PROVIDER_ID = 10,
    SNAPSHOT_ATTRIBUTES = 11,
    CREATION_TIMESTAMP = 12,
    STATUS = 13,
};
pub const VSS_SPROPID_UNKNOWN = VSS_SNAPSHOT_PROPERTY_ID.UNKNOWN;
pub const VSS_SPROPID_SNAPSHOT_ID = VSS_SNAPSHOT_PROPERTY_ID.SNAPSHOT_ID;
pub const VSS_SPROPID_SNAPSHOT_SET_ID = VSS_SNAPSHOT_PROPERTY_ID.SNAPSHOT_SET_ID;
pub const VSS_SPROPID_SNAPSHOTS_COUNT = VSS_SNAPSHOT_PROPERTY_ID.SNAPSHOTS_COUNT;
pub const VSS_SPROPID_SNAPSHOT_DEVICE = VSS_SNAPSHOT_PROPERTY_ID.SNAPSHOT_DEVICE;
pub const VSS_SPROPID_ORIGINAL_VOLUME = VSS_SNAPSHOT_PROPERTY_ID.ORIGINAL_VOLUME;
pub const VSS_SPROPID_ORIGINATING_MACHINE = VSS_SNAPSHOT_PROPERTY_ID.ORIGINATING_MACHINE;
pub const VSS_SPROPID_SERVICE_MACHINE = VSS_SNAPSHOT_PROPERTY_ID.SERVICE_MACHINE;
pub const VSS_SPROPID_EXPOSED_NAME = VSS_SNAPSHOT_PROPERTY_ID.EXPOSED_NAME;
pub const VSS_SPROPID_EXPOSED_PATH = VSS_SNAPSHOT_PROPERTY_ID.EXPOSED_PATH;
pub const VSS_SPROPID_PROVIDER_ID = VSS_SNAPSHOT_PROPERTY_ID.PROVIDER_ID;
pub const VSS_SPROPID_SNAPSHOT_ATTRIBUTES = VSS_SNAPSHOT_PROPERTY_ID.SNAPSHOT_ATTRIBUTES;
pub const VSS_SPROPID_CREATION_TIMESTAMP = VSS_SNAPSHOT_PROPERTY_ID.CREATION_TIMESTAMP;
pub const VSS_SPROPID_STATUS = VSS_SNAPSHOT_PROPERTY_ID.STATUS;

pub const VSS_FILE_SPEC_BACKUP_TYPE = enum(i32) {
    FULL_BACKUP_REQUIRED = 1,
    DIFFERENTIAL_BACKUP_REQUIRED = 2,
    INCREMENTAL_BACKUP_REQUIRED = 4,
    LOG_BACKUP_REQUIRED = 8,
    FULL_SNAPSHOT_REQUIRED = 256,
    DIFFERENTIAL_SNAPSHOT_REQUIRED = 512,
    INCREMENTAL_SNAPSHOT_REQUIRED = 1024,
    LOG_SNAPSHOT_REQUIRED = 2048,
    CREATED_DURING_BACKUP = 65536,
    ALL_BACKUP_REQUIRED = 15,
    ALL_SNAPSHOT_REQUIRED = 3840,
};
pub const VSS_FSBT_FULL_BACKUP_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.FULL_BACKUP_REQUIRED;
pub const VSS_FSBT_DIFFERENTIAL_BACKUP_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.DIFFERENTIAL_BACKUP_REQUIRED;
pub const VSS_FSBT_INCREMENTAL_BACKUP_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.INCREMENTAL_BACKUP_REQUIRED;
pub const VSS_FSBT_LOG_BACKUP_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.LOG_BACKUP_REQUIRED;
pub const VSS_FSBT_FULL_SNAPSHOT_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.FULL_SNAPSHOT_REQUIRED;
pub const VSS_FSBT_DIFFERENTIAL_SNAPSHOT_REQUIRED =
VSS_FILE_SPEC_BACKUP_TYPE.DIFFERENTIAL_SNAPSHOT_REQUIRED; pub const VSS_FSBT_INCREMENTAL_SNAPSHOT_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.INCREMENTAL_SNAPSHOT_REQUIRED; pub const VSS_FSBT_LOG_SNAPSHOT_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.LOG_SNAPSHOT_REQUIRED; pub const VSS_FSBT_CREATED_DURING_BACKUP = VSS_FILE_SPEC_BACKUP_TYPE.CREATED_DURING_BACKUP; pub const VSS_FSBT_ALL_BACKUP_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.ALL_BACKUP_REQUIRED; pub const VSS_FSBT_ALL_SNAPSHOT_REQUIRED = VSS_FILE_SPEC_BACKUP_TYPE.ALL_SNAPSHOT_REQUIRED; pub const VSS_BACKUP_SCHEMA = enum(i32) { UNDEFINED = 0, DIFFERENTIAL = 1, INCREMENTAL = 2, EXCLUSIVE_INCREMENTAL_DIFFERENTIAL = 4, LOG = 8, COPY = 16, TIMESTAMPED = 32, LAST_MODIFY = 64, LSN = 128, WRITER_SUPPORTS_NEW_TARGET = 256, WRITER_SUPPORTS_RESTORE_WITH_MOVE = 512, INDEPENDENT_SYSTEM_STATE = 1024, ROLLFORWARD_RESTORE = 4096, RESTORE_RENAME = 8192, AUTHORITATIVE_RESTORE = 16384, WRITER_SUPPORTS_PARALLEL_RESTORES = 32768, }; pub const VSS_BS_UNDEFINED = VSS_BACKUP_SCHEMA.UNDEFINED; pub const VSS_BS_DIFFERENTIAL = VSS_BACKUP_SCHEMA.DIFFERENTIAL; pub const VSS_BS_INCREMENTAL = VSS_BACKUP_SCHEMA.INCREMENTAL; pub const VSS_BS_EXCLUSIVE_INCREMENTAL_DIFFERENTIAL = VSS_BACKUP_SCHEMA.EXCLUSIVE_INCREMENTAL_DIFFERENTIAL; pub const VSS_BS_LOG = VSS_BACKUP_SCHEMA.LOG; pub const VSS_BS_COPY = VSS_BACKUP_SCHEMA.COPY; pub const VSS_BS_TIMESTAMPED = VSS_BACKUP_SCHEMA.TIMESTAMPED; pub const VSS_BS_LAST_MODIFY = VSS_BACKUP_SCHEMA.LAST_MODIFY; pub const VSS_BS_LSN = VSS_BACKUP_SCHEMA.LSN; pub const VSS_BS_WRITER_SUPPORTS_NEW_TARGET = VSS_BACKUP_SCHEMA.WRITER_SUPPORTS_NEW_TARGET; pub const VSS_BS_WRITER_SUPPORTS_RESTORE_WITH_MOVE = VSS_BACKUP_SCHEMA.WRITER_SUPPORTS_RESTORE_WITH_MOVE; pub const VSS_BS_INDEPENDENT_SYSTEM_STATE = VSS_BACKUP_SCHEMA.INDEPENDENT_SYSTEM_STATE; pub const VSS_BS_ROLLFORWARD_RESTORE = VSS_BACKUP_SCHEMA.ROLLFORWARD_RESTORE; pub const VSS_BS_RESTORE_RENAME = VSS_BACKUP_SCHEMA.RESTORE_RENAME; pub const VSS_BS_AUTHORITATIVE_RESTORE = VSS_BACKUP_SCHEMA.AUTHORITATIVE_RESTORE; pub const VSS_BS_WRITER_SUPPORTS_PARALLEL_RESTORES = VSS_BACKUP_SCHEMA.WRITER_SUPPORTS_PARALLEL_RESTORES; pub const VSS_SNAPSHOT_PROP = extern struct { m_SnapshotId: Guid, m_SnapshotSetId: Guid, m_lSnapshotsCount: i32, m_pwszSnapshotDeviceObject: ?*u16, m_pwszOriginalVolumeName: ?*u16, m_pwszOriginatingMachine: ?*u16, m_pwszServiceMachine: ?*u16, m_pwszExposedName: ?*u16, m_pwszExposedPath: ?*u16, m_ProviderId: Guid, m_lSnapshotAttributes: i32, m_tsCreationTimestamp: i64, m_eStatus: VSS_SNAPSHOT_STATE, }; pub const VSS_PROVIDER_PROP = extern struct { m_ProviderId: Guid, m_pwszProviderName: ?*u16, m_eProviderType: VSS_PROVIDER_TYPE, m_pwszProviderVersion: ?*u16, m_ProviderVersionId: Guid, m_ClassId: Guid, }; pub const VSS_OBJECT_UNION = extern union { Snap: VSS_SNAPSHOT_PROP, Prov: VSS_PROVIDER_PROP, }; pub const VSS_OBJECT_PROP = extern struct { Type: VSS_OBJECT_TYPE, Obj: VSS_OBJECT_UNION, }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IVssEnumObject_Value = @import("../zig.zig").Guid.initString("ae1c7110-2f60-11d3-8a39-00c04f72d8e3"); pub const IID_IVssEnumObject = &IID_IVssEnumObject_Value; pub const IVssEnumObject = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Next: fn( self: *const IVssEnumObject, celt: u32, rgelt: [*]VSS_OBJECT_PROP, pceltFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Skip: fn( self: *const IVssEnumObject, celt: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const 
IVssEnumObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clone: fn( self: *const IVssEnumObject, ppenum: ?*?*IVssEnumObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumObject_Next(self: *const T, celt: u32, rgelt: [*]VSS_OBJECT_PROP, pceltFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumObject.VTable, self.vtable).Next(@ptrCast(*const IVssEnumObject, self), celt, rgelt, pceltFetched); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumObject_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumObject.VTable, self.vtable).Skip(@ptrCast(*const IVssEnumObject, self), celt); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumObject_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumObject.VTable, self.vtable).Reset(@ptrCast(*const IVssEnumObject, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumObject_Clone(self: *const T, ppenum: ?*?*IVssEnumObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumObject.VTable, self.vtable).Clone(@ptrCast(*const IVssEnumObject, self), ppenum); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IVssAsync_Value = @import("../zig.zig").Guid.initString("507c37b4-cf5b-4e95-b0af-14eb9767467e"); pub const IID_IVssAsync = &IID_IVssAsync_Value; pub const IVssAsync = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Cancel: fn( self: *const IVssAsync, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Wait: fn( self: *const IVssAsync, dwMilliseconds: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryStatus: fn( self: *const IVssAsync, pHrResult: ?*HRESULT, pReserved: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAsync_Cancel(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAsync.VTable, self.vtable).Cancel(@ptrCast(*const IVssAsync, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAsync_Wait(self: *const T, dwMilliseconds: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAsync.VTable, self.vtable).Wait(@ptrCast(*const IVssAsync, self), dwMilliseconds); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAsync_QueryStatus(self: *const T, pHrResult: ?*HRESULT, pReserved: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAsync.VTable, self.vtable).QueryStatus(@ptrCast(*const IVssAsync, self), pHrResult, pReserved); } };} pub usingnamespace MethodMixin(@This()); }; pub const VSS_USAGE_TYPE = enum(i32) { UNDEFINED = 0, BOOTABLESYSTEMSTATE = 1, SYSTEMSERVICE = 2, USERDATA = 3, OTHER = 4, }; pub const VSS_UT_UNDEFINED = VSS_USAGE_TYPE.UNDEFINED; pub const VSS_UT_BOOTABLESYSTEMSTATE = VSS_USAGE_TYPE.BOOTABLESYSTEMSTATE; pub const VSS_UT_SYSTEMSERVICE = VSS_USAGE_TYPE.SYSTEMSERVICE; pub const VSS_UT_USERDATA = 
VSS_USAGE_TYPE.USERDATA;
pub const VSS_UT_OTHER = VSS_USAGE_TYPE.OTHER;

pub const VSS_SOURCE_TYPE = enum(i32) {
    UNDEFINED = 0,
    TRANSACTEDDB = 1,
    NONTRANSACTEDDB = 2,
    OTHER = 3,
};
pub const VSS_ST_UNDEFINED = VSS_SOURCE_TYPE.UNDEFINED;
pub const VSS_ST_TRANSACTEDDB = VSS_SOURCE_TYPE.TRANSACTEDDB;
pub const VSS_ST_NONTRANSACTEDDB = VSS_SOURCE_TYPE.NONTRANSACTEDDB;
pub const VSS_ST_OTHER = VSS_SOURCE_TYPE.OTHER;

pub const VSS_RESTOREMETHOD_ENUM = enum(i32) {
    UNDEFINED = 0,
    RESTORE_IF_NOT_THERE = 1,
    RESTORE_IF_CAN_REPLACE = 2,
    STOP_RESTORE_START = 3,
    RESTORE_TO_ALTERNATE_LOCATION = 4,
    RESTORE_AT_REBOOT = 5,
    RESTORE_AT_REBOOT_IF_CANNOT_REPLACE = 6,
    CUSTOM = 7,
    RESTORE_STOP_START = 8,
};
pub const VSS_RME_UNDEFINED = VSS_RESTOREMETHOD_ENUM.UNDEFINED;
pub const VSS_RME_RESTORE_IF_NOT_THERE = VSS_RESTOREMETHOD_ENUM.RESTORE_IF_NOT_THERE;
pub const VSS_RME_RESTORE_IF_CAN_REPLACE = VSS_RESTOREMETHOD_ENUM.RESTORE_IF_CAN_REPLACE;
pub const VSS_RME_STOP_RESTORE_START = VSS_RESTOREMETHOD_ENUM.STOP_RESTORE_START;
pub const VSS_RME_RESTORE_TO_ALTERNATE_LOCATION = VSS_RESTOREMETHOD_ENUM.RESTORE_TO_ALTERNATE_LOCATION;
pub const VSS_RME_RESTORE_AT_REBOOT = VSS_RESTOREMETHOD_ENUM.RESTORE_AT_REBOOT;
pub const VSS_RME_RESTORE_AT_REBOOT_IF_CANNOT_REPLACE = VSS_RESTOREMETHOD_ENUM.RESTORE_AT_REBOOT_IF_CANNOT_REPLACE;
pub const VSS_RME_CUSTOM = VSS_RESTOREMETHOD_ENUM.CUSTOM;
pub const VSS_RME_RESTORE_STOP_START = VSS_RESTOREMETHOD_ENUM.RESTORE_STOP_START;

pub const VSS_WRITERRESTORE_ENUM = enum(i32) {
    UNDEFINED = 0,
    NEVER = 1,
    IF_REPLACE_FAILS = 2,
    ALWAYS = 3,
};
pub const VSS_WRE_UNDEFINED = VSS_WRITERRESTORE_ENUM.UNDEFINED;
pub const VSS_WRE_NEVER = VSS_WRITERRESTORE_ENUM.NEVER;
pub const VSS_WRE_IF_REPLACE_FAILS = VSS_WRITERRESTORE_ENUM.IF_REPLACE_FAILS;
pub const VSS_WRE_ALWAYS = VSS_WRITERRESTORE_ENUM.ALWAYS;

pub const VSS_COMPONENT_TYPE = enum(i32) {
    UNDEFINED = 0,
    DATABASE = 1,
    FILEGROUP = 2,
};
pub const VSS_CT_UNDEFINED = VSS_COMPONENT_TYPE.UNDEFINED;
pub const VSS_CT_DATABASE = VSS_COMPONENT_TYPE.DATABASE;
pub const VSS_CT_FILEGROUP = VSS_COMPONENT_TYPE.FILEGROUP;

pub const VSS_ALTERNATE_WRITER_STATE = enum(i32) {
    UNDEFINED = 0,
    NO_ALTERNATE_WRITER = 1,
    ALTERNATE_WRITER_EXISTS = 2,
    THIS_IS_ALTERNATE_WRITER = 3,
};
pub const VSS_AWS_UNDEFINED = VSS_ALTERNATE_WRITER_STATE.UNDEFINED;
pub const VSS_AWS_NO_ALTERNATE_WRITER = VSS_ALTERNATE_WRITER_STATE.NO_ALTERNATE_WRITER;
pub const VSS_AWS_ALTERNATE_WRITER_EXISTS = VSS_ALTERNATE_WRITER_STATE.ALTERNATE_WRITER_EXISTS;
pub const VSS_AWS_THIS_IS_ALTERNATE_WRITER = VSS_ALTERNATE_WRITER_STATE.THIS_IS_ALTERNATE_WRITER;

pub const VSS_SUBSCRIBE_MASK = enum(i32) {
    POST_SNAPSHOT_FLAG = 1,
    BACKUP_EVENTS_FLAG = 2,
    RESTORE_EVENTS_FLAG = 4,
    IO_THROTTLING_FLAG = 8,
    ALL_FLAGS = -1,
};
pub const VSS_SM_POST_SNAPSHOT_FLAG = VSS_SUBSCRIBE_MASK.POST_SNAPSHOT_FLAG;
pub const VSS_SM_BACKUP_EVENTS_FLAG = VSS_SUBSCRIBE_MASK.BACKUP_EVENTS_FLAG;
pub const VSS_SM_RESTORE_EVENTS_FLAG = VSS_SUBSCRIBE_MASK.RESTORE_EVENTS_FLAG;
pub const VSS_SM_IO_THROTTLING_FLAG = VSS_SUBSCRIBE_MASK.IO_THROTTLING_FLAG;
pub const VSS_SM_ALL_FLAGS = VSS_SUBSCRIBE_MASK.ALL_FLAGS;

pub const VSS_RESTORE_TARGET = enum(i32) {
    UNDEFINED = 0,
    ORIGINAL = 1,
    ALTERNATE = 2,
    DIRECTED = 3,
    ORIGINAL_LOCATION = 4,
};
pub const VSS_RT_UNDEFINED = VSS_RESTORE_TARGET.UNDEFINED;
pub const VSS_RT_ORIGINAL = VSS_RESTORE_TARGET.ORIGINAL;
pub const VSS_RT_ALTERNATE = VSS_RESTORE_TARGET.ALTERNATE;
pub const VSS_RT_DIRECTED = VSS_RESTORE_TARGET.DIRECTED;
pub const VSS_RT_ORIGINAL_LOCATION = VSS_RESTORE_TARGET.ORIGINAL_LOCATION;
pub
const VSS_FILE_RESTORE_STATUS = enum(i32) { UNDEFINED = 0, NONE = 1, ALL = 2, FAILED = 3, }; pub const VSS_RS_UNDEFINED = VSS_FILE_RESTORE_STATUS.UNDEFINED; pub const VSS_RS_NONE = VSS_FILE_RESTORE_STATUS.NONE; pub const VSS_RS_ALL = VSS_FILE_RESTORE_STATUS.ALL; pub const VSS_RS_FAILED = VSS_FILE_RESTORE_STATUS.FAILED; pub const VSS_COMPONENT_FLAGS = enum(i32) { BACKUP_RECOVERY = 1, APP_ROLLBACK_RECOVERY = 2, NOT_SYSTEM_STATE = 4, }; pub const VSS_CF_BACKUP_RECOVERY = VSS_COMPONENT_FLAGS.BACKUP_RECOVERY; pub const VSS_CF_APP_ROLLBACK_RECOVERY = VSS_COMPONENT_FLAGS.APP_ROLLBACK_RECOVERY; pub const VSS_CF_NOT_SYSTEM_STATE = VSS_COMPONENT_FLAGS.NOT_SYSTEM_STATE; pub const IVssWMFiledesc = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetPath: fn( self: *const IVssWMFiledesc, pbstrPath: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFilespec: fn( self: *const IVssWMFiledesc, pbstrFilespec: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRecursive: fn( self: *const IVssWMFiledesc, pbRecursive: ?*bool, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAlternateLocation: fn( self: *const IVssWMFiledesc, pbstrAlternateLocation: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetBackupTypeMask: fn( self: *const IVssWMFiledesc, pdwTypeMask: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMFiledesc_GetPath(self: *const T, pbstrPath: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMFiledesc.VTable, self.vtable).GetPath(@ptrCast(*const IVssWMFiledesc, self), pbstrPath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMFiledesc_GetFilespec(self: *const T, pbstrFilespec: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMFiledesc.VTable, self.vtable).GetFilespec(@ptrCast(*const IVssWMFiledesc, self), pbstrFilespec); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMFiledesc_GetRecursive(self: *const T, pbRecursive: ?*bool) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMFiledesc.VTable, self.vtable).GetRecursive(@ptrCast(*const IVssWMFiledesc, self), pbRecursive); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMFiledesc_GetAlternateLocation(self: *const T, pbstrAlternateLocation: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMFiledesc.VTable, self.vtable).GetAlternateLocation(@ptrCast(*const IVssWMFiledesc, self), pbstrAlternateLocation); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMFiledesc_GetBackupTypeMask(self: *const T, pdwTypeMask: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMFiledesc.VTable, self.vtable).GetBackupTypeMask(@ptrCast(*const IVssWMFiledesc, self), pdwTypeMask); } };} pub usingnamespace MethodMixin(@This()); }; pub const IVssWMDependency = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetWriterId: fn( self: *const IVssWMDependency, pWriterId: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetLogicalPath: fn( self: *const IVssWMDependency, pbstrLogicalPath: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetComponentName: fn( self: *const IVssWMDependency, 
pbstrComponentName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMDependency_GetWriterId(self: *const T, pWriterId: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMDependency.VTable, self.vtable).GetWriterId(@ptrCast(*const IVssWMDependency, self), pWriterId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMDependency_GetLogicalPath(self: *const T, pbstrLogicalPath: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMDependency.VTable, self.vtable).GetLogicalPath(@ptrCast(*const IVssWMDependency, self), pbstrLogicalPath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWMDependency_GetComponentName(self: *const T, pbstrComponentName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWMDependency.VTable, self.vtable).GetComponentName(@ptrCast(*const IVssWMDependency, self), pbstrComponentName); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IVssComponent_Value = @import("../zig.zig").Guid.initString("d2c72c96-c121-4518-b627-e5a93d010ead"); pub const IID_IVssComponent = &IID_IVssComponent_Value; pub const IVssComponent = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetLogicalPath: fn( self: *const IVssComponent, pbstrPath: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetComponentType: fn( self: *const IVssComponent, pct: ?*VSS_COMPONENT_TYPE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetComponentName: fn( self: *const IVssComponent, pbstrName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetBackupSucceeded: fn( self: *const IVssComponent, pbSucceeded: ?*bool, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAlternateLocationMappingCount: fn( self: *const IVssComponent, pcMappings: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAlternateLocationMapping: fn( self: *const IVssComponent, iMapping: u32, ppFiledesc: ?*?*IVssWMFiledesc, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetBackupMetadata: fn( self: *const IVssComponent, wszData: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetBackupMetadata: fn( self: *const IVssComponent, pbstrData: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddPartialFile: fn( self: *const IVssComponent, wszPath: ?[*:0]const u16, wszFilename: ?[*:0]const u16, wszRanges: ?[*:0]const u16, wszMetadata: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPartialFileCount: fn( self: *const IVssComponent, pcPartialFiles: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPartialFile: fn( self: *const IVssComponent, iPartialFile: u32, pbstrPath: ?*?BSTR, pbstrFilename: ?*?BSTR, pbstrRange: ?*?BSTR, pbstrMetadata: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsSelectedForRestore: fn( self: *const IVssComponent, pbSelectedForRestore: ?*bool, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAdditionalRestores: fn( self: *const IVssComponent, pbAdditionalRestores: ?*bool, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetNewTargetCount: fn( self: *const IVssComponent, pcNewTarget: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetNewTarget: fn( self: *const IVssComponent, 
iNewTarget: u32, ppFiledesc: ?*?*IVssWMFiledesc, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddDirectedTarget: fn( self: *const IVssComponent, wszSourcePath: ?[*:0]const u16, wszSourceFilename: ?[*:0]const u16, wszSourceRangeList: ?[*:0]const u16, wszDestinationPath: ?[*:0]const u16, wszDestinationFilename: ?[*:0]const u16, wszDestinationRangeList: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDirectedTargetCount: fn( self: *const IVssComponent, pcDirectedTarget: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDirectedTarget: fn( self: *const IVssComponent, iDirectedTarget: u32, pbstrSourcePath: ?*?BSTR, pbstrSourceFileName: ?*?BSTR, pbstrSourceRangeList: ?*?BSTR, pbstrDestinationPath: ?*?BSTR, pbstrDestinationFilename: ?*?BSTR, pbstrDestinationRangeList: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRestoreMetadata: fn( self: *const IVssComponent, wszRestoreMetadata: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRestoreMetadata: fn( self: *const IVssComponent, pbstrRestoreMetadata: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRestoreTarget: fn( self: *const IVssComponent, target: VSS_RESTORE_TARGET, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRestoreTarget: fn( self: *const IVssComponent, pTarget: ?*VSS_RESTORE_TARGET, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPreRestoreFailureMsg: fn( self: *const IVssComponent, wszPreRestoreFailureMsg: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPreRestoreFailureMsg: fn( self: *const IVssComponent, pbstrPreRestoreFailureMsg: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPostRestoreFailureMsg: fn( self: *const IVssComponent, wszPostRestoreFailureMsg: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPostRestoreFailureMsg: fn( self: *const IVssComponent, pbstrPostRestoreFailureMsg: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetBackupStamp: fn( self: *const IVssComponent, wszBackupStamp: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetBackupStamp: fn( self: *const IVssComponent, pbstrBackupStamp: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPreviousBackupStamp: fn( self: *const IVssComponent, pbstrBackupStamp: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetBackupOptions: fn( self: *const IVssComponent, pbstrBackupOptions: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRestoreOptions: fn( self: *const IVssComponent, pbstrRestoreOptions: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRestoreSubcomponentCount: fn( self: *const IVssComponent, pcRestoreSubcomponent: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRestoreSubcomponent: fn( self: *const IVssComponent, iComponent: u32, pbstrLogicalPath: ?*?BSTR, pbstrComponentName: ?*?BSTR, pbRepair: ?*bool, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFileRestoreStatus: fn( self: *const IVssComponent, pStatus: ?*VSS_FILE_RESTORE_STATUS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddDifferencedFilesByLastModifyTime: fn( self: *const IVssComponent, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: BOOL, ftLastModifyTime: FILETIME, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddDifferencedFilesByLastModifyLSN: fn( self: *const IVssComponent, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: 
BOOL, bstrLsnString: ?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDifferencedFilesCount: fn( self: *const IVssComponent, pcDifferencedFiles: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDifferencedFile: fn( self: *const IVssComponent, iDifferencedFile: u32, pbstrPath: ?*?BSTR, pbstrFilespec: ?*?BSTR, pbRecursive: ?*BOOL, pbstrLsnString: ?*?BSTR, pftLastModifyTime: ?*FILETIME, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetLogicalPath(self: *const T, pbstrPath: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetLogicalPath(@ptrCast(*const IVssComponent, self), pbstrPath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetComponentType(self: *const T, pct: ?*VSS_COMPONENT_TYPE) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetComponentType(@ptrCast(*const IVssComponent, self), pct); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetComponentName(self: *const T, pbstrName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetComponentName(@ptrCast(*const IVssComponent, self), pbstrName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetBackupSucceeded(self: *const T, pbSucceeded: ?*bool) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetBackupSucceeded(@ptrCast(*const IVssComponent, self), pbSucceeded); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetAlternateLocationMappingCount(self: *const T, pcMappings: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetAlternateLocationMappingCount(@ptrCast(*const IVssComponent, self), pcMappings); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetAlternateLocationMapping(self: *const T, iMapping: u32, ppFiledesc: ?*?*IVssWMFiledesc) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetAlternateLocationMapping(@ptrCast(*const IVssComponent, self), iMapping, ppFiledesc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_SetBackupMetadata(self: *const T, wszData: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).SetBackupMetadata(@ptrCast(*const IVssComponent, self), wszData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetBackupMetadata(self: *const T, pbstrData: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetBackupMetadata(@ptrCast(*const IVssComponent, self), pbstrData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_AddPartialFile(self: *const T, wszPath: ?[*:0]const u16, wszFilename: ?[*:0]const u16, wszRanges: ?[*:0]const u16, wszMetadata: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).AddPartialFile(@ptrCast(*const IVssComponent, self), wszPath, 
wszFilename, wszRanges, wszMetadata); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetPartialFileCount(self: *const T, pcPartialFiles: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetPartialFileCount(@ptrCast(*const IVssComponent, self), pcPartialFiles); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetPartialFile(self: *const T, iPartialFile: u32, pbstrPath: ?*?BSTR, pbstrFilename: ?*?BSTR, pbstrRange: ?*?BSTR, pbstrMetadata: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetPartialFile(@ptrCast(*const IVssComponent, self), iPartialFile, pbstrPath, pbstrFilename, pbstrRange, pbstrMetadata); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_IsSelectedForRestore(self: *const T, pbSelectedForRestore: ?*bool) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).IsSelectedForRestore(@ptrCast(*const IVssComponent, self), pbSelectedForRestore); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetAdditionalRestores(self: *const T, pbAdditionalRestores: ?*bool) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetAdditionalRestores(@ptrCast(*const IVssComponent, self), pbAdditionalRestores); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetNewTargetCount(self: *const T, pcNewTarget: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetNewTargetCount(@ptrCast(*const IVssComponent, self), pcNewTarget); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetNewTarget(self: *const T, iNewTarget: u32, ppFiledesc: ?*?*IVssWMFiledesc) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetNewTarget(@ptrCast(*const IVssComponent, self), iNewTarget, ppFiledesc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_AddDirectedTarget(self: *const T, wszSourcePath: ?[*:0]const u16, wszSourceFilename: ?[*:0]const u16, wszSourceRangeList: ?[*:0]const u16, wszDestinationPath: ?[*:0]const u16, wszDestinationFilename: ?[*:0]const u16, wszDestinationRangeList: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).AddDirectedTarget(@ptrCast(*const IVssComponent, self), wszSourcePath, wszSourceFilename, wszSourceRangeList, wszDestinationPath, wszDestinationFilename, wszDestinationRangeList); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetDirectedTargetCount(self: *const T, pcDirectedTarget: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetDirectedTargetCount(@ptrCast(*const IVssComponent, self), pcDirectedTarget); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetDirectedTarget(self: *const T, iDirectedTarget: u32, pbstrSourcePath: ?*?BSTR, pbstrSourceFileName: ?*?BSTR, pbstrSourceRangeList: ?*?BSTR, pbstrDestinationPath: ?*?BSTR, pbstrDestinationFilename: ?*?BSTR, pbstrDestinationRangeList: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetDirectedTarget(@ptrCast(*const 
IVssComponent, self), iDirectedTarget, pbstrSourcePath, pbstrSourceFileName, pbstrSourceRangeList, pbstrDestinationPath, pbstrDestinationFilename, pbstrDestinationRangeList); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_SetRestoreMetadata(self: *const T, wszRestoreMetadata: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).SetRestoreMetadata(@ptrCast(*const IVssComponent, self), wszRestoreMetadata); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetRestoreMetadata(self: *const T, pbstrRestoreMetadata: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetRestoreMetadata(@ptrCast(*const IVssComponent, self), pbstrRestoreMetadata); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_SetRestoreTarget(self: *const T, target: VSS_RESTORE_TARGET) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).SetRestoreTarget(@ptrCast(*const IVssComponent, self), target); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetRestoreTarget(self: *const T, pTarget: ?*VSS_RESTORE_TARGET) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetRestoreTarget(@ptrCast(*const IVssComponent, self), pTarget); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_SetPreRestoreFailureMsg(self: *const T, wszPreRestoreFailureMsg: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).SetPreRestoreFailureMsg(@ptrCast(*const IVssComponent, self), wszPreRestoreFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetPreRestoreFailureMsg(self: *const T, pbstrPreRestoreFailureMsg: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetPreRestoreFailureMsg(@ptrCast(*const IVssComponent, self), pbstrPreRestoreFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_SetPostRestoreFailureMsg(self: *const T, wszPostRestoreFailureMsg: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).SetPostRestoreFailureMsg(@ptrCast(*const IVssComponent, self), wszPostRestoreFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetPostRestoreFailureMsg(self: *const T, pbstrPostRestoreFailureMsg: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetPostRestoreFailureMsg(@ptrCast(*const IVssComponent, self), pbstrPostRestoreFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_SetBackupStamp(self: *const T, wszBackupStamp: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).SetBackupStamp(@ptrCast(*const IVssComponent, self), wszBackupStamp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetBackupStamp(self: *const T, pbstrBackupStamp: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetBackupStamp(@ptrCast(*const IVssComponent, self), pbstrBackupStamp); } // NOTE: method is 
namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetPreviousBackupStamp(self: *const T, pbstrBackupStamp: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetPreviousBackupStamp(@ptrCast(*const IVssComponent, self), pbstrBackupStamp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetBackupOptions(self: *const T, pbstrBackupOptions: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetBackupOptions(@ptrCast(*const IVssComponent, self), pbstrBackupOptions); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetRestoreOptions(self: *const T, pbstrRestoreOptions: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetRestoreOptions(@ptrCast(*const IVssComponent, self), pbstrRestoreOptions); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetRestoreSubcomponentCount(self: *const T, pcRestoreSubcomponent: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetRestoreSubcomponentCount(@ptrCast(*const IVssComponent, self), pcRestoreSubcomponent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetRestoreSubcomponent(self: *const T, iComponent: u32, pbstrLogicalPath: ?*?BSTR, pbstrComponentName: ?*?BSTR, pbRepair: ?*bool) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetRestoreSubcomponent(@ptrCast(*const IVssComponent, self), iComponent, pbstrLogicalPath, pbstrComponentName, pbRepair); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetFileRestoreStatus(self: *const T, pStatus: ?*VSS_FILE_RESTORE_STATUS) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetFileRestoreStatus(@ptrCast(*const IVssComponent, self), pStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_AddDifferencedFilesByLastModifyTime(self: *const T, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: BOOL, ftLastModifyTime: FILETIME) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).AddDifferencedFilesByLastModifyTime(@ptrCast(*const IVssComponent, self), wszPath, wszFilespec, bRecursive, ftLastModifyTime); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_AddDifferencedFilesByLastModifyLSN(self: *const T, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: BOOL, bstrLsnString: ?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).AddDifferencedFilesByLastModifyLSN(@ptrCast(*const IVssComponent, self), wszPath, wszFilespec, bRecursive, bstrLsnString); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetDifferencedFilesCount(self: *const T, pcDifferencedFiles: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetDifferencedFilesCount(@ptrCast(*const IVssComponent, self), pcDifferencedFiles); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponent_GetDifferencedFile(self: *const T, iDifferencedFile: u32, pbstrPath: ?*?BSTR, pbstrFilespec: ?*?BSTR, pbRecursive: 
?*BOOL, pbstrLsnString: ?*?BSTR, pftLastModifyTime: ?*FILETIME) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponent.VTable, self.vtable).GetDifferencedFile(@ptrCast(*const IVssComponent, self), iDifferencedFile, pbstrPath, pbstrFilespec, pbRecursive, pbstrLsnString, pftLastModifyTime); } };} pub usingnamespace MethodMixin(@This()); }; pub const IVssWriterComponents = extern struct { pub const VTable = extern struct { GetComponentCount: fn( self: *const IVssWriterComponents, pcComponents: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetWriterInfo: fn( self: *const IVssWriterComponents, pidInstance: ?*Guid, pidWriter: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetComponent: fn( self: *const IVssWriterComponents, iComponent: u32, ppComponent: ?*?*IVssComponent, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterComponents_GetComponentCount(self: *const T, pcComponents: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterComponents.VTable, self.vtable).GetComponentCount(@ptrCast(*const IVssWriterComponents, self), pcComponents); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterComponents_GetWriterInfo(self: *const T, pidInstance: ?*Guid, pidWriter: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterComponents.VTable, self.vtable).GetWriterInfo(@ptrCast(*const IVssWriterComponents, self), pidInstance, pidWriter); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterComponents_GetComponent(self: *const T, iComponent: u32, ppComponent: ?*?*IVssComponent) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterComponents.VTable, self.vtable).GetComponent(@ptrCast(*const IVssWriterComponents, self), iComponent, ppComponent); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IVssComponentEx_Value = @import("../zig.zig").Guid.initString("156c8b5e-f131-4bd7-9c97-d1923be7e1fa"); pub const IID_IVssComponentEx = &IID_IVssComponentEx_Value; pub const IVssComponentEx = extern struct { pub const VTable = extern struct { base: IVssComponent.VTable, SetPrepareForBackupFailureMsg: fn( self: *const IVssComponentEx, wszFailureMsg: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPostSnapshotFailureMsg: fn( self: *const IVssComponentEx, wszFailureMsg: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPrepareForBackupFailureMsg: fn( self: *const IVssComponentEx, pbstrFailureMsg: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPostSnapshotFailureMsg: fn( self: *const IVssComponentEx, pbstrFailureMsg: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAuthoritativeRestore: fn( self: *const IVssComponentEx, pbAuth: ?*bool, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRollForward: fn( self: *const IVssComponentEx, pRollType: ?*VSS_ROLLFORWARD_TYPE, pbstrPoint: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRestoreName: fn( self: *const IVssComponentEx, pbstrName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IVssComponent.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IVssComponentEx_SetPrepareForBackupFailureMsg(self: *const T, wszFailureMsg: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx.VTable, self.vtable).SetPrepareForBackupFailureMsg(@ptrCast(*const IVssComponentEx, self), wszFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx_SetPostSnapshotFailureMsg(self: *const T, wszFailureMsg: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx.VTable, self.vtable).SetPostSnapshotFailureMsg(@ptrCast(*const IVssComponentEx, self), wszFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx_GetPrepareForBackupFailureMsg(self: *const T, pbstrFailureMsg: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx.VTable, self.vtable).GetPrepareForBackupFailureMsg(@ptrCast(*const IVssComponentEx, self), pbstrFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx_GetPostSnapshotFailureMsg(self: *const T, pbstrFailureMsg: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx.VTable, self.vtable).GetPostSnapshotFailureMsg(@ptrCast(*const IVssComponentEx, self), pbstrFailureMsg); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx_GetAuthoritativeRestore(self: *const T, pbAuth: ?*bool) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx.VTable, self.vtable).GetAuthoritativeRestore(@ptrCast(*const IVssComponentEx, self), pbAuth); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx_GetRollForward(self: *const T, pRollType: ?*VSS_ROLLFORWARD_TYPE, pbstrPoint: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx.VTable, self.vtable).GetRollForward(@ptrCast(*const IVssComponentEx, self), pRollType, pbstrPoint); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx_GetRestoreName(self: *const T, pbstrName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx.VTable, self.vtable).GetRestoreName(@ptrCast(*const IVssComponentEx, self), pbstrName); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IVssComponentEx2_Value = @import("../zig.zig").Guid.initString("3b5be0f2-07a9-4e4b-bdd3-cfdc8e2c0d2d"); pub const IID_IVssComponentEx2 = &IID_IVssComponentEx2_Value; pub const IVssComponentEx2 = extern struct { pub const VTable = extern struct { base: IVssComponentEx.VTable, SetFailure: fn( self: *const IVssComponentEx2, hr: HRESULT, hrApplication: HRESULT, wszApplicationMessage: ?[*:0]const u16, dwReserved: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFailure: fn( self: *const IVssComponentEx2, phr: ?*HRESULT, phrApplication: ?*HRESULT, pbstrApplicationMessage: ?*?BSTR, pdwReserved: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IVssComponentEx.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx2_SetFailure(self: *const T, hr: HRESULT, hrApplication: HRESULT, wszApplicationMessage: ?[*:0]const u16, dwReserved: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx2.VTable, self.vtable).SetFailure(@ptrCast(*const IVssComponentEx2, self), hr, hrApplication, 
wszApplicationMessage, dwReserved); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssComponentEx2_GetFailure(self: *const T, phr: ?*HRESULT, phrApplication: ?*HRESULT, pbstrApplicationMessage: ?*?BSTR, pdwReserved: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssComponentEx2.VTable, self.vtable).GetFailure(@ptrCast(*const IVssComponentEx2, self), phr, phrApplication, pbstrApplicationMessage, pdwReserved); } };} pub usingnamespace MethodMixin(@This()); }; pub const IVssCreateWriterMetadata = extern struct { pub const VTable = extern struct { AddIncludeFiles: fn( self: *const IVssCreateWriterMetadata, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddExcludeFiles: fn( self: *const IVssCreateWriterMetadata, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddComponent: fn( self: *const IVssCreateWriterMetadata, ct: VSS_COMPONENT_TYPE, wszLogicalPath: ?[*:0]const u16, wszComponentName: ?[*:0]const u16, wszCaption: ?[*:0]const u16, pbIcon: ?*const u8, cbIcon: u32, bRestoreMetadata: u8, bNotifyOnBackupComplete: u8, bSelectable: u8, bSelectableForRestore: u8, dwComponentFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddDatabaseFiles: fn( self: *const IVssCreateWriterMetadata, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddDatabaseLogFiles: fn( self: *const IVssCreateWriterMetadata, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddFilesToFileGroup: fn( self: *const IVssCreateWriterMetadata, wszLogicalPath: ?[*:0]const u16, wszGroupName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16, dwBackupTypeMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRestoreMethod: fn( self: *const IVssCreateWriterMetadata, method: VSS_RESTOREMETHOD_ENUM, wszService: ?[*:0]const u16, wszUserProcedure: ?[*:0]const u16, writerRestore: VSS_WRITERRESTORE_ENUM, bRebootRequired: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddAlternateLocationMapping: fn( self: *const IVssCreateWriterMetadata, wszSourcePath: ?[*:0]const u16, wszSourceFilespec: ?[*:0]const u16, bRecursive: u8, wszDestination: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddComponentDependency: fn( self: *const IVssCreateWriterMetadata, wszForLogicalPath: ?[*:0]const u16, wszForComponentName: ?[*:0]const u16, onWriterId: Guid, wszOnLogicalPath: ?[*:0]const u16, wszOnComponentName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetBackupSchema: fn( self: *const IVssCreateWriterMetadata, dwSchemaMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDocument: fn( self: *const IVssCreateWriterMetadata, pDoc: ?*?*IXMLDOMDocument, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SaveAsXML: fn( self: *const IVssCreateWriterMetadata, pbstrXML: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { // NOTE: method is namespaced with 
interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddIncludeFiles(self: *const T, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddIncludeFiles(@ptrCast(*const IVssCreateWriterMetadata, self), wszPath, wszFilespec, bRecursive, wszAlternateLocation); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddExcludeFiles(self: *const T, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddExcludeFiles(@ptrCast(*const IVssCreateWriterMetadata, self), wszPath, wszFilespec, bRecursive); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddComponent(self: *const T, ct: VSS_COMPONENT_TYPE, wszLogicalPath: ?[*:0]const u16, wszComponentName: ?[*:0]const u16, wszCaption: ?[*:0]const u16, pbIcon: ?*const u8, cbIcon: u32, bRestoreMetadata: u8, bNotifyOnBackupComplete: u8, bSelectable: u8, bSelectableForRestore: u8, dwComponentFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddComponent(@ptrCast(*const IVssCreateWriterMetadata, self), ct, wszLogicalPath, wszComponentName, wszCaption, pbIcon, cbIcon, bRestoreMetadata, bNotifyOnBackupComplete, bSelectable, bSelectableForRestore, dwComponentFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddDatabaseFiles(self: *const T, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddDatabaseFiles(@ptrCast(*const IVssCreateWriterMetadata, self), wszLogicalPath, wszDatabaseName, wszPath, wszFilespec, dwBackupTypeMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddDatabaseLogFiles(self: *const T, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddDatabaseLogFiles(@ptrCast(*const IVssCreateWriterMetadata, self), wszLogicalPath, wszDatabaseName, wszPath, wszFilespec, dwBackupTypeMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddFilesToFileGroup(self: *const T, wszLogicalPath: ?[*:0]const u16, wszGroupName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16, dwBackupTypeMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddFilesToFileGroup(@ptrCast(*const IVssCreateWriterMetadata, self), wszLogicalPath, wszGroupName, wszPath, wszFilespec, bRecursive, wszAlternateLocation, dwBackupTypeMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_SetRestoreMethod(self: *const T, method: VSS_RESTOREMETHOD_ENUM, wszService: ?[*:0]const u16, wszUserProcedure: ?[*:0]const u16, writerRestore: VSS_WRITERRESTORE_ENUM, bRebootRequired: 
u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).SetRestoreMethod(@ptrCast(*const IVssCreateWriterMetadata, self), method, wszService, wszUserProcedure, writerRestore, bRebootRequired); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddAlternateLocationMapping(self: *const T, wszSourcePath: ?[*:0]const u16, wszSourceFilespec: ?[*:0]const u16, bRecursive: u8, wszDestination: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddAlternateLocationMapping(@ptrCast(*const IVssCreateWriterMetadata, self), wszSourcePath, wszSourceFilespec, bRecursive, wszDestination); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_AddComponentDependency(self: *const T, wszForLogicalPath: ?[*:0]const u16, wszForComponentName: ?[*:0]const u16, onWriterId: Guid, wszOnLogicalPath: ?[*:0]const u16, wszOnComponentName: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).AddComponentDependency(@ptrCast(*const IVssCreateWriterMetadata, self), wszForLogicalPath, wszForComponentName, onWriterId, wszOnLogicalPath, wszOnComponentName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_SetBackupSchema(self: *const T, dwSchemaMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).SetBackupSchema(@ptrCast(*const IVssCreateWriterMetadata, self), dwSchemaMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_GetDocument(self: *const T, pDoc: ?*?*IXMLDOMDocument) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).GetDocument(@ptrCast(*const IVssCreateWriterMetadata, self), pDoc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadata_SaveAsXML(self: *const T, pbstrXML: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadata.VTable, self.vtable).SaveAsXML(@ptrCast(*const IVssCreateWriterMetadata, self), pbstrXML); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IVssCreateWriterMetadataEx_Value = @import("../zig.zig").Guid.initString("9f21981d-d469-4349-b807-39e64e4674e1"); pub const IID_IVssCreateWriterMetadataEx = &IID_IVssCreateWriterMetadataEx_Value; pub const IVssCreateWriterMetadataEx = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AddDatabaseFiles: fn( self: *const IVssCreateWriterMetadataEx, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddDatabaseLogFiles: fn( self: *const IVssCreateWriterMetadataEx, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddFilesToFileGroup: fn( self: *const IVssCreateWriterMetadataEx, wszLogicalPath: ?[*:0]const u16, wszGroupName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16, dwBackupTypeMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRestoreMethod: fn( 
self: *const IVssCreateWriterMetadataEx, method: VSS_RESTOREMETHOD_ENUM, wszService: ?[*:0]const u16, wszUserProcedure: ?[*:0]const u16, writerRestore: VSS_WRITERRESTORE_ENUM, bRebootRequired: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddAlternateLocationMapping: fn( self: *const IVssCreateWriterMetadataEx, wszSourcePath: ?[*:0]const u16, wszSourceFilespec: ?[*:0]const u16, bRecursive: u8, wszDestination: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddComponentDependency: fn( self: *const IVssCreateWriterMetadataEx, wszForLogicalPath: ?[*:0]const u16, wszForComponentName: ?[*:0]const u16, onWriterId: Guid, wszOnLogicalPath: ?[*:0]const u16, wszOnComponentName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetBackupSchema: fn( self: *const IVssCreateWriterMetadataEx, dwSchemaMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDocument: fn( self: *const IVssCreateWriterMetadataEx, pDoc: ?*?*IXMLDOMDocument, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SaveAsXML: fn( self: *const IVssCreateWriterMetadataEx, pbstrXML: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryInterface: fn( self: *const IVssCreateWriterMetadataEx, riid: ?*const Guid, ppvObject: ?*?*c_void, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddRef: fn( self: *const IVssCreateWriterMetadataEx, ) callconv(@import("std").os.windows.WINAPI) u32, Release: fn( self: *const IVssCreateWriterMetadataEx, ) callconv(@import("std").os.windows.WINAPI) u32, AddExcludeFilesFromSnapshot: fn( self: *const IVssCreateWriterMetadataEx, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_AddDatabaseFiles(self: *const T, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).AddDatabaseFiles(@ptrCast(*const IVssCreateWriterMetadataEx, self), wszLogicalPath, wszDatabaseName, wszPath, wszFilespec, dwBackupTypeMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_AddDatabaseLogFiles(self: *const T, wszLogicalPath: ?[*:0]const u16, wszDatabaseName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, dwBackupTypeMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).AddDatabaseLogFiles(@ptrCast(*const IVssCreateWriterMetadataEx, self), wszLogicalPath, wszDatabaseName, wszPath, wszFilespec, dwBackupTypeMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_AddFilesToFileGroup(self: *const T, wszLogicalPath: ?[*:0]const u16, wszGroupName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16, dwBackupTypeMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).AddFilesToFileGroup(@ptrCast(*const IVssCreateWriterMetadataEx, self), wszLogicalPath, wszGroupName, wszPath, wszFilespec, bRecursive, wszAlternateLocation, 
dwBackupTypeMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_SetRestoreMethod(self: *const T, method: VSS_RESTOREMETHOD_ENUM, wszService: ?[*:0]const u16, wszUserProcedure: ?[*:0]const u16, writerRestore: VSS_WRITERRESTORE_ENUM, bRebootRequired: u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).SetRestoreMethod(@ptrCast(*const IVssCreateWriterMetadataEx, self), method, wszService, wszUserProcedure, writerRestore, bRebootRequired); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_AddAlternateLocationMapping(self: *const T, wszSourcePath: ?[*:0]const u16, wszSourceFilespec: ?[*:0]const u16, bRecursive: u8, wszDestination: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).AddAlternateLocationMapping(@ptrCast(*const IVssCreateWriterMetadataEx, self), wszSourcePath, wszSourceFilespec, bRecursive, wszDestination); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_AddComponentDependency(self: *const T, wszForLogicalPath: ?[*:0]const u16, wszForComponentName: ?[*:0]const u16, onWriterId: Guid, wszOnLogicalPath: ?[*:0]const u16, wszOnComponentName: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).AddComponentDependency(@ptrCast(*const IVssCreateWriterMetadataEx, self), wszForLogicalPath, wszForComponentName, onWriterId, wszOnLogicalPath, wszOnComponentName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_SetBackupSchema(self: *const T, dwSchemaMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).SetBackupSchema(@ptrCast(*const IVssCreateWriterMetadataEx, self), dwSchemaMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_GetDocument(self: *const T, pDoc: ?*?*IXMLDOMDocument) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).GetDocument(@ptrCast(*const IVssCreateWriterMetadataEx, self), pDoc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_SaveAsXML(self: *const T, pbstrXML: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).SaveAsXML(@ptrCast(*const IVssCreateWriterMetadataEx, self), pbstrXML); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_QueryInterface(self: *const T, riid: ?*const Guid, ppvObject: ?*?*c_void) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).QueryInterface(@ptrCast(*const IVssCreateWriterMetadataEx, self), riid, ppvObject); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_AddRef(self: *const T) callconv(.Inline) u32 { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).AddRef(@ptrCast(*const IVssCreateWriterMetadataEx, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_Release(self: *const T) callconv(.Inline) u32 { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, 
self.vtable).Release(@ptrCast(*const IVssCreateWriterMetadataEx, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateWriterMetadataEx_AddExcludeFilesFromSnapshot(self: *const T, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateWriterMetadataEx.VTable, self.vtable).AddExcludeFilesFromSnapshot(@ptrCast(*const IVssCreateWriterMetadataEx, self), wszPath, wszFilespec, bRecursive); } };} pub usingnamespace MethodMixin(@This()); }; pub const IVssWriterImpl = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Initialize: fn( self: *const IVssWriterImpl, writerId: Guid, wszWriterName: ?[*:0]const u16, wszWriterInstanceName: ?[*:0]const u16, dwMajorVersion: u32, dwMinorVersion: u32, ut: VSS_USAGE_TYPE, st: VSS_SOURCE_TYPE, nLevel: VSS_APPLICATION_LEVEL, dwTimeout: u32, aws: VSS_ALTERNATE_WRITER_STATE, bIOThrottlingOnly: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Subscribe: fn( self: *const IVssWriterImpl, dwSubscribeTimeout: u32, dwEventFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Unsubscribe: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Uninitialize: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) void, GetCurrentVolumeArray: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) ?*?PWSTR, GetCurrentVolumeCount: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) u32, GetSnapshotDeviceName: fn( self: *const IVssWriterImpl, wszOriginalVolume: ?[*:0]const u16, ppwszSnapshotDevice: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCurrentSnapshotSetId: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) Guid, GetContext: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) i32, GetCurrentLevel: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) VSS_APPLICATION_LEVEL, IsPathAffected: fn( self: *const IVssWriterImpl, wszPath: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) bool, IsBootableSystemStateBackedUp: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) bool, AreComponentsSelected: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) bool, GetBackupType: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) VSS_BACKUP_TYPE, GetRestoreType: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) VSS_RESTORE_TYPE, SetWriterFailure: fn( self: *const IVssWriterImpl, hr: HRESULT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsPartialFileSupportEnabled: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) bool, InstallAlternateWriter: fn( self: *const IVssWriterImpl, idWriter: Guid, clsid: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetIdentityInformation: fn( self: *const IVssWriterImpl, ) callconv(@import("std").os.windows.WINAPI) ?*IVssExamineWriterMetadata, SetWriterFailureEx: fn( self: *const IVssWriterImpl, hr: HRESULT, hrApplication: HRESULT, wszApplicationMessage: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSessionId: fn( self: *const IVssWriterImpl, idSession: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsWriterShuttingDown: fn( self: *const IVssWriterImpl, ) 
callconv(@import("std").os.windows.WINAPI) bool, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_Initialize(self: *const T, writerId: Guid, wszWriterName: ?[*:0]const u16, wszWriterInstanceName: ?[*:0]const u16, dwMajorVersion: u32, dwMinorVersion: u32, ut: VSS_USAGE_TYPE, st: VSS_SOURCE_TYPE, nLevel: VSS_APPLICATION_LEVEL, dwTimeout: u32, aws: VSS_ALTERNATE_WRITER_STATE, bIOThrottlingOnly: u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).Initialize(@ptrCast(*const IVssWriterImpl, self), writerId, wszWriterName, wszWriterInstanceName, dwMajorVersion, dwMinorVersion, ut, st, nLevel, dwTimeout, aws, bIOThrottlingOnly); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_Subscribe(self: *const T, dwSubscribeTimeout: u32, dwEventFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).Subscribe(@ptrCast(*const IVssWriterImpl, self), dwSubscribeTimeout, dwEventFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_Unsubscribe(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).Unsubscribe(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_Uninitialize(self: *const T) callconv(.Inline) void { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).Uninitialize(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetCurrentVolumeArray(self: *const T) callconv(.Inline) ?*?PWSTR { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetCurrentVolumeArray(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetCurrentVolumeCount(self: *const T) callconv(.Inline) u32 { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetCurrentVolumeCount(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetSnapshotDeviceName(self: *const T, wszOriginalVolume: ?[*:0]const u16, ppwszSnapshotDevice: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetSnapshotDeviceName(@ptrCast(*const IVssWriterImpl, self), wszOriginalVolume, ppwszSnapshotDevice); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetCurrentSnapshotSetId(self: *const T) callconv(.Inline) Guid { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetCurrentSnapshotSetId(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetContext(self: *const T) callconv(.Inline) i32 { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetContext(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetCurrentLevel(self: *const T) callconv(.Inline) VSS_APPLICATION_LEVEL { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetCurrentLevel(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is 
namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_IsPathAffected(self: *const T, wszPath: ?[*:0]const u16) callconv(.Inline) bool { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).IsPathAffected(@ptrCast(*const IVssWriterImpl, self), wszPath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_IsBootableSystemStateBackedUp(self: *const T) callconv(.Inline) bool { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).IsBootableSystemStateBackedUp(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_AreComponentsSelected(self: *const T) callconv(.Inline) bool { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).AreComponentsSelected(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetBackupType(self: *const T) callconv(.Inline) VSS_BACKUP_TYPE { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetBackupType(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetRestoreType(self: *const T) callconv(.Inline) VSS_RESTORE_TYPE { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetRestoreType(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_SetWriterFailure(self: *const T, hr: HRESULT) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).SetWriterFailure(@ptrCast(*const IVssWriterImpl, self), hr); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_IsPartialFileSupportEnabled(self: *const T) callconv(.Inline) bool { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).IsPartialFileSupportEnabled(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_InstallAlternateWriter(self: *const T, idWriter: Guid, clsid: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).InstallAlternateWriter(@ptrCast(*const IVssWriterImpl, self), idWriter, clsid); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetIdentityInformation(self: *const T) callconv(.Inline) ?*IVssExamineWriterMetadata { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetIdentityInformation(@ptrCast(*const IVssWriterImpl, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_SetWriterFailureEx(self: *const T, hr: HRESULT, hrApplication: HRESULT, wszApplicationMessage: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).SetWriterFailureEx(@ptrCast(*const IVssWriterImpl, self), hr, hrApplication, wszApplicationMessage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_GetSessionId(self: *const T, idSession: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssWriterImpl.VTable, self.vtable).GetSessionId(@ptrCast(*const IVssWriterImpl, self), idSession); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssWriterImpl_IsWriterShuttingDown(self: *const T) callconv(.Inline) bool { return 
@ptrCast(*const IVssWriterImpl.VTable, self.vtable).IsWriterShuttingDown(@ptrCast(*const IVssWriterImpl, self)); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IVssCreateExpressWriterMetadata_Value = @import("../zig.zig").Guid.initString("9c772e77-b26e-427f-92dd-c996f41ea5e3"); pub const IID_IVssCreateExpressWriterMetadata = &IID_IVssCreateExpressWriterMetadata_Value; pub const IVssCreateExpressWriterMetadata = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AddExcludeFiles: fn( self: *const IVssCreateExpressWriterMetadata, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddComponent: fn( self: *const IVssCreateExpressWriterMetadata, ct: VSS_COMPONENT_TYPE, wszLogicalPath: ?[*:0]const u16, wszComponentName: ?[*:0]const u16, wszCaption: ?[*:0]const u16, pbIcon: ?*const u8, cbIcon: u32, bRestoreMetadata: u8, bNotifyOnBackupComplete: u8, bSelectable: u8, bSelectableForRestore: u8, dwComponentFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddFilesToFileGroup: fn( self: *const IVssCreateExpressWriterMetadata, wszLogicalPath: ?[*:0]const u16, wszGroupName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16, dwBackupTypeMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRestoreMethod: fn( self: *const IVssCreateExpressWriterMetadata, method: VSS_RESTOREMETHOD_ENUM, wszService: ?[*:0]const u16, wszUserProcedure: ?[*:0]const u16, writerRestore: VSS_WRITERRESTORE_ENUM, bRebootRequired: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddComponentDependency: fn( self: *const IVssCreateExpressWriterMetadata, wszForLogicalPath: ?[*:0]const u16, wszForComponentName: ?[*:0]const u16, onWriterId: Guid, wszOnLogicalPath: ?[*:0]const u16, wszOnComponentName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetBackupSchema: fn( self: *const IVssCreateExpressWriterMetadata, dwSchemaMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SaveAsXML: fn( self: *const IVssCreateExpressWriterMetadata, pbstrXML: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateExpressWriterMetadata_AddExcludeFiles(self: *const T, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateExpressWriterMetadata.VTable, self.vtable).AddExcludeFiles(@ptrCast(*const IVssCreateExpressWriterMetadata, self), wszPath, wszFilespec, bRecursive); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateExpressWriterMetadata_AddComponent(self: *const T, ct: VSS_COMPONENT_TYPE, wszLogicalPath: ?[*:0]const u16, wszComponentName: ?[*:0]const u16, wszCaption: ?[*:0]const u16, pbIcon: ?*const u8, cbIcon: u32, bRestoreMetadata: u8, bNotifyOnBackupComplete: u8, bSelectable: u8, bSelectableForRestore: u8, dwComponentFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateExpressWriterMetadata.VTable, self.vtable).AddComponent(@ptrCast(*const IVssCreateExpressWriterMetadata, self), ct, wszLogicalPath, wszComponentName, wszCaption, pbIcon, cbIcon, bRestoreMetadata, bNotifyOnBackupComplete, bSelectable, bSelectableForRestore, 
dwComponentFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateExpressWriterMetadata_AddFilesToFileGroup(self: *const T, wszLogicalPath: ?[*:0]const u16, wszGroupName: ?[*:0]const u16, wszPath: ?[*:0]const u16, wszFilespec: ?[*:0]const u16, bRecursive: u8, wszAlternateLocation: ?[*:0]const u16, dwBackupTypeMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateExpressWriterMetadata.VTable, self.vtable).AddFilesToFileGroup(@ptrCast(*const IVssCreateExpressWriterMetadata, self), wszLogicalPath, wszGroupName, wszPath, wszFilespec, bRecursive, wszAlternateLocation, dwBackupTypeMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateExpressWriterMetadata_SetRestoreMethod(self: *const T, method: VSS_RESTOREMETHOD_ENUM, wszService: ?[*:0]const u16, wszUserProcedure: ?[*:0]const u16, writerRestore: VSS_WRITERRESTORE_ENUM, bRebootRequired: u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateExpressWriterMetadata.VTable, self.vtable).SetRestoreMethod(@ptrCast(*const IVssCreateExpressWriterMetadata, self), method, wszService, wszUserProcedure, writerRestore, bRebootRequired); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateExpressWriterMetadata_AddComponentDependency(self: *const T, wszForLogicalPath: ?[*:0]const u16, wszForComponentName: ?[*:0]const u16, onWriterId: Guid, wszOnLogicalPath: ?[*:0]const u16, wszOnComponentName: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateExpressWriterMetadata.VTable, self.vtable).AddComponentDependency(@ptrCast(*const IVssCreateExpressWriterMetadata, self), wszForLogicalPath, wszForComponentName, onWriterId, wszOnLogicalPath, wszOnComponentName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateExpressWriterMetadata_SetBackupSchema(self: *const T, dwSchemaMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateExpressWriterMetadata.VTable, self.vtable).SetBackupSchema(@ptrCast(*const IVssCreateExpressWriterMetadata, self), dwSchemaMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssCreateExpressWriterMetadata_SaveAsXML(self: *const T, pbstrXML: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IVssCreateExpressWriterMetadata.VTable, self.vtable).SaveAsXML(@ptrCast(*const IVssCreateExpressWriterMetadata, self), pbstrXML); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IVssExpressWriter_Value = @import("../zig.zig").Guid.initString("e33affdc-59c7-47b1-97d5-4266598f6235"); pub const IID_IVssExpressWriter = &IID_IVssExpressWriter_Value; pub const IVssExpressWriter = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreateMetadata: fn( self: *const IVssExpressWriter, writerId: Guid, writerName: ?[*:0]const u16, usageType: VSS_USAGE_TYPE, versionMajor: u32, versionMinor: u32, reserved: u32, ppMetadata: ?*?*IVssCreateExpressWriterMetadata, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LoadMetadata: fn( self: *const IVssExpressWriter, metadata: ?[*:0]const u16, reserved: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Register: fn( self: *const IVssExpressWriter, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Unregister: fn( self: *const IVssExpressWriter, writerId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type 
{ return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IVssExpressWriter_CreateMetadata(self: *const T, writerId: Guid, writerName: ?[*:0]const u16, usageType: VSS_USAGE_TYPE, versionMajor: u32, versionMinor: u32, reserved: u32, ppMetadata: ?*?*IVssCreateExpressWriterMetadata) callconv(.Inline) HRESULT {
            return @ptrCast(*const IVssExpressWriter.VTable, self.vtable).CreateMetadata(@ptrCast(*const IVssExpressWriter, self), writerId, writerName, usageType, versionMajor, versionMinor, reserved, ppMetadata);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IVssExpressWriter_LoadMetadata(self: *const T, metadata: ?[*:0]const u16, reserved: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IVssExpressWriter.VTable, self.vtable).LoadMetadata(@ptrCast(*const IVssExpressWriter, self), metadata, reserved);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IVssExpressWriter_Register(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IVssExpressWriter.VTable, self.vtable).Register(@ptrCast(*const IVssExpressWriter, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IVssExpressWriter_Unregister(self: *const T, writerId: Guid) callconv(.Inline) HRESULT {
            return @ptrCast(*const IVssExpressWriter.VTable, self.vtable).Unregister(@ptrCast(*const IVssExpressWriter, self), writerId);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const CLSID_VssSnapshotMgmt_Value = @import("../zig.zig").Guid.initString("0b5a2c52-3eb9-470a-96e2-6c6d4570e40f");
pub const CLSID_VssSnapshotMgmt = &CLSID_VssSnapshotMgmt_Value;

pub const VSS_MGMT_OBJECT_TYPE = enum(i32) {
    UNKNOWN = 0,
    VOLUME = 1,
    DIFF_VOLUME = 2,
    DIFF_AREA = 3,
};
pub const VSS_MGMT_OBJECT_UNKNOWN = VSS_MGMT_OBJECT_TYPE.UNKNOWN;
pub const VSS_MGMT_OBJECT_VOLUME = VSS_MGMT_OBJECT_TYPE.VOLUME;
pub const VSS_MGMT_OBJECT_DIFF_VOLUME = VSS_MGMT_OBJECT_TYPE.DIFF_VOLUME;
pub const VSS_MGMT_OBJECT_DIFF_AREA = VSS_MGMT_OBJECT_TYPE.DIFF_AREA;

pub const VSS_VOLUME_PROP = extern struct {
    m_pwszVolumeName: ?*u16,
    m_pwszVolumeDisplayName: ?*u16,
};

pub const VSS_DIFF_VOLUME_PROP = extern struct {
    m_pwszVolumeName: ?*u16,
    m_pwszVolumeDisplayName: ?*u16,
    m_llVolumeFreeSpace: i64,
    m_llVolumeTotalSpace: i64,
};

pub const VSS_DIFF_AREA_PROP = extern struct {
    m_pwszVolumeName: ?*u16,
    m_pwszDiffAreaVolumeName: ?*u16,
    m_llMaximumDiffSpace: i64,
    m_llAllocatedDiffSpace: i64,
    m_llUsedDiffSpace: i64,
};

pub const VSS_MGMT_OBJECT_UNION = extern union {
    Vol: VSS_VOLUME_PROP,
    DiffVol: VSS_DIFF_VOLUME_PROP,
    DiffArea: VSS_DIFF_AREA_PROP,
};

pub const VSS_MGMT_OBJECT_PROP = extern struct {
    Type: VSS_MGMT_OBJECT_TYPE,
    Obj: VSS_MGMT_OBJECT_UNION,
};

pub const VSS_PROTECTION_LEVEL = enum(i32) {
    ORIGINAL_VOLUME = 0,
    SNAPSHOT = 1,
};
pub const VSS_PROTECTION_LEVEL_ORIGINAL_VOLUME = VSS_PROTECTION_LEVEL.ORIGINAL_VOLUME;
pub const VSS_PROTECTION_LEVEL_SNAPSHOT = VSS_PROTECTION_LEVEL.SNAPSHOT;

pub const VSS_PROTECTION_FAULT = enum(i32) {
    NONE = 0,
    DIFF_AREA_MISSING = 1,
    IO_FAILURE_DURING_ONLINE = 2,
    META_DATA_CORRUPTION = 3,
    MEMORY_ALLOCATION_FAILURE = 4,
    MAPPED_MEMORY_FAILURE = 5,
    COW_READ_FAILURE = 6,
    COW_WRITE_FAILURE = 7,
    DIFF_AREA_FULL = 8,
    GROW_TOO_SLOW = 9,
    GROW_FAILED = 10,
    DESTROY_ALL_SNAPSHOTS = 11,
    FILE_SYSTEM_FAILURE = 12,
    IO_FAILURE = 13,
    DIFF_AREA_REMOVED = 14,
    EXTERNAL_WRITER_TO_DIFF_AREA = 15,
    MOUNT_DURING_CLUSTER_OFFLINE = 16,
};
pub const VSS_PROTECTION_FAULT_NONE = VSS_PROTECTION_FAULT.NONE;
pub const VSS_PROTECTION_FAULT_DIFF_AREA_MISSING = VSS_PROTECTION_FAULT.DIFF_AREA_MISSING;
pub const VSS_PROTECTION_FAULT_IO_FAILURE_DURING_ONLINE = VSS_PROTECTION_FAULT.IO_FAILURE_DURING_ONLINE;
pub const VSS_PROTECTION_FAULT_META_DATA_CORRUPTION = VSS_PROTECTION_FAULT.META_DATA_CORRUPTION;
pub const VSS_PROTECTION_FAULT_MEMORY_ALLOCATION_FAILURE = VSS_PROTECTION_FAULT.MEMORY_ALLOCATION_FAILURE;
pub const VSS_PROTECTION_FAULT_MAPPED_MEMORY_FAILURE = VSS_PROTECTION_FAULT.MAPPED_MEMORY_FAILURE;
pub const VSS_PROTECTION_FAULT_COW_READ_FAILURE = VSS_PROTECTION_FAULT.COW_READ_FAILURE;
pub const VSS_PROTECTION_FAULT_COW_WRITE_FAILURE = VSS_PROTECTION_FAULT.COW_WRITE_FAILURE;
pub const VSS_PROTECTION_FAULT_DIFF_AREA_FULL = VSS_PROTECTION_FAULT.DIFF_AREA_FULL;
pub const VSS_PROTECTION_FAULT_GROW_TOO_SLOW = VSS_PROTECTION_FAULT.GROW_TOO_SLOW;
pub const VSS_PROTECTION_FAULT_GROW_FAILED = VSS_PROTECTION_FAULT.GROW_FAILED;
pub const VSS_PROTECTION_FAULT_DESTROY_ALL_SNAPSHOTS = VSS_PROTECTION_FAULT.DESTROY_ALL_SNAPSHOTS;
pub const VSS_PROTECTION_FAULT_FILE_SYSTEM_FAILURE = VSS_PROTECTION_FAULT.FILE_SYSTEM_FAILURE;
pub const VSS_PROTECTION_FAULT_IO_FAILURE = VSS_PROTECTION_FAULT.IO_FAILURE;
pub const VSS_PROTECTION_FAULT_DIFF_AREA_REMOVED = VSS_PROTECTION_FAULT.DIFF_AREA_REMOVED;
pub const VSS_PROTECTION_FAULT_EXTERNAL_WRITER_TO_DIFF_AREA = VSS_PROTECTION_FAULT.EXTERNAL_WRITER_TO_DIFF_AREA;
pub const VSS_PROTECTION_FAULT_MOUNT_DURING_CLUSTER_OFFLINE = VSS_PROTECTION_FAULT.MOUNT_DURING_CLUSTER_OFFLINE;

pub const VSS_VOLUME_PROTECTION_INFO = extern struct {
    m_protectionLevel: VSS_PROTECTION_LEVEL,
    m_volumeIsOfflineForProtection: BOOL,
    m_protectionFault: VSS_PROTECTION_FAULT,
    m_failureStatus: i32,
    m_volumeHasUnusedDiffArea: BOOL,
    m_reserved: u32,
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IVssSnapshotMgmt_Value = @import("../zig.zig").Guid.initString("fa7df749-66e7-4986-a27f-e2f04ae53772");
pub const IID_IVssSnapshotMgmt = &IID_IVssSnapshotMgmt_Value;
pub const IVssSnapshotMgmt = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetProviderMgmtInterface: fn(
            self: *const IVssSnapshotMgmt,
            ProviderId: Guid,
            InterfaceId: ?*const Guid,
            ppItf: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        QueryVolumesSupportedForSnapshots: fn(
            self: *const IVssSnapshotMgmt,
            ProviderId: Guid,
            lContext: i32,
            ppEnum: ?*?*IVssEnumMgmtObject,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        QuerySnapshotsByVolume: fn(
            self: *const IVssSnapshotMgmt,
            pwszVolumeName: ?*u16,
            ProviderId: Guid,
            ppEnum: ?*?*IVssEnumObject,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IVssSnapshotMgmt_GetProviderMgmtInterface(self: *const T, ProviderId: Guid, InterfaceId: ?*const Guid, ppItf: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IVssSnapshotMgmt.VTable, self.vtable).GetProviderMgmtInterface(@ptrCast(*const IVssSnapshotMgmt, self), ProviderId, InterfaceId, ppItf);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IVssSnapshotMgmt_QueryVolumesSupportedForSnapshots(self: *const T, ProviderId: Guid, lContext: i32, ppEnum: ?*?*IVssEnumMgmtObject) callconv(.Inline) HRESULT {
            return @ptrCast(*const IVssSnapshotMgmt.VTable,
self.vtable).QueryVolumesSupportedForSnapshots(@ptrCast(*const IVssSnapshotMgmt, self), ProviderId, lContext, ppEnum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSnapshotMgmt_QuerySnapshotsByVolume(self: *const T, pwszVolumeName: ?*u16, ProviderId: Guid, ppEnum: ?*?*IVssEnumObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSnapshotMgmt.VTable, self.vtable).QuerySnapshotsByVolume(@ptrCast(*const IVssSnapshotMgmt, self), pwszVolumeName, ProviderId, ppEnum); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssSnapshotMgmt2_Value = @import("../zig.zig").Guid.initString("0f61ec39-fe82-45f2-a3f0-768b5d427102"); pub const IID_IVssSnapshotMgmt2 = &IID_IVssSnapshotMgmt2_Value; pub const IVssSnapshotMgmt2 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetMinDiffAreaSize: fn( self: *const IVssSnapshotMgmt2, pllMinDiffAreaSize: ?*i64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSnapshotMgmt2_GetMinDiffAreaSize(self: *const T, pllMinDiffAreaSize: ?*i64) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSnapshotMgmt2.VTable, self.vtable).GetMinDiffAreaSize(@ptrCast(*const IVssSnapshotMgmt2, self), pllMinDiffAreaSize); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssDifferentialSoftwareSnapshotMgmt_Value = @import("../zig.zig").Guid.initString("214a0f28-b737-4026-b847-4f9e37d79529"); pub const IID_IVssDifferentialSoftwareSnapshotMgmt = &IID_IVssDifferentialSoftwareSnapshotMgmt_Value; pub const IVssDifferentialSoftwareSnapshotMgmt = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AddDiffArea: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, llMaximumDiffSpace: i64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ChangeDiffAreaMaximumSize: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, llMaximumDiffSpace: i64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryVolumesSupportedForDiffAreas: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt, pwszOriginalVolumeName: ?*u16, ppEnum: ?*?*IVssEnumMgmtObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryDiffAreasForVolume: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt, pwszVolumeName: ?*u16, ppEnum: ?*?*IVssEnumMgmtObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryDiffAreasOnVolume: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt, pwszVolumeName: ?*u16, ppEnum: ?*?*IVssEnumMgmtObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryDiffAreasForSnapshot: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt, SnapshotId: Guid, ppEnum: ?*?*IVssEnumMgmtObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt_AddDiffArea(self: *const T, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, llMaximumDiffSpace: i64) callconv(.Inline) HRESULT 
{ return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt.VTable, self.vtable).AddDiffArea(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt, self), pwszVolumeName, pwszDiffAreaVolumeName, llMaximumDiffSpace); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt_ChangeDiffAreaMaximumSize(self: *const T, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, llMaximumDiffSpace: i64) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt.VTable, self.vtable).ChangeDiffAreaMaximumSize(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt, self), pwszVolumeName, pwszDiffAreaVolumeName, llMaximumDiffSpace); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt_QueryVolumesSupportedForDiffAreas(self: *const T, pwszOriginalVolumeName: ?*u16, ppEnum: ?*?*IVssEnumMgmtObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt.VTable, self.vtable).QueryVolumesSupportedForDiffAreas(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt, self), pwszOriginalVolumeName, ppEnum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt_QueryDiffAreasForVolume(self: *const T, pwszVolumeName: ?*u16, ppEnum: ?*?*IVssEnumMgmtObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt.VTable, self.vtable).QueryDiffAreasForVolume(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt, self), pwszVolumeName, ppEnum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt_QueryDiffAreasOnVolume(self: *const T, pwszVolumeName: ?*u16, ppEnum: ?*?*IVssEnumMgmtObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt.VTable, self.vtable).QueryDiffAreasOnVolume(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt, self), pwszVolumeName, ppEnum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt_QueryDiffAreasForSnapshot(self: *const T, SnapshotId: Guid, ppEnum: ?*?*IVssEnumMgmtObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt.VTable, self.vtable).QueryDiffAreasForSnapshot(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt, self), SnapshotId, ppEnum); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssDifferentialSoftwareSnapshotMgmt2_Value = @import("../zig.zig").Guid.initString("949d7353-675f-4275-8969-f044c6277815"); pub const IID_IVssDifferentialSoftwareSnapshotMgmt2 = &IID_IVssDifferentialSoftwareSnapshotMgmt2_Value; pub const IVssDifferentialSoftwareSnapshotMgmt2 = extern struct { pub const VTable = extern struct { base: IVssDifferentialSoftwareSnapshotMgmt.VTable, ChangeDiffAreaMaximumSizeEx: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt2, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, llMaximumDiffSpace: i64, bVolatile: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MigrateDiffAreas: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt2, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, pwszNewDiffAreaVolumeName: ?*u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryMigrationStatus: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt2, pwszVolumeName: ?*u16, 
pwszDiffAreaVolumeName: ?*u16, ppAsync: ?*?*IVssAsync, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSnapshotPriority: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt2, idSnapshot: Guid, priority: u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IVssDifferentialSoftwareSnapshotMgmt.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt2_ChangeDiffAreaMaximumSizeEx(self: *const T, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, llMaximumDiffSpace: i64, bVolatile: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2.VTable, self.vtable).ChangeDiffAreaMaximumSizeEx(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2, self), pwszVolumeName, pwszDiffAreaVolumeName, llMaximumDiffSpace, bVolatile); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt2_MigrateDiffAreas(self: *const T, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, pwszNewDiffAreaVolumeName: ?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2.VTable, self.vtable).MigrateDiffAreas(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2, self), pwszVolumeName, pwszDiffAreaVolumeName, pwszNewDiffAreaVolumeName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt2_QueryMigrationStatus(self: *const T, pwszVolumeName: ?*u16, pwszDiffAreaVolumeName: ?*u16, ppAsync: ?*?*IVssAsync) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2.VTable, self.vtable).QueryMigrationStatus(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2, self), pwszVolumeName, pwszDiffAreaVolumeName, ppAsync); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt2_SetSnapshotPriority(self: *const T, idSnapshot: Guid, priority: u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2.VTable, self.vtable).SetSnapshotPriority(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt2, self), idSnapshot, priority); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssDifferentialSoftwareSnapshotMgmt3_Value = @import("../zig.zig").Guid.initString("383f7e71-a4c5-401f-b27f-f826289f8458"); pub const IID_IVssDifferentialSoftwareSnapshotMgmt3 = &IID_IVssDifferentialSoftwareSnapshotMgmt3_Value; pub const IVssDifferentialSoftwareSnapshotMgmt3 = extern struct { pub const VTable = extern struct { base: IVssDifferentialSoftwareSnapshotMgmt2.VTable, SetVolumeProtectLevel: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt3, pwszVolumeName: ?*u16, protectionLevel: VSS_PROTECTION_LEVEL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVolumeProtectLevel: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt3, pwszVolumeName: ?*u16, protectionLevel: ?*VSS_VOLUME_PROTECTION_INFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ClearVolumeProtectFault: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt3, pwszVolumeName: ?*u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DeleteUnusedDiffAreas: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt3, pwszDiffAreaVolumeName: ?*u16, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, QuerySnapshotDeltaBitmap: fn( self: *const IVssDifferentialSoftwareSnapshotMgmt3, idSnapshotOlder: Guid, idSnapshotYounger: Guid, pcBlockSizePerBit: ?*u32, pcBitmapLength: ?*u32, ppbBitmap: [*]?*u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IVssDifferentialSoftwareSnapshotMgmt2.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt3_SetVolumeProtectLevel(self: *const T, pwszVolumeName: ?*u16, protectionLevel: VSS_PROTECTION_LEVEL) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3.VTable, self.vtable).SetVolumeProtectLevel(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3, self), pwszVolumeName, protectionLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt3_GetVolumeProtectLevel(self: *const T, pwszVolumeName: ?*u16, protectionLevel: ?*VSS_VOLUME_PROTECTION_INFO) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3.VTable, self.vtable).GetVolumeProtectLevel(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3, self), pwszVolumeName, protectionLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt3_ClearVolumeProtectFault(self: *const T, pwszVolumeName: ?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3.VTable, self.vtable).ClearVolumeProtectFault(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3, self), pwszVolumeName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt3_DeleteUnusedDiffAreas(self: *const T, pwszDiffAreaVolumeName: ?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3.VTable, self.vtable).DeleteUnusedDiffAreas(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3, self), pwszDiffAreaVolumeName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssDifferentialSoftwareSnapshotMgmt3_QuerySnapshotDeltaBitmap(self: *const T, idSnapshotOlder: Guid, idSnapshotYounger: Guid, pcBlockSizePerBit: ?*u32, pcBitmapLength: ?*u32, ppbBitmap: [*]?*u8) callconv(.Inline) HRESULT { return @ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3.VTable, self.vtable).QuerySnapshotDeltaBitmap(@ptrCast(*const IVssDifferentialSoftwareSnapshotMgmt3, self), idSnapshotOlder, idSnapshotYounger, pcBlockSizePerBit, pcBitmapLength, ppbBitmap); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssEnumMgmtObject_Value = @import("../zig.zig").Guid.initString("01954e6b-9254-4e6e-808c-c9e05d007696"); pub const IID_IVssEnumMgmtObject = &IID_IVssEnumMgmtObject_Value; pub const IVssEnumMgmtObject = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Next: fn( self: *const IVssEnumMgmtObject, celt: u32, rgelt: [*]VSS_MGMT_OBJECT_PROP, pceltFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Skip: fn( self: *const IVssEnumMgmtObject, celt: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const IVssEnumMgmtObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clone: fn( self: *const 
IVssEnumMgmtObject, ppenum: ?*?*IVssEnumMgmtObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumMgmtObject_Next(self: *const T, celt: u32, rgelt: [*]VSS_MGMT_OBJECT_PROP, pceltFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumMgmtObject.VTable, self.vtable).Next(@ptrCast(*const IVssEnumMgmtObject, self), celt, rgelt, pceltFetched); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumMgmtObject_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumMgmtObject.VTable, self.vtable).Skip(@ptrCast(*const IVssEnumMgmtObject, self), celt); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumMgmtObject_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumMgmtObject.VTable, self.vtable).Reset(@ptrCast(*const IVssEnumMgmtObject, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssEnumMgmtObject_Clone(self: *const T, ppenum: ?*?*IVssEnumMgmtObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssEnumMgmtObject.VTable, self.vtable).Clone(@ptrCast(*const IVssEnumMgmtObject, self), ppenum); } };} pub usingnamespace MethodMixin(@This()); }; const CLSID_VSSCoordinator_Value = @import("../zig.zig").Guid.initString("e579ab5f-1cc4-44b4-bed9-de0991ff0623"); pub const CLSID_VSSCoordinator = &CLSID_VSSCoordinator_Value; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssAdmin_Value = @import("../zig.zig").Guid.initString("77ed5996-2f63-11d3-8a39-00c04f72d8e3"); pub const IID_IVssAdmin = &IID_IVssAdmin_Value; pub const IVssAdmin = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, RegisterProvider: fn( self: *const IVssAdmin, pProviderId: Guid, ClassId: Guid, pwszProviderName: ?*u16, eProviderType: VSS_PROVIDER_TYPE, pwszProviderVersion: ?*u16, ProviderVersionId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UnregisterProvider: fn( self: *const IVssAdmin, ProviderId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryProviders: fn( self: *const IVssAdmin, ppEnum: ?*?*IVssEnumObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AbortAllSnapshotsInProgress: fn( self: *const IVssAdmin, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAdmin_RegisterProvider(self: *const T, pProviderId: Guid, ClassId: Guid, pwszProviderName: ?*u16, eProviderType: VSS_PROVIDER_TYPE, pwszProviderVersion: ?*u16, ProviderVersionId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAdmin.VTable, self.vtable).RegisterProvider(@ptrCast(*const IVssAdmin, self), pProviderId, ClassId, pwszProviderName, eProviderType, pwszProviderVersion, ProviderVersionId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAdmin_UnregisterProvider(self: *const T, ProviderId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAdmin.VTable, self.vtable).UnregisterProvider(@ptrCast(*const IVssAdmin, self), ProviderId); } // NOTE: method is namespaced 
with interface name to avoid conflicts for now pub fn IVssAdmin_QueryProviders(self: *const T, ppEnum: ?*?*IVssEnumObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAdmin.VTable, self.vtable).QueryProviders(@ptrCast(*const IVssAdmin, self), ppEnum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAdmin_AbortAllSnapshotsInProgress(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAdmin.VTable, self.vtable).AbortAllSnapshotsInProgress(@ptrCast(*const IVssAdmin, self)); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IVssAdminEx_Value = @import("../zig.zig").Guid.initString("7858a9f8-b1fa-41a6-964f-b9b36b8cd8d8"); pub const IID_IVssAdminEx = &IID_IVssAdminEx_Value; pub const IVssAdminEx = extern struct { pub const VTable = extern struct { base: IVssAdmin.VTable, GetProviderCapability: fn( self: *const IVssAdminEx, pProviderId: Guid, pllOriginalCapabilityMask: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetProviderContext: fn( self: *const IVssAdminEx, ProviderId: Guid, plContext: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetProviderContext: fn( self: *const IVssAdminEx, ProviderId: Guid, lContext: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IVssAdmin.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAdminEx_GetProviderCapability(self: *const T, pProviderId: Guid, pllOriginalCapabilityMask: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAdminEx.VTable, self.vtable).GetProviderCapability(@ptrCast(*const IVssAdminEx, self), pProviderId, pllOriginalCapabilityMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAdminEx_GetProviderContext(self: *const T, ProviderId: Guid, plContext: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAdminEx.VTable, self.vtable).GetProviderContext(@ptrCast(*const IVssAdminEx, self), ProviderId, plContext); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssAdminEx_SetProviderContext(self: *const T, ProviderId: Guid, lContext: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssAdminEx.VTable, self.vtable).SetProviderContext(@ptrCast(*const IVssAdminEx, self), ProviderId, lContext); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssSoftwareSnapshotProvider_Value = @import("../zig.zig").Guid.initString("609e123e-2c5a-44d3-8f01-0b1d9a47d1ff"); pub const IID_IVssSoftwareSnapshotProvider = &IID_IVssSoftwareSnapshotProvider_Value; pub const IVssSoftwareSnapshotProvider = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, SetContext: fn( self: *const IVssSoftwareSnapshotProvider, lContext: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSnapshotProperties: fn( self: *const IVssSoftwareSnapshotProvider, SnapshotId: Guid, pProp: ?*VSS_SNAPSHOT_PROP, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Query: fn( self: *const IVssSoftwareSnapshotProvider, QueriedObjectId: Guid, eQueriedObjectType: VSS_OBJECT_TYPE, eReturnedObjectsType: VSS_OBJECT_TYPE, ppEnum: ?*?*IVssEnumObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DeleteSnapshots: fn( self: *const IVssSoftwareSnapshotProvider, SourceObjectId: Guid, eSourceObjectType: VSS_OBJECT_TYPE, 
bForceDelete: BOOL, plDeletedSnapshots: ?*i32, pNondeletedSnapshotID: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, BeginPrepareSnapshot: fn( self: *const IVssSoftwareSnapshotProvider, SnapshotSetId: Guid, SnapshotId: Guid, pwszVolumeName: ?*u16, lNewContext: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsVolumeSupported: fn( self: *const IVssSoftwareSnapshotProvider, pwszVolumeName: ?*u16, pbSupportedByThisProvider: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsVolumeSnapshotted: fn( self: *const IVssSoftwareSnapshotProvider, pwszVolumeName: ?*u16, pbSnapshotsPresent: ?*BOOL, plSnapshotCompatibility: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSnapshotProperty: fn( self: *const IVssSoftwareSnapshotProvider, SnapshotId: Guid, eSnapshotPropertyId: VSS_SNAPSHOT_PROPERTY_ID, vProperty: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, RevertToSnapshot: fn( self: *const IVssSoftwareSnapshotProvider, SnapshotId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, QueryRevertStatus: fn( self: *const IVssSoftwareSnapshotProvider, pwszVolume: ?*u16, ppAsync: ?*?*IVssAsync, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_SetContext(self: *const T, lContext: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).SetContext(@ptrCast(*const IVssSoftwareSnapshotProvider, self), lContext); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_GetSnapshotProperties(self: *const T, SnapshotId: Guid, pProp: ?*VSS_SNAPSHOT_PROP) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).GetSnapshotProperties(@ptrCast(*const IVssSoftwareSnapshotProvider, self), SnapshotId, pProp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_Query(self: *const T, QueriedObjectId: Guid, eQueriedObjectType: VSS_OBJECT_TYPE, eReturnedObjectsType: VSS_OBJECT_TYPE, ppEnum: ?*?*IVssEnumObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).Query(@ptrCast(*const IVssSoftwareSnapshotProvider, self), QueriedObjectId, eQueriedObjectType, eReturnedObjectsType, ppEnum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_DeleteSnapshots(self: *const T, SourceObjectId: Guid, eSourceObjectType: VSS_OBJECT_TYPE, bForceDelete: BOOL, plDeletedSnapshots: ?*i32, pNondeletedSnapshotID: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).DeleteSnapshots(@ptrCast(*const IVssSoftwareSnapshotProvider, self), SourceObjectId, eSourceObjectType, bForceDelete, plDeletedSnapshots, pNondeletedSnapshotID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_BeginPrepareSnapshot(self: *const T, SnapshotSetId: Guid, SnapshotId: Guid, pwszVolumeName: ?*u16, lNewContext: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).BeginPrepareSnapshot(@ptrCast(*const IVssSoftwareSnapshotProvider, self), SnapshotSetId, SnapshotId, 
pwszVolumeName, lNewContext); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_IsVolumeSupported(self: *const T, pwszVolumeName: ?*u16, pbSupportedByThisProvider: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).IsVolumeSupported(@ptrCast(*const IVssSoftwareSnapshotProvider, self), pwszVolumeName, pbSupportedByThisProvider); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_IsVolumeSnapshotted(self: *const T, pwszVolumeName: ?*u16, pbSnapshotsPresent: ?*BOOL, plSnapshotCompatibility: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).IsVolumeSnapshotted(@ptrCast(*const IVssSoftwareSnapshotProvider, self), pwszVolumeName, pbSnapshotsPresent, plSnapshotCompatibility); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_SetSnapshotProperty(self: *const T, SnapshotId: Guid, eSnapshotPropertyId: VSS_SNAPSHOT_PROPERTY_ID, vProperty: VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).SetSnapshotProperty(@ptrCast(*const IVssSoftwareSnapshotProvider, self), SnapshotId, eSnapshotPropertyId, vProperty); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_RevertToSnapshot(self: *const T, SnapshotId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).RevertToSnapshot(@ptrCast(*const IVssSoftwareSnapshotProvider, self), SnapshotId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssSoftwareSnapshotProvider_QueryRevertStatus(self: *const T, pwszVolume: ?*u16, ppAsync: ?*?*IVssAsync) callconv(.Inline) HRESULT { return @ptrCast(*const IVssSoftwareSnapshotProvider.VTable, self.vtable).QueryRevertStatus(@ptrCast(*const IVssSoftwareSnapshotProvider, self), pwszVolume, ppAsync); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssProviderCreateSnapshotSet_Value = @import("../zig.zig").Guid.initString("5f894e5b-1e39-4778-8e23-9abad9f0e08c"); pub const IID_IVssProviderCreateSnapshotSet = &IID_IVssProviderCreateSnapshotSet_Value; pub const IVssProviderCreateSnapshotSet = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, EndPrepareSnapshots: fn( self: *const IVssProviderCreateSnapshotSet, SnapshotSetId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, PreCommitSnapshots: fn( self: *const IVssProviderCreateSnapshotSet, SnapshotSetId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CommitSnapshots: fn( self: *const IVssProviderCreateSnapshotSet, SnapshotSetId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, PostCommitSnapshots: fn( self: *const IVssProviderCreateSnapshotSet, SnapshotSetId: Guid, lSnapshotsCount: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, PreFinalCommitSnapshots: fn( self: *const IVssProviderCreateSnapshotSet, SnapshotSetId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, PostFinalCommitSnapshots: fn( self: *const IVssProviderCreateSnapshotSet, SnapshotSetId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AbortSnapshots: fn( self: *const IVssProviderCreateSnapshotSet, SnapshotSetId: Guid, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderCreateSnapshotSet_EndPrepareSnapshots(self: *const T, SnapshotSetId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderCreateSnapshotSet.VTable, self.vtable).EndPrepareSnapshots(@ptrCast(*const IVssProviderCreateSnapshotSet, self), SnapshotSetId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderCreateSnapshotSet_PreCommitSnapshots(self: *const T, SnapshotSetId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderCreateSnapshotSet.VTable, self.vtable).PreCommitSnapshots(@ptrCast(*const IVssProviderCreateSnapshotSet, self), SnapshotSetId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderCreateSnapshotSet_CommitSnapshots(self: *const T, SnapshotSetId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderCreateSnapshotSet.VTable, self.vtable).CommitSnapshots(@ptrCast(*const IVssProviderCreateSnapshotSet, self), SnapshotSetId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderCreateSnapshotSet_PostCommitSnapshots(self: *const T, SnapshotSetId: Guid, lSnapshotsCount: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderCreateSnapshotSet.VTable, self.vtable).PostCommitSnapshots(@ptrCast(*const IVssProviderCreateSnapshotSet, self), SnapshotSetId, lSnapshotsCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderCreateSnapshotSet_PreFinalCommitSnapshots(self: *const T, SnapshotSetId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderCreateSnapshotSet.VTable, self.vtable).PreFinalCommitSnapshots(@ptrCast(*const IVssProviderCreateSnapshotSet, self), SnapshotSetId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderCreateSnapshotSet_PostFinalCommitSnapshots(self: *const T, SnapshotSetId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderCreateSnapshotSet.VTable, self.vtable).PostFinalCommitSnapshots(@ptrCast(*const IVssProviderCreateSnapshotSet, self), SnapshotSetId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderCreateSnapshotSet_AbortSnapshots(self: *const T, SnapshotSetId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderCreateSnapshotSet.VTable, self.vtable).AbortSnapshots(@ptrCast(*const IVssProviderCreateSnapshotSet, self), SnapshotSetId); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IVssProviderNotifications_Value = @import("../zig.zig").Guid.initString("e561901f-03a5-4afe-86d0-72baeece7004"); pub const IID_IVssProviderNotifications = &IID_IVssProviderNotifications_Value; pub const IVssProviderNotifications = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnLoad: fn( self: *const IVssProviderNotifications, pCallback: ?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnUnload: fn( self: *const IVssProviderNotifications, bForceUnload: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub 
usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderNotifications_OnLoad(self: *const T, pCallback: ?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderNotifications.VTable, self.vtable).OnLoad(@ptrCast(*const IVssProviderNotifications, self), pCallback); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssProviderNotifications_OnUnload(self: *const T, bForceUnload: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IVssProviderNotifications.VTable, self.vtable).OnUnload(@ptrCast(*const IVssProviderNotifications, self), bForceUnload); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windowsServer2003' const IID_IVssHardwareSnapshotProvider_Value = @import("../zig.zig").Guid.initString("9593a157-44e9-4344-bbeb-44fbf9b06b10"); pub const IID_IVssHardwareSnapshotProvider = &IID_IVssHardwareSnapshotProvider_Value; pub const IVssHardwareSnapshotProvider = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AreLunsSupported: fn( self: *const IVssHardwareSnapshotProvider, lLunCount: i32, lContext: i32, rgwszDevices: [*]?*u16, pLunInformation: [*]VDS_LUN_INFORMATION, pbIsSupported: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, FillInLunInfo: fn( self: *const IVssHardwareSnapshotProvider, wszDeviceName: ?*u16, pLunInfo: ?*VDS_LUN_INFORMATION, pbIsSupported: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, BeginPrepareSnapshot: fn( self: *const IVssHardwareSnapshotProvider, SnapshotSetId: Guid, SnapshotId: Guid, lContext: i32, lLunCount: i32, rgDeviceNames: [*]?*u16, rgLunInformation: [*]VDS_LUN_INFORMATION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetTargetLuns: fn( self: *const IVssHardwareSnapshotProvider, lLunCount: i32, rgDeviceNames: [*]?*u16, rgSourceLuns: [*]VDS_LUN_INFORMATION, rgDestinationLuns: [*]VDS_LUN_INFORMATION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LocateLuns: fn( self: *const IVssHardwareSnapshotProvider, lLunCount: i32, rgSourceLuns: [*]VDS_LUN_INFORMATION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnLunEmpty: fn( self: *const IVssHardwareSnapshotProvider, wszDeviceName: ?*u16, pInformation: ?*VDS_LUN_INFORMATION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProvider_AreLunsSupported(self: *const T, lLunCount: i32, lContext: i32, rgwszDevices: [*]?*u16, pLunInformation: [*]VDS_LUN_INFORMATION, pbIsSupported: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProvider.VTable, self.vtable).AreLunsSupported(@ptrCast(*const IVssHardwareSnapshotProvider, self), lLunCount, lContext, rgwszDevices, pLunInformation, pbIsSupported); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProvider_FillInLunInfo(self: *const T, wszDeviceName: ?*u16, pLunInfo: ?*VDS_LUN_INFORMATION, pbIsSupported: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProvider.VTable, self.vtable).FillInLunInfo(@ptrCast(*const IVssHardwareSnapshotProvider, self), wszDeviceName, pLunInfo, pbIsSupported); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IVssHardwareSnapshotProvider_BeginPrepareSnapshot(self: *const T, SnapshotSetId: Guid, SnapshotId: Guid, lContext: i32, lLunCount: i32, rgDeviceNames: [*]?*u16, rgLunInformation: [*]VDS_LUN_INFORMATION) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProvider.VTable, self.vtable).BeginPrepareSnapshot(@ptrCast(*const IVssHardwareSnapshotProvider, self), SnapshotSetId, SnapshotId, lContext, lLunCount, rgDeviceNames, rgLunInformation); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProvider_GetTargetLuns(self: *const T, lLunCount: i32, rgDeviceNames: [*]?*u16, rgSourceLuns: [*]VDS_LUN_INFORMATION, rgDestinationLuns: [*]VDS_LUN_INFORMATION) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProvider.VTable, self.vtable).GetTargetLuns(@ptrCast(*const IVssHardwareSnapshotProvider, self), lLunCount, rgDeviceNames, rgSourceLuns, rgDestinationLuns); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProvider_LocateLuns(self: *const T, lLunCount: i32, rgSourceLuns: [*]VDS_LUN_INFORMATION) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProvider.VTable, self.vtable).LocateLuns(@ptrCast(*const IVssHardwareSnapshotProvider, self), lLunCount, rgSourceLuns); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProvider_OnLunEmpty(self: *const T, wszDeviceName: ?*u16, pInformation: ?*VDS_LUN_INFORMATION) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProvider.VTable, self.vtable).OnLunEmpty(@ptrCast(*const IVssHardwareSnapshotProvider, self), wszDeviceName, pInformation); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windowsServer2008' const IID_IVssHardwareSnapshotProviderEx_Value = @import("../zig.zig").Guid.initString("7f5ba925-cdb1-4d11-a71f-339eb7e709fd"); pub const IID_IVssHardwareSnapshotProviderEx = &IID_IVssHardwareSnapshotProviderEx_Value; pub const IVssHardwareSnapshotProviderEx = extern struct { pub const VTable = extern struct { base: IVssHardwareSnapshotProvider.VTable, GetProviderCapabilities: fn( self: *const IVssHardwareSnapshotProviderEx, pllOriginalCapabilityMask: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnLunStateChange: fn( self: *const IVssHardwareSnapshotProviderEx, pSnapshotLuns: [*]VDS_LUN_INFORMATION, pOriginalLuns: [*]VDS_LUN_INFORMATION, dwCount: u32, dwFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ResyncLuns: fn( self: *const IVssHardwareSnapshotProviderEx, pSourceLuns: [*]VDS_LUN_INFORMATION, pTargetLuns: [*]VDS_LUN_INFORMATION, dwCount: u32, ppAsync: ?*?*IVssAsync, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnReuseLuns: fn( self: *const IVssHardwareSnapshotProviderEx, pSnapshotLuns: [*]VDS_LUN_INFORMATION, pOriginalLuns: [*]VDS_LUN_INFORMATION, dwCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IVssHardwareSnapshotProvider.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProviderEx_GetProviderCapabilities(self: *const T, pllOriginalCapabilityMask: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProviderEx.VTable, self.vtable).GetProviderCapabilities(@ptrCast(*const IVssHardwareSnapshotProviderEx, self), 
pllOriginalCapabilityMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProviderEx_OnLunStateChange(self: *const T, pSnapshotLuns: [*]VDS_LUN_INFORMATION, pOriginalLuns: [*]VDS_LUN_INFORMATION, dwCount: u32, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProviderEx.VTable, self.vtable).OnLunStateChange(@ptrCast(*const IVssHardwareSnapshotProviderEx, self), pSnapshotLuns, pOriginalLuns, dwCount, dwFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProviderEx_ResyncLuns(self: *const T, pSourceLuns: [*]VDS_LUN_INFORMATION, pTargetLuns: [*]VDS_LUN_INFORMATION, dwCount: u32, ppAsync: ?*?*IVssAsync) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProviderEx.VTable, self.vtable).ResyncLuns(@ptrCast(*const IVssHardwareSnapshotProviderEx, self), pSourceLuns, pTargetLuns, dwCount, ppAsync); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssHardwareSnapshotProviderEx_OnReuseLuns(self: *const T, pSnapshotLuns: [*]VDS_LUN_INFORMATION, pOriginalLuns: [*]VDS_LUN_INFORMATION, dwCount: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssHardwareSnapshotProviderEx.VTable, self.vtable).OnReuseLuns(@ptrCast(*const IVssHardwareSnapshotProviderEx, self), pSnapshotLuns, pOriginalLuns, dwCount); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.1' const IID_IVssFileShareSnapshotProvider_Value = @import("../zig.zig").Guid.initString("c8636060-7c2e-11df-8c4a-0800200c9a66"); pub const IID_IVssFileShareSnapshotProvider = &IID_IVssFileShareSnapshotProvider_Value; pub const IVssFileShareSnapshotProvider = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, SetContext: fn( self: *const IVssFileShareSnapshotProvider, lContext: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSnapshotProperties: fn( self: *const IVssFileShareSnapshotProvider, SnapshotId: Guid, pProp: ?*VSS_SNAPSHOT_PROP, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Query: fn( self: *const IVssFileShareSnapshotProvider, QueriedObjectId: Guid, eQueriedObjectType: VSS_OBJECT_TYPE, eReturnedObjectsType: VSS_OBJECT_TYPE, ppEnum: ?*?*IVssEnumObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DeleteSnapshots: fn( self: *const IVssFileShareSnapshotProvider, SourceObjectId: Guid, eSourceObjectType: VSS_OBJECT_TYPE, bForceDelete: BOOL, plDeletedSnapshots: ?*i32, pNondeletedSnapshotID: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, BeginPrepareSnapshot: fn( self: *const IVssFileShareSnapshotProvider, SnapshotSetId: Guid, SnapshotId: Guid, pwszSharePath: ?*u16, lNewContext: i32, ProviderId: Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsPathSupported: fn( self: *const IVssFileShareSnapshotProvider, pwszSharePath: ?*u16, pbSupportedByThisProvider: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsPathSnapshotted: fn( self: *const IVssFileShareSnapshotProvider, pwszSharePath: ?*u16, pbSnapshotsPresent: ?*BOOL, plSnapshotCompatibility: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSnapshotProperty: fn( self: *const IVssFileShareSnapshotProvider, SnapshotId: Guid, eSnapshotPropertyId: VSS_SNAPSHOT_PROPERTY_ID, vProperty: VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub 
usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_SetContext(self: *const T, lContext: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, self.vtable).SetContext(@ptrCast(*const IVssFileShareSnapshotProvider, self), lContext); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_GetSnapshotProperties(self: *const T, SnapshotId: Guid, pProp: ?*VSS_SNAPSHOT_PROP) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, self.vtable).GetSnapshotProperties(@ptrCast(*const IVssFileShareSnapshotProvider, self), SnapshotId, pProp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_Query(self: *const T, QueriedObjectId: Guid, eQueriedObjectType: VSS_OBJECT_TYPE, eReturnedObjectsType: VSS_OBJECT_TYPE, ppEnum: ?*?*IVssEnumObject) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, self.vtable).Query(@ptrCast(*const IVssFileShareSnapshotProvider, self), QueriedObjectId, eQueriedObjectType, eReturnedObjectsType, ppEnum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_DeleteSnapshots(self: *const T, SourceObjectId: Guid, eSourceObjectType: VSS_OBJECT_TYPE, bForceDelete: BOOL, plDeletedSnapshots: ?*i32, pNondeletedSnapshotID: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, self.vtable).DeleteSnapshots(@ptrCast(*const IVssFileShareSnapshotProvider, self), SourceObjectId, eSourceObjectType, bForceDelete, plDeletedSnapshots, pNondeletedSnapshotID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_BeginPrepareSnapshot(self: *const T, SnapshotSetId: Guid, SnapshotId: Guid, pwszSharePath: ?*u16, lNewContext: i32, ProviderId: Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, self.vtable).BeginPrepareSnapshot(@ptrCast(*const IVssFileShareSnapshotProvider, self), SnapshotSetId, SnapshotId, pwszSharePath, lNewContext, ProviderId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_IsPathSupported(self: *const T, pwszSharePath: ?*u16, pbSupportedByThisProvider: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, self.vtable).IsPathSupported(@ptrCast(*const IVssFileShareSnapshotProvider, self), pwszSharePath, pbSupportedByThisProvider); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_IsPathSnapshotted(self: *const T, pwszSharePath: ?*u16, pbSnapshotsPresent: ?*BOOL, plSnapshotCompatibility: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, self.vtable).IsPathSnapshotted(@ptrCast(*const IVssFileShareSnapshotProvider, self), pwszSharePath, pbSnapshotsPresent, plSnapshotCompatibility); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IVssFileShareSnapshotProvider_SetSnapshotProperty(self: *const T, SnapshotId: Guid, eSnapshotPropertyId: VSS_SNAPSHOT_PROPERTY_ID, vProperty: VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IVssFileShareSnapshotProvider.VTable, 
self.vtable).SetSnapshotProperty(@ptrCast(*const IVssFileShareSnapshotProvider, self), SnapshotId, eSnapshotPropertyId, vProperty); } };} pub usingnamespace MethodMixin(@This()); }; //-------------------------------------------------------------------------------- // Section: Functions (1) //-------------------------------------------------------------------------------- // TODO: this type is limited to platform 'windows6.1' pub extern "VSSAPI" fn CreateVssExpressWriterInternal( ppWriter: ?*?*IVssExpressWriter, ) callconv(@import("std").os.windows.WINAPI) HRESULT; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (10) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const BOOL = @import("../foundation.zig").BOOL; const BSTR = @import("../foundation.zig").BSTR; const FILETIME = @import("../foundation.zig").FILETIME; const HRESULT = @import("../foundation.zig").HRESULT; const IUnknown = @import("../system/com.zig").IUnknown; const IXMLDOMDocument = @import("../data/xml/ms_xml.zig").IXMLDOMDocument; const PWSTR = @import("../foundation.zig").PWSTR; const VARIANT = @import("../system/ole_automation.zig").VARIANT; const VDS_LUN_INFORMATION = @import("../storage/virtual_disk_service.zig").VDS_LUN_INFORMATION; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/storage/vss.zig
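// ----------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated vss.zig above).
// Every interface in that file follows the same COM layout: an extern struct
// whose only field is a pointer to its VTable, plus a MethodMixin that emits
// inline forwarders (e.g. IVssProviderNotifications_OnUnload) which cast `self`
// back to the concrete interface and call through the matching vtable slot.
// The import paths below are assumptions for illustration only; zigwin32 is
// normally consumed through its package root rather than by file path.
const vss = @import("deps/zigwin32/win32/storage/vss.zig"); // assumed path
const foundation = @import("deps/zigwin32/win32/foundation.zig"); // assumed path

// Given a valid interface pointer (for example one held by a provider DLL),
// the namespaced mixin method is just a typed wrapper around the vtable call.
fn forceUnloadSketch(notifications: *vss.IVssProviderNotifications) foundation.HRESULT {
    // Roughly equivalent to: notifications.vtable.OnUnload(notifications, 1)
    return notifications.IVssProviderNotifications_OnUnload(1);
}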
pub const SCEX2_ALT_NETBIOS_NAME = @as(u32, 1); //-------------------------------------------------------------------------------- // Section: Types (21) //-------------------------------------------------------------------------------- pub const VER_FLAGS = enum(u32) { MINORVERSION = 1, MAJORVERSION = 2, BUILDNUMBER = 4, PLATFORMID = 8, SERVICEPACKMINOR = 16, SERVICEPACKMAJOR = 32, SUITENAME = 64, PRODUCT_TYPE = 128, _, pub fn initFlags(o: struct { MINORVERSION: u1 = 0, MAJORVERSION: u1 = 0, BUILDNUMBER: u1 = 0, PLATFORMID: u1 = 0, SERVICEPACKMINOR: u1 = 0, SERVICEPACKMAJOR: u1 = 0, SUITENAME: u1 = 0, PRODUCT_TYPE: u1 = 0, }) VER_FLAGS { return @intToEnum(VER_FLAGS, (if (o.MINORVERSION == 1) @enumToInt(VER_FLAGS.MINORVERSION) else 0) | (if (o.MAJORVERSION == 1) @enumToInt(VER_FLAGS.MAJORVERSION) else 0) | (if (o.BUILDNUMBER == 1) @enumToInt(VER_FLAGS.BUILDNUMBER) else 0) | (if (o.PLATFORMID == 1) @enumToInt(VER_FLAGS.PLATFORMID) else 0) | (if (o.SERVICEPACKMINOR == 1) @enumToInt(VER_FLAGS.SERVICEPACKMINOR) else 0) | (if (o.SERVICEPACKMAJOR == 1) @enumToInt(VER_FLAGS.SERVICEPACKMAJOR) else 0) | (if (o.SUITENAME == 1) @enumToInt(VER_FLAGS.SUITENAME) else 0) | (if (o.PRODUCT_TYPE == 1) @enumToInt(VER_FLAGS.PRODUCT_TYPE) else 0) ); } }; pub const VER_MINORVERSION = VER_FLAGS.MINORVERSION; pub const VER_MAJORVERSION = VER_FLAGS.MAJORVERSION; pub const VER_BUILDNUMBER = VER_FLAGS.BUILDNUMBER; pub const VER_PLATFORMID = VER_FLAGS.PLATFORMID; pub const VER_SERVICEPACKMINOR = VER_FLAGS.SERVICEPACKMINOR; pub const VER_SERVICEPACKMAJOR = VER_FLAGS.SERVICEPACKMAJOR; pub const VER_SUITENAME = VER_FLAGS.SUITENAME; pub const VER_PRODUCT_TYPE = VER_FLAGS.PRODUCT_TYPE; pub const FIRMWARE_TABLE_PROVIDER = enum(u32) { ACPI = 1094930505, FIRM = 1179210317, RSMB = 1381190978, }; pub const ACPI = FIRMWARE_TABLE_PROVIDER.ACPI; pub const FIRM = FIRMWARE_TABLE_PROVIDER.FIRM; pub const RSMB = FIRMWARE_TABLE_PROVIDER.RSMB; pub const USER_CET_ENVIRONMENT = enum(u32) { WIN32_PROCESS = 0, SGX2_ENCLAVE = 2, VBS_BASIC_ENCLAVE = 17, }; pub const USER_CET_ENVIRONMENT_WIN32_PROCESS = USER_CET_ENVIRONMENT.WIN32_PROCESS; pub const USER_CET_ENVIRONMENT_SGX2_ENCLAVE = USER_CET_ENVIRONMENT.SGX2_ENCLAVE; pub const USER_CET_ENVIRONMENT_VBS_BASIC_ENCLAVE = USER_CET_ENVIRONMENT.VBS_BASIC_ENCLAVE; pub const OS_PRODUCT_TYPE = enum(u32) { BUSINESS = 6, BUSINESS_N = 16, CLUSTER_SERVER = 18, CLUSTER_SERVER_V = 64, CORE = 101, CORE_COUNTRYSPECIFIC = 99, CORE_N = 98, CORE_SINGLELANGUAGE = 100, DATACENTER_EVALUATION_SERVER = 80, DATACENTER_A_SERVER_CORE = 145, STANDARD_A_SERVER_CORE = 146, DATACENTER_SERVER = 8, DATACENTER_SERVER_CORE = 12, DATACENTER_SERVER_CORE_V = 39, DATACENTER_SERVER_V = 37, EDUCATION = 121, EDUCATION_N = 122, ENTERPRISE = 4, ENTERPRISE_E = 70, ENTERPRISE_EVALUATION = 72, ENTERPRISE_N = 27, ENTERPRISE_N_EVALUATION = 84, ENTERPRISE_S = 125, ENTERPRISE_S_EVALUATION = 129, ENTERPRISE_S_N = 126, ENTERPRISE_S_N_EVALUATION = 130, ENTERPRISE_SERVER = 10, ENTERPRISE_SERVER_CORE = 14, ENTERPRISE_SERVER_CORE_V = 41, ENTERPRISE_SERVER_IA64 = 15, ENTERPRISE_SERVER_V = 38, ESSENTIALBUSINESS_SERVER_ADDL = 60, ESSENTIALBUSINESS_SERVER_ADDLSVC = 62, ESSENTIALBUSINESS_SERVER_MGMT = 59, ESSENTIALBUSINESS_SERVER_MGMTSVC = 61, HOME_BASIC = 2, HOME_BASIC_E = 67, HOME_BASIC_N = 5, HOME_PREMIUM = 3, HOME_PREMIUM_E = 68, HOME_PREMIUM_N = 26, HOME_PREMIUM_SERVER = 34, HOME_SERVER = 19, HYPERV = 42, IOTUAP = 123, IOTUAPCOMMERCIAL = 131, MEDIUMBUSINESS_SERVER_MANAGEMENT = 30, MEDIUMBUSINESS_SERVER_MESSAGING = 32, MEDIUMBUSINESS_SERVER_SECURITY 
= 31, MOBILE_CORE = 104, MOBILE_ENTERPRISE = 133, MULTIPOINT_PREMIUM_SERVER = 77, MULTIPOINT_STANDARD_SERVER = 76, PRO_WORKSTATION = 161, PRO_WORKSTATION_N = 162, PROFESSIONAL = 48, PROFESSIONAL_E = 69, PROFESSIONAL_N = 49, PROFESSIONAL_WMC = 103, SB_SOLUTION_SERVER = 50, SB_SOLUTION_SERVER_EM = 54, SERVER_FOR_SB_SOLUTIONS = 51, SERVER_FOR_SB_SOLUTIONS_EM = 55, SERVER_FOR_SMALLBUSINESS = 24, SERVER_FOR_SMALLBUSINESS_V = 35, SERVER_FOUNDATION = 33, SMALLBUSINESS_SERVER = 9, SMALLBUSINESS_SERVER_PREMIUM = 25, SMALLBUSINESS_SERVER_PREMIUM_CORE = 63, SOLUTION_EMBEDDEDSERVER = 56, STANDARD_EVALUATION_SERVER = 79, STANDARD_SERVER = 7, STANDARD_SERVER_CORE_ = 13, STANDARD_SERVER_CORE_V = 40, STANDARD_SERVER_V = 36, STANDARD_SERVER_SOLUTIONS = 52, STANDARD_SERVER_SOLUTIONS_CORE = 53, STARTER = 11, STARTER_E = 66, STARTER_N = 47, STORAGE_ENTERPRISE_SERVER = 23, STORAGE_ENTERPRISE_SERVER_CORE = 46, STORAGE_EXPRESS_SERVER = 20, STORAGE_EXPRESS_SERVER_CORE = 43, STORAGE_STANDARD_EVALUATION_SERVER = 96, STORAGE_STANDARD_SERVER = 21, STORAGE_STANDARD_SERVER_CORE = 44, STORAGE_WORKGROUP_EVALUATION_SERVER = 95, STORAGE_WORKGROUP_SERVER = 22, STORAGE_WORKGROUP_SERVER_CORE = 45, ULTIMATE = 1, ULTIMATE_E = 71, ULTIMATE_N = 28, UNDEFINED = 0, WEB_SERVER = 17, WEB_SERVER_CORE = 29, }; pub const PRODUCT_BUSINESS = OS_PRODUCT_TYPE.BUSINESS; pub const PRODUCT_BUSINESS_N = OS_PRODUCT_TYPE.BUSINESS_N; pub const PRODUCT_CLUSTER_SERVER = OS_PRODUCT_TYPE.CLUSTER_SERVER; pub const PRODUCT_CLUSTER_SERVER_V = OS_PRODUCT_TYPE.CLUSTER_SERVER_V; pub const PRODUCT_CORE = OS_PRODUCT_TYPE.CORE; pub const PRODUCT_CORE_COUNTRYSPECIFIC = OS_PRODUCT_TYPE.CORE_COUNTRYSPECIFIC; pub const PRODUCT_CORE_N = OS_PRODUCT_TYPE.CORE_N; pub const PRODUCT_CORE_SINGLELANGUAGE = OS_PRODUCT_TYPE.CORE_SINGLELANGUAGE; pub const PRODUCT_DATACENTER_EVALUATION_SERVER = OS_PRODUCT_TYPE.DATACENTER_EVALUATION_SERVER; pub const PRODUCT_DATACENTER_A_SERVER_CORE = OS_PRODUCT_TYPE.DATACENTER_A_SERVER_CORE; pub const PRODUCT_STANDARD_A_SERVER_CORE = OS_PRODUCT_TYPE.STANDARD_A_SERVER_CORE; pub const PRODUCT_DATACENTER_SERVER = OS_PRODUCT_TYPE.DATACENTER_SERVER; pub const PRODUCT_DATACENTER_SERVER_CORE = OS_PRODUCT_TYPE.DATACENTER_SERVER_CORE; pub const PRODUCT_DATACENTER_SERVER_CORE_V = OS_PRODUCT_TYPE.DATACENTER_SERVER_CORE_V; pub const PRODUCT_DATACENTER_SERVER_V = OS_PRODUCT_TYPE.DATACENTER_SERVER_V; pub const PRODUCT_EDUCATION = OS_PRODUCT_TYPE.EDUCATION; pub const PRODUCT_EDUCATION_N = OS_PRODUCT_TYPE.EDUCATION_N; pub const PRODUCT_ENTERPRISE = OS_PRODUCT_TYPE.ENTERPRISE; pub const PRODUCT_ENTERPRISE_E = OS_PRODUCT_TYPE.ENTERPRISE_E; pub const PRODUCT_ENTERPRISE_EVALUATION = OS_PRODUCT_TYPE.ENTERPRISE_EVALUATION; pub const PRODUCT_ENTERPRISE_N = OS_PRODUCT_TYPE.ENTERPRISE_N; pub const PRODUCT_ENTERPRISE_N_EVALUATION = OS_PRODUCT_TYPE.ENTERPRISE_N_EVALUATION; pub const PRODUCT_ENTERPRISE_S = OS_PRODUCT_TYPE.ENTERPRISE_S; pub const PRODUCT_ENTERPRISE_S_EVALUATION = OS_PRODUCT_TYPE.ENTERPRISE_S_EVALUATION; pub const PRODUCT_ENTERPRISE_S_N = OS_PRODUCT_TYPE.ENTERPRISE_S_N; pub const PRODUCT_ENTERPRISE_S_N_EVALUATION = OS_PRODUCT_TYPE.ENTERPRISE_S_N_EVALUATION; pub const PRODUCT_ENTERPRISE_SERVER = OS_PRODUCT_TYPE.ENTERPRISE_SERVER; pub const PRODUCT_ENTERPRISE_SERVER_CORE = OS_PRODUCT_TYPE.ENTERPRISE_SERVER_CORE; pub const PRODUCT_ENTERPRISE_SERVER_CORE_V = OS_PRODUCT_TYPE.ENTERPRISE_SERVER_CORE_V; pub const PRODUCT_ENTERPRISE_SERVER_IA64 = OS_PRODUCT_TYPE.ENTERPRISE_SERVER_IA64; pub const PRODUCT_ENTERPRISE_SERVER_V = OS_PRODUCT_TYPE.ENTERPRISE_SERVER_V; 
pub const PRODUCT_ESSENTIALBUSINESS_SERVER_ADDL = OS_PRODUCT_TYPE.ESSENTIALBUSINESS_SERVER_ADDL; pub const PRODUCT_ESSENTIALBUSINESS_SERVER_ADDLSVC = OS_PRODUCT_TYPE.ESSENTIALBUSINESS_SERVER_ADDLSVC; pub const PRODUCT_ESSENTIALBUSINESS_SERVER_MGMT = OS_PRODUCT_TYPE.ESSENTIALBUSINESS_SERVER_MGMT; pub const PRODUCT_ESSENTIALBUSINESS_SERVER_MGMTSVC = OS_PRODUCT_TYPE.ESSENTIALBUSINESS_SERVER_MGMTSVC; pub const PRODUCT_HOME_BASIC = OS_PRODUCT_TYPE.HOME_BASIC; pub const PRODUCT_HOME_BASIC_E = OS_PRODUCT_TYPE.HOME_BASIC_E; pub const PRODUCT_HOME_BASIC_N = OS_PRODUCT_TYPE.HOME_BASIC_N; pub const PRODUCT_HOME_PREMIUM = OS_PRODUCT_TYPE.HOME_PREMIUM; pub const PRODUCT_HOME_PREMIUM_E = OS_PRODUCT_TYPE.HOME_PREMIUM_E; pub const PRODUCT_HOME_PREMIUM_N = OS_PRODUCT_TYPE.HOME_PREMIUM_N; pub const PRODUCT_HOME_PREMIUM_SERVER = OS_PRODUCT_TYPE.HOME_PREMIUM_SERVER; pub const PRODUCT_HOME_SERVER = OS_PRODUCT_TYPE.HOME_SERVER; pub const PRODUCT_HYPERV = OS_PRODUCT_TYPE.HYPERV; pub const PRODUCT_IOTUAP = OS_PRODUCT_TYPE.IOTUAP; pub const PRODUCT_IOTUAPCOMMERCIAL = OS_PRODUCT_TYPE.IOTUAPCOMMERCIAL; pub const PRODUCT_MEDIUMBUSINESS_SERVER_MANAGEMENT = OS_PRODUCT_TYPE.MEDIUMBUSINESS_SERVER_MANAGEMENT; pub const PRODUCT_MEDIUMBUSINESS_SERVER_MESSAGING = OS_PRODUCT_TYPE.MEDIUMBUSINESS_SERVER_MESSAGING; pub const PRODUCT_MEDIUMBUSINESS_SERVER_SECURITY = OS_PRODUCT_TYPE.MEDIUMBUSINESS_SERVER_SECURITY; pub const PRODUCT_MOBILE_CORE = OS_PRODUCT_TYPE.MOBILE_CORE; pub const PRODUCT_MOBILE_ENTERPRISE = OS_PRODUCT_TYPE.MOBILE_ENTERPRISE; pub const PRODUCT_MULTIPOINT_PREMIUM_SERVER = OS_PRODUCT_TYPE.MULTIPOINT_PREMIUM_SERVER; pub const PRODUCT_MULTIPOINT_STANDARD_SERVER = OS_PRODUCT_TYPE.MULTIPOINT_STANDARD_SERVER; pub const PRODUCT_PRO_WORKSTATION = OS_PRODUCT_TYPE.PRO_WORKSTATION; pub const PRODUCT_PRO_WORKSTATION_N = OS_PRODUCT_TYPE.PRO_WORKSTATION_N; pub const PRODUCT_PROFESSIONAL = OS_PRODUCT_TYPE.PROFESSIONAL; pub const PRODUCT_PROFESSIONAL_E = OS_PRODUCT_TYPE.PROFESSIONAL_E; pub const PRODUCT_PROFESSIONAL_N = OS_PRODUCT_TYPE.PROFESSIONAL_N; pub const PRODUCT_PROFESSIONAL_WMC = OS_PRODUCT_TYPE.PROFESSIONAL_WMC; pub const PRODUCT_SB_SOLUTION_SERVER = OS_PRODUCT_TYPE.SB_SOLUTION_SERVER; pub const PRODUCT_SB_SOLUTION_SERVER_EM = OS_PRODUCT_TYPE.SB_SOLUTION_SERVER_EM; pub const PRODUCT_SERVER_FOR_SB_SOLUTIONS = OS_PRODUCT_TYPE.SERVER_FOR_SB_SOLUTIONS; pub const PRODUCT_SERVER_FOR_SB_SOLUTIONS_EM = OS_PRODUCT_TYPE.SERVER_FOR_SB_SOLUTIONS_EM; pub const PRODUCT_SERVER_FOR_SMALLBUSINESS = OS_PRODUCT_TYPE.SERVER_FOR_SMALLBUSINESS; pub const PRODUCT_SERVER_FOR_SMALLBUSINESS_V = OS_PRODUCT_TYPE.SERVER_FOR_SMALLBUSINESS_V; pub const PRODUCT_SERVER_FOUNDATION = OS_PRODUCT_TYPE.SERVER_FOUNDATION; pub const PRODUCT_SMALLBUSINESS_SERVER = OS_PRODUCT_TYPE.SMALLBUSINESS_SERVER; pub const PRODUCT_SMALLBUSINESS_SERVER_PREMIUM = OS_PRODUCT_TYPE.SMALLBUSINESS_SERVER_PREMIUM; pub const PRODUCT_SMALLBUSINESS_SERVER_PREMIUM_CORE = OS_PRODUCT_TYPE.SMALLBUSINESS_SERVER_PREMIUM_CORE; pub const PRODUCT_SOLUTION_EMBEDDEDSERVER = OS_PRODUCT_TYPE.SOLUTION_EMBEDDEDSERVER; pub const PRODUCT_STANDARD_EVALUATION_SERVER = OS_PRODUCT_TYPE.STANDARD_EVALUATION_SERVER; pub const PRODUCT_STANDARD_SERVER = OS_PRODUCT_TYPE.STANDARD_SERVER; pub const PRODUCT_STANDARD_SERVER_CORE_ = OS_PRODUCT_TYPE.STANDARD_SERVER_CORE_; pub const PRODUCT_STANDARD_SERVER_CORE_V = OS_PRODUCT_TYPE.STANDARD_SERVER_CORE_V; pub const PRODUCT_STANDARD_SERVER_V = OS_PRODUCT_TYPE.STANDARD_SERVER_V; pub const PRODUCT_STANDARD_SERVER_SOLUTIONS = OS_PRODUCT_TYPE.STANDARD_SERVER_SOLUTIONS; 
pub const PRODUCT_STANDARD_SERVER_SOLUTIONS_CORE = OS_PRODUCT_TYPE.STANDARD_SERVER_SOLUTIONS_CORE; pub const PRODUCT_STARTER = OS_PRODUCT_TYPE.STARTER; pub const PRODUCT_STARTER_E = OS_PRODUCT_TYPE.STARTER_E; pub const PRODUCT_STARTER_N = OS_PRODUCT_TYPE.STARTER_N; pub const PRODUCT_STORAGE_ENTERPRISE_SERVER = OS_PRODUCT_TYPE.STORAGE_ENTERPRISE_SERVER; pub const PRODUCT_STORAGE_ENTERPRISE_SERVER_CORE = OS_PRODUCT_TYPE.STORAGE_ENTERPRISE_SERVER_CORE; pub const PRODUCT_STORAGE_EXPRESS_SERVER = OS_PRODUCT_TYPE.STORAGE_EXPRESS_SERVER; pub const PRODUCT_STORAGE_EXPRESS_SERVER_CORE = OS_PRODUCT_TYPE.STORAGE_EXPRESS_SERVER_CORE; pub const PRODUCT_STORAGE_STANDARD_EVALUATION_SERVER = OS_PRODUCT_TYPE.STORAGE_STANDARD_EVALUATION_SERVER; pub const PRODUCT_STORAGE_STANDARD_SERVER = OS_PRODUCT_TYPE.STORAGE_STANDARD_SERVER; pub const PRODUCT_STORAGE_STANDARD_SERVER_CORE = OS_PRODUCT_TYPE.STORAGE_STANDARD_SERVER_CORE; pub const PRODUCT_STORAGE_WORKGROUP_EVALUATION_SERVER = OS_PRODUCT_TYPE.STORAGE_WORKGROUP_EVALUATION_SERVER; pub const PRODUCT_STORAGE_WORKGROUP_SERVER = OS_PRODUCT_TYPE.STORAGE_WORKGROUP_SERVER; pub const PRODUCT_STORAGE_WORKGROUP_SERVER_CORE = OS_PRODUCT_TYPE.STORAGE_WORKGROUP_SERVER_CORE; pub const PRODUCT_ULTIMATE = OS_PRODUCT_TYPE.ULTIMATE; pub const PRODUCT_ULTIMATE_E = OS_PRODUCT_TYPE.ULTIMATE_E; pub const PRODUCT_ULTIMATE_N = OS_PRODUCT_TYPE.ULTIMATE_N; pub const PRODUCT_UNDEFINED = OS_PRODUCT_TYPE.UNDEFINED; pub const PRODUCT_WEB_SERVER = OS_PRODUCT_TYPE.WEB_SERVER; pub const PRODUCT_WEB_SERVER_CORE = OS_PRODUCT_TYPE.WEB_SERVER_CORE; pub const FIRMWARE_TABLE_ID = u32; pub const WOW64_ARCHITECTURE_INFORMATION = extern struct { _bitfield: u32, }; pub const FIRMWARE_TYPE = enum(i32) { Unknown = 0, Bios = 1, Uefi = 2, Max = 3, }; pub const FirmwareTypeUnknown = FIRMWARE_TYPE.Unknown; pub const FirmwareTypeBios = FIRMWARE_TYPE.Bios; pub const FirmwareTypeUefi = FIRMWARE_TYPE.Uefi; pub const FirmwareTypeMax = FIRMWARE_TYPE.Max; pub const LOGICAL_PROCESSOR_RELATIONSHIP = enum(i32) { ProcessorCore = 0, NumaNode = 1, Cache = 2, ProcessorPackage = 3, Group = 4, All = 65535, }; pub const RelationProcessorCore = LOGICAL_PROCESSOR_RELATIONSHIP.ProcessorCore; pub const RelationNumaNode = LOGICAL_PROCESSOR_RELATIONSHIP.NumaNode; pub const RelationCache = LOGICAL_PROCESSOR_RELATIONSHIP.Cache; pub const RelationProcessorPackage = LOGICAL_PROCESSOR_RELATIONSHIP.ProcessorPackage; pub const RelationGroup = LOGICAL_PROCESSOR_RELATIONSHIP.Group; pub const RelationAll = LOGICAL_PROCESSOR_RELATIONSHIP.All; pub const SYSTEM_LOGICAL_PROCESSOR_INFORMATION = extern struct { ProcessorMask: usize, Relationship: LOGICAL_PROCESSOR_RELATIONSHIP, Anonymous: extern union { ProcessorCore: extern struct { Flags: u8, }, NumaNode: extern struct { NodeNumber: u32, }, Cache: CACHE_DESCRIPTOR, Reserved: [2]u64, }, }; pub const SYSTEM_PROCESSOR_CYCLE_TIME_INFORMATION = extern struct { CycleTime: u64, }; pub const OSVERSIONINFOA = extern struct { dwOSVersionInfoSize: u32, dwMajorVersion: u32, dwMinorVersion: u32, dwBuildNumber: u32, dwPlatformId: u32, szCSDVersion: [128]CHAR, }; pub const OSVERSIONINFOW = extern struct { dwOSVersionInfoSize: u32, dwMajorVersion: u32, dwMinorVersion: u32, dwBuildNumber: u32, dwPlatformId: u32, szCSDVersion: [128]u16, }; pub const OSVERSIONINFOEXA = extern struct { dwOSVersionInfoSize: u32, dwMajorVersion: u32, dwMinorVersion: u32, dwBuildNumber: u32, dwPlatformId: u32, szCSDVersion: [128]CHAR, wServicePackMajor: u16, wServicePackMinor: u16, wSuiteMask: u16, wProductType: u8, wReserved: u8, }; 
pub const OSVERSIONINFOEXW = extern struct { dwOSVersionInfoSize: u32, dwMajorVersion: u32, dwMinorVersion: u32, dwBuildNumber: u32, dwPlatformId: u32, szCSDVersion: [128]u16, wServicePackMajor: u16, wServicePackMinor: u16, wSuiteMask: u16, wProductType: u8, wReserved: u8, }; pub const SYSTEM_INFO = extern struct { Anonymous: extern union { dwOemId: u32, Anonymous: extern struct { wProcessorArchitecture: PROCESSOR_ARCHITECTURE, wReserved: u16, }, }, dwPageSize: u32, lpMinimumApplicationAddress: ?*c_void, lpMaximumApplicationAddress: ?*c_void, dwActiveProcessorMask: usize, dwNumberOfProcessors: u32, dwProcessorType: u32, dwAllocationGranularity: u32, wProcessorLevel: u16, wProcessorRevision: u16, }; pub const MEMORYSTATUSEX = extern struct { dwLength: u32, dwMemoryLoad: u32, ullTotalPhys: u64, ullAvailPhys: u64, ullTotalPageFile: u64, ullAvailPageFile: u64, ullTotalVirtual: u64, ullAvailVirtual: u64, ullAvailExtendedVirtual: u64, }; pub const COMPUTER_NAME_FORMAT = enum(i32) { NetBIOS = 0, DnsHostname = 1, DnsDomain = 2, DnsFullyQualified = 3, PhysicalNetBIOS = 4, PhysicalDnsHostname = 5, PhysicalDnsDomain = 6, PhysicalDnsFullyQualified = 7, Max = 8, }; pub const ComputerNameNetBIOS = COMPUTER_NAME_FORMAT.NetBIOS; pub const ComputerNameDnsHostname = COMPUTER_NAME_FORMAT.DnsHostname; pub const ComputerNameDnsDomain = COMPUTER_NAME_FORMAT.DnsDomain; pub const ComputerNameDnsFullyQualified = COMPUTER_NAME_FORMAT.DnsFullyQualified; pub const ComputerNamePhysicalNetBIOS = COMPUTER_NAME_FORMAT.PhysicalNetBIOS; pub const ComputerNamePhysicalDnsHostname = COMPUTER_NAME_FORMAT.PhysicalDnsHostname; pub const ComputerNamePhysicalDnsDomain = COMPUTER_NAME_FORMAT.PhysicalDnsDomain; pub const ComputerNamePhysicalDnsFullyQualified = COMPUTER_NAME_FORMAT.PhysicalDnsFullyQualified; pub const ComputerNameMax = COMPUTER_NAME_FORMAT.Max; pub const MEMORYSTATUS = extern struct { dwLength: u32, dwMemoryLoad: u32, dwTotalPhys: usize, dwAvailPhys: usize, dwTotalPageFile: usize, dwAvailPageFile: usize, dwTotalVirtual: usize, dwAvailVirtual: usize, }; pub const DEP_SYSTEM_POLICY_TYPE = enum(i32) { PolicyAlwaysOff = 0, PolicyAlwaysOn = 1, PolicyOptIn = 2, PolicyOptOut = 3, TotalPolicyCount = 4, }; pub const DEPPolicyAlwaysOff = DEP_SYSTEM_POLICY_TYPE.PolicyAlwaysOff; pub const DEPPolicyAlwaysOn = DEP_SYSTEM_POLICY_TYPE.PolicyAlwaysOn; pub const DEPPolicyOptIn = DEP_SYSTEM_POLICY_TYPE.PolicyOptIn; pub const DEPPolicyOptOut = DEP_SYSTEM_POLICY_TYPE.PolicyOptOut; pub const DEPTotalPolicyCount = DEP_SYSTEM_POLICY_TYPE.TotalPolicyCount; pub const PGET_SYSTEM_WOW64_DIRECTORY_A = fn( lpBuffer: ?[*:0]u8, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; pub const PGET_SYSTEM_WOW64_DIRECTORY_W = fn( lpBuffer: ?[*:0]u16, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; //-------------------------------------------------------------------------------- // Section: Functions (55) //-------------------------------------------------------------------------------- // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GlobalMemoryStatusEx( lpBuffer: ?*MEMORYSTATUSEX, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemInfo( lpSystemInfo: ?*SYSTEM_INFO, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemTime( lpSystemTime: ?*SYSTEMTIME, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type 
is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemTimeAsFileTime( lpSystemTimeAsFileTime: ?*FILETIME, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetLocalTime( lpSystemTime: ?*SYSTEMTIME, ) callconv(@import("std").os.windows.WINAPI) void; pub extern "KERNEL32" fn IsUserCetAvailableInEnvironment( UserCetEnvironment: USER_CET_ENVIRONMENT, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub extern "KERNEL32" fn GetSystemLeapSecondInformation( Enabled: ?*BOOL, Flags: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetVersion( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn SetLocalTime( lpSystemTime: ?*const SYSTEMTIME, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetTickCount( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetTickCount64( ) callconv(@import("std").os.windows.WINAPI) u64; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemTimeAdjustment( lpTimeAdjustment: ?*u32, lpTimeIncrement: ?*u32, lpTimeAdjustmentDisabled: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows10.0.10240' pub extern "api-ms-win-core-sysinfo-l1-2-4" fn GetSystemTimeAdjustmentPrecise( lpTimeAdjustment: ?*u64, lpTimeIncrement: ?*u64, lpTimeAdjustmentDisabled: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemDirectoryA( lpBuffer: ?[*:0]u8, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemDirectoryW( lpBuffer: ?[*:0]u16, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetWindowsDirectoryA( lpBuffer: ?[*:0]u8, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetWindowsDirectoryW( lpBuffer: ?[*:0]u16, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemWindowsDirectoryA( lpBuffer: ?[*:0]u8, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetSystemWindowsDirectoryW( lpBuffer: ?[*:0]u16, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetComputerNameExA( NameType: COMPUTER_NAME_FORMAT, lpBuffer: ?[*:0]u8, nSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetComputerNameExW( NameType: COMPUTER_NAME_FORMAT, lpBuffer: ?[*:0]u16, nSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn SetComputerNameExW( NameType: COMPUTER_NAME_FORMAT, lpBuffer: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is 
limited to platform 'windows5.0' pub extern "KERNEL32" fn SetSystemTime( lpSystemTime: ?*const SYSTEMTIME, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetVersionExA( lpVersionInformation: ?*OSVERSIONINFOA, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn GetVersionExW( lpVersionInformation: ?*OSVERSIONINFOW, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetLogicalProcessorInformation( // TODO: what to do with BytesParamIndex 1? Buffer: ?*SYSTEM_LOGICAL_PROCESSOR_INFORMATION, ReturnedLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows6.1' pub extern "KERNEL32" fn GetLogicalProcessorInformationEx( RelationshipType: LOGICAL_PROCESSOR_RELATIONSHIP, // TODO: what to do with BytesParamIndex 2? Buffer: ?*SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, ReturnedLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetNativeSystemInfo( lpSystemInfo: ?*SYSTEM_INFO, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows8.0' pub extern "KERNEL32" fn GetSystemTimePreciseAsFileTime( lpSystemTimeAsFileTime: ?*FILETIME, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetProductInfo( dwOSMajorVersion: u32, dwOSMinorVersion: u32, dwSpMajorVersion: u32, dwSpMinorVersion: u32, pdwReturnedProductType: ?*OS_PRODUCT_TYPE, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn VerSetConditionMask( ConditionMask: u64, TypeMask: VER_FLAGS, Condition: u8, ) callconv(@import("std").os.windows.WINAPI) u64; pub extern "api-ms-win-core-sysinfo-l1-2-0" fn GetOsSafeBootMode( Flags: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn EnumSystemFirmwareTables( FirmwareTableProviderSignature: FIRMWARE_TABLE_PROVIDER, // TODO: what to do with BytesParamIndex 2? pFirmwareTableEnumBuffer: ?*FIRMWARE_TABLE_ID, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetSystemFirmwareTable( FirmwareTableProviderSignature: FIRMWARE_TABLE_PROVIDER, FirmwareTableID: FIRMWARE_TABLE_ID, // TODO: what to do with BytesParamIndex 3? 
pFirmwareTableBuffer: ?*c_void, BufferSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "KERNEL32" fn DnsHostnameToComputerNameExW( Hostname: ?[*:0]const u16, ComputerName: ?[*:0]u16, nSize: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetPhysicallyInstalledSystemMemory( TotalMemoryInKilobytes: ?*u64, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub extern "KERNEL32" fn SetComputerNameEx2W( NameType: COMPUTER_NAME_FORMAT, Flags: u32, lpBuffer: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn SetSystemTimeAdjustment( dwTimeAdjustment: u32, bTimeAdjustmentDisabled: BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows10.0.10240' pub extern "api-ms-win-core-sysinfo-l1-2-4" fn SetSystemTimeAdjustmentPrecise( dwTimeAdjustment: u64, bTimeAdjustmentDisabled: BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows6.1' pub extern "KERNEL32" fn GetProcessorSystemCycleTime( Group: u16, // TODO: what to do with BytesParamIndex 2? Buffer: ?*SYSTEM_PROCESSOR_CYCLE_TIME_INFORMATION, ReturnedLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub extern "api-ms-win-core-sysinfo-l1-2-3" fn GetOsManufacturingMode( pbEnabled: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows10.0.10240' pub extern "api-ms-win-core-sysinfo-l1-2-3" fn GetIntegratedDisplaySize( sizeInInches: ?*f64, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn SetComputerNameA( lpComputerName: ?[*:0]const u8, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn SetComputerNameW( lpComputerName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn SetComputerNameExA( NameType: COMPUTER_NAME_FORMAT, lpBuffer: ?[*:0]const u8, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetSystemWow64DirectoryA( lpBuffer: ?[*:0]u8, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GetSystemWow64DirectoryW( lpBuffer: ?[*:0]u16, uSize: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows10.0.10586' pub extern "api-ms-win-core-wow64-l1-1-1" fn GetSystemWow64Directory2A( lpBuffer: ?[*:0]u8, uSize: u32, ImageFileMachineType: u16, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows10.0.10586' pub extern "api-ms-win-core-wow64-l1-1-1" fn GetSystemWow64Directory2W( lpBuffer: ?[*:0]u16, uSize: u32, ImageFileMachineType: u16, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows10.0.16299' pub extern "KERNEL32" fn IsWow64GuestMachineSupported( WowGuestMachine: u16, MachineIsSupported: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows5.1.2600' pub extern "KERNEL32" fn GlobalMemoryStatus( lpBuffer: ?*MEMORYSTATUS, ) 
callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetSystemDEPPolicy( ) callconv(@import("std").os.windows.WINAPI) DEP_SYSTEM_POLICY_TYPE; // TODO: this type is limited to platform 'windows8.0' pub extern "KERNEL32" fn GetFirmwareType( FirmwareType: ?*FIRMWARE_TYPE, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn VerifyVersionInfoA( lpVersionInformation: ?*OSVERSIONINFOEXA, dwTypeMask: VER_FLAGS, dwlConditionMask: u64, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "KERNEL32" fn VerifyVersionInfoW( lpVersionInformation: ?*OSVERSIONINFOEXW, dwTypeMask: VER_FLAGS, dwlConditionMask: u64, ) callconv(@import("std").os.windows.WINAPI) BOOL; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (13) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { pub const OSVERSIONINFO = thismodule.OSVERSIONINFOA; pub const OSVERSIONINFOEX = thismodule.OSVERSIONINFOEXA; pub const PGET_SYSTEM_WOW64_DIRECTORY_ = thismodule.PGET_SYSTEM_WOW64_DIRECTORY_A; pub const GetSystemDirectory = thismodule.GetSystemDirectoryA; pub const GetWindowsDirectory = thismodule.GetWindowsDirectoryA; pub const GetSystemWindowsDirectory = thismodule.GetSystemWindowsDirectoryA; pub const GetComputerNameEx = thismodule.GetComputerNameExA; pub const SetComputerNameEx = thismodule.SetComputerNameExA; pub const GetVersionEx = thismodule.GetVersionExA; pub const SetComputerName = thismodule.SetComputerNameA; pub const GetSystemWow64Directory = thismodule.GetSystemWow64DirectoryA; pub const GetSystemWow64Directory2 = thismodule.GetSystemWow64Directory2A; pub const VerifyVersionInfo = thismodule.VerifyVersionInfoA; }, .wide => struct { pub const OSVERSIONINFO = thismodule.OSVERSIONINFOW; pub const OSVERSIONINFOEX = thismodule.OSVERSIONINFOEXW; pub const PGET_SYSTEM_WOW64_DIRECTORY_ = thismodule.PGET_SYSTEM_WOW64_DIRECTORY_W; pub const GetSystemDirectory = thismodule.GetSystemDirectoryW; pub const GetWindowsDirectory = thismodule.GetWindowsDirectoryW; pub const GetSystemWindowsDirectory = thismodule.GetSystemWindowsDirectoryW; pub const GetComputerNameEx = thismodule.GetComputerNameExW; pub const SetComputerNameEx = thismodule.SetComputerNameExW; pub const GetVersionEx = thismodule.GetVersionExW; pub const SetComputerName = thismodule.SetComputerNameW; pub const GetSystemWow64Directory = thismodule.GetSystemWow64DirectoryW; pub const GetSystemWow64Directory2 = thismodule.GetSystemWow64Directory2W; pub const VerifyVersionInfo = thismodule.VerifyVersionInfoW; }, .unspecified => if (@import("builtin").is_test) struct { pub const OSVERSIONINFO = *opaque{}; pub const OSVERSIONINFOEX = *opaque{}; pub const PGET_SYSTEM_WOW64_DIRECTORY_ = *opaque{}; pub const GetSystemDirectory = *opaque{}; pub const GetWindowsDirectory = *opaque{}; pub const GetSystemWindowsDirectory = *opaque{}; pub const GetComputerNameEx = *opaque{}; pub const SetComputerNameEx = *opaque{}; pub const GetVersionEx = *opaque{}; pub const SetComputerName = *opaque{}; pub const GetSystemWow64Directory = *opaque{}; pub const GetSystemWow64Directory2 = *opaque{}; pub const VerifyVersionInfo = *opaque{}; } else struct { pub const OSVERSIONINFO = 
@compileError("'OSVERSIONINFO' requires that UNICODE be set to true or false in the root module"); pub const OSVERSIONINFOEX = @compileError("'OSVERSIONINFOEX' requires that UNICODE be set to true or false in the root module"); pub const PGET_SYSTEM_WOW64_DIRECTORY_ = @compileError("'PGET_SYSTEM_WOW64_DIRECTORY_' requires that UNICODE be set to true or false in the root module"); pub const GetSystemDirectory = @compileError("'GetSystemDirectory' requires that UNICODE be set to true or false in the root module"); pub const GetWindowsDirectory = @compileError("'GetWindowsDirectory' requires that UNICODE be set to true or false in the root module"); pub const GetSystemWindowsDirectory = @compileError("'GetSystemWindowsDirectory' requires that UNICODE be set to true or false in the root module"); pub const GetComputerNameEx = @compileError("'GetComputerNameEx' requires that UNICODE be set to true or false in the root module"); pub const SetComputerNameEx = @compileError("'SetComputerNameEx' requires that UNICODE be set to true or false in the root module"); pub const GetVersionEx = @compileError("'GetVersionEx' requires that UNICODE be set to true or false in the root module"); pub const SetComputerName = @compileError("'SetComputerName' requires that UNICODE be set to true or false in the root module"); pub const GetSystemWow64Directory = @compileError("'GetSystemWow64Directory' requires that UNICODE be set to true or false in the root module"); pub const GetSystemWow64Directory2 = @compileError("'GetSystemWow64Directory2' requires that UNICODE be set to true or false in the root module"); pub const VerifyVersionInfo = @compileError("'VerifyVersionInfo' requires that UNICODE be set to true or false in the root module"); }, }; //-------------------------------------------------------------------------------- // Section: Imports (10) //-------------------------------------------------------------------------------- const BOOL = @import("../foundation.zig").BOOL; const CACHE_DESCRIPTOR = @import("../system/system_services.zig").CACHE_DESCRIPTOR; const CHAR = @import("../system/system_services.zig").CHAR; const FILETIME = @import("../foundation.zig").FILETIME; const HRESULT = @import("../foundation.zig").HRESULT; const PROCESSOR_ARCHITECTURE = @import("../system/diagnostics/debug.zig").PROCESSOR_ARCHITECTURE; const PSTR = @import("../foundation.zig").PSTR; const PWSTR = @import("../foundation.zig").PWSTR; const SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX = @import("../system/system_services.zig").SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX; const SYSTEMTIME = @import("../foundation.zig").SYSTEMTIME; test { // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476 if (@hasDecl(@This(), "PGET_SYSTEM_WOW64_DIRECTORY_A")) { _ = PGET_SYSTEM_WOW64_DIRECTORY_A; } if (@hasDecl(@This(), "PGET_SYSTEM_WOW64_DIRECTORY_W")) { _ = PGET_SYSTEM_WOW64_DIRECTORY_W; } @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/system/system_information.zig
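// ----------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated system_information.zig
// above). A minimal, hedged example of calling two of the KERNEL32 bindings it
// declares: GlobalMemoryStatusEx requires dwLength to be set to the struct size
// before the call, while GetSystemInfo returns void and always fills its out
// parameter. The import path is an assumption; real code would normally go
// through the zigwin32 package root.
const std = @import("std");
const sysinfo = @import("deps/zigwin32/win32/system/system_information.zig"); // assumed path

fn printBasicSystemInfoSketch() void {
    var mem_status = std.mem.zeroes(sysinfo.MEMORYSTATUSEX);
    mem_status.dwLength = @sizeOf(sysinfo.MEMORYSTATUSEX); // required by the API contract
    if (sysinfo.GlobalMemoryStatusEx(&mem_status) != 0) {
        std.debug.print("total physical memory: {} bytes\n", .{mem_status.ullTotalPhys});
    }

    var info: sysinfo.SYSTEM_INFO = undefined;
    sysinfo.GetSystemInfo(&info); // no failure mode; the struct is always filled
    std.debug.print("page size: {}, logical processors: {}\n", .{ info.dwPageSize, info.dwNumberOfProcessors });
}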
const zt = @import("zt"); const main = @import("../main.zig"); const ig = @import("imgui"); const zg = zt.custom_components; var rotation: f32 = 0.0; var scale: f32 = 1.0; var verts = [_]zt.math.Vec2{ zt.math.vec2(-70, 0), zt.math.vec2(0, -50), zt.math.vec2(-20, 0), zt.math.vec2(0, 50) }; var rect: zt.game.Physics.Shape = zt.game.Physics.Shape.rectangle(.{}, .{ .x = 100, .y = 50 }); var rectPos: zt.math.Vec2 = .{}; var line: zt.game.Physics.Shape = zt.game.Physics.Shape.line(.{}, .{ .x = 100, .y = 100 }); var linePos: zt.math.Vec2 = .{}; var circle: zt.game.Physics.Shape = zt.game.Physics.Shape.circle(20.0); var circlePos: zt.math.Vec2 = .{}; var point: zt.game.Physics.Shape = zt.game.Physics.Shape.point(.{}); var pointPos: zt.math.Vec2 = .{}; var poly: zt.game.Physics.Shape = zt.game.Physics.Shape{ .Polygon = .{ .vertices = &verts } }; var polyPos: zt.math.Vec2 = .{}; pub fn update(ctx: *main.SampleApplication.Context) void { control(); var render = ctx.data.render; var io = ig.igGetIO(); // It's important to set the render size, then the camera. This applies the matrices used to display all the sprites. render.updateRenderSize(io.*.DisplaySize); render.updateCamera(.{}, scale, rotation); // Line Checks. { const col = if (line.overlaps(linePos, rect, rectPos) or line.overlaps(linePos, circle, circlePos) or line.overlaps(linePos, poly, polyPos) or line.overlaps(linePos, point, pointPos)) zt.math.Vec4.white else zt.math.vec4(0.0, 0.5, 0.5, 0.7); render.line(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), linePos.add(line.Line.start), linePos.add(line.Line.end), 0, 4.0, col, col); } // Circle Checks. { const col = if (circle.overlaps(circlePos, rect, rectPos) or circle.overlaps(circlePos, line, linePos) or circle.overlaps(circlePos, poly, polyPos) or circle.overlaps(circlePos, point, pointPos)) zt.math.Vec4.white else zt.math.vec4(0.0, 0.5, 0.5, 0.7); render.circle(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), circlePos, circle.Circle.radius, 0, col); } // Rect Checks. { const col = if (rect.overlaps(rectPos, line, linePos) or rect.overlaps(rectPos, circle, circlePos) or rect.overlaps(rectPos, poly, polyPos) or rect.overlaps(rectPos, point, pointPos)) zt.math.Vec4.white else zt.math.vec4(0.0, 0.5, 0.5, 0.7); var renderPos = rect.Rectangle; renderPos.position = renderPos.position.add(rectPos); render.rectangleHollow(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), renderPos, 0, 4.0, col); } // Point Checks. { const col = if (point.overlaps(pointPos, line, linePos) or point.overlaps(pointPos, circle, circlePos) or point.overlaps(pointPos, rect, rectPos) or point.overlaps(pointPos, poly, polyPos)) zt.math.Vec4.white else zt.math.vec4(0.0, 0.5, 0.5, 0.7); var renderPos = zt.math.rect(point.Point.x + pointPos.x - 1, point.Point.y + pointPos.y - 1, 2, 2); render.rectangleHollow(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), renderPos, 0, 4.0, col); } // Poly Checks. 
{ const col = if (poly.overlaps(polyPos, line, linePos) or poly.overlaps(polyPos, circle, circlePos) or poly.overlaps(polyPos, point, pointPos) or poly.overlaps(polyPos, rect, rectPos)) zt.math.Vec4.white else zt.math.vec4(0.0, 0.5, 0.5, 0.7); for (poly.Polygon.vertices) |v, i| { var next = if (i + 1 == poly.Polygon.vertices.len) poly.Polygon.vertices[0] else poly.Polygon.vertices[i + 1]; var pos = v.add(polyPos); var nextPos = next.add(polyPos); render.line(ctx.data.sheet, zt.math.rect(131, 84, 2, 2), pos, nextPos, 0, 4.0, col, col); } } render.flush(); } fn control() void { var io = ig.igGetIO(); ig.igSetNextWindowPos(io.*.DisplaySize, ig.ImGuiCond_Appearing, .{ .x = 1, .y = 1 }); if (ig.igBegin("Renderer Demo Settings", null, ig.ImGuiWindowFlags_None)) { ig.igPushItemWidth(ig.igGetWindowWidth() * 0.5); _ = ig.igDragFloat("Camera Rotation", &rotation, 0.02, zt.math.toRadians(-360.0), zt.math.toRadians(360.0), "%.3f", ig.ImGuiSliderFlags_None); _ = ig.igDragFloat("Camera Zoom", &scale, 0.02, 0.1, 16, "%.3f", ig.ImGuiSliderFlags_None); ig.igSeparator(); _ = zg.ztEditDrag("Line Pos", 0.1, &linePos); _ = zg.ztEditDrag("Rect Pos", 0.1, &rectPos); _ = zg.ztEditDrag("Circle Pos", 0.1, &circlePos); _ = zg.ztEditDrag("Point Pos", 0.1, &pointPos); _ = zg.ztEditDrag("Poly Pos", 0.1, &polyPos); ig.igPopItemWidth(); } ig.igEnd(); }
example/src/scenes/colliders.zig
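// ----------------------------------------------------------------------------
// Illustrative usage sketch (not part of the collider scene above). The scene
// builds zt.game.Physics.Shape values (rectangle, line, circle, point, polygon)
// and tests every pair with shape.overlaps(selfPos, other, otherPos), drawing a
// shape white while it touches anything. The same overlap query works outside
// the renderer; this sketch assumes the same `zt` package import the example uses.
const zt = @import("zt"); // assumed to resolve as in the example build

fn circleTouchesRectSketch() bool {
    var circle = zt.game.Physics.Shape.circle(20.0);
    var rect = zt.game.Physics.Shape.rectangle(.{}, .{ .x = 100, .y = 50 });
    // Positions are passed separately from the shapes, so one shape definition
    // can be reused at many locations, exactly as the scene's *Pos globals do.
    const circle_pos = zt.math.vec2(60, 10);
    const rect_pos: zt.math.Vec2 = .{};
    return circle.overlaps(circle_pos, rect, rect_pos);
}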
const u = @import("util.zig"); const std = @import("std"); pub const Linking = @import("IR/Linking.zig"); const iters = @import("IR/iters.zig"); pub const ulen = u32; pub const Module = struct { arena: std.heap.ArenaAllocator, funcs: []const Func = &[_]Func{}, tables: []const Table = &[_]Table{}, memory: ?Memory = null, globals: []const Global = &[_]Global{}, start: ?ulen = null, elements: []const Elem = &[_]Elem{}, datas: []const Data = &[_]Data{}, //linking: Linking, customs: []const Section.Custom = &[_]Section.Custom{}, pub fn init(allocator: std.mem.Allocator) Module { return .{ .arena = std.heap.ArenaAllocator.init(allocator) }; } pub fn deinit(self: Module) void { self.arena.deinit(); } pub fn link(_: []const Module, allocator: std.mem.Allocator) !Module { var m = init(allocator); //TODO: return m; } pub fn imports(self: *const Module) iters.Import { return .{ .m = self }; } pub fn exports(self: *const Module) iters.Export { return .{ .m = self }; } }; pub const Numtype = std.wasm.Valtype; pub const Valtype = Numtype; /// Interface type proposal definitions /// as https://github.com/WebAssembly/interface-types pub const Intertype = union(enum) { f32, f64, s8, u8, s16, u16, s32, u32, s64, u64, char, list: *const Intertype, /// like struct record: []const Field, /// like union variant: []const Field, pub const Field = struct { name: u.Txt, id: ?u.Txt, type: *const Intertype, }; pub const Abbrv = union(enum) { /// (list char) string, /// (record ("i" <intertype>)*) for i tuple: []const Intertype, /// (record (<name> bool)) flags: []const u.Txt, /// (variant ("false") ("true")) bool, /// (variant (<name>)*) @"enum": []const u.Txt, /// (variant ("none") ("some" <intertype)) option: Intertype, /// (variant ("i" <intertype>)*) for i @"union": []const Intertype, /// (variant ("ok" <intertype>?) 
("error" <intertype>?)) expected: struct { ok: ?Intertype, err: ?Intertype, }, }; }; /// Internal signature types pub const Sigtype = enum { i32, i64, f32, f64, s8, u8, s16, u16, s32, u32, s64, u64, pub fn lower(it: Sigtype) Numtype { return switch (it) { .i32, .s8, .u8, .s16, .u16, .s32, .u32 => .i32, .i64, .s64, .u64 => .i64, .f32 => .f32, .f64 => .f64, }; } pub fn upper(a: Numtype) Sigtype { return switch (a) { .i32 => .i32, .i64 => .i64, .f32 => .f32, .f64 => .f64, }; } pub fn eql(a: Sigtype, b: Sigtype) bool { return @enumToInt(a) == @enumToInt(b); } }; pub const InitExpr = std.wasm.InitExpression; pub inline fn initExpr(expr: InitExpr) Code.Op { return switch (expr) { .i32_const => .i32_const, .i64_const => .i64_const, .f32_const => .f32_const, .f64_const => .f64_const, .global_get => .global_get, }; } pub const Code = struct { bytes: u.Bin, /// block types (indexed as Reloc .typeIndexLeb) types: []const Func.Sig = &[_]Func.Sig{}, relocs: []const Linking.Reloc.Entry = &[_]Linking.Reloc.Entry{}, pub const Op = std.wasm.Opcode; pub const MemArg = struct { align_: u32, offset: u32 = 0, }; }; pub const Func = struct { body: union(enum) { import: ImportName, code: Code, }, id: ?u.Txt = null, exports: []const ExportName = &[_]ExportName{}, type: Sig, pub const Sig = struct { params: []const Sigtype = &[_]Sigtype{}, results: []const Sigtype = &[_]Sigtype{}, }; }; pub const Table = struct { body: union(enum) { import: ImportName, intern: void }, id: ?u.Txt = null, exports: []const ExportName = &[_]ExportName{}, type: std.wasm.RefType, size: std.wasm.Limits, }; pub const Memory = struct { import: ?ImportName = null, id: ?u.Txt = null, exports: []const ExportName = &[_]ExportName{}, size: std.wasm.Limits, }; pub const Global = struct { body: union(enum) { import: ImportName, init: InitExpr, }, id: ?u.Txt = null, exports: []const ExportName = &[_]ExportName{}, type: Sigtype, mutable: bool, }; pub const Elem = struct { type: Type, init: union(enum) { val: []const InitExpr, func: []const u32, }, mode: Mode, pub const Type = enum(u8) { elemkind = 0x00, funcref = 0x70, externref = 0x6F, }; const Mode = union(enum) { passive, active: struct { table: u32, offset: InitExpr, }, declarative, }; }; pub const Data = struct { body: union(enum) { active: struct { mem: u32, offset: InitExpr, content: u.Bin, }, passive: u.Bin, }, id: ?u.Txt = null, }; pub const ImportName = struct { module: u.Txt, name: u.Txt, }; pub const ExportName = u.Txt; pub const Section = struct { pub const Type = std.wasm.Section; pub const Custom = struct { name: u.Txt, content: u.Bin, after: Type, relocs: []const Linking.Reloc.Entry = &[_]Linking.Reloc.Entry{}, }; };
src/IR.zig
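// ----------------------------------------------------------------------------
// Illustrative usage sketch (not part of src/IR.zig above). Module owns all of
// its slices through the arena created in init() and freed in deinit(), so a
// builder typically allocates from module.arena and then points the module's
// fields at that memory. A hedged sketch that produces a module holding one
// immutable i32 global; the import path is an assumption and error handling is
// kept minimal.
const std = @import("std");
const IR = @import("src/IR.zig"); // assumed path relative to the repository root

fn buildConstGlobalModuleSketch(gpa: std.mem.Allocator) !IR.Module {
    var module = IR.Module.init(gpa);
    errdefer module.deinit();

    const globals = try module.arena.allocator().alloc(IR.Global, 1);
    globals[0] = .{
        // `id` and `exports` keep their declared defaults (null / empty slice).
        .body = .{ .init = .{ .i32_const = 42 } },
        .type = .i32,
        .mutable = false,
    };
    module.globals = globals;
    return module;
}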
const __floattisf = @import("floatXisf.zig").__floattisf; const testing = @import("std").testing; fn test__floattisf(a: i128, expected: f32) !void { const x = __floattisf(a); try testing.expect(x == expected); } test "floattisf" { try test__floattisf(0, 0.0); try test__floattisf(1, 1.0); try test__floattisf(2, 2.0); try test__floattisf(-1, -1.0); try test__floattisf(-2, -2.0); try test__floattisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62); try test__floattisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62); try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000008000000000), -0x1.FFFFFEp+62); try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000010000000000), -0x1.FFFFFCp+62); try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000000), -0x1.000000p+63); try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000001), -0x1.000000p+63); try test__floattisf(0x0007FB72E8000000, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72EA000000, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72EB000000, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72EC000000, 0x1.FEDCBCp+50); try test__floattisf(0x0007FB72E8000001, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72E6000000, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72E7000000, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72E4000001, 0x1.FEDCBAp+50); try test__floattisf(0x0007FB72E4000000, 0x1.FEDCB8p+50); try test__floattisf(make_ti(0x0007FB72E8000000, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72EA000000, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72EB000000, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72EBFFFFFF, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72EC000000, 0), 0x1.FEDCBCp+114); try test__floattisf(make_ti(0x0007FB72E8000001, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72E6000000, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72E7000000, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72E7FFFFFF, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72E4000001, 0), 0x1.FEDCBAp+114); try test__floattisf(make_ti(0x0007FB72E4000000, 0), 0x1.FEDCB8p+114); } fn make_ti(high: u64, low: u64) i128 { var result: u128 = high; result <<= 64; result |= low; return @bitCast(i128, result); }
lib/std/special/compiler_rt/floattisf_test.zig
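// ----------------------------------------------------------------------------
// Illustrative note on the rounding cases above (not part of the test file).
// f32 keeps a 24-bit significand, so near 2^50 adjacent f32 values are 2^27
// apart. The 0x0007FB72E...-series inputs straddle 0x1.FEDCB8p+50,
// 0x1.FEDCBAp+50 and 0x1.FEDCBCp+50: inputs strictly between two of those round
// to the nearer one, and the exact halfway inputs (0x0007FB72EC000000 and
// 0x0007FB72E4000000) round to the neighbour whose significand is even, which
// is why the tests expect 0x1.FEDCBCp+50 and 0x1.FEDCB8p+50 for them. A hedged
// cross-check using the language's own integer-to-float conversion, which at
// runtime lowers to the same compiler-rt routine exercised above:
const std = @import("std");

test "halfway i128 inputs round to the even f32 significand (sketch)" {
    var halfway_up: i128 = 0x0007FB72EC000000; // midpoint between 0x1.FEDCBAp+50 and 0x1.FEDCBCp+50
    var halfway_down: i128 = 0x0007FB72E4000000; // midpoint between 0x1.FEDCB8p+50 and 0x1.FEDCBAp+50
    try std.testing.expect(@intToFloat(f32, halfway_up) == 0x1.FEDCBCp+50);
    try std.testing.expect(@intToFloat(f32, halfway_down) == 0x1.FEDCB8p+50);
}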