code
stringlengths
38
801k
repo_path
stringlengths
6
263
const std = @import("std"); const mem = std.mem; const math = std.math; const testing = std.testing; const unicode = std.unicode; const bog = @import("bog.zig"); const Errors = bog.Errors; fn isWhiteSpace(c: u32) bool { return switch (c) { ' ', '\t', '\r', // NO-BREAK SPACE 0x00A0, // OGHAM SPACE MARK 0x1680, // MONGOLIAN VOWEL SEPARATOR 0x180E, // EN QUAD 0x2000, // EM QUAD 0x2001, // EN SPACE 0x2002, // EM SPACE 0x2003, // THREE-PER-EM SPACE 0x2004, // FOUR-PER-EM SPACE 0x2005, // SIX-PER-EM SPACE 0x2006, // FIGURE SPACE 0x2007, // PUNCTUATION SPACE 0x2008, // THIN SPACE 0x2009, // HAIR SPACE 0x200A, // ZERO WIDTH SPACE 0x200B, // NARROW NO-BREAK SPACE 0x202F, // MEDIUM MATHEMATICAL SPACE 0x205F, // IDEOGRAPHIC SPACE 0x3000, // ZERO WIDTH NO-BREAK SPACE 0xFEFF, // HALFWIDTH HANGUL FILLER 0xFFA0 => true, else => false, }; } fn isIdentifier(c: u32) bool { return switch (c) { 'a'...'z', 'A'...'Z', '_', '0'...'9', // unicode identifiers 0x00A8, 0x00AA, 0x00AD, 0x00AF, 0x00B2...0x00B5, 0x00B7...0x00BA, 0x00BC...0x00BE, 0x00C0...0x00D6, 0x00D8...0x00F6, 0x00F8...0x167F, 0x1681...0x180D, 0x180F...0x1FFF, 0x200B...0x200D, 0x202A...0x202E, 0x203F...0x2040, 0x2054, 0x2060...0x218F, 0x2460...0x24FF, 0x2776...0x2793, 0x2C00...0x2DFF, 0x2E80...0x2FFF, 0x3004...0x3007, 0x3021...0x302F, 0x3031...0xD7FF, 0xF900...0xFD3D, 0xFD40...0xFDCF, 0xFDF0...0xFE44, 0xFE47...0xFFFD, 0x10000...0x1FFFD, 0x20000...0x2FFFD, 0x30000...0x3FFFD, 0x40000...0x4FFFD, 0x50000...0x5FFFD, 0x60000...0x6FFFD, 0x70000...0x7FFFD, 0x80000...0x8FFFD, 0x90000...0x9FFFD, 0xA0000...0xAFFFD, 0xB0000...0xBFFFD, 0xC0000...0xCFFFD, 0xD0000...0xDFFFD, 0xE0000...0xEFFFD, => true, else => false, }; } pub const Token = struct { start: u32, end: u32, id: Id, pub const List = std.ArrayList(Token); pub const Index = u32; pub const Id = union(enum) { Eof, Indent: u16, Comment, Identifier, String, Integer, Number, Nl, Pipe, PipeEqual, Equal, EqualEqual, BangEqual, LParen, RParen, Percent, PercentEqual, LBrace, RBrace, 
LBracket, RBracket, Period, EqualRarr, Caret, CaretEqual, Plus, PlusEqual, PlusPlus, Minus, MinusEqual, Asterisk, AsteriskEqual, AsteriskAsterisk, AsteriskAsteriskEqual, Slash, SlashEqual, SlashSlash, SlashSlashEqual, Comma, Ampersand, AmpersandEqual, LArr, LArrEqual, LArrArr, LArrArrEqual, RArr, RArrEqual, RArrArr, RArrArrEqual, Tilde, Colon, Underscore, At, FormatStart, Format, FormatEnd, /// keywords Keyword_not, Keyword_and, Keyword_or, Keyword_let, Keyword_continue, Keyword_break, Keyword_return, Keyword_if, Keyword_else, Keyword_false, Keyword_true, Keyword_for, Keyword_while, Keyword_match, Keyword_catch, Keyword_try, Keyword_error, Keyword_import, Keyword_is, Keyword_in, Keyword_fn, Keyword_as, Keyword_const, Keyword_this, }; pub const keywords = std.ComptimeStringMap(Id, .{ .{ "not", .Keyword_not }, .{ "and", .Keyword_and }, .{ "or", .Keyword_or }, .{ "let", .Keyword_let }, .{ "continue", .Keyword_continue }, .{ "break", .Keyword_break }, .{ "return", .Keyword_return }, .{ "if", .Keyword_if }, .{ "else", .Keyword_else }, .{ "false", .Keyword_false }, .{ "true", .Keyword_true }, .{ "for", .Keyword_for }, .{ "while", .Keyword_while }, .{ "match", .Keyword_match }, .{ "catch", .Keyword_catch }, .{ "try", .Keyword_try }, .{ "error", .Keyword_error }, .{ "import", .Keyword_import }, .{ "is", .Keyword_is }, .{ "in", .Keyword_in }, .{ "fn", .Keyword_fn }, .{ "as", .Keyword_as }, .{ "const", .Keyword_const }, .{ "this", .Keyword_this }, .{ "_", .Underscore }, }); pub fn string(id: @TagType(Id)) []const u8 { return switch (id) { .Comment => "<Comment>", .Eof => "<EOF>", .Nl => "<NL>", .Indent => "<INDENT>", .Identifier => "Identifier", .String => "String", .Integer => "Integer", .Number => "Number", .Pipe => "|", .PipeEqual => "|=", .Equal => "=", .EqualEqual => "==", .BangEqual => "!=", .LParen => "(", .RParen => ")", .Percent => "%", .PercentEqual => "%=", .LBrace => "{", .RBrace => "}", .LBracket => "[", .RBracket => "]", .Period => ".", .EqualRarr => "=>", 
.Caret => "^", .CaretEqual => "^=", .Plus => "+", .PlusEqual => "+=", .PlusPlus => "++", .Minus => "-", .MinusEqual => "-=", .Asterisk => "*", .AsteriskEqual => "*=", .AsteriskAsterisk => "**", .AsteriskAsteriskEqual => "**=", .Slash => "/", .SlashEqual => "/=", .SlashSlash => "//", .SlashSlashEqual => "//=", .Comma => ",", .Ampersand => "&", .AmpersandEqual => "&=", .LArr => "<", .LArrEqual => "<=", .LArrArr => "<<", .LArrArrEqual => "<<=", .RArr => ">", .RArrEqual => ">=", .RArrArr => ">>", .RArrArrEqual => ">>=", .Tilde => "~", .Colon => ":", .Underscore => "_", .At => "@", .FormatStart, .Format, .FormatEnd => "Format string", .Keyword_not => "not", .Keyword_and => "and", .Keyword_or => "or", .Keyword_let => "let", .Keyword_continue => "continue", .Keyword_break => "break", .Keyword_return => "return", .Keyword_if => "if", .Keyword_else => "else", .Keyword_false => "false", .Keyword_true => "true", .Keyword_for => "for", .Keyword_while => "while", .Keyword_match => "match", .Keyword_catch => "catch", .Keyword_try => "try", .Keyword_error => "error", .Keyword_import => "import", .Keyword_is => "is", .Keyword_in => "in", .Keyword_fn => "fn", .Keyword_as => "as", .Keyword_const => "const", .Keyword_this => "this", }; } }; pub fn tokenize(allocator: *mem.Allocator, source: []const u8, errors: *Errors) Tokenizer.Error![]const Token { // estimate one token per 8 bytes to reduce allocation in the beginning const estimated = source.len / 8; var tokenizer = Tokenizer{ .tokens = try Token.List.initCapacity(allocator, estimated), .errors = errors, .it = .{ .i = 0, .bytes = source, }, .repl = false, }; errdefer tokenizer.tokens.deinit(); while (true) { const tok = try tokenizer.tokens.addOne(); tok.* = try tokenizer.next(); if (tok.id == .Eof) { // std.debug.warn("estimated: {}, actual: {}\n\n", .{estimated, tokenizer.tokens.items.len}); return tokenizer.tokens.toOwnedSlice(); } } } pub fn tokenizeRepl(repl: *@import("repl.zig").Repl) Tokenizer.Error!bool { // remove 
previous eof const self = &repl.tokenizer; self.it.bytes = repl.buffer.items; self.errors = &repl.vm.errors; if (self.tokens.items.len > 0) _ = self.tokens.pop(); const start_len = self.tokens.items.len; while (true) { const tok = try self.tokens.addOne(); tok.* = try self.next(); if (tok.id == .Eof) { // check if more input is expected return if (self.tokens.items.len == start_len + 2) true else if (self.paren_level != 0 or self.string or self.expect_indent or self.format_string != 0 or self.indent_level != 0) false else true; } } } pub const Tokenizer = struct { errors: *Errors, tokens: Token.List, it: unicode.Utf8Iterator, /// indentation specific variables indent_char: ?u32 = null, /// level of parentheses paren_level: u16 = 0, /// level of parentheses at the start of the format string format_paren_level: u16 = 0, /// how many of `indent_char` are in one indentation level chars_per_indent: ?u8 = null, indent_level: u16 = 0, /// format string delimiter, 0 if not in a format string format_string: u8 = 0, /// saw a nl, need to check for indentation expect_indent: bool = false, /// currently in a multiline string string: bool = false, repl: bool, pub const Error = error{TokenizeError} || mem.Allocator.Error; fn reportErr(self: *Tokenizer, msg: []const u8, c: u21) Error { try self.errors.add( .{ .data = msg }, @truncate(u32, self.it.i - (unicode.utf8CodepointSequenceLength(c) catch unreachable)), .err, ); self.it.i = self.it.bytes.len; return error.TokenizeError; } fn getIndent(self: *Tokenizer) !?Token { var start_index = self.it.i; var count: u8 = 0; // get all indentation characters while (self.it.nextCodepoint()) |c| switch (c) { '\r' => continue, '\n', ';' => { // empty line; rest count count = 0; if (self.repl) break; }, '#' => { self.it.i -= 1; return null; }, else => if (self.indent_char != null and c == self.indent_char.?) 
{ count += 1; } else if (isWhiteSpace(c)) { self.indent_char = c; count += 1; } else { self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; break; }, } else { if (self.repl) { if (self.indent_level == 0 and self.tokens.items.len > 2) { switch (self.tokens.items[self.tokens.items.len - 3].id) { // no further input is expected after these tokens // so we can stop asking for more input .Comment, .Identifier, .Keyword_this, .String, .Integer, .Number, .RBrace, .RBracket, .Underscore, .Keyword_break, .Keyword_continue, .Keyword_false, .Keyword_true, => return null, else => {}, } } self.expect_indent = true; return null; } // EOF level goes to zero count = 0; } if (count == 0) { // back to level zero, close all blocks self.indent_char = null; self.chars_per_indent = null; return null; } errdefer if (self.repl) { // reset indentation in case of error self.indent_char = null; self.chars_per_indent = null; }; if (self.chars_per_indent) |some| { if (count % some != 0) { // inconsistent amount of `ìndent_char`s per level return self.reportErr("inconsistent indentation", 'a'); } } else { self.chars_per_indent = count; } const level = @divExact(count, self.chars_per_indent.?); if (level > 50) { return self.reportErr("indentation exceeds maximum of 50 levels", 'a'); } // needed by the repl tokenizer self.indent_level = level; return Token{ .id = .{ .Indent = level }, .start = @truncate(u32, start_index), .end = @truncate(u32, self.it.i), }; } fn next(self: *Tokenizer) !Token { // get indent if (self.expect_indent) { self.expect_indent = false; if (try self.getIndent()) |some| return some; } var start_index = self.it.i; var state: enum { Start, String, EscapeSequence, HexEscape, UnicodeStart, UnicodeEscape, UnicodeEnd, Identifier, Equal, Bang, Pipe, Percent, Asterisk, AsteriskAsterisk, Plus, LArr, LArrArr, RArr, RArrArr, Caret, Minus, Slash, SlashSlash, Ampersand, LineComment, BinaryNumber, OctalNumber, HexNumber, Number, Zero, NumberDot, FloatFraction, 
FloatExponent, FloatExponentDigits, f, FormatString, FormatBrace, } = .Start; var res: Token.Id = .Eof; var str_delimit: u32 = undefined; var counter: u32 = 0; var dot_index: ?usize = null; var escape_end_state: @TypeOf(state) = .String; while (self.it.nextCodepoint()) |c| { switch (state) { .Start => switch (c) { '#' => { state = .LineComment; }, '\n', ';' => { res = .Nl; self.expect_indent = true; break; }, '"', '\'' => { self.string = true; str_delimit = c; state = .String; }, '=' => { state = .Equal; }, '!' => { state = .Bang; }, '|' => { state = .Pipe; }, '(' => { self.paren_level += 1; res = .LParen; break; }, ')' => { if (self.paren_level == 0) { return self.reportErr("unmatched ')'", c); } self.paren_level -= 1; res = .RParen; break; }, '[' => { self.paren_level += 1; res = .LBracket; break; }, ']' => { if (self.paren_level == 0) { return self.reportErr("unmatched ']'", c); } self.paren_level -= 1; res = .RBracket; break; }, ',' => { res = .Comma; break; }, '@' => { res = .At; break; }, '%' => { state = .Percent; }, '*' => { state = .Asterisk; }, '+' => { state = .Plus; }, '<' => { state = .LArr; }, '>' => { state = .RArr; }, '^' => { state = .Caret; }, '{' => { self.paren_level += 1; res = .LBrace; break; }, '}' => { if (self.format_string != 0 and self.format_paren_level == self.paren_level) { escape_end_state = .FormatString; state = .FormatString; res = .Format; continue; } if (self.paren_level == 0) { return self.reportErr("unmatched '}'", c); } self.paren_level -= 1; res = .RBrace; break; }, '~' => { res = .Tilde; break; }, ':' => { if (self.format_string != 0 and self.format_paren_level == self.paren_level) { escape_end_state = .FormatString; state = .FormatString; res = .Format; continue; } res = .Colon; break; }, '.' 
=> { res = .Period; break; }, '-' => { state = .Minus; }, '/' => { state = .Slash; }, '&' => { state = .Ampersand; }, '0' => { state = .Zero; }, '1'...'9' => { state = .Number; }, 'f' => if (self.format_string != 0) { state = .Identifier; } else { state = .f; }, else => { if (isWhiteSpace(c)) { start_index = self.it.i; } else if (isIdentifier(c)) { state = .Identifier; } else { return self.reportErr("invalid character", c); } }, }, .String => switch (c) { '\\' => { state = .EscapeSequence; }, '\n', '\r' => { if (str_delimit == '\'') { return self.reportErr("invalid newline, use'\"' for multiline strings", c); } }, else => { if (c == str_delimit) { self.string = false; res = .String; break; } }, }, .EscapeSequence => switch (c) { '\'', '"', '\\', 'r', 't', 'n' => { state = escape_end_state; }, 'x' => { counter = 0; state = .HexEscape; }, 'u' => { state = .UnicodeStart; }, else => { return self.reportErr("invalid escape sequence", c); }, }, .HexEscape => switch (c) { '0'...'9', 'a'...'f', 'A'...'F' => { counter += 1; if (counter > 2) { state = escape_end_state; } }, else => { if (counter != 2) { return self.reportErr("\\x pattern must be followed by 2 hex digits", c); } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; state = escape_end_state; }, }, .UnicodeStart => if (c == '{') { counter = 0; state = .UnicodeEscape; } else { return self.reportErr("expected '{' after '\\u'", c); }, .UnicodeEscape => switch (c) { '0'...'9', 'a'...'f', 'A'...'F' => { counter += 1; if (counter > 6) { state = .UnicodeEnd; } }, '}' => { state = escape_end_state; }, else => { return self.reportErr("expected hex digits or '}'", c); }, }, .UnicodeEnd => if (c == '}') { state = escape_end_state; } else { return self.reportErr("expected '}'", c); }, .f => switch (c) { '\'' => { self.format_string = '\''; self.format_paren_level = self.paren_level; res = .FormatStart; state = .FormatString; escape_end_state = .FormatString; }, '"' => { self.format_string = '"'; 
self.format_paren_level = self.paren_level; res = .FormatStart; state = .FormatString; escape_end_state = .FormatString; }, else => { self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; state = .Identifier; }, }, .FormatString => { if (c == self.format_string) { if (res == .FormatStart) { res = .String; } else { res = .FormatEnd; } self.format_string = 0; break; } else if (c == '\\') { state = .EscapeSequence; } else if (c == '\n' or c == '\r') { if (self.format_string == '\'') { return self.reportErr("invalid newline, use'\"' for multiline strings", c); } } else if (c == '{') { state = .FormatBrace; } }, .FormatBrace => { if (c == '{') { state = .FormatString; } else { self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; break; } }, .Identifier => { if (!isIdentifier(c)) { self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; const slice = self.it.bytes[start_index..self.it.i]; res = Token.keywords.get(slice) orelse .Identifier; break; } }, .Equal => switch (c) { '=' => { res = .EqualEqual; break; }, '>' => { res = .EqualRarr; break; }, else => { self.it.i = start_index + 1; res = .Equal; break; }, }, .Bang => switch (c) { '=' => { res = .BangEqual; break; }, else => { return self.reportErr("invalid character, use 'not' for boolean not", c); }, }, .Pipe => switch (c) { '=' => { res = .PipeEqual; break; }, else => { self.it.i = start_index + 1; res = .Pipe; break; }, }, .Percent => switch (c) { '=' => { res = .PercentEqual; break; }, else => { self.it.i = start_index + 1; res = .Percent; break; }, }, .Asterisk => switch (c) { '=' => { res = .AsteriskEqual; break; }, '*' => { state = .AsteriskAsterisk; }, else => { self.it.i = start_index + 1; res = .Asterisk; break; }, }, .AsteriskAsterisk => switch (c) { '=' => { res = .AsteriskAsteriskEqual; break; }, else => { self.it.i = start_index + 2; res = .AsteriskAsterisk; break; }, }, .Plus => switch (c) { '=' => { res = .PlusEqual; break; }, '+' => { res = .PlusPlus; 
break; }, else => { self.it.i = start_index + 1; res = .Plus; break; }, }, .LArr => switch (c) { '<' => { state = .LArrArr; }, '=' => { res = .LArrEqual; break; }, else => { self.it.i = start_index + 1; res = .LArr; break; }, }, .LArrArr => switch (c) { '=' => { res = .LArrArrEqual; break; }, else => { self.it.i = start_index + 2; res = .LArrArr; break; }, }, .RArr => switch (c) { '>' => { state = .RArrArr; }, '=' => { res = .RArrEqual; break; }, else => { self.it.i = start_index + 1; res = .RArr; break; }, }, .RArrArr => switch (c) { '=' => { res = .RArrArrEqual; break; }, else => { self.it.i = start_index + 2; res = .RArrArr; break; }, }, .Caret => switch (c) { '=' => { res = .CaretEqual; break; }, else => { self.it.i = start_index + 1; res = .Caret; break; }, }, .Minus => switch (c) { '=' => { res = .MinusEqual; break; }, else => { self.it.i = start_index + 1; res = .Minus; break; }, }, .Slash => switch (c) { '/' => { state = .SlashSlash; }, '=' => { res = .SlashEqual; break; }, else => { self.it.i = start_index + 1; res = .Slash; break; }, }, .SlashSlash => switch (c) { '=' => { res = .SlashSlashEqual; break; }, else => { self.it.i = start_index + 2; res = .SlashSlash; break; }, }, .Ampersand => switch (c) { '=' => { res = .AmpersandEqual; break; }, else => { self.it.i = start_index + 1; res = .Ampersand; break; }, }, .LineComment => switch (c) { '\n', '\r' => { self.it.i -= 1; res = .Comment; break; }, else => {}, }, .Zero => switch (c) { 'b' => { state = .BinaryNumber; }, 'o' => { state = .OctalNumber; }, 'x' => { state = .HexNumber; }, '.', ',' => { state = .NumberDot; dot_index = self.it.i - 1; }, '0'...'7' => { return self.reportErr("invalid character, octal literals start with '0o'", c); }, '_' => { state = .Number; }, else => { if (isIdentifier(c)) { state = .Identifier; continue; } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; res = .Integer; break; }, }, .NumberDot => switch (c) { '.' 
=> { self.it.i -= 2; res = .Integer; break; }, '0'...'9', 'e', 'E', '_' => { self.it.i -= 1; state = .FloatFraction; }, else => { self.it.i = dot_index.?; res = .Integer; break; }, }, .BinaryNumber => switch (c) { '0', '1', '_' => {}, else => { if (isIdentifier(c)) { state = .Identifier; continue; } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; res = .Integer; break; }, }, .OctalNumber => switch (c) { '0'...'7', '_' => {}, else => { if (isIdentifier(c)) { state = .Identifier; continue; } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; res = .Integer; break; }, }, .HexNumber => switch (c) { '0'...'9', 'a'...'f', 'A'...'F', '_' => {}, 'p', 'P' => { state = .FloatExponent; }, else => { if (isIdentifier(c)) { state = .Identifier; continue; } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; res = .Integer; break; }, }, .Number => switch (c) { '0'...'9', '_' => {}, '.' => { state = .NumberDot; dot_index = self.it.i - 1; }, 'e', 'E' => { state = .FloatExponent; }, else => { if (isIdentifier(c)) { state = .Identifier; continue; } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; res = .Integer; break; }, }, .FloatFraction => switch (c) { '0'...'9', '_' => {}, 'e', 'E' => { state = .FloatExponent; }, '.', ',' => { self.it.i -= 1; res = .Number; break; }, else => { if (isIdentifier(c)) { self.it.i = dot_index orelse { state = .Identifier; continue; }; res = .Integer; break; } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; res = .Number; break; }, }, .FloatExponent => switch (c) { '+', '-' => { state = .FloatExponentDigits; }, else => { if (isIdentifier(c)) { self.it.i = dot_index orelse { state = .Identifier; continue; }; res = .Integer; break; } self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; state = .FloatExponentDigits; }, }, .FloatExponentDigits => switch (c) { '0'...'9' => { counter += 1; }, '_' => {}, else => { if (counter != 0 and 
!isIdentifier(c)) { self.it.i -= unicode.utf8CodepointSequenceLength(c) catch unreachable; res = .Number; break; } self.it.i = dot_index orelse { self.it.i = start_index; state = .Identifier; continue; }; res = .Integer; break; }, }, } } else { switch (state) { .Start => {}, .Identifier => { const slice = self.it.bytes[start_index..]; res = Token.keywords.get(slice) orelse .Identifier; }, .BinaryNumber, .OctalNumber, .HexNumber, .Number, .Zero, => res = .Integer, .FormatBrace => {}, .String, .FormatString => { if (self.repl) { // if running in repl this might be a multiline string self.it.i = start_index; res = .Eof; } else return self.reportErr("unterminated string", 'a'); }, .LineComment => res = .Comment, .FloatFraction => res = .Number, .Equal => res = .Equal, .Minus => res = .Minus, .Slash => res = .Slash, .SlashSlash => res = .SlashSlash, .Ampersand => res = .Ampersand, .Pipe => res = .Pipe, .RArr => res = .RArr, .RArrArr => res = .RArrArr, .LArr => res = .LArr, .LArrArr => res = .LArrArr, .Plus => res = .Plus, .Percent => res = .Percent, .Caret => res = .Caret, .Asterisk => res = .Asterisk, .AsteriskAsterisk => res = .AsteriskAsterisk, .NumberDot => { self.it.i -= 1; res = .Integer; }, else => { return self.reportErr("unexpected EOF", 'a'); }, } } return Token{ .id = res, .start = @truncate(u32, start_index), .end = @truncate(u32, self.it.i), }; } }; fn expectTokens(source: []const u8, expected_tokens: []const Token.Id) void { var errors = Errors.init(std.testing.allocator); defer errors.deinit(); var tokenizer = Tokenizer{ .tokens = undefined, .errors = &errors, .repl = false, .it = .{ .i = 0, .bytes = source, }, }; blk: { for (expected_tokens) |expected_token| { const token = tokenizer.next() catch break :blk; std.testing.expectEqual(expected_token, token.id); } const last_token = tokenizer.next() catch break :blk; std.testing.expect(last_token.id == .Eof); return; } errors.render(source, std.io.getStdErr().writer()) catch {}; @panic("test failed"); } test 
"operators" { expectTokens( \\!= | |= = == \\( ) { } [ ] . @ => \\^ ^= + += ++ - -= \\* *= ** **= % %= / /= // //= \\, & &= < <= << \\<<= > >= >> >>= ~ _ \\#hello world , &[_]Token.Id{ .BangEqual, .Pipe, .PipeEqual, .Equal, .EqualEqual, .Nl, .LParen, .RParen, .LBrace, .RBrace, .LBracket, .RBracket, .Period, .At, .EqualRarr, .Nl, .Caret, .CaretEqual, .Plus, .PlusEqual, .PlusPlus, .Minus, .MinusEqual, .Nl, .Asterisk, .AsteriskEqual, .AsteriskAsterisk, .AsteriskAsteriskEqual, .Percent, .PercentEqual, .Slash, .SlashEqual, .SlashSlash, .SlashSlashEqual, .Nl, .Comma, .Ampersand, .AmpersandEqual, .LArr, .LArrEqual, .LArrArr, .Nl, .LArrArrEqual, .RArr, .RArrEqual, .RArrArr, .RArrArrEqual, .Tilde, .Underscore, .Nl, .Comment, }); } test "keywords" { expectTokens( \\not and or let continue break return if else false true for \\while match catch try error import is in fn as const this , &[_]Token.Id{ .Keyword_not, .Keyword_and, .Keyword_or, .Keyword_let, .Keyword_continue, .Keyword_break, .Keyword_return, .Keyword_if, .Keyword_else, .Keyword_false, .Keyword_true, .Keyword_for, .Nl, .Keyword_while, .Keyword_match, .Keyword_catch, .Keyword_try, .Keyword_error, .Keyword_import, .Keyword_is, .Keyword_in, .Keyword_fn, .Keyword_as, .Keyword_const, .Keyword_this, }); } test "indentation" { expectTokens( \\if \\ if \\ \\ if \\if \\ if \\ if \\ , &[_]Token.Id{ .Keyword_if, .Nl, .{ .Indent = 1 }, .Keyword_if, .Nl, .{ .Indent = 2 }, .Keyword_if, .Nl, .Keyword_if, .Nl, .{ .Indent = 1 }, .Keyword_if, .Nl, .{ .Indent = 1 }, .Keyword_if, .Nl, }); } test "identifiers" { expectTokens( \\0b1gg \\0x1gg \\0o1gg \\0gg \\1gg , &[_]Token.Id{ .Identifier, .Nl, .Identifier, .Nl, .Identifier, .Nl, .Identifier, .Nl, .Identifier, }); expectTokens( \\30.30f \\30.30ee \\30.30e+12a \\30.30e+12- \\30.30e+- , &[_]Token.Id{ .Integer, .Period, .Identifier, .Nl, .Integer, .Period, .Identifier, .Nl, .Integer, .Period, .Identifier, .Plus, .Identifier, .Nl, .Number, .Minus, .Nl, .Integer, .Period, .Identifier, 
.Plus, .Minus, }); } test "numbers" { expectTokens( \\0. \\0, \\0.0 \\0,0 \\0.0.0 \\0,0,0 , &[_]Token.Id{ .Integer, .Period, .Nl, .Integer, .Comma, .Nl, .Number, .Nl, .Number, .Nl, .Number, .Period, .Integer, .Nl, .Number, .Comma, .Integer, }); } test "format string" { expectTokens( \\f f"\u{12}{12:12} foo \t\n {f"foo bar" ++ {1:2} as str:3} \x12 " : , &[_]Token.Id{ .Identifier, .FormatStart, .Integer, .Format, .Identifier, .String, .PlusPlus, .LBrace, .Integer, .Colon, .Integer, .RBrace, .Keyword_as, .Identifier, .FormatEnd, .Colon, }); }
src/tokenizer.zig
//-------------------------------------------------------------------------------- // Section: Types (2) //-------------------------------------------------------------------------------- // TODO: this type is limited to platform 'windows8.0' const IID_IWICImageEncoder_Value = Guid.initString("04c75bf8-3ce1-473b-acc5-3cc4f5e94999"); pub const IID_IWICImageEncoder = &IID_IWICImageEncoder_Value; pub const IWICImageEncoder = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, WriteFrame: fn( self: *const IWICImageEncoder, pImage: ?*ID2D1Image, pFrameEncode: ?*IWICBitmapFrameEncode, pImageParameters: ?*const WICImageParameters, ) callconv(@import("std").os.windows.WINAPI) HRESULT, WriteFrameThumbnail: fn( self: *const IWICImageEncoder, pImage: ?*ID2D1Image, pFrameEncode: ?*IWICBitmapFrameEncode, pImageParameters: ?*const WICImageParameters, ) callconv(@import("std").os.windows.WINAPI) HRESULT, WriteThumbnail: fn( self: *const IWICImageEncoder, pImage: ?*ID2D1Image, pEncoder: ?*IWICBitmapEncoder, pImageParameters: ?*const WICImageParameters, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWICImageEncoder_WriteFrame(self: *const T, pImage: ?*ID2D1Image, pFrameEncode: ?*IWICBitmapFrameEncode, pImageParameters: ?*const WICImageParameters) callconv(.Inline) HRESULT { return @ptrCast(*const IWICImageEncoder.VTable, self.vtable).WriteFrame(@ptrCast(*const IWICImageEncoder, self), pImage, pFrameEncode, pImageParameters); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWICImageEncoder_WriteFrameThumbnail(self: *const T, pImage: ?*ID2D1Image, pFrameEncode: ?*IWICBitmapFrameEncode, pImageParameters: ?*const WICImageParameters) callconv(.Inline) HRESULT { return @ptrCast(*const IWICImageEncoder.VTable, 
self.vtable).WriteFrameThumbnail(@ptrCast(*const IWICImageEncoder, self), pImage, pFrameEncode, pImageParameters); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWICImageEncoder_WriteThumbnail(self: *const T, pImage: ?*ID2D1Image, pEncoder: ?*IWICBitmapEncoder, pImageParameters: ?*const WICImageParameters) callconv(.Inline) HRESULT { return @ptrCast(*const IWICImageEncoder.VTable, self.vtable).WriteThumbnail(@ptrCast(*const IWICImageEncoder, self), pImage, pEncoder, pImageParameters); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IWICImagingFactory2_Value = Guid.initString("7b816b45-1996-4476-b132-de9e247c8af0"); pub const IID_IWICImagingFactory2 = &IID_IWICImagingFactory2_Value; pub const IWICImagingFactory2 = extern struct { pub const VTable = extern struct { base: IWICImagingFactory.VTable, CreateImageEncoder: fn( self: *const IWICImagingFactory2, pD2DDevice: ?*ID2D1Device, ppWICImageEncoder: ?*?*IWICImageEncoder, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IWICImagingFactory.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IWICImagingFactory2_CreateImageEncoder(self: *const T, pD2DDevice: ?*ID2D1Device, ppWICImageEncoder: ?*?*IWICImageEncoder) callconv(.Inline) HRESULT { return @ptrCast(*const IWICImagingFactory2.VTable, self.vtable).CreateImageEncoder(@ptrCast(*const IWICImagingFactory2, self), pD2DDevice, ppWICImageEncoder); } };} pub usingnamespace MethodMixin(@This()); }; //-------------------------------------------------------------------------------- // Section: Functions (0) //-------------------------------------------------------------------------------- //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) 
//-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (9) //-------------------------------------------------------------------------------- const Guid = @import("../../zig.zig").Guid; const HRESULT = @import("../../foundation.zig").HRESULT; const ID2D1Device = @import("../../graphics/direct2d.zig").ID2D1Device; const ID2D1Image = @import("../../graphics/direct2d.zig").ID2D1Image; const IUnknown = @import("../../system/com.zig").IUnknown; const IWICBitmapEncoder = @import("../../graphics/imaging.zig").IWICBitmapEncoder; const IWICBitmapFrameEncode = @import("../../graphics/imaging.zig").IWICBitmapFrameEncode; const IWICImagingFactory = @import("../../graphics/imaging.zig").IWICImagingFactory; const WICImageParameters = @import("../../graphics/imaging.zig").WICImageParameters; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
win32/graphics/imaging/d2d.zig
const std = @import("std");
const testing = std.testing;
const expectEqualStrings = testing.expectEqualStrings;
const expectEqual = testing.expectEqual;
const expectError = testing.expectError;

/// The transports a D-Bus server address may use.
pub const Transport = enum {
    /// Unix domain sockets
    unix,
    /// launchd
    launchd,
    /// systemd socket activation
    systemd,
    /// TCP/IP based connections
    tcp,
    /// Nonce-authenticated TCP Sockets
    nonce_tcp,
    /// Executed Subprocesses on Unix
    unixexec,
    /// Autolaunch a session bus if not present
    autolaunch,

    /// Returns the literal address prefix (including the trailing ':')
    /// that selects this transport in a D-Bus address string.
    pub fn prefix(self: Transport) []const u8 {
        return switch (self) {
            .unix => "unix:",
            .launchd => "launchd:",
            .systemd => "systemd:",
            .tcp => "tcp:",
            .nonce_tcp => "nonce-tcp:",
            .unixexec => "unixexec:",
            .autolaunch => "autolaunch:",
        };
    }
};

/// The mutually exclusive ways a unix transport names its socket.
pub const UnixAddress = union(enum) {
    path: []const u8,
    dir: []const u8,
    tmpdir: []const u8,
    abstract: []const u8,
    runtime: void,
};

pub const SocketFamily = enum {
    ipv4,
    ipv6,
};

pub const TcpAddress = struct {
    host: []const u8,
    bind: []const u8,
    port: u16,
    family: ?SocketFamily,
};

pub const NonceTcpAddress = struct {
    host: []const u8,
    bind: []const u8,
    port: u16,
    family: ?SocketFamily,
    noncefile: ?[]const u8,
};

/// One parsed server address, tagged by its transport. Slices inside the
/// payloads borrow from the string handed to `Parser.init`.
pub const Address = union(Transport) {
    unix: UnixAddress,
    /// Environment variable containing the path of the unix domain
    /// socket for the launchd created dbus-daemon
    launchd: []const u8,
    /// No extra information provided
    systemd: void,
    tcp: TcpAddress,
    nonce_tcp: NonceTcpAddress,
    unixexec: void, // TODO
    autolaunch: void, // TODO
};

/// Iterating parser over a ';'-separated D-Bus server address list.
/// Call `nextAddress` repeatedly until it returns null.
pub const Parser = struct {
    iterator: std.mem.SplitIterator,

    pub fn init(address: []const u8) !Parser {
        return Parser{
            .iterator = std.mem.split(address, ";"),
        };
    }

    /// Parses and returns the next address in the list, or null when the
    /// input is exhausted. Returned slices alias the input string.
    pub fn nextAddress(self: *Parser) !?Address {
        const address = self.iterator.next() orelse return null;

        // Match the transport by its literal "name:" prefix.
        const transport = inline for (std.meta.fields(Transport)) |field| {
            const item = @field(Transport, field.name);
            const prefix = item.prefix();
            if (std.mem.startsWith(u8, address, prefix)) break item;
        } else return error.InvalidTransport;

        // Start parsing after the :
        const parts = address[transport.prefix().len..];
        if (parts.len == 0) {
            switch (transport) {
                // systemd is the only transport that takes no parameters.
                .systemd => return Address.systemd,
                else => return error.ExpectedKeyValuePair,
            }
        }

        // Accumulator for the (nonce-)tcp transports, which - unlike
        // unix/launchd - spread their data over several key/value pairs.
        var tcp_parse_state: struct {
            host: ?[]const u8 = null,
            bind: ?[]const u8 = null,
            port: ?u16 = null,
            family: ?SocketFamily = null,
            noncefile: ?[]const u8 = null,
        } = .{};

        var part_iter = std.mem.split(parts, ",");
        while (part_iter.next()) |part| {
            // Each part must be exactly "key=value".
            var kv_iter = std.mem.split(part, "=");
            const key = kv_iter.next() orelse return error.InvalidKeyValuePair;
            const value = kv_iter.next() orelse return error.InvalidKeyValuePair;
            if (kv_iter.next() != null) return error.InvalidKeyValuePair;

            switch (transport) {
                .unix => {
                    // unix addresses consist of exactly one key/value pair.
                    if (part_iter.next() != null) return error.InvalidUnixAddress;
                    if (std.mem.eql(u8, "path", key)) {
                        return Address{ .unix = .{ .path = value } };
                    } else if (std.mem.eql(u8, "dir", key)) {
                        return Address{ .unix = .{ .dir = value } };
                    } else if (std.mem.eql(u8, "tmpdir", key)) {
                        return Address{ .unix = .{ .tmpdir = value } };
                    } else if (std.mem.eql(u8, "abstract", key)) {
                        return Address{ .unix = .{ .abstract = value } };
                    } else if (std.mem.eql(u8, "runtime", key)) {
                        if (!std.mem.eql(u8, "yes", value)) return error.InvalidValue;
                        return Address{ .unix = .{ .runtime = {} } };
                    } else {
                        return error.InvalidKey;
                    }
                },
                .launchd => {
                    // launchd addresses also take exactly one key/value pair.
                    // NOTE(review): this error name looks copy-pasted from the
                    // unix prong; kept as-is so existing callers' error
                    // handling is unaffected.
                    if (part_iter.next() != null) return error.InvalidUnixAddress;
                    if (std.mem.eql(u8, "env", key)) {
                        return Address{ .launchd = value };
                    } else {
                        return error.InvalidKey;
                    }
                },
                .systemd => return error.InvalidSystemdAddress,
                .tcp => {
                    if (std.mem.eql(u8, "host", key)) {
                        tcp_parse_state.host = value;
                    } else if (std.mem.eql(u8, "bind", key)) {
                        tcp_parse_state.bind = value;
                    } else if (std.mem.eql(u8, "port", key)) {
                        tcp_parse_state.port = std.fmt.parseInt(u16, value, 10) catch
                            return error.InvalidTcpPort;
                    } else if (std.mem.eql(u8, "family", key)) {
                        // Fix: stringToEnum returns null for unknown names; an
                        // invalid family used to be silently dropped (family
                        // stayed null). Reject it like other malformed values.
                        tcp_parse_state.family = std.meta.stringToEnum(SocketFamily, value) orelse
                            return error.InvalidSocketFamily;
                    } else {
                        return error.InvalidKey;
                    }
                },
                // TODO merging this with tcp results in a
                // segmentation fault when compiling with stage1
                .nonce_tcp => {
                    if (std.mem.eql(u8, "host", key)) {
                        tcp_parse_state.host = value;
                    } else if (std.mem.eql(u8, "bind", key)) {
                        tcp_parse_state.bind = value;
                    } else if (std.mem.eql(u8, "port", key)) {
                        tcp_parse_state.port = std.fmt.parseInt(u16, value, 10) catch
                            return error.InvalidTcpPort;
                    } else if (std.mem.eql(u8, "family", key)) {
                        // Same fix as the tcp prong above.
                        tcp_parse_state.family = std.meta.stringToEnum(SocketFamily, value) orelse
                            return error.InvalidSocketFamily;
                    } else if (std.mem.eql(u8, "noncefile", key)) {
                        tcp_parse_state.noncefile = value;
                    } else {
                        return error.InvalidKey;
                    }
                },
                else => return error.NotImplemented,
            }
        }

        switch (transport) {
            .unix, .systemd, .launchd => unreachable, // We return immediately from all switch prongs
            .tcp => return Address{ .tcp = .{
                .host = tcp_parse_state.host orelse return error.MissingTcpHost,
                // bind defaults to host when not given explicitly.
                .bind = tcp_parse_state.bind orelse tcp_parse_state.host.?,
                .port = tcp_parse_state.port orelse 0,
                .family = tcp_parse_state.family,
            } },
            .nonce_tcp => return Address{ .nonce_tcp = .{
                .host = tcp_parse_state.host orelse return error.MissingTcpHost,
                .bind = tcp_parse_state.bind orelse tcp_parse_state.host.?,
                .port = tcp_parse_state.port orelse 0,
                .family = tcp_parse_state.family,
                .noncefile = tcp_parse_state.noncefile,
            } },
            else => unreachable, // We return error.NotImplemented in all other switch prongs
        }
    }
};

test "parse unix address 1" {
    const address = "unix:path=/tmp/dbus-test";
    var parser = try Parser.init(address);
    try expectEqualStrings("/tmp/dbus-test", (try parser.nextAddress()).?.unix.path);
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse unix address 2" {
    const address = "unix:invalidkey=value";
    var parser = try Parser.init(address);
    try expectError(error.InvalidKey, parser.nextAddress());
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse systemd address 1" {
    const address = "systemd:";
    var parser = try Parser.init(address);
    try expectEqual(Address.systemd, (try parser.nextAddress()).?);
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse systemd address 2" {
    const address = "systemd:invalidkey=value";
    var parser = try Parser.init(address);
    try expectError(error.InvalidSystemdAddress, parser.nextAddress());
}

test "parse launchd address 1" {
    const address = "launchd:env=ENVIRONMENT_VARIABLE";
    var parser = try Parser.init(address);
    try expectEqualStrings("ENVIRONMENT_VARIABLE", (try parser.nextAddress()).?.launchd);
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse launchd address 2" {
    const address = "launchd:";
    var parser = try Parser.init(address);
    try expectError(error.ExpectedKeyValuePair, parser.nextAddress());
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse tcp address 1" {
    const address = "tcp:";
    var parser = try Parser.init(address);
    try expectError(error.ExpectedKeyValuePair, parser.nextAddress());
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse tcp address 2" {
    const address = "tcp:host=127.0.0.1";
    var parser = try Parser.init(address);
    const parsed = (try parser.nextAddress()).?.tcp;
    try expectEqualStrings("127.0.0.1", parsed.host);
    try expectEqualStrings("127.0.0.1", parsed.bind);
    try expectEqual(@as(u16, 0), parsed.port);
    try expectEqual(@as(?SocketFamily, null), parsed.family);
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse tcp address 3" {
    const address = "tcp:host=127.0.0.1,port=abc";
    var parser = try Parser.init(address);
    try expectError(error.InvalidTcpPort, parser.nextAddress());
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse tcp address 4" {
    const address = "tcp:port=123";
    var parser = try Parser.init(address);
    try expectError(error.MissingTcpHost, parser.nextAddress());
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse tcp address 5" {
    const address = "tcp:host=127.0.0.1,family=ipv4";
    var parser = try Parser.init(address);
    const parsed = (try parser.nextAddress()).?.tcp;
    try expectEqual(@as(?SocketFamily, SocketFamily.ipv4), parsed.family);
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}

test "parse tcp address 6" {
    const address = "tcp:host=127.0.0.1,family=bogus";
    var parser = try Parser.init(address);
    try expectError(error.InvalidSocketFamily, parser.nextAddress());
}

test "parse nonce_tcp address 1" {
    const address = "nonce-tcp:host=127.0.0.1,noncefile=/tmp/test";
    var parser = try Parser.init(address);
    const parsed = (try parser.nextAddress()).?.nonce_tcp;
    try expectEqualStrings("127.0.0.1", parsed.host);
    try expectEqualStrings("127.0.0.1", parsed.bind);
    try expectEqual(@as(u16, 0), parsed.port);
    try expectEqual(@as(?SocketFamily, null), parsed.family);
    try expectEqualStrings("/tmp/test", parsed.noncefile.?);
    try expectEqual(@as(?Address, null), try parser.nextAddress());
}
src/address.zig
const clap = @import("clap");
const format = @import("format");
const it = @import("ziter");
const std = @import("std");
const ston = @import("ston");
const util = @import("util");

const debug = std.debug;
const fmt = std.fmt;
const fs = std.fs;
const heap = std.heap;
const io = std.io;
const log = std.log;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const os = std.os;
const rand = std.rand;
const testing = std.testing;

// This file is itself a struct type; the fields below hold per-run state.
const Program = @This();

allocator: mem.Allocator,
options: Options,

// Ids collected from the input stream during `useGame`.
first_evos: Set = Set{},
types: Set = Set{},
abilities: Set = Set{},
items: Set = Set{},
moves: Moves = Moves{},
pokemons: Pokemons = Pokemons{},
tms: Machines = Machines{},
hms: Machines = Machines{},

// Parsed command line configuration.
const Options = struct {
    seed: u64,
    abilities: Method,
    types: Method,
    items: Method,
    stats: Method,
    same_total_stats: bool,
    machines: Method,
    chance_to_learn_non_stab_machine: f64,
    chance_to_learn_stab_machine: f64,
    status_moves_are_stab: bool,
};

const Method = enum {
    unchanged,
    random,
    random_follow_evos,
};

pub const main = util.generateMain(Program);
pub const version = "0.0.0";
pub const description =
    \\Randomizes Pokémons.
    \\
;

pub const params = &[_]clap.Param(clap.Help){
    clap.parseParam(
        "-h, --help " ++
            "Display this help text and exit.",
    ) catch unreachable,
    clap.parseParam(
        "-s, --seed <INT> " ++
            "The seed to use for random numbers. A random seed will be picked if this is not " ++
            "specified.",
    ) catch unreachable,
    clap.parseParam(
        "-v, --version " ++
            "Output version information and exit.",
    ) catch unreachable,
    clap.parseParam(
        "-a, --abilities <unchanged|random|random_follow_evos> " ++
            "The method for which pokemon abilities will be randomized. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        "-i, --items <unchanged|random|random_follow_evos> " ++
            "The method for which pokemon items will be randomized. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        "-t, --types <unchanged|random|random_follow_evos> " ++
            "The method for which pokemon types will be randomized. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        "-S, --stats <unchanged|random|random_follow_evos> " ++
            "The method for which pokemon stats will be randomized. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        " --same-total-stats " ++
            "Pokémons will have the same total stats after randomization.",
    ) catch unreachable,
    clap.parseParam(
        "-m, --machines <unchanged|random|random_follow_evos> " ++
            "The method for which pokemon machines learned (tms,hms) will be randomized. " ++
            "(default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        " --non-stab-machine-chance <FLOAT> " ++
            "The chance a pokemon can learn a machine providing a non stab move when " ++
            "randomizing machines. (default: 0.5)",
    ) catch unreachable,
    clap.parseParam(
        " --stab-machine-chance <FLOAT> " ++
            "The chance a pokemon can learn a machine providing a stab move when randomizing " ++
            "machines. (default: 0.5)",
    ) catch unreachable,
    clap.parseParam(
        " --status-moves-are-stab " ++
            "Whether status moves, which are the same typing as the pokemon, should be " ++
            "considered as stab moves when determining the chance that the move should be " ++
            "learnable.",
    ) catch unreachable,
};

// Builds a Program from parsed command line arguments; unspecified options
// fall back to the defaults documented in `params`.
pub fn init(allocator: mem.Allocator, args: anytype) !Program {
    const options = Options{
        .seed = try util.args.seed(args),
        .same_total_stats = args.flag("--same-total-stats"),
        .status_moves_are_stab = args.flag("--status-moves-are-stab"),
        .abilities = (try util.args.enumeration(args, "--abilities", Method)) orelse .unchanged,
        .types = (try util.args.enumeration(args, "--types", Method)) orelse .unchanged,
        .items = (try util.args.enumeration(args, "--items", Method)) orelse .unchanged,
        .stats = (try util.args.enumeration(args, "--stats", Method)) orelse .unchanged,
        .machines = (try util.args.enumeration(args, "--machines", Method)) orelse .unchanged,
        .chance_to_learn_non_stab_machine =
            (try util.args.float(args, "--non-stab-machine-chance", f64)) orelse 0.5,
        .chance_to_learn_stab_machine =
            (try util.args.float(args, "--stab-machine-chance", f64)) orelse 0.5,
    };
    return Program{
        .allocator = allocator,
        .options = options,
    };
}

// Entry point: ingest the game data, compute the set of first-stage evolutions
// (species that are not the target of any evo), randomize, then emit output.
pub fn run(
    program: *Program,
    comptime Reader: type,
    comptime Writer: type,
    stdio: util.CustomStdIoStreams(Reader, Writer),
) anyerror!void {
    try format.io(program.allocator, stdio.in, stdio.out, program, useGame);
    try program.first_evos.ensureTotalCapacity(program.allocator, program.pokemons.count());
    // Start with every species, then remove every species that appears as an
    // evolution target; what remains are the first stages of each chain.
    for (program.pokemons.keys()) |species|
        program.first_evos.putAssumeCapacity(species, {});
    for (program.pokemons.values()) |pokemon| {
        for (pokemon.evos.values()) |species|
            _ = program.first_evos.swapRemove(species);
    }
    try program.randomize();
    try program.output(stdio.out);
}

// Serializes the (randomized) pokemon data back out in ston format.
fn output(program: *Program, writer: anytype) !void {
    for (program.pokemons.values()) |*pokemon, i| {
        const species = program.pokemons.keys()[i];
        try ston.serialize(writer, .{ .pokemons = ston.index(species, .{
            .types = pokemon.types,
            .abilities = pokemon.abilities,
            .items = pokemon.items,
        }) });
        var stat_it = pokemon.stats.iterator();
        while (stat_it.next()) |entry| {
            try ston.serialize(writer, .{ .pokemons = ston.index(species, .{
                .stats = ston.field(@tagName(entry.key), entry.value.*),
            }) });
        }
        for (pokemon.evos.keys()) |evo_id, j| {
            const evo = pokemon.evos.values()[j];
            try ston.serialize(writer, .{ .pokemons = ston.index(species, .{
                .evos = ston.index(evo_id, .{ .target = evo }),
            }) });
        }
        for (pokemon.evo_params.keys()) |evo_id, j| {
            const param = pokemon.evo_params.values()[j];
            try ston.serialize(writer, .{ .pokemons = ston.index(species, .{
                .evos = ston.index(evo_id, .{ .param = param }),
            }) });
        }
        for (pokemon.evo_methods.keys()) |evo_id, j| {
            const method = pokemon.evo_methods.values()[j];
            try ston.serialize(writer, .{ .pokemons = ston.index(species, .{
                .evos = ston.index(evo_id, .{ .method = method }),
            }) });
        }
        // Only emit tm/hm slots that were present in the input (occupied).
        var j: usize = 0;
        while (j < pokemon.tms_learned.len) : (j += 1) {
            if (!pokemon.tms_occupied.get(j)) continue;
            try ston.serialize(writer, .{ .pokemons = ston.index(species, .{
                .tms = ston.index(j, pokemon.tms_learned.get(j)),
            }) });
        }
        j = 0;
        while (j < pokemon.hms_learned.len) : (j += 1) {
            if (!pokemon.hms_occupied.get(j)) continue;
            try ston.serialize(writer, .{ .pokemons = ston.index(species, .{
                .hms = ston.index(j, pokemon.hms_learned.get(j)),
            }) });
        }
    }
}

// Stream callback: consumes the pieces of game data this randomizer cares
// about; returning error.DidNotConsumeData tells format.io to pass the entry
// through to the output untouched.
fn useGame(program: *Program, parsed: format.Game) !void {
    const allocator = program.allocator;
    switch (parsed) {
        .pokemons => |pokemons| {
            const pokemon = (try program.pokemons.getOrPutValue(
                allocator,
                pokemons.index,
                .{},
            )).value_ptr;
            switch (pokemons.value) {
                .tms => |tms| {
                    pokemon.tms_occupied.set(tms.index, true);
                    pokemon.tms_learned.set(tms.index, tms.value);
                },
                .hms => |hms| {
                    pokemon.hms_occupied.set(hms.index, true);
                    pokemon.hms_learned.set(hms.index, hms.value);
                },
                .stats => |stats| pokemon.stats.put(stats, stats.value()),
                .types => |types| _ = try pokemon.types.put(allocator, types.index, types.value),
                .items => |items| _ = try pokemon.items.put(allocator, items.index, items.value),
                .abilities => |abilities| _ = try pokemon.abilities.put(
                    allocator,
                    abilities.index,
                    abilities.value,
                ),
                .evos => |evos| switch (evos.value) {
                    .target => |target| _ = try pokemon.evos.put(allocator, evos.index, target),
                    .param => |param| _ = try pokemon.evo_params.put(
                        allocator,
                        evos.index,
                        param,
                    ),
                    .method => |method| _ = try pokemon.evo_methods.put(
                        allocator,
                        evos.index,
                        method,
                    ),
                },
                // Fields this randomizer does not touch are passed through.
                .catch_rate,
                .base_exp_yield,
                .ev_yield,
                .gender_ratio,
                .egg_cycles,
                .base_friendship,
                .growth_rate,
                .egg_groups,
                .color,
                .moves,
                .name,
                .pokedex_entry,
                => return error.DidNotConsumeData,
            }
            return;
        },
        .moves => |moves| {
            const move = (try program.moves.getOrPutValue(allocator, moves.index, .{})).value_ptr;
            switch (moves.value) {
                .type => |_type| move.type = _type,
                .category => |category| move.category = category,
                .name,
                .description,
                .effect,
                .accuracy,
                .pp,
                .target,
                .priority,
                .power,
                => {},
            }
            // Move data is only observed here, never rewritten.
            return error.DidNotConsumeData;
        },
        .tms => |tms| {
            _ = try program.tms.put(allocator, tms.index, tms.value);
            return error.DidNotConsumeData;
        },
        .hms => |hms| {
            _ = try program.hms.put(allocator, hms.index, hms.value);
            return error.DidNotConsumeData;
        },
        .types => |types| {
            _ = try program.types.put(allocator, types.index, {});
            return error.DidNotConsumeData;
        },
        .abilities => |abilities| {
            // TODO: Ability 0 is invalid in games. Figure out a generic way of figuring this
            // out.
            if (abilities.index != 0)
                _ = try program.abilities.put(allocator, abilities.index, {});
            return error.DidNotConsumeData;
        },
        .items => |items| {
            // TODO: Item 0 is invalid in games. Figure out a generic way of figuring this
            // out.
            if (items.index != 0)
                _ = try program.items.put(allocator, items.index, {});
            return error.DidNotConsumeData;
        },
        else => return error.DidNotConsumeData,
    }
    unreachable;
}

// Applies every configured randomization, all driven by one seeded PRNG so a
// given seed reproduces the same result.
fn randomize(program: *Program) !void {
    const random = rand.DefaultPrng.init(program.options.seed).random();
    if (program.types.keys().len != 0) switch (program.options.types) {
        .unchanged => {},
        // NOTE: util.random.items presumably replaces each element with a
        // random pick from the candidate keys - confirm against util.
        .random => for (program.pokemons.values()) |*pokemon| {
            util.random.items(random, pokemon.types.values(), program.types.keys());
        },
        // Randomize only first stages, then propagate down each evo chain.
        .random_follow_evos => for (program.first_evos.keys()) |species| {
            const pokemon = program.pokemons.getPtr(species).?;
            util.random.items(random, pokemon.types.values(), program.types.keys());
            program.copyFieldsToEvolutions(pokemon.*, &.{"types"});
        },
    };
    if (program.abilities.keys().len != 0) switch (program.options.abilities) {
        .unchanged => {},
        .random => for (program.pokemons.values()) |*pokemon| {
            util.random.items(random, pokemon.abilities.values(), program.abilities.keys());
        },
        .random_follow_evos => for (program.first_evos.keys()) |species| {
            const pokemon = program.pokemons.getPtr(species).?;
            util.random.items(random, pokemon.abilities.values(), program.abilities.keys());
            program.copyFieldsToEvolutions(pokemon.*, &.{"abilities"});
        },
    };
    if (program.items.keys().len != 0) switch (program.options.items) {
        .unchanged => {},
        .random => for (program.pokemons.values()) |*pokemon| {
            util.random.items(random, pokemon.items.values(), program.items.keys());
        },
        .random_follow_evos => for (program.first_evos.keys()) |species| {
            const pokemon = program.pokemons.getPtr(species).?;
            util.random.items(random, pokemon.items.values(), program.items.keys());
            program.copyFieldsToEvolutions(pokemon.*, &.{"items"});
        },
    };
    switch (program.options.stats) {
        .unchanged => {},
        .random => for (program.pokemons.values()) |*pokemon| {
            program.randomizeStats(random, pokemon);
        },
        // randomizeStatsEx recurses into evolutions itself for this mode.
        .random_follow_evos => for (program.first_evos.keys()) |species| {
            const pokemon = program.pokemons.getPtr(species).?;
            program.randomizeStats(random, pokemon);
        },
    }
    switch (program.options.machines) {
        .unchanged => {},
        .random => for (program.pokemons.values()) |*pokemon| {
            program.randomizeMachinesLearned(
                random,
                &pokemon.tms_learned,
                pokemon.types.values(),
                program.tms,
            );
            program.randomizeMachinesLearned(
                random,
                &pokemon.hms_learned,
                pokemon.types.values(),
                program.hms,
            );
        },
        .random_follow_evos => for (program.first_evos.keys()) |species| {
            const pokemon = program.pokemons.getPtr(species).?;
            program.randomizeMachinesLearned(
                random,
                &pokemon.tms_learned,
                pokemon.types.values(),
                program.tms,
            );
            program.randomizeMachinesLearned(
                random,
                &pokemon.hms_learned,
                pokemon.types.values(),
                program.hms,
            );
            program.copyFieldsToEvolutions(pokemon.*, &.{ "hms_learned", "tms_learned" });
        },
    }
}

// For each machine slot, rolls whether the pokemon learns it. The roll uses
// the stab chance when the machine's move shares a type with the pokemon
// (status moves only count as stab when the option says so), else the
// non-stab chance.
fn randomizeMachinesLearned(
    program: *Program,
    random: rand.Random,
    learned: *MachinesLearned,
    pokemon_types: []const u16,
    machines: Machines,
) void {
    var i: usize = 0;
    while (i < learned.len) : (i += 1) {
        const chance: f64 = blk: {
            const no_stab_chance = program.options.chance_to_learn_non_stab_machine;
            const move_id = machines.get(@intCast(u8, i)) orelse break :blk no_stab_chance;
            const move = program.moves.get(move_id) orelse break :blk no_stab_chance;
            if (move.category == .status and !program.options.status_moves_are_stab)
                break :blk no_stab_chance;
            if (mem.indexOfScalar(u16, pokemon_types, move.type) == null)
                break :blk no_stab_chance;
            break :blk program.options.chance_to_learn_stab_machine;
        };
        learned.set(i, random.float(f64) < chance);
    }
}

// Recursively copies the named fields from a pokemon onto every pokemon in
// its evolution chain.
fn copyFieldsToEvolutions(
    program: *Program,
    pokemon: Pokemon,
    comptime fields: []const []const u8,
) void {
    for (pokemon.evos.values()) |evo_species| {
        const evo = program.pokemons.getPtr(evo_species).?;
        inline for (fields) |field|
            @field(evo, field) = @field(pokemon, field);
        program.copyFieldsToEvolutions(evo.*, fields);
    }
}

// Randomizes a pokemon's stats starting from an all-zero baseline.
fn randomizeStats(program: *Program, random: rand.Random, pokemon: *Pokemon) void {
    var stats = pokemon.statsToArray();
    mem.set(u8, stats.slice(), 0);
    program.randomizeStatsEx(random, pokemon, stats);
}

// Distributes random stat points on top of `stats_to_start_from`. With
// --same-total-stats the current total is preserved; otherwise a new total is
// drawn that is at least the starting total + 1. In random_follow_evos mode
// the result is used as the floor for each evolution, so evolutions end up
// with stats >= their pre-evolution's.
fn randomizeStatsEx(
    program: *Program,
    random: rand.Random,
    pokemon: *Pokemon,
    stats_to_start_from: std.BoundedArray(u8, Stats.len),
) void {
    const new_total = if (program.options.same_total_stats) blk: {
        const stats = pokemon.statsToArray();
        break :blk it.fold(stats.constSlice(), @as(usize, 0), foldu8);
    } else blk: {
        const min_total = it.fold(stats_to_start_from.constSlice(), @as(usize, 0), foldu8) + 1;
        const max_total = stats_to_start_from.len * math.maxInt(u8);
        break :blk random.intRangeAtMost(usize, math.min(min_total, max_total), max_total);
    };
    var stats = stats_to_start_from;
    randomUntilSum(random, u8, stats.slice(), new_total);
    pokemon.statsFromSlice(stats.slice());
    if (program.options.stats == .random_follow_evos) {
        for (pokemon.evos.values()) |species| {
            const evo = program.pokemons.getPtr(species).?;
            program.randomizeStatsEx(random, evo, stats);
        }
    }
}

// Nudges random elements of buf up (or down) one at a time, using saturating
// arithmetic, until the buffer's sum reaches `sum` (clamped to what the
// element type can hold).
fn randomUntilSum(
    random: rand.Random,
    comptime T: type,
    buf: []T,
    sum: usize,
) void {
    var curr = it.fold(buf, @as(usize, 0), foldu8);
    const max = math.min(sum, buf.len * math.maxInt(T));
    while (curr < max) {
        const item = util.random.item(random, buf).?;
        const old = item.*;
        item.* +|= 1;
        curr += item.* - old;
    }
    while (curr > max) {
        const item = util.random.item(random, buf).?;
        const old = item.*;
        item.* -|= 1;
        curr -= old - item.*;
    }
}

fn foldu8(a: usize, b: u8) usize {
    return a + b;
}

fn foldf32(a: f64, b: f32) f64 {
    return a + b;
}

const EvoMethods = std.AutoArrayHashMapUnmanaged(u8, format.Evolution.Method);
const EvoParams = std.AutoArrayHashMapUnmanaged(u8, u16);
const Evos = std.AutoArrayHashMapUnmanaged(u8, u16);
const Machines = std.AutoArrayHashMapUnmanaged(u8, u16);
const Map = std.AutoArrayHashMapUnmanaged(u8, u16);
const Moves = std.AutoArrayHashMapUnmanaged(u16, Move);
const Pokemons = std.AutoArrayHashMapUnmanaged(u16, Pokemon);
const Set = std.AutoArrayHashMapUnmanaged(u16, void);
// One bit per machine slot (128 slots).
const MachinesLearned = std.PackedIntArray(bool, math.maxInt(u7) + 1);
const Stats = std.EnumMap(meta.Tag(format.Stats(u8)), u8);

const Pokemon = struct {
    stats: Stats = Stats{},
    types: Map = Map{},
    abilities: Map = Map{},
    items: Map = Map{},
    // *_occupied tracks which slots appeared in the input; *_learned is the value.
    tms_learned: MachinesLearned = mem.zeroes(MachinesLearned),
    tms_occupied: MachinesLearned = mem.zeroes(MachinesLearned),
    hms_learned: MachinesLearned = mem.zeroes(MachinesLearned),
    hms_occupied: MachinesLearned = mem.zeroes(MachinesLearned),
    evos: Evos = Evos{},
    evo_params: EvoParams = EvoParams{},
    evo_methods: EvoMethods = EvoMethods{},

    // Snapshots the stat values present in the EnumMap, in iteration order.
    fn statsToArray(pokemon: *Pokemon) std.BoundedArray(u8, Stats.len) {
        var res = std.BoundedArray(u8, Stats.len){ .buffer = undefined };
        var stats_it = pokemon.stats.iterator();
        while (stats_it.next()) |stat|
            res.appendAssumeCapacity(stat.value.*);
        return res;
    }

    // Writes values back in the same iteration order statsToArray used.
    fn statsFromSlice(pokemon: *Pokemon, stats: []const u8) void {
        var i: usize = 0;
        var stats_it = pokemon.stats.iterator();
        while (stats_it.next()) |stat| : (i += 1)
            stat.value.* = stats[i];
    }
};

const Move = struct {
    type: u16 = math.maxInt(u16),
    category: format.Move.Category = .physical,
};

//
// Testing
//

// Runs the program over `opt` and parses its output back into a Program.
fn runProgram(arena: mem.Allocator, opt: util.testing.RunProgramOptions) !Program {
    const res = try util.testing.runProgram(Program, opt);
    defer testing.allocator.free(res);
    return collectData(arena, res);
}

// Feeds serialized game data through useGame to reconstruct program state.
fn collectData(arena: mem.Allocator, data: [:0]const u8) !Program {
    var program = Program{ .allocator = arena, .options = undefined };
    var parser = ston.Parser{ .str = data };
    var des = ston.Deserializer(format.Game){ .parser = &parser };
    while (des.next()) |game| {
        program.useGame(game) catch |err| switch (err) {
            error.DidNotConsumeData => continue,
            else => {
                try testing.expect(false);
                unreachable;
            },
        };
    } else |_| {
        // Deserialization ends in error; verify the whole input was consumed.
        try testing.expectEqual(parser.str.len, parser.i);
    }
    return program;
}

// Asserts that each evolution's stats dominate (or mirror, per-stat) its
// pre-evolution's, optionally allowing a lower total.
fn expectStatsFollowEvos(program: Program, allow_evo_with_lower_stats: bool) !void {
    const pokemons = program.pokemons.values();
    for (pokemons) |*pokemon| {
        const pokemon_stats = pokemon.statsToArray();
        const pokemon_total = it.fold(pokemon_stats.constSlice(), @as(usize, 0), foldu8);
        for (pokemon.evos.values()) |species| {
            const evo = program.pokemons.getPtr(species).?;
            const evo_stats = evo.statsToArray();
            const evo_total = it.fold(evo_stats.constSlice(), @as(usize, 0), foldu8);
            try testing.expectEqual(pokemon_stats.len, evo_stats.len);
            for (pokemon_stats.constSlice()) |poke_stat, i| {
                const evo_stat = evo_stats.constSlice()[i];
                const evo_has_more_stats = pokemon_total <= evo_total;
                if (!allow_evo_with_lower_stats)
                    try testing.expect(evo_has_more_stats);
                try testing.expect((evo_has_more_stats and poke_stat <= evo_stat) or
                    (!evo_has_more_stats and poke_stat >= evo_stat));
            }
        }
    }
}

// Asserts a map-valued field is identical along every evolution edge.
fn expectPokemonMapFieldFollowEvo(program: Program, comptime field: []const u8) !void {
    const pokemons = program.pokemons.values();
    for (pokemons) |*pokemon| {
        for (pokemon.evos.values()) |species| {
            const evo = program.pokemons.getPtr(species).?;
            try util.set.expectEqual(@field(pokemon, field), @field(evo, field));
        }
    }
}

// Asserts every pokemon kept its total stat sum between two program states.
fn expectSameTotalStats(old_prog: Program, new_prog: Program) !void {
    const old_keys = old_prog.pokemons.keys();
    const new_keys = new_prog.pokemons.keys();
    const old_pokemons = old_prog.pokemons.values();
    const new_pokemons = new_prog.pokemons.values();
    try testing.expectEqual(old_pokemons.len, new_pokemons.len);
    for (old_pokemons) |*old, i| {
        const new = &new_pokemons[i];
        const old_stats = old.statsToArray();
        const new_stats = new.statsToArray();
        try testing.expectEqual(old_keys[i], new_keys[i]);
        try testing.expectEqual(old_stats.len, new_stats.len);
        try testing.expectEqual(
            it.fold(old_stats.constSlice(), @as(usize, 0), foldu8),
            it.fold(new_stats.constSlice(), @as(usize, 0), foldu8),
        );
    }
}

const number_of_seeds = 40;
const Pattern = util.testing.Pattern;

test "stats" {
    const test_case = try util.testing.filter(util.testing.test_case, &.{
        ".pokemons[*].stats.*",
        ".pokemons[*].evos[*].*",
    });
    defer
    testing.allocator.free(test_case);
    var original_arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer original_arena.deinit();
    const original = try collectData(original_arena.allocator(), test_case);
    // Plain --stats=random must keep one entry per pokemon per stat.
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{"--stats=random"},
        .patterns = &[_]Pattern{
            Pattern.glob(710, 710, ".pokemons[*].stats.hp=*"),
            Pattern.glob(710, 710, ".pokemons[*].stats.attack=*"),
            Pattern.glob(710, 710, ".pokemons[*].stats.defense=*"),
            Pattern.glob(710, 710, ".pokemons[*].stats.speed=*"),
            Pattern.glob(710, 710, ".pokemons[*].stats.sp_attack=*"),
            Pattern.glob(710, 710, ".pokemons[*].stats.sp_defense=*"),
        },
    });
    // random_follow_evos: evolutions must strictly dominate, for many seeds.
    var seed: usize = 0;
    while (seed < number_of_seeds) : (seed += 1) {
        var buf: [20]u8 = undefined;
        const seed_arg = std.fmt.bufPrint(&buf, "--seed={}", .{seed}) catch unreachable;
        var arena = std.heap.ArenaAllocator.init(testing.allocator);
        defer arena.deinit();
        const data = try runProgram(arena.allocator(), .{
            .in = test_case,
            .args = &[_][]const u8{ "--stats=random_follow_evos", seed_arg },
        });
        try expectStatsFollowEvos(data, false);
    }
    // With --same-total-stats totals must survive, and evolutions may now
    // have lower totals than their pre-evolutions.
    seed = 0;
    while (seed < number_of_seeds) : (seed += 1) {
        var buf: [20]u8 = undefined;
        const seed_arg = std.fmt.bufPrint(&buf, "--seed={}", .{seed}) catch unreachable;
        var arena = std.heap.ArenaAllocator.init(testing.allocator);
        defer arena.deinit();
        const data = try runProgram(arena.allocator(), .{
            .in = test_case,
            .args = &[_][]const u8{
                "--stats=random_follow_evos",
                "--same-total-stats",
                seed_arg,
            },
        });
        try expectSameTotalStats(original, data);
        try expectStatsFollowEvos(data, true);
    }
}

test "machines" {
    const test_case = try util.testing.filter(util.testing.test_case, &.{
        ".pokemons[*].types[*]=*",
        ".pokemons[*].tms[*]=*",
        ".pokemons[*].hms[*]=*",
        ".pokemons[*].evos[*].*",
        ".moves[*].type=*",
        ".moves[*].category=*",
        ".tms[*]=*",
        ".hms[*]=*",
    });
    defer testing.allocator.free(test_case);
    // Baseline: entry counts without randomization.
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{},
        .patterns = &[_]Pattern{
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=*"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=*"),
        },
    });
    // Each case below pins exact true/false counts for a chance combination;
    // expected values are regression-locked to the fixed test seed.
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=false"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=false"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(4146, 4146, ".pokemons[*].tms[*]=true"),
            Pattern.glob(63304, 63304, ".pokemons[*].tms[*]=false"),
            Pattern.glob(650, 650, ".pokemons[*].hms[*]=true"),
            Pattern.glob(3610, 3610, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(6612, 6612, ".pokemons[*].tms[*]=true"),
            Pattern.glob(60838, 60838, ".pokemons[*].tms[*]=false"),
            Pattern.glob(650, 650, ".pokemons[*].hms[*]=true"),
            Pattern.glob(3610, 3610, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(63304, 63304, ".pokemons[*].tms[*]=true"),
            Pattern.glob(4146, 4146, ".pokemons[*].tms[*]=false"),
            Pattern.glob(3610, 3610, ".pokemons[*].hms[*]=true"),
            Pattern.glob(650, 650, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(60838, 60838, ".pokemons[*].tms[*]=true"),
            Pattern.glob(6612, 6612, ".pokemons[*].tms[*]=false"),
            Pattern.glob(3610, 3610, ".pokemons[*].hms[*]=true"),
            Pattern.glob(650, 650, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=true"),
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=false"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=true"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=true"),
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=false"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=true"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=false"),
        },
    });
    // Same matrix again for the random_follow_evos method.
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random_follow_evos",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=false"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random_follow_evos",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=false"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=true"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random_follow_evos",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(4078, 4078, ".pokemons[*].tms[*]=true"),
            Pattern.glob(63372, 63372, ".pokemons[*].tms[*]=false"),
            Pattern.glob(651, 651, ".pokemons[*].hms[*]=true"),
            Pattern.glob(3609, 3609, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random_follow_evos",
            "--stab-machine-chance=1.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(6587, 6587, ".pokemons[*].tms[*]=true"),
            Pattern.glob(60863, 60863, ".pokemons[*].tms[*]=false"),
            Pattern.glob(651, 651, ".pokemons[*].hms[*]=true"),
            Pattern.glob(3609, 3609, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random_follow_evos",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(63372, 63372, ".pokemons[*].tms[*]=true"),
            Pattern.glob(4078, 4078, ".pokemons[*].tms[*]=false"),
            Pattern.glob(3609, 3609, ".pokemons[*].hms[*]=true"),
            Pattern.glob(651, 651, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random_follow_evos",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=1.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(60863, 60863, ".pokemons[*].tms[*]=true"),
            Pattern.glob(6587, 6587, ".pokemons[*].tms[*]=false"),
            Pattern.glob(3609, 3609, ".pokemons[*].hms[*]=true"),
            Pattern.glob(651, 651, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--machines=random_follow_evos",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=true"),
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=false"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=true"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=false"),
        },
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{
            "--status-moves-are-stab",
            "--machines=random_follow_evos",
            "--stab-machine-chance=0.0",
            "--non-stab-machine-chance=0.0",
        },
        .patterns = &[_]Pattern{
            Pattern.glob(0, 0, ".pokemons[*].tms[*]=true"),
            Pattern.glob(67450, 67450, ".pokemons[*].tms[*]=false"),
            Pattern.glob(0, 0, ".pokemons[*].hms[*]=true"),
            Pattern.glob(4260, 4260, ".pokemons[*].hms[*]=false"),
        },
    });
}

test "types" {
    const test_case = try util.testing.filter(util.testing.test_case, &.{
        ".pokemons[*].types[*]=*",
        ".pokemons[*].evos[*].*",
        ".types[*].name=*",
    });
    defer testing.allocator.free(test_case);
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{"--types=unchanged"},
        .patterns = &[_]Pattern{Pattern.glob(1420, 1420, ".pokemons[*].types[*]=*")},
    });
    try util.testing.runProgramFindPatterns(Program, .{
        .in = test_case,
        .args = &[_][]const u8{"--types=random"},
        .patterns = &[_]Pattern{Pattern.glob(1420, 1420, ".pokemons[*].types[*]=*")},
    });
    // NOTE(review): this file is truncated here in the visible chunk; the
    // remainder of this test continues beyond it.
    var seed: usize = 0;
    while (seed < number_of_seeds) : (seed += 1) {
        var buf: [20]u8 = undefined;
        const seed_arg =
std.fmt.bufPrint(&buf, "--seed={}", .{seed}) catch unreachable; var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); const data = try runProgram(arena.allocator(), .{ .in = test_case, .args = &[_][]const u8{ "--types=random_follow_evos", seed_arg }, }); try expectPokemonMapFieldFollowEvo(data, "types"); } } test "abilities" { const test_case = try util.testing.filter(util.testing.test_case, &.{ ".pokemons[*].abilities[*]=*", ".pokemons[*].evos[*].*", ".abilities[*].name=*", }); defer testing.allocator.free(test_case); try util.testing.runProgramFindPatterns(Program, .{ .in = test_case, .args = &[_][]const u8{"--abilities=unchanged"}, .patterns = &[_]Pattern{Pattern.glob(2130, 2130, ".pokemons[*].abilities[*]=*")}, }); try util.testing.runProgramFindPatterns(Program, .{ .in = test_case, .args = &[_][]const u8{"--abilities=random"}, .patterns = &[_]Pattern{Pattern.glob(2130, 2130, ".pokemons[*].abilities[*]=*")}, }); var seed: usize = 0; while (seed < number_of_seeds) : (seed += 1) { var buf: [20]u8 = undefined; const seed_arg = std.fmt.bufPrint(&buf, "--seed={}", .{seed}) catch unreachable; var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); const data = try runProgram(arena.allocator(), .{ .in = test_case, .args = &[_][]const u8{ "--abilities=random_follow_evos", seed_arg }, }); try expectPokemonMapFieldFollowEvo(data, "abilities"); } } test "items" { const test_case = try util.testing.filter(util.testing.test_case, &.{ ".pokemons[*].items[*]=*", ".pokemons[*].evos[*].*", ".items[*].name=*", }); defer testing.allocator.free(test_case); try util.testing.runProgramFindPatterns(Program, .{ .in = test_case, .args = &[_][]const u8{"--items=unchanged"}, .patterns = &[_]Pattern{Pattern.glob(2130, 2130, ".pokemons[*].items[*]=*")}, }); try util.testing.runProgramFindPatterns(Program, .{ .in = test_case, .args = &[_][]const u8{"--items=random"}, .patterns = &[_]Pattern{Pattern.glob(2130, 2130, 
".pokemons[*].items[*]=*")}, }); var seed: usize = 0; while (seed < number_of_seeds) : (seed += 1) { var buf: [20]u8 = undefined; const seed_arg = std.fmt.bufPrint(&buf, "--seed={}", .{seed}) catch unreachable; var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); const data = try runProgram(arena.allocator(), .{ .in = test_case, .args = &[_][]const u8{ "--items=random_follow_evos", seed_arg }, }); try expectPokemonMapFieldFollowEvo(data, "items"); } }
src/randomizers/tm35-rand-pokemons.zig
const std = @import("std");
const assert = std.debug.assert;

/// Advent of Code 2021 day 10 (part 1) entry point: reads input/10.txt and
/// prints the total syntax-error score of all corrupted lines.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var input_file = try std.fs.cwd().openFile("input/10.txt", .{});
    defer input_file.close();
    var buffered_reader = std.io.bufferedReader(input_file.reader());

    const total_score = try calculateTotalScore(allocator, buffered_reader.reader());
    std.debug.print("total score: {}\n", .{total_score});
}

/// Pops the most recently opened bracket, or 0 when nothing is open.
/// 0 never matches an opener, so a closer on an empty stack counts as corrupt.
fn popOpener(stack: *std.ArrayList(u8)) u8 {
    return stack.popOrNull() orelse 0;
}

/// Sums the syntax-error score of every corrupted line read from `reader`.
/// The first illegal closing character of a line scores it: ')' = 3,
/// ']' = 57, '}' = 1197, '>' = 25137. Incomplete lines contribute 0.
/// Returns error.InvalidSymbol for characters outside ()[]{}<>; lines longer
/// than 4096 bytes fail in readUntilDelimiterOrEof.
fn calculateTotalScore(gpa: std.mem.Allocator, reader: anytype) !u64 {
    var arena = std.heap.ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = arena.allocator();

    // One stack reused across lines instead of allocating a fresh list per line.
    var parenthesis_stack = std.ArrayList(u8).init(allocator);

    var buf: [4096]u8 = undefined;
    var sum: u64 = 0;
    lines: while (try reader.readUntilDelimiterOrEof(&buf, '\n')) |line| {
        parenthesis_stack.clearRetainingCapacity();
        for (line) |c| {
            switch (c) {
                '(', '[', '{', '<' => try parenthesis_stack.append(c),
                // Fixed: the original called pop() unconditionally, which
                // asserts (illegal behavior in safe builds) when a closing
                // bracket appears with nothing open; such a line is simply
                // corrupted and scores like any other mismatch.
                ')' => if (popOpener(&parenthesis_stack) != '(') {
                    sum += 3;
                    continue :lines;
                },
                ']' => if (popOpener(&parenthesis_stack) != '[') {
                    sum += 57;
                    continue :lines;
                },
                '}' => if (popOpener(&parenthesis_stack) != '{') {
                    sum += 1197;
                    continue :lines;
                },
                '>' => if (popOpener(&parenthesis_stack) != '<') {
                    sum += 25137;
                    continue :lines;
                },
                else => return error.InvalidSymbol,
            }
        }
    }
    return sum;
}

test "example 1" {
    const text =
        \\[({(<(())[]>[[{[]{<()<>>
        \\[(()[<>])]({[<{<<[]>>(
        \\{([(<{}[<>[]}>{[]{[(<()>
        \\(((({<>}<{<{<>}{[]{[]{}
        \\[[<[([]))<([[{}[[()]]]
        \\[{[{({}]{}}([{[{{{}}([]
        \\{<[[]]>}<{[{[{[]{()[[[]
        \\[<(<(<(<{}))><([]([]()
        \\<{([([[(<>()){}]>(<<{{
        \\<{([{{}}[<[[[<>{}]]]>[]]
    ;
    var fbs = std.io.fixedBufferStream(text);
    const total_score = try calculateTotalScore(std.testing.allocator, fbs.reader());
    try std.testing.expectEqual(@as(u64, 26397), total_score);
}
src/10.zig
//! Thin Zig wrapper around libcurl (easy API, URL API, header lists).
//! Written against a pre-0.11 Zig (two-argument @ptrCast/@alignCast, @ptrToInt).
const std = @import("std");
const testing = std.testing;

/// Raw libcurl C API, exposed for options not wrapped below.
pub const c = @cImport({
    @cInclude("curl/curl.h");
});

/// Must be called once per process before any other libcurl use.
pub fn globalInit() Error!void {
    return tryCurl(c.curl_global_init(c.CURL_GLOBAL_ALL));
}

/// Counterpart to `globalInit`; call once at process shutdown.
pub fn globalCleanup() void {
    c.curl_global_cleanup();
}

// Aliases over libcurl's callback and offset typedefs.
pub const XferInfoFn = c.curl_xferinfo_callback;
pub const WriteFn = c.curl_write_callback;
pub const ReadFn = c.curl_read_callback;
pub const Offset = c.curl_off_t;

/// if you set this as a write function, you must set write data to a fifo of the same type
pub fn writeToFifo(comptime FifoType: type) WriteFn {
    return struct {
        fn writeFn(ptr: ?[*]u8, size: usize, nmemb: usize, data: ?*anyopaque) callconv(.C) usize {
            _ = size;
            // NOTE(review): only `nmemb` is used as the byte count; curl
            // documents the payload length as size*nmemb — confirm size is
            // always 1 for write callbacks here.
            var slice = (ptr orelse return 0)[0..nmemb];
            const fifo = @ptrCast(
                *FifoType,
                @alignCast(
                    @alignOf(*FifoType),
                    data orelse return 0,
                ),
            );
            // Returning 0 (fewer bytes than delivered) makes curl abort the transfer.
            fifo.writer().writeAll(slice) catch return 0;
            return nmemb;
        }
    }.writeFn;
}

/// if you set this as a read function, you must set read data to an FBS of the same type
pub fn readFromFbs(comptime FbsType: type) ReadFn {
    // Reject any FixedBufferStream instantiation other than []u8 / []const u8.
    const BufferType = switch (FbsType) {
        std.io.FixedBufferStream([]u8) => []u8,
        std.io.FixedBufferStream([]const u8) => []const u8,
        else => @compileError("std.io.FixedBufferStream can only have []u8 or []const u8 buffer type"),
    };
    return struct {
        fn readFn(buffer: ?[*]u8, size: usize, nitems: usize, data: ?*anyopaque) callconv(.C) usize {
            const to = (buffer orelse return c.CURL_READFUNC_ABORT)[0 .. size * nitems];
            var fbs = @ptrCast(
                *std.io.FixedBufferStream(BufferType),
                @alignCast(
                    @alignOf(*std.io.FixedBufferStream(BufferType)),
                    data orelse return c.CURL_READFUNC_ABORT,
                ),
            );
            return fbs.read(to) catch |err| blk: {
                std.log.err("get fbs read error: {s}", .{@errorName(err)});
                break :blk c.CURL_READFUNC_ABORT;
            };
        }
    }.readFn;
}

/// Wrapper over a libcurl `CURL *` easy handle; each setter maps 1:1 to a
/// curl_easy_setopt option.
pub const Easy = opaque {
    pub fn init() Error!*Easy {
        return @ptrCast(?*Easy, c.curl_easy_init()) orelse error.FailedInit;
    }
    pub fn cleanup(self: *Easy) void {
        c.curl_easy_cleanup(self);
    }
    pub fn setUrl(self: *Easy, url: [:0]const u8) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_URL, url.ptr));
    }
    pub fn setFollowLocation(self: *Easy, val: bool) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_FOLLOWLOCATION, @as(c_ulong, if (val) 1 else 0)));
    }
    pub fn setVerbose(self: *Easy, val: bool) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_VERBOSE, @as(c_ulong, if (val) 1 else 0)));
    }
    pub fn setSslVerifyPeer(self: *Easy, val: bool) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_SSL_VERIFYPEER, @as(c_ulong, if (val) 1 else 0)));
    }
    pub fn setAcceptEncodingGzip(self: *Easy) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_ACCEPT_ENCODING, "gzip"));
    }
    pub fn setReadFn(self: *Easy, read: ReadFn) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_READFUNCTION, read));
    }
    pub fn setReadData(self: *Easy, data: *anyopaque) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_READDATA, data));
    }
    pub fn setWriteFn(self: *Easy, write: WriteFn) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_WRITEFUNCTION, write));
    }
    pub fn setWriteData(self: *Easy, data: *anyopaque) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_WRITEDATA, data));
    }
    pub fn setNoProgress(self: *Easy, val: bool) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_NOPROGRESS, @as(c_ulong, if (val) 1 else 0)));
    }
    pub fn setXferInfoFn(self: *Easy, xfer:
XferInfoFn) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_XFERINFOFUNCTION, xfer));
    }
    /// Opaque pointer handed to the transfer-info (progress) callback.
    pub fn setXferInfoData(self: *Easy, data: *anyopaque) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_XFERINFODATA, data));
    }
    /// Buffer that receives a human-readable message for the last error.
    /// Fixed: this previously set CURLOPT_XFERINFODATA (copy-paste error),
    /// which clobbered the progress-callback pointer and never enabled the
    /// error buffer.
    pub fn setErrorBuffer(self: *Easy, data: *[c.CURL_ERROR_SIZE]u8) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_ERRORBUFFER, data));
    }
    pub fn setHeaders(self: *Easy, headers: HeaderList) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_HTTPHEADER, headers.inner));
    }
    pub fn setPost(self: *Easy) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_POST, @as(c_ulong, 1)));
    }
    pub fn setPostFields(self: *Easy, data: *anyopaque) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_POSTFIELDS, @ptrToInt(data)));
    }
    pub fn setPostFieldSize(self: *Easy, size: usize) Error!void {
        return tryCurl(c.curl_easy_setopt(self, c.CURLOPT_POSTFIELDSIZE, @intCast(c_ulong, size)));
    }
    pub fn perform(self: *Easy) Error!void {
        return tryCurl(c.curl_easy_perform(self));
    }
    pub fn getResponseCode(self: *Easy) Error!isize {
        var code: isize = 0;
        try tryCurl(c.curl_easy_getinfo(self, c.CURLINFO_RESPONSE_CODE, &code));
        return code;
    }
};

/// Write callback that discards the body (for tests that only check status).
fn emptyWrite(ptr: ?[*]u8, size: usize, nmemb: usize, data: ?*anyopaque) callconv(.C) usize {
    _ = ptr;
    _ = data;
    _ = size;
    return nmemb;
}

test "https get" {
    const Fifo = std.fifo.LinearFifo(u8, .{ .Dynamic = {} });
    try globalInit();
    defer globalCleanup();
    var fifo = Fifo.init(std.testing.allocator);
    defer fifo.deinit();
    var easy = try Easy.init();
    defer easy.cleanup();
    try easy.setUrl("https://httpbin.org/get");
    try easy.setSslVerifyPeer(false);
    try easy.setWriteFn(writeToFifo(Fifo));
    try easy.setWriteData(&fifo);
    try easy.setVerbose(true);
    try easy.perform();
    const code = try easy.getResponseCode();
    try std.testing.expectEqual(@as(isize, 200), code);
}

test "https get gzip encoded" {
    const Fifo = std.fifo.LinearFifo(u8, .{ .Dynamic = {} });
    try globalInit();
    defer
globalCleanup();
    var fifo = Fifo.init(std.testing.allocator);
    defer fifo.deinit();
    var easy = try Easy.init();
    defer easy.cleanup();
    try easy.setUrl("http://httpbin.org/gzip");
    try easy.setSslVerifyPeer(false);
    try easy.setAcceptEncodingGzip();
    try easy.setWriteFn(writeToFifo(Fifo));
    try easy.setWriteData(&fifo);
    try easy.setVerbose(true);
    try easy.perform();
    const code = try easy.getResponseCode();
    try std.testing.expectEqual(@as(isize, 200), code);
}

test "https post" {
    try globalInit();
    defer globalCleanup();
    var easy = try Easy.init();
    defer easy.cleanup();
    const payload = "this is a payload";
    var fbs = std.io.fixedBufferStream(payload);
    // NOTE(review): pos is set to the end of the buffer, so the read callback
    // sees EOF immediately and the POST body is empty — presumably pos should
    // stay 0 for the payload to be sent; confirm upstream intent.
    fbs.pos = payload.len;
    try easy.setUrl("https://httpbin.org/post");
    try easy.setPost();
    try easy.setSslVerifyPeer(false);
    try easy.setWriteFn(emptyWrite);
    try easy.setReadFn(readFromFbs(@TypeOf(fbs)));
    try easy.setReadData(&fbs);
    try easy.setVerbose(true);
    try easy.perform();
    const code = try easy.getResponseCode();
    try std.testing.expectEqual(@as(isize, 200), code);
}

/// Wrapper over libcurl's URL-parsing API (`CURLU` handle).
pub const Url = opaque {
    pub fn init() UrlError!*Url {
        return @ptrCast(?*Url, c.curl_url()) orelse error.FailedInit;
    }
    pub fn cleanup(self: *Url) void {
        c.curl_url_cleanup(@ptrCast(*c.CURLU, self));
    }
    /// Parses `url` into the handle; individual parts are then read with the
    /// getters below.
    pub fn set(self: *Url, url: [:0]const u8) UrlError!void {
        return tryCurlUrl(c.curl_url_set(@ptrCast(*c.CURLU, self), c.CURLUPART_URL, url.ptr, 0));
    }
    // NOTE(review): the getter strings are allocated by libcurl; ownership
    // (curl_free by the caller) is not handled here — confirm no leak at call
    // sites.
    pub fn getHost(self: *Url) UrlError![*:0]u8 {
        var host: ?[*:0]u8 = undefined;
        try tryCurlUrl(c.curl_url_get(@ptrCast(*c.CURLU, self), c.CURLUPART_HOST, &host, 0));
        return host.?;
    }
    pub fn getPath(self: *Url) UrlError![*:0]u8 {
        var path: ?[*:0]u8 = undefined;
        try tryCurlUrl(c.curl_url_get(@ptrCast(*c.CURLU, self), c.CURLUPART_PATH, &path, 0));
        return path.?;
    }
    pub fn getScheme(self: *Url) UrlError![*:0]u8 {
        var scheme: ?[*:0]u8 = undefined;
        try tryCurlUrl(c.curl_url_get(@ptrCast(*c.CURLU, self), c.CURLUPART_SCHEME, &scheme, 0));
        return scheme.?;
    }
    pub fn getPort(self: *Url) UrlError![*:0]u8 {
        var port: ?[*:0]u8 = undefined;
        try tryCurlUrl(c.curl_url_get(@ptrCast(*c.CURLU, self), c.CURLUPART_PORT, &port, 0));
        return port.?;
    }
    pub fn getQuery(self: *Url) UrlError![*:0]u8 {
        var query: ?[*:0]u8 = undefined;
        try tryCurlUrl(c.curl_url_get(@ptrCast(*c.CURLU, self), c.CURLUPART_QUERY, &query, 0));
        return query.?;
    }
    fn tryCurlUrl(code: c.CURLUcode) UrlError!void {
        if (code != c.CURLUE_OK) return errorFromCurlUrl(code);
    }
};

test "parse url" {
    const url = try Url.init();
    defer url.cleanup();
    try url.set("https://arst.com:80/blarg/foo.git?what=yes&please=no");
    const scheme = try url.getScheme();
    try std.testing.expectEqualStrings("https", std.mem.span(scheme));
    const host = try url.getHost();
    try std.testing.expectEqualStrings("arst.com", std.mem.span(host));
    const port = try url.getPort();
    try std.testing.expectEqualStrings("80", std.mem.span(port));
    const path = try url.getPath();
    try std.testing.expectEqualStrings("/blarg/foo.git", std.mem.span(path));
    const query = try url.getQuery();
    try std.testing.expectEqualStrings("what=yes&please=no", std.mem.span(query));
}

/// Wrapper over a `curl_slist` of header strings.
pub const HeaderList = struct {
    inner: ?*c.curl_slist,

    pub fn init() HeaderList {
        return HeaderList{
            .inner = null,
        };
    }
    pub fn freeAll(self: *HeaderList) void {
        c.curl_slist_free_all(self.inner);
    }
    /// Appends `entry` to the list; fails with CurlHeadersAppend when
    /// curl_slist_append returns null.
    pub fn append(self: *HeaderList, entry: [:0]const u8) !void {
        if (c.curl_slist_append(self.inner, entry.ptr)) |list| {
            self.inner = list;
        } else return error.CurlHeadersAppend;
    }
};

test "headers" {
    try globalInit();
    defer globalCleanup();
    var headers = HeaderList.init();
    defer headers.freeAll();
    // removes a header curl would put in for us
    try headers.append("Accept:");
    // a custom header
    try headers.append("MyCustomHeader: bruh");
    // a header with no value, note the semicolon
    try headers.append("ThisHasNoValue;");
    var easy = try Easy.init();
    defer easy.cleanup();
    try easy.setUrl("https://httpbin.org/get");
    try easy.setSslVerifyPeer(false);
    try easy.setWriteFn(emptyWrite);
    try easy.setVerbose(true);
    try
easy.setHeaders(headers); try easy.perform(); const code = try easy.getResponseCode(); try std.testing.expectEqual(@as(isize, 200), code); }

/// Error set for the URL-parsing API; produced from a CURLUcode by errorFromCurlUrl.
pub const UrlError = error{ FailedInit, BadHandle, BadPartpointer, MalformedInput, BadPortNumber, UnsupportedScheme, UrlDecode, OutOfMemory, UserNotAllowed, UnknownPart, NoScheme, NoUser, NoPassword, NoOptions, NoHost, NoPort, NoQuery, NoFragment, UnknownErrorCode, };

/// Error set for easy-handle operations; one name per CURLcode, produced by
/// errorFromCurl. Names mirror libcurl's (including the Obsolete* placeholders
/// and the original spellings CounldntConnect / Tftp_Unknownid).
pub const Error = error{ UnsupportedProtocol, FailedInit, UrlMalformat, NotBuiltIn, CouldntResolveProxy, CouldntResolveHost, CounldntConnect, WeirdServerReply, RemoteAccessDenied, FtpAcceptFailed, FtpWeirdPassReply, FtpAcceptTimeout, FtpWeirdPasvReply, FtpWeird227Format, FtpCantGetHost, Http2, FtpCouldntSetType, PartialFile, FtpCouldntRetrFile, Obsolete20, QuoteError, HttpReturnedError, WriteError, Obsolete24, UploadFailed, ReadError, OutOfMemory, OperationTimeout, Obsolete29, FtpPortFailed, FtpCouldntUseRest, Obsolete32, RangeError, HttpPostError, SslConnectError, BadDownloadResume, FileCouldntReadFile, LdapCannotBind, LdapSearchFailed, Obsolete40, FunctionNotFound, AbortByCallback, BadFunctionArgument, Obsolete44, InterfaceFailed, Obsolete46, TooManyRedirects, UnknownOption, SetoptOptionSyntax, Obsolete50, Obsolete51, GotNothing, SslEngineNotfound, SslEngineSetfailed, SendError, RecvError, Obsolete57, SslCertproblem, SslCipher, PeerFailedVerification, BadContentEncoding, LdapInvalidUrl, FilesizeExceeded, UseSslFailed, SendFailRewind, SslEngineInitfailed, LoginDenied, TftpNotfound, TftpPerm, RemoteDiskFull, TftpIllegal, Tftp_Unknownid, RemoteFileExists, TftpNosuchuser, ConvFailed, ConvReqd, SslCacertBadfile, RemoteFileNotFound, Ssh, SslShutdownFailed, Again, SslCrlBadfile, SslIssuerError, FtpPretFailed, RtspCseqError, RtspSessionError, FtpBadFileList, ChunkFailed, NoConnectionAvailable, SslPinnedpubkeynotmatch, SslInvalidcertstatus, Http2Stream, RecursiveApiCall, AuthError, Http3, QuicConnectError, Proxy, SslClientCert, UnknownErrorCode, };

/// Converts a CURLcode into `Error` (CURLE_OK maps to success).
fn tryCurl(code:
c.CURLcode) Error!void { if (code != c.CURLE_OK) return errorFromCurl(code); }

/// One-to-one mapping from libcurl CURLcode values to the `Error` set.
/// Unrecognized codes assert in debug builds and map to UnknownErrorCode.
fn errorFromCurl(code: c.CURLcode) Error { return switch (code) { c.CURLE_UNSUPPORTED_PROTOCOL => error.UnsupportedProtocol, c.CURLE_FAILED_INIT => error.FailedInit, c.CURLE_URL_MALFORMAT => error.UrlMalformat, c.CURLE_NOT_BUILT_IN => error.NotBuiltIn, c.CURLE_COULDNT_RESOLVE_PROXY => error.CouldntResolveProxy, c.CURLE_COULDNT_RESOLVE_HOST => error.CouldntResolveHost, c.CURLE_COULDNT_CONNECT => error.CounldntConnect, c.CURLE_WEIRD_SERVER_REPLY => error.WeirdServerReply, c.CURLE_REMOTE_ACCESS_DENIED => error.RemoteAccessDenied, c.CURLE_FTP_ACCEPT_FAILED => error.FtpAcceptFailed, c.CURLE_FTP_WEIRD_PASS_REPLY => error.FtpWeirdPassReply, c.CURLE_FTP_ACCEPT_TIMEOUT => error.FtpAcceptTimeout, c.CURLE_FTP_WEIRD_PASV_REPLY => error.FtpWeirdPasvReply, c.CURLE_FTP_WEIRD_227_FORMAT => error.FtpWeird227Format, c.CURLE_FTP_CANT_GET_HOST => error.FtpCantGetHost, c.CURLE_HTTP2 => error.Http2, c.CURLE_FTP_COULDNT_SET_TYPE => error.FtpCouldntSetType, c.CURLE_PARTIAL_FILE => error.PartialFile, c.CURLE_FTP_COULDNT_RETR_FILE => error.FtpCouldntRetrFile, c.CURLE_OBSOLETE20 => error.Obsolete20, c.CURLE_QUOTE_ERROR => error.QuoteError, c.CURLE_HTTP_RETURNED_ERROR => error.HttpReturnedError, c.CURLE_WRITE_ERROR => error.WriteError, c.CURLE_OBSOLETE24 => error.Obsolete24, c.CURLE_UPLOAD_FAILED => error.UploadFailed, c.CURLE_READ_ERROR => error.ReadError, c.CURLE_OUT_OF_MEMORY => error.OutOfMemory, c.CURLE_OPERATION_TIMEDOUT => error.OperationTimeout, c.CURLE_OBSOLETE29 => error.Obsolete29, c.CURLE_FTP_PORT_FAILED => error.FtpPortFailed, c.CURLE_FTP_COULDNT_USE_REST => error.FtpCouldntUseRest, c.CURLE_OBSOLETE32 => error.Obsolete32, c.CURLE_RANGE_ERROR => error.RangeError, c.CURLE_HTTP_POST_ERROR => error.HttpPostError, c.CURLE_SSL_CONNECT_ERROR => error.SslConnectError, c.CURLE_BAD_DOWNLOAD_RESUME => error.BadDownloadResume, c.CURLE_FILE_COULDNT_READ_FILE => error.FileCouldntReadFile, c.CURLE_LDAP_CANNOT_BIND =>
// (mapping continues; arm order follows the CURLcode numbering)
error.LdapCannotBind, c.CURLE_LDAP_SEARCH_FAILED => error.LdapSearchFailed, c.CURLE_OBSOLETE40 => error.Obsolete40, c.CURLE_FUNCTION_NOT_FOUND => error.FunctionNotFound, c.CURLE_ABORTED_BY_CALLBACK => error.AbortByCallback, c.CURLE_BAD_FUNCTION_ARGUMENT => error.BadFunctionArgument, c.CURLE_OBSOLETE44 => error.Obsolete44, c.CURLE_INTERFACE_FAILED => error.InterfaceFailed, c.CURLE_OBSOLETE46 => error.Obsolete46, c.CURLE_TOO_MANY_REDIRECTS => error.TooManyRedirects, c.CURLE_UNKNOWN_OPTION => error.UnknownOption, c.CURLE_SETOPT_OPTION_SYNTAX => error.SetoptOptionSyntax, c.CURLE_OBSOLETE50 => error.Obsolete50, c.CURLE_OBSOLETE51 => error.Obsolete51, c.CURLE_GOT_NOTHING => error.GotNothing, c.CURLE_SSL_ENGINE_NOTFOUND => error.SslEngineNotfound, c.CURLE_SSL_ENGINE_SETFAILED => error.SslEngineSetfailed, c.CURLE_SEND_ERROR => error.SendError, c.CURLE_RECV_ERROR => error.RecvError, c.CURLE_OBSOLETE57 => error.Obsolete57, c.CURLE_SSL_CERTPROBLEM => error.SslCertproblem, c.CURLE_SSL_CIPHER => error.SslCipher, c.CURLE_PEER_FAILED_VERIFICATION => error.PeerFailedVerification, c.CURLE_BAD_CONTENT_ENCODING => error.BadContentEncoding, c.CURLE_LDAP_INVALID_URL => error.LdapInvalidUrl, c.CURLE_FILESIZE_EXCEEDED => error.FilesizeExceeded, c.CURLE_USE_SSL_FAILED => error.UseSslFailed, c.CURLE_SEND_FAIL_REWIND => error.SendFailRewind, c.CURLE_SSL_ENGINE_INITFAILED => error.SslEngineInitfailed, c.CURLE_LOGIN_DENIED => error.LoginDenied, c.CURLE_TFTP_NOTFOUND => error.TftpNotfound, c.CURLE_TFTP_PERM => error.TftpPerm, c.CURLE_REMOTE_DISK_FULL => error.RemoteDiskFull, c.CURLE_TFTP_ILLEGAL => error.TftpIllegal, c.CURLE_TFTP_UNKNOWNID => error.Tftp_Unknownid, c.CURLE_REMOTE_FILE_EXISTS => error.RemoteFileExists, c.CURLE_TFTP_NOSUCHUSER => error.TftpNosuchuser, c.CURLE_CONV_FAILED => error.ConvFailed, c.CURLE_CONV_REQD => error.ConvReqd, c.CURLE_SSL_CACERT_BADFILE => error.SslCacertBadfile, c.CURLE_REMOTE_FILE_NOT_FOUND => error.RemoteFileNotFound, c.CURLE_SSH => error.Ssh,
// Tail of the CURLcode mapping, then the CURLUcode mapping for the URL API.
c.CURLE_SSL_SHUTDOWN_FAILED => error.SslShutdownFailed, c.CURLE_AGAIN => error.Again, c.CURLE_SSL_CRL_BADFILE => error.SslCrlBadfile, c.CURLE_SSL_ISSUER_ERROR => error.SslIssuerError, c.CURLE_FTP_PRET_FAILED => error.FtpPretFailed, c.CURLE_RTSP_CSEQ_ERROR => error.RtspCseqError, c.CURLE_RTSP_SESSION_ERROR => error.RtspSessionError, c.CURLE_FTP_BAD_FILE_LIST => error.FtpBadFileList, c.CURLE_CHUNK_FAILED => error.ChunkFailed, c.CURLE_NO_CONNECTION_AVAILABLE => error.NoConnectionAvailable, c.CURLE_SSL_PINNEDPUBKEYNOTMATCH => error.SslPinnedpubkeynotmatch, c.CURLE_SSL_INVALIDCERTSTATUS => error.SslInvalidcertstatus, c.CURLE_HTTP2_STREAM => error.Http2Stream, c.CURLE_RECURSIVE_API_CALL => error.RecursiveApiCall, c.CURLE_AUTH_ERROR => error.AuthError, c.CURLE_HTTP3 => error.Http3, c.CURLE_QUIC_CONNECT_ERROR => error.QuicConnectError, c.CURLE_PROXY => error.Proxy, c.CURLE_SSL_CLIENTCERT => error.SslClientCert, else => blk: { std.debug.assert(false); break :blk error.UnknownErrorCode; }, }; }

/// Maps a CURLUcode from the URL API to `UrlError`; unknown codes assert in
/// debug builds and map to UnknownErrorCode.
fn errorFromCurlUrl(code: c.CURLUcode) UrlError { return switch (code) { c.CURLUE_BAD_HANDLE => error.BadHandle, c.CURLUE_BAD_PARTPOINTER => error.BadPartpointer, c.CURLUE_MALFORMED_INPUT => error.MalformedInput, c.CURLUE_BAD_PORT_NUMBER => error.BadPortNumber, c.CURLUE_UNSUPPORTED_SCHEME => error.UnsupportedScheme, c.CURLUE_URLDECODE => error.UrlDecode, c.CURLUE_OUT_OF_MEMORY => error.OutOfMemory, c.CURLUE_USER_NOT_ALLOWED => error.UserNotAllowed, c.CURLUE_UNKNOWN_PART => error.UnknownPart, c.CURLUE_NO_SCHEME => error.NoScheme, c.CURLUE_NO_USER => error.NoUser, c.CURLUE_NO_PASSWORD => error.NoPassword, c.CURLUE_NO_OPTIONS => error.NoOptions, c.CURLUE_NO_HOST => error.NoHost, c.CURLUE_NO_PORT => error.NoPort, c.CURLUE_NO_QUERY => error.NoQuery, c.CURLUE_NO_FRAGMENT => error.NoFragment, else => blk: { std.debug.assert(false); break :blk error.UnknownErrorCode; }, }; }
.gyro/zig-libcurl-mattnite-github.com-f1f316dc/pkg/src/main.zig
//! Asset loading: reads asset files from disk, optionally decompresses them,
//! decodes models/animations/textures, and feeds a background loading queue.
//! Written against an old Zig (two-argument @intCast/@enumToInt, *Allocator).
const std = @import("std");
const assert = std.debug.assert;
const loadFile = @import("../Files.zig").loadFile;
const compress = @import("../Compress/Compress.zig");
const ModelData = @import("../ModelFiles/ModelFiles.zig").ModelData;
const AnimationData = @import("../ModelFiles/AnimationFiles.zig").AnimationData;
const wgi = @import("../WindowGraphicsInput/WindowGraphicsInput.zig");
const ConditionVariable = @import("../ConditionVariable.zig").ConditionVariable;
const ReferenceCounter = @import("../RefCount.zig").ReferenceCounter;

var assets_directory: ?[]const u8 = null;

// dir should be a global constant and not be changed again
pub fn setAssetsDirectory(dir: []const u8) void {
    assets_directory = dir;
}

// Static buffer used for appending asset file path to global asset directory path
// NOTE(review): shared mutable global — unsafe if Asset.load runs on multiple
// threads at once; confirm loads are serialised.
var path: [256]u8 = undefined;

pub const Asset = struct {
    ref_count: ReferenceCounter = ReferenceCounter{},
    // Reference counting is different for assets.
    // ref_count2 should be incremented at the same time as ref_count.
    // If the model data / texture data / etc. is no longer needed in system memory but the meta data
    // in the asset struct is still being used then ref_count2 should be decremented and fn freeData called
    ref_count2: ReferenceCounter = ReferenceCounter{},

    pub const AssetType = enum {
        Model,
        Texture,
        RGB10A2Texture,
        Animation,
        // Shader
    };

    // If true freeData does nothing
    // (indexed by @enumToInt(AssetType); animation data stays resident on the CPU)
    const asset_type_keep_data_on_cpu = [_]bool{
        false,
        false,
        false,
        true,
    };

    pub const AssetState = enum {
        NotLoaded, // Default state
        Loaded, // File read from HDD/SDD, ready for decompression
        Ready, // Asset is loaded and ready for use
        Freed, // Data has been freed
    };

    // Path relative to the assets directory; only the first file_path_len bytes are valid.
    file_path: [64]u8,
    file_path_len: u32,
    asset_type: AssetType,
    compressed: bool, // if true then this is a *.compressed file
    state: AssetState,
    data: ?[]align(4) u8,
    allocator: ?*std.mem.Allocator = null,

    // -- Configuration variables --

    // if asset_type == AssetType.Texture
    texture_channels: u32 = 0,

    // Optional callbacks invoked at the end of load() and decompress() respectively.
    whenFileLoaded: ?(fn (*Asset) void) = null,
    whenAssetDecoded: ?(fn (*Asset) void) = null,

    // -- Asset (meta)data --

    // if asset_type == AssetType.Model
    model: ?ModelData,

    // if asset_type == AssetType.Animation
    animation: ?AnimationData,

    // if asset_type == AssetType.Texture or asset_type == AssetType.RGB10A2Texture
    texture_width: ?u32,
    texture_height: ?u32,
    texture_type: ?wgi.image.ImageType,

    // if asset_type == AssetType.RGB10A2Texture
    rgb10a2_data: ?[]u8,

    // file_path_ is copied into the returned Asset struct
    // Don't forget to set the relevant configuration variables
    pub fn init(file_path_: []const u8) !Asset {
        if (file_path_.len > 64) {
            return error.PathTooLong;
        }

        // A trailing ".compressed" marks the file as compressed on disk; strip
        // it before sniffing the real extension below.
        var file_path = file_path_;
        var compressed = false;
        if (file_path.len >= 11 and std.mem.eql(u8, file_path[file_path.len - 11 ..], ".compressed")) {
            compressed = true;
            file_path = file_path[0 .. file_path.len - 11];
        }

        // Asset type is determined purely by file extension.
        var asset_type: AssetType = undefined;
        if (file_path.len >= 6 and std.mem.eql(u8, file_path[file_path.len - 6 ..], ".model")) {
            asset_type = AssetType.Model;
        } else if (file_path.len >= 5 and std.mem.eql(u8, file_path[file_path.len - 5 ..], ".anim")) {
            asset_type = AssetType.Animation;
        } else if (file_path.len >= 4 and std.mem.eql(u8, file_path[file_path.len - 4 ..], ".png")) {
            asset_type = AssetType.Texture;
        } else if (file_path.len >= 4 and std.mem.eql(u8, file_path[file_path.len - 4 ..], ".jpg")) {
            asset_type = AssetType.Texture;
        } else if (file_path.len >= 4 and std.mem.eql(u8, file_path[file_path.len - 4 ..], ".tga")) {
            asset_type = AssetType.Texture;
        } else if (file_path.len >= 4 and std.mem.eql(u8, file_path[file_path.len - 4 ..], ".bmp")) {
            asset_type = AssetType.Texture;
        } else if (file_path.len >= 8 and std.mem.eql(u8, file_path[file_path.len - 8 ..], ".rgb10a2")) {
            asset_type = AssetType.RGB10A2Texture;
        }
        // else if(file_path.len >= 3 and std.mem.eql(u8,
        // file_path[file_path.len-3..], ".vs")) {
        // asset_type = AssetType.Shader;
        // }
        // else if(file_path.len >= 3 and std.mem.eql(u8,
        // file_path[file_path.len-3..], ".fs")) {
        // asset_type = AssetType.Shader;
        // }
        // else if(file_path.len >= 5 and std.mem.eql(u8,
        // file_path[file_path.len-5..], ".glsl")) {
        // asset_type = AssetType.Shader;
        // }
        else {
            return error.UnknownAssetType;
        }

        var a = Asset{
            .file_path = undefined,
            .file_path_len = 0,
            .asset_type = asset_type,
            .compressed = compressed,
            .state = AssetState.NotLoaded,
            .model = null,
            .animation = null,
            .data = null,
            .texture_width = null,
            .texture_height = null,
            .rgb10a2_data = null,
            .texture_type = null,
        };
        std.mem.copy(u8, a.file_path[0..], file_path_);
        a.file_path_len = @intCast(u32, file_path_.len);
        return a;
    }

    // Reads the asset file into self.data (state NotLoaded -> Loaded). The
    // allocator is remembered for the later decompress()/freeData()/free() calls.
    pub fn load(self: *Asset, allocator_: *std.mem.Allocator) !void {
        if (self.state != AssetState.NotLoaded) {
            return error.InvalidState;
        }
        self.allocator = allocator_;

        if (assets_directory == null) {
            self.data = try loadFile(self.file_path[0..self.file_path_len], allocator_);
        } else {
            // NOTE(review): "{}" on `assets_directory` formats an optional, not
            // the slice inside it — presumably this should unwrap with `.?`
            // (or use "{s}"); also the `catch unreachable` trips if the joined
            // path exceeds 256 bytes. Confirm against this Zig version's
            // std.fmt behaviour.
            const n = std.fmt.bufPrint(path[0..], "{}{}", .{ assets_directory, self.file_path[0..self.file_path_len] }) catch unreachable;
            self.data = try loadFile(n, allocator_);
        }
        self.state = AssetState.Loaded;

        if (self.whenFileLoaded != null) {
            self.whenFileLoaded.?(self);
        }
    }

    // Use same allocator as was used for load()
    // Decompresses (if *.compressed) and decodes the raw bytes into the
    // type-specific fields, then sets state Loaded -> Ready.
    pub fn decompress(self: *Asset) !void {
        if (self.state != AssetState.Loaded or self.data == null or self.allocator == null) {
            return error.InvalidState;
        }

        if (self.compressed) {
            const newData = try compress.decompress(self.data.?, self.allocator.?);
            self.allocator.?.free(self.data.?);
            self.data = newData;
        }

        if (self.asset_type == AssetType.Model) {
            self.model = try ModelData.init(self.data.?, self.allocator.?);
        } else if (self.asset_type == AssetType.Animation) {
            self.animation = try AnimationData.init(self.data.?);
        } else if (self.asset_type == AssetType.Texture) {
            var w: u32 = 0;
            var h: u32 = 0;
            const newData = try wgi.image.decodeImage(self.data.?, &self.texture_channels, &w, &h, self.allocator.?);
            self.texture_width = w;
            self.texture_height = h;
            // NOTE(review): frees the raw file bytes through the image
            // module's free function rather than self.allocator — confirm
            // this is the intended ownership.
            wgi.image.freeDecodedImage(self.data.?);
            self.data = newData;

            // Channel count decides the image format; anything else is RGBA.
            if (self.texture_channels == 3) {
                self.texture_type = wgi.image.ImageType.RGB;
            } else if (self.texture_channels == 2) {
                self.texture_type = wgi.image.ImageType.RG;
            } else if (self.texture_channels == 1) {
                self.texture_type = wgi.image.ImageType.R;
            } else {
                self.texture_type = wgi.image.ImageType.RGBA;
            }
        } else if (self.asset_type == AssetType.RGB10A2Texture) {
            // Raw container: 8-byte magic, u32 width, u32 height, then pixel data.
            if (self.data.?.len < 16) {
                return error.FileTooSmall;
            }
            const file_data_u32: []u32 = std.mem.bytesAsSlice(u32, self.data.?);
            if (file_data_u32[0] != 0x62677200 or file_data_u32[1] != 0x32613031) {
                return error.InvalidMagic;
            }

            const w = file_data_u32[2];
            const h = file_data_u32[3];
            if (w == 0 or h == 0 or w > 32768 or h > 32768) {
                return error.InvalidDimensions;
            }
            self.texture_width = w;
            self.texture_height = h;
            self.rgb10a2_data = self.data.?[16..];
            self.texture_type = wgi.image.ImageType.RGB10A2;
        }
        // else if(self.asset_type == AssetType.Shader) {
        // }

        self.state = AssetState.Ready;

        if (self.whenAssetDecoded != null) {
            self.whenAssetDecoded.?(self);
        }
    }

    // Keeps things such as model file metadata loaded but frees the memory that is typically stored in video memory
    // Don't call this if the mesh data is to be freed
    pub fn freeData(self: *Asset) void {
        if (self.state == AssetState.NotLoaded or self.state == AssetState.Freed or self.data == null or asset_type_keep_data_on_cpu[@enumToInt(self.asset_type)] or self.allocator == null) {
            return;
        }
        self.ref_count2.deinit();
        self.allocator.?.free(self.data.?);
        self.data = null;
    }

    // Releases everything owned by the asset and marks it Freed.
    pub fn free(self: *Asset, ignore_reference_counting: bool) void {
        if (!ignore_reference_counting) {
            self.ref_count.deinit();
        }
        if (self.state == AssetState.Ready) {
            if (self.asset_type == AssetType.Model) {
                self.model.?.free(self.allocator.?);
            }
        }
        if (self.data != null) {
            self.allocator.?.free(self.data.?);
            self.data = null;
        }
        self.state = AssetState.Freed;
    }
};

// Count of assets still queued for loading.
var assets_to_load = std.atomic.Int(u32).init(0);
var cv: ?ConditionVariable = null;
var assets_to_load_queue: std.atomic.Queue(*Asset) = std.atomic.Queue(*Asset).init();
var assets_to_decompress_queue: std.atomic.Queue(*Asset) = std.atomic.Queue(*Asset).init();
// Set to 1 to make the loader workers stop.
var abort_load = std.atomic.Int(u32).init(0);

// Do not call this while assets are being loaded
pub fn addAssetToQueue(asset: *Asset, allocator: *std.mem.Allocator) !void {
    if (asset.state != Asset.AssetState.NotLoaded) {
        return error.InvalidState;
    }

    var node = try allocator.create(std.atomic.Queue(*Asset).Node);
    node.* = std.atomic.Queue(*Asset).Node.init(asset);
    assets_to_load_queue.put(node);
    _ = assets_to_load.incr();
}

// Worker loop: pops assets off the load queue and reads their files.
// (Function continues past this excerpt.)
fn fileLoader(allocator: *std.mem.Allocator) void {
    while (abort_load.get() != 1) {
        const asset_node = assets_to_load_queue.get();
        if (asset_node == null) {
            break;
        } else {
            asset_node.?.data.*.load(allocator) catch |e| {
std.debug.warn("Asset '{}' load error: {}\n", .{ asset_node.?.data.file_path[0..asset_node.?.data.file_path_len], e }); _ = assets_to_load.decr(); continue; }; assets_to_decompress_queue.put(asset_node.?); cv.?.notify(); } } } fn assetDecompressor(allocator: *std.mem.Allocator) void { while (assets_to_load.get() > 0) { const asset = assets_to_decompress_queue.get(); if (asset == null) { cv.?.wait(); } else { if (asset.?.data.*.state == Asset.AssetState.Loaded) { asset.?.data.*.decompress() catch |e| { std.debug.warn("Asset '{}' decompress error: {}\n", .{ asset.?.data.file_path[0..asset.?.data.file_path_len], e }); }; } allocator.destroy(asset); if (assets_to_load.decr() == 1) { break; } } } } pub fn startAssetLoader_(assets_list: ?([]Asset), allocator: *std.mem.Allocator) !void { cv = ConditionVariable.init(); if (assets_list != null) { for (assets_list.?) |*a| { addAssetToQueue(a, allocator) catch { std.debug.warn("Asset {} added to load queue but is already loaded\n", .{a.file_path[0..a.file_path_len]}); }; } } abort_load.set(0); errdefer abort_load.set(1); _ = try std.Thread.spawn(allocator, fileLoader); _ = try std.Thread.spawn(allocator, assetDecompressor); } pub fn startAssetLoader(allocator: *std.mem.Allocator) !void { try startAssetLoader_(null, allocator); } pub fn startAssetLoader1(assets_list: []Asset, allocator: *std.mem.Allocator) !void { try startAssetLoader_(assets_list, allocator); } pub fn assetLoaderCleanup() void { cv.?.free(); } pub fn assetsLoaded() bool { return assets_to_load.get() == 0; } pub fn verifyAllAssetsLoaded(assets_list: []Asset) !void { for (assets_list) |a| { if (a.state != Asset.AssetState.Ready) { return error.AssetLoadError; } } return; } test "assets" { var asset = try Asset.init("bleh.jpg"); std.testing.expect(asset.asset_type == Asset.AssetType.Texture); }
src/Assets/Assets.zig
const Self = @This();

const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const fmt = std.fmt;

const wlr = @import("wlroots");
const wayland = @import("wayland");
const wl = wayland.server.wl;
const zwlr = wayland.server.zwlr;

const render = @import("render.zig");
const server = &@import("main.zig").server;
const util = @import("util.zig");

const Box = @import("Box.zig");
const LayerSurface = @import("LayerSurface.zig");
const Layout = @import("Layout.zig");
const LayoutDemand = @import("LayoutDemand.zig");
const View = @import("View.zig");
const ViewStack = @import("view_stack.zig").ViewStack;
const OutputStatus = @import("OutputStatus.zig");

/// Per-output double-buffered state (see the `current`/`pending` fields).
const State = struct {
    /// A bit field of focused tags
    tags: u32,

    /// Active layout, or null if views are un-arranged.
    ///
    /// If null, views which are manually moved or resized (with the pointer or
    /// a command) will not be automatically set to floating. Everything is
    /// already floating, so this would be an unexpected change of a view's state
    /// the user will only notice once a layout affects the views. So instead we
    /// "snap back" all manually moved views the next time a layout is active.
    /// This is similar to dwm's behaviour. Note that this of course does not
    /// affect already floating views.
    layout: ?*Layout = null,
};

wlr_output: *wlr.Output,
damage: *wlr.OutputDamage,

/// All layer surfaces on the output, indexed by the layer enum.
layers: [4]std.TailQueue(LayerSurface) = [1]std.TailQueue(LayerSurface){.{}} ** 4,

/// The area left for views and other layer surfaces after applying the
/// exclusive zones of exclusive layer surfaces.
/// TODO: this should be part of the output's State
usable_box: Box,

/// The top of the stack is the "most important" view.
views: ViewStack(View) = .{},

/// The double-buffered state of the output.
current: State = State{ .tags = 1 << 0 },
pending: State = State{ .tags = 1 << 0 },

/// Remembered version of tags (from last run)
previous_tags: u32 = 1 << 0,

/// The currently active LayoutDemand
layout_demand: ?LayoutDemand = null,

/// List of all layouts
layouts: std.TailQueue(Layout) = .{},

/// The current layout namespace of the output. If null,
/// config.default_layout_namespace should be used instead.
/// Call handleLayoutNamespaceChange() after setting this.
layout_namespace: ?[]const u8 = null,

/// Bitmask that whitelists tags for newly spawned views
spawn_tagmask: u32 = std.math.maxInt(u32),

/// List of status tracking objects relaying changes to this output to clients.
status_trackers: std.SinglyLinkedList(OutputStatus) = .{},

// Event listeners wired up in init().
destroy: wl.Listener(*wlr.Output) = wl.Listener(*wlr.Output).init(handleDestroy),
enable: wl.Listener(*wlr.Output) = wl.Listener(*wlr.Output).init(handleEnable),
mode: wl.Listener(*wlr.Output) = wl.Listener(*wlr.Output).init(handleMode),
frame: wl.Listener(*wlr.OutputDamage) = wl.Listener(*wlr.OutputDamage).init(handleFrame),
damage_destroy: wl.Listener(*wlr.OutputDamage) = wl.Listener(*wlr.OutputDamage).init(handleDamageDestroy),

/// Initialize this output: set its preferred mode, create damage tracking,
/// register event listeners, and compute the initial usable area.
pub fn init(self: *Self, wlr_output: *wlr.Output) !void {
    assert(!wlr_output.isNoop());

    // Some backends don't have modes. DRM+KMS does, and we need to set a mode
    // before we can use the output. The mode is a tuple of (width, height,
    // refresh rate), and each monitor supports only a specific set of modes. We
    // just pick the monitor's preferred mode, a more sophisticated compositor
    // would let the user configure it.
    if (wlr_output.preferredMode()) |mode| {
        wlr_output.setMode(mode);
        wlr_output.enable(true);
        try wlr_output.commit();
    }

    self.* = .{
        .wlr_output = wlr_output,
        .damage = try wlr.OutputDamage.create(wlr_output),
        .usable_box = undefined,
    };
    wlr_output.data = @ptrToInt(self);

    wlr_output.events.destroy.add(&self.destroy);
    wlr_output.events.enable.add(&self.enable);
    wlr_output.events.mode.add(&self.mode);

    self.damage.events.frame.add(&self.frame);
    self.damage.events.destroy.add(&self.damage_destroy);

    // Ensure that a cursor image at the output's scale factor is loaded
    // for each seat.
    var it = server.input_manager.seats.first;
    while (it) |node| : (it = node.next) {
        const seat = &node.data;
        seat.cursor.xcursor_manager.load(wlr_output.scale) catch
            std.log.scoped(.cursor).err("failed to load xcursor theme at scale {}", .{wlr_output.scale});
    }

    // Until layer surfaces claim exclusive zones, the whole output is usable.
    const effective_resolution = self.getEffectiveResolution();
    self.usable_box = .{
        .x = 0,
        .y = 0,
        .width = effective_resolution.width,
        .height = effective_resolution.height,
    };

    self.setTitle();
}

/// Return the layer-surface list for the given layer-shell layer.
pub fn getLayer(self: *Self, layer: zwlr.LayerShellV1.Layer) *std.TailQueue(LayerSurface) {
    return &self.layers[@intCast(usize, @enumToInt(layer))];
}

/// Notify all status trackers of the current view tags.
pub fn sendViewTags(self: Self) void {
    var it = self.status_trackers.first;
    while (it) |node| : (it = node.next) node.data.sendViewTags();
}

/// Collect the union of tags of urgent views and relay it to all trackers.
pub fn sendUrgentTags(self: Self) void {
    var urgent_tags: u32 = 0;
    var view_it = self.views.first;
    while (view_it) |node| : (view_it = node.next) {
        if (node.view.current.urgent) urgent_tags |= node.view.current.tags;
    }
    var it = self.status_trackers.first;
    while (it) |node| : (it = node.next) node.data.sendUrgentTags(urgent_tags);
}

/// Filter used when arranging: mapped, tiled views matching the tag mask.
pub fn arrangeFilter(view: *View, filter_tags: u32) bool {
    return view.surface != null and !view.pending.float and !view.pending.fullscreen and
        view.pending.tags & filter_tags != 0;
}

/// Start a layout demand with the currently active (pending) layout.
/// Note that this function does /not/ decide which layout shall be active. That
/// is done in two places: 1) When the user changed the layout namespace option
/// of this output and 2) when a new layout is added.
///
/// If no layout is active, all views will simply retain their current
/// dimensions. So without any active layouts, river will function like a simple
/// floating WM.
///
/// The changes of view dimensions are async. Therefore all transactions are
/// blocked until the layout demand has either finished or was aborted. Both
/// cases will start a transaction.
pub fn arrangeViews(self: *Self) void {
    // The noop output is a parking lot for views; it is never laid out.
    if (self == &server.root.noop_output) return;

    // If there is already an active layout demand, discard it.
    if (self.layout_demand) |demand| {
        demand.deinit();
        self.layout_demand = null;
    }

    // We only need to do something if there is an active layout.
    if (self.pending.layout) |layout| {
        // If the usable area has a zero dimension, trying to arrange the layout
        // would cause an underflow and is pointless anyway.
        if (self.usable_box.width == 0 or self.usable_box.height == 0) return;

        // How many views will be part of the layout?
        var views: u32 = 0;
        var view_it = ViewStack(View).iter(self.views.first, .forward, self.pending.tags, arrangeFilter);
        while (view_it.next() != null) views += 1;

        // No need to arrange an empty output.
        if (views == 0) return;

        // Note that this is async. A layout demand will start a transaction
        // once its done.
        layout.startLayoutDemand(views);
    }
}

const ArrangeLayersTarget = enum { mapped, unmapped };

/// Arrange all layer surfaces of this output and adjust the usable area
/// If target is unmapped, this function is pure aside from the
/// wlr.LayerSurfaceV1.configure() calls made on unmapped layer surfaces.
pub fn arrangeLayers(self: *Self, target: ArrangeLayersTarget) void {
    const effective_resolution = self.getEffectiveResolution();
    const full_box: Box = .{
        .x = 0,
        .y = 0,
        .width = effective_resolution.width,
        .height = effective_resolution.height,
    };

    // This box is modified as exclusive zones are applied
    var usable_box = full_box;

    const layers = [_]zwlr.LayerShellV1.Layer{ .overlay, .top, .bottom, .background };

    // Arrange all layer surfaces with exclusive zones, applying them to the
    // usable box along the way.
    for (layers) |layer| self.arrangeLayer(self.getLayer(layer).*, full_box, &usable_box, true, target);

    // If the usable_box has changed, we need to rearrange the output
    if (target == .mapped and !std.meta.eql(self.usable_box, usable_box)) {
        self.usable_box = usable_box;
        self.arrangeViews();
    }

    // Arrange the layers without exclusive zones
    for (layers) |layer| self.arrangeLayer(self.getLayer(layer).*, full_box, &usable_box, false, target);

    if (target == .unmapped) return;

    // Find the topmost layer surface in the top or overlay layers which
    // requests keyboard interactivity if any.
    const topmost_surface = outer: for (layers[0..2]) |layer| {
        // Iterate in reverse order since the last layer is rendered on top
        var it = self.getLayer(layer).last;
        while (it) |node| : (it = node.prev) {
            const layer_surface = &node.data;
            if (layer_surface.wlr_layer_surface.current.keyboard_interactive == .exclusive) {
                break :outer layer_surface;
            }
        }
    } else null;

    var it = server.input_manager.seats.first;
    while (it) |node| : (it = node.next) {
        const seat = &node.data;

        // Only grab focus of seats which have the output focused
        if (seat.focused_output != self) continue;

        if (topmost_surface) |to_focus| {
            // If we found a surface that requires focus, grab the focus of all
            // seats.
            seat.setFocusRaw(.{ .layer = to_focus });
        } else if (seat.focused == .layer) {
            // If the seat is currently focusing a layer without keyboard
            // interactivity, stop focusing that layer.
            if (seat.focused.layer.wlr_layer_surface.current.keyboard_interactive != .exclusive) {
                seat.setFocusRaw(.{ .none = {} });
                seat.focus(null);
            }
        }
    }
}

/// Arrange the layer surfaces of a given layer: compute each surface's box
/// from its anchors, margins and desired size, shrink usable_box by any
/// exclusive zone, and send a configure (for `target == .unmapped`, only to
/// surfaces that are not yet mapped).
fn arrangeLayer(
    self: *Self,
    layer: std.TailQueue(LayerSurface),
    full_box: Box,
    usable_box: *Box,
    exclusive: bool,
    target: ArrangeLayersTarget,
) void {
    var it = layer.first;
    while (it) |node| : (it = node.next) {
        const layer_surface = &node.data;
        const current_state = layer_surface.wlr_layer_surface.current;

        // If the value of exclusive_zone is greater than zero, then it exclusively
        // occupies some area of the screen.
        if (exclusive != (current_state.exclusive_zone > 0)) continue;

        // If the exclusive zone is set to -1, this means the client would like
        // to ignore any exclusive zones and use the full area of the output.
        const bounds = if (current_state.exclusive_zone == -1) &full_box else usable_box;

        var new_box: Box = undefined;

        // Horizontal alignment
        const horizontal_margin_size = current_state.margin.left + current_state.margin.right;
        if (horizontal_margin_size >= bounds.width) {
            // TODO find a better solution
            // We currently have not reached a conclusion on how to gracefully
            // handle this case yet, so we just close the surface. That will
            // cause the output to be re-arranged eventually, so we can just
            // exit here. Technically doing this is incorrect, but this case
            // should only ever be encountered very rarely and matches the
            // behavior of other compositors.
            std.log.scoped(.layer_shell).warn(
                "margins of layer surface '{s}' are too large to be reasonably handled. Closing.",
                .{layer_surface.wlr_layer_surface.namespace},
            );
            layer_surface.wlr_layer_surface.close();
            return;
        } else if (horizontal_margin_size + current_state.desired_width > bounds.width) {
            // Fix: this branch previously assigned new_box.y, leaving
            // new_box.x undefined. Mirror the vertical-overflow branch below.
            new_box.x = bounds.x;
            new_box.width = bounds.width - horizontal_margin_size;
        } else if (current_state.desired_width == 0) {
            // Width 0 is only valid when anchored to both horizontal edges.
            std.debug.assert(current_state.anchor.right and current_state.anchor.left);
            new_box.x = bounds.x + @intCast(i32, current_state.margin.left);
            new_box.width = bounds.width - (current_state.margin.left + current_state.margin.right);
        } else if (current_state.anchor.left) {
            new_box.x = bounds.x + @intCast(i32, current_state.margin.left);
            new_box.width = current_state.desired_width;
        } else if (current_state.anchor.right) {
            new_box.x = bounds.x + @intCast(i32, bounds.width - current_state.desired_width - current_state.margin.right);
            new_box.width = current_state.desired_width;
        } else {
            // Not anchored horizontally: center within the bounds.
            new_box.x = bounds.x + @intCast(i32, bounds.width / 2 - current_state.desired_width / 2);
            new_box.width = current_state.desired_width;
        }

        // Vertical alignment
        const vertical_margin_size = current_state.margin.bottom + current_state.margin.top;
        if (vertical_margin_size >= bounds.height) {
            // TODO find a better solution, see explanation above
            std.log.scoped(.layer_shell).warn(
                "margins of layer surface '{s}' are too large to be reasonably handled. Closing.",
                .{layer_surface.wlr_layer_surface.namespace},
            );
            layer_surface.wlr_layer_surface.close();
            return;
        } else if (vertical_margin_size + current_state.desired_height > bounds.height) {
            new_box.y = bounds.y;
            new_box.height = bounds.height - vertical_margin_size;
        } else if (current_state.desired_height == 0) {
            // Height 0 is only valid when anchored to both vertical edges.
            std.debug.assert(current_state.anchor.top and current_state.anchor.bottom);
            new_box.y = bounds.y + @intCast(i32, current_state.margin.top);
            new_box.height = bounds.height - (current_state.margin.top + current_state.margin.bottom);
        } else if (current_state.anchor.top) {
            new_box.y = bounds.y + @intCast(i32, current_state.margin.top);
            new_box.height = current_state.desired_height;
        } else if (current_state.anchor.bottom) {
            new_box.y = bounds.y + @intCast(i32, bounds.height - current_state.desired_height - current_state.margin.bottom);
            new_box.height = current_state.desired_height;
        } else {
            // Not anchored vertically: center within the bounds.
            new_box.y = bounds.y + @intCast(i32, bounds.height / 2 - current_state.desired_height / 2);
            new_box.height = current_state.desired_height;
        }

        // Apply the exclusive zone to the current bounds: a surface anchored
        // to a single edge (alone or together with both perpendicular edges)
        // reserves margin + exclusive_zone pixels along that edge.
        const edges = [4]struct {
            single: zwlr.LayerSurfaceV1.Anchor,
            triple: zwlr.LayerSurfaceV1.Anchor,
            to_increase: ?*i32,
            to_decrease: *u32,
            margin: u32,
        }{
            .{
                .single = .{ .top = true },
                .triple = .{ .top = true, .left = true, .right = true },
                .to_increase = &usable_box.y,
                .to_decrease = &usable_box.height,
                .margin = current_state.margin.top,
            },
            .{
                .single = .{ .bottom = true },
                .triple = .{ .bottom = true, .left = true, .right = true },
                .to_increase = null,
                .to_decrease = &usable_box.height,
                .margin = current_state.margin.bottom,
            },
            .{
                .single = .{ .left = true },
                .triple = .{ .left = true, .top = true, .bottom = true },
                .to_increase = &usable_box.x,
                .to_decrease = &usable_box.width,
                .margin = current_state.margin.left,
            },
            .{
                .single = .{ .right = true },
                .triple = .{ .right = true, .top = true, .bottom = true },
                .to_increase = null,
                .to_decrease = &usable_box.width,
                .margin = current_state.margin.right,
            },
        };

        for (edges) |edge| {
            if ((std.meta.eql(current_state.anchor, edge.single) or std.meta.eql(current_state.anchor, edge.triple)) and
                current_state.exclusive_zone + @intCast(i32, edge.margin) > 0)
            {
                const delta = current_state.exclusive_zone + @intCast(i32, edge.margin);
                if (edge.to_increase) |value| value.* += delta;
                edge.to_decrease.* -= @intCast(u32, delta);
                break;
            }
        }

        switch (target) {
            .mapped => {
                assert(layer_surface.wlr_layer_surface.mapped);
                layer_surface.box = new_box;
                layer_surface.wlr_layer_surface.configure(new_box.width, new_box.height);
            },
            .unmapped => if (!layer_surface.wlr_layer_surface.mapped) {
                layer_surface.wlr_layer_surface.configure(new_box.width, new_box.height);
            },
        }
    }
}

fn handleDamageDestroy(listener: *wl.Listener(*wlr.OutputDamage), wlr_output: *wlr.OutputDamage) void {
    const self = @fieldParentPtr(Self, "damage_destroy", listener);
    // The wlr.OutputDamage is only destroyed by wlroots when the output is
    // destroyed and is never destroyed manually by river.
    self.frame.link.remove();
    // Ensure that it is safe to call remove() again in handleDestroy()
    self.frame.link = .{ .prev = &self.frame.link, .next = &self.frame.link };
}

fn handleDestroy(listener: *wl.Listener(*wlr.Output), wlr_output: *wlr.Output) void {
    const self = @fieldParentPtr(Self, "destroy", listener);

    std.log.scoped(.server).debug("output '{s}' destroyed", .{mem.sliceTo(&self.wlr_output.name, 0)});

    // Remove the destroyed output from root if it wasn't already removed
    server.root.removeOutput(self);
    assert(self.views.first == null and self.views.last == null);
    for (self.layers) |layer| assert(layer.len == 0);
    assert(self.layouts.len == 0);

    var it = server.root.all_outputs.first;
    while (it) |all_node| : (it = all_node.next) {
        if (all_node.data == self) {
            server.root.all_outputs.remove(all_node);
            break;
        }
    }

    // Remove all listeners
    self.destroy.link.remove();
    self.enable.link.remove();
    self.frame.link.remove();
    self.mode.link.remove();

    // Free all memory and clean up the wlr.Output
    if (self.layout_demand) |demand| demand.deinit();
    if (self.layout_namespace) |namespace| util.gpa.free(namespace);

    self.wlr_output.data = undefined;

    const node = @fieldParentPtr(std.TailQueue(Self).Node, "data", self);
    util.gpa.destroy(node);
}

fn handleEnable(listener: *wl.Listener(*wlr.Output), wlr_output: *wlr.Output) void {
    const self = @fieldParentPtr(Self, "enable", listener);

    // Add the output to root.outputs and the output layout if it has not
    // already been added.
    if (wlr_output.enabled) server.root.addOutput(self);
}

fn handleFrame(listener: *wl.Listener(*wlr.OutputDamage), wlr_output: *wlr.OutputDamage) void {
    // This function is called every time an output is ready to display a frame,
    // generally at the output's refresh rate (e.g. 60Hz).
    const self = @fieldParentPtr(Self, "frame", listener);
    render.renderOutput(self);
}

fn handleMode(listener: *wl.Listener(*wlr.Output), wlr_output: *wlr.Output) void {
    // Mode changes alter the effective resolution, so re-arrange everything.
    const self = @fieldParentPtr(Self, "mode", listener);
    self.arrangeLayers(.mapped);
    self.arrangeViews();
    server.root.startTransaction();
}

/// Return the output's resolution after applying its transform and scale.
pub fn getEffectiveResolution(self: *Self) struct { width: u32, height: u32 } {
    var width: c_int = undefined;
    var height: c_int = undefined;
    self.wlr_output.effectiveResolution(&width, &height);
    return .{
        .width = @intCast(u32, width),
        .height = @intCast(u32, height),
    };
}

/// Set the window title of nested (Wayland/X11) backend outputs.
fn setTitle(self: Self) void {
    var buf: ["river - ".len + self.wlr_output.name.len + 1]u8 = undefined;
    const title = fmt.bufPrintZ(&buf, "river - {s}", .{mem.sliceTo(&self.wlr_output.name, 0)}) catch unreachable;
    if (self.wlr_output.isWl()) {
        self.wlr_output.wlSetTitle(title);
    } else if (wlr.config.has_x11_backend and self.wlr_output.isX11()) {
        self.wlr_output.x11SetTitle(title);
    }
}

pub fn handleLayoutNamespaceChange(self: *Self) void {
    // The user changed the layout namespace of this output. Try to find a
    // matching layout.
    var it = self.layouts.first;
    self.pending.layout = while (it) |node| : (it = node.next) {
        if (mem.eql(u8, self.layoutNamespace(), node.data.namespace)) break &node.data;
    } else null;
    self.arrangeViews();
    server.root.startTransaction();
}

/// The namespace to match layouts against, falling back to the config default.
pub fn layoutNamespace(self: Self) []const u8 {
    return self.layout_namespace orelse server.config.default_layout_namespace;
}
source/river-0.1.0/river/Output.zig
const std = @import("std");
const process = std.process;
const chunks = @import("./chunks.zig");
const debug = @import("./debug.zig");

const Allocator = std.mem.Allocator;
const OpCode = chunks.OpCode;
const Chunk = chunks.Chunk;
const Vm = @import("./vm.zig").Vm;

/// Entry point. With no arguments, start an interactive REPL; with a single
/// argument, run that file as a script. Anything else prints usage and exits
/// with code 64 (EX_USAGE).
pub fn main() anyerror!u8 {
    var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = general_purpose_allocator.deinit();
    const gpa = &general_purpose_allocator.allocator;

    const args = try process.argsAlloc(gpa);
    defer process.argsFree(gpa, args);

    var vm = Vm.init(gpa);
    defer vm.deinit();

    const argc = args.len;
    if (argc == 1) {
        try repl(&vm);
    } else if (argc == 2) {
        runFile(args[1], &vm, gpa);
    } else {
        std.log.err("Usage: zilox [path]\n", .{});
        process.exit(64);
    }

    return 0;
}

/// Read-eval-print loop: prompt on stdout, interpret one line at a time,
/// and stop cleanly on end-of-input. Interpreter errors are ignored so the
/// session keeps going.
fn repl(vm: *Vm) !void {
    const in = std.io.getStdIn();
    const stdout = std.io.getStdOut();

    var buffered = std.io.bufferedReader(in.reader());
    const reader = buffered.reader();

    var input_storage: [1024]u8 = undefined;
    while (true) {
        try stdout.writeAll("> ");
        const maybe_line = try reader.readUntilDelimiterOrEof(&input_storage, '\n');
        if (maybe_line) |line| {
            vm.interpret(line) catch {};
        } else {
            // EOF (e.g. Ctrl-D): finish the prompt line and leave the loop.
            try stdout.writeAll("\n");
            break;
        }
    }
}

/// Load and interpret a whole source file. Interpreter errors are ignored;
/// I/O failures terminate the process inside readFile.
fn runFile(fileName: []const u8, vm: *Vm, allocator: *Allocator) void {
    const source = readFile(fileName, allocator);
    defer allocator.free(source);
    vm.interpret(source) catch {};
}

/// Read an entire file into an allocator-owned buffer (caller frees).
/// Exits the process with code 74 (EX_IOERR) if the file cannot be opened
/// or read.
fn readFile(path: []const u8, allocator: *Allocator) []const u8 {
    const file = std.fs.cwd().openFile(path, .{ .read = true }) catch |err| {
        std.log.err("Could not open file \"{s}\", error: {any}.\n", .{ path, err });
        process.exit(74);
    };
    defer file.close();

    return file.readToEndAlloc(allocator, 100_000_000) catch |err| {
        std.log.err("Could not read file \"{s}\", error: {any}.\n", .{ path, err });
        process.exit(74);
    };
}
src/main.zig
// test "Item.BrightPowder" { // // An attack against the holder has its accuracy out of 255 lowered by 20. // return error.SkipZigTest; // } // test "Item.MetalPowder" { // // If held by a Ditto, its Defense and Sp. Def are 1.5x, even while Transformed. // return error.SkipZigTest; // } // test "Item.QuickClaw" { // // Each turn, holder has a ~23.4% chance to move first in its priority bracket. // return error.SkipZigTest; // } // test "Item.PSNCureBerry" { // // (Gen 2) Holder is cured if it is poisoned. Single use. // return error.SkipZigTest; // } // test "Item.SoftSand" { // // Holder's Ground-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.SharpBeak" { // // Holder's Flying-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.PRZCureBerry" { // // (Gen 2) Holder cures itself if it is paralyzed. Single use. // return error.SkipZigTest; // } // test "Item.BurntBerry" { // // (Gen 2) Holder is cured if it is frozen. Single use. // return error.SkipZigTest; // } // test "Item.IceBerry" { // // (Gen 2) Holder is cured if it is burned. Single use. // return error.SkipZigTest; // } // test "Item.PoisonBarb" { // // Holder's Poison-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.KingsRock" { // // Holder's attacks without a chance to make the target flinch gain a 10% chance to make the // // target flinch. Evolves Poliwhirl into Politoed and Slowpoke into Slowking when traded. // return error.SkipZigTest; // } // test "Item.BitterBerry" { // // (Gen 2) Holder is cured if it is confused. Single use. // return error.SkipZigTest; // } // test "Item.MintBerry" { // // (Gen 2) Holder wakes up if it is asleep. Single use. // return error.SkipZigTest; // } // test "Item.SilverPowder" { // // Holder's Bug-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.MysticWater" { // // Holder's Water-type attacks have 1.1x power. 
// return error.SkipZigTest; // } // test "Item.TwistedSpoon" { // // Holder's Psychic-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.BlackBelt" { // // Holder's Fighting-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.BlackGlasses" { // // Holder's Dark-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.{PinkBow,PolkadotBow}" { // // (Gen 2) Holder's Normal-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.NeverMeltIce" { // // Holder's Ice-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.Magnet" { // // Holder's Electric-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.MiracleBerry" { // // (Gen 2) Holder cures itself if it is confused or has a status condition. Single use. // return error.SkipZigTest; // } // test "Item.SpellTag" { // // Holder's Ghost-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.MiracleSeed" { // // Holder's Grass-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.FocusBand" { // // Holder has a ~11.7% chance to survive an attack that would KO it with 1 HP. // return error.SkipZigTest; // } // test "Item.HardStone" { // // Holder's Rock-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.Charcoal" { // // Holder's Fire-type attacks have 1.1x power. // return error.SkipZigTest; // } // test "Item.BerryJuice" { // // Restores 20 HP when at 1/2 max HP or less. Single use. // return error.SkipZigTest; // } // test "Item.ScopeLens" { // // Holder's critical hit ratio is raised by 1 stage. // return error.SkipZigTest; // } // test "Item.MetalCoat" { // // Holder's Steel-type attacks have 1.1x power. Evolves Onix into Steelix and Scyther into // // Scizor when traded. // return error.SkipZigTest; // } // test "Item.Leftovers" { // // At the end of every turn, holder restores 1/16 of its max HP. 
// return error.SkipZigTest; // } // test "Item.MysteryBerry" { // // (Gen 2) Restores 5 PP to the first of the holder's moves to reach 0 PP. Single use. // return error.SkipZigTest; // } // test "Item.DragonScale" { // // Holder's Dragon-type attacks have 1.1x power. Evolves Seadra (trade). // return error.SkipZigTest; // } // test "Item.*Mail" { // // Mail // return error.SkipZigTest; // } // test "Item.Berry" { // // (Gen 2) Restores 10 HP when at 1/2 max HP or less. Single use. // return error.SkipZigTest; // } // test "Item.GoldBerry" { // // (Gen 2) Restores 30 HP when at 1/2 max HP or less. Single use. // return error.SkipZigTest; // } // // Moves // test "Move.{KarateChop,RazorLeaf,Crabhammer,Slash,Aeroblast,CrossChop}" { // // Has a higher chance for a critical hit. // return error.SkipZigTest; // } // test "Move.{DoubleSlap,CometPunch,FuryAttack,PinMissile,SpikeCannon,Barrage,FurySwipes,BoneRush}" { // // Hits two to five times. Has a 3/8 chance to hit two or three times, and a 1/8 chance to hit // // four or five times. If one of the hits breaks the target's substitute, it will take damage // // for the remaining hits. // return error.SkipZigTest; // } // test "Move.{FirePunch,Ember,Flamethrower,FireBlast,FlameWheel}" { // // Has a 10% chance to burn the target. // return error.SkipZigTest; // } // test "Move.{IcePunch,IceBeam,Blizzard,PowderSnow}" { // // Has a 10% chance to freeze the target. // return error.SkipZigTest; // } // test "Move.{ThunderPunch,ThunderShock,Thunderbolt}" { // // Has a 10% chance to paralyze the target. // return error.SkipZigTest; // } // test "Move.{Guillotine,HornDrill}" { // // Deals 65535 damage to the target. This attack's accuracy out of 256 is equal to the lesser of // // (2 * (user's level - target's level) + 76) and 255, before applying accuracy and evasiveness // // modifiers. Fails if the target is at a higher level. 
// return error.SkipZigTest; // } // test "Move.RazorWind" { // // Has a higher chance for a critical hit. This attack charges on the first turn and executes on // // the second. // return error.SkipZigTest; // } // test "Move.SwordsDance" { // // Raises the user's Attack by 2 stages. // return error.SkipZigTest; // } // test "Move.Gust" { // // Power doubles if the target is using Fly. // return error.SkipZigTest; // } // test "Move.{Whirlwind,Roar}" { // // The target is forced to switch out and be replaced with a random unfainted ally. Fails if the // // target is the last unfainted Pokemon in its party, or if the user moves before the target. // return error.SkipZigTest; // } // test "Move.Fly" { // // This attack charges on the first turn and executes on the second. On the first turn, the user // // avoids all attacks other than Gust, Thunder, Twister, and Whirlwind, and Gust and Twister // // have doubled power when used against it. // return error.SkipZigTest; // } // test "Move.{Bind,Wrap,FireSpin,Clamp,Whirlpool}" { // // Prevents the target from switching for two to five turns. Causes damage to the target equal // // to 1/16 of its maximum HP, rounded down, at the end of each turn during effect. The target // // can still switch out if it uses Baton Pass. The effect ends if either the user or the target // // leaves the field, or if the target uses Rapid Spin or Substitute successfully. This effect is // // not stackable or reset by using this or another binding move. // return error.SkipZigTest; // } // test "Move.Stomp" { // // Has a 30% chance to make the target flinch. Power doubles if the target is under the effect // // of Minimize. // return error.SkipZigTest; // } // test "Move.{DoubleKick,Bonemerang}" { // // Hits twice. If the first hit breaks the target's substitute, it will take damage for the // // second hit. 
// return error.SkipZigTest; // } // test "Move.{JumpKick,HighJumpKick}" { // // If this attack is not successful and the target was not immune, the user loses HP equal to // // 1/8 the damage the target would have taken, rounded down, but not less than 1 HP, as crash // // damage. // return error.SkipZigTest; // } // test "Move.{RollingKick,Headbutt,Bite,LowKick,RockSlide}" { // // Has a 30% chance to make the target flinch. // return error.SkipZigTest; // } // test "Move.{SandAttack,Smokescreen,Kinesis,Flash}" { // // Lowers the target's accuracy by 1 stage. // return error.SkipZigTest; // } // test "Move.{BodySlam,Lick,Spark,DragonBreath}" { // // Has a 30% chance to paralyze the target. // return error.SkipZigTest; // } // test "Move.{TakeDown,Submission}" { // // If the target lost HP, the user takes recoil damage equal to 1/4 the HP lost by the target, // // rounded half up, but not less than 1 HP. If this move hits a substitute, the recoil damage is // // always 1 HP. // return error.SkipZigTest; // } // test "Move.{Thrash,PetalDance,Outrage}" { // // Whether or not this move is successful, the user spends two or three turns locked into this // // move and becomes confused immediately after its move on the last turn of the effect, even if // // it is already confused. If the user is prevented from moving, the effect ends without causing // // confusion. If this move is called by Sleep Talk, the move is used for one turn and does not // // confuse the user. // return error.SkipZigTest; // } // test "Move.DoubleEdge" { // // If the target lost HP, the user takes recoil damage equal to 1/4 the HP lost by the target, // // rounded down, but not less than 1 HP. If this move hits a substitute, the recoil damage is // // always 1 HP. // return error.SkipZigTest; // } // test "Move.{TailWhip,Leer}" { // // Lowers the target's Defense by 1 stage. 
// return error.SkipZigTest; // } // test "Move.{PoisonSting,Sludge,SludgeBomb}" { // // Has a 30% chance to poison the target. // return error.SkipZigTest; // } // test "Move.Twineedle" { // // Hits twice, with the second hit having a 20% chance to poison the target. If the first hit // // breaks the target's substitute, it will take damage for the second hit but the target cannot // // be poisoned by it. // return error.SkipZigTest; // } // test "Move.Growl" { // // Lowers the target's Attack by 1 stage. // return error.SkipZigTest; // } // test "Move.{Sing,SleepPowder,Hypnosis,LovelyKiss,Spore}" { // // Causes the target to fall asleep. // return error.SkipZigTest; // } // test "Move.{Supersonic,ConfuseRay,SweetKiss}" { // // Causes the target to become confused. // return error.SkipZigTest; // } // test "Move.SonicBoom" { // // Deals 20 HP of damage to the target. // return error.SkipZigTest; // } // test "Move.Disable" { // // For 1 to 7 turns, the target's last move used becomes disabled. Fails if one of the target's // // moves is already disabled, if the target has not made a move, if the target no longer knows // // the move, or if the move has 0 PP. // return error.SkipZigTest; // } // test "Move.Acid" { // // Has a 10% chance to lower the target's Defense by 1 stage. // return error.SkipZigTest; // } // test "Move.Mist" { // // While the user remains active, it is protected from having its stat stages lowered by other // // Pokemon. Fails if the user already has the effect. Baton Pass can be used to transfer this // // effect to an ally. // return error.SkipZigTest; // } // test "Move.{Psybeam,Confusion}" { // // Has a 10% chance to confuse the target. // return error.SkipZigTest; // } // test "Move.{BubbleBeam,Constrict,Bubble}" { // // Has a 10% chance to lower the target's Speed by 1 stage. // return error.SkipZigTest; // } // test "Move.AuroraBeam" { // // Has a 10% chance to lower the target's Attack by 1 stage. 
// return error.SkipZigTest; // } // test "Move.HyperBeam" { // // If this move is successful, the user must recharge on the following turn and cannot select a // // move. // return error.SkipZigTest; // } // test "Move.Counter" { // // Deals damage to the opposing Pokemon equal to twice the HP lost by the user from a physical // // attack this turn. This move considers Hidden Power as Normal type, and only the last hit of a // // multi-hit attack is counted. Fails if the user moves first, if the user was not hit by a // // physical attack this turn, or if the user did not lose HP from the attack. If the opposing // // Pokemon used Fissure or Horn Drill and missed, this move deals 65535 damage. // return error.SkipZigTest; // } // test "Move.{SeismicToss,NightShade}" { // // Deals damage to the target equal to the user's level. // return error.SkipZigTest; // } // test "Move.Absorb" { // // The user recovers 1/2 the HP lost by the target, rounded down. If the target has a // // substitute, this move misses. // return error.SkipZigTest; // } // test "Move.{MegaDrain,LeechLife,GigaDrain}" { // // The user recovers 1/2 the HP lost by the target, rounded down. // return error.SkipZigTest; // } // test "Move.LeechSeed" { // // The Pokemon at the user's position steals 1/8 of the target's maximum HP, rounded down, at // // the end of each turn. If the target uses Baton Pass, the replacement will continue being // // leeched. If the target switches out or uses Rapid Spin, the effect ends. Grass-type Pokemon // // are immune to this move on use, but not its effect. // return error.SkipZigTest; // } // test "Move.Growth" { // // Raises the user's Special Attack by 1 stage. // return error.SkipZigTest; // } // test "Move.SolarBeam" { // // This attack charges on the first turn and executes on the second. Damage is halved if the // // weather is Rain Dance. If the weather is Sunny Day, the move completes in one turn. 
// return error.SkipZigTest; // } // test "Move.{PoisonPowder,PoisonGas}" { // // Poisons the target. // return error.SkipZigTest; // } // test "Move.{StunSpore,ThunderWave,Glare}" { // // Paralyzes the target. This move does not ignore type immunity. // return error.SkipZigTest; // } // test "Move.StringShot" { // // Lowers the target's Speed by 1 stage. // return error.SkipZigTest; // } // test "Move.DragonRage" { // // Deals 40 HP of damage to the target. // return error.SkipZigTest; // } // test "Move.Thunder" { // // Has a 30% chance to paralyze the target. This move can hit a target using Fly. If the weather // // is Rain Dance, this move does not check accuracy. If the weather is Sunny Day, this move's // // accuracy is 50%. // return error.SkipZigTest; // } // test "Move.Earthquake" { // // Power doubles if the target is using Dig. // return error.SkipZigTest; // } // test "Move.Fissure" { // // Deals 65535 damage to the target. This attack's accuracy out of 256 is equal to the lesser of // // (2 * (user's level - target's level) + 76) and 255, before applying accuracy and evasiveness // // modifiers. Fails if the target is at a higher level. Can hit a target using Dig. // return error.SkipZigTest; // } // test "Move.Dig" { // // This attack charges on the first turn and executes on the second. On the first turn, the user // // avoids all attacks other than Earthquake, Fissure, and Magnitude, the user is unaffected by // // weather, and Earthquake and Magnitude have doubled power when used against the user. // return error.SkipZigTest; // } // test "Move.Toxic" { // // Badly poisons the target. // return error.SkipZigTest; // } // test "Move.Psychic" { // // Has a 10% chance to lower the target's Special Defense by 1 stage. // return error.SkipZigTest; // } // test "Move.{Meditate,Sharpen}" { // // Raises the user's Attack by 1 stage. // return error.SkipZigTest; // } // test "Move.Agility" { // // Raises the user's Speed by 2 stages. 
// return error.SkipZigTest; // } // test "Move.Rage" { // // Once this move is successfully used, X starts at 1. This move's damage is multiplied by X, // // and whenever the user is hit by the opposing Pokemon, X increases by 1, with a maximum of // // 255. X resets to 1 when the user is no longer active or did not choose this move for use. // return error.SkipZigTest; // } // test "Move.Teleport" { // // Fails when used. // return error.SkipZigTest; // } // test "Move.Mimic" { // // While the user remains active, this move is replaced by the last move used by the target. The // // copied move has 5 PP. Fails if the target has not made a move, if the user already knows the // // move, or if the move is Struggle. // return error.SkipZigTest; // } // test "Move.Screech" { // // Lowers the target's Defense by 2 stages. // return error.SkipZigTest; // } // test "Move.DoubleTeam" { // // Raises the user's evasiveness by 1 stage. // return error.SkipZigTest; // } // test "Move.{Recover,SoftBoiled,MilkDrink}" { // // The user restores 1/2 of its maximum HP, rounded down. // return error.SkipZigTest; // } // test "Move.{Harden,Withdraw}" { // // Raises the user's Defense by 1 stage. // return error.SkipZigTest; // } // test "Move.Minimize" { // // Raises the user's evasiveness by 1 stage. Whether or not the user's evasiveness was changed, // // Stomp will have its power doubled if used against the user while it is active. Baton Pass can // // be used to transfer this effect to an ally. // return error.SkipZigTest; // } // test "Move.DefenseCurl" { // // Raises the user's Defense by 1 stage. While the user remains active, the power of the user's // // Rollout will be doubled (this effect is not stackable). Baton Pass can be used to transfer // // this effect to an ally. // return error.SkipZigTest; // } // test "Move.{Barrier,AcidArmor}" { // // Raises the user's Defense by 2 stages. 
// return error.SkipZigTest; // } // test "Move.LightScreen" { // // For 5 turns, the user and its party members have their Special Defense doubled. Critical hits // // ignore this effect. Fails if the effect is already active on the user's side. // return error.SkipZigTest; // } // test "Move.Haze" { // // Resets the stat stages of all active Pokemon to 0. // return error.SkipZigTest; // } // test "Move.Reflect" { // // For 5 turns, the user and its party members have their Defense doubled. Critical hits ignore // // this effect. Fails if the effect is already active on the user's side. // return error.SkipZigTest; // } // test "Move.FocusEnergy" { // // Raises the user's chance for a critical hit by 1 stage. Fails if the user already has the // // effect. Baton Pass can be used to transfer this effect to an ally. // return error.SkipZigTest; // } // test "Move.Bide" { // // The user spends two or three turns locked into this move and then, on the second or third // // turn after using this move, the user attacks the opponent, inflicting double the damage in HP // // it lost during those turns. If the user is prevented from moving during this move's use, the // // effect ends. This move does not ignore type immunity. // return error.SkipZigTest; // } // test "Move.Metronome" { // // A random move is selected for use, other than Counter, Destiny Bond, Detect, Endure, // // Metronome, Mimic, Mirror Coat, Protect, Sketch, Sleep Talk, Struggle, or Thief. // return error.SkipZigTest; // } // test "Move.MirrorMove" { // // The user uses the last move used by the target. Fails if the target has not made a move, or // // if the last move used was Metronome, Mimic, Mirror Move, Sketch, Sleep Talk, Transform, or // // any move the user knows. // return error.SkipZigTest; // } // test "Move.{SelfDestruct,Explosion}" { // // The user faints after using this move. The target's Defense is halved during damage // // calculation. 
// return error.SkipZigTest; // } // test "Move.Smog" { // // Has a 40% chance to poison the target. // return error.SkipZigTest; // } // test "Move.{BoneClub,HyperFang}" { // // Has a 10% chance to make the target flinch. // return error.SkipZigTest; // } // test "Move.{Swift,FeintAttack,VitalThrow}" { // // This move does not check accuracy. // return error.SkipZigTest; // } // test "Move.SkullBash" { // // This attack charges on the first turn and executes on the second. Raises the user's Defense // // by 1 stage on the first turn. // return error.SkipZigTest; // } // test "Move.Amnesia" { // // Raises the user's Special Defense by 2 stages. // return error.SkipZigTest; // } // test "Move.DreamEater" { // // The target is unaffected by this move unless it is asleep and does not have a substitute. The // // user recovers 1/2 the HP lost by the target, rounded down, but not less than 1 HP. // return error.SkipZigTest; // } // test "Move.SkyAttack" { // // This attack charges on the first turn and executes on the second. // return error.SkipZigTest; // } // test "Move.Transform" { // // The user transforms into the target. The target's current stats, stat stages, types, moves, // // DVs, species, and sprite are copied. The user's level and HP remain the same and each copied // // move receives only 5 PP. This move fails if the target has transformed. // return error.SkipZigTest; // } // test "Move.DizzyPunch" { // // Has a 20% chance to confuse the target. // return error.SkipZigTest; // } // test "Move.Psywave" { // // Deals damage to the target equal to a random number from 1 to (user's level * 1.5 - 1), // // rounded down, but not less than 1 HP. // return error.SkipZigTest; // } // test "Move.Splash" { // // No competitive use. 
// return error.SkipZigTest; // } // test "Move.Rest" { // // The user falls asleep for the next two turns and restores all of its HP, curing itself of any // // non-volatile status condition in the process, even if it was already asleep. Fails if the // // user has full HP. // return error.SkipZigTest; // } // test "Move.Conversion" { // // The user's type changes to match the original type of one of its known moves besides Curse, // // at random, but not either of its current types. Fails if the user cannot change its type, or // // if this move would only be able to select one of the user's current types. // return error.SkipZigTest; // } // test "Move.TriAttack" { // // This move selects burn, freeze, or paralysis at random, and has a 20% chance to inflict the // // target with that status. If the target is frozen and burn was selected, it thaws out. // return error.SkipZigTest; // } // test "Move.SuperFang" { // // Deals damage to the target equal to half of its current HP, rounded down, but not less than 1 // // HP. // return error.SkipZigTest; // } // test "Move.Substitute" { // // The user takes 1/4 of its maximum HP, rounded down, and puts it into a substitute to take its // // place in battle. The substitute is removed once enough damage is inflicted on it, or if the // // user switches out or faints. Baton Pass can be used to transfer the substitute to an ally, // // and the substitute will keep its remaining HP. Until the substitute is broken, it receives // // damage from all attacks made by other Pokemon and shields the user from status effects and // // stat stage changes caused by other Pokemon. The user still takes normal damage from weather // // and status effects while behind its substitute. If the substitute breaks during a multi-hit // // attack, the user will take damage from any remaining hits. If a substitute is created while // // the user is trapped by a binding move, the binding effect ends immediately. 
Fails if the user // // does not have enough HP remaining to create a substitute without fainting, or if it already // // has a substitute. // return error.SkipZigTest; // } // test "Move.Struggle" { // // Deals typeless damage. If this move was successful, the user takes damage equal to 1/4 the HP // // lost by the target, rounded down, but not less than 1 HP. This move is automatically used if // // none of the user's known moves can be selected. // return error.SkipZigTest; // } // test "Move.Sketch" { // // Fails when used in Link Battles. // return error.SkipZigTest; // } // test "Move.TripleKick" { // // Hits one to three times, at random. Power increases to 20 for the second hit and 30 for the // // third. // return error.SkipZigTest; // } // test "Move.Thief" { // // Has a 100% chance to steal the target's held item if the user is not holding one. The // // target's item is not stolen if it is a Mail. // return error.SkipZigTest; // } // test "Move.{SpiderWeb,MeanLook}" { // // Prevents the target from switching out. The target can still switch out if it uses Baton // // Pass. If the target leaves the field using Baton Pass, the replacement will remain trapped. // // The effect ends if the user leaves the field, unless it uses Baton Pass, in which case the // // target will remain trapped. // return error.SkipZigTest; // } // test "Move.{MindReader,LockOn}" { // // The next accuracy check against the target succeeds. The target will still avoid Earthquake, // // Fissure, and Magnitude if it is using Fly. If the target leaves the field using Baton Pass, // // the replacement remains under this effect. This effect ends when the target leaves the field // // or an accuracy check is done against it. // return error.SkipZigTest; // } // test "Move.Nightmare" { // // Causes the target to lose 1/4 of its maximum HP, rounded down, at the end of each turn as // // long as it is asleep. This move does not affect the target unless it is asleep. 
The effect // // ends when the target wakes up, even if it falls asleep again in the same turn. // return error.SkipZigTest; // } // test "Move.Snore" { // // Has a 30% chance to make the target flinch. Fails if the user is not asleep. // return error.SkipZigTest; // } // test "Move.Curse" { // // If the user is not a Ghost type, lowers the user's Speed by 1 stage and raises the user's // // Attack and Defense by 1 stage, unless the user's Attack and Defense stats are both at stage // // 6. If the user is a Ghost type, the user loses 1/2 of its maximum HP, rounded down and even // // if it would cause fainting, in exchange for the target losing 1/4 of its maximum HP, rounded // // down, at the end of each turn while it is active. If the target uses Baton Pass, the // // replacement will continue to be affected. Fails if the target is already affected or has a // // substitute. // return error.SkipZigTest; // } // test "Move.{Flail,Reversal}" { // // The power of this move is 20 if X is 33 to 48, 40 if X is 17 to 32, 80 if X is 10 to 16, 100 // // if X is 5 to 9, 150 if X is 2 to 4, and 200 if X is 0 or 1, where X is equal to (user's // // current HP * 48 / user's maximum HP), rounded down. This move does not apply damage variance // // and cannot be a critical hit. // return error.SkipZigTest; // } // test "Move.Conversion2" { // // The user's type changes to match a type that resists or is immune to the type of the last // // move used by the opposing Pokemon, even if it is one of the user's current types. The // // original type of the move is used rather than the determined type. Fails if the opposing // // Pokemon has not used a move. // return error.SkipZigTest; // } // test "Move.{CottonSpore,ScaryFace}" { // // Lowers the target's Speed by 2 stages. // return error.SkipZigTest; // } // test "Move.Spite" { // // Causes the target's last move used to lose 2 to 5 PP, at random. Fails if the target has not // // made a move, or if the move has 0 PP. 
// return error.SkipZigTest; // } // test "Move.{Protect,Detect}" { // // The user is protected from attacks made by the opponent during this turn. This move has an // // X/255 chance of being successful, where X starts at 255 and halves, rounded down, each time // // this move is successfully used. X resets to 255 if this move fails or if the user's last move // // used is not Detect, Endure, or Protect. Fails if the user has a substitute or moves last this // // turn. // return error.SkipZigTest; // } // test "Move.BellyDrum" { // // Raises the user's Attack by 12 stages in exchange for the user losing 1/2 of its maximum HP, // // rounded down. Fails if the user would faint or if its Attack stat stage is 6. // return error.SkipZigTest; // } // test "Move.MudSlap" { // // Has a 100% chance to lower the target's accuracy by 1 stage. // return error.SkipZigTest; // } // test "Move.Octazooka" { // // Has a 50% chance to lower the target's accuracy by 1 stage. // return error.SkipZigTest; // } // test "Move.Spikes" { // // Sets up a hazard on the opposing side of the field, causing each opposing Pokemon that // // switches in to lose 1/8 of their maximum HP, rounded down, unless it is a Flying-type // // Pokemon. Fails if the effect is already active on the opposing side. Can be removed from the // // opposing side if any opposing Pokemon uses Rapid Spin successfully. // return error.SkipZigTest; // } // test "Move.ZapCannon" { // // Has a 100% chance to paralyze the target. // return error.SkipZigTest; // } // test "Move.Foresight" { // // As long as the target remains active, if its evasiveness stat stage is greater than the // // attacker's accuracy stat stage, both are ignored during accuracy checks, and Normal- and // // Fighting-type attacks can hit the target if it is a Ghost type. If the target leaves the // // field using Baton Pass, the replacement will remain under this effect. Fails if the target is // // already affected. 
// return error.SkipZigTest; // } // test "Move.DestinyBond" { // // Until the user's next turn, if an opposing Pokemon's attack knocks the user out, that Pokemon // // faints as well. // return error.SkipZigTest; // } // test "Move.PerishSong" { // // Each active Pokemon receives a perish count of 4 if it doesn't already have a perish count. // // At the end of each turn including the turn used, the perish count of all active Pokemon // // lowers by 1 and Pokemon faint if the number reaches 0. The perish count is removed from // // Pokemon that switch out. If a Pokemon uses Baton Pass while it has a perish count, the // // replacement will gain the perish count and continue to count down. // return error.SkipZigTest; // } // test "Move.IcyWind" { // // Has a 100% chance to lower the target's Speed by 1 stage. // return error.SkipZigTest; // } // test "Move.Sandstorm" { // // For 5 turns, the weather becomes Sandstorm. At the end of each turn except the last, all // // active Pokemon lose 1/8 of their maximum HP, rounded down, unless they are a Ground, Rock, or // // Steel type. Fails if the current weather is Sandstorm. // return error.SkipZigTest; // } // test "Move.Endure" { // // The user will survive attacks made by the opponent during this turn with at least 1 HP. This // // move has an X/255 chance of being successful, where X starts at 255 and halves, rounded down, // // each time this move is successfully used. X resets to 255 if this move fails or if the user's // // last move used is not Detect, Endure, or Protect. Fails if the user has a substitute or moves // // last this turn. // return error.SkipZigTest; // } // test "Move.Charm" { // // Lowers the target's Attack by 2 stages. // return error.SkipZigTest; // } // test "Move.Rollout" { // // If this move is successful, the user is locked into this move and cannot make another move // // until it misses, 5 turns have passed, or the attack cannot be used. 
Power doubles with each // // successful hit of this move and doubles again if Defense Curl was used previously by the // // user. If this move is called by Sleep Talk, the move is used for one turn. // return error.SkipZigTest; // } // test "Move.FalseSwipe" { // // Leaves the target with at least 1 HP. // return error.SkipZigTest; // } // test "Move.Swagger" { // // Raises the target's Attack by 2 stages and confuses it. This move will miss if the target's // // Attack cannot be raised. // return error.SkipZigTest; // } // test "Move.FuryCutter" { // // Power doubles with each successful hit, up to a maximum of 160 power. The power is reset if // // this move misses or another move is used. // return error.SkipZigTest; // } // test "Move.SteelWing" { // // Has a 10% chance to raise the user's Defense by 1 stage. // return error.SkipZigTest; // } // test "Move.Attract" { // // Causes the target to become infatuated, making it unable to attack 50% of the time. Fails if // // both the user and the target are the same gender, if either is genderless, or if the target // // is already infatuated. The effect ends when either the user or the target is no longer // // active. // return error.SkipZigTest; // } // test "Move.SleepTalk" { // // One of the user's known moves, besides this move, is selected for use at random. Fails if the // // user is not asleep. The selected move does not have PP deducted from it, and can currently // // have 0 PP. This move cannot select Bide, Sleep Talk, or any two-turn move. // return error.SkipZigTest; // } // test "Move.HealBell" { // // Every Pokemon in the user's party is cured of its non-volatile status condition. // return error.SkipZigTest; // } // test "Move.Return" { // // Power is equal to the greater of (user's Happiness * 2/5), rounded down, or 1. // return error.SkipZigTest; // } // test "Move.Present" { // // If this move is successful, it deals damage or heals the target. 
102/256 chance for 40 power, // // 76/256 chance for 80 power, 26/256 chance for 120 power, or 52/256 chance to heal the target // // by 1/4 of its maximum HP, rounded down. If this move deals damage, it uses an abnormal // // version of the damage formula by substituting certain values. The user's Attack stat is // // replaced with 10 times the effectiveness of this move against the target, the target's // // Defense stat is replaced with the index number of the user's secondary type, and the user's // // level is replaced with the index number of the target's secondary type. If a Pokemon does not // // have a secondary type, its primary type is used. The index numbers for each type are Normal: // // 0, Fighting: 1, Flying: 2, Poison: 3, Ground: 4, Rock: 5, Bug: 7, Ghost: 8, Steel: 9, Fire: // // 20, Water: 21, Grass: 22, Electric: 23, Psychic: 24, Ice: 25, Dragon: 26, Dark: 27. If at any // // point a division by 0 would happen in the damage formula, it divides by 1 instead. // return error.SkipZigTest; // } // test "Move.Frustration" { // // Power is equal to the greater of ((255 - user's Happiness) * 2/5), rounded down, or 1. // return error.SkipZigTest; // } // test "Move.Safeguard" { // // For 5 turns, the user and its party members cannot have non-volatile status conditions or // // confusion inflicted on them by other Pokemon. During the effect, Outrage, Thrash, and Petal // // Dance do not confuse the user. Fails if the effect is already active on the user's side. // return error.SkipZigTest; // } // test "Move.PainSplit" { // // The user and the target's HP become the average of their current HP, rounded down, but not // // more than the maximum HP of either one. // return error.SkipZigTest; // } // test "Move.SacredFire" { // // Has a 50% chance to burn the target. // return error.SkipZigTest; // } // test "Move.Magnitude" { // // The power of this move varies. 
5% chances for 10 and 150 power, 10% chances for 30 and 110 // // power, 20% chances for 50 and 90 power, and 30% chance for 70 power. Power doubles if the // // target is using Dig. // return error.SkipZigTest; // } // test "Move.DynamicPunch" { // // Has a 100% chance to confuse the target. // return error.SkipZigTest; // } // test "Move.BatonPass" { // // The user is replaced with another Pokemon in its party. The selected Pokemon has the user's // // stat stage changes, confusion, and certain move effects transferred to it. // return error.SkipZigTest; // } // test "Move.Encore" { // // For 3 to 6 turns, the target is forced to repeat its last move used. If the affected move // // runs out of PP, the effect ends. Fails if the target is already under this effect, if it has // // not made a move, if the move has 0 PP, or if the move is Encore, Metronome, Mimic, Mirror // // Move, Sketch, Sleep Talk, Struggle, or Transform. // return error.SkipZigTest; // } // test "Move.Pursuit" { // // If the target switches out this turn, this move hits it before it leaves the field with // // doubled power and the user's turn is over. // return error.SkipZigTest; // } // test "Move.RapidSpin" { // // If this move is successful, the effects of Leech Seed and binding moves end for the user, and // // Spikes are removed from the user's side of the field. // return error.SkipZigTest; // } // test "Move.SweetScent" { // // Lowers the target's evasiveness by 1 stage. // return error.SkipZigTest; // } // test "Move.IronTail" { // // Has a 30% chance to lower the target's Defense by 1 stage. // return error.SkipZigTest; // } // test "Move.MetalClaw" { // // Has a 10% chance to raise the user's Attack by 1 stage. 
// return error.SkipZigTest; // } // test "Move.{MorningSun,Synthesis,Moonlight}" { // // The user restores 1/2 of its maximum HP if no weather conditions are in effect, all of its HP // // if the weather is Sunny Day, and 1/4 of its maximum HP if the weather is Rain Dance or // // Sandstorm, all rounded down. // return error.SkipZigTest; // } // test "Move.HiddenPower" { // // This move's type and power depend on the user's individual values (IVs). Power varies between // // 30 and 70, and type can be any but Normal. // return error.SkipZigTest; // } // test "Move.Twister" { // // Has a 20% chance to make the target flinch. Power doubles if the target is using Fly. // return error.SkipZigTest; // } // test "Move.RainDance" { // // For 5 turns, the weather becomes Rain Dance, even if the current weather is Rain Dance. The // // damage of Water-type attacks is multiplied by 1.5 and the damage of Fire-type attacks is // // multiplied by 0.5 during the effect. // return error.SkipZigTest; // } // test "Move.SunnyDay" { // // For 5 turns, the weather becomes Sunny Day, even if the current weather is Sunny Day. The // // damage of Fire-type attacks is multiplied by 1.5 and the damage of Water-type attacks is // // multiplied by 0.5 during the effect. // return error.SkipZigTest; // } // test "Move.{Crunch,ShadowBall}" { // // Has a 20% chance to lower the target's Special Defense by 1 stage. // return error.SkipZigTest; // } // test "Move.MirrorCoat" { // // Deals damage to the opposing Pokemon equal to twice the HP lost by the user from a special // // attack this turn. This move considers Hidden Power as Normal type, and only the last hit of a // // multi-hit attack is counted. Fails if the user moves first, if the user was not hit by a // // special attack this turn, or if the user did not lose HP from the attack. // return error.SkipZigTest; // } // test "Move.PsychUp" { // // The user copies all of the target's current stat stage changes. 
Fails if the target's stat // // stages are 0. // return error.SkipZigTest; // } // test "Move.AncientPower" { // // Has a 10% chance to raise the user's Attack, Defense, Special Attack, Special Defense, and // // Speed by 1 stage. // return error.SkipZigTest; // } // test "Move.FutureSight" { // // Deals typeless damage that cannot be a critical hit two turns after this move is used. Damage // // is calculated against the target on use, and at the end of the final turn that damage is // // dealt to the Pokemon at the position the original target had at the time. Fails if this move // // is already in effect for the target's position. // return error.SkipZigTest; // } // test "Move.RockSmash" { // // Has a 50% chance to lower the target's Defense by 1 stage. // return error.SkipZigTest; // } // test "Move.BeatUp" { // // Deals typeless damage. Hits one time for each unfainted Pokemon without a non-volatile status // // condition in the user's party. For each hit, the damage formula uses the participating // // Pokemon's level, its base Attack as the Attack stat, the target's base Defense as the Defense // // stat, and ignores stat stages and other effects that modify Attack or Defense. Fails if no // // party members can participate. // return error.SkipZigTest; // } // TODO: https://bulbapedia.bulbagarden.net/wiki/List_of_glitches_(Generation_II) // TODO: https://glitchcity.wiki/Category:Generation_II_glitches // TODO: https://www.youtube.com/c/Crystal_2/videos comptime { _ = @import("data.zig"); }
// src/lib/gen2/test.zig
//! Thin Zig wrappers over the raw Node-API (N-API) C surface imported from
//! `./napi/headers/c.zig`. Each wrapper struct holds the corresponding raw
//! napi handle and forwards calls through `safe`, which converts napi status
//! codes into Zig errors.
const std = @import("std");
const allocator = std.heap.c_allocator;
const napi = @import("./napi/headers/c.zig");

pub usingnamespace napi;
pub const serde = @import("./serde.zig");

// Namespace for binding helpers; `opaque` is used purely as a declaration
// container here (it has no fields and is never instantiated).
pub const bind = opaque {
    // pub fn class() void {}
    pub const function = @import("./bind/function.zig").bind;
};

// Zig-side mirror of the napi_valuetype constants (see `value.typeof`).
pub const js_type = enum {
    js_null,
    js_number,
    js_string,
    js_symbol,
    js_object,
    js_bigint,
    js_boolean,
    js_external,
    js_function,
    js_undefined,
};

/// Registers `f` as this addon's module-init callback by exporting the
/// `napi_register_module_v1` symbol that Node looks up when loading the
/// addon. If `f` fails, the error is logged and the process exits with
/// status 1 (the `defer` runs after the log call).
pub fn register(comptime f: fn(env, object) anyerror!void) void {
    const wrapper = opaque {
        fn napi_register_module_v1(re: napi.napi_env, m: napi.napi_value) callconv(.C) napi.napi_value {
            const e = env.init(re);
            const exports = object.init(m);
            f(e, exports) catch |er| {
                // exit(1) runs on scope exit, i.e. after the error is logged.
                defer std.os.exit(1);
                // NOTE(review): formatting an error value with "{s}" relies on
                // the error-name-as-string behavior of this Zig version —
                // confirm, or use @errorName(er).
                std.log.err("failed to register module: {s}", .{er});
            };
            return exports.raw;
        }
    };
    @export(wrapper.napi_register_module_v1, .{ .linkage = .Strong, .name = "napi_register_module_v1" });
}

// `safe(fn, args)` invokes a napi function and maps non-OK status to an error.
pub const safe = @import("./napi/c-napi/safe.zig").call;

/// js runtime types ///

/// Wrapper around `napi_env`, the per-invocation JS runtime handle.
pub const env = struct {
    raw: napi.napi_env,

    pub fn init(raw: napi.napi_env) env {
        return env { .raw = raw };
    }

    /// Serializes an arbitrary Zig value into a JS `value` via `serde`.
    pub fn create(self: env, v: anytype) !value {
        return serde.init(self).serialize(v);
    }

    /// Throws a JS `Error` with message `e` into the current JS context.
    pub fn throw_error(self: env, e: [:0]const u8) !void {
        try safe(napi.napi_throw_error, .{self.raw, null, e});
    }

    /// Evaluates `script` (a JS string value) and returns its result.
    pub fn eval(self: env, script: string) !value {
        var raw: napi.napi_value = undefined;
        try safe(napi.napi_run_script, .{self.raw, script.raw, &raw});
        return value.init(raw);
    }
};

/// Generic wrapper around `napi_value`.
pub const value = struct {
    raw: napi.napi_value,

    pub fn init(raw: napi.napi_value) value {
        return value { .raw = raw };
    }

    /// Maps the napi_valuetype of this value onto `js_type`.
    /// Any valuetype constant not listed below is treated as impossible
    /// (`unreachable`).
    pub fn typeof(self: value, e: env) !js_type {
        var t: napi.napi_valuetype = undefined;
        try safe(napi.napi_typeof, .{e.raw, self.raw, &t});
        return switch (t) {
            else => unreachable,
            napi.napi_null => js_type.js_null,
            napi.napi_number => js_type.js_number,
            napi.napi_string => js_type.js_string,
            napi.napi_symbol => js_type.js_symbol,
            napi.napi_object => js_type.js_object,
            napi.napi_bigint => js_type.js_bigint,
            napi.napi_boolean => js_type.js_boolean,
            napi.napi_external => js_type.js_external,
            napi.napi_function => js_type.js_function,
            napi.napi_undefined => js_type.js_undefined,
        };
    }
};

/// specialized js types ///

// JS object wrapper: named-property get/set plus construction.
// TODO: runtime support
pub const object = struct {
    raw: napi.napi_value,

    pub fn init(raw: napi.napi_value) object {
        return object { .raw = raw };
    }

    /// Sets property `k` on this object to `v`.
    pub fn set(self: object, e: env, k: [:0]const u8, v: value) !void {
        try safe(napi.napi_set_named_property, .{e.raw, self.raw, k, v.raw});
    }

    /// Creates a new empty JS object.
    pub fn new(e: env) !object {
        var raw: napi.napi_value = undefined;
        try safe(napi.napi_create_object, .{e.raw, &raw});
        return object { .raw = raw };
    }

    /// Reads property `k` from this object.
    pub fn get(self: object, e: env, k: [:0]const u8) !value {
        var raw: napi.napi_value = undefined;
        try safe(napi.napi_get_named_property, .{e.raw, self.raw, k, &raw});
        return value { .raw = raw };
    }
};

// JS array wrapper: indexed element access, length, construction.
// TODO: runtime support
pub const array = struct {
    raw: napi.napi_value,

    pub fn init(raw: napi.napi_value) array {
        return array { .raw = raw };
    }

    /// Stores `v` at `index`.
    pub fn set(self: array, e: env, index: u32, v: value) !void {
        try safe(napi.napi_set_element, .{e.raw, self.raw, index, v.raw});
    }

    /// Returns the array length (napi reports it as u32; widened to usize).
    pub fn len(self: array, e: env) !usize {
        var l: u32 = undefined;
        try safe(napi.napi_get_array_length, .{e.raw, self.raw, &l});
        return l;
    }

    /// Creates a new JS array of the given length.
    pub fn new(e: env, length: u32) !array {
        var raw: napi.napi_value = undefined;
        try safe(napi.napi_create_array_with_length, .{e.raw, length, &raw});
        return array { .raw = raw };
    }

    /// Reads the element at `index`.
    pub fn get(self: array, e: env, index: u32) !value {
        var v: napi.napi_value = undefined;
        try safe(napi.napi_get_element, .{e.raw, self.raw, index, &v});
        return value { .raw = v };
    }
};

/// JS string wrapper supporting utf8/utf16/latin1 creation and extraction.
pub const string = struct {
    raw: napi.napi_value,

    pub const encoding = enum {
        utf8,
        utf16,
        latin1,

        /// Code-unit type for each encoding (u8 for utf8/latin1, u16 for utf16).
        pub fn size(self: encoding) type {
            return switch (self) {
                .utf8 => u8,
                .utf16 => u16,
                .latin1 => u8,
            };
        }
    };

    pub fn init(raw: napi.napi_value) string {
        return string { .raw = raw };
    }

    /// Creates a JS string from slice-like `s` using encoding `c`.
    /// `s.len` is passed as the code-unit count.
    pub fn new(e: env, comptime c: encoding, s: anytype) !string {
        const T = c.size();
        var raw: napi.napi_value = undefined;
        const slice = std.mem.sliceAsBytes(s[0..]);
        try safe(switch (c) {
            .utf8 => napi.napi_create_string_utf8,
            .utf16 => napi.napi_create_string_utf16,
            .latin1 => napi.napi_create_string_latin1,
        }, .{e.raw, @ptrCast([*]const T, slice.ptr), s.len, &raw});
        return string { .raw = raw };
    }

    /// Copies the string's contents out of JS into a caller-owned slice
    /// allocated from `A`. First call queries the length, second call copies.
    /// NOTE(review): the second call passes a buffer size of `1 + size` while
    /// the buffer holds only `size` elements — napi writes a terminating NUL
    /// within the reported buffer size, so this looks like a potential
    /// one-element overwrite; confirm against the napi_get_value_string_* docs.
    pub fn get(self: string, e: env, comptime c: encoding, A: std.mem.Allocator) ![]c.size() {
        const T = c.size();
        var size: usize = undefined;
        const f = switch (c) {
            .utf8 => napi.napi_get_value_string_utf8,
            .utf16 => napi.napi_get_value_string_utf16,
            .latin1 => napi.napi_get_value_string_latin1,
        };
        try safe(f, .{e.raw, self.raw, null, 0, &size});
        const s = try A.alloc(T, size);
        errdefer A.free(s);
        try safe(f, .{e.raw, self.raw, s.ptr, 1 + size, &size});
        return s;
    }
};
src/napi.zig
//! Windows-oriented re-implementation of the `elixir` launcher shell script:
//! translates elixir/iex-style command-line switches into an `erl.exe`
//! invocation for an OTP release, then spawns it with the release's
//! environment variables applied.
const builtin = @import("builtin");
const std = @import("std");
const fmt = std.fmt;
const fs = std.fs;
const log = std.log;
const mem = std.mem;
const process = std.process;
const Allocator = std.mem.Allocator;

const utils = @import("utils.zig");
const fatal = utils.fatal;
const DotEnv = @import("dotenv.zig").DotEnv;
const Release = @import("release.zig").Release;
const SliceIterator = utils.SliceIterator;

const win_ansi = @cImport(@cInclude("win_ansi_fix.h"));

// Which front-end the launcher is emulating.
const RunMode = enum {
    elixir,
    elixirc,
    iex,
};

/// Parses `args` the way the upstream `elixir` script does, splitting them
/// into erl flags (`erl`), "-extra" passthrough args (`ex`), and flags placed
/// before "-extra" (`before_extra`), then spawns erl.exe/werl.exe from the
/// release's ERTS directory and waits for it to exit.
/// Note: allocations made with `allocator` (allocPrint/path.join results)
/// are not individually freed; callers appear to rely on an arena or
/// process exit for cleanup — TODO confirm.
fn elixir(allocator: Allocator, rel: Release, args: []const []const u8) !void {
    var ex = std.ArrayList([]const u8).init(allocator);
    defer ex.deinit();
    var erl = std.ArrayList([]const u8).init(allocator);
    defer erl.deinit();
    var before_extra = std.ArrayList([]const u8).init(allocator);
    defer before_extra.deinit();
    var endLoop = false;
    var useWerl = false;
    var runMode = RunMode.elixir;
    var args_iter = SliceIterator{ .args = args };

    // script path
    const erts_dir = try fmt.allocPrint(allocator, "erts-{s}", .{rel.erts_vsn});
    const erts_bin = try fs.path.join(allocator, &[_][]const u8{ rel.root, erts_dir, "bin" });

    var arg: ?[]const u8 = undefined;
    var arg1: ?[]const u8 = undefined;
    var arg2: ?[]const u8 = undefined;
    while (true) {
        arg = args_iter.next() catch break;
        if (arg == null) break;
        if (mem.eql(u8, arg.?, "")) break;
        // Once an unrecognized argument has been seen, everything that follows
        // is forwarded verbatim to the "-extra" section.
        // NOTE(review): after endLoop is set, recognized switches below are
        // still matched and appended a second time — confirm whether the
        // branches should be skipped once endLoop is true.
        if (endLoop) try ex.append(arg.?);

        // execution options
        if (builtin.os.tag == .windows and mem.eql(u8, arg.?, "--werl")) {
            useWerl = true;
        } else if (mem.eql(u8, arg.?, "+iex")) {
            try ex.append("+iex");
            runMode = RunMode.iex;
        } else if (mem.eql(u8, arg.?, "+elixirc")) {
            try ex.append("+elixirc");
            runMode = RunMode.elixirc;
        }
        // eval paramters
        else if (mem.eql(u8, arg.?, "-e")) {
            arg1 = try args_iter.next();
            // NOTE(review): "-e" is forwarded as "--e" — the upstream elixir
            // script forwards it as "-e"; looks like a typo, confirm.
            try ex.appendSlice(&.{ "--e", arg1.? });
        } else if (mem.eql(u8, arg.?, "--eval")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "--eval", arg1.? });
        } else if (mem.eql(u8, arg.?, "--rpc-eval")) {
            arg1 = try args_iter.next();
            arg2 = try args_iter.next();
            try ex.appendSlice(&.{ "--rpc-eval", arg1.?, arg2.? });
        }
        // elixir parameters
        else if (mem.eql(u8, arg.?, "-r")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "-r", arg1.? });
        } else if (mem.eql(u8, arg.?, "-pr")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "-pr", arg1.? });
        } else if (mem.eql(u8, arg.?, "-pa")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "-pa", arg1.? });
        } else if (mem.eql(u8, arg.?, "-pz")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "-pz", arg1.? });
        } else if (mem.eql(u8, arg.?, "-v")) {
            try ex.append("-v");
        } else if (mem.eql(u8, arg.?, "--app")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "--app", arg1.? });
        } else if (mem.eql(u8, arg.?, "--no-halt")) {
            try ex.append("--no-halt");
        } else if (mem.eql(u8, arg.?, "--remsh")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "--remsh", arg1.? });
        } else if (mem.eql(u8, arg.?, "--dot-iex")) {
            arg1 = try args_iter.next();
            try ex.appendSlice(&.{ "--dot-iex", arg1.? });
        }
        // erlang parameters
        else if (mem.eql(u8, arg.?, "--boot")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-boot", arg1.? });
        } else if (mem.eql(u8, arg.?, "--boot-var")) {
            arg1 = try args_iter.next();
            arg2 = try args_iter.next();
            try erl.appendSlice(&.{ "-boot_var", arg1.?, arg2.? });
        } else if (mem.eql(u8, arg.?, "--cookie")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-setcookie", arg1.? });
        } else if (mem.eql(u8, arg.?, "--hidden")) {
            try erl.append("-hidden");
        } else if (mem.eql(u8, arg.?, "--detached")) {
            log.warn("the --detached option is deprecated", .{});
            try erl.append("-detached");
        } else if (mem.eql(u8, arg.?, "--erl-config")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-config", arg1.? });
        } else if (mem.eql(u8, arg.?, "--logger-otp-reports")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-logger", "handle_otp_reports", arg1.? });
        } else if (mem.eql(u8, arg.?, "--logger-sasl-reports")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-logger", "handle_sasl_reports", arg1.? });
        } else if (mem.eql(u8, arg.?, "--name")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-name", arg1.? });
        } else if (mem.eql(u8, arg.?, "--sname")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-sname", arg1.? });
        } else if (mem.eql(u8, arg.?, "--vm-args")) {
            arg1 = try args_iter.next();
            try erl.appendSlice(&.{ "-args_file", arg1.? });
        } else if (mem.eql(u8, arg.?, "-mode")) {
            arg1 = try args_iter.next();
            try before_extra.appendSlice(&.{ "-mode", arg1.? });
        } else if (mem.eql(u8, arg.?, "--pipe-to")) {
            if (builtin.os.tag == .windows) fatal("--pipe-to option is not supported on Windows", .{});
            // TODO: handle other OSs
        } else {
            // Unrecognized argument: switch to passthrough mode.
            // NOTE(review): this appends `arg1` (the value of the previously
            // consumed option, if any) rather than the current `arg` — and the
            // current arg is only forwarded on the NEXT iteration via the
            // endLoop branch above. Confirm this matches the intended
            // upstream behavior.
            if (arg1) |a| try ex.append(a);
            endLoop = true;
        }
    }

    //// expand erl libs - this doesn't get called in the release's batch file
    // var ext_libs = String.init(allocator);
    // defer ext_libs.deinit();
    // var lib_dir_path = try fs.path.resolve(allocator, &[_][]const u8{ rel.root, "lib" });
    // var lib_dir = try fs.openDirAbsolute(lib_dir_path, .{ .iterate = true });
    // errdefer dir.close();
    // var iterator = lib_dir.iterate();
    // var dir: ?fs.Dir.Entry = undefined;
    // while (true) {
    //     dir = iterator.next() catch break;
    //     if (dir == null) break else {
    //         if (dir.?.kind == fs.File.Kind.Directory) {
    //             try ext_libs.concat(allocator, try fmt.allocPrint(allocator, " -pa {s}", .{dir.?.name}));
    //         }
    //     }
    // }

    // enable virtual terminal sequences
    // https://docs.microsoft.com/en-us/windows/console/classic-vs-vt
    if (builtin.os.tag == .windows) win_ansi.enable_virtual_term();

    const exe = try fs.path.join(allocator, &[_][]const u8{ erts_bin, if (useWerl) "werl.exe" else "erl.exe" });
    log.debug("executable path: {s}", .{exe});

    // Assemble the final erl.exe command line:
    //   exe [ELIXIR_ERL_OPTIONS] <erl flags> [-noshell -s elixir start_cli]
    //   -elixir ansi_enabled true <before_extra> -extra <ex>
    var argv = std.ArrayList([]const u8).init(allocator);
    defer argv.deinit();
    try argv.append(exe);
    if (process.getEnvVarOwned(allocator, "ELIXIR_ERL_OPTIONS")) |elixir_erl_options| try argv.append(elixir_erl_options) else |_| {}
    try argv.appendSlice(erl.items);
    if (runMode != RunMode.iex) try argv.appendSlice(&.{ "-noshell", "-s", "elixir", "start_cli" });
    try argv.appendSlice(&.{ "-elixir", "ansi_enabled", "true" });
    try argv.appendSlice(before_extra.items);
    try argv.append("-extra");
    try argv.appendSlice(ex.items);

    // Inherit the current environment, then overlay release vars and .env.
    var env_map = try std.process.getEnvMap(allocator);
    try putReleaseValues(&env_map, rel);
    try putDotEnvValues(allocator, &env_map, rel);

    for (argv.items) |a, i| log.debug("erl.exe arg[{d: >2}] {s}", .{ i, a });

    const child_proc = try std.ChildProcess.init(argv.items, allocator);
    child_proc.env_map = &env_map;
    child_proc.stdin_behavior = .Inherit;
    child_proc.stdout_behavior = .Inherit;
    child_proc.cwd = rel.vsn_dir;
    const exec_result = try child_proc.spawnAndWait();
    // NOTE(review): exec_result is a ChildProcess.Term union; "{s}" may not
    // format it on newer Zig versions — confirm against the targeted version.
    log.debug("Elixir run result: {s}", .{exec_result});
}

/// Copies the release's metadata into `map` as the standard RELEASE_* /
/// ERTS_VSN environment variables, logging each value at debug level.
fn putReleaseValues(map: *std.BufMap, rel: Release) !void {
    try map.put("RELEASE_BOOT_SCRIPT", rel.boot_script);
    try map.put("RELEASE_BOOT_SCRIPT_CLEAN", rel.boot_script_clean);
    try map.put("RELEASE_COMMAND", rel.command);
    try map.put("RELEASE_COOKIE", rel.cookie);
    try map.put("RELEASE_DISTRIBUTION", rel.distribution);
    try map.put("ERTS_VSN", rel.erts_vsn);
    // TODO: extra?
    try map.put("RELEASE_MODE", rel.mode);
    try map.put("RELEASE_NAME", rel.name);
    try map.put("RELEASE_NODE", rel.node);
    try map.put("RELEASE_PROG", rel.prog);
    try map.put("RELEASE_REMOTE_VM_ARGS", rel.remote_vm_args);
    try map.put("RELEASE_ROOT", rel.root);
    try map.put("RELEASE_SYS_CONFIG", rel.sys_config);
    try map.put("RELEASE_TMP", rel.tmp);
    try map.put("RELEASE_VM_ARGS", rel.vm_args);
    try map.put("RELEASE_VSN", rel.vsn);
    try map.put("RELEASE_VSN_DIR", rel.vsn_dir);

    // todo: set vm args
    log.debug("RELEASE_BOOT_SCRIPT: {s}", .{rel.boot_script});
    log.debug("RELEASE_BOOT_SCRIPT_CLEAN: {s}", .{rel.boot_script_clean});
    log.debug("RELEASE_COMMAND: {s}", .{rel.command});
    log.debug("RELEASE_COOKIE: {s}", .{rel.cookie});
    log.debug("RELEASE_DISTRIBUTION: {s}", .{rel.distribution});
    log.debug("ERTS_VSN: {s}", .{rel.erts_vsn});
    log.debug("RELEASE_MODE: {s}", .{rel.mode});
    log.debug("RELEASE_NAME: {s}", .{rel.name});
    log.debug("RELEASE_NODE: {s}", .{rel.node});
    log.debug("RELEASE_PROG: {s}", .{rel.prog});
    log.debug("RELEASE_REMOTE_VM_ARGS: {s}", .{rel.remote_vm_args});
    log.debug("RELEASE_ROOT: {s}", .{rel.root});
    log.debug("RELEASE_SYS_CONFIG: {s}", .{rel.sys_config});
    log.debug("RELEASE_TMP: {s}", .{rel.tmp});
    log.debug("RELEASE_VM_ARGS: {s}", .{rel.vm_args});
    log.debug("RELEASE_VSN: {s}", .{rel.vsn});
    log.debug("RELEASE_VSN_DIR: {s}", .{rel.vsn_dir});
}

/// Loads `.env.<command>` (preferred) or `.env` from the release version
/// directory, parses it, and merges the key/value pairs into `map`.
/// Missing files are not an error; parse failures propagate.
fn putDotEnvValues(allocator: Allocator, map: *std.BufMap, rel: Release) !void {
    // load values from .env.<command> or .env in release version directory ...
    const dotenv_command = try std.fmt.allocPrint(allocator, ".env.{s}", .{rel.command});
    const dotenv_command_path = try fs.path.join(allocator, &[_][]const u8{ rel.vsn_dir, dotenv_command });
    const dotenv_path = try fs.path.join(allocator, &[_][]const u8{ rel.vsn_dir, ".env" });
    var dotenv: DotEnv = DotEnv.init(allocator);
    defer dotenv.deinit();
    if (fs.cwd().openFile(dotenv_command_path, .{})) |file| {
        defer file.close();
        // 1 MB cap on the .env file size.
        const bytes_read = try file.reader().readAllAlloc(allocator, 1_000_000);
        try dotenv.parse(bytes_read);
    } else |_| {
        if (fs.cwd().openFile(dotenv_path, .{})) |file| {
            defer file.close();
            const bytes_read = try file.reader().readAllAlloc(allocator, 1_000_000);
            try dotenv.parse(bytes_read);
        } else |_| {
            log.debug(".env not found", .{});
        }
    }

    // ...and add them to the provided map
    var iter = dotenv.map.iterator();
    while (iter.next()) |entry| {
        try map.put(entry.key_ptr.*, entry.value_ptr.*);
    }
}

/// `start` command: boots the release with its configured boot script,
/// sys.config, boot vars, and vm.args (paths joined with backslashes —
/// Windows-specific).
pub fn start(allocator: Allocator, rel: Release) !void {
    var args = try std.ArrayList([]const u8).initCapacity(allocator, 14);
    defer args.deinit();
    try args.appendSlice(&.{
        rel.extra,
        "--cookie",
        rel.cookie,
    });

    // distribution flag
    if (!mem.eql(u8, rel.distribution, "none")) {
        try args.appendSlice(&.{
            try fmt.allocPrint(allocator, "--{s}", .{rel.distribution}),
            rel.node,
        });
    }

    try args.appendSlice(&.{
        "-mode",
        rel.mode,
        "--erl-config",
        rel.sys_config,
        "--boot",
        try fmt.allocPrint(allocator, "{s}\\{s}", .{ rel.vsn_dir, rel.boot_script }),
        "--boot-var",
        "RELEASE_LIB",
        try fmt.allocPrint(allocator, "{s}\\lib", .{rel.root}),
        "--vm-args",
        rel.vm_args,
    });
    try elixir(allocator, rel, args.items);
}

/// `iex` command: prepends the flags IEx needs and forwards any extra
/// arguments to the launcher.
pub fn iex(allocator: Allocator, rel: Release, iex_args: []const []const u8) !void {
    var args = std.ArrayList([]const u8).init(allocator);
    defer args.deinit();
    try args.appendSlice(&.{ "--no-halt", "--erl", "-noshell -user Elixir.IEx.CLI", "+iex" });
    if (iex_args.len > 0) try args.appendSlice(iex_args);
    for (args.items) |arg, i| log.debug("iex arg[{d}] {s}", .{ i, arg });
    try elixir(
        allocator,
        rel,
        args.items,
    );
}

/// `remote` command: opens a hidden IEx shell (via werl) remsh'd into the
/// running node, using a randomized "rem-<n>-<node>" name to avoid clashes.
pub fn remote(allocator: Allocator, rel: Release) !void {
    var args = std.ArrayList([]const u8).init(allocator);
    defer args.deinit();
    try args.appendSlice(&.{ "--werl", "--hidden", "--cookie", rel.cookie });

    // distribution flag
    if (!mem.eql(u8, rel.distribution, "none")) {
        const random = std.crypto.random.intRangeAtMost(u16, 0, 32767);
        try args.appendSlice(&.{
            try fmt.allocPrint(allocator, "--{s}", .{rel.distribution}),
            try fmt.allocPrint(allocator, "rem-{d}-{s}", .{ random, rel.node }),
        });
    }

    try args.appendSlice(&.{
        "--boot",
        try fmt.allocPrint(allocator, "{s}\\{s}", .{ rel.vsn_dir, rel.boot_script_clean }),
        "--boot-var",
        "RELEASE_LIB",
        try fmt.allocPrint(allocator, "{s}\\lib", .{rel.root}),
        "--vm-args",
        rel.vm_args,
        "--remsh",
        rel.node,
    });
    try iex(allocator, rel, args.items);
}

/// `rpc` command: evaluates `expr` on the running node from a hidden
/// throwaway node named "rpc-<n>-<node>".
pub fn rpc(allocator: Allocator, rel: Release, expr: []const u8) !void {
    var args = std.ArrayList([]const u8).init(allocator);
    defer args.deinit();
    try args.appendSlice(&.{
        "--hidden",
        "--cookie",
        rel.cookie,
    });

    // distribution flag
    if (!mem.eql(u8, rel.distribution, "none")) {
        const random = std.crypto.random.intRangeAtMost(u16, 0, 32767);
        try args.appendSlice(&.{
            try fmt.allocPrint(allocator, "--{s}", .{rel.distribution}),
            try fmt.allocPrint(allocator, "rpc-{d}-{s}", .{ random, rel.node }),
        });
    }

    try args.appendSlice(
        &.{
            "--boot",
            try fmt.allocPrint(allocator, "{s}\\{s}", .{ rel.vsn_dir, rel.boot_script_clean }),
            "--boot-var",
            "RELEASE_LIB",
            try fmt.allocPrint(allocator, "{s}\\lib", .{rel.root}),
            "--vm-args",
            rel.vm_args,
            "--rpc-eval",
            rel.node,
            expr,
        },
    );
    try elixir(allocator, rel, args.items);
}

/// `stop` command: graceful shutdown via System.stop/0 on the remote node.
pub fn stop(allocator: Allocator, rel: Release) !void {
    try rpc(allocator, rel, "System.stop()");
}

/// `restart` command: System.restart/0 on the remote node.
pub fn restart(allocator: Allocator, rel: Release) !void {
    try rpc(allocator, rel, "System.restart()");
}

/// `pid` command: prints the remote node's OS pid.
pub fn pid(allocator: Allocator, rel: Release) !void {
    try rpc(allocator, rel, "IO.puts(System.pid())");
}

/// `eval` command: runs `expr` in a fresh (non-distributed) instance of the
/// release using the clean boot script.
pub fn eval(allocator: Allocator, rel: Release, expr: []const u8) !void {
    try elixir(allocator, rel, &.{
        "--eval",
        expr,
        "--cookie",
        rel.cookie,
        "--erl-config",
        rel.sys_config,
        "--boot",
        try fmt.allocPrint(allocator, "{s}\\{s}", .{ rel.vsn_dir, rel.boot_script_clean }),
        "--boot-var",
        "RELEASE_LIB",
        try fmt.allocPrint(allocator, "{s}\\lib", .{rel.root}),
        "--vm-args",
        rel.vm_args,
    });
}
src/elixir.zig
const cmp = @import("cmp.zig");
const testing = @import("std").testing;

/// Checks that __cmpti2(a, b) yields the three-valued comparison result:
/// 0 when a < b, 1 when a == b, 2 when a > b.
fn test__cmpti2(a: i128, b: i128, expected: i128) !void {
    const actual = cmp.__cmpti2(a, b);
    try testing.expectEqual(expected, actual);
}

test "cmpti2" {
    // Probe values covering the extremes and midpoints of the i128 range:
    // minInt, minInt+1, minInt/2, -1, 0, 1, maxInt/2, maxInt-1, maxInt.
    const probes = [_]i128{
        -170141183460469231731687303715884105728, // minInt
        -170141183460469231731687303715884105727, // minInt + 1
        -85070591730234615865843651857942052864, // minInt / 2
        -1,
        0,
        1,
        85070591730234615865843651857942052863, // maxInt / 2
        170141183460469231731687303715884105726, // maxInt - 1
        170141183460469231731687303715884105727, // maxInt
    };

    // Exercise the full 9x9 cross product (equality on the diagonal,
    // ordering everywhere else), computing the expected three-valued
    // result directly from the operands.
    for (probes) |lhs| {
        for (probes) |rhs| {
            const expected: i128 = if (lhs < rhs) 0 else if (lhs == rhs) 1 else 2;
            try test__cmpti2(lhs, rhs, expected);
        }
    }
}
lib/std/special/compiler_rt/cmpti2_test.zig
//! Pretty-printer ("zig fmt") for a parsed `ast.Tree`: renders the tree back
//! to canonical source text through a stream, honoring `zig fmt: off/on`
//! directives. (Targets an early Zig std API: `var` parameters,
//! `io.OutStream` with a `writeFn` field, `@typeId`.)
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const ast = std.zig.ast;
const Token = std.zig.Token;

// Number of spaces per indentation level.
const indent_delta = 4;

pub const Error = error{
    /// Ran out of memory allocating call stack frames to complete rendering.
    OutOfMemory,
};

/// Returns whether anything changed
/// Renders `tree` to `stream`, wrapping it in a pass-through stream that
/// diffs the rendered bytes against `tree.source` as they are written; the
/// first divergence (or a length mismatch at the end) marks the tree as
/// changed.
pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@TypeOf(stream).Child.Error || Error)!bool {
    comptime assert(@typeId(@TypeOf(stream)) == builtin.TypeId.Pointer);
    var anything_changed: bool = false;
    // make a passthrough stream that checks whether something changed
    const MyStream = struct {
        const MyStream = @This();
        const StreamError = @TypeOf(stream).Child.Error;
        const Stream = std.io.OutStream(StreamError);

        anything_changed_ptr: *bool,
        child_stream: @TypeOf(stream),
        stream: Stream,
        source_index: usize,
        source: []const u8,

        // Compares each written chunk against the original source at the
        // current position before forwarding it to the real stream.
        fn write(iface_stream: *Stream, bytes: []const u8) StreamError!void {
            const self = @fieldParentPtr(MyStream, "stream", iface_stream);

            if (!self.anything_changed_ptr.*) {
                const end = self.source_index + bytes.len;
                if (end > self.source.len) {
                    // Output is longer than the original source.
                    self.anything_changed_ptr.* = true;
                } else {
                    const src_slice = self.source[self.source_index..end];
                    self.source_index += bytes.len;
                    if (!mem.eql(u8, bytes, src_slice)) {
                        self.anything_changed_ptr.* = true;
                    }
                }
            }

            try self.child_stream.write(bytes);
        }
    };
    var my_stream = MyStream{
        .stream = MyStream.Stream{ .writeFn = MyStream.write },
        .child_stream = stream,
        .anything_changed_ptr = &anything_changed,
        .source_index = 0,
        .source = tree.source,
    };

    try renderRoot(allocator, &my_stream.stream, tree);

    // Output shorter than the original also counts as a change.
    if (!anything_changed and my_stream.source_index != my_stream.source.len) {
        anything_changed = true;
    }

    return anything_changed;
}

/// Renders the file's leading line comments, then every top-level decl,
/// skipping reformatting for regions disabled with `// zig fmt: off`
/// (those are copied through verbatim, modulo whitespace fixing).
fn renderRoot(
    allocator: *mem.Allocator,
    stream: var,
    tree: *ast.Tree,
) (@TypeOf(stream).Child.Error || Error)!void {
    var tok_it = tree.tokens.iterator(0);

    // render all the line comments at the beginning of the file
    while (tok_it.next()) |token| {
        if (token.id != .LineComment) break;
        try stream.print("{}\n", .{mem.trimRight(u8, tree.tokenSlicePtr(token), " ")});
        if (tok_it.peek()) |next_token| {
            const loc = tree.tokenLocationPtr(token.end, next_token);
            if (loc.line >= 2) {
                try stream.writeByte('\n');
            }
        }
    }

    var start_col: usize = 0;
    var it = tree.root_node.decls.iterator(0);
    while (true) {
        var decl = (it.next() orelse return).*;

        // This loop does the following:
        //
        //  - Iterates through line/doc comment tokens that precedes the current
        //    decl.
        //  - Figures out the first token index (`copy_start_token_index`) which
        //    hasn't been copied to the output stream yet.
        //  - Detects `zig fmt: (off|on)` in the line comment tokens, and
        //    determines whether the current decl should be reformatted or not.
        //
        var token_index = decl.firstToken();
        var fmt_active = true;
        var found_fmt_directive = false;

        var copy_start_token_index = token_index;

        // Scan backwards over the comments preceding the decl; the directive
        // closest to the decl wins (hence the found_fmt_directive guard).
        while (token_index != 0) {
            token_index -= 1;
            const token = tree.tokens.at(token_index);
            switch (token.id) {
                .LineComment => {},
                .DocComment => {
                    copy_start_token_index = token_index;
                    continue;
                },
                else => break,
            }

            if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) {
                if (!found_fmt_directive) {
                    fmt_active = false;
                    found_fmt_directive = true;
                }
            } else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) {
                if (!found_fmt_directive) {
                    fmt_active = true;
                    found_fmt_directive = true;
                }
            }
        }

        if (!fmt_active) {
            // Reformatting is disabled for the current decl and possibly some
            // more decls that follow.
            // Find the next `decl` for which reformatting is re-enabled.
            token_index = decl.firstToken();

            while (!fmt_active) {
                decl = (it.next() orelse {
                    // If there's no next reformatted `decl`, just copy the
                    // remaining input tokens and bail out.
                    const start = tree.tokens.at(copy_start_token_index).start;
                    try copyFixingWhitespace(stream, tree.source[start..]);
                    return;
                }).*;
                var decl_first_token_index = decl.firstToken();

                // Look for a `zig fmt: on` directive in the comments between
                // the previous decl and this one.
                while (token_index < decl_first_token_index) : (token_index += 1) {
                    const token = tree.tokens.at(token_index);
                    switch (token.id) {
                        .LineComment => {},
                        .Eof => unreachable,
                        else => continue,
                    }
                    if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) {
                        fmt_active = true;
                    } else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) {
                        fmt_active = false;
                    }
                }
            }

            // Found the next `decl` for which reformatting is enabled. Copy
            // the input tokens before the `decl` that haven't been copied yet.
            var copy_end_token_index = decl.firstToken();
            token_index = copy_end_token_index;
            while (token_index != 0) {
                token_index -= 1;
                const token = tree.tokens.at(token_index);
                switch (token.id) {
                    .LineComment => {},
                    .DocComment => {
                        copy_end_token_index = token_index;
                        continue;
                    },
                    else => break,
                }
            }

            const start = tree.tokens.at(copy_start_token_index).start;
            const end = tree.tokens.at(copy_end_token_index).start;
            try copyFixingWhitespace(stream, tree.source[start..end]);
        }

        try renderTopLevelDecl(allocator, stream, tree, 0, &start_col, decl);
        if (it.peek()) |next_decl| {
            try renderExtraNewline(tree, stream, &start_col, next_decl.*);
        }
    }
}

/// Emits a blank line before `node` when the original source had one or more
/// blank lines between the previous token (skipping doc comments) and `node`.
fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) @TypeOf(stream).Child.Error!void {
    const first_token = node.firstToken();
    var prev_token = first_token;
    while (tree.tokens.at(prev_token - 1).id == .DocComment) {
        prev_token -= 1;
    }
    const prev_token_end = tree.tokens.at(prev_token - 1).end;
    const loc = tree.tokenLocation(prev_token_end, first_token);
    if (loc.line >= 2) {
        try stream.writeByte('\n');
        start_col.* = 0;
    }
}

/// Dispatches on the top-level decl kind (fn, usingnamespace, var, test,
/// container field, comptime, doc comment) and renders it, including its
/// preceding doc comments.
fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Child.Error || Error)!void
{
    switch (decl.id) {
        .FnProto => {
            const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);

            try renderDocComments(tree, stream, fn_proto, indent, start_col);

            if (fn_proto.body_node) |body_node| {
                try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.Space);
                try renderExpression(allocator, stream, tree, indent, start_col, body_node, Space.Newline);
            } else {
                try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.None);
                try renderToken(tree, stream, tree.nextToken(decl.lastToken()), indent, start_col, Space.Newline);
            }
        },

        .Use => {
            const use_decl = @fieldParentPtr(ast.Node.Use, "base", decl);

            if (use_decl.visib_token) |visib_token| {
                try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
            }
            try renderToken(tree, stream, use_decl.use_token, indent, start_col, Space.Space); // usingnamespace
            try renderExpression(allocator, stream, tree, indent, start_col, use_decl.expr, Space.None);
            try renderToken(tree, stream, use_decl.semicolon_token, indent, start_col, Space.Newline); // ;
        },

        .VarDecl => {
            const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl);

            try renderDocComments(tree, stream, var_decl, indent, start_col);
            try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
        },

        .TestDecl => {
            const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl);

            try renderDocComments(tree, stream, test_decl, indent, start_col);
            try renderToken(tree, stream, test_decl.test_token, indent, start_col, Space.Space);
            try renderExpression(allocator, stream, tree, indent, start_col, test_decl.name, Space.Space);
            try renderExpression(allocator, stream, tree, indent, start_col, test_decl.body_node, Space.Newline);
        },

        .ContainerField => {
            const field = @fieldParentPtr(ast.Node.ContainerField, "base", decl);

            try renderDocComments(tree, stream, field, indent, start_col);

            if (field.comptime_token) |t| {
                try renderToken(tree, stream, t, indent, start_col, Space.Space); // comptime
            }

            // Four cases by presence of a type expression and/or a value.
            if (field.type_expr == null and field.value_expr == null) {
                return renderToken(tree, stream, field.name_token, indent, start_col, Space.Comma); // name,
            } else if (field.type_expr != null and field.value_expr == null) {
                try renderToken(tree, stream, field.name_token, indent, start_col, Space.None); // name
                try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // :

                if (field.align_expr) |align_value_expr| {
                    try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Space); // type
                    const lparen_token = tree.prevToken(align_value_expr.firstToken());
                    const align_kw = tree.prevToken(lparen_token);
                    const rparen_token = tree.nextToken(align_value_expr.lastToken());
                    try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
                    try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
                    try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, Space.None); // alignment
                    try renderToken(tree, stream, rparen_token, indent, start_col, Space.Comma); // ),
                } else {
                    try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Comma); // type,
                }
            } else if (field.type_expr == null and field.value_expr != null) {
                try renderToken(tree, stream, field.name_token, indent, start_col, Space.Space); // name
                try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // =
                return renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, Space.Comma); // value
            } else {
                try renderToken(tree, stream, field.name_token, indent, start_col, Space.None); // name
                try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // :

                if (field.align_expr) |align_value_expr| {
                    try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Space); // type
                    const lparen_token = tree.prevToken(align_value_expr.firstToken());
                    const align_kw = tree.prevToken(lparen_token);
                    const rparen_token = tree.nextToken(align_value_expr.lastToken());
                    try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
                    try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
                    try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, Space.None); // alignment
                    try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
                } else {
                    try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Space); // type
                }
                try renderToken(tree, stream, tree.prevToken(field.value_expr.?.firstToken()), indent, start_col, Space.Space); // =
                return renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, Space.Comma); // value,
            }
        },

        .Comptime => {
            assert(!decl.requireSemiColon());
            try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.Newline);
        },

        .DocComment => {
            const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl);
            var it = comment.lines.iterator(0);
            while (it.next()) |line_token_index| {
                try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.Newline);
                if (it.peek()) |_| {
                    try stream.writeByteNTimes(' ', indent);
                }
            }
        },
        else => unreachable,
    }
}

/// Core expression renderer; dispatches on the node kind. `space` selects
/// what to emit after the expression's final token.
/// (This function continues beyond the end of this chunk.)
fn renderExpression(
    allocator: *mem.Allocator,
    stream: var,
    tree: *ast.Tree,
    indent: usize,
    start_col: *usize,
    base: *ast.Node,
    space: Space,
) (@TypeOf(stream).Child.Error || Error)!void {
    switch (base.id) {
        .Identifier => {
            const identifier = @fieldParentPtr(ast.Node.Identifier, "base", base);
            return renderToken(tree, stream, identifier.token, indent, start_col, space);
        },
        .Block => {
            const block = @fieldParentPtr(ast.Node.Block, "base", base);

            if (block.label) |label| {
                try renderToken(tree, stream, label, indent, start_col, Space.None);
                try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space);
            }

            if (block.statements.len == 0) {
                // Empty block renders as `{}` on one line.
                try renderToken(tree, stream, block.lbrace, indent + indent_delta, start_col, Space.None);
                return renderToken(tree, stream, block.rbrace, indent, start_col, space);
            } else {
                const block_indent = indent + indent_delta;
                try renderToken(tree, stream, block.lbrace, block_indent, start_col, Space.Newline);

                var it = block.statements.iterator(0);
                while (it.next()) |statement| {
                    try stream.writeByteNTimes(' ', block_indent);
                    try renderStatement(allocator, stream, tree, block_indent, start_col, statement.*);

                    if (it.peek()) |next_statement| {
                        try renderExtraNewline(tree, stream, start_col, next_statement.*);
                    }
                }

                try stream.writeByteNTimes(' ', indent);
                return renderToken(tree, stream, block.rbrace, indent, start_col, space);
            }
        },
        .Defer => {
            const defer_node = @fieldParentPtr(ast.Node.Defer, "base", base);

            try renderToken(tree, stream, defer_node.defer_token, indent, start_col, Space.Space);
            return renderExpression(allocator, stream, tree, indent, start_col, defer_node.expr, space);
        },
        .Comptime => {
            const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", base);

            try renderToken(tree, stream, comptime_node.comptime_token, indent, start_col, Space.Space);
            return renderExpression(allocator, stream, tree, indent, start_col, comptime_node.expr, space);
        },
        .Suspend => {
            const suspend_node = @fieldParentPtr(ast.Node.Suspend, "base", base);

            if (suspend_node.body) |body| {
                try renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, Space.Space);
                return renderExpression(allocator, stream, tree, indent, start_col, body, space);
            } else {
                return renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, space);
            }
        },
        .InfixOp => {
            const infix_op_node = @fieldParentPtr(ast.Node.InfixOp, "base", base);

            // `.`/`!`/`..` bind tightly with no surrounding spaces.
            const op_space = switch (infix_op_node.op) {
                ast.Node.InfixOp.Op.Period, ast.Node.InfixOp.Op.ErrorUnion, ast.Node.InfixOp.Op.Range => Space.None,
                else => Space.Space,
            };
            try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);

            const
after_op_space = blk: { const loc = tree.tokenLocation(tree.tokens.at(infix_op_node.op_token).end, tree.nextToken(infix_op_node.op_token)); break :blk if (loc.line == 0) op_space else Space.Newline; }; try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space); if (after_op_space == Space.Newline and tree.tokens.at(tree.nextToken(infix_op_node.op_token)).id != .MultilineStringLiteralLine) { try stream.writeByteNTimes(' ', indent + indent_delta); start_col.* = indent + indent_delta; } switch (infix_op_node.op) { ast.Node.InfixOp.Op.Catch => |maybe_payload| if (maybe_payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); }, else => {}, } return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space); }, .PrefixOp => { const prefix_op_node = @fieldParentPtr(ast.Node.PrefixOp, "base", base); switch (prefix_op_node.op) { .PtrType => |ptr_info| { const op_tok_id = tree.tokens.at(prefix_op_node.op_token).id; switch (op_tok_id) { .Asterisk, .AsteriskAsterisk => try stream.writeByte('*'), .LBracket => if (tree.tokens.at(prefix_op_node.op_token + 2).id == .Identifier) try stream.write("[*c") else try stream.write("[*"), else => unreachable, } if (ptr_info.sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // : const sentinel_space = switch (op_tok_id) { .LBracket => Space.None, else => Space.Space, }; try renderExpression(allocator, stream, tree, indent, start_col, sentinel, sentinel_space); } switch (op_tok_id) { .Asterisk, .AsteriskAsterisk => {}, .LBracket => try stream.writeByte(']'), else => unreachable, } if (ptr_info.allowzero_token) |allowzero_token| { try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero } if (ptr_info.align_info) |align_info| { const lparen_token = tree.prevToken(align_info.node.firstToken()); 
const align_token = tree.prevToken(lparen_token); try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None); if (align_info.bit_range) |bit_range| { const colon1 = tree.prevToken(bit_range.start.firstToken()); const colon2 = tree.prevToken(bit_range.end.firstToken()); try renderToken(tree, stream, colon1, indent, start_col, Space.None); // : try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None); try renderToken(tree, stream, colon2, indent, start_col, Space.None); // : try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None); const rparen_token = tree.nextToken(bit_range.end.lastToken()); try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) } else { const rparen_token = tree.nextToken(align_info.node.lastToken()); try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) } } if (ptr_info.const_token) |const_token| { try renderToken(tree, stream, const_token, indent, start_col, Space.Space); // const } if (ptr_info.volatile_token) |volatile_token| { try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile } }, .SliceType => |ptr_info| { try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); // [ if (ptr_info.sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // : try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None); try renderToken(tree, stream, tree.nextToken(sentinel.lastToken()), indent, start_col, Space.None); // ] } else { try renderToken(tree, stream, tree.nextToken(prefix_op_node.op_token), indent, start_col, Space.None); // ] } if 
(ptr_info.allowzero_token) |allowzero_token| { try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero } if (ptr_info.align_info) |align_info| { const lparen_token = tree.prevToken(align_info.node.firstToken()); const align_token = tree.prevToken(lparen_token); try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None); if (align_info.bit_range) |bit_range| { const colon1 = tree.prevToken(bit_range.start.firstToken()); const colon2 = tree.prevToken(bit_range.end.firstToken()); try renderToken(tree, stream, colon1, indent, start_col, Space.None); // : try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None); try renderToken(tree, stream, colon2, indent, start_col, Space.None); // : try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None); const rparen_token = tree.nextToken(bit_range.end.lastToken()); try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) } else { const rparen_token = tree.nextToken(align_info.node.lastToken()); try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // ) } } if (ptr_info.const_token) |const_token| { try renderToken(tree, stream, const_token, indent, start_col, Space.Space); } if (ptr_info.volatile_token) |volatile_token| { try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); } }, .ArrayType => |array_info| { const lbracket = prefix_op_node.op_token; const rbracket = tree.nextToken(if (array_info.sentinel) |sentinel| sentinel.lastToken() else array_info.len_expr.lastToken()); try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [ const starts_with_comment = tree.tokens.at(lbracket + 1).id == .LineComment; const ends_with_comment = 
tree.tokens.at(rbracket - 1).id == .LineComment; const new_indent = if (ends_with_comment) indent + indent_delta else indent; const new_space = if (ends_with_comment) Space.Newline else Space.None; try renderExpression(allocator, stream, tree, new_indent, start_col, array_info.len_expr, new_space); if (starts_with_comment) { try stream.writeByte('\n'); } if (ends_with_comment or starts_with_comment) { try stream.writeByteNTimes(' ', indent); } if (array_info.sentinel) |sentinel| { const colon_token = tree.prevToken(sentinel.firstToken()); try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // : try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None); } try renderToken(tree, stream, rbracket, indent, start_col, Space.None); // ] }, .BitNot, .BoolNot, .Negation, .NegationWrap, .OptionalType, .AddressOf, => { try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); }, .Try, .Await, .Cancel, .Resume, => { try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.Space); }, } return renderExpression(allocator, stream, tree, indent, start_col, prefix_op_node.rhs, space); }, .SuffixOp => { const suffix_op = @fieldParentPtr(ast.Node.SuffixOp, "base", base); switch (suffix_op.op) { .Call => |*call_info| { if (call_info.async_token) |async_token| { try renderToken(tree, stream, async_token, indent, start_col, Space.Space); } try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None); const lparen = tree.nextToken(suffix_op.lhs.node.lastToken()); if (call_info.params.len == 0) { try renderToken(tree, stream, lparen, indent, start_col, Space.None); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } const src_has_trailing_comma = blk: { const maybe_comma = tree.prevToken(suffix_op.rtoken); break :blk tree.tokens.at(maybe_comma).id == .Comma; }; if (src_has_trailing_comma) { const new_indent = indent + 
indent_delta; try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); var it = call_info.params.iterator(0); while (true) { const param_node = it.next().?; const param_node_new_indent = if (param_node.*.id == .MultilineStringLiteral) blk: { break :blk indent; } else blk: { try stream.writeByteNTimes(' ', new_indent); break :blk new_indent; }; if (it.peek()) |next_node| { try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node.*, Space.None); const comma = tree.nextToken(param_node.*.lastToken()); try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // , try renderExtraNewline(tree, stream, start_col, next_node.*); } else { try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node.*, Space.Comma); try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } } } try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( var it = call_info.params.iterator(0); while (it.next()) |param_node| { try renderExpression(allocator, stream, tree, indent, start_col, param_node.*, Space.None); if (it.peek() != null) { const comma = tree.nextToken(param_node.*.lastToken()); try renderToken(tree, stream, comma, indent, start_col, Space.Space); } } return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); }, .ArrayAccess => |index_expr| { const lbracket = tree.prevToken(index_expr.firstToken()); const rbracket = tree.nextToken(index_expr.lastToken()); try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None); try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [ const starts_with_comment = tree.tokens.at(lbracket + 1).id == .LineComment; const ends_with_comment = tree.tokens.at(rbracket - 1).id == .LineComment; const new_indent = if (ends_with_comment) indent + indent_delta else indent; const new_space = if 
(ends_with_comment) Space.Newline else Space.None; try renderExpression(allocator, stream, tree, new_indent, start_col, index_expr, new_space); if (starts_with_comment) { try stream.writeByte('\n'); } if (ends_with_comment or starts_with_comment) { try stream.writeByteNTimes(' ', indent); } return renderToken(tree, stream, rbracket, indent, start_col, space); // ] }, .Deref => { try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // .* }, .UnwrapOptional => { try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None); try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // . return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ? }, .Slice => |range| { try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None); const lbracket = tree.prevToken(range.start.firstToken()); const dotdot = tree.nextToken(range.start.lastToken()); const after_start_space_bool = nodeCausesSliceOpSpace(range.start) or (if (range.end) |end| nodeCausesSliceOpSpace(end) else false); const after_start_space = if (after_start_space_bool) Space.Space else Space.None; const after_op_space = if (range.end != null) after_start_space else Space.None; try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [ try renderExpression(allocator, stream, tree, indent, start_col, range.start, after_start_space); try renderToken(tree, stream, dotdot, indent, start_col, after_op_space); // .. 
if (range.end) |end| { try renderExpression(allocator, stream, tree, indent, start_col, end, Space.None); } return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ] }, .StructInitializer => |*field_inits| { const lbrace = switch (suffix_op.lhs) { .dot => |dot| tree.nextToken(dot), .node => |node| tree.nextToken(node.lastToken()), }; if (field_inits.len == 0) { switch (suffix_op.lhs) { .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), } try renderToken(tree, stream, lbrace, indent + indent_delta, start_col, Space.None); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } const src_has_trailing_comma = blk: { const maybe_comma = tree.prevToken(suffix_op.rtoken); break :blk tree.tokens.at(maybe_comma).id == .Comma; }; const src_same_line = blk: { const loc = tree.tokenLocation(tree.tokens.at(lbrace).end, suffix_op.rtoken); break :blk loc.line == 0; }; const expr_outputs_one_line = blk: { // render field expressions until a LF is found var it = field_inits.iterator(0); while (it.next()) |field_init| { var find_stream = FindByteOutStream.init('\n'); var dummy_col: usize = 0; try renderExpression(allocator, &find_stream.stream, tree, 0, &dummy_col, field_init.*, Space.None); if (find_stream.byte_found) break :blk false; } break :blk true; }; if (field_inits.len == 1) blk: { const field_init = field_inits.at(0).*.cast(ast.Node.FieldInitializer).?; if (field_init.expr.cast(ast.Node.SuffixOp)) |nested_suffix_op| { if (nested_suffix_op.op == .StructInitializer) { break :blk; } } // if the expression outputs to multiline, make this struct multiline if (!expr_outputs_one_line or src_has_trailing_comma) { break :blk; } switch (suffix_op.lhs) { .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), .node => |node| try renderExpression(allocator, stream, tree, indent, 
start_col, node, Space.None), } try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); try renderExpression(allocator, stream, tree, indent, start_col, &field_init.base, Space.Space); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } if (!src_has_trailing_comma and src_same_line and expr_outputs_one_line) { // render all on one line, no trailing comma switch (suffix_op.lhs) { .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), } try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); var it = field_inits.iterator(0); while (it.next()) |field_init| { if (it.peek() != null) { try renderExpression(allocator, stream, tree, indent, start_col, field_init.*, Space.None); const comma = tree.nextToken(field_init.*.lastToken()); try renderToken(tree, stream, comma, indent, start_col, Space.Space); } else { try renderExpression(allocator, stream, tree, indent, start_col, field_init.*, Space.Space); } } return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } const new_indent = indent + indent_delta; switch (suffix_op.lhs) { .dot => |dot| try renderToken(tree, stream, dot, new_indent, start_col, Space.None), .node => |node| try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.None), } try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); var it = field_inits.iterator(0); while (it.next()) |field_init| { try stream.writeByteNTimes(' ', new_indent); if (it.peek()) |next_field_init| { try renderExpression(allocator, stream, tree, new_indent, start_col, field_init.*, Space.None); const comma = tree.nextToken(field_init.*.lastToken()); try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); try renderExtraNewline(tree, stream, start_col, next_field_init.*); } else { try renderExpression(allocator, 
stream, tree, new_indent, start_col, field_init.*, Space.Comma); } } try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); }, .ArrayInitializer => |*exprs| { const lbrace = switch (suffix_op.lhs) { .dot => |dot| tree.nextToken(dot), .node => |node| tree.nextToken(node.lastToken()), }; if (exprs.len == 0) { switch (suffix_op.lhs) { .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), } try renderToken(tree, stream, lbrace, indent, start_col, Space.None); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } if (exprs.len == 1 and tree.tokens.at(exprs.at(0).*.lastToken() + 1).id == .RBrace) { const expr = exprs.at(0).*; switch (suffix_op.lhs) { .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), } try renderToken(tree, stream, lbrace, indent, start_col, Space.None); try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None); return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } switch (suffix_op.lhs) { .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None), .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None), } // scan to find row size const maybe_row_size: ?usize = blk: { var count: usize = 1; var it = exprs.iterator(0); while (true) { const expr = it.next().?.*; if (it.peek()) |next_expr| { const expr_last_token = expr.*.lastToken() + 1; const loc = tree.tokenLocation(tree.tokens.at(expr_last_token).end, next_expr.*.firstToken()); if (loc.line != 0) break :blk count; count += 1; } else { const expr_last_token = expr.*.lastToken(); const loc = tree.tokenLocation(tree.tokens.at(expr_last_token).end, 
suffix_op.rtoken); if (loc.line == 0) { // all on one line const src_has_trailing_comma = trailblk: { const maybe_comma = tree.prevToken(suffix_op.rtoken); break :trailblk tree.tokens.at(maybe_comma).id == .Comma; }; if (src_has_trailing_comma) { break :blk 1; // force row size 1 } else { break :blk null; // no newlines } } break :blk count; } } }; if (maybe_row_size) |row_size| { // A place to store the width of each expression and its column's maximum var widths = try allocator.alloc(usize, exprs.len + row_size); defer allocator.free(widths); mem.set(usize, widths, 0); var expr_widths = widths[0 .. widths.len - row_size]; var column_widths = widths[widths.len - row_size ..]; // Null stream for counting the printed length of each expression var null_stream = std.io.NullOutStream.init(); var counting_stream = std.io.CountingOutStream(std.io.NullOutStream.Error).init(&null_stream.stream); var it = exprs.iterator(0); var i: usize = 0; while (it.next()) |expr| : (i += 1) { counting_stream.bytes_written = 0; var dummy_col: usize = 0; try renderExpression(allocator, &counting_stream.stream, tree, indent, &dummy_col, expr.*, Space.None); const width = @intCast(usize, counting_stream.bytes_written); const col = i % row_size; column_widths[col] = std.math.max(column_widths[col], width); expr_widths[i] = width; } var new_indent = indent + indent_delta; if (tree.tokens.at(tree.nextToken(lbrace)).id != .MultilineStringLiteralLine) { try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); try stream.writeByteNTimes(' ', new_indent); } else { new_indent -= indent_delta; try renderToken(tree, stream, lbrace, new_indent, start_col, Space.None); } it.set(0); i = 0; var col: usize = 1; while (it.next()) |expr| : (i += 1) { if (it.peek()) |next_expr| { try renderExpression(allocator, stream, tree, new_indent, start_col, expr.*, Space.None); const comma = tree.nextToken(expr.*.lastToken()); if (col != row_size) { try renderToken(tree, stream, comma, new_indent, 
start_col, Space.Space); // , const padding = column_widths[i % row_size] - expr_widths[i]; try stream.writeByteNTimes(' ', padding); col += 1; continue; } col = 1; if (tree.tokens.at(tree.nextToken(comma)).id != .MultilineStringLiteralLine) { try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // , } else { try renderToken(tree, stream, comma, new_indent, start_col, Space.None); // , } try renderExtraNewline(tree, stream, start_col, next_expr.*); if (next_expr.*.id != .MultilineStringLiteral) { try stream.writeByteNTimes(' ', new_indent); } } else { try renderExpression(allocator, stream, tree, new_indent, start_col, expr.*, Space.Comma); // , } } const last_node = it.prev().?; if (last_node.*.id != .MultilineStringLiteral) { try stream.writeByteNTimes(' ', indent); } return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } else { try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); var it = exprs.iterator(0); while (it.next()) |expr| { if (it.peek()) |next_expr| { try renderExpression(allocator, stream, tree, indent, start_col, expr.*, Space.None); const comma = tree.nextToken(expr.*.lastToken()); try renderToken(tree, stream, comma, indent, start_col, Space.Space); // , } else { try renderExpression(allocator, stream, tree, indent, start_col, expr.*, Space.Space); } } return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); } }, } }, .ControlFlowExpression => { const flow_expr = @fieldParentPtr(ast.Node.ControlFlowExpression, "base", base); switch (flow_expr.kind) { .Break => |maybe_label| { if (maybe_label == null and flow_expr.rhs == null) { return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // break } try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // break if (maybe_label) |label| { const colon = tree.nextToken(flow_expr.ltoken); try renderToken(tree, stream, colon, indent, start_col, Space.None); // : if 
(flow_expr.rhs == null) { return renderExpression(allocator, stream, tree, indent, start_col, label, space); // label } try renderExpression(allocator, stream, tree, indent, start_col, label, Space.Space); // label } }, .Continue => |maybe_label| { assert(flow_expr.rhs == null); if (maybe_label == null and flow_expr.rhs == null) { return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // continue } try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // continue if (maybe_label) |label| { const colon = tree.nextToken(flow_expr.ltoken); try renderToken(tree, stream, colon, indent, start_col, Space.None); // : return renderExpression(allocator, stream, tree, indent, start_col, label, space); } }, .Return => { if (flow_expr.rhs == null) { return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); } try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); }, } return renderExpression(allocator, stream, tree, indent, start_col, flow_expr.rhs.?, space); }, .Payload => { const payload = @fieldParentPtr(ast.Node.Payload, "base", base); try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None); try renderExpression(allocator, stream, tree, indent, start_col, payload.error_symbol, Space.None); return renderToken(tree, stream, payload.rpipe, indent, start_col, space); }, .PointerPayload => { const payload = @fieldParentPtr(ast.Node.PointerPayload, "base", base); try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None); if (payload.ptr_token) |ptr_token| { try renderToken(tree, stream, ptr_token, indent, start_col, Space.None); } try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None); return renderToken(tree, stream, payload.rpipe, indent, start_col, space); }, .PointerIndexPayload => { const payload = @fieldParentPtr(ast.Node.PointerIndexPayload, "base", base); try renderToken(tree, stream, 
payload.lpipe, indent, start_col, Space.None); if (payload.ptr_token) |ptr_token| { try renderToken(tree, stream, ptr_token, indent, start_col, Space.None); } try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None); if (payload.index_symbol) |index_symbol| { const comma = tree.nextToken(payload.value_symbol.lastToken()); try renderToken(tree, stream, comma, indent, start_col, Space.Space); try renderExpression(allocator, stream, tree, indent, start_col, index_symbol, Space.None); } return renderToken(tree, stream, payload.rpipe, indent, start_col, space); }, .GroupedExpression => { const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", base); try renderToken(tree, stream, grouped_expr.lparen, indent, start_col, Space.None); try renderExpression(allocator, stream, tree, indent, start_col, grouped_expr.expr, Space.None); return renderToken(tree, stream, grouped_expr.rparen, indent, start_col, space); }, .FieldInitializer => { const field_init = @fieldParentPtr(ast.Node.FieldInitializer, "base", base); try renderToken(tree, stream, field_init.period_token, indent, start_col, Space.None); // . 
try renderToken(tree, stream, field_init.name_token, indent, start_col, Space.Space); // name
            try renderToken(tree, stream, tree.nextToken(field_init.name_token), indent, start_col, Space.Space); // =
            return renderExpression(allocator, stream, tree, indent, start_col, field_init.expr, space);
        },

        // Single-token leaf nodes: each renders exactly one token and forwards
        // the caller-requested trailing `space`.
        .IntegerLiteral => {
            const integer_literal = @fieldParentPtr(ast.Node.IntegerLiteral, "base", base);
            return renderToken(tree, stream, integer_literal.token, indent, start_col, space);
        },
        .FloatLiteral => {
            const float_literal = @fieldParentPtr(ast.Node.FloatLiteral, "base", base);
            return renderToken(tree, stream, float_literal.token, indent, start_col, space);
        },
        .StringLiteral => {
            const string_literal = @fieldParentPtr(ast.Node.StringLiteral, "base", base);
            return renderToken(tree, stream, string_literal.token, indent, start_col, space);
        },
        .CharLiteral => {
            const char_literal = @fieldParentPtr(ast.Node.CharLiteral, "base", base);
            return renderToken(tree, stream, char_literal.token, indent, start_col, space);
        },
        .BoolLiteral => {
            // Fix: was @fieldParentPtr(ast.Node.CharLiteral, ...) — wrong node
            // type for a .BoolLiteral base. It only happened to work because
            // both structs share the same layout; use the correct type.
            const bool_literal = @fieldParentPtr(ast.Node.BoolLiteral, "base", base);
            return renderToken(tree, stream, bool_literal.token, indent, start_col, space);
        },
        .NullLiteral => {
            const null_literal = @fieldParentPtr(ast.Node.NullLiteral, "base", base);
            return renderToken(tree, stream, null_literal.token, indent, start_col, space);
        },
        .Unreachable => {
            const unreachable_node = @fieldParentPtr(ast.Node.Unreachable, "base", base);
            return renderToken(tree, stream, unreachable_node.token, indent, start_col, space);
        },
        .ErrorType => {
            const error_type = @fieldParentPtr(ast.Node.ErrorType, "base", base);
            return renderToken(tree, stream, error_type.token, indent, start_col, space);
        },
        .VarType => {
            const var_type = @fieldParentPtr(ast.Node.VarType, "base", base);
            return renderToken(tree, stream, var_type.token, indent, start_col, space);
        },
        .ContainerDecl => {
            const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", base);
            if
(container_decl.layout_token) |layout_token| { try renderToken(tree, stream, layout_token, indent, start_col, Space.Space); } switch (container_decl.init_arg_expr) { .None => { try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.Space); // union }, .Enum => |enum_tag_type| { try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); const enum_token = tree.nextToken(lparen); try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( try renderToken(tree, stream, enum_token, indent, start_col, Space.None); // enum if (enum_tag_type) |expr| { try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None); const rparen = tree.nextToken(expr.lastToken()); try renderToken(tree, stream, rparen, indent, start_col, Space.None); // ) try renderToken(tree, stream, tree.nextToken(rparen), indent, start_col, Space.Space); // ) } else { try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.Space); // ) } }, .Type => |type_expr| { try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); const rparen = tree.nextToken(type_expr.lastToken()); try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, type_expr, Space.None); try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) }, } if (container_decl.fields_and_decls.len == 0) { try renderToken(tree, stream, container_decl.lbrace_token, indent + indent_delta, start_col, Space.None); // { return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // } } else { const new_indent = indent + indent_delta; try renderToken(tree, 
stream, container_decl.lbrace_token, new_indent, start_col, Space.Newline); // { var it = container_decl.fields_and_decls.iterator(0); while (it.next()) |decl| { try stream.writeByteNTimes(' ', new_indent); try renderTopLevelDecl(allocator, stream, tree, new_indent, start_col, decl.*); if (it.peek()) |next_decl| { try renderExtraNewline(tree, stream, start_col, next_decl.*); } } try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // } } }, .ErrorSetDecl => { const err_set_decl = @fieldParentPtr(ast.Node.ErrorSetDecl, "base", base); const lbrace = tree.nextToken(err_set_decl.error_token); if (err_set_decl.decls.len == 0) { try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); try renderToken(tree, stream, lbrace, indent, start_col, Space.None); return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); } if (err_set_decl.decls.len == 1) blk: { const node = err_set_decl.decls.at(0).*; // if there are any doc comments or same line comments // don't try to put it all on one line if (node.cast(ast.Node.ErrorTag)) |tag| { if (tag.doc_comments != null) break :blk; } else { break :blk; } try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // { try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None); return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // } } try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error try renderToken(tree, stream, lbrace, indent, start_col, Space.Newline); // { const new_indent = indent + indent_delta; var it = err_set_decl.decls.iterator(0); while (it.next()) |node| { try stream.writeByteNTimes(' ', new_indent); if (it.peek()) |next_node| { try renderExpression(allocator, stream, tree, 
new_indent, start_col, node.*, Space.None); try renderToken(tree, stream, tree.nextToken(node.*.lastToken()), new_indent, start_col, Space.Newline); // , try renderExtraNewline(tree, stream, start_col, next_node.*); } else { try renderExpression(allocator, stream, tree, new_indent, start_col, node.*, Space.Comma); } } try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // } }, .ErrorTag => { const tag = @fieldParentPtr(ast.Node.ErrorTag, "base", base); try renderDocComments(tree, stream, tag, indent, start_col); return renderToken(tree, stream, tag.name_token, indent, start_col, space); // name }, .MultilineStringLiteral => { // TODO: Don't indent in this function, but let the caller indent. // If this has been implemented, a lot of hacky solutions in i.e. ArrayInit and FunctionCall can be removed const multiline_str_literal = @fieldParentPtr(ast.Node.MultilineStringLiteral, "base", base); var skip_first_indent = true; if (tree.tokens.at(multiline_str_literal.firstToken() - 1).id != .LineComment) { try stream.print("\n", .{}); skip_first_indent = false; } var i: usize = 0; while (i < multiline_str_literal.lines.len) : (i += 1) { const t = multiline_str_literal.lines.at(i).*; if (!skip_first_indent) { try stream.writeByteNTimes(' ', indent + indent_delta); } try renderToken(tree, stream, t, indent, start_col, Space.None); skip_first_indent = false; } try stream.writeByteNTimes(' ', indent); }, .UndefinedLiteral => { const undefined_literal = @fieldParentPtr(ast.Node.UndefinedLiteral, "base", base); return renderToken(tree, stream, undefined_literal.token, indent, start_col, space); }, .BuiltinCall => { const builtin_call = @fieldParentPtr(ast.Node.BuiltinCall, "base", base); // TODO: Remove condition after deprecating 'typeOf'. 
See https://github.com/ziglang/zig/issues/1348 if (mem.eql(u8, tree.tokenSlicePtr(tree.tokens.at(builtin_call.builtin_token)), "@typeOf")) { try stream.write("@TypeOf"); } else { try renderToken(tree, stream, builtin_call.builtin_token, indent, start_col, Space.None); // @name } try renderToken(tree, stream, tree.nextToken(builtin_call.builtin_token), indent, start_col, Space.None); // ( var it = builtin_call.params.iterator(0); while (it.next()) |param_node| { try renderExpression(allocator, stream, tree, indent, start_col, param_node.*, Space.None); if (it.peek() != null) { const comma_token = tree.nextToken(param_node.*.lastToken()); try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // , } } return renderToken(tree, stream, builtin_call.rparen_token, indent, start_col, space); // ) }, .FnProto => { const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", base); if (fn_proto.visib_token) |visib_token_index| { const visib_token = tree.tokens.at(visib_token_index); assert(visib_token.id == .Keyword_pub or visib_token.id == .Keyword_export); try renderToken(tree, stream, visib_token_index, indent, start_col, Space.Space); // pub } if (fn_proto.extern_export_inline_token) |extern_export_inline_token| { try renderToken(tree, stream, extern_export_inline_token, indent, start_col, Space.Space); // extern/export } if (fn_proto.lib_name) |lib_name| { try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space); } if (fn_proto.cc_token) |cc_token| { try renderToken(tree, stream, cc_token, indent, start_col, Space.Space); // stdcallcc } const lparen = if (fn_proto.name_token) |name_token| blk: { try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name break :blk tree.nextToken(name_token); } else blk: { try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn break :blk 
tree.nextToken(fn_proto.fn_token); }; const rparen = tree.prevToken(switch (fn_proto.return_type) { ast.Node.FnProto.ReturnType.Explicit => |node| node.firstToken(), ast.Node.FnProto.ReturnType.InferErrorSet => |node| tree.prevToken(node.firstToken()), }); const src_params_trailing_comma = blk: { const maybe_comma = tree.tokens.at(rparen - 1).id; break :blk maybe_comma == .Comma or maybe_comma == .LineComment; }; if (!src_params_trailing_comma) { try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( // render all on one line, no trailing comma var it = fn_proto.params.iterator(0); while (it.next()) |param_decl_node| { try renderParamDecl(allocator, stream, tree, indent, start_col, param_decl_node.*, Space.None); if (it.peek() != null) { const comma = tree.nextToken(param_decl_node.*.lastToken()); try renderToken(tree, stream, comma, indent, start_col, Space.Space); // , } } } else { // one param per line const new_indent = indent + indent_delta; try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); // ( var it = fn_proto.params.iterator(0); while (it.next()) |param_decl_node| { try stream.writeByteNTimes(' ', new_indent); try renderParamDecl(allocator, stream, tree, new_indent, start_col, param_decl_node.*, Space.Comma); } try stream.writeByteNTimes(' ', indent); } try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) if (fn_proto.align_expr) |align_expr| { const align_rparen = tree.nextToken(align_expr.lastToken()); const align_lparen = tree.prevToken(align_expr.firstToken()); const align_kw = tree.prevToken(align_lparen); try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align try renderToken(tree, stream, align_lparen, indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, align_expr, Space.None); try renderToken(tree, stream, align_rparen, indent, start_col, Space.Space); // ) } if (fn_proto.section_expr) |section_expr| { 
const section_rparen = tree.nextToken(section_expr.lastToken()); const section_lparen = tree.prevToken(section_expr.firstToken()); const section_kw = tree.prevToken(section_lparen); try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // section try renderToken(tree, stream, section_lparen, indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, section_expr, Space.None); try renderToken(tree, stream, section_rparen, indent, start_col, Space.Space); // ) } switch (fn_proto.return_type) { ast.Node.FnProto.ReturnType.Explicit => |node| { return renderExpression(allocator, stream, tree, indent, start_col, node, space); }, ast.Node.FnProto.ReturnType.InferErrorSet => |node| { try renderToken(tree, stream, tree.prevToken(node.firstToken()), indent, start_col, Space.None); // ! return renderExpression(allocator, stream, tree, indent, start_col, node, space); }, } }, .AnyFrameType => { const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base); if (anyframe_type.result) |result| { try renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, Space.None); // anyframe try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // -> return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space); } else { return renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, space); // anyframe } }, .DocComment => unreachable, // doc comments are attached to nodes .Switch => { const switch_node = @fieldParentPtr(ast.Node.Switch, "base", base); try renderToken(tree, stream, switch_node.switch_token, indent, start_col, Space.Space); // switch try renderToken(tree, stream, tree.nextToken(switch_node.switch_token), indent, start_col, Space.None); // ( const rparen = tree.nextToken(switch_node.expr.lastToken()); const lbrace = tree.nextToken(rparen); if (switch_node.cases.len == 0) { try renderExpression(allocator, 
stream, tree, indent, start_col, switch_node.expr, Space.None); try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // { return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // } } try renderExpression(allocator, stream, tree, indent, start_col, switch_node.expr, Space.None); const new_indent = indent + indent_delta; try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); // { var it = switch_node.cases.iterator(0); while (it.next()) |node| { try stream.writeByteNTimes(' ', new_indent); try renderExpression(allocator, stream, tree, new_indent, start_col, node.*, Space.Comma); if (it.peek()) |next_node| { try renderExtraNewline(tree, stream, start_col, next_node.*); } } try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // } }, .SwitchCase => { const switch_case = @fieldParentPtr(ast.Node.SwitchCase, "base", base); assert(switch_case.items.len != 0); const src_has_trailing_comma = blk: { const last_node = switch_case.items.at(switch_case.items.len - 1).*; const maybe_comma = tree.nextToken(last_node.lastToken()); break :blk tree.tokens.at(maybe_comma).id == .Comma; }; if (switch_case.items.len == 1 or !src_has_trailing_comma) { var it = switch_case.items.iterator(0); while (it.next()) |node| { if (it.peek()) |next_node| { try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.None); const comma_token = tree.nextToken(node.*.lastToken()); try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // , try renderExtraNewline(tree, stream, start_col, next_node.*); } else { try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.Space); } } } else { var it = switch_case.items.iterator(0); while (true) { const node = 
it.next().?; if (it.peek()) |next_node| { try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.None); const comma_token = tree.nextToken(node.*.lastToken()); try renderToken(tree, stream, comma_token, indent, start_col, Space.Newline); // , try renderExtraNewline(tree, stream, start_col, next_node.*); try stream.writeByteNTimes(' ', indent); } else { try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.Comma); try stream.writeByteNTimes(' ', indent); break; } } } try renderToken(tree, stream, switch_case.arrow_token, indent, start_col, Space.Space); // => if (switch_case.payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); } return renderExpression(allocator, stream, tree, indent, start_col, switch_case.expr, space); }, .SwitchElse => { const switch_else = @fieldParentPtr(ast.Node.SwitchElse, "base", base); return renderToken(tree, stream, switch_else.token, indent, start_col, space); }, .Else => { const else_node = @fieldParentPtr(ast.Node.Else, "base", base); const body_is_block = nodeIsBlock(else_node.body); const same_line = body_is_block or tree.tokensOnSameLine(else_node.else_token, else_node.body.lastToken()); const after_else_space = if (same_line or else_node.payload != null) Space.Space else Space.Newline; try renderToken(tree, stream, else_node.else_token, indent, start_col, after_else_space); if (else_node.payload) |payload| { const payload_space = if (same_line) Space.Space else Space.Newline; try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space); } if (same_line) { return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space); } try stream.writeByteNTimes(' ', indent + indent_delta); start_col.* = indent + indent_delta; return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space); }, .While => { const while_node = @fieldParentPtr(ast.Node.While, "base", 
base); if (while_node.label) |label| { try renderToken(tree, stream, label, indent, start_col, Space.None); // label try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // : } if (while_node.inline_token) |inline_token| { try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline } try renderToken(tree, stream, while_node.while_token, indent, start_col, Space.Space); // while try renderToken(tree, stream, tree.nextToken(while_node.while_token), indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, while_node.condition, Space.None); const cond_rparen = tree.nextToken(while_node.condition.lastToken()); const body_is_block = nodeIsBlock(while_node.body); var block_start_space: Space = undefined; var after_body_space: Space = undefined; if (body_is_block) { block_start_space = Space.BlockStart; after_body_space = if (while_node.@"else" == null) space else Space.SpaceOrOutdent; } else if (tree.tokensOnSameLine(cond_rparen, while_node.body.lastToken())) { block_start_space = Space.Space; after_body_space = if (while_node.@"else" == null) space else Space.Space; } else { block_start_space = Space.Newline; after_body_space = if (while_node.@"else" == null) space else Space.Newline; } { const rparen_space = if (while_node.payload != null or while_node.continue_expr != null) Space.Space else block_start_space; try renderToken(tree, stream, cond_rparen, indent, start_col, rparen_space); // ) } if (while_node.payload) |payload| { const payload_space = if (while_node.continue_expr != null) Space.Space else block_start_space; try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space); } if (while_node.continue_expr) |continue_expr| { const rparen = tree.nextToken(continue_expr.lastToken()); const lparen = tree.prevToken(continue_expr.firstToken()); const colon = tree.prevToken(lparen); try renderToken(tree, stream, colon, indent, start_col, 
Space.Space); // : try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, continue_expr, Space.None); try renderToken(tree, stream, rparen, indent, start_col, block_start_space); // ) } var new_indent = indent; if (block_start_space == Space.Newline) { new_indent += indent_delta; try stream.writeByteNTimes(' ', new_indent); start_col.* = new_indent; } try renderExpression(allocator, stream, tree, indent, start_col, while_node.body, after_body_space); if (while_node.@"else") |@"else"| { if (after_body_space == Space.Newline) { try stream.writeByteNTimes(' ', indent); start_col.* = indent; } return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space); } }, .For => { const for_node = @fieldParentPtr(ast.Node.For, "base", base); if (for_node.label) |label| { try renderToken(tree, stream, label, indent, start_col, Space.None); // label try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // : } if (for_node.inline_token) |inline_token| { try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline } try renderToken(tree, stream, for_node.for_token, indent, start_col, Space.Space); // for try renderToken(tree, stream, tree.nextToken(for_node.for_token), indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, for_node.array_expr, Space.None); const rparen = tree.nextToken(for_node.array_expr.lastToken()); const body_is_block = for_node.body.id == .Block; const src_one_line_to_body = !body_is_block and tree.tokensOnSameLine(rparen, for_node.body.firstToken()); const body_on_same_line = body_is_block or src_one_line_to_body; try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) const space_after_payload = if (body_on_same_line) Space.Space else Space.Newline; try renderExpression(allocator, stream, tree, indent, start_col, 
for_node.payload, space_after_payload); // |x| const space_after_body = blk: { if (for_node.@"else") |@"else"| { const src_one_line_to_else = tree.tokensOnSameLine(rparen, @"else".firstToken()); if (body_is_block or src_one_line_to_else) { break :blk Space.Space; } else { break :blk Space.Newline; } } else { break :blk space; } }; const body_indent = if (body_on_same_line) indent else indent + indent_delta; if (!body_on_same_line) try stream.writeByteNTimes(' ', body_indent); try renderExpression(allocator, stream, tree, body_indent, start_col, for_node.body, space_after_body); // { body } if (for_node.@"else") |@"else"| { if (space_after_body == Space.Newline) try stream.writeByteNTimes(' ', indent); return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space); // else } }, .If => { const if_node = @fieldParentPtr(ast.Node.If, "base", base); const lparen = tree.prevToken(if_node.condition.firstToken()); const rparen = tree.nextToken(if_node.condition.lastToken()); try renderToken(tree, stream, if_node.if_token, indent, start_col, Space.Space); // if try renderToken(tree, stream, lparen, indent, start_col, Space.None); // ( try renderExpression(allocator, stream, tree, indent, start_col, if_node.condition, Space.None); // condition const body_is_if_block = if_node.body.id == .If; const body_is_block = nodeIsBlock(if_node.body); if (body_is_if_block) { try renderExtraNewline(tree, stream, start_col, if_node.body); } else if (body_is_block) { const after_rparen_space = if (if_node.payload == null) Space.BlockStart else Space.Space; try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // ) if (if_node.payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.BlockStart); // |x| } if (if_node.@"else") |@"else"| { try renderExpression(allocator, stream, tree, indent, start_col, if_node.body, Space.SpaceOrOutdent); return renderExpression(allocator, stream, tree, indent, 
start_col, &@"else".base, space); } else { return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space); } } const src_has_newline = !tree.tokensOnSameLine(rparen, if_node.body.lastToken()); if (src_has_newline) { const after_rparen_space = if (if_node.payload == null) Space.Newline else Space.Space; try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // ) if (if_node.payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline); } const new_indent = indent + indent_delta; try stream.writeByteNTimes(' ', new_indent); if (if_node.@"else") |@"else"| { const else_is_block = nodeIsBlock(@"else".body); try renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, Space.Newline); try stream.writeByteNTimes(' ', indent); if (else_is_block) { try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space); // else if (@"else".payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); } return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space); } else { const after_else_space = if (@"else".payload == null) Space.Newline else Space.Space; try renderToken(tree, stream, @"else".else_token, indent, start_col, after_else_space); // else if (@"else".payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline); } try stream.writeByteNTimes(' ', new_indent); return renderExpression(allocator, stream, tree, new_indent, start_col, @"else".body, space); } } else { return renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, space); } } try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // ) if (if_node.payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); } if (if_node.@"else") |@"else"| { try renderExpression(allocator, 
stream, tree, indent, start_col, if_node.body, Space.Space); try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space); if (@"else".payload) |payload| { try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space); } return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space); } else { return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space); } }, .Asm => { const asm_node = @fieldParentPtr(ast.Node.Asm, "base", base); try renderToken(tree, stream, asm_node.asm_token, indent, start_col, Space.Space); // asm if (asm_node.volatile_token) |volatile_token| { try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile try renderToken(tree, stream, tree.nextToken(volatile_token), indent, start_col, Space.None); // ( } else { try renderToken(tree, stream, tree.nextToken(asm_node.asm_token), indent, start_col, Space.None); // ( } if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.None); return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); } try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.Newline); const indent_once = indent + indent_delta; if (asm_node.template.id == .MultilineStringLiteral) { // After rendering a multiline string literal the cursor is // already offset by indent try stream.writeByteNTimes(' ', indent_delta); } else { try stream.writeByteNTimes(' ', indent_once); } const colon1 = tree.nextToken(asm_node.template.lastToken()); const indent_extra = indent_once + 2; const colon2 = if (asm_node.outputs.len == 0) blk: { try renderToken(tree, stream, colon1, indent, start_col, Space.Newline); // : try stream.writeByteNTimes(' ', indent_once); break :blk tree.nextToken(colon1); } else blk: { try renderToken(tree, stream, colon1, 
indent, start_col, Space.Space); // : var it = asm_node.outputs.iterator(0); while (true) { const asm_output = it.next().?; const node = &(asm_output.*).base; if (it.peek()) |next_asm_output| { try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.None); const next_node = &(next_asm_output.*).base; const comma = tree.prevToken(next_asm_output.*.firstToken()); try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // , try renderExtraNewline(tree, stream, start_col, next_node); try stream.writeByteNTimes(' ', indent_extra); } else if (asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) { try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline); try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); } else { try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline); try stream.writeByteNTimes(' ', indent_once); const comma_or_colon = tree.nextToken(node.lastToken()); break :blk switch (tree.tokens.at(comma_or_colon).id) { .Comma => tree.nextToken(comma_or_colon), else => comma_or_colon, }; } } }; const colon3 = if (asm_node.inputs.len == 0) blk: { try renderToken(tree, stream, colon2, indent, start_col, Space.Newline); // : try stream.writeByteNTimes(' ', indent_once); break :blk tree.nextToken(colon2); } else blk: { try renderToken(tree, stream, colon2, indent, start_col, Space.Space); // : var it = asm_node.inputs.iterator(0); while (true) { const asm_input = it.next().?; const node = &(asm_input.*).base; if (it.peek()) |next_asm_input| { try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.None); const next_node = &(next_asm_input.*).base; const comma = tree.prevToken(next_asm_input.*.firstToken()); try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // , try renderExtraNewline(tree, stream, start_col, next_node); try 
stream.writeByteNTimes(' ', indent_extra); } else if (asm_node.clobbers.len == 0) { try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline); try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); // ) } else { try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline); try stream.writeByteNTimes(' ', indent_once); const comma_or_colon = tree.nextToken(node.lastToken()); break :blk switch (tree.tokens.at(comma_or_colon).id) { .Comma => tree.nextToken(comma_or_colon), else => comma_or_colon, }; } } }; try renderToken(tree, stream, colon3, indent, start_col, Space.Space); // : var it = asm_node.clobbers.iterator(0); while (true) { const clobber_node = it.next().?.*; if (it.peek() == null) { try renderExpression(allocator, stream, tree, indent_extra, start_col, clobber_node, Space.Newline); try stream.writeByteNTimes(' ', indent); return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); } else { try renderExpression(allocator, stream, tree, indent_extra, start_col, clobber_node, Space.None); const comma = tree.nextToken(clobber_node.lastToken()); try renderToken(tree, stream, comma, indent_once, start_col, Space.Space); // , } } }, .AsmInput => { const asm_input = @fieldParentPtr(ast.Node.AsmInput, "base", base); try stream.write("["); try renderExpression(allocator, stream, tree, indent, start_col, asm_input.symbolic_name, Space.None); try stream.write("] "); try renderExpression(allocator, stream, tree, indent, start_col, asm_input.constraint, Space.None); try stream.write(" ("); try renderExpression(allocator, stream, tree, indent, start_col, asm_input.expr, Space.None); return renderToken(tree, stream, asm_input.lastToken(), indent, start_col, space); // ) }, .AsmOutput => { const asm_output = @fieldParentPtr(ast.Node.AsmOutput, "base", base); try stream.write("["); try renderExpression(allocator, stream, tree, 
indent, start_col, asm_output.symbolic_name, Space.None);
            try stream.write("] ");
            try renderExpression(allocator, stream, tree, indent, start_col, asm_output.constraint, Space.None);
            try stream.write(" (");

            // An asm output either binds to a variable or names a return type.
            switch (asm_output.kind) {
                ast.Node.AsmOutput.Kind.Variable => |variable_name| {
                    try renderExpression(allocator, stream, tree, indent, start_col, &variable_name.base, Space.None);
                },
                ast.Node.AsmOutput.Kind.Return => |return_type| {
                    try stream.write("-> ");
                    try renderExpression(allocator, stream, tree, indent, start_col, return_type, Space.None);
                },
            }

            return renderToken(tree, stream, asm_output.lastToken(), indent, start_col, space); // )
        },

        .EnumLiteral => {
            const enum_literal = @fieldParentPtr(ast.Node.EnumLiteral, "base", base);

            try renderToken(tree, stream, enum_literal.dot, indent, start_col, Space.None); // .
            return renderToken(tree, stream, enum_literal.name, indent, start_col, space); // name
        },

        // These node kinds are handled by dedicated helpers or their parent
        // node's renderer (e.g. renderVarDecl, renderParamDecl), never by
        // renderExpression itself.
        .ContainerField,
        .Root,
        .VarDecl,
        .Use,
        .TestDecl,
        .ParamDecl,
        => unreachable,
    }
}

/// Renders a complete variable/constant declaration: the optional `pub`,
/// extern/export (with optional lib name), `comptime`, and `threadlocal`
/// prefixes, the mutability keyword, the name, and then — each optional —
/// the type annotation, `align(...)`, `linksection(...)`, and initializer,
/// finishing with the terminating semicolon followed by a newline.
fn renderVarDecl(
    allocator: *mem.Allocator,
    stream: var,
    tree: *ast.Tree,
    indent: usize,
    start_col: *usize,
    var_decl: *ast.Node.VarDecl,
) (@TypeOf(stream).Child.Error || Error)!void {
    if (var_decl.visib_token) |visib_token| {
        try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
    }

    if (var_decl.extern_export_token) |extern_export_token| {
        try renderToken(tree, stream, extern_export_token, indent, start_col, Space.Space); // extern
        if (var_decl.lib_name) |lib_name| {
            try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space); // "lib"
        }
    }

    if (var_decl.comptime_token) |comptime_token| {
        try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space); // comptime
    }

    if (var_decl.thread_local_token) |thread_local_token| {
        try renderToken(tree, stream, thread_local_token, indent, start_col, Space.Space); // threadlocal
    }
    try renderToken(tree, stream, var_decl.mut_token, indent, start_col, Space.Space); // var

    // The name gets a trailing space only when no type annotation follows but
    // something else (align/linksection/initializer) still comes after it.
    const name_space = if (var_decl.type_node == null and (var_decl.align_node != null or
        var_decl.section_node != null or var_decl.init_node != null)) Space.Space else Space.None;
    try renderToken(tree, stream, var_decl.name_token, indent, start_col, name_space);

    if (var_decl.type_node) |type_node| {
        // Token right after the name is the `:` of the type annotation.
        try renderToken(tree, stream, tree.nextToken(var_decl.name_token), indent, start_col, Space.Space);
        const s = if (var_decl.align_node != null or
            var_decl.section_node != null or
            var_decl.init_node != null) Space.Space else Space.None;
        try renderExpression(allocator, stream, tree, indent, start_col, type_node, s);
    }

    if (var_decl.align_node) |align_node| {
        const lparen = tree.prevToken(align_node.firstToken());
        const align_kw = tree.prevToken(lparen);
        const rparen = tree.nextToken(align_node.lastToken());
        try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
        try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
        try renderExpression(allocator, stream, tree, indent, start_col, align_node, Space.None);
        const s = if (var_decl.section_node != null or var_decl.init_node != null) Space.Space else Space.None;
        try renderToken(tree, stream, rparen, indent, start_col, s); // )
    }

    if (var_decl.section_node) |section_node| {
        const lparen = tree.prevToken(section_node.firstToken());
        const section_kw = tree.prevToken(lparen);
        const rparen = tree.nextToken(section_node.lastToken());
        try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // linksection
        try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
        try renderExpression(allocator, stream, tree, indent, start_col, section_node, Space.None);
        const s = if (var_decl.init_node != null) Space.Space else Space.None;
        try renderToken(tree, stream, rparen, indent, start_col, s); // )
    }

    if (var_decl.init_node) |init_node| {
        // No space after `=` when the initializer is a multiline string
        // literal (it starts on its own line).
        const s = if (init_node.id == .MultilineStringLiteral) Space.None else Space.Space;
        try renderToken(tree, stream, var_decl.eq_token, indent, start_col, s); // =
        try renderExpression(allocator, stream, tree, indent, start_col, init_node, Space.None);
    }

    try renderToken(tree, stream, var_decl.semicolon_token, indent, start_col, Space.Newline);
}

/// Renders a single function parameter declaration: its doc comments, the
/// optional `comptime`/`noalias` qualifiers, an optional `name:` prefix, and
/// then either the var-args token or the parameter's type expression, ending
/// with the caller-requested `space`.
fn renderParamDecl(
    allocator: *mem.Allocator,
    stream: var,
    tree: *ast.Tree,
    indent: usize,
    start_col: *usize,
    base: *ast.Node,
    space: Space,
) (@TypeOf(stream).Child.Error || Error)!void {
    const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", base);

    try renderDocComments(tree, stream, param_decl, indent, start_col);

    if (param_decl.comptime_token) |comptime_token| {
        try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space);
    }
    if (param_decl.noalias_token) |noalias_token| {
        try renderToken(tree, stream, noalias_token, indent, start_col, Space.Space);
    }
    if (param_decl.name_token) |name_token| {
        try renderToken(tree, stream, name_token, indent, start_col, Space.None);
        // Token right after the parameter name is the `:` separator.
        try renderToken(tree, stream, tree.nextToken(name_token), indent, start_col, Space.Space);
    }
    if (param_decl.var_args_token) |var_args_token| {
        try renderToken(tree, stream, var_args_token, indent, start_col, space);
    } else {
        try renderExpression(allocator, stream, tree, indent, start_col, param_decl.type_node, space);
    }
}

/// Renders one statement inside a block. Variable declarations go through
/// renderVarDecl; every other node is rendered as an expression, followed by
/// its terminating semicolon when the node requires one.
fn renderStatement(
    allocator: *mem.Allocator,
    stream: var,
    tree: *ast.Tree,
    indent: usize,
    start_col: *usize,
    base: *ast.Node,
) (@TypeOf(stream).Child.Error || Error)!void {
    switch (base.id) {
        .VarDecl => {
            const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base);
            try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
        },
        else => {
            if (base.requireSemiColon()) {
                try renderExpression(allocator, stream, tree, indent, start_col, base, Space.None);

                const semicolon_index = tree.nextToken(base.lastToken());
                assert(tree.tokens.at(semicolon_index).id == .Semicolon);
                try renderToken(tree, stream, semicolon_index, indent, start_col, Space.Newline);
            } else {
                try renderExpression(allocator, stream, tree, indent, start_col, base, Space.Newline);
            }
        },
    }
}

/// What to emit after a rendered token / how to place the next token.
/// See renderTokenOffset for the exact handling of each variant.
const Space = enum {
    None, // emit nothing after the token
    Newline, // emit a newline; output column resets to 0
    Comma, // emit a trailing comma (reusing a source comma/comment if present), then a newline
    Space, // emit a single space
    SpaceOrOutdent, // like Space, but after a trailing line comment drop back to `indent`
    NoNewline, // emit nothing, and do not force a newline
    NoComment, // emit the token only; skip all trailing-comment handling
    BlockStart, // about to open a block: space if the line still fits, otherwise newline + indent
};

/// Writes the token's source text (skipping the first `token_skip_bytes`
/// bytes and trimming trailing spaces), then emits whatever `space` requests
/// while preserving any same-line line/doc comments from the source.
/// `start_col.*` tracks the current output column where it is known.
fn renderTokenOffset(
    tree: *ast.Tree,
    stream: var,
    token_index: ast.TokenIndex,
    indent: usize,
    start_col: *usize,
    space: Space,
    token_skip_bytes: usize,
) (@TypeOf(stream).Child.Error || Error)!void {
    if (space == Space.BlockStart) {
        // If the current line is not already past one indent level, keep the
        // block opener on this line; otherwise start a fresh indented line.
        if (start_col.* < indent + indent_delta)
            return renderToken(tree, stream, token_index, indent, start_col, Space.Space);
        try renderToken(tree, stream, token_index, indent, start_col, Space.Newline);
        try stream.writeByteNTimes(' ', indent);
        start_col.* = indent;
        return;
    }

    var token = tree.tokens.at(token_index);
    try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(token)[token_skip_bytes..], " "));

    if (space == Space.NoComment) return;

    var next_token = tree.tokens.at(token_index + 1);

    // Space.Comma: re-use an existing comma (and keep a following line
    // comment on the same line), otherwise synthesize "," or ",\n".
    if (space == Space.Comma) switch (next_token.id) {
        .Comma => return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline),
        .LineComment => {
            try stream.write(", ");
            return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline);
        },
        else => {
            if (token_index + 2 < tree.tokens.len and tree.tokens.at(token_index + 2).id == .MultilineStringLiteralLine) {
                try stream.write(",");
                return;
            } else {
                try stream.write(",\n");
                start_col.* = 0;
                return;
            }
        },
    };

    // Skip over same line doc comments
    var offset: usize = 1;
    if (next_token.id == .DocComment) {
        const loc = tree.tokenLocationPtr(token.end, next_token);
        if (loc.line == 0) {
            offset += 1;
            next_token = tree.tokens.at(token_index + offset);
        }
    }

    // No trailing line comment: emit exactly what `space` asks for.
    if (next_token.id != .LineComment) blk: {
        switch (space) {
            Space.None, Space.NoNewline => return,
            Space.Newline => {
                if (next_token.id == .MultilineStringLiteralLine) {
                    return;
                } else {
                    try stream.write("\n");
                    start_col.* = 0;
                    return;
                }
            },
            Space.Space, Space.SpaceOrOutdent => {
                if (next_token.id == .MultilineStringLiteralLine)
                    return;
                try stream.writeByte(' ');
                return;
            },
            Space.NoComment, Space.Comma,
Space.BlockStart => unreachable, } } while (true) { const comment_is_empty = mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ").len == 2; if (comment_is_empty) { switch (space) { Space.Newline => { offset += 1; token = next_token; next_token = tree.tokens.at(token_index + offset); if (next_token.id != .LineComment) { try stream.writeByte('\n'); start_col.* = 0; return; } }, else => break, } } else { break; } } var loc = tree.tokenLocationPtr(token.end, next_token); if (loc.line == 0) { try stream.print(" {}", .{mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ")}); offset = 2; token = next_token; next_token = tree.tokens.at(token_index + offset); if (next_token.id != .LineComment) { switch (space) { Space.None, Space.Space => { try stream.writeByte('\n'); const after_comment_token = tree.tokens.at(token_index + offset); const next_line_indent = switch (after_comment_token.id) { .RParen, .RBrace, .RBracket => indent, else => indent + indent_delta, }; try stream.writeByteNTimes(' ', next_line_indent); start_col.* = next_line_indent; }, Space.SpaceOrOutdent => { try stream.writeByte('\n'); try stream.writeByteNTimes(' ', indent); start_col.* = indent; }, Space.Newline => { if (next_token.id == .MultilineStringLiteralLine) { return; } else { try stream.write("\n"); start_col.* = 0; return; } }, Space.NoNewline => {}, Space.NoComment, Space.Comma, Space.BlockStart => unreachable, } return; } loc = tree.tokenLocationPtr(token.end, next_token); } while (true) { assert(loc.line != 0); const newline_count = if (loc.line == 1) @as(u8, 1) else @as(u8, 2); try stream.writeByteNTimes('\n', newline_count); try stream.writeByteNTimes(' ', indent); try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ")); offset += 1; token = next_token; next_token = tree.tokens.at(token_index + offset); if (next_token.id != .LineComment) { switch (space) { Space.Newline => { if (next_token.id == .MultilineStringLiteralLine) { return; } else { try stream.write("\n"); 
start_col.* = 0; return; } }, Space.None, Space.Space => { try stream.writeByte('\n'); const after_comment_token = tree.tokens.at(token_index + offset); const next_line_indent = switch (after_comment_token.id) { .RParen, .RBrace, .RBracket => blk: { if (indent > indent_delta) { break :blk indent - indent_delta; } else { break :blk 0; } }, else => indent, }; try stream.writeByteNTimes(' ', next_line_indent); start_col.* = next_line_indent; }, Space.SpaceOrOutdent => { try stream.writeByte('\n'); try stream.writeByteNTimes(' ', indent); start_col.* = indent; }, Space.NoNewline => {}, Space.NoComment, Space.Comma, Space.BlockStart => unreachable, } return; } loc = tree.tokenLocationPtr(token.end, next_token); } } fn renderToken( tree: *ast.Tree, stream: var, token_index: ast.TokenIndex, indent: usize, start_col: *usize, space: Space, ) (@TypeOf(stream).Child.Error || Error)!void { return renderTokenOffset(tree, stream, token_index, indent, start_col, space, 0); } fn renderDocComments( tree: *ast.Tree, stream: var, node: var, indent: usize, start_col: *usize, ) (@TypeOf(stream).Child.Error || Error)!void { const comment = node.doc_comments orelse return; var it = comment.lines.iterator(0); const first_token = node.firstToken(); while (it.next()) |line_token_index| { if (line_token_index.* < first_token) { try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.Newline); try stream.writeByteNTimes(' ', indent); } else { try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.NoComment); try stream.write("\n"); try stream.writeByteNTimes(' ', indent); } } } fn nodeIsBlock(base: *const ast.Node) bool { return switch (base.id) { .Block, .If, .For, .While, .Switch, => true, else => false, }; } fn nodeCausesSliceOpSpace(base: *ast.Node) bool { const infix_op = base.cast(ast.Node.InfixOp) orelse return false; return switch (infix_op.op) { ast.Node.InfixOp.Op.Period => false, else => true, }; } // An OutStream that returns whether the 
given character has been written to it. // The contents are not written to anything. const FindByteOutStream = struct { const Self = FindByteOutStream; pub const Error = error{}; pub const Stream = std.io.OutStream(Error); stream: Stream, byte_found: bool, byte: u8, pub fn init(byte: u8) Self { return Self{ .stream = Stream{ .writeFn = writeFn }, .byte = byte, .byte_found = false, }; } fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void { const self = @fieldParentPtr(Self, "stream", out_stream); if (self.byte_found) return; self.byte_found = blk: { for (bytes) |b| if (b == self.byte) break :blk true; break :blk false; }; } }; fn copyFixingWhitespace(stream: var, slice: []const u8) @TypeOf(stream).Child.Error!void { for (slice) |byte| switch (byte) { '\t' => try stream.write(" "), '\r' => {}, else => try stream.writeByte(byte), }; }
lib/std/zig/render.zig
pub usingnamespace @import("builtin");

/// Deprecated: use `std.Target`.
pub const Target = std.Target;

/// Deprecated: use `std.Target.Os`.
pub const Os = std.Target.Os;

/// Deprecated: use `std.Target.Cpu.Arch`.
pub const Arch = std.Target.Cpu.Arch;

/// Deprecated: use `std.Target.Abi`.
pub const Abi = std.Target.Abi;

/// Deprecated: use `std.Target.ObjectFormat`.
pub const ObjectFormat = std.Target.ObjectFormat;

/// Deprecated: use `std.Target.SubSystem`.
pub const SubSystem = std.Target.SubSystem;

/// Deprecated: use `std.Target.Cpu`.
pub const Cpu = std.Target.Cpu;

/// `explicit_subsystem` is missing when the subsystem is automatically detected,
/// so Zig standard library has the subsystem detection logic here. This should generally be
/// used rather than `explicit_subsystem`.
/// On non-Windows targets, this is `null`.
pub const subsystem: ?SubSystem = blk: {
    if (@hasDecl(@This(), "explicit_subsystem")) break :blk explicit_subsystem;
    switch (os.tag) {
        .windows => {
            if (is_test) {
                break :blk SubSystem.Console;
            }
            if (@hasDecl(root, "main") or
                @hasDecl(root, "WinMain") or
                @hasDecl(root, "wWinMain") or
                @hasDecl(root, "WinMainCRTStartup") or
                @hasDecl(root, "wWinMainCRTStartup"))
            {
                break :blk SubSystem.Windows;
            } else {
                break :blk SubSystem.Console;
            }
        },
        else => break :blk null,
    }
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const StackTrace = struct {
    index: usize,
    instruction_addresses: []usize,

    pub fn format(
        self: StackTrace,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();
        const debug_info = std.debug.getSelfDebugInfo() catch |err| {
            return writer.print("\nUnable to print stack trace: Unable to open debug info: {}\n", .{@errorName(err)});
        };
        const tty_config = std.debug.detectTTYConfig();
        try writer.writeAll("\n");
        std.debug.writeStackTrace(self, writer, &arena.allocator, debug_info, tty_config) catch |err| {
            try writer.print("Unable to print stack trace: {}\n", .{@errorName(err)});
        };
        try writer.writeAll("\n");
    }
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const GlobalLinkage = enum {
    Internal,
    Strong,
    Weak,
    LinkOnce,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const AtomicOrder = enum {
    Unordered,
    Monotonic,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const ReduceOp = enum {
    And,
    Or,
    Xor,
    Min,
    Max,
    Add,
    Mul,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const AtomicRmwOp = enum {
    Xchg,
    Add,
    Sub,
    And,
    Nand,
    Or,
    Xor,
    Max,
    Min,
};

/// The code model puts constraints on the location of symbols and the size of code and data.
/// The selection of a code model is a trade off on speed and restrictions that needs to be selected on a per application basis to meet its requirements.
/// A slightly more detailed explanation can be found in (for example) the [System V Application Binary Interface (x86_64)](https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf) 3.5.1.
///
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CodeModel = enum {
    default,
    tiny,
    small,
    kernel,
    medium,
    large,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Mode = enum {
    Debug,
    ReleaseSafe,
    ReleaseFast,
    ReleaseSmall,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallingConvention = enum {
    Unspecified,
    C,
    Naked,
    Async,
    Interrupt,
    Signal,
    Stdcall,
    Fastcall,
    Vectorcall,
    Thiscall,
    APCS,
    AAPCS,
    AAPCSVFP,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const SourceLocation = struct {
    file: [:0]const u8,
    fn_name: [:0]const u8,
    line: u32,
    column: u32,
};

pub const TypeId = @TagType(TypeInfo);

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const TypeInfo = union(enum) {
    Type: void,
    Void: void,
    Bool: void,
    NoReturn: void,
    Int: Int,
    Float: Float,
    Pointer: Pointer,
    Array: Array,
    Struct: Struct,
    ComptimeFloat: void,
    ComptimeInt: void,
    Undefined: void,
    Null: void,
    Optional: Optional,
    ErrorUnion: ErrorUnion,
    ErrorSet: ErrorSet,
    Enum: Enum,
    Union: Union,
    Fn: Fn,
    BoundFn: Fn,
    Opaque: Opaque,
    Frame: Frame,
    AnyFrame: AnyFrame,
    Vector: Vector,
    EnumLiteral: void,

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Int = struct {
        signedness: Signedness,
        bits: comptime_int,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Float = struct {
        bits: comptime_int,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Pointer = struct {
        size: Size,
        is_const: bool,
        is_volatile: bool,
        alignment: comptime_int,
        child: type,
        is_allowzero: bool,

        /// This field is an optional type.
        /// The type of the sentinel is the element type of the pointer, which is
        /// the value of the `child` field in this struct. However there is no way
        /// to refer to that type here, so we use `anytype`.
        sentinel: anytype,

        /// This data structure is used by the Zig language code generation and
        /// therefore must be kept in sync with the compiler implementation.
        pub const Size = enum {
            One,
            Many,
            Slice,
            C,
        };
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Array = struct {
        len: comptime_int,
        child: type,

        /// This field is an optional type.
        /// The type of the sentinel is the element type of the array, which is
        /// the value of the `child` field in this struct. However there is no way
        /// to refer to that type here, so we use `anytype`.
        sentinel: anytype,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const ContainerLayout = enum {
        Auto,
        Extern,
        Packed,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const StructField = struct {
        name: []const u8,
        field_type: type,
        default_value: anytype,
        is_comptime: bool,
        alignment: comptime_int,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Struct = struct {
        layout: ContainerLayout,
        fields: []const StructField,
        decls: []const Declaration,
        is_tuple: bool,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Optional = struct {
        child: type,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const ErrorUnion = struct {
        error_set: type,
        payload: type,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Error = struct {
        name: []const u8,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const ErrorSet = ?[]const Error;

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const EnumField = struct {
        name: []const u8,
        value: comptime_int,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Enum = struct {
        layout: ContainerLayout,
        tag_type: type,
        fields: []const EnumField,
        decls: []const Declaration,
        is_exhaustive: bool,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const UnionField = struct {
        name: []const u8,
        field_type: type,
        alignment: comptime_int,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Union = struct {
        layout: ContainerLayout,
        tag_type: ?type,
        fields: []const UnionField,
        decls: []const Declaration,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const FnArg = struct {
        is_generic: bool,
        is_noalias: bool,
        arg_type: ?type,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Fn = struct {
        calling_convention: CallingConvention,
        alignment: comptime_int,
        is_generic: bool,
        is_var_args: bool,
        return_type: ?type,
        args: []const FnArg,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Opaque = struct {
        decls: []const Declaration,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Frame = struct {
        function: anytype,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const AnyFrame = struct {
        child: ?type,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Vector = struct {
        len: comptime_int,
        child: type,
    };

    /// This data structure is used by the Zig language code generation and
    /// therefore must be kept in sync with the compiler implementation.
    pub const Declaration = struct {
        name: []const u8,
        is_pub: bool,
        data: Data,

        /// This data structure is used by the Zig language code generation and
        /// therefore must be kept in sync with the compiler implementation.
        pub const Data = union(enum) {
            Type: type,
            Var: type,
            Fn: FnDecl,

            /// This data structure is used by the Zig language code generation and
            /// therefore must be kept in sync with the compiler implementation.
            pub const FnDecl = struct {
                fn_type: type,
                inline_type: Inline,
                is_var_args: bool,
                is_extern: bool,
                is_export: bool,
                lib_name: ?[]const u8,
                return_type: type,
                arg_names: []const []const u8,

                /// This data structure is used by the Zig language code generation and
                /// therefore must be kept in sync with the compiler implementation.
                pub const Inline = enum {
                    Auto,
                    Always,
                    Never,
                };
            };
        };
    };
    // NOTE(fix): the collapsed source was missing this closing brace for the
    // TypeInfo union (only four `};` followed `Inline`, leaving the union open).
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const FloatMode = enum {
    Strict,
    Optimized,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Endian = enum {
    Big,
    Little,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Signedness = enum {
    signed,
    unsigned,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const OutputMode = enum {
    Exe,
    Lib,
    Obj,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const LinkMode = enum {
    Static,
    Dynamic,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Version = struct {
    major: u32,
    minor: u32,
    patch: u32 = 0,

    /// An inclusive version range [min, max].
    pub const Range = struct {
        min: Version,
        max: Version,

        pub fn includesVersion(self: Range, ver: Version) bool {
            if (self.min.order(ver) == .gt) return false;
            if (self.max.order(ver) == .lt) return false;
            return true;
        }

        /// Checks if system is guaranteed to be at least `version` or older than `version`.
        /// Returns `null` if a runtime check is required.
        pub fn isAtLeast(self: Range, ver: Version) ?bool {
            if (self.min.order(ver) != .lt) return true;
            if (self.max.order(ver) == .lt) return false;
            return null;
        }
    };

    /// Lexicographic comparison on (major, minor, patch).
    pub fn order(lhs: Version, rhs: Version) std.math.Order {
        if (lhs.major < rhs.major) return .lt;
        if (lhs.major > rhs.major) return .gt;
        if (lhs.minor < rhs.minor) return .lt;
        if (lhs.minor > rhs.minor) return .gt;
        if (lhs.patch < rhs.patch) return .lt;
        if (lhs.patch > rhs.patch) return .gt;
        return .eq;
    }

    /// Best-effort parse of a `major[.minor[.patch]]` prefix; trailing
    /// vendor suffixes (e.g. "-rc1", "_3") are ignored.
    pub fn parse(text: []const u8) !Version {
        var end: usize = 0;
        while (end < text.len) : (end += 1) {
            const c = text[end];
            if (!std.ascii.isDigit(c) and c != '.') break;
        }
        // found no digits or '.' before unexpected character
        if (end == 0) return error.InvalidVersion;

        var it = std.mem.split(text[0..end], ".");
        // substring is not empty, first call will succeed
        const major = it.next().?;
        if (major.len == 0) return error.InvalidVersion;
        const minor = it.next() orelse "0";
        // ignore 'patch' if 'minor' is invalid
        const patch = if (minor.len == 0) "0" else (it.next() orelse "0");

        return Version{
            .major = try std.fmt.parseUnsigned(u32, major, 10),
            .minor = try std.fmt.parseUnsigned(u32, if (minor.len == 0) "0" else minor, 10),
            .patch = try std.fmt.parseUnsigned(u32, if (patch.len == 0) "0" else patch, 10),
        };
    }

    /// Prints the shortest form: "1", "1.2", or "1.2.3".
    pub fn format(
        self: Version,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: anytype,
    ) !void {
        if (fmt.len == 0) {
            if (self.patch == 0) {
                if (self.minor == 0) {
                    return std.fmt.format(out_stream, "{}", .{self.major});
                } else {
                    return std.fmt.format(out_stream, "{}.{}", .{ self.major, self.minor });
                }
            } else {
                return std.fmt.format(out_stream, "{}.{}.{}", .{ self.major, self.minor, self.patch });
            }
        } else {
            @compileError("Unknown format string: '" ++ fmt ++ "'");
        }
    }
};

test "Version.parse" {
    @setEvalBranchQuota(3000);
    try testVersionParse();
    comptime (try testVersionParse());
}

pub fn testVersionParse() !void {
    const f = struct {
        fn eql(text: []const u8, v1: u32, v2: u32, v3: u32) !void {
            const v = try Version.parse(text);
            std.testing.expect(v.major == v1 and v.minor == v2 and v.patch == v3);
        }

        fn err(text: []const u8, expected_err: anyerror) !void {
            _ = Version.parse(text) catch |actual_err| {
                if (actual_err == expected_err) return;
                return actual_err;
            };
            return error.Unreachable;
        }
    };

    try f.eql("2.6.32.11-svn21605", 2, 6, 32); // Debian PPC
    try f.eql("2.11.2(0.329/5/3)", 2, 11, 2); // MinGW
    try f.eql("5.4.0-1018-raspi", 5, 4, 0); // Ubuntu
    try f.eql("5.7.12_3", 5, 7, 12); // Void
    try f.eql("2.13-DEVELOPMENT", 2, 13, 0); // DragonFly
    try f.eql("2.3-35", 2, 3, 0);
    try f.eql("1a.4", 1, 0, 0);
    try f.eql("3.b1.0", 3, 0, 0);
    try f.eql("1.4beta", 1, 4, 0);
    try f.eql("2.7.pre", 2, 7, 0);
    try f.eql("0..3", 0, 0, 0);
    try f.eql("8.008.", 8, 8, 0);
    try f.eql("01...", 1, 0, 0);
    try f.eql("55", 55, 0, 0);
    try f.eql("4294967295.0.1", 4294967295, 0, 1);
    try f.eql("429496729_6", 429496729, 0, 0);

    try f.err("foobar", error.InvalidVersion);
    try f.err("", error.InvalidVersion);
    try f.err("-1", error.InvalidVersion);
    try f.err("+4", error.InvalidVersion);
    try f.err(".", error.InvalidVersion);
    try f.err("....3", error.InvalidVersion);
    try f.err("4294967296", error.Overflow);
    try f.err("5000877755", error.Overflow);
    // error.InvalidCharacter is not possible anymore
}

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallOptions = struct {
    modifier: Modifier = .auto,

    /// Only valid when `Modifier` is `Modifier.async_kw`.
    stack: ?[]align(std.Target.stack_align) u8 = null,

    pub const Modifier = enum {
        /// Equivalent to function call syntax.
        auto,

        /// Equivalent to async keyword used with function call syntax.
        async_kw,

        /// Prevents tail call optimization. This guarantees that the return
        /// address will point to the callsite, as opposed to the callsite's
        /// callsite. If the call is otherwise required to be tail-called
        /// or inlined, a compile error is emitted instead.
        never_tail,

        /// Guarantees that the call will not be inlined. If the call is
        /// otherwise required to be inlined, a compile error is emitted instead.
        never_inline,

        /// Asserts that the function call will not suspend. This allows a
        /// non-async function to call an async function.
        no_async,

        /// Guarantees that the call will be generated with tail call optimization.
        /// If this is not possible, a compile error is emitted instead.
        always_tail,

        /// Guarantees that the call will inlined at the callsite.
        /// If this is not possible, a compile error is emitted instead.
        always_inline,

        /// Evaluates the call at compile-time. If the call cannot be completed at
        /// compile-time, a compile error is emitted instead.
        compile_time,
    };
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const ExportOptions = struct {
    name: []const u8,
    linkage: GlobalLinkage = .Strong,
    section: ?[]const u8 = null,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const ExternOptions = struct {
    name: []const u8,
    library_name: ?[]const u8 = null,
    linkage: GlobalLinkage = .Strong,
    is_thread_local: bool = false,
};

/// This function type is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const TestFn = struct {
    name: []const u8,
    func: fn () anyerror!void,
    async_frame_size: ?usize,
};

/// This function type is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const PanicFn = fn ([]const u8, ?*StackTrace) noreturn;

/// This function is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const panic: PanicFn = if (@hasDecl(root, "panic")) root.panic else default_panic;

/// This function is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn {
    @setCold(true);
    // Allow the root source file to override the panic handler via root.os.
    if (@hasDecl(root, "os") and @hasDecl(root.os, "panic")) {
        root.os.panic(msg, error_return_trace);
        unreachable;
    }
    switch (os.tag) {
        .freestanding => {
            while (true) {
                @breakpoint();
            }
        },
        .wasi => {
            std.debug.warn("{}", .{msg});
            std.os.abort();
        },
        .uefi => {
            // TODO look into using the debug info and logging helpful messages
            std.os.abort();
        },
        else => {
            const first_trace_addr = @returnAddress();
            std.debug.panicExtra(error_return_trace, first_trace_addr, "{}", .{msg});
        },
    }
}

const std = @import("std.zig");
const root = @import("root");
lib/std/builtin.zig
const assert = @import("std").debug.assert;
const mem = @import("std").mem;

test "arrays" {
    var array: [5]u32 = undefined;

    // Fill array[0..5] with 1..5; `i = array[i]` advances by the value just stored.
    var i: u32 = 0;
    while (i < 5) {
        array[i] = i + 1;
        i = array[i];
    }

    i = 0;
    var accumulator = u32(0);
    while (i < 5) {
        accumulator += array[i];
        i += 1;
    }

    assert(accumulator == 15);
    assert(getArrayLen(array) == 5);
}
fn getArrayLen(a: []const u32) usize {
    return a.len;
}

test "void arrays" {
    var array: [4]void = undefined;
    array[0] = void{};
    array[1] = array[2];
    assert(@sizeOf(@typeOf(array)) == 0);
    assert(array.len == 4);
}

test "array literal" {
    const hex_mult = []u16{ 4096, 256, 16, 1 };
    assert(hex_mult.len == 4);
    assert(hex_mult[1] == 256);
}

test "array dot len const expr" {
    assert(comptime x: {
        break :x some_array.len == 4;
    });
}

const ArrayDotLenConstExpr = struct {
    y: [some_array.len]u8,
};
const some_array = []u8{ 0, 1, 2, 3 };

test "nested arrays" {
    const array_of_strings = [][]const u8{ "hello", "this", "is", "my", "thing" };
    for (array_of_strings) |s, i| {
        if (i == 0) assert(mem.eql(u8, s, "hello"));
        if (i == 1) assert(mem.eql(u8, s, "this"));
        if (i == 2) assert(mem.eql(u8, s, "is"));
        if (i == 3) assert(mem.eql(u8, s, "my"));
        if (i == 4) assert(mem.eql(u8, s, "thing"));
    }
}

var s_array: [8]Sub = undefined;
const Sub = struct {
    b: u8,
};
const Str = struct {
    a: []Sub,
};
test "set global var array via slice embedded in struct" {
    var s = Str{ .a = s_array[0..] };

    s.a[0].b = 1;
    s.a[1].b = 2;
    s.a[2].b = 3;

    assert(s_array[0].b == 1);
    assert(s_array[1].b == 2);
    assert(s_array[2].b == 3);
}

test "array literal with specified size" {
    var array = [2]u8{ 1, 2 };
    assert(array[0] == 1);
    assert(array[1] == 2);
}

test "array child property" {
    var x: [5]i32 = undefined;
    assert(@typeOf(x).Child == i32);
}

test "array len property" {
    var x: [5]i32 = undefined;
    assert(@typeOf(x).len == 5);
}

test "array len field" {
    var arr = [4]u8{ 0, 0, 0, 0 };
    var ptr = &arr;
    assert(arr.len == 4);
    comptime assert(arr.len == 4);
    assert(ptr.len == 4);
    comptime assert(ptr.len == 4);
}

test "single-item pointer to array indexing and slicing" {
    testSingleItemPtrArrayIndexSlice();
    comptime testSingleItemPtrArrayIndexSlice();
}

fn testSingleItemPtrArrayIndexSlice() void {
    var array = "aaaa";
    doSomeMangling(&array);
    assert(mem.eql(u8, "azya", array));
}

fn doSomeMangling(array: *[4]u8) void {
    array[1] = 'z';
    array[2..3][0] = 'y';
}

test "implicit cast single-item pointer" {
    testImplicitCastSingleItemPtr();
    comptime testImplicitCastSingleItemPtr();
}

fn testImplicitCastSingleItemPtr() void {
    var byte: u8 = 100;
    const slice = (*[1]u8)(&byte)[0..];
    slice[0] += 1;
    assert(byte == 101);
}
test/cases/array.zig
const mmio = @import("mmio.zig");
const timer = @import("../../timer.zig");

/// A pair of 32-bit pin-mask registers (pins 0..31 in `first`, pins 32+ in
/// `second`) followed by the reserved padding word that trails each such
/// register pair in the BCM2835 GPIO register map.
const Property = struct {
    first: u32,
    second: u32,
    reserved: u32,

    /// Set the bit corresponding to `pin` in the appropriate register.
    fn enable_pin(self: *volatile Property, pin: u8) void {
        var one: u32 = 1;
        if (pin < 32) {
            self.first |= (one << @intCast(u5, pin));
        } else {
            self.second |= (one << @intCast(u5, pin - 32));
        }
    }

    /// Clear the bit corresponding to `pin` in the appropriate register.
    fn disable_pin(self: *volatile Property, pin: u8) void {
        var one: u32 = 1;
        if (pin < 32) {
            // Bug fix: this previously used `|= ~mask`, which set every bit
            // EXCEPT `pin` instead of clearing `pin`'s bit.
            self.first &= ~(one << @intCast(u5, pin));
        } else {
            self.second &= ~(one << @intCast(u5, pin - 32));
        }
    }

    /// Zero both mask registers.
    fn clear(self: *volatile Property) void {
        self.first = 0;
        self.second = 0;
    }
};

/// BCM2835 GPIO register block.
/// Reference: https://www.raspberrypi.org/app/uploads/2012/02/BCM2835-ARM-Peripherals.pdf
const GPIO = struct {
    functions: [6]u32,
    reserved1: u32,
    output_set: Property,
    output_clear: Property,
    pin_level: Property,
    event_detect_system: Property,
    rising_edgne_detect: Property, // (sic) name kept for source compatibility
    falling_edge_detect: Property,
    high_detect: Property,
    low_detect: Property,
    async_rising_edge: Property,
    async_falling_edge: Property,
    pull_up_down: u32,
    pull_up_down_clock: Property,

    /// Bit offset of `pin`'s 3-bit function field within its GPFSELn register
    /// (10 pins per register, 3 bits per pin).
    fn get_pin_offset(pin: u8) u5 {
        return @intCast(u5, (pin % 10) * 3);
    }

    /// Clear the three function-select bits belonging to `pin` in `state`.
    fn zero_out_pin_bits(state: *volatile u32, pin: u8) void {
        var pin_offset: u5 = GPIO.get_pin_offset(pin);
        var ones: u32 = 0b111;
        var mask: u32 = ones << pin_offset;
        state.* &= ~mask;
    }

    /// Program the function-select field of `pin` to `mode`.
    fn set_pin_mode(self: *volatile GPIO, pin: u8, mode: Mode) void {
        var select = &self.functions[pin / 10];
        GPIO.zero_out_pin_bits(select, pin);
        select.* |= (mode.to_number() << GPIO.get_pin_offset(pin));
    }
};

pub const Mode = enum {
    input = 0b000, // GPIO Pin is an input
    output = 0b001, // GPIO Pin is an output
    alt0 = 0b100, // GPIO Pin takes alternate function 0
    alt1 = 0b101, // GPIO Pin takes alternate function 1
    alt2 = 0b110, // GPIO Pin takes alternate function 2
    alt3 = 0b111, // GPIO Pin takes alternate function 3
    alt4 = 0b011, // GPIO Pin takes alternate function 4
    alt5 = 0b010, // GPIO Pin takes alternate function 5

    /// Raw function-select field value for this mode.
    fn to_number(self: Mode) u32 {
        return @enumToInt(self);
    }
};

var gpio align(32) = @intToPtr(*volatile GPIO, mmio.GPIO);

/// Set every pin in `pins` to `mode`, then run the pull-up/down clocking
/// sequence the BCM2835 requires (two 150-cycle waits per the datasheet).
pub fn set_pins_mode(pins: []u8, mode: Mode) void {
    if (pins.len == 0) {
        return;
    }
    for (pins) |pin| {
        gpio.set_pin_mode(pin, mode);
    }
    gpio.pull_up_down = 0;
    timer.wait_for_cicles(150);
    for (pins) |pin| {
        gpio.pull_up_down_clock.enable_pin(pin);
    }
    timer.wait_for_cicles(150);
    gpio.pull_up_down_clock.clear();
}

/// Convenience wrapper: set a single pin's mode.
pub fn set_pin_mode(pin: u8, mode: Mode) void {
    var pins = [_]u8{pin};
    set_pins_mode(&pins, mode);
}

/// Drive `pin`'s bit in the output-set register: set it when `on`,
/// clear it otherwise.
pub fn output_set(pin: u8, on: bool) void {
    if (on) {
        gpio.output_set.enable_pin(pin);
    } else {
        gpio.output_set.disable_pin(pin);
    }
}

test "gpio registers" {
    const expectEqual = @import("std").testing.expectEqual;
    var registers = @intToPtr(*GPIO, 0x10000000);
    expectEqual(@as(usize, 0x10000000), @ptrToInt(&registers.functions));
    expectEqual(@as(usize, 0x1000001C), @ptrToInt(&registers.output_set.first));
    expectEqual(@as(usize, 0x10000020), @ptrToInt(&registers.output_set.second));
    expectEqual(@as(usize, 0x10000028), @ptrToInt(&registers.output_clear.first));
    expectEqual(@as(usize, 0x1000002C), @ptrToInt(&registers.output_clear.second));
    expectEqual(@as(usize, 0x10000034), @ptrToInt(&registers.pin_level.first));
    expectEqual(@as(usize, 0x10000038), @ptrToInt(&registers.pin_level.second));
    expectEqual(@as(usize, 0x10000040), @ptrToInt(&registers.event_detect_system.first));
    expectEqual(@as(usize, 0x10000044), @ptrToInt(&registers.event_detect_system.second));
    expectEqual(@as(usize, 0x1000004C), @ptrToInt(&registers.rising_edgne_detect.first));
    expectEqual(@as(usize, 0x10000050), @ptrToInt(&registers.rising_edgne_detect.second));
    expectEqual(@as(usize, 0x10000058), @ptrToInt(&registers.falling_edge_detect.first));
    expectEqual(@as(usize, 0x1000005C), @ptrToInt(&registers.falling_edge_detect.second));
    expectEqual(@as(usize, 0x10000064), @ptrToInt(&registers.high_detect.first));
    expectEqual(@as(usize, 0x10000068), @ptrToInt(&registers.high_detect.second));
    expectEqual(@as(usize, 0x10000070), @ptrToInt(&registers.low_detect.first));
    expectEqual(@as(usize, 0x10000074), @ptrToInt(&registers.low_detect.second));
    expectEqual(@as(usize, 0x1000007C), @ptrToInt(&registers.async_rising_edge.first));
    expectEqual(@as(usize, 0x10000080), @ptrToInt(&registers.async_rising_edge.second));
    expectEqual(@as(usize, 0x10000088), @ptrToInt(&registers.async_falling_edge.first));
    expectEqual(@as(usize, 0x1000008C), @ptrToInt(&registers.async_falling_edge.second));
    expectEqual(@as(usize, 0x10000094), @ptrToInt(&registers.pull_up_down));
    expectEqual(@as(usize, 0x10000098), @ptrToInt(&registers.pull_up_down_clock.first));
    expectEqual(@as(usize, 0x1000009C), @ptrToInt(&registers.pull_up_down_clock.second));
}
src/arm/io/gpio.zig
const std = @import("std");
const math = std.math;
const mem = std.mem;
const Allocator = mem.Allocator;
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;

/// A dynamically sized first-in, first-out queue of `T`, backed by a single
/// allocator-managed ring buffer. Data may wrap around the end of `buf`;
/// `head` is the index of the oldest item and `count` the number stored.
pub fn FixedSizeFifo(comptime T: type) type {
    return struct {
        allocator: *Allocator,
        // Bug fix: this field was declared `[]u8`, which only type-checks
        // when `T == u8`; every accessor treats it as a buffer of `T`
        // (`mem.copy(T, ...)`, slices returned as `[]T`, generic realloc).
        buf: []T,
        head: usize,
        count: usize,

        const Self = @This();

        pub fn init(allocator: *Allocator) Self {
            return Self{
                .allocator = allocator,
                .buf = [_]T{},
                .head = 0,
                .count = 0,
            };
        }

        pub fn deinit(self: *Self) void {
            self.allocator.free(self.buf);
            self.* = undefined;
        }

        /// Rotate the stored data so it starts at index 0 (head == 0).
        pub fn realign(self: *Self) void {
            if (self.buf.len - self.head >= self.count) {
                // Data does not wrap: one (possibly overlapping) copy suffices.
                mem.copy(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
                self.head = 0;
            } else {
                // Data wraps: rotate left in bounded chunks via a stack buffer.
                var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined;

                while (self.head != 0) {
                    const n = math.min(self.head, tmp.len);
                    const m = self.buf.len - n;
                    mem.copy(T, tmp[0..n], self.buf[0..n]);
                    // this middle copy overlaps; the others here don't
                    mem.copy(T, self.buf[0..m], self.buf[n..][0..m]);
                    mem.copy(T, self.buf[m..], tmp[0..n]);
                    self.head -= n;
                }
            }
            { // set unused area to undefined
                const unused = @sliceToBytes(self.buf[self.count..]);
                @memset(unused.ptr, undefined, unused.len);
            }
        }

        /// Reduce allocated capacity to `size`. `size` must still hold the
        /// current contents; a failed shrink is harmless and ignored.
        pub fn shrink(self: *Self, size: usize) void {
            assert(size >= self.count);
            self.realign();
            self.buf = self.allocator.realloc(self.buf, size) catch |e| switch (e) {
                error.OutOfMemory => return, // no problem, capacity is still correct then.
            };
        }

        /// Ensure that the buffer can fit at least `size` items
        pub fn ensureCapacity(self: *Self, size: usize) error{OutOfMemory}!void {
            if (self.buf.len >= size) return;
            self.realign();
            // Grow to the next power of two so repeated writes are amortized.
            const new_size = math.ceilPowerOfTwo(usize, size) catch return error.OutOfMemory;
            self.buf = try self.allocator.realloc(self.buf, new_size);
        }

        /// Makes sure at least `size` items are unused
        pub fn ensureUnusedCapacity(self: *Self, size: usize) error{OutOfMemory}!void {
            if (self.writableLength() >= size) return;
            return try self.ensureCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
        }

        /// Returns number of items currently in fifo
        pub fn readableLength(self: Self) usize {
            return self.count;
        }

        /// Returns a writable slice from the 'read' end of the fifo.
        /// Only the first contiguous run is returned; wrapped data needs a
        /// second call with `offset` past the first run.
        fn readableSliceMut(self: Self, offset: usize) []T {
            if (offset > self.count) return [_]T{};
            const start = self.head + offset;
            if (start >= self.buf.len) {
                // `offset` lands in the wrapped-around tail portion.
                return self.buf[start - self.buf.len ..][0 .. self.count - offset];
            } else {
                const end: usize = self.head + self.count;
                if (end >= self.buf.len) {
                    return self.buf[start..self.buf.len];
                } else {
                    return self.buf[start..end];
                }
            }
        }

        /// Returns a readable slice from `offset`
        pub fn readableSlice(self: Self, offset: usize) []const T {
            return self.readableSliceMut(offset);
        }

        const autoalign = false;

        /// Discard first `count` bytes of readable data
        pub fn discard(self: *Self, count: usize) void {
            assert(count <= self.count);
            { // set old range to undefined. Note: may be wrapped around
                const slice = self.readableSliceMut(0);
                if (slice.len >= count) {
                    const unused = @sliceToBytes(slice[0..count]);
                    @memset(unused.ptr, undefined, unused.len);
                } else {
                    const unused = @sliceToBytes(slice[0..]);
                    @memset(unused.ptr, undefined, unused.len);
                    const unused2 = @sliceToBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
                    @memset(unused2.ptr, undefined, unused2.len);
                }
            }
            if (self.buf.len != 0) {
                // Robustness fix: `% 0` traps. With an unallocated buffer the
                // only legal `count` is 0, so the head need not move.
                self.head = (self.head + count) % self.buf.len;
            }
            self.count -= count;
            if (autoalign and self.count == 0) self.head = 0;
        }

        /// Read the next item from the fifo
        pub fn readItem(self: *Self) !T {
            if (self.count == 0) return error.EndOfStream;
            const c = self.buf[self.head];
            self.discard(1);
            return c;
        }

        /// Read data from the fifo into `dst`, returns slice of bytes copied (subslice of `dst`)
        pub fn read(self: *Self, dst: []T) []T {
            var dst_left = dst;
            while (dst_left.len > 0) {
                const slice = self.readableSlice(0);
                if (slice.len == 0) break;
                const n = math.min(slice.len, dst_left.len);
                mem.copy(T, dst_left, slice[0..n]);
                self.discard(n);
                dst_left = dst_left[n..];
            }
            return dst[0 .. dst.len - dst_left.len];
        }

        /// Returns number of bytes available in fifo
        pub fn writableLength(self: Self) usize {
            return self.buf.len - self.count;
        }

        /// Returns the first section of writable buffer
        /// Note that this may be of length 0
        pub fn writableSlice(self: Self, offset: usize) []T {
            if (offset > self.buf.len) return [_]T{};
            const tail = self.head + offset + self.count;
            if (tail < self.buf.len) {
                return self.buf[tail..];
            } else {
                return self.buf[tail - self.buf.len ..][0 .. self.writableLength() - offset];
            }
        }

        /// Returns a writable buffer of at least `size` bytes, allocating memory as needed.
        /// Use `fifo.update` once you've written data to it.
        pub fn writeableWithSize(self: *Self, size: usize) ![]T {
            try self.ensureUnusedCapacity(size);
            // try to avoid realigning buffer
            var slice = self.writableSlice(0);
            if (slice.len < size) {
                self.realign();
                slice = self.writableSlice(0);
            }
            return slice;
        }

        /// Update the tail location of the buffer (usually follows use of writable/writeableWithSize)
        pub fn update(self: *Self, count: usize) void {
            assert(self.count + count <= self.buf.len);
            self.count += count;
        }

        /// Appends the data in `src` to the fifo.
        /// You must have ensured capacity beforehand (e.g. via
        /// `ensureUnusedCapacity`).
        pub fn writeAssumeCapacity(self: *Self, src: []const T) void {
            assert(self.writableLength() >= src.len);
            var src_left = src;
            while (src_left.len > 0) {
                const writable_slice = self.writableSlice(0);
                assert(writable_slice.len != 0);
                const n = math.min(writable_slice.len, src_left.len);
                mem.copy(T, writable_slice, src_left[0..n]);
                self.update(n);
                src_left = src_left[n..];
            }
        }

        /// Appends the data in `src` to the fifo.
        /// Allocates more memory as necessary
        pub fn write(self: *Self, src: []const T) !void {
            try self.ensureUnusedCapacity(src.len);
            return self.writeAssumeCapacity(src);
        }

        /// Append formatted text to the fifo, growing as necessary.
        pub fn print(self: *Self, comptime format: []const u8, args: ...) !void {
            return std.fmt.format(self, error{OutOfMemory}, Self.write, format, args);
        }

        /// Make `count` bytes available before the current read location
        fn rewind(self: *Self, size: usize) void {
            assert(self.writableLength() >= size);
            // Move head backwards, wrapping; adding `buf.len - size` avoids
            // unsigned underflow.
            self.head = (self.head + (self.buf.len - size)) % self.buf.len;
            self.count += size;
        }

        /// Place data back into the read stream
        pub fn unget(self: *Self, src: []const T) !void {
            try self.ensureUnusedCapacity(src.len);
            self.rewind(src.len);
            const slice = self.readableSliceMut(0);
            mem.copy(T, slice, src[0..slice.len]);
            const slice2 = self.readableSliceMut(slice.len);
            mem.copy(T, slice2, src[slice.len..]);
        }

        /// Peek at the item at `offset`
        pub fn peekItem(self: Self, offset: usize) error{EndOfStream}!T {
            if (offset >= self.count) return error.EndOfStream;
            return self.buf[(self.head + offset) % self.buf.len];
        }
    };
}

const ByteFifo = FixedSizeFifo(u8);

test "ByteFifo" {
    var fifo = ByteFifo.init(debug.global_allocator);
    defer fifo.deinit();

    try fifo.write("HELLO");
    testing.expectEqual(@as(usize, 5), fifo.readableLength());
    testing.expectEqualSlices(u8, "HELLO", fifo.readableSlice(0));
    {
        var i: usize = 0;
        while (i < 5) : (i += 1) {
            try fifo.write([_]u8{try fifo.peekItem(i)});
        }
        testing.expectEqual(@as(usize, 10), fifo.readableLength());
        testing.expectEqualSlices(u8, "HELLOHELLO", fifo.readableSlice(0));
    }
    {
        testing.expectEqual(@as(u8, 'H'), try fifo.readItem());
        testing.expectEqual(@as(u8, 'E'), try fifo.readItem());
        testing.expectEqual(@as(u8, 'L'), try fifo.readItem());
        testing.expectEqual(@as(u8, 'L'), try fifo.readItem());
        testing.expectEqual(@as(u8, 'O'), try fifo.readItem());
    }
    testing.expectEqual(@as(usize, 5), fifo.readableLength());
    { // Writes that wrap around
        testing.expectEqual(@as(usize, 11), fifo.writableLength());
        testing.expectEqual(@as(usize, 6), fifo.writableSlice(0).len);
        fifo.writeAssumeCapacity("6<chars<11");
        testing.expectEqualSlices(u8, "HELLO6<char", fifo.readableSlice(0));
        testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(11));
        fifo.discard(11);
        testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(0));
        fifo.discard(4);
        testing.expectEqual(@as(usize, 0), fifo.readableLength());
    }
    {
        const buf = try fifo.writeableWithSize(12);
        testing.expectEqual(@as(usize, 12), buf.len);
        var i: u8 = 0;
        while (i < 10) : (i += 1) {
            buf[i] = i + 'a';
        }
        fifo.update(10);
        testing.expectEqualSlices(u8, "abcdefghij", fifo.readableSlice(0));
    }
    {
        try fifo.unget("prependedstring");
        var result: [30]u8 = undefined;
        testing.expectEqualSlices(u8, "prependedstringabcdefghij", fifo.read(&result));
    }
    fifo.shrink(0);
    {
        try fifo.print("{}, {}!", "Hello", "World");
        var result: [30]u8 = undefined;
        testing.expectEqualSlices(u8, "Hello, World!", fifo.read(&result));
        testing.expectEqual(@as(usize, 0), fifo.readableLength());
    }
}
lib/std/fifo.zig
const std = @import("../std.zig");
const c = std.c;
const assert = std.debug.assert;
const maxInt = std.math.maxInt;

pub use @import("darwin/errno.zig");

pub const PATH_MAX = 1024;

pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;

/// [MC2] no permissions
pub const PROT_NONE = 0x00;
/// [MC2] pages can be read
pub const PROT_READ = 0x01;
/// [MC2] pages can be written
pub const PROT_WRITE = 0x02;
/// [MC2] pages can be executed
pub const PROT_EXEC = 0x04;

/// allocated from memory, swap space
pub const MAP_ANONYMOUS = 0x1000;
/// map from file (default)
pub const MAP_FILE = 0x0000;
/// interpret addr exactly
pub const MAP_FIXED = 0x0010;
/// region may contain semaphores
pub const MAP_HASSEMAPHORE = 0x0200;
/// changes are private
pub const MAP_PRIVATE = 0x0002;
/// share changes
pub const MAP_SHARED = 0x0001;
/// don't cache pages for this mapping
pub const MAP_NOCACHE = 0x0400;
/// don't reserve needed swap area
pub const MAP_NORESERVE = 0x0040;
pub const MAP_FAILED = maxInt(usize);

/// [XSI] no hang in wait/no child to reap
pub const WNOHANG = 0x00000001;
/// [XSI] notify on stop, untraced child
pub const WUNTRACED = 0x00000002;

/// take signal on signal stack
pub const SA_ONSTACK = 0x0001;
/// restart system on signal return
pub const SA_RESTART = 0x0002;
/// reset to SIG_DFL when taking signal
pub const SA_RESETHAND = 0x0004;
/// do not generate SIGCHLD on child stop
pub const SA_NOCLDSTOP = 0x0008;
/// don't mask the signal we're delivering
pub const SA_NODEFER = 0x0010;
/// don't keep zombies around
pub const SA_NOCLDWAIT = 0x0020;
/// signal handler with SA_SIGINFO args
pub const SA_SIGINFO = 0x0040;
/// do not bounce off kernel's sigtramp
pub const SA_USERTRAMP = 0x0100;
/// signal handler with SA_SIGINFO args with 64bit regs information
pub const SA_64REGSET = 0x0200;

pub const O_LARGEFILE = 0x0000;
pub const O_PATH = 0x0000;

pub const F_OK = 0;
pub const X_OK = 1;
pub const W_OK = 2;
pub const R_OK = 4;

/// open for reading only
pub const O_RDONLY = 0x0000;
/// open for writing only
pub const O_WRONLY = 0x0001;
/// open for reading and writing
pub const O_RDWR = 0x0002;
/// do not block on open or for data to become available
pub const O_NONBLOCK = 0x0004;
/// append on each write
pub const O_APPEND = 0x0008;
/// create file if it does not exist
pub const O_CREAT = 0x0200;
/// truncate size to 0
pub const O_TRUNC = 0x0400;
/// error if O_CREAT and the file exists
pub const O_EXCL = 0x0800;
/// atomically obtain a shared lock
pub const O_SHLOCK = 0x0010;
/// atomically obtain an exclusive lock
pub const O_EXLOCK = 0x0020;
/// do not follow symlinks
pub const O_NOFOLLOW = 0x0100;
/// allow open of symlinks
pub const O_SYMLINK = 0x200000;
/// descriptor requested for event notifications only
pub const O_EVTONLY = 0x8000;
/// mark as close-on-exec
pub const O_CLOEXEC = 0x1000000;

pub const O_ACCMODE = 3;
pub const O_ALERT = 536870912;
pub const O_ASYNC = 64;
pub const O_DIRECTORY = 1048576;
pub const O_DP_GETRAWENCRYPTED = 1;
pub const O_DP_GETRAWUNENCRYPTED = 2;
pub const O_DSYNC = 4194304;
pub const O_FSYNC = O_SYNC;
pub const O_NOCTTY = 131072;
pub const O_POPUP = 2147483648;
pub const O_SYNC = 128;

pub const SEEK_SET = 0x0;
pub const SEEK_CUR = 0x1;
pub const SEEK_END = 0x2;

pub const DT_UNKNOWN = 0;
pub const DT_FIFO = 1;
pub const DT_CHR = 2;
pub const DT_DIR = 4;
pub const DT_BLK = 6;
pub const DT_REG = 8;
pub const DT_LNK = 10;
pub const DT_SOCK = 12;
pub const DT_WHT = 14;

/// block specified signal set
pub const SIG_BLOCK = 1;
/// unblock specified signal set
pub const SIG_UNBLOCK = 2;
/// set specified signal set
pub const SIG_SETMASK = 3;

/// hangup
pub const SIGHUP = 1;
/// interrupt
pub const SIGINT = 2;
/// quit
pub const SIGQUIT = 3;
/// illegal instruction (not reset when caught)
pub const SIGILL = 4;
/// trace trap (not reset when caught)
pub const SIGTRAP = 5;
/// abort()
pub const SIGABRT = 6;
/// pollable event ([XSR] generated, not supported)
pub const SIGPOLL = 7;
/// compatibility
pub const SIGIOT = SIGABRT;
/// EMT instruction
pub const SIGEMT = 7;
/// floating point exception
pub const SIGFPE = 8;
/// kill (cannot be caught or ignored)
pub const SIGKILL = 9;
/// bus error
pub const SIGBUS = 10;
/// segmentation violation
pub const SIGSEGV = 11;
/// bad argument to system call
pub const SIGSYS = 12;
/// write on a pipe with no one to read it
pub const SIGPIPE = 13;
/// alarm clock
pub const SIGALRM = 14;
/// software termination signal from kill
pub const SIGTERM = 15;
/// urgent condition on IO channel
pub const SIGURG = 16;
/// sendable stop signal not from tty
pub const SIGSTOP = 17;
/// stop signal from tty
pub const SIGTSTP = 18;
/// continue a stopped process
pub const SIGCONT = 19;
/// to parent on child stop or exit
pub const SIGCHLD = 20;
/// to readers pgrp upon background tty read
pub const SIGTTIN = 21;
/// like TTIN for output if (tp->t_local&LTOSTOP)
pub const SIGTTOU = 22;
/// input/output possible signal
pub const SIGIO = 23;
/// exceeded CPU time limit
pub const SIGXCPU = 24;
/// exceeded file size limit
pub const SIGXFSZ = 25;
/// virtual time alarm
pub const SIGVTALRM = 26;
/// profiling time alarm
pub const SIGPROF = 27;
/// window size changes
pub const SIGWINCH = 28;
/// information request
pub const SIGINFO = 29;
/// user defined signal 1
pub const SIGUSR1 = 30;
/// user defined signal 2
pub const SIGUSR2 = 31;

/// no flag value
pub const KEVENT_FLAG_NONE = 0x000;
/// immediate timeout
pub const KEVENT_FLAG_IMMEDIATE = 0x001;
/// output events only include change
pub const KEVENT_FLAG_ERROR_EVENTS = 0x002;

/// add event to kq (implies enable)
pub const EV_ADD = 0x0001;
/// delete event from kq
pub const EV_DELETE = 0x0002;
/// enable event
pub const EV_ENABLE = 0x0004;
/// disable event (not reported)
pub const EV_DISABLE = 0x0008;
/// only report one occurrence
pub const EV_ONESHOT = 0x0010;
/// clear event state after reporting
pub const EV_CLEAR = 0x0020;
/// force immediate event output
/// ... with or without EV_ERROR
/// ... use KEVENT_FLAG_ERROR_EVENTS
///     on syscalls supporting flags
pub const EV_RECEIPT = 0x0040;
/// disable event after reporting
pub const EV_DISPATCH = 0x0080;
/// unique kevent per udata value
pub const EV_UDATA_SPECIFIC = 0x0100;

/// ... in combination with EV_DELETE
/// will defer delete until udata-specific
/// event enabled. EINPROGRESS will be
/// returned to indicate the deferral
pub const EV_DISPATCH2 = EV_DISPATCH | EV_UDATA_SPECIFIC;

/// report that source has vanished
/// ... only valid with EV_DISPATCH2
pub const EV_VANISHED = 0x0200;

/// reserved by system
pub const EV_SYSFLAGS = 0xF000;
/// filter-specific flag
pub const EV_FLAG0 = 0x1000;
/// filter-specific flag
pub const EV_FLAG1 = 0x2000;

/// EOF detected
pub const EV_EOF = 0x8000;
/// error, data contains errno
pub const EV_ERROR = 0x4000;

pub const EV_POLL = EV_FLAG0;
pub const EV_OOBAND = EV_FLAG1;

pub const EVFILT_READ = -1;
pub const EVFILT_WRITE = -2;
/// attached to aio requests
pub const EVFILT_AIO = -3;
/// attached to vnodes
pub const EVFILT_VNODE = -4;
/// attached to struct proc
pub const EVFILT_PROC = -5;
/// attached to struct proc
pub const EVFILT_SIGNAL = -6;
/// timers
pub const EVFILT_TIMER = -7;
/// Mach portsets
pub const EVFILT_MACHPORT = -8;
/// Filesystem events
pub const EVFILT_FS = -9;
/// User events
pub const EVFILT_USER = -10;
/// Virtual memory events
pub const EVFILT_VM = -12;
/// Exception events
pub const EVFILT_EXCEPT = -15;
pub const EVFILT_SYSCOUNT = 17;

/// On input, NOTE_TRIGGER causes the event to be triggered for output.
pub const NOTE_TRIGGER = 0x01000000;

/// ignore input fflags
pub const NOTE_FFNOP = 0x00000000;
/// and fflags
pub const NOTE_FFAND = 0x40000000;
/// or fflags
pub const NOTE_FFOR = 0x80000000;
/// copy fflags
pub const NOTE_FFCOPY = 0xc0000000;
/// mask for operations
pub const NOTE_FFCTRLMASK = 0xc0000000;
pub const NOTE_FFLAGSMASK = 0x00ffffff;

/// low water mark
pub const NOTE_LOWAT = 0x00000001;
/// OOB data
pub const NOTE_OOB = 0x00000002;

/// vnode was removed
pub const NOTE_DELETE = 0x00000001;
/// data contents changed
pub const NOTE_WRITE = 0x00000002;
/// size increased
pub const NOTE_EXTEND = 0x00000004;
/// attributes changed
pub const NOTE_ATTRIB = 0x00000008;
/// link count changed
pub const NOTE_LINK = 0x00000010;
/// vnode was renamed
pub const NOTE_RENAME = 0x00000020;
/// vnode access was revoked
pub const NOTE_REVOKE = 0x00000040;
/// No specific vnode event: to test for EVFILT_READ activation
pub const NOTE_NONE = 0x00000080;
/// vnode was unlocked by flock(2)
pub const NOTE_FUNLOCK = 0x00000100;

/// process exited
pub const NOTE_EXIT = 0x80000000;
/// process forked
pub const NOTE_FORK = 0x40000000;
/// process exec'd
pub const NOTE_EXEC = 0x20000000;
/// shared with EVFILT_SIGNAL
pub const NOTE_SIGNAL = 0x08000000;
/// exit status to be returned, valid for child process only
pub const NOTE_EXITSTATUS = 0x04000000;
/// provide details on reasons for exit
pub const NOTE_EXIT_DETAIL = 0x02000000;
/// mask for signal & exit status
pub const NOTE_PDATAMASK = 0x000fffff;
pub const NOTE_PCTRLMASK = (~NOTE_PDATAMASK);

pub const NOTE_EXIT_DETAIL_MASK = 0x00070000;
pub const NOTE_EXIT_DECRYPTFAIL = 0x00010000;
pub const NOTE_EXIT_MEMORY = 0x00020000;
pub const NOTE_EXIT_CSERROR = 0x00040000;

/// will react on memory pressure
pub const NOTE_VM_PRESSURE = 0x80000000;
/// will quit on memory pressure, possibly after cleaning up dirty state
pub const NOTE_VM_PRESSURE_TERMINATE = 0x40000000;
/// will quit immediately on memory pressure
pub const NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000;
/// there was an error
pub const NOTE_VM_ERROR = 0x10000000;

/// data is seconds
pub const NOTE_SECONDS = 0x00000001;
/// data is microseconds
pub const NOTE_USECONDS = 0x00000002;
/// data is nanoseconds
pub const NOTE_NSECONDS = 0x00000004;
/// absolute timeout
pub const NOTE_ABSOLUTE = 0x00000008;

/// ext[1] holds leeway for power aware timers
pub const NOTE_LEEWAY = 0x00000010;
/// system does minimal timer coalescing
pub const NOTE_CRITICAL = 0x00000020;
/// system does maximum timer coalescing
pub const NOTE_BACKGROUND = 0x00000040;
pub const NOTE_MACH_CONTINUOUS_TIME = 0x00000080;
/// data is mach absolute time units
pub const NOTE_MACHTIME = 0x00000100;

pub const AF_UNSPEC = 0;
pub const AF_LOCAL = 1;
pub const AF_UNIX = AF_LOCAL;
pub const AF_INET = 2;
pub const AF_SYS_CONTROL = 2;
pub const AF_IMPLINK = 3;
pub const AF_PUP = 4;
pub const AF_CHAOS = 5;
pub const AF_NS = 6;
pub const AF_ISO = 7;
pub const AF_OSI = AF_ISO;
pub const AF_ECMA = 8;
pub const AF_DATAKIT = 9;
pub const AF_CCITT = 10;
pub const AF_SNA = 11;
pub const AF_DECnet = 12;
pub const AF_DLI = 13;
pub const AF_LAT = 14;
pub const AF_HYLINK = 15;
pub const AF_APPLETALK = 16;
pub const AF_ROUTE = 17;
pub const AF_LINK = 18;
pub const AF_XTP = 19;
pub const AF_COIP = 20;
pub const AF_CNT = 21;
pub const AF_RTIP = 22;
pub const AF_IPX = 23;
pub const AF_SIP = 24;
pub const AF_PIP = 25;
pub const AF_ISDN = 28;
pub const AF_E164 = AF_ISDN;
pub const AF_KEY = 29;
pub const AF_INET6 = 30;
pub const AF_NATM = 31;
pub const AF_SYSTEM = 32;
pub const AF_NETBIOS = 33;
pub const AF_PPP = 34;
pub const AF_MAX = 40;

pub const PF_UNSPEC = AF_UNSPEC;
pub const PF_LOCAL = AF_LOCAL;
pub const PF_UNIX = PF_LOCAL;
pub const PF_INET = AF_INET;
pub const PF_IMPLINK = AF_IMPLINK;
pub const PF_PUP = AF_PUP;
pub const PF_CHAOS = AF_CHAOS;
pub const PF_NS = AF_NS;
pub const PF_ISO = AF_ISO;
pub const PF_OSI = AF_ISO;
pub const PF_ECMA = AF_ECMA;
pub const PF_DATAKIT = AF_DATAKIT;
pub const PF_CCITT = AF_CCITT;
pub const PF_SNA = AF_SNA;
pub const PF_DECnet = AF_DECnet;
pub const PF_DLI = AF_DLI;
pub const PF_LAT = AF_LAT;
pub const PF_HYLINK = AF_HYLINK;
pub const PF_APPLETALK = AF_APPLETALK;
pub const PF_ROUTE = AF_ROUTE;
pub const PF_LINK = AF_LINK;
pub const PF_XTP = AF_XTP;
pub const PF_COIP = AF_COIP;
pub const PF_CNT = AF_CNT;
pub const PF_SIP = AF_SIP;
pub const PF_IPX = AF_IPX;
pub const PF_RTIP = AF_RTIP;
pub const PF_PIP = AF_PIP;
pub const PF_ISDN = AF_ISDN;
pub const PF_KEY = AF_KEY;
pub const PF_INET6 = AF_INET6;
pub const PF_NATM = AF_NATM;
pub const PF_SYSTEM = AF_SYSTEM;
pub const PF_NETBIOS = AF_NETBIOS;
pub const PF_PPP = AF_PPP;
pub const PF_MAX = AF_MAX;

pub const SYSPROTO_EVENT = 1;
pub const SYSPROTO_CONTROL = 2;

pub const SOCK_STREAM = 1;
pub const SOCK_DGRAM = 2;
pub const SOCK_RAW = 3;
pub const SOCK_RDM = 4;
pub const SOCK_SEQPACKET = 5;
pub const SOCK_MAXADDRLEN = 255;

pub const IPPROTO_ICMP = 1;
pub const IPPROTO_ICMPV6 = 58;
pub const IPPROTO_TCP = 6;
pub const IPPROTO_UDP = 17;
pub const IPPROTO_IP = 0;
pub const IPPROTO_IPV6 = 41;

// Low 7 bits of a wait(2) status word; 0o177 marks a stopped process.
fn wstatus(x: i32) i32 {
    return x & 0o177;
}
const wstopped = 0o177;

pub fn WEXITSTATUS(x: i32) i32 {
    return x >> 8;
}
pub fn WTERMSIG(x: i32) i32 {
    return wstatus(x);
}
pub fn WSTOPSIG(x: i32) i32 {
    return x >> 8;
}
pub fn WIFEXITED(x: i32) bool {
    return wstatus(x) == 0;
}
pub fn WIFSTOPPED(x: i32) bool {
    return wstatus(x) == wstopped and WSTOPSIG(x) != 0x13;
}
pub fn WIFSIGNALED(x: i32) bool {
    return wstatus(x) != wstopped and wstatus(x) != 0;
}

/// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: usize) usize {
    const signed_r = @bitCast(isize, r);
    return if (signed_r > -4096 and signed_r < 0) @intCast(usize, -signed_r) else 0;
}

pub fn close(fd: i32) usize {
    return errnoWrap(c.close(fd));
}

pub fn abort() noreturn {
    c.abort();
}

// bind(int socket, const struct sockaddr *address, socklen_t address_len)
pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
    return errnoWrap(c.bind(@bitCast(c_int, fd), addr, len));
}

pub fn exit(code: i32) noreturn {
    c.exit(code);
}

pub fn isatty(fd: i32) bool {
    return c.isatty(fd) != 0;
}

pub fn fstat(fd: i32, buf: *c.Stat) usize {
    return errnoWrap(c.@"fstat$INODE64"(fd, buf));
}

pub fn lseek(fd: i32, offset: isize, whence: c_int) usize {
    return errnoWrap(c.lseek(fd, offset, whence));
}

// TODO https://github.com/ziglang/zig/issues/265 on the whole file
pub fn open(path: [*]const u8, flags: u32, mode: usize) usize {
    return errnoWrap(c.open(path, @bitCast(c_int, flags), mode));
}

pub fn raise(sig: i32) usize {
    return errnoWrap(c.raise(sig));
}

pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize {
    return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}

pub fn pread(fd: i32, buf: [*]u8, nbyte: usize, offset: u64) usize {
    return errnoWrap(c.pread(fd, @ptrCast(*c_void, buf), nbyte, offset));
}

pub fn stat(noalias path: [*]const u8, noalias buf: *c.Stat) usize {
    // Bug fix: the parameter type was written `*stat`, which refers to this
    // very function rather than the record type (compare `fstat` above,
    // which correctly uses `*c.Stat`).
    return errnoWrap(c.stat(path, buf));
}

pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize {
    return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}

pub fn pwrite(fd: i32, buf: [*]const u8, nbyte: usize, offset: u64) usize {
    return errnoWrap(c.pwrite(fd, @ptrCast(*const c_void, buf), nbyte, offset));
}

pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
    const ptr_result = c.mmap(
        @ptrCast(?*c_void, address),
        length,
        @bitCast(c_int, @intCast(c_uint, prot)),
        @bitCast(c_int, c_uint(flags)),
        fd,
        offset,
    );
    // libc returns a pointer (MAP_FAILED == -1); fold it back into the
    // kernel-style negative-errno convention used everywhere else here.
    const isize_result = @bitCast(isize, @ptrToInt(ptr_result));
    return errnoWrap(isize_result);
}

pub fn munmap(address: usize, length: usize) usize {
    return errnoWrap(c.munmap(@intToPtr(?*c_void, address), length));
}

pub fn unlink(path: [*]const u8) usize {
    return errnoWrap(c.unlink(path));
}

pub fn getcwd(buf: [*]u8, size: usize) usize {
    return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}

pub fn waitpid(pid: i32, status: *i32, options: u32) usize {
    comptime assert(i32.bit_count == c_int.bit_count);
    return errnoWrap(c.waitpid(pid, @ptrCast(*c_int, status), @bitCast(c_int, options)));
}

pub fn fork() usize {
    return errnoWrap(c.fork());
}

pub fn access(path: [*]const u8, mode: u32) usize {
    return errnoWrap(c.access(path, mode));
}

pub fn pipe(fds: *[2]i32) usize {
    comptime assert(i32.bit_count == c_int.bit_count);
    return errnoWrap(c.pipe(@ptrCast(*[2]c_int, fds)));
}

pub fn getdirentries64(fd: i32, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize {
    return errnoWrap(@bitCast(isize, c.__getdirentries64(fd, buf_ptr, buf_len, basep)));
}

pub fn kqueue() usize {
    return errnoWrap(c.kqueue());
}

pub fn kevent(kq: i32, changelist: []const Kevent, eventlist: []Kevent, timeout: ?*const timespec) usize {
    return errnoWrap(c.kevent(
        kq,
        changelist.ptr,
        @intCast(c_int, changelist.len),
        eventlist.ptr,
        @intCast(c_int, eventlist.len),
        timeout,
    ));
}

pub fn kevent64(
    kq: i32,
    changelist: []const kevent64_s,
    eventlist: []kevent64_s,
    flags: u32,
    timeout: ?*const timespec,
) usize {
    return errnoWrap(c.kevent64(kq, changelist.ptr, changelist.len, eventlist.ptr, eventlist.len, flags, timeout));
}

pub fn mkdir(path: [*]const u8, mode: u32) usize {
    return errnoWrap(c.mkdir(path, mode));
}

pub fn symlink(existing: [*]const u8, new: [*]const u8) usize {
    return errnoWrap(c.symlink(existing, new));
}

pub fn sysctl(name: [*]c_int, namelen: c_uint, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) usize {
    return errnoWrap(c.sysctl(name, namelen, oldp, oldlenp, newp, newlen));
}

pub fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) usize {
    return errnoWrap(c.sysctlbyname(name, oldp, oldlenp, newp, newlen));
}

pub fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) usize {
    // Bug fix: the C call previously referenced the undeclared identifier
    // `wibp`; the parameter is named `mibp`.
    return errnoWrap(c.sysctlnametomib(name, mibp, sizep));
}

pub fn rename(old: [*]const u8, new: [*]const u8) usize {
    return errnoWrap(c.rename(old, new));
}

pub fn rmdir(path: [*]const u8) usize {
    return errnoWrap(c.rmdir(path));
}

pub fn chdir(path: [*]const u8) usize {
    return errnoWrap(c.chdir(path));
}

pub fn execve(path: [*]const u8, argv: [*]const ?[*]const u8, envp: [*]const ?[*]const u8) usize {
    return errnoWrap(c.execve(path, argv, envp));
}

pub fn dup2(old: i32, new: i32) usize {
    return errnoWrap(c.dup2(old, new));
}

pub fn readlink(noalias path: [*]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
    return errnoWrap(c.readlink(path, buf_ptr, buf_len));
}

pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) usize {
    return errnoWrap(c.gettimeofday(tv, tz));
}

pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
    return errnoWrap(c.nanosleep(req, rem));
}

pub fn realpath(noalias filename: [*]const u8, noalias resolved_name: [*]u8) usize {
    return if (c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}

pub fn setreuid(ruid: u32, euid: u32) usize {
    return errnoWrap(c.setreuid(ruid, euid));
}

pub fn setregid(rgid: u32, egid: u32) usize {
    return errnoWrap(c.setregid(rgid, egid));
}

pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
    return errnoWrap(c.sigprocmask(@bitCast(c_int, flags), set, oldset));
}

pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
    assert(sig != SIGKILL);
    assert(sig != SIGSTOP);
    var cact = c.Sigaction{
        .handler = @ptrCast(extern fn (c_int) void, act.handler),
        .sa_flags = @bitCast(c_int, act.flags),
        .sa_mask = act.mask,
    };
    var coact: c.Sigaction = undefined;
    // Bug fix: the C call previously wrote `*cact, *coact` (a dereference of
    // non-pointer values, which does not compile); pass the addresses.
    const result = errnoWrap(c.sigaction(sig, &cact, &coact));
    if (result != 0) {
        return result;
    }
    if (oact) |old| {
        old.* = Sigaction{
            .handler = @ptrCast(extern fn (i32) void, coact.handler),
            .flags = @bitCast(u32, coact.sa_flags),
            .mask = coact.sa_mask,
        };
    }
    return result;
}

pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
    return errnoWrap(c.socket(@bitCast(c_int, domain), @bitCast(c_int, socket_type), @bitCast(c_int, protocol)));
}

pub const iovec = extern struct {
    iov_base: [*]u8,
    iov_len: usize,
};

pub const iovec_const = extern struct {
    iov_base: [*]const u8,
    iov_len: usize,
};

pub const sigset_t = c.sigset_t;
pub const empty_sigset = sigset_t(0);

pub const timespec = c.timespec;
pub const Stat = c.Stat;
pub const dirent = c.dirent;

pub const in_port_t = c.in_port_t;
pub const sa_family_t = c.sa_family_t;
pub const socklen_t = c.socklen_t;
pub const sockaddr = c.sockaddr;
pub const sockaddr_in = c.sockaddr_in;
pub const sockaddr_in6 = c.sockaddr_in6;

/// Renamed from `kevent` to `Kevent` to avoid conflict with the syscall.
pub const Kevent = c.Kevent;
pub const kevent64_s = c.kevent64_s;

/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = struct {
    handler: extern fn (i32) void,
    mask: sigset_t,
    flags: u32,
};

pub fn sigaddset(set: *sigset_t, signo: u5) void {
    set.* |= u32(1) << (signo - 1);
}

/// Takes the return value from a syscall and formats it back in the way
/// that the kernel represents it to libc. Errno was a mistake, let's make
/// it go away forever.
fn errnoWrap(value: isize) usize {
    return @bitCast(usize, if (value == -1) -isize(c._errno().*) else value);
}

pub const timezone = c.timezone;
pub const timeval = c.timeval;
pub const mach_timebase_info_data = c.mach_timebase_info_data;

pub const mach_absolute_time = c.mach_absolute_time;
pub const mach_timebase_info = c.mach_timebase_info;
std/os/darwin.zig
// Windows flavor of POSIX-style OS bits: fundamental types, signal numbers,
// CRT errno values, and socket/address-family constants.
usingnamespace @import("../windows/bits.zig");

// Windows represents both open files and processes as HANDLEs.
pub const fd_t = HANDLE;
pub const pid_t = HANDLE;
pub const PATH_MAX = 260;
pub const time_t = c_longlong;
pub const timespec = extern struct {
    tv_sec: time_t,
    tv_nsec: c_long,
};
pub const sig_atomic_t = c_int;

/// maximum signal number + 1
pub const NSIG = 23;

// Signal types
/// interrupt
pub const SIGINT = 2;
/// illegal instruction - invalid function image
pub const SIGILL = 4;
/// floating point exception
pub const SIGFPE = 8;
/// segment violation
pub const SIGSEGV = 11;
/// Software termination signal from kill
pub const SIGTERM = 15;
/// Ctrl-Break sequence
pub const SIGBREAK = 21;
/// abnormal termination triggered by abort call
pub const SIGABRT = 22;
/// SIGABRT compatible with other platforms, same as SIGABRT
pub const SIGABRT_COMPAT = 6;

// Signal action codes
/// default signal action
pub const SIG_DFL = 0;
/// ignore signal
pub const SIG_IGN = 1;
/// return current value
pub const SIG_GET = 2;
/// signal gets error
pub const SIG_SGE = 3;
/// acknowledge
pub const SIG_ACK = 4;
/// Signal error value (returned by signal call on error)
pub const SIG_ERR = -1;

// Seek origins for lseek-style positioning.
pub const SEEK_SET = 0;
pub const SEEK_CUR = 1;
pub const SEEK_END = 2;

// CRT errno values (Microsoft errno.h). The numbering has gaps, and a few
// values (EINVAL, ERANGE, EILSEQ, STRUNCATE) appear after the main run,
// matching the original declaration order.
pub const EPERM = 1;
pub const ENOENT = 2;
pub const ESRCH = 3;
pub const EINTR = 4;
pub const EIO = 5;
pub const ENXIO = 6;
pub const E2BIG = 7;
pub const ENOEXEC = 8;
pub const EBADF = 9;
pub const ECHILD = 10;
pub const EAGAIN = 11;
pub const ENOMEM = 12;
pub const EACCES = 13;
pub const EFAULT = 14;
pub const EBUSY = 16;
pub const EEXIST = 17;
pub const EXDEV = 18;
pub const ENODEV = 19;
pub const ENOTDIR = 20;
pub const EISDIR = 21;
pub const ENFILE = 23;
pub const EMFILE = 24;
pub const ENOTTY = 25;
pub const EFBIG = 27;
pub const ENOSPC = 28;
pub const ESPIPE = 29;
pub const EROFS = 30;
pub const EMLINK = 31;
pub const EPIPE = 32;
pub const EDOM = 33;
pub const EDEADLK = 36;
pub const ENAMETOOLONG = 38;
pub const ENOLCK = 39;
pub const ENOSYS = 40;
pub const ENOTEMPTY = 41;
pub const EINVAL = 22;
pub const ERANGE = 34;
pub const EILSEQ = 42;
pub const STRUNCATE = 80;

// Support EDEADLOCK for compatibility with older Microsoft C versions
pub const EDEADLOCK = EDEADLK;

// POSIX Supplement
pub const EADDRINUSE = 100;
pub const EADDRNOTAVAIL = 101;
pub const EAFNOSUPPORT = 102;
pub const EALREADY = 103;
pub const EBADMSG = 104;
pub const ECANCELED = 105;
pub const ECONNABORTED = 106;
pub const ECONNREFUSED = 107;
pub const ECONNRESET = 108;
pub const EDESTADDRREQ = 109;
pub const EHOSTUNREACH = 110;
pub const EIDRM = 111;
pub const EINPROGRESS = 112;
pub const EISCONN = 113;
pub const ELOOP = 114;
pub const EMSGSIZE = 115;
pub const ENETDOWN = 116;
pub const ENETRESET = 117;
pub const ENETUNREACH = 118;
pub const ENOBUFS = 119;
pub const ENODATA = 120;
pub const ENOLINK = 121;
pub const ENOMSG = 122;
pub const ENOPROTOOPT = 123;
pub const ENOSR = 124;
pub const ENOSTR = 125;
pub const ENOTCONN = 126;
pub const ENOTRECOVERABLE = 127;
pub const ENOTSOCK = 128;
pub const ENOTSUP = 129;
pub const EOPNOTSUPP = 130;
pub const EOTHER = 131;
pub const EOVERFLOW = 132;
pub const EOWNERDEAD = 133;
pub const EPROTO = 134;
pub const EPROTONOSUPPORT = 135;
pub const EPROTOTYPE = 136;
pub const ETIME = 137;
pub const ETIMEDOUT = 138;
pub const ETXTBSY = 139;
pub const EWOULDBLOCK = 140;
// NOTE(review): EDQUOT has no CRT value; 10069 is WSAEDQUOT — presumably
// chosen so Winsock quota errors map onto it. Verify against callers.
pub const EDQUOT = 10069;

pub const F_OK = 0;

/// Remove directory instead of unlinking file
pub const AT_REMOVEDIR = 0x200;

// Socket address types (Winsock-compatible layouts).
pub const in_port_t = u16;
pub const sa_family_t = u16;
pub const socklen_t = u32;
pub const sockaddr = extern struct {
    family: sa_family_t,
    data: [14]u8,
};
pub const sockaddr_in = extern struct {
    family: sa_family_t = AF_INET,
    port: in_port_t,
    addr: in_addr,
    zero: [8]u8 = [8]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
};
pub const sockaddr_in6 = extern struct {
    family: sa_family_t = AF_INET6,
    port: in_port_t,
    flowinfo: u32,
    addr: in6_addr,
    scope_id: u32,
};
pub const in6_addr = [16]u8;
pub const in_addr = u32;

// Address families (Winsock numbering; note it differs from Unix for AF_INET6 etc.).
pub const AF_UNSPEC = 0;
pub const AF_UNIX = 1;
pub const AF_INET = 2;
pub const AF_IMPLINK = 3;
pub const AF_PUP = 4;
pub const AF_CHAOS = 5;
pub const AF_NS = 6;
pub const AF_IPX = AF_NS;
pub const AF_ISO = 7;
pub const AF_OSI = AF_ISO;
pub const AF_ECMA = 8;
pub const AF_DATAKIT = 9;
pub const AF_CCITT = 10;
pub const AF_SNA = 11;
pub const AF_DECnet = 12;
pub const AF_DLI = 13;
pub const AF_LAT = 14;
pub const AF_HYLINK = 15;
pub const AF_APPLETALK = 16;
pub const AF_NETBIOS = 17;
pub const AF_VOICEVIEW = 18;
pub const AF_FIREFOX = 19;
pub const AF_UNKNOWN1 = 20;
pub const AF_BAN = 21;
pub const AF_ATM = 22;
pub const AF_INET6 = 23;
pub const AF_CLUSTER = 24;
pub const AF_12844 = 25;
pub const AF_IRDA = 26;
pub const AF_NETDES = 28;
pub const AF_TCNPROCESS = 29;
pub const AF_TCNMESSAGE = 30;
pub const AF_ICLFXBM = 31;
pub const AF_BTH = 32;
pub const AF_MAX = 33;
lib/std/os/bits/windows.zig
const arm_m = @import("arm_m");
const lpc_protchecker = @import("lpc_protchecker.zig");
const flexcomm_driver = @import("flexcomm.zig");

/// Security access rules for flash memory. Each flash sector is 32 kbytes.
/// There are 20 FLASH sectors in total.
pub const mpc_flash = lpc_protchecker.Mpc{
    .base = 0x500ac010,
    .block_size_shift = 15,
    .num_blocks = 20,
};

/// Security access rules for ROM memory. Each ROM sector is 4 kbytes. There
/// are 32 ROM sectors in total.
pub const mpc_rom = lpc_protchecker.Mpc{
    .base = 0x500ac024,
    .block_size_shift = 12,
    .num_blocks = 32,
};

/// Security access rules for RAMX. Each RAMX sub region is 4 kbytes.
pub const mpc_ramx = lpc_protchecker.Mpc{
    .base = 0x500ac040,
    .block_size_shift = 12,
    .num_blocks = 8,
};

/// Security access rules for RAM0. Each RAM0 sub region is 4 kbytes.
pub const mpc_ram0 = lpc_protchecker.Mpc{
    .base = 0x500ac060,
    .block_size_shift = 12,
    .num_blocks = 16,
};

/// Security access rules for RAM1. Each RAM1 sub region is 4 kbytes.
pub const mpc_ram1 = lpc_protchecker.Mpc{
    .base = 0x500ac080,
    .block_size_shift = 12,
    .num_blocks = 16,
};

/// Security access rules for RAM2. Each RAM2 sub region is 4 kbytes.
pub const mpc_ram2 = lpc_protchecker.Mpc{
    .base = 0x500ac0a0,
    .block_size_shift = 12,
    .num_blocks = 16,
};

/// Security access rules for RAM3. Each RAM3 sub region is 4 kbytes.
pub const mpc_ram3 = lpc_protchecker.Mpc{
    .base = 0x500ac0c0,
    .block_size_shift = 12,
    .num_blocks = 16,
};

/// Security access rules for RAM4. Each RAM4 sub region is 4 kbytes.
pub const mpc_ram4 = lpc_protchecker.Mpc{
    .base = 0x500ac0e0,
    .block_size_shift = 12,
    .num_blocks = 4,
};

/// Peripheral protection checker for APB bridge 0. Only the CTIMER rules
/// are exposed here.
pub const PpcApbBridge0 = struct {
    base: usize,

    const Self = @This();

    fn regCtrl1(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x4);
    }

    pub fn setCTimer0Rule(self: Self, rule: lpc_protchecker.ProtCheckerRule) void {
        lpc_protchecker.setRule(self.regCtrl1(), 0, rule);
    }

    pub fn setCTimer1Rule(self: Self, rule: lpc_protchecker.ProtCheckerRule) void {
        lpc_protchecker.setRule(self.regCtrl1(), 4, rule);
    }
};

pub const ppc_apb_bridge0 = PpcApbBridge0{ .base = 0x500AC100 };

pub const Flexcomm = flexcomm_driver.Flexcomm;

/// Flexcomm instances (Secure alias)
pub const flexcomm = [8]Flexcomm{
    Flexcomm{ .base = 0x50086000 },
    Flexcomm{ .base = 0x50087000 },
    Flexcomm{ .base = 0x50088000 },
    Flexcomm{ .base = 0x50089000 },
    Flexcomm{ .base = 0x5008a000 },
    Flexcomm{ .base = 0x50096000 },
    Flexcomm{ .base = 0x50097000 },
    Flexcomm{ .base = 0x50098000 },
};

/// System configuration block: clock gating, clock selection, and PLL0 control.
/// Register accessors return `*volatile u32` pointers computed from `base`.
pub const Syscon = struct {
    base: usize,

    const Self = @This();

    pub fn regAhbclkctrlset0(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x220);
    }

    pub fn regAhbclkctrlclr0(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x240);
    }

    pub const AHBCLKCTRL0_IOCON = bit(13);
    pub const AHBCLKCTRL1_TIMER0 = bit(26);
    pub const AHBCLKCTRL1_TIMER1 = bit(27);

    pub fn regAhbclkctrlset1(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x224);
    }

    /// Clock-enable bit for Flexcomm instance `i` in AHBCLKCTRL1.
    pub fn ahbclkctrl1Fc(comptime i: u3) u32 {
        return bit(11 + @as(u5, i));
    }

    pub fn regCtimerclkseln(self: Self, i: usize) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x26c + i * 4);
    }

    pub const CTIMERCLKSEL_SEL_MAIN_CLOCK: u32 = 0;

    pub fn regMainclkselb(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x284);
    }

    pub const MAINCLKSELB_SEL_PLL0: u32 = 1;

    /// This register selects the clock source for the PLL0.
    pub fn regPll0clksel(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x290);
    }

    pub const PLL0CLKSEL_CLKIN: u32 = 1;

    pub fn regFcclksel(self: Self, i: u3) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x2b0 + @as(u32, i) * 4);
    }

    pub const FCCLKSEL_PLL0DIV: u32 = 1;

    pub fn regFlexfrgctrl(self: Self, i: u3) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x320 + @as(u32, i) * 4);
    }

    pub fn flexfrgctrlDiv(ratio: u8) u32 {
        return @as(u32, ratio);
    }

    pub fn flexfrgctrlMult(ratio: u8) u32 {
        return @as(u32, ratio) << 8;
    }

    /// This register determines the divider value for the PLL0 output, if used by the application.
    pub fn regPll0clkdiv(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x3c4);
    }

    pub fn regAhbclkdiv(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x380);
    }

    /// Divide by `t + 1`
    pub fn ahbclkdivDiv(t: u8) u32 {
        return @as(u32, t);
    }

    pub fn regFmccr(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x400);
    }

    pub const FMCCR_FLASHTIM_MASK: u32 = fmccrFlashtim(0b11111);

    /// Flash access time field (FLASHTIM) at bits 12..16.
    pub fn fmccrFlashtim(t: u5) u32 {
        return @as(u32, t) << 12;
    }

    /// The PLL0CTRL register provides most of the control over basic
    /// selections of PLL0 modes and operating details.
    pub fn regPll0ctrl(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x580);
    }

    /// Bit layout of the PLL0CTRL register (32 bits total).
    pub const Pll0ctrl = packed struct {
        selr: u4 = 0,
        seli: u6 = 0,
        selp: u5 = 0,
        bypasspll: bool = false,
        bypasspostdiv2: bool = false,
        limupoff: bool = false,
        bwdirect: bool = false,
        bypassprediv: bool = false,
        bypasspostdiv: bool = false,
        clken: bool = false,
        frmen: bool = false,
        frmclkstable: bool = false,
        skewen: bool = false,
        _pad: u7 = undefined,
    };

    comptime {
        if (@sizeOf(Pll0ctrl) != 4) @compileError("@sizeOf(Pll0ctrl) is not 32-bit long");
    }

    pub fn regPll0stat(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x584);
    }

    pub const PLL0STAT_LOCK = bit(0);

    /// The PLL0NDEC controls operation of the PLL pre-divider.
    pub fn regPll0ndec(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x588);
    }

    pub const PLL0NDEC_NREQ = bit(8);

    pub fn setPll0NDividerRatio(self: Self, ratio: u8) void {
        self.regPll0ndec().* = @as(u32, ratio) | PLL0NDEC_NREQ;
    }

    /// The PLL0PDEC controls operation of the PLL post-divider.
    pub fn regPll0pdec(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x58c);
    }

    pub const PLL0PDEC_NREQ = bit(5);

    pub fn setPll0PDividerRatio(self: Self, ratio: u5) void {
        self.regPll0pdec().* = @as(u32, ratio) | PLL0PDEC_NREQ;
    }

    pub fn regPll0sscg0(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x590);
    }

    pub fn regPll0sscg1(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x594);
    }

    /// MDIV_EXT field of PLL0SSCG1 (bits 10..26).
    pub fn pll0sscg1MdivExt(x: u16) u32 {
        return @as(u32, x) << 10;
    }

    pub const PLL0SSCG1_MREQ = bit(26);
    pub const PLL0SSCG1_SEL_EXT = bit(28);

    pub fn regCpuctrl(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x800);
    }

    pub fn regCpboot(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x804);
    }

    pub fn regClockCtrl(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0xa18);
    }

    pub const CLOCK_CTRL_CLKIN_ENA = bit(5);
};

pub const syscon = Syscon{ .base = 0x50000000 };
pub const syscon_ns = Syscon{ .base = 0x40000000 };

pub const Pmc = struct {
    base: usize,

    const Self = @This();

    /// The power configuration clear register 0 controls the power to various
    /// analog blocks.
    pub fn regPdruncdfclr0(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0xc8);
    }

    pub const PDRUNCFG0_PDEN_XTAL32M = bit(8);
    pub const PDRUNCFG0_PDEN_PLL0 = bit(9);
    pub const PDRUNCFG0_PDEN_LDOXO32M = bit(20);
};

pub const pmc = Pmc{ .base = 0x50020000 };

/// Analog control
pub const AnaCtrl = struct {
    base: usize,

    const Self = @This();

    pub fn regXo32mCtrl(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x20);
    }

    pub const XO32M_CTRL_ENABLE_SYSTEM_CLK_OUT = bit(24);

    pub fn regXo32mStatus(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x24);
    }

    pub const XO32M_STATUS_XO_READY = bit(0);
};

pub const ana_ctrl = AnaCtrl{ .base = 0x50013000 };

/// I/O Pin Configuration
pub const Iocon = struct {
    base: usize,

    const Self = @This();

    pub fn regP0(self: Self, i: u32) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + i * 4);
    }

    pub fn regP1(self: Self, i: u32) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x080 + i * 4);
    }

    /// Pin function selector (FUNC field occupies the low bits).
    pub fn pFunc(f: u32) u32 {
        return f;
    }

    pub const P_DIGIMODE = bit(8);
};

pub const iocon = Iocon{ .base = 0x50001000 };

/// Standard counter/timer
pub const CTimer = struct {
    base: usize,

    const Self = @This();

    /// Interrupt Register. The IR can be written to clear interrupts. The IR
    /// can be read to identify which of eight possible interrupt sources are
    /// pending.
    pub fn regIr(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x00);
    }

    /// Interrupt flag for match channel 0.
    pub const IR_MR0INT = bit(0);
    /// Interrupt flag for match channel 1.
    pub const IR_MR1INT = bit(1);
    /// Interrupt flag for match channel 2.
    pub const IR_MR2INT = bit(2);
    /// Interrupt flag for match channel 3.
    pub const IR_MR3INT = bit(3);
    /// Interrupt flag for capture channel 0 event.
    pub const IR_CR0INT = bit(4);
    /// Interrupt flag for capture channel 1 event.
    pub const IR_CR1INT = bit(5);
    /// Interrupt flag for capture channel 2 event.
    pub const IR_CR2INT = bit(6);
    /// Interrupt flag for capture channel 3 event.
    pub const IR_CR3INT = bit(7);

    /// Timer Control Register. The TCR is used to control the Timer Counter
    /// functions. The Timer Counter can be disabled or reset through the TCR.
    pub fn regTcr(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x04);
    }

    /// Counter enable.
    pub const TCR_CEN = bit(0);
    /// Counter reset.
    pub const TCR_CRST = bit(1);

    /// Timer Counter. The 32 bit TC is incremented every PR+1 cycles of the
    /// APB bus clock. The TC is controlled through the TCR.
    pub fn regTc(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x08);
    }

    /// Prescale Register. When the Prescale Counter (PC) is equal to this
    /// value, the next clock increments the TC and clears the PC.
    pub fn regPr(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x0c);
    }

    /// Prescale Counter. The 32 bit PC is a counter which is incremented to
    /// the value stored in PR. When the value in PR is reached, the TC is
    /// incremented and the PC is cleared. The PC is observable and controllable
    /// through the bus interface.
    pub fn regPc(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x10);
    }

    /// The MCR is used to control whether an interrupt is generated, whether the
    /// TC is reset when a Match occurs, and whether the match register is reloaded
    /// from its shadow register when the TC is reset.
    pub fn regMcr(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x14);
    }

    /// Interrupt on MR0: an interrupt is generated when MR0 matches the value
    /// in the TC.
    pub const MCR_MR0I = bit(0);
    /// Reset on MR0: the TC will be reset if MR0 matches it.
    pub const MCR_MR0R = bit(1);
    /// Stop on MR0: the TC and PC will be stopped and TCR[0] will be set to 0
    /// if MR0 matches the TC.
    pub const MCR_MR0S = bit(2);

    /// Match Register 0–3. MR0 can be enabled through the MCR to reset the TC,
    /// stop both the TC and PC, and/or generate an interrupt every time MR0
    /// matches the TC.
    pub fn regMr(self: Self, n: usize) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x18 + n * 4);
    }
};

pub const ctimers = [_]CTimer{
    CTimer{ .base = 0x50008000 },
    CTimer{ .base = 0x50009000 },
    CTimer{ .base = 0x50028000 },
    CTimer{ .base = 0x50029000 },
    CTimer{ .base = 0x5002a000 },
};

pub const ctimers_ns = [_]CTimer{
    CTimer{ .base = 0x40008000 },
    CTimer{ .base = 0x40009000 },
    CTimer{ .base = 0x40028000 },
    CTimer{ .base = 0x40029000 },
    CTimer{ .base = 0x4002a000 },
};

/// The number of hardware interrupt lines.
pub const num_irqs = 60;

pub const irqs = struct {
    pub const CTimer0_IRQn = arm_m.irqs.interruptIRQn(10);
    pub const CTimer1_IRQn = arm_m.irqs.interruptIRQn(11);
    pub const CTimer2_IRQn = arm_m.irqs.interruptIRQn(36);
    pub const CTimer3_IRQn = arm_m.irqs.interruptIRQn(13);
    pub const CTimer4_IRQn = arm_m.irqs.interruptIRQn(37);
    pub const CTimern_IRQn = [_]usize{
        CTimer0_IRQn,
        CTimer1_IRQn,
        CTimer2_IRQn,
        CTimer3_IRQn,
        CTimer4_IRQn,
    };

    /// Get the descriptive name of an exception number. Returns `null` if
    /// the exception number is not known by this module.
    pub fn getName(comptime i: usize) ?[]const u8 {
        switch (i) {
            CTimer0_IRQn => return "CTimer0",
            CTimer1_IRQn => return "CTimer1",
            // Fix: CTimer2-4 were missing here even though their IRQ numbers
            // are declared above; only unknown numbers fall through to arm_m.
            CTimer2_IRQn => return "CTimer2",
            CTimer3_IRQn => return "CTimer3",
            CTimer4_IRQn => return "CTimer4",
            else => return arm_m.irqs.getName(i),
        }
    }
};

/// Single-bit mask helper: `bit(n)` == `1 << n`.
fn bit(comptime n: u32) u32 {
    return 1 << n;
}
examples/drivers/lpc55s69.zig
const std = @import("std");
const getty = @import("getty");

const assert = std.debug.assert;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;

const Token = @import("common/token.zig").Token;

/// Test deserializer that replays a fixed slice of `Token`s, panicking on
/// any token the caller did not expect.
pub const Deserializer = struct {
    tokens: []const Token,

    const Self = @This();

    pub fn init(tokens: []const Token) Self {
        return .{ .tokens = tokens };
    }

    /// Number of tokens not yet consumed.
    pub fn remaining(self: Self) usize {
        return self.tokens.len;
    }

    /// Pop the next token, or `null` when exhausted.
    pub fn nextTokenOpt(self: *Self) ?Token {
        switch (self.remaining()) {
            0 => return null,
            else => |len| {
                const first = self.tokens[0];
                self.tokens = if (len == 1) &[_]Token{} else self.tokens[1..];
                return first;
            },
        }
    }

    /// Pop the next token; panics when exhausted.
    pub fn nextToken(self: *Self) Token {
        switch (self.remaining()) {
            0 => std.debug.panic("ran out of tokens to deserialize", .{}),
            else => |len| {
                const first = self.tokens[0];
                self.tokens = if (len == 1) &[_]Token{} else self.tokens[1..];
                return first;
            },
        }
    }

    fn peekTokenOpt(self: Self) ?Token {
        return if (self.tokens.len > 0) self.tokens[0] else null;
    }

    fn peekToken(self: Self) Token {
        if (self.peekTokenOpt()) |token| {
            return token;
        } else {
            std.debug.panic("ran out of tokens to deserialize", .{});
        }
    }

    pub usingnamespace getty.Deserializer(
        *Self,
        Error,
        getty.default_dt,
        getty.default_dt,
        deserializeBool,
        deserializeEnum,
        deserializeFloat,
        deserializeInt,
        deserializeMap,
        deserializeOptional,
        deserializeSeq,
        deserializeString,
        deserializeStruct,
        deserializeVoid,
    );

    const Error = getty.de.Error || error{TestExpectedEqual};

    fn deserializeBool(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.nextToken()) {
            .Bool => |v| return try visitor.visitBool(allocator, Self.@"getty.Deserializer", v),
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    fn deserializeEnum(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.nextToken()) {
            // An .Enum marker is followed by its integer or string payload.
            .Enum => switch (self.nextToken()) {
                .U8 => |v| return try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
                .U16 => |v| return try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
                .U32 => |v| return try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
                .U64 => |v| return try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
                .U128 => |v| return try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
                .String => |v| return try visitor.visitString(allocator, Self.@"getty.Deserializer", v),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            },
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    fn deserializeFloat(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        return switch (self.nextToken()) {
            .F16 => |v| try visitor.visitFloat(allocator, Self.@"getty.Deserializer", v),
            .F32 => |v| try visitor.visitFloat(allocator, Self.@"getty.Deserializer", v),
            .F64 => |v| try visitor.visitFloat(allocator, Self.@"getty.Deserializer", v),
            .F128 => |v| try visitor.visitFloat(allocator, Self.@"getty.Deserializer", v),
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        };
    }

    fn deserializeInt(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        return switch (self.nextToken()) {
            .I8 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .I16 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .I32 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .I64 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .I128 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .U8 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .U16 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .U32 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .U64 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            .U128 => |v| try visitor.visitInt(allocator, Self.@"getty.Deserializer", v),
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        };
    }

    fn deserializeMap(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.nextToken()) {
            .Map => |v| {
                var m = Map{ .de = self, .len = v.len, .end = .MapEnd };
                var value = visitor.visitMap(allocator, Self.@"getty.Deserializer", m.map());
                // The matching .MapEnd must follow the entries.
                try self.assertNextToken(.MapEnd);
                return value;
            },
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    fn deserializeOptional(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.peekToken()) {
            .Null => {
                _ = self.nextToken();
                return try visitor.visitNull(allocator, Self.@"getty.Deserializer");
            },
            .Some => {
                _ = self.nextToken();
                return try visitor.visitSome(allocator, Self.@"getty.Deserializer");
            },
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    fn deserializeSeq(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.nextToken()) {
            .Seq => |v| {
                var s = Seq{ .de = self, .len = v.len, .end = .SeqEnd };
                var value = visitor.visitSeq(allocator, Self.@"getty.Deserializer", s.seq());
                try self.assertNextToken(.SeqEnd);
                return value;
            },
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    fn deserializeString(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.nextToken()) {
            // The visitor receives an owned copy; `allocator` must be non-null here.
            .String => |v| return try visitor.visitString(
                allocator,
                Self.@"getty.Deserializer",
                try allocator.?.dupe(u8, v),
            ),
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    fn deserializeStruct(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.nextToken()) {
            .Struct => |v| {
                var s = Struct{ .de = self, .len = v.len, .end = .StructEnd };
                var value = visitor.visitMap(allocator, Self.@"getty.Deserializer", s.map());
                try self.assertNextToken(.StructEnd);
                return value;
            },
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    fn deserializeVoid(self: *Self, allocator: ?std.mem.Allocator, visitor: anytype) Error!@TypeOf(visitor).Value {
        switch (self.nextToken()) {
            .Void => return try visitor.visitVoid(allocator, Self.@"getty.Deserializer"),
            else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
        }
    }

    /// Consume the next token and assert it matches `expected` (used for the
    /// container-end markers).
    fn assertNextToken(self: *Self, expected: Token) !void {
        if (self.nextTokenOpt()) |token| {
            const token_tag = std.meta.activeTag(token);
            const expected_tag = std.meta.activeTag(expected);

            if (token_tag == expected_tag) {
                switch (token) {
                    .MapEnd => try expectEqual(@field(token, "MapEnd"), @field(expected, "MapEnd")),
                    .SeqEnd => try expectEqual(@field(token, "SeqEnd"), @field(expected, "SeqEnd")),
                    .StructEnd => try expectEqual(@field(token, "StructEnd"), @field(expected, "StructEnd")),
                    else => |v| std.debug.panic("unexpected token: {s}", .{@tagName(v)}),
                }
            } else {
                // Fix: `@panic` takes a plain string, so the original
                // "{}" placeholders were never filled in; format the tags.
                std.debug.panic("expected Token::{s} but deserialization wants Token::{s}", .{ @tagName(expected_tag), @tagName(token_tag) });
            }
        } else {
            std.debug.panic("end of tokens but deserialization wants Token::{s}", .{@tagName(std.meta.activeTag(expected))});
        }
    }
};

/// Sequence access: yields elements until the matching end token is peeked.
const Seq = struct {
    de: *Deserializer,
    len: ?usize,
    end: Token,

    const Self = @This();

    pub usingnamespace getty.de.Seq(
        *Self,
        Deserializer.Error,
        nextElementSeed,
    );

    fn nextElementSeed(self: *Seq, allocator: ?std.mem.Allocator, seed: anytype) Deserializer.Error!?@TypeOf(seed).Value {
        if (self.de.peekTokenOpt()) |token| {
            if (std.meta.eql(token, self.end)) return null;
        }
        self.len.? -= @as(usize, if (self.len.? > 0) 1 else 0);
        return try seed.deserialize(allocator, self.de.deserializer());
    }
};

/// Map access: alternates key and value seeds until the end token is peeked.
const Map = struct {
    de: *Deserializer,
    len: ?usize,
    end: Token,

    const Self = @This();

    pub usingnamespace getty.de.Map(
        *Self,
        Deserializer.Error,
        nextKeySeed,
        nextValueSeed,
    );

    fn nextKeySeed(self: *Map, allocator: ?std.mem.Allocator, seed: anytype) Deserializer.Error!?@TypeOf(seed).Value {
        if (self.de.peekTokenOpt()) |token| {
            if (std.meta.eql(token, self.end)) return null;
        } else {
            return null;
        }
        self.len.? -= @as(usize, if (self.len.? > 0) 1 else 0);
        return try seed.deserialize(allocator, self.de.deserializer());
    }

    fn nextValueSeed(self: *Map, allocator: ?std.mem.Allocator, seed: anytype) Deserializer.Error!@TypeOf(seed).Value {
        return try seed.deserialize(allocator, self.de.deserializer());
    }
};

/// Struct access: like `Map`, but keys must be .String tokens (field names).
const Struct = struct {
    de: *Deserializer,
    len: ?usize,
    end: Token,

    const Self = @This();

    pub usingnamespace getty.de.Map(
        *Self,
        Deserializer.Error,
        nextKeySeed,
        nextValueSeed,
    );

    fn nextKeySeed(self: *Struct, _: ?std.mem.Allocator, seed: anytype) Deserializer.Error!?@TypeOf(seed).Value {
        if (self.de.peekTokenOpt()) |token| {
            if (std.meta.eql(token, self.end)) return null;
        } else {
            return null;
        }
        if (self.de.nextTokenOpt()) |token| {
            self.len.? -= @as(usize, if (self.len.? > 0) 1 else 0);
            if (token != .String) {
                return error.InvalidType;
            }
            return token.String;
        } else {
            return null;
        }
    }

    fn nextValueSeed(self: *Struct, allocator: ?std.mem.Allocator, seed: anytype) Deserializer.Error!@TypeOf(seed).Value {
        return try seed.deserialize(allocator, self.de.deserializer());
    }
};
src/tests/de/deserializer.zig
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const builtin = std.builtin;
const assert = std.debug.assert;

pub const ImageReadError = error{EndOfStream} || std.os.ReadError || std.os.SeekError;

/// Tagged-union front end dispatching to a buffer-backed or file-backed reader,
/// either owned inline or borrowed by pointer.
pub const ImageReader = union(enum) {
    buffer: BufferReader,
    file: FileReader,
    bufferp: *BufferReader,
    filep: *FileReader,

    const Self = @This();

    pub fn fromFile(file: std.fs.File) Self {
        return Self{ .file = FileReader.init(file) };
    }

    pub fn fromMemory(buffer: []const u8) Self {
        return Self{ .buffer = BufferReader.init(buffer) };
    }

    /// Borrow an existing reader by pointer; compile error for any other type.
    pub fn wrap(file_or_buffer: anytype) Self {
        if (@TypeOf(file_or_buffer) == *FileReader) return .{ .filep = file_or_buffer };
        if (@TypeOf(file_or_buffer) == *BufferReader) return .{ .bufferp = file_or_buffer };
        @compileError("ImageReader can only wrap FileReader and BufferReader");
    }

    /// Return a view of the next `size` bytes without copying into a caller buffer.
    /// The slice is only valid until the next read.
    pub inline fn readNoAlloc(self: *Self, size: usize) ImageReadError![]const u8 {
        switch (self.*) {
            .buffer => |*b| return b.readNoAlloc(size),
            .file => |*f| return f.readNoAlloc(size),
            .bufferp => |b| return b.readNoAlloc(size),
            .filep => |f| return f.readNoAlloc(size),
        }
    }

    pub inline fn read(self: *Self, buf: []u8) ImageReadError!usize {
        switch (self.*) {
            .buffer => |*b| return b.read(buf),
            .file => |*f| return f.read(buf),
            .bufferp => |b| return b.read(buf),
            .filep => |f| return f.read(buf),
        }
    }

    pub inline fn readStruct(self: *Self, comptime T: type) ImageReadError!*const T {
        switch (self.*) {
            .buffer => |*b| return b.readStruct(T),
            .file => |*f| return f.readStruct(T),
            .bufferp => |b| return b.readStruct(T),
            .filep => |f| return f.readStruct(T),
        }
    }

    pub inline fn readInt(self: *Self, comptime T: type) ImageReadError!T {
        switch (self.*) {
            .buffer => |*b| return b.readInt(T),
            .file => |*f| return f.readInt(T),
            .bufferp => |b| return b.readInt(T),
            .filep => |f| return f.readInt(T),
        }
    }

    pub fn readIntBig(self: *Self, comptime T: type) ImageReadError!T {
        switch (self.*) {
            .buffer => |*b| return b.readIntBig(T),
            .file => |*f| return f.readIntBig(T),
            .bufferp => |b| return b.readIntBig(T),
            .filep => |f| return f.readIntBig(T),
        }
    }

    pub fn readIntLittle(self: *Self, comptime T: type) ImageReadError!T {
        switch (self.*) {
            .buffer => |*b| return b.readIntLittle(T),
            .file => |*f| return f.readIntLittle(T),
            .bufferp => |b| return b.readIntLittle(T),
            .filep => |f| return f.readIntLittle(T),
        }
    }

    pub fn seekBy(self: *Self, amt: i64) ImageReadError!void {
        switch (self.*) {
            .buffer => |*b| return b.seekBy(amt),
            .file => |*f| return f.seekBy(amt),
            .bufferp => |b| return b.seekBy(amt),
            .filep => |f| return f.seekBy(amt),
        }
    }
};

/// Reader over an in-memory byte slice; `pos` is the read cursor.
pub const BufferReader = struct {
    buffer: []const u8,
    pos: usize,

    const Self = @This();

    pub fn init(buf: []const u8) Self {
        return .{ .buffer = buf, .pos = 0 };
    }

    pub fn readNoAlloc(self: *Self, size: usize) ImageReadError![]const u8 {
        const end = self.pos + size;
        if (end > self.buffer.len) return error.EndOfStream;
        const res = self.buffer[self.pos..end];
        self.pos = end;
        return res;
    }

    /// Copy up to `buf.len` bytes; short count at end of buffer (no error).
    pub fn read(self: *Self, buf: []u8) ImageReadError!usize {
        var size = buf.len;
        var end = self.pos + size;
        if (end > self.buffer.len) {
            end = self.buffer.len;
            size = end - self.pos;
        }
        mem.copy(u8, buf, self.buffer[self.pos..end]);
        self.pos = end;
        return size;
    }

    /// Reinterpret the next @sizeOf(T) bytes as T (no byte swapping).
    pub fn readStruct(self: *Self, comptime T: type) ImageReadError!*const T {
        // Only extern and packed structs have defined in-memory layout.
        comptime assert(@typeInfo(T).Struct.layout != std.builtin.TypeInfo.ContainerLayout.Auto);

        const size = @sizeOf(T);
        const end = self.pos + size;
        if (end > self.buffer.len) return error.EndOfStream;
        const start = self.pos;
        self.pos = end;
        return @ptrCast(*const T, self.buffer[start..end]);
    }

    pub fn readInt(self: *Self, comptime T: type) ImageReadError!T {
        comptime assert(@typeInfo(T) == .Int);
        const bitSize = @bitSizeOf(T);
        const size = @sizeOf(T);
        comptime assert(bitSize % 8 == 0 and bitSize / 8 == size); // This will not allow u24 as intended
        var result: T = undefined;
        const read_size = try self.read(mem.asBytes(&result));
        if (read_size != size) return error.EndOfStream;
        return result;
    }

    pub fn readIntBig(self: *Self, comptime T: type) ImageReadError!T {
        return mem.bigToNative(T, try self.readInt(T));
    }

    pub fn readIntLittle(self: *Self, comptime T: type) ImageReadError!T {
        return mem.littleToNative(T, try self.readInt(T));
    }

    /// Move the cursor by `amt`, clamped to [0, buffer.len].
    pub fn seekBy(self: *Self, amt: i64) ImageReadError!void {
        if (amt < 0) {
            const abs_amt = std.math.absCast(amt);
            const abs_amt_usize = std.math.cast(usize, abs_amt) catch std.math.maxInt(usize);
            if (abs_amt_usize > self.pos) {
                self.pos = 0;
            } else {
                self.pos -= abs_amt_usize;
            }
        } else {
            const amt_usize = std.math.cast(usize, amt) catch std.math.maxInt(usize);
            const new_pos = self.pos +| amt_usize;
            self.pos = std.math.min(self.buffer.len, new_pos);
        }
    }

    pub const Reader = std.io.Reader(*Self, ImageReadError, read);

    pub fn reader(self: *Self) Reader {
        return .{ .context = self };
    }
};

/// Buffered reader over a `std.fs.File`; `buffer[pos..end]` holds unread bytes.
pub const FileReader = struct {
    file: std.fs.File,
    pos: usize = 0,
    end: usize = 0,
    buffer: [16 * 1024]u8 = undefined,

    const Self = @This();

    pub fn init(file: std.fs.File) Self {
        return Self{ .file = file };
    }

    /// Return a view of the next `size` bytes from the internal buffer,
    /// refilling it from the file if needed. `size` must fit in the buffer.
    pub fn readNoAlloc(self: *Self, size: usize) ImageReadError![]const u8 {
        if (size > self.buffer.len) return error.EndOfStream;

        var available = self.end - self.pos;
        if (available < size) {
            // Compact the leftover bytes to the front, then refill.
            mem.copy(u8, self.buffer[0..available], self.buffer[self.pos..self.end]);
            const read_size = try self.file.read(self.buffer[available..]);
            self.pos = 0;
            available += read_size;
            self.end = available;
        }
        if (available < size) return error.EndOfStream;

        const endPos = self.pos + size;
        const result = self.buffer[self.pos..endPos];
        self.pos = endPos;
        return result;
    }

    pub fn read(self: *Self, buf: []u8) ImageReadError!usize {
        const size = buf.len;
        const available = self.end - self.pos;
        if (available >= size) {
            const endPos = self.pos + size;
            mem.copy(u8, buf[0..], self.buffer[self.pos..endPos]);
            self.pos = endPos;
            return size;
        }
        mem.copy(u8, buf[0..available], self.buffer[self.pos..self.end]);
        self.pos = 0;
        self.end = 0;
        // Fix: include the bytes already drained from the internal buffer in
        // the reported count; the original returned only the fresh file-read
        // count, silently dropping `available` bytes from the caller's view.
        return available + try self.file.read(buf[available..]);
    }

    pub fn readStruct(self: *Self, comptime T: type) ImageReadError!*const T {
        // Only extern and packed structs have defined in-memory layout.
        comptime assert(@typeInfo(T).Struct.layout != std.builtin.TypeInfo.ContainerLayout.Auto);

        const size = @sizeOf(T);
        if (size > self.buffer.len) return error.EndOfStream;
        const buf = try self.readNoAlloc(size);
        return @ptrCast(*const T, buf);
    }

    pub fn readInt(self: *Self, comptime T: type) ImageReadError!T {
        comptime assert(@typeInfo(T) == .Int);
        const bit_size = @bitSizeOf(T);
        const size = @sizeOf(T);
        comptime assert(bit_size % 8 == 0 and bit_size / 8 == size); // This will not allow u24 as intended
        var result: T = undefined;
        const read_size = try self.read(mem.asBytes(&result));
        if (read_size != size) return error.EndOfStream;
        return result;
    }

    pub fn readIntBig(self: *Self, comptime T: type) ImageReadError!T {
        return mem.bigToNative(T, try self.readInt(T));
    }

    pub fn readIntLittle(self: *Self, comptime T: type) ImageReadError!T {
        return mem.littleToNative(T, try self.readInt(T));
    }

    /// Seek relative to the current logical position, consuming buffered
    /// bytes first and falling back to a real file seek.
    pub fn seekBy(self: *Self, amt: i64) ImageReadError!void {
        if (amt < 0) {
            const abs_amt = std.math.absCast(amt);
            const abs_amt_usize = std.math.cast(usize, abs_amt) catch std.math.maxInt(usize);
            if (abs_amt_usize > self.pos) {
                // Seeking before the buffered window: adjust the file offset
                // by the part the buffer cannot absorb and drop the buffer.
                try self.file.seekBy(amt + @intCast(i64, self.pos));
                self.pos = 0;
                self.end = 0;
            } else {
                self.pos -= abs_amt_usize;
            }
        } else {
            const amt_usize = std.math.cast(usize, amt) catch std.math.maxInt(usize);
            const new_pos = self.pos +| amt_usize;
            if (new_pos > self.end) {
                try self.file.seekBy(@intCast(i64, new_pos - self.end));
                self.pos = 0;
                self.end = 0;
            } else {
                self.pos = new_pos;
            }
        }
    }

    pub const Reader = std.io.Reader(*Self, ImageReadError, read);

    pub fn reader(self: *Self) Reader {
        return .{ .context = self };
    }
};

// ********************* TESTS *********************

test "FileReader" {
    const cwd = std.fs.cwd();
    try cwd.writeFile("test.tmp", "0123456789Abcdefghijklmnopqr0123456789");
    defer cwd.deleteFile("test.tmp") catch {};
    const file = try cwd.openFile("test.tmp", .{ .mode = .read_only });
    defer file.close();
    var reader = ImageReader.fromFile(file);
    try testReader(&reader);
}

test "BufferReader" {
    const buffer = "0123456789Abcdefghijklmnopqr0123456789";
    var reader = ImageReader.fromMemory(buffer[0..]);
    try testReader(&reader);
}

fn testReader(reader: *ImageReader) !void {
    const array10 = try reader.readNoAlloc(10);
    try std.testing.expectEqualSlices(u8, "0123456789", array10);
    const TestStruct = packed struct {
        a: u32,
        b: [11]u8,
    };
    const ts = try reader.readStruct(TestStruct);
    try std.testing.expectEqual(TestStruct{
        .a = 0x64636241,
        .b = .{ 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o' },
    }, ts.*);
    var buf: [8]u8 = undefined;
    var i: u32 = 0;
    while (i < 2) : (i += 1) {
        const read_bytes = try reader.read(buf[0..]);
        try std.testing.expectEqual(@as(usize, 8), read_bytes);
        try std.testing.expectEqualSlices(u8, "pqr01234", buf[0..8]);
        const int = try reader.readIntBig(u32);
        try std.testing.expectEqual(@as(u32, 0x35363738), int);
        try reader.seekBy(-@sizeOf(u32) - 8);
    }
}
src/io.zig
const std = @import("std");
const clap = @import("clap");
const io = std.io;
const fs = std.fs;
const mem = std.mem;
const process = std.process;
const doctest = @import("./doctest.zig");

const max_doc_file_size = 10 * 1024 * 1024; // TODO: this should be overridable by the user

// All subcommands; the @"..." spellings allow names that are Zig keywords
// or option-like strings.
const CommandLineCommand = enum {
    @"inline",
    syntax,
    build,
    run,
    @"test",
    help,
    @"--help",
};

// TODO: test (and maybe run?) should differentiate between panics and other error conditions.
// TODO: integrate with hugo & check that output is correct
// TODO: tests?
// TODO: run should accept arguments for the executable
// TODO: I believe the original code had also a syntax + semantic analisys mode.
// TODO: refactor duplicated code
// TODO: json output mode?
// TODO: caching, of course!
// TODO: code_begin + syntax used to mean --obj, why? now we're changing those to just syntax. Bad idea?
// TODO: cd into the temp directory to produce cleaner outputs
// TODO: make sure to match --fail errors in all commands

/// Entry point: dispatches on the first CLI argument. The `inline` command
/// re-reads the real command line from a `// zig-doctest: ` comment at the
/// top of the input file.
pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;

    var args_it = try clap.args.OsIterator.init(allocator);
    defer args_it.deinit();

    const command_name = (try args_it.next()) orelse show_main_help();

    @setEvalBranchQuota(10000);
    const command = std.meta.stringToEnum(CommandLineCommand, command_name) orelse @panic("unknown command");
    switch (command) {
        .@"inline" => {
            const summary = "Allows you to place the actual command to run and its options as a comment inside the file.";
            const params = comptime [_]clap.Param(clap.Help){
                clap.parseParam("-h, --help Display this help message") catch unreachable,
                clap.parseParam("-i, --in_file <PATH> path to the input file, defaults to stdin") catch unreachable,
                clap.parseParam("-o, --out_file <PATH> path to the output file, defaults to stdout") catch unreachable,
            };

            var diag: clap.Diagnostic = undefined;
            var args = clap.ComptimeClap(
                clap.Help,
                &params,
            ).parse(allocator, &args_it, &diag) catch |err| {
                // Report any useful error and exit
                diag.report(std.io.getStdErr().writer(), err) catch {};
                return err;
            };
            check_help(summary, &params, args);

            const input_file_bytes = try read_input(allocator, args.option("--in_file"));
            var buffered_out_stream = try open_output(args.option("--out_file"));

            // TODO: make this a bit flexible
            const prefix = "// zig-doctest: ";
            if (!mem.startsWith(u8, input_file_bytes, prefix)) {
                @panic("the input file doesn't begin with `// zig-doctest: `");
            }

            const first_newline = for (input_file_bytes) |c, idx| {
                if (c == '\n') break idx;
            } else {
                @panic("the script is empty!");
            };

            // NOTE(review): uses page_allocator while everything else goes
            // through the arena, so ShellIterator allocations are never freed
            // — confirm this is intentional (process is short-lived).
            var iterator = clap.args.ShellIterator.init(
                std.heap.page_allocator,
                input_file_bytes[prefix.len..first_newline],
            );

            const code_without_args_comment = input_file_bytes[first_newline + 1 ..];

            // Read the real command string from the file
            const real_command_name = (try iterator.next()) orelse @panic("expected command arg in zig-doctest comment line");
            const real_command = std.meta.stringToEnum(CommandLineCommand, real_command_name) orelse @panic("unknown command in comment line");
            switch (real_command) {
                .@"inline" => @panic("`inline` can only be used as an actual command line argument"),
                .syntax => try do_syntax(allocator, &iterator, true, code_without_args_comment, buffered_out_stream),
                .run => try do_run(allocator, &iterator, true, code_without_args_comment, buffered_out_stream),
                .build => try do_build(allocator, &iterator, true, code_without_args_comment, buffered_out_stream),
                .@"test" => try do_test(allocator, &iterator, true, code_without_args_comment, buffered_out_stream),
                .help, .@"--help" => @panic("`help` cannot be used inside the zig-doctest comment"),
            }
        },
        .syntax => try do_syntax(allocator, &args_it, false, {}, {}),
        .build => try do_build(allocator, &args_it, false, {}, {}),
        .run => try do_run(allocator, &args_it, false, {}, {}),
        .@"test" => try do_test(allocator, &args_it, false, {}, {}),
        .help, .@"--help" => show_main_help(),
    }
}

/// Syntax-highlights the input without compiling or running it.
/// When `is_inline` is set, input/output come from the caller instead of flags.
fn do_syntax(
    allocator: *mem.Allocator,
    args_it: anytype,
    comptime is_inline: bool,
    cl_input_file_bytes: anytype,
    cl_buffered_out_stream: anytype,
) !void {
    const summary = "Tests that the syntax is valid, without running the code.";
    const params = comptime [_]clap.Param(clap.Help){
        clap.parseParam("-h, --help Display this help message") catch unreachable,
        clap.parseParam("-i, --in_file <PATH> path to the input file, defaults to stdin") catch unreachable,
        clap.parseParam("-o, --out_file <PATH> path to the output file, defaults to stdout") catch unreachable,
    };

    var diag: clap.Diagnostic = undefined;
    var args = clap.ComptimeClap(
        clap.Help,
        &params,
    ).parse(allocator, args_it, &diag) catch |err| {
        // Report any useful error and exit
        diag.report(std.io.getStdErr().writer(), err) catch {};
        return err;
    };
    check_help(summary, &params, args);

    const input_file_bytes = blk: {
        if (is_inline) {
            if (args.option("--in_file")) |_| {
                @panic("`--in_file` is not allowed in comment arguments!");
            }
            break :blk cl_input_file_bytes;
        }
        break :blk try read_input(allocator, args.option("--in_file"));
    };

    var buffered_out_stream = blk: {
        if (is_inline) {
            if (args.option("--out_file")) |_| {
                @panic("`--out_file` is not allowed in comment arguments!");
            }
            break :blk cl_buffered_out_stream;
        }
        break :blk try open_output(args.option("--out_file"));
    };

    try doctest.highlightZigCode(input_file_bytes, buffered_out_stream.writer());
    try buffered_out_stream.flush();
}

/// Builds a snippet (exe/obj/lib), expecting success or a matching compile error.
fn do_build(
    allocator: *mem.Allocator,
    args_it: anytype,
    comptime is_inline: bool,
    cl_input_file_bytes: anytype,
    cl_buffered_out_stream: anytype,
) !void {
    // TODO: it seems a good idea to have a "check output" flag, rather than
    // tying output checking just to failure cases.
    const summary = "Builds a code snippet, checking for the build to succeed or fail as expected.";
    const params = comptime [_]clap.Param(clap.Help){
        clap.parseParam("-h, --help Display this help message") catch unreachable,
        clap.parseParam("-n, --name <NAME> Name of the script, defaults to the input filename or `code` when using stdin.") catch unreachable,
        clap.parseParam("-r, --format <OUTPUT_FORMAT> Output format, possible values: `exe`, `obj`, `lib`, defaults to `exe`") catch unreachable,
        clap.parseParam("-f, --fail <MATCH> Expect the build command to encounter a compile error containing some text that is expected to be present in stderr") catch unreachable,
        clap.parseParam("-i, --in_file <PATH> Path to the input file, defaults to stdin") catch unreachable,
        clap.parseParam("-o, --out_file <PATH> Path to the output file, defaults to stdout") catch unreachable,
        clap.parseParam("-z, --zig_exe <PATH> Path to the zig compiler, defaults to `zig` (i.e. assumes zig present in PATH)") catch unreachable,
        clap.parseParam("-t, --target <TARGET> Compilation target, expected as a arch-os-abi tripled (e.g. `x86_64-linux-gnu`) defaults to `native`") catch unreachable,
        clap.parseParam("-k, --keep Don't delete the temp folder, useful for debugging the resulting executable.") catch unreachable,
    };

    var diag: clap.Diagnostic = undefined;
    var args = clap.ComptimeClap(
        clap.Help,
        &params,
    ).parse(allocator, args_it, &diag) catch |err| {
        // Report any useful error and exit
        diag.report(std.io.getStdErr().writer(), err) catch {};
        return err;
    };
    check_help(summary, &params, args);

    const input_file_bytes = blk: {
        if (is_inline) {
            if (args.option("--in_file")) |_| {
                @panic("`--in_file` is not allowed in comment arguments!");
            }
            break :blk cl_input_file_bytes;
        }
        break :blk try read_input(allocator, args.option("--in_file"));
    };

    var buffered_out_stream = blk: {
        if (is_inline) {
            if (args.option("--out_file")) |_| {
                @panic("`--out_file` is not allowed in comment arguments!");
            }
            break :blk cl_buffered_out_stream;
        }
        break :blk try open_output(args.option("--out_file"));
    };

    // Choose the right name for this example
    const name = args.option("--name") orelse choose_test_name(args.option("--in_file"));

    // Print the filename element
    try buffered_out_stream.writer().print("<p class=\"file\">{s}.zig</p>", .{name});

    // Produce the syntax highlighting
    try doctest.highlightZigCode(input_file_bytes, buffered_out_stream.writer());

    // Grab env map and set max output size
    var env_map = try process.getEnvMap(allocator);
    try env_map.set("ZIG_DEBUG_COLOR", "1");

    // Create a temp folder (retry on the unlikely name collision)
    const tmp_dir_name: []const u8 = while (true) {
        const tmp_dir_name = try randomized_path_name(allocator, "doctest-");
        fs.cwd().makePath(tmp_dir_name) catch |err| switch (err) {
            error.PathAlreadyExists => continue,
            else => |e| return e,
        };
        break tmp_dir_name;
    } else unreachable;
    defer if (!args.flag("--keep")) {
        fs.cwd().deleteTree(tmp_dir_name) catch {
            @panic("Error while deleting the temp directory!");
        };
    };

    // Build the code and write the resulting output
    const output_format = if (args.option("--format")) |format|
        std.meta.stringToEnum(doctest.BuildCommand.Format, format) orelse {
            std.debug.print("Invalid value for --format!\n", .{});
            return error.InvalidFormat;
        }
    else
        .exe;

    _ = try doctest.runBuild(
        allocator,
        input_file_bytes,
        buffered_out_stream.writer(),
        &env_map,
        args.option("--zig_exe") orelse "zig",
        doctest.BuildCommand{
            .name = name,
            .format = output_format,
            .tmp_dir_name = tmp_dir_name,
            .expected_outcome = if (args.option("--fail")) |f| .{ .Failure = f } else .Success,
            .target_str = args.option("--target"),
        },
    );

    try buffered_out_stream.flush();
}

/// Builds and runs a snippet, expecting the run to succeed or fail as requested.
fn do_run(
    allocator: *mem.Allocator,
    args_it: anytype,
    comptime is_inline: bool,
    cl_input_file_bytes: anytype,
    cl_buffered_out_stream: anytype,
) !void {
    // TODO: it seems a good idea to have a "check output" flag, rather than
    // tying output checking just to failure cases.
    const summary = "Compiles and runs a code snippet, checking for the execution to succeed or fail as expected.";
    const params = comptime [_]clap.Param(clap.Help){
        clap.parseParam("-h, --help Display this help message") catch unreachable,
        clap.parseParam("-n, --name <NAME> Name of the script, defaults to the input filename or `code` when using stdin.") catch unreachable,
        clap.parseParam("-f, --fail <MATCH> Expect the execution to encounter a runtime error, optionally provide some text that is expected to be present in stderr") catch unreachable,
        clap.parseParam("-i, --in_file <PATH> Path to the input file, defaults to stdin") catch unreachable,
        clap.parseParam("-o, --out_file <PATH> Path to the output file, defaults to stdout") catch unreachable,
        clap.parseParam("-z, --zig_exe <PATH> Path to the zig compiler, defaults to `zig` (i.e. assumes zig present in PATH)") catch unreachable,
    };

    var diag: clap.Diagnostic = undefined;
    var args = clap.ComptimeClap(
        clap.Help,
        &params,
    ).parse(allocator, args_it, &diag) catch |err| {
        // Report any useful error and exit
        diag.report(std.io.getStdErr().writer(), err) catch {};
        return err;
    };
    check_help(summary, &params, args);

    const input_file_bytes = blk: {
        if (is_inline) {
            if (args.option("--in_file")) |_| {
                @panic("`--in_file` is not allowed in comment arguments!");
            }
            break :blk cl_input_file_bytes;
        }
        break :blk try read_input(allocator, args.option("--in_file"));
    };

    var buffered_out_stream = blk: {
        if (is_inline) {
            if (args.option("--out_file")) |_| {
                @panic("`--out_file` is not allowed in comment arguments!");
            }
            break :blk cl_buffered_out_stream;
        }
        break :blk try open_output(args.option("--out_file"));
    };

    // Choose the right name for this example
    const name = args.option("--name") orelse choose_test_name(args.option("--in_file"));

    // Print the filename element
    try buffered_out_stream.writer().print("<p class=\"file\">{s}.zig</p>", .{name});

    // Produce the syntax highlighting
    try doctest.highlightZigCode(input_file_bytes, buffered_out_stream.writer());

    // Grab env map and set max output size
    var env_map = try process.getEnvMap(allocator);
    try env_map.set("ZIG_DEBUG_COLOR", "1");

    // Create a temp folder
    const tmp_dir_name = while (true) {
        const tmp_dir_name = try randomized_path_name(allocator, "doctest-");
        fs.cwd().makePath(tmp_dir_name) catch |err| switch (err) {
            error.PathAlreadyExists => continue,
            else => |e| return e,
        };
        break tmp_dir_name;
    } else unreachable;
    defer fs.cwd().deleteTree(tmp_dir_name) catch {
        @panic("Error while deleting the temp directory!");
    };

    // Build the code and write the resulting output
    const executable_path = try doctest.runBuild(
        allocator,
        input_file_bytes,
        buffered_out_stream.writer(),
        &env_map,
        args.option("--zig_exe") orelse "zig",
        doctest.BuildCommand{
            .format = .exe,
            .name = name,
            .tmp_dir_name = tmp_dir_name,
            .expected_outcome = .SilentSuccess,
            .target_str = null,
        },
    );

    // Missing executable path means that the build failed.
    if (executable_path) |exe_path| {
        // FIX(consistency): the outcome was bound to an unused local; discard
        // it explicitly like do_build does with runBuild's result.
        _ = try doctest.runExe(
            allocator,
            exe_path,
            buffered_out_stream.writer(),
            &env_map,
            doctest.RunCommand{
                .expected_outcome = if (args.option("--fail")) |f| .{ .Failure = f } else .Success,
            },
        );
    }

    try buffered_out_stream.flush();
}

/// Runs `zig test` on a snippet, expecting the tests to pass or fail as requested.
fn do_test(
    allocator: *mem.Allocator,
    args_it: anytype,
    comptime is_inline: bool,
    cl_input_file_bytes: anytype,
    cl_buffered_out_stream: anytype,
) !void {
    const summary = "Tests a code snippet, checking for the test to succeed or fail as expected.";
    const params = comptime [_]clap.Param(clap.Help){
        clap.parseParam("-h, --help Display this help message") catch unreachable,
        clap.parseParam("-n, --name <NAME> Name of the script, defaults to the input filename or `code` when using stdin.") catch unreachable,
        clap.parseParam("-f, --fail <MATCH> Expect the test to fail, optionally provide some text that is expected to be present in stderr") catch unreachable,
        clap.parseParam("-i, --in_file <PATH> Path to the input file, defaults to stdin") catch unreachable,
        clap.parseParam("-o, --out_file <PATH> Path to the output file, defaults to stdout") catch unreachable,
        clap.parseParam("-z, --zig_exe <PATH> Path to the zig compiler, defaults to `zig` (i.e. assumes zig present in PATH)") catch unreachable,
    };

    var diag: clap.Diagnostic = undefined;
    var args = clap.ComptimeClap(
        clap.Help,
        &params,
    ).parse(allocator, args_it, &diag) catch |err| {
        // Report any useful error and exit
        diag.report(std.io.getStdErr().writer(), err) catch {};
        return err;
    };
    check_help(summary, &params, args);

    const input_file_bytes = blk: {
        if (is_inline) {
            if (args.option("--in_file")) |_| {
                @panic("`--in_file` is not allowed in comment arguments!");
            }
            break :blk cl_input_file_bytes;
        }
        break :blk try read_input(allocator, args.option("--in_file"));
    };

    var buffered_out_stream = blk: {
        if (is_inline) {
            if (args.option("--out_file")) |_| {
                @panic("`--out_file` is not allowed in comment arguments!");
            }
            break :blk cl_buffered_out_stream;
        }
        break :blk try open_output(args.option("--out_file"));
    };

    // Choose the right name for this example
    const name = args.option("--name") orelse choose_test_name(args.option("--in_file"));

    // Print the filename element
    try buffered_out_stream.writer().print("<p class=\"file\">{s}.zig</p>", .{name});

    // Produce the syntax highlighting
    try doctest.highlightZigCode(input_file_bytes, buffered_out_stream.writer());

    // Grab env map and set max output size
    var env_map = try process.getEnvMap(allocator);
    try env_map.set("ZIG_DEBUG_COLOR", "1");

    // Create a temp folder
    const tmp_dir_name = while (true) {
        const tmp_dir_name = try randomized_path_name(allocator, "doctest-");
        fs.cwd().makePath(tmp_dir_name) catch |err| switch (err) {
            error.PathAlreadyExists => continue,
            else => |e| return e,
        };
        break tmp_dir_name;
    } else unreachable;
    defer fs.cwd().deleteTree(tmp_dir_name) catch {
        @panic("Error while deleting the temp directory!");
    };

    // FIX(consistency): the outcome was bound to an unused local; discard it
    // explicitly like do_build does with runBuild's result.
    _ = try doctest.runTest(
        allocator,
        input_file_bytes,
        buffered_out_stream.writer(),
        &env_map,
        args.option("--zig_exe") orelse "zig",
        doctest.TestCommand{
            .name = name,
            .expected_outcome = if (args.option("--fail")) |f| .{ .Failure = f } else .Success,
            .tmp_dir_name = tmp_dir_name,
        },
    );

    try buffered_out_stream.flush();
}

/// Prints the per-command help message and exits when `--help` was passed.
fn check_help(comptime summary: []const u8, comptime params: anytype, args: anytype) void {
    if (args.flag("--help")) {
        std.debug.print("{s}\n\n", .{summary});
        clap.help(io.getStdErr().writer(), params) catch {};
        std.debug.print("\n", .{});
        std.os.exit(0);
    }
}

// Nothing to see here, just a normal elegant generic type.
const BufferedFileType = @TypeOf(io.bufferedWriter((std.fs.File{ .handle = 0 }).writer()));

/// Opens `output` for writing (stdout when null) wrapped in a buffered writer.
fn open_output(output: ?[]const u8) !BufferedFileType {
    const out_file = if (output) |out_file_name|
        try fs.cwd().createFile(out_file_name, .{})
    else
        io.getStdOut();

    return io.bufferedWriter(out_file.writer());
}

/// Reads the whole input file (stdin when null). Memory is arena-owned.
fn read_input(allocator: *mem.Allocator, input: ?[]const u8) ![]const u8 {
    const in_file = if (input) |in_file_name|
        try fs.cwd().openFile(in_file_name, .{ .read = true })
    else
        io.getStdIn();
    defer in_file.close();

    return try in_file.reader().readAllAlloc(allocator, max_doc_file_size);
}

// TODO: this way of chopping of the file extension seems kinda dumb.
// What should we do if somebody is passing in a .md file, for example?
/// Derives the snippet name from the input path: basename minus a `.zig`
/// extension, or "test" when reading from stdin.
fn choose_test_name(in_file: ?[]const u8) ?[]const u8 {
    const in_file_name = in_file orelse return "test";
    const name_with_ext = fs.path.basename(in_file_name);

    if (mem.endsWith(u8, name_with_ext, ".zig")) {
        // FIX: ".zig" is 4 bytes long; the previous `len - 3` kept the
        // trailing dot, producing names like "foo." (rendered as "foo..zig").
        return name_with_ext[0 .. name_with_ext.len - 4];
    }

    return name_with_ext;
}

/// Returns `prefix` followed by 8 random lowercase hex chars.
/// Caller owns the result (arena-allocated in practice).
fn randomized_path_name(allocator: *mem.Allocator, prefix: []const u8) ![]const u8 {
    // Seeded from the clock, so collisions are possible; callers retry on
    // PathAlreadyExists.
    const seed = @bitCast(u64, @truncate(i64, std.time.nanoTimestamp()));
    var xoro = std.rand.Xoroshiro128.init(seed);

    var buf: [4]u8 = undefined;
    xoro.random.bytes(&buf);

    var name = try allocator.alloc(u8, prefix.len + 8);
    errdefer allocator.free(name);

    return try std.fmt.bufPrint(name, "{s}{}", .{ prefix, std.fmt.fmtSliceHexLower(&buf) });
}

/// Prints the global usage text and exits successfully.
fn show_main_help() noreturn {
    std.debug.print("{s}", .{
        \\Doctest runs a Zig code snippet and provides both syntax
        \\highlighting and colored output in HTML format.
        \\
        \\Available commands: syntax, build, test, run, inline, help.
        \\
        \\Put the `--help` flag after the command to get command-specific
        \\help.
        \\
        \\Examples:
        \\
        \\ ./doctest syntax --in_file=foo.zig
        \\ ./doctest build --obj --fail "not handled in switch"
        \\ ./doctest test --out_file bar.zig --zig_exe="/Downloads/zig/bin/zig"
        \\
        \\
    });
    std.os.exit(0);
}
src/main.zig
const std = @import("std");
const g = @import("spirv/grammar.zig");

/// Reads a SPIR-V registry .json (path given as the single CLI argument),
/// parses it into `g.Registry` and prints generated Zig bindings to stdout.
/// All memory is arena-owned and released on exit.
pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;

    const args = try std.process.argsAlloc(allocator);
    if (args.len != 2) {
        usageAndExit(std.io.getStdErr(), args[0], 1);
    }

    const spec_path = args[1];
    const spec = try std.fs.cwd().readFileAlloc(allocator, spec_path, std.math.maxInt(usize));

    var tokens = std.json.TokenStream.init(spec);
    var registry = try std.json.parse(g.Registry, &tokens, .{.allocator = allocator});

    var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
    try render(bw.writer(), registry);
    try bw.flush();
}

/// Emits the file preamble plus version/opcode/operand-kind declarations.
/// Core and extension registries carry different version fields, hence the switch.
fn render(writer: anytype, registry: g.Registry) !void {
    try writer.writeAll(
        \\//! This file is auto-generated by tools/gen_spirv_spec.zig.
        \\
        \\const Version = @import("std").builtin.Version;
        \\
    );
    switch (registry) {
        .core => |core_reg| {
            try writer.print(
                \\pub const version = Version{{ .major = {}, .minor = {}, .patch = {} }};
                \\pub const magic_number: u32 = {s};
                \\
            ,
                .{ core_reg.major_version, core_reg.minor_version, core_reg.revision, core_reg.magic_number },
            );
            try renderOpcodes(writer, core_reg.instructions);
            try renderOperandKinds(writer, core_reg.operand_kinds);
        },
        .extension => |ext_reg| {
            // Extension registries only have a single version number; minor is fixed to 0.
            try writer.print(
                \\pub const version = Version{{ .major = {}, .minor = 0, .patch = {} }};
                \\
            ,
                .{ ext_reg.version, ext_reg.revision },
            );
            try renderOpcodes(writer, ext_reg.instructions);
            try renderOperandKinds(writer, ext_reg.operand_kinds);
        }
    }
}

/// Emits the Opcode enum; non-exhaustive (`_`) so unknown opcodes still decode.
fn renderOpcodes(writer: anytype, instructions: []const g.Instruction) !void {
    try writer.writeAll("pub const Opcode = extern enum(u16) {\n");
    for (instructions) |instr| {
        try writer.print(" {} = {},\n", .{ std.zig.fmtId(instr.opname), instr.opcode });
    }
    try writer.writeAll(" _,\n};\n");
}

/// Emits a declaration per enum-like operand kind; other categories
/// (Id, Literal, Composite) need no generated type.
fn renderOperandKinds(writer: anytype, kinds: []const g.OperandKind) !void {
    for (kinds) |kind| {
        switch (kind.category) {
            .ValueEnum => try renderValueEnum(writer, kind),
            .BitEnum => try renderBitEnum(writer, kind),
            else => {},
        }
    }
}

/// Emits a plain value enum (one variant per enumerant, non-exhaustive).
fn renderValueEnum(writer: anytype, enumeration: g.OperandKind) !void {
    try writer.print("pub const {s} = extern enum(u32) {{\n", .{ enumeration.kind });

    const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
    for (enumerants) |enumerant| {
        if (enumerant.value != .int) return error.InvalidRegistry;

        try writer.print(" {} = {},\n", .{ std.zig.fmtId(enumerant.enumerant), enumerant.value.int });
    }
    try writer.writeAll(" _,\n};\n");
}

/// Emits a bit mask as a 32-field packed struct of bools.
/// Multi-bit combinations and zero ("none") enumerants are skipped; when two
/// names share a bit, the shortest name wins; unnamed bits become reserved fields.
fn renderBitEnum(writer: anytype, enumeration: g.OperandKind) !void {
    try writer.print("pub const {s} = packed struct {{\n", .{ enumeration.kind });

    var flags_by_bitpos = [_]?[]const u8{null} ** 32;
    const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
    for (enumerants) |enumerant| {
        if (enumerant.value != .bitflag) return error.InvalidRegistry;
        const value = try parseHexInt(enumerant.value.bitflag);
        if (@popCount(u32, value) != 1) {
            continue; // Skip combinations and 'none' items
        }

        var bitpos = std.math.log2_int(u32, value);
        if (flags_by_bitpos[bitpos]) |*existing| {
            // Keep the shortest
            if (enumerant.enumerant.len < existing.len)
                existing.* = enumerant.enumerant;
        } else {
            flags_by_bitpos[bitpos] = enumerant.enumerant;
        }
    }

    for (flags_by_bitpos) |maybe_flag_name, bitpos| {
        try writer.writeAll(" ");
        if (maybe_flag_name) |flag_name| {
            try writer.writeAll(flag_name);
        } else {
            try writer.print("_reserved_bit_{}", .{bitpos});
        }

        try writer.writeAll(": bool ");
        if (bitpos == 0) { // Force alignment to integer boundaries
            try writer.writeAll("align(@alignOf(u32)) ");
        }
        try writer.writeAll("= false,\n");
    }
    try writer.writeAll("};\n");
}

/// Parses a "0x…"-prefixed hex literal. u31 keeps the value non-negative;
/// NOTE(review): a registry flag using bit 31 would fail with Overflow here —
/// confirm no such flag exists in the specs this tool consumes.
fn parseHexInt(text: []const u8) !u31 {
    const prefix = "0x";
    if (!std.mem.startsWith(u8, text, prefix))
        return error.InvalidHexInt;
    return try std.fmt.parseInt(u31, text[prefix.len ..], 16);
}

/// Prints usage to `file` and exits with `code` (or 1 if printing itself fails).
fn usageAndExit(file: std.fs.File, arg0: []const u8, code: u8) noreturn {
    file.writer().print(
        \\Usage: {s} <spirv json spec>
        \\
        \\Generates Zig bindings for a SPIR-V specification .json (either core or
        \\extinst versions). The result, printed to stdout, should be used to update
        \\files in src/codegen/spirv.
        \\
        \\The relevant specifications can be obtained from the SPIR-V registry:
        \\https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/unified1/
        \\
    , .{arg0}
    ) catch std.process.exit(1);
    std.process.exit(code);
}
tools/gen_spirv_spec.zig
// zlib License
//
// (C) 2022 Nelson "darltrash" Lopez
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

const std = @import("std");
const log = std.log.scoped(.iqm);
const mem = std.mem;

// On-disk vertex array kinds (IQM `type` field values).
const VertexArrayType = enum(c_int) { position = 0, texcoord, normal, tangent, blendindices, blendweights, color };

// On-disk vertex array descriptor.
const VertexArray = struct { type_: VertexArrayType, flags: u32, format: c_int, size: u32, offset: u32 };

// On-disk mesh record; all fields are indices into the text/vertex/triangle tables.
const RawMesh = struct { name: u32, material: u32, first_vertex: u32, num_vertices: u32, first_triangle: u32, num_triangles: u32 };

// Decoded mesh: a named range of vertices and indices.
const Mesh = struct { name: []const u8, vertex_count: u32, vertex_offset: u32, index_count: u32, index_offset: u32 };

pub const EXMData = struct {};

/// Fully decoded model. All slices are owned by the allocator passed to
/// `fromBuffer`/`fromFile`; no references into the source buffer are kept.
pub const Model = struct {
    header: Header,
    meshes: []Mesh,
    texts: [][]u8,
    indices: []u32,
    positions: ?[]f32,
    texcoords: ?[]f32,
    normals: ?[]f32,
    tangents: ?[]f32,
    blend_indices: ?[]u8,
    blend_weights: ?[]u8,
    color: ?[]u8,
    exm_data: ?EXMData = null
};

/// IQM v2 file header (byte offsets of the first fields in comments).
pub const Header = struct {
    magic: [16]u8, // 0
    version: c_uint, // 16
    filesize: c_uint, // 20
    flags: c_uint, // 24
    num_text: c_uint, // 28
    ofs_text: c_uint, // 32
    num_meshes: c_uint, // 36
    ofs_meshes: c_uint, // 40
    num_vertexarrays: c_uint, // 44
    num_vertexes: c_uint, // 48
    ofs_vertexarrays: c_uint, // 52
    num_triangles: c_uint, // 56
    ofs_triangles: c_uint, // 60
    ofs_adjacency: c_uint, // 64
    num_joints: c_uint,
    ofs_joints: c_uint,
    num_poses: c_uint,
    ofs_poses: c_uint,
    num_anims: c_uint,
    ofs_anims: c_uint,
    num_frames: c_uint,
    num_framechannels: c_uint,
    ofs_frames: c_uint,
    ofs_bounds: c_uint,
    num_comment: c_uint,
    ofs_comment: c_uint,
    num_extensions: c_uint,
    ofs_extensions: c_uint
};

// Reinterprets @sizeOf(T) bytes at `where` as a T.
// NOTE(review): Header/VertexArray are plain (auto-layout) structs; this
// relies on the compiler keeping the declared field order — consider making
// them `extern struct` for a guaranteed layout.
fn grab(data: []const u8, where: usize, comptime T: type) T {
    var raw = data[where..][0..@sizeOf(T)];
    return @bitCast(T, raw.*);
}

/// Parses an IQM v2 buffer into a Model. Everything returned is copied into
/// freshly allocated memory, so `data` may be freed by the caller afterwards.
/// NOTE(review): offsets/counts from the file are trusted; a corrupt file
/// trips the slice bounds checks (panic) rather than returning an error.
pub fn fromBuffer(data: []const u8, isEXM: bool, alloc: mem.Allocator) !Model {
    // I am sorry big endian friends :(
    comptime if (@import("builtin").target.cpu.arch.endian() == .Big)
        return error.UnsupportedEndian;

    ////// HANDLE HEADER //////////
    var header = grab(data, 0, Header);

    // Check if the file is a correct iqm model.
    if (!mem.eql(u8, &header.magic, "INTERQUAKEMODEL\x00"))
        return error.IncorrectHeader;

    // Only version 2 supported.
    if (header.version != 2)
        return error.IncorrectVersion;

    ////// HANDLE TEXT //////////
    // NOTE: I feel like this could be faster AND simpler 🤔
    var tx_idx: usize = 0;
    var tx_max = @intCast(usize, header.num_text);
    var tx_ofs = @intCast(usize, header.ofs_text);
    var tx_arl = std.ArrayList(u8).init(alloc);
    var texts = try alloc.alloc([]u8, tx_max);

    // FIX: the old exit test (`tx_idx > tx_max - 1`) underflowed usize when
    // num_text == 0 and then indexed texts[0] out of bounds; guard tx_max
    // and compare with >= instead.
    if (tx_max > 0) {
        for (data[tx_ofs..]) |v| {
            if (v == 0) {
                texts[tx_idx] = tx_arl.toOwnedSlice();
                tx_idx += 1;
                if (tx_idx >= tx_max) break;
                continue;
            }
            try tx_arl.append(v);
        }
    }
    tx_arl.deinit();

    // TODO: Handle materials!
    // TODO: Handle animations!

    ////// HANDLE VERTEX ARRAYS //////////
    var va_idx: usize = 0;
    var va_max = @intCast(usize, header.num_vertexarrays);
    var va_off = @intCast(usize, header.ofs_vertexarrays);
    var vx_max = @intCast(usize, header.num_vertexes);

    var va_position = try alloc.alloc(f32, vx_max*3);
    var va_texcoord = try alloc.alloc(f32, vx_max*2);
    var va_normal = try alloc.alloc(f32, vx_max*3);
    var va_tangent = try alloc.alloc(f32, vx_max*4);
    var va_blend_indices = try alloc.alloc(u8, vx_max*4);
    var va_blend_weights = try alloc.alloc(u8, vx_max*4);
    var va_color = try alloc.alloc(u8, vx_max*4);

    while (va_idx < va_max) {
        var va_current = grab(data, va_off + (va_idx*@sizeOf(VertexArray)), VertexArray);

        // FIX: the previous code @bitCast a byte slice to []const f32 (invalid
        // for slices) and sized the float slices in *elements* instead of
        // bytes (missing @sizeOf(f32)), leaking the buffers allocated above.
        // Copy the raw little-endian bytes into the owned buffers instead.
        // NOTE(review): `format` is not validated; float/ubyte layouts as
        // written by the standard IQM exporters are assumed — confirm.
        const ofs = @intCast(usize, va_current.offset);
        switch (va_current.type_) {
            .position => mem.copy(u8, mem.sliceAsBytes(va_position), data[ofs .. ofs + vx_max*3*@sizeOf(f32)]),
            .texcoord => mem.copy(u8, mem.sliceAsBytes(va_texcoord), data[ofs .. ofs + vx_max*2*@sizeOf(f32)]),
            .normal => mem.copy(u8, mem.sliceAsBytes(va_normal), data[ofs .. ofs + vx_max*3*@sizeOf(f32)]),
            .tangent => mem.copy(u8, mem.sliceAsBytes(va_tangent), data[ofs .. ofs + vx_max*4*@sizeOf(f32)]),
            .blendindices => mem.copy(u8, va_blend_indices, data[ofs .. ofs + vx_max*4]),
            .blendweights => mem.copy(u8, va_blend_weights, data[ofs .. ofs + vx_max*4]),
            .color => mem.copy(u8, va_color, data[ofs .. ofs + vx_max*4])
        }
        va_idx += 1;
    }

    ////// HANDLE TRIANGLES //////////
    var tr_idx: usize = 0;
    var tr_max = @intCast(usize, header.num_triangles);
    var tr_off = @intCast(usize, header.ofs_triangles);
    var indices = try alloc.alloc(u32, tr_max*3);

    while (tr_idx < tr_max) {
        var triangle = grab(data, tr_off+(tr_idx*@sizeOf([3]u32)), [3]u32);
        indices[(tr_idx*3)+0] = triangle[0];
        indices[(tr_idx*3)+1] = triangle[1];
        indices[(tr_idx*3)+2] = triangle[2];
        tr_idx += 1;
    }

    ////// HANDLE MESHES //////////
    var me_idx: usize = 0;
    var me_max = @intCast(usize, header.num_meshes);
    var me_off = @intCast(usize, header.ofs_meshes);
    var meshes = try alloc.alloc(Mesh, me_max);

    while (me_idx < me_max) {
        var mesh = grab(data, me_off+(me_idx*@sizeOf(RawMesh)), RawMesh);
        meshes[me_idx] = Mesh {
            .name = texts[mesh.name],
            .vertex_count = mesh.num_vertices,
            .vertex_offset = mesh.first_vertex,
            // Triangles are expanded to raw indices, hence the *3.
            .index_count = mesh.num_triangles*3,
            .index_offset = mesh.first_triangle*3
        };
        me_idx += 1;
    }

    if (isEXM) {
        // TODO: Handle EXM Data!
    }

    var out = Model {
        .header = header,
        .meshes = meshes,
        .texts = texts,
        .indices = indices,
        .positions = va_position,
        .texcoords = va_texcoord,
        .normals = va_normal,
        .tangents = va_tangent,
        .blend_indices = va_blend_indices,
        .blend_weights = va_blend_weights,
        .color = va_color
    };

    return out;
}

/// Reads `name` from the CWD and parses it. When `isEXM` is null, EXM mode is
/// inferred from the ".exm" file extension.
pub fn fromFile(name: []const u8, isEXM: ?bool, alloc: mem.Allocator) !Model {
    var file = try std.fs.cwd().openFile(name, .{ .read = true });
    defer file.close();

    var raw = try file.readToEndAlloc(alloc, std.math.maxInt(usize));
    // FIX: fromBuffer copies everything it returns, so the scratch buffer was
    // simply leaking; release it once parsing is done (runs after the return
    // value has been computed).
    defer alloc.free(raw);

    return try fromBuffer(raw, isEXM orelse mem.endsWith(u8, name, ".exm"), alloc);
}
iqm.zig
// Absolute path to the Vulkan registry used to generate the vk.zig bindings.
// NOTE(review): machine-specific Windows path — presumably every contributor
// must edit this; consider deriving it from VULKAN_SDK. Verify with the team.
const vk_xml_path = "C:/VulkanSDK/1.2.198.0/share/vulkan/registry/vk.xml";

//Submodules paths
const vkgen = @import("submodules/vulkan-zig/generator/index.zig");
const glfw = @import("submodules/mach-glfw/build.zig");

const builtin = @import("builtin");
const std = @import("std");
const Step = std.build.Step;
const Builder = std.build.Builder;
const Pkg = std.build.Pkg;

/// Build entry point: produces the "Saturn" executable, wiring up system
/// libraries, mach-glfw, cimgui, generated Vulkan bindings, compiled
/// shaders, and a `zig build run` step.
pub fn build(b: *Builder) void {
    const mode = b.standardReleaseOptions();
    var exe = b.addExecutable("Saturn", "src/main.zig");
    exe.setBuildMode(mode);
    exe.install();
    exe.linkLibC();

    //OS specific libraries
    // `builtin.os.tag` is comptime-known, so the @compileError arm only
    // triggers when actually building on an unsupported host.
    switch (builtin.os.tag) {
        .windows => {
            exe.linkSystemLibrary("kernel32");
            exe.linkSystemLibrary("user32");
            exe.linkSystemLibrary("shell32");
            exe.linkSystemLibrary("gdi32");
        },
        .linux => {},
        else => {
            @compileError("Platform not supported, unsure of build requirements");
        },
    }

    //mach-glfw
    exe.addPackagePath("glfw", "submodules/mach-glfw/src/main.zig");
    glfw.link(b, exe, .{});

    //cimgui
    exe.addIncludeDir("submodules/cimgui/");
    exe.linkLibrary(imguiLibrary(b));

    //Vulkan Bindings
    // vulkan-zig generates vk.zig from the registry XML at build time.
    const vk_gen = vkgen.VkGenerateStep.init(b, vk_xml_path, "vk.zig");
    exe.step.dependOn(&vk_gen.step);
    exe.addPackage(vk_gen.package);

    //Compile Builtin Shaders
    // Each shader is compiled with glslc and @embedFile'd via the generated
    // "resources" package (see ResourceGenStep).
    const res = ResourceGenStep.init(b, "resources.zig");
    res.addShader("tri_vert", "assets/tri.vert");
    res.addShader("tri_frag", "assets/tri.frag");
    res.addShader("imgui_vert", "assets/imgui.vert");
    res.addShader("imgui_frag", "assets/imgui.frag");
    exe.step.dependOn(&res.step);
    exe.addPackage(res.package);

    //Run program
    const play = b.step("run", "Run the engine");
    const run = exe.run();
    run.step.dependOn(b.getInstallStep());
    play.dependOn(&run.step);
}

/// Custom build step that compiles GLSL shaders (through vulkan-zig's
/// ShaderCompileStep) and writes a generated "resources" package whose
/// declarations @embedFile the compiled SPIR-V.
pub const ResourceGenStep = struct {
    step: Step,
    shader_step: *vkgen.ShaderCompileStep,
    builder: *Builder,
    package: std.build.Pkg,
    // Accumulated source text of the generated resources.zig file.
    resources: std.ArrayList(u8),

    /// Allocates the step and registers the shader-compile dependency.
    /// `out` is the generated file's name inside the cache root.
    pub fn init(builder: *Builder, out: []const u8) *ResourceGenStep {
        const self = builder.allocator.create(ResourceGenStep) catch unreachable;
        const full_out_path = std.fs.path.join(builder.allocator, &[_][]const u8{
            builder.build_root,
            builder.cache_root,
            out,
        }) catch unreachable;

        self.* = .{
            .step = Step.init(.Custom, "resources", builder.allocator, make),
            .shader_step = vkgen.ShaderCompileStep.init(builder, &[_][]const u8{ "glslc", "--target-env=vulkan1.2" }),
            .builder = builder,
            .package = .{
                .name = "resources",
                .path = full_out_path,
                .dependencies = null,
            },
            .resources = std.ArrayList(u8).init(builder.allocator),
        };
        // Shaders must be compiled before the resources file is written.
        self.step.dependOn(&self.shader_step.step);
        return self;
    }

    /// Queues `source` for compilation and appends a
    /// `pub const <name> align(4) = @embedFile("...").*;` declaration to the
    /// generated package (align(4) because SPIR-V words are u32).
    pub fn addShader(self: *ResourceGenStep, name: []const u8, source: []const u8) void {
        // NOTE(review): shader_out_path is unused; the embed path below is
        // hard-coded to ../zig-cache/shaders/<source> instead of using the
        // path ShaderCompileStep returns — confirm these always agree.
        const shader_out_path = self.shader_step.add(source);
        var writer = self.resources.writer();
        writer.print("pub const {s} align(4) = @embedFile(\"", .{name}) catch unreachable;
        writer.print("../zig-cache/shaders/{s}", .{source}) catch unreachable;
        writer.writeAll("\").*;\n") catch unreachable;
    }

    /// Step callback: writes the accumulated declarations to the package
    /// path, creating parent directories as needed.
    fn make(step: *Step) !void {
        const self = @fieldParentPtr(ResourceGenStep, "step", step);
        const cwd = std.fs.cwd();
        const dir = std.fs.path.dirname(self.package.path).?;
        try cwd.makePath(dir);
        try cwd.writeFile(self.package.path, self.resources.items);
    }
};

/// Builds cimgui + Dear ImGui as a static C++ library for the engine to
/// link against. Returns the library step.
pub fn imguiLibrary(b: *Builder) *std.build.LibExeObjStep {
    var imgui = b.addStaticLibrary("imgui", null);
    imgui.linkLibC();
    imgui.linkSystemLibrary("c++");

    if (builtin.os.tag == .windows) {
        imgui.linkSystemLibrary("kernel32");
        imgui.linkSystemLibrary("user32");
        imgui.linkSystemLibrary("shell32");
        imgui.linkSystemLibrary("gdi32");
    }

    imgui.addIncludeDir("submodules/cimgui/");
    imgui.addIncludeDir("submodules/cimgui/imgui");

    // cimgui's extern "C" wrappers return C++ types; silence that warning.
    const cpp_args = [_][]const u8{"-Wno-return-type-c-linkage"};
    imgui.addCSourceFile("submodules/cimgui/imgui/imgui.cpp", &cpp_args);
    imgui.addCSourceFile("submodules/cimgui/imgui/imgui_demo.cpp", &cpp_args);
    imgui.addCSourceFile("submodules/cimgui/imgui/imgui_draw.cpp", &cpp_args);
    imgui.addCSourceFile("submodules/cimgui/imgui/imgui_widgets.cpp", &cpp_args);
    imgui.addCSourceFile("submodules/cimgui/imgui/imgui_tables.cpp", &cpp_args);
    imgui.addCSourceFile("submodules/cimgui/cimgui.cpp", &cpp_args);
    return imgui;
}
build.zig
pub const PACKAGE_PROPERTY_FRAMEWORK = @as(u32, 1); pub const PACKAGE_PROPERTY_RESOURCE = @as(u32, 2); pub const PACKAGE_PROPERTY_BUNDLE = @as(u32, 4); pub const PACKAGE_PROPERTY_OPTIONAL = @as(u32, 8); pub const PACKAGE_FILTER_HEAD = @as(u32, 16); pub const PACKAGE_FILTER_DIRECT = @as(u32, 32); pub const PACKAGE_FILTER_RESOURCE = @as(u32, 64); pub const PACKAGE_FILTER_BUNDLE = @as(u32, 128); pub const PACKAGE_INFORMATION_BASIC = @as(u32, 0); pub const PACKAGE_INFORMATION_FULL = @as(u32, 256); pub const PACKAGE_PROPERTY_DEVELOPMENT_MODE = @as(u32, 65536); pub const PACKAGE_FILTER_OPTIONAL = @as(u32, 131072); pub const PACKAGE_PROPERTY_IS_IN_RELATED_SET = @as(u32, 262144); pub const PACKAGE_FILTER_IS_IN_RELATED_SET = @as(u32, 262144); pub const PACKAGE_PROPERTY_STATIC = @as(u32, 524288); pub const PACKAGE_FILTER_STATIC = @as(u32, 524288); pub const PACKAGE_PROPERTY_DYNAMIC = @as(u32, 1048576); pub const PACKAGE_FILTER_DYNAMIC = @as(u32, 1048576); pub const PACKAGE_PROPERTY_HOSTRUNTIME = @as(u32, 2097152); pub const PACKAGE_FILTER_HOSTRUNTIME = @as(u32, 2097152); pub const PACKAGE_FILTER_ALL_LOADED = @as(u32, 0); pub const PACKAGE_DEPENDENCY_RANK_DEFAULT = @as(u32, 0); //-------------------------------------------------------------------------------- // Section: Types (125) //-------------------------------------------------------------------------------- const CLSID_AppxFactory_Value = Guid.initString("5842a140-ff9f-4166-8f5c-62f5b7b0c781"); pub const CLSID_AppxFactory = &CLSID_AppxFactory_Value; const CLSID_AppxBundleFactory_Value = Guid.initString("378e0446-5384-43b7-8877-e7dbdd883446"); pub const CLSID_AppxBundleFactory = &CLSID_AppxBundleFactory_Value; const CLSID_AppxPackagingDiagnosticEventSinkManager_Value = Guid.initString("50ca0a46-1588-4161-8ed2-ef9e469ced5d"); pub const CLSID_AppxPackagingDiagnosticEventSinkManager = &CLSID_AppxPackagingDiagnosticEventSinkManager_Value; const CLSID_AppxEncryptionFactory_Value = 
Guid.initString("dc664fdd-d868-46ee-8780-8d196cb739f7"); pub const CLSID_AppxEncryptionFactory = &CLSID_AppxEncryptionFactory_Value; const CLSID_AppxPackageEditor_Value = Guid.initString("f004f2ca-aebc-4b0d-bf58-e516d5bcc0ab"); pub const CLSID_AppxPackageEditor = &CLSID_AppxPackageEditor_Value; pub const APPX_PACKAGE_SETTINGS = extern struct { forceZip32: BOOL, hashMethod: ?*IUri, }; pub const APPX_COMPRESSION_OPTION = enum(i32) { NONE = 0, NORMAL = 1, MAXIMUM = 2, FAST = 3, SUPERFAST = 4, }; pub const APPX_COMPRESSION_OPTION_NONE = APPX_COMPRESSION_OPTION.NONE; pub const APPX_COMPRESSION_OPTION_NORMAL = APPX_COMPRESSION_OPTION.NORMAL; pub const APPX_COMPRESSION_OPTION_MAXIMUM = APPX_COMPRESSION_OPTION.MAXIMUM; pub const APPX_COMPRESSION_OPTION_FAST = APPX_COMPRESSION_OPTION.FAST; pub const APPX_COMPRESSION_OPTION_SUPERFAST = APPX_COMPRESSION_OPTION.SUPERFAST; pub const APPX_PACKAGE_WRITER_PAYLOAD_STREAM = extern struct { inputStream: ?*IStream, fileName: ?[*:0]const u16, contentType: ?[*:0]const u16, compressionOption: APPX_COMPRESSION_OPTION, }; pub const APPX_FOOTPRINT_FILE_TYPE = enum(i32) { MANIFEST = 0, BLOCKMAP = 1, SIGNATURE = 2, CODEINTEGRITY = 3, CONTENTGROUPMAP = 4, }; pub const APPX_FOOTPRINT_FILE_TYPE_MANIFEST = APPX_FOOTPRINT_FILE_TYPE.MANIFEST; pub const APPX_FOOTPRINT_FILE_TYPE_BLOCKMAP = APPX_FOOTPRINT_FILE_TYPE.BLOCKMAP; pub const APPX_FOOTPRINT_FILE_TYPE_SIGNATURE = APPX_FOOTPRINT_FILE_TYPE.SIGNATURE; pub const APPX_FOOTPRINT_FILE_TYPE_CODEINTEGRITY = APPX_FOOTPRINT_FILE_TYPE.CODEINTEGRITY; pub const APPX_FOOTPRINT_FILE_TYPE_CONTENTGROUPMAP = APPX_FOOTPRINT_FILE_TYPE.CONTENTGROUPMAP; pub const APPX_BUNDLE_FOOTPRINT_FILE_TYPE = enum(i32) { FIRST = 0, // MANIFEST = 0, this enum value conflicts with FIRST BLOCKMAP = 1, SIGNATURE = 2, // LAST = 2, this enum value conflicts with SIGNATURE }; pub const APPX_BUNDLE_FOOTPRINT_FILE_TYPE_FIRST = APPX_BUNDLE_FOOTPRINT_FILE_TYPE.FIRST; pub const APPX_BUNDLE_FOOTPRINT_FILE_TYPE_MANIFEST = 
APPX_BUNDLE_FOOTPRINT_FILE_TYPE.FIRST; pub const APPX_BUNDLE_FOOTPRINT_FILE_TYPE_BLOCKMAP = APPX_BUNDLE_FOOTPRINT_FILE_TYPE.BLOCKMAP; pub const APPX_BUNDLE_FOOTPRINT_FILE_TYPE_SIGNATURE = APPX_BUNDLE_FOOTPRINT_FILE_TYPE.SIGNATURE; pub const APPX_BUNDLE_FOOTPRINT_FILE_TYPE_LAST = APPX_BUNDLE_FOOTPRINT_FILE_TYPE.SIGNATURE; pub const APPX_CAPABILITIES = enum(u32) { INTERNET_CLIENT = 1, INTERNET_CLIENT_SERVER = 2, PRIVATE_NETWORK_CLIENT_SERVER = 4, DOCUMENTS_LIBRARY = 8, PICTURES_LIBRARY = 16, VIDEOS_LIBRARY = 32, MUSIC_LIBRARY = 64, ENTERPRISE_AUTHENTICATION = 128, SHARED_USER_CERTIFICATES = 256, REMOVABLE_STORAGE = 512, APPOINTMENTS = 1024, CONTACTS = 2048, _, pub fn initFlags(o: struct { INTERNET_CLIENT: u1 = 0, INTERNET_CLIENT_SERVER: u1 = 0, PRIVATE_NETWORK_CLIENT_SERVER: u1 = 0, DOCUMENTS_LIBRARY: u1 = 0, PICTURES_LIBRARY: u1 = 0, VIDEOS_LIBRARY: u1 = 0, MUSIC_LIBRARY: u1 = 0, ENTERPRISE_AUTHENTICATION: u1 = 0, SHARED_USER_CERTIFICATES: u1 = 0, REMOVABLE_STORAGE: u1 = 0, APPOINTMENTS: u1 = 0, CONTACTS: u1 = 0, }) APPX_CAPABILITIES { return @intToEnum(APPX_CAPABILITIES, (if (o.INTERNET_CLIENT == 1) @enumToInt(APPX_CAPABILITIES.INTERNET_CLIENT) else 0) | (if (o.INTERNET_CLIENT_SERVER == 1) @enumToInt(APPX_CAPABILITIES.INTERNET_CLIENT_SERVER) else 0) | (if (o.PRIVATE_NETWORK_CLIENT_SERVER == 1) @enumToInt(APPX_CAPABILITIES.PRIVATE_NETWORK_CLIENT_SERVER) else 0) | (if (o.DOCUMENTS_LIBRARY == 1) @enumToInt(APPX_CAPABILITIES.DOCUMENTS_LIBRARY) else 0) | (if (o.PICTURES_LIBRARY == 1) @enumToInt(APPX_CAPABILITIES.PICTURES_LIBRARY) else 0) | (if (o.VIDEOS_LIBRARY == 1) @enumToInt(APPX_CAPABILITIES.VIDEOS_LIBRARY) else 0) | (if (o.MUSIC_LIBRARY == 1) @enumToInt(APPX_CAPABILITIES.MUSIC_LIBRARY) else 0) | (if (o.ENTERPRISE_AUTHENTICATION == 1) @enumToInt(APPX_CAPABILITIES.ENTERPRISE_AUTHENTICATION) else 0) | (if (o.SHARED_USER_CERTIFICATES == 1) @enumToInt(APPX_CAPABILITIES.SHARED_USER_CERTIFICATES) else 0) | (if (o.REMOVABLE_STORAGE == 1) 
@enumToInt(APPX_CAPABILITIES.REMOVABLE_STORAGE) else 0) | (if (o.APPOINTMENTS == 1) @enumToInt(APPX_CAPABILITIES.APPOINTMENTS) else 0) | (if (o.CONTACTS == 1) @enumToInt(APPX_CAPABILITIES.CONTACTS) else 0) ); } }; pub const APPX_CAPABILITY_INTERNET_CLIENT = APPX_CAPABILITIES.INTERNET_CLIENT; pub const APPX_CAPABILITY_INTERNET_CLIENT_SERVER = APPX_CAPABILITIES.INTERNET_CLIENT_SERVER; pub const APPX_CAPABILITY_PRIVATE_NETWORK_CLIENT_SERVER = APPX_CAPABILITIES.PRIVATE_NETWORK_CLIENT_SERVER; pub const APPX_CAPABILITY_DOCUMENTS_LIBRARY = APPX_CAPABILITIES.DOCUMENTS_LIBRARY; pub const APPX_CAPABILITY_PICTURES_LIBRARY = APPX_CAPABILITIES.PICTURES_LIBRARY; pub const APPX_CAPABILITY_VIDEOS_LIBRARY = APPX_CAPABILITIES.VIDEOS_LIBRARY; pub const APPX_CAPABILITY_MUSIC_LIBRARY = APPX_CAPABILITIES.MUSIC_LIBRARY; pub const APPX_CAPABILITY_ENTERPRISE_AUTHENTICATION = APPX_CAPABILITIES.ENTERPRISE_AUTHENTICATION; pub const APPX_CAPABILITY_SHARED_USER_CERTIFICATES = APPX_CAPABILITIES.SHARED_USER_CERTIFICATES; pub const APPX_CAPABILITY_REMOVABLE_STORAGE = APPX_CAPABILITIES.REMOVABLE_STORAGE; pub const APPX_CAPABILITY_APPOINTMENTS = APPX_CAPABILITIES.APPOINTMENTS; pub const APPX_CAPABILITY_CONTACTS = APPX_CAPABILITIES.CONTACTS; pub const APPX_PACKAGE_ARCHITECTURE = enum(i32) { X86 = 0, ARM = 5, X64 = 9, NEUTRAL = 11, ARM64 = 12, }; pub const APPX_PACKAGE_ARCHITECTURE_X86 = APPX_PACKAGE_ARCHITECTURE.X86; pub const APPX_PACKAGE_ARCHITECTURE_ARM = APPX_PACKAGE_ARCHITECTURE.ARM; pub const APPX_PACKAGE_ARCHITECTURE_X64 = APPX_PACKAGE_ARCHITECTURE.X64; pub const APPX_PACKAGE_ARCHITECTURE_NEUTRAL = APPX_PACKAGE_ARCHITECTURE.NEUTRAL; pub const APPX_PACKAGE_ARCHITECTURE_ARM64 = APPX_PACKAGE_ARCHITECTURE.ARM64; pub const APPX_PACKAGE_ARCHITECTURE2 = enum(i32) { X86 = 0, ARM = 5, X64 = 9, NEUTRAL = 11, ARM64 = 12, X86_ON_ARM64 = 14, UNKNOWN = 65535, }; pub const APPX_PACKAGE_ARCHITECTURE2_X86 = APPX_PACKAGE_ARCHITECTURE2.X86; pub const APPX_PACKAGE_ARCHITECTURE2_ARM = 
APPX_PACKAGE_ARCHITECTURE2.ARM; pub const APPX_PACKAGE_ARCHITECTURE2_X64 = APPX_PACKAGE_ARCHITECTURE2.X64; pub const APPX_PACKAGE_ARCHITECTURE2_NEUTRAL = APPX_PACKAGE_ARCHITECTURE2.NEUTRAL; pub const APPX_PACKAGE_ARCHITECTURE2_ARM64 = APPX_PACKAGE_ARCHITECTURE2.ARM64; pub const APPX_PACKAGE_ARCHITECTURE2_X86_ON_ARM64 = APPX_PACKAGE_ARCHITECTURE2.X86_ON_ARM64; pub const APPX_PACKAGE_ARCHITECTURE2_UNKNOWN = APPX_PACKAGE_ARCHITECTURE2.UNKNOWN; pub const APPX_BUNDLE_PAYLOAD_PACKAGE_TYPE = enum(i32) { APPLICATION = 0, RESOURCE = 1, }; pub const APPX_BUNDLE_PAYLOAD_PACKAGE_TYPE_APPLICATION = APPX_BUNDLE_PAYLOAD_PACKAGE_TYPE.APPLICATION; pub const APPX_BUNDLE_PAYLOAD_PACKAGE_TYPE_RESOURCE = APPX_BUNDLE_PAYLOAD_PACKAGE_TYPE.RESOURCE; pub const DX_FEATURE_LEVEL = enum(i32) { UNSPECIFIED = 0, @"9" = 1, @"10" = 2, @"11" = 3, }; pub const DX_FEATURE_LEVEL_UNSPECIFIED = DX_FEATURE_LEVEL.UNSPECIFIED; pub const DX_FEATURE_LEVEL_9 = DX_FEATURE_LEVEL.@"9"; pub const DX_FEATURE_LEVEL_10 = DX_FEATURE_LEVEL.@"10"; pub const DX_FEATURE_LEVEL_11 = DX_FEATURE_LEVEL.@"11"; pub const APPX_CAPABILITY_CLASS_TYPE = enum(i32) { DEFAULT = 0, GENERAL = 1, RESTRICTED = 2, WINDOWS = 4, ALL = 7, CUSTOM = 8, }; pub const APPX_CAPABILITY_CLASS_DEFAULT = APPX_CAPABILITY_CLASS_TYPE.DEFAULT; pub const APPX_CAPABILITY_CLASS_GENERAL = APPX_CAPABILITY_CLASS_TYPE.GENERAL; pub const APPX_CAPABILITY_CLASS_RESTRICTED = APPX_CAPABILITY_CLASS_TYPE.RESTRICTED; pub const APPX_CAPABILITY_CLASS_WINDOWS = APPX_CAPABILITY_CLASS_TYPE.WINDOWS; pub const APPX_CAPABILITY_CLASS_ALL = APPX_CAPABILITY_CLASS_TYPE.ALL; pub const APPX_CAPABILITY_CLASS_CUSTOM = APPX_CAPABILITY_CLASS_TYPE.CUSTOM; pub const APPX_PACKAGING_CONTEXT_CHANGE_TYPE = enum(i32) { START = 0, CHANGE = 1, DETAILS = 2, END = 3, }; pub const APPX_PACKAGING_CONTEXT_CHANGE_TYPE_START = APPX_PACKAGING_CONTEXT_CHANGE_TYPE.START; pub const APPX_PACKAGING_CONTEXT_CHANGE_TYPE_CHANGE = APPX_PACKAGING_CONTEXT_CHANGE_TYPE.CHANGE; pub const 
APPX_PACKAGING_CONTEXT_CHANGE_TYPE_DETAILS = APPX_PACKAGING_CONTEXT_CHANGE_TYPE.DETAILS; pub const APPX_PACKAGING_CONTEXT_CHANGE_TYPE_END = APPX_PACKAGING_CONTEXT_CHANGE_TYPE.END; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxFactory_Value = Guid.initString("beb94909-e451-438b-b5a7-d79e767b75d8"); pub const IID_IAppxFactory = &IID_IAppxFactory_Value; pub const IAppxFactory = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreatePackageWriter: fn( self: *const IAppxFactory, outputStream: ?*IStream, settings: ?*APPX_PACKAGE_SETTINGS, packageWriter: ?*?*IAppxPackageWriter, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreatePackageReader: fn( self: *const IAppxFactory, inputStream: ?*IStream, packageReader: ?*?*IAppxPackageReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateManifestReader: fn( self: *const IAppxFactory, inputStream: ?*IStream, manifestReader: ?*?*IAppxManifestReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateBlockMapReader: fn( self: *const IAppxFactory, inputStream: ?*IStream, blockMapReader: ?*?*IAppxBlockMapReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateValidatedBlockMapReader: fn( self: *const IAppxFactory, blockMapStream: ?*IStream, signatureFileName: ?[*:0]const u16, blockMapReader: ?*?*IAppxBlockMapReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFactory_CreatePackageWriter(self: *const T, outputStream: ?*IStream, settings: ?*APPX_PACKAGE_SETTINGS, packageWriter: ?*?*IAppxPackageWriter) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory.VTable, self.vtable).CreatePackageWriter(@ptrCast(*const IAppxFactory, self), outputStream, settings, packageWriter); } // NOTE: method is namespaced with 
interface name to avoid conflicts for now pub fn IAppxFactory_CreatePackageReader(self: *const T, inputStream: ?*IStream, packageReader: ?*?*IAppxPackageReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory.VTable, self.vtable).CreatePackageReader(@ptrCast(*const IAppxFactory, self), inputStream, packageReader); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFactory_CreateManifestReader(self: *const T, inputStream: ?*IStream, manifestReader: ?*?*IAppxManifestReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory.VTable, self.vtable).CreateManifestReader(@ptrCast(*const IAppxFactory, self), inputStream, manifestReader); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFactory_CreateBlockMapReader(self: *const T, inputStream: ?*IStream, blockMapReader: ?*?*IAppxBlockMapReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory.VTable, self.vtable).CreateBlockMapReader(@ptrCast(*const IAppxFactory, self), inputStream, blockMapReader); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFactory_CreateValidatedBlockMapReader(self: *const T, blockMapStream: ?*IStream, signatureFileName: ?[*:0]const u16, blockMapReader: ?*?*IAppxBlockMapReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory.VTable, self.vtable).CreateValidatedBlockMapReader(@ptrCast(*const IAppxFactory, self), blockMapStream, signatureFileName, blockMapReader); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxFactory2_Value = Guid.initString("f1346df2-c282-4e22-b918-743a929a8d55"); pub const IID_IAppxFactory2 = &IID_IAppxFactory2_Value; pub const IAppxFactory2 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreateContentGroupMapReader: fn( self: *const IAppxFactory2, inputStream: ?*IStream, contentGroupMapReader: 
?*?*IAppxContentGroupMapReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateSourceContentGroupMapReader: fn( self: *const IAppxFactory2, inputStream: ?*IStream, reader: ?*?*IAppxSourceContentGroupMapReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateContentGroupMapWriter: fn( self: *const IAppxFactory2, stream: ?*IStream, contentGroupMapWriter: ?*?*IAppxContentGroupMapWriter, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFactory2_CreateContentGroupMapReader(self: *const T, inputStream: ?*IStream, contentGroupMapReader: ?*?*IAppxContentGroupMapReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory2.VTable, self.vtable).CreateContentGroupMapReader(@ptrCast(*const IAppxFactory2, self), inputStream, contentGroupMapReader); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFactory2_CreateSourceContentGroupMapReader(self: *const T, inputStream: ?*IStream, reader: ?*?*IAppxSourceContentGroupMapReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory2.VTable, self.vtable).CreateSourceContentGroupMapReader(@ptrCast(*const IAppxFactory2, self), inputStream, reader); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFactory2_CreateContentGroupMapWriter(self: *const T, stream: ?*IStream, contentGroupMapWriter: ?*?*IAppxContentGroupMapWriter) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFactory2.VTable, self.vtable).CreateContentGroupMapWriter(@ptrCast(*const IAppxFactory2, self), stream, contentGroupMapWriter); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxPackageReader_Value = 
Guid.initString("b5c49650-99bc-481c-9a34-3d53a4106708"); pub const IID_IAppxPackageReader = &IID_IAppxPackageReader_Value; pub const IAppxPackageReader = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetBlockMap: fn( self: *const IAppxPackageReader, blockMapReader: ?*?*IAppxBlockMapReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFootprintFile: fn( self: *const IAppxPackageReader, type: APPX_FOOTPRINT_FILE_TYPE, file: ?*?*IAppxFile, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPayloadFile: fn( self: *const IAppxPackageReader, fileName: ?[*:0]const u16, file: ?*?*IAppxFile, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPayloadFiles: fn( self: *const IAppxPackageReader, filesEnumerator: ?*?*IAppxFilesEnumerator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetManifest: fn( self: *const IAppxPackageReader, manifestReader: ?*?*IAppxManifestReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageReader_GetBlockMap(self: *const T, blockMapReader: ?*?*IAppxBlockMapReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageReader.VTable, self.vtable).GetBlockMap(@ptrCast(*const IAppxPackageReader, self), blockMapReader); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageReader_GetFootprintFile(self: *const T, type_: APPX_FOOTPRINT_FILE_TYPE, file: ?*?*IAppxFile) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageReader.VTable, self.vtable).GetFootprintFile(@ptrCast(*const IAppxPackageReader, self), type_, file); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageReader_GetPayloadFile(self: *const T, fileName: ?[*:0]const u16, file: ?*?*IAppxFile) 
callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageReader.VTable, self.vtable).GetPayloadFile(@ptrCast(*const IAppxPackageReader, self), fileName, file); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageReader_GetPayloadFiles(self: *const T, filesEnumerator: ?*?*IAppxFilesEnumerator) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageReader.VTable, self.vtable).GetPayloadFiles(@ptrCast(*const IAppxPackageReader, self), filesEnumerator); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageReader_GetManifest(self: *const T, manifestReader: ?*?*IAppxManifestReader) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageReader.VTable, self.vtable).GetManifest(@ptrCast(*const IAppxPackageReader, self), manifestReader); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxPackageWriter_Value = Guid.initString("9099e33b-246f-41e4-881a-008eb613f858"); pub const IID_IAppxPackageWriter = &IID_IAppxPackageWriter_Value; pub const IAppxPackageWriter = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AddPayloadFile: fn( self: *const IAppxPackageWriter, fileName: ?[*:0]const u16, contentType: ?[*:0]const u16, compressionOption: APPX_COMPRESSION_OPTION, inputStream: ?*IStream, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Close: fn( self: *const IAppxPackageWriter, manifest: ?*IStream, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageWriter_AddPayloadFile(self: *const T, fileName: ?[*:0]const u16, contentType: ?[*:0]const u16, compressionOption: APPX_COMPRESSION_OPTION, inputStream: ?*IStream) callconv(.Inline) HRESULT { return 
@ptrCast(*const IAppxPackageWriter.VTable, self.vtable).AddPayloadFile(@ptrCast(*const IAppxPackageWriter, self), fileName, contentType, compressionOption, inputStream); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageWriter_Close(self: *const T, manifest: ?*IStream) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageWriter.VTable, self.vtable).Close(@ptrCast(*const IAppxPackageWriter, self), manifest); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxPackageWriter2_Value = Guid.initString("2cf5c4fd-e54c-4ea5-ba4e-f8c4b105a8c8"); pub const IID_IAppxPackageWriter2 = &IID_IAppxPackageWriter2_Value; pub const IAppxPackageWriter2 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Close: fn( self: *const IAppxPackageWriter2, manifest: ?*IStream, contentGroupMap: ?*IStream, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageWriter2_Close(self: *const T, manifest: ?*IStream, contentGroupMap: ?*IStream) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageWriter2.VTable, self.vtable).Close(@ptrCast(*const IAppxPackageWriter2, self), manifest, contentGroupMap); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxPackageWriter3_Value = Guid.initString("a83aacd3-41c0-4501-b8a3-74164f50b2fd"); pub const IID_IAppxPackageWriter3 = &IID_IAppxPackageWriter3_Value; pub const IAppxPackageWriter3 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AddPayloadFiles: fn( self: *const IAppxPackageWriter3, fileCount: u32, payloadFiles: [*]APPX_PACKAGE_WRITER_PAYLOAD_STREAM, memoryLimit: u64, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackageWriter3_AddPayloadFiles(self: *const T, fileCount: u32, payloadFiles: [*]APPX_PACKAGE_WRITER_PAYLOAD_STREAM, memoryLimit: u64) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackageWriter3.VTable, self.vtable).AddPayloadFiles(@ptrCast(*const IAppxPackageWriter3, self), fileCount, payloadFiles, memoryLimit); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxFile_Value = Guid.initString("91df827b-94fd-468f-827b-57f41b2f6f2e"); pub const IID_IAppxFile = &IID_IAppxFile_Value; pub const IAppxFile = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCompressionOption: fn( self: *const IAppxFile, compressionOption: ?*APPX_COMPRESSION_OPTION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetContentType: fn( self: *const IAppxFile, contentType: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetName: fn( self: *const IAppxFile, fileName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSize: fn( self: *const IAppxFile, size: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetStream: fn( self: *const IAppxFile, stream: ?*?*IStream, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxFile_GetCompressionOption(self: *const T, compressionOption: ?*APPX_COMPRESSION_OPTION) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxFile.VTable, self.vtable).GetCompressionOption(@ptrCast(*const IAppxFile, self), compressionOption); } // 
NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxFile_GetContentType(self: *const T, contentType: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxFile.VTable, self.vtable).GetContentType(@ptrCast(*const IAppxFile, self), contentType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxFile_GetName(self: *const T, fileName: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxFile.VTable, self.vtable).GetName(@ptrCast(*const IAppxFile, self), fileName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxFile_GetSize(self: *const T, size: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxFile.VTable, self.vtable).GetSize(@ptrCast(*const IAppxFile, self), size);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxFile_GetStream(self: *const T, stream: ?*?*IStream) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxFile.VTable, self.vtable).GetStream(@ptrCast(*const IAppxFile, self), stream);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
const IID_IAppxFilesEnumerator_Value = Guid.initString("f007eeaf-9831-411c-9847-917cdc62d1fe");
pub const IID_IAppxFilesEnumerator = &IID_IAppxFilesEnumerator_Value;
pub const IAppxFilesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxFilesEnumerator,
            file: ?*?*IAppxFile,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxFilesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxFilesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxFilesEnumerator_GetCurrent(self: *const T, file: ?*?*IAppxFile) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxFilesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxFilesEnumerator, self), file);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxFilesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxFilesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxFilesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxFilesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxFilesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxFilesEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
const IID_IAppxBlockMapReader_Value = Guid.initString("5efec991-bca3-42d1-9ec2-e92d609ec22a");
pub const IID_IAppxBlockMapReader = &IID_IAppxBlockMapReader_Value;
pub const IAppxBlockMapReader = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFile: fn(
            self: *const IAppxBlockMapReader,
            filename: ?[*:0]const u16,
            file: ?*?*IAppxBlockMapFile,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFiles: fn(
            self: *const IAppxBlockMapReader,
            enumerator: ?*?*IAppxBlockMapFilesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHashMethod: fn(
            self: *const IAppxBlockMapReader,
            hashMethod: ?*?*IUri,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetStream: fn(
            self: *const IAppxBlockMapReader,
            blockMapStream: ?*?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapReader_GetFile(self: *const T, filename: ?[*:0]const u16, file: ?*?*IAppxBlockMapFile) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapReader.VTable, self.vtable).GetFile(@ptrCast(*const IAppxBlockMapReader, self), filename, file);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapReader_GetFiles(self: *const T, enumerator: ?*?*IAppxBlockMapFilesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapReader.VTable, self.vtable).GetFiles(@ptrCast(*const IAppxBlockMapReader, self), enumerator);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapReader_GetHashMethod(self: *const T, hashMethod: ?*?*IUri) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapReader.VTable, self.vtable).GetHashMethod(@ptrCast(*const IAppxBlockMapReader, self), hashMethod);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapReader_GetStream(self: *const T, blockMapStream: ?*?*IStream) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapReader.VTable, self.vtable).GetStream(@ptrCast(*const IAppxBlockMapReader, self), blockMapStream);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
const IID_IAppxBlockMapFile_Value = Guid.initString("277672ac-4f63-42c1-8abc-beae3600eb59");
pub const IID_IAppxBlockMapFile = &IID_IAppxBlockMapFile_Value;
pub const IAppxBlockMapFile = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetBlocks: fn(
            self: *const IAppxBlockMapFile,
            blocks: ?*?*IAppxBlockMapBlocksEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLocalFileHeaderSize: fn(
            self: *const IAppxBlockMapFile,
            lfhSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetName: fn(
            self: *const IAppxBlockMapFile,
            name: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetUncompressedSize: fn(
            self: *const IAppxBlockMapFile,
            size: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ValidateFileHash: fn(
            self: *const IAppxBlockMapFile,
            fileStream: ?*IStream,
            isValid: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFile_GetBlocks(self: *const T, blocks: ?*?*IAppxBlockMapBlocksEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFile.VTable, self.vtable).GetBlocks(@ptrCast(*const IAppxBlockMapFile, self), blocks);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFile_GetLocalFileHeaderSize(self: *const T, lfhSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFile.VTable, self.vtable).GetLocalFileHeaderSize(@ptrCast(*const IAppxBlockMapFile, self), lfhSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFile_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFile.VTable, self.vtable).GetName(@ptrCast(*const IAppxBlockMapFile, self), name);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFile_GetUncompressedSize(self: *const T, size: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFile.VTable, self.vtable).GetUncompressedSize(@ptrCast(*const IAppxBlockMapFile, self), size);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFile_ValidateFileHash(self: *const T, fileStream: ?*IStream, isValid: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFile.VTable, self.vtable).ValidateFileHash(@ptrCast(*const IAppxBlockMapFile, self), fileStream, isValid);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
const IID_IAppxBlockMapFilesEnumerator_Value = Guid.initString("02b856a2-4262-4070-bacb-1a8cbbc42305");
pub const IID_IAppxBlockMapFilesEnumerator = &IID_IAppxBlockMapFilesEnumerator_Value;
pub const IAppxBlockMapFilesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxBlockMapFilesEnumerator,
            file: ?*?*IAppxBlockMapFile,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxBlockMapFilesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxBlockMapFilesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFilesEnumerator_GetCurrent(self: *const T, file: ?*?*IAppxBlockMapFile) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFilesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxBlockMapFilesEnumerator, self), file);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFilesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFilesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxBlockMapFilesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapFilesEnumerator_MoveNext(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapFilesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxBlockMapFilesEnumerator, self), hasCurrent);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
const IID_IAppxBlockMapBlock_Value = Guid.initString("75cf3930-3244-4fe0-a8c8-e0bcb270b889");
pub const IID_IAppxBlockMapBlock = &IID_IAppxBlockMapBlock_Value;
pub const IAppxBlockMapBlock = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetHash: fn(
            self: *const IAppxBlockMapBlock,
            bufferSize: ?*u32,
            buffer: ?*?*u8,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCompressedSize: fn(
            self: *const IAppxBlockMapBlock,
            size: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapBlock_GetHash(self: *const T, bufferSize: ?*u32, buffer: ?*?*u8) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapBlock.VTable, self.vtable).GetHash(@ptrCast(*const IAppxBlockMapBlock, self), bufferSize, buffer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapBlock_GetCompressedSize(self: *const T, size: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapBlock.VTable, self.vtable).GetCompressedSize(@ptrCast(*const IAppxBlockMapBlock, self), size);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
const IID_IAppxBlockMapBlocksEnumerator_Value = Guid.initString("6b429b5b-36ef-479e-b9eb-0c1482b49e16");
pub const IID_IAppxBlockMapBlocksEnumerator = &IID_IAppxBlockMapBlocksEnumerator_Value;
pub const IAppxBlockMapBlocksEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxBlockMapBlocksEnumerator,
            block: ?*?*IAppxBlockMapBlock,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxBlockMapBlocksEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxBlockMapBlocksEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapBlocksEnumerator_GetCurrent(self: *const T, block: ?*?*IAppxBlockMapBlock) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapBlocksEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxBlockMapBlocksEnumerator, self), block);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapBlocksEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapBlocksEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxBlockMapBlocksEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBlockMapBlocksEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBlockMapBlocksEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxBlockMapBlocksEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
const IID_IAppxManifestReader_Value = Guid.initString("4e1bd148-55a0-4480-a3d1-15544710637c");
pub const IID_IAppxManifestReader = &IID_IAppxManifestReader_Value;
pub const IAppxManifestReader = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetPackageId: fn(
            self: *const IAppxManifestReader,
            packageId: ?*?*IAppxManifestPackageId,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetProperties: fn(
            self: *const IAppxManifestReader,
            packageProperties: ?*?*IAppxManifestProperties,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPackageDependencies: fn(
            self: *const IAppxManifestReader,
            dependencies: ?*?*IAppxManifestPackageDependenciesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCapabilities: fn(
            self: *const IAppxManifestReader,
            capabilities: ?*APPX_CAPABILITIES,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetResources: fn(
            self: *const IAppxManifestReader,
            resources: ?*?*IAppxManifestResourcesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDeviceCapabilities: fn(
            self: *const IAppxManifestReader,
            deviceCapabilities: ?*?*IAppxManifestDeviceCapabilitiesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPrerequisite: fn(
            self: *const IAppxManifestReader,
            name: ?[*:0]const u16,
            value: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetApplications: fn(
            self: *const IAppxManifestReader,
            applications: ?*?*IAppxManifestApplicationsEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetStream: fn(
            self: *const IAppxManifestReader,
            manifestStream: ?*?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetPackageId(self: *const T, packageId: ?*?*IAppxManifestPackageId) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetPackageId(@ptrCast(*const IAppxManifestReader, self), packageId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetProperties(self: *const T, packageProperties: ?*?*IAppxManifestProperties) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetProperties(@ptrCast(*const IAppxManifestReader, self), packageProperties);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetPackageDependencies(self: *const T, dependencies: ?*?*IAppxManifestPackageDependenciesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetPackageDependencies(@ptrCast(*const IAppxManifestReader, self), dependencies);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetCapabilities(self: *const T, capabilities: ?*APPX_CAPABILITIES) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetCapabilities(@ptrCast(*const IAppxManifestReader, self), capabilities);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetResources(self: *const T, resources: ?*?*IAppxManifestResourcesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetResources(@ptrCast(*const IAppxManifestReader, self), resources);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetDeviceCapabilities(self: *const T, deviceCapabilities: ?*?*IAppxManifestDeviceCapabilitiesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetDeviceCapabilities(@ptrCast(*const IAppxManifestReader, self), deviceCapabilities);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetPrerequisite(self: *const T, name: ?[*:0]const u16, value: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetPrerequisite(@ptrCast(*const IAppxManifestReader, self), name, value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetApplications(self: *const T, applications: ?*?*IAppxManifestApplicationsEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetApplications(@ptrCast(*const IAppxManifestReader, self), applications);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader_GetStream(self: *const T, manifestStream: ?*?*IStream) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader.VTable, self.vtable).GetStream(@ptrCast(*const IAppxManifestReader, self), manifestStream);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.1'
const IID_IAppxManifestReader2_Value = Guid.initString("d06f67bc-b31d-4eba-a8af-638e73e77b4d");
pub const IID_IAppxManifestReader2 = &IID_IAppxManifestReader2_Value;
pub const IAppxManifestReader2 = extern struct {
    pub const VTable = extern struct {
        base: IAppxManifestReader.VTable,
        GetQualifiedResources: fn(
            self: *const IAppxManifestReader2,
            resources: ?*?*IAppxManifestQualifiedResourcesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IAppxManifestReader.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader2_GetQualifiedResources(self: *const T, resources: ?*?*IAppxManifestQualifiedResourcesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader2.VTable, self.vtable).GetQualifiedResources(@ptrCast(*const IAppxManifestReader2, self), resources);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestReader3_Value = Guid.initString("c43825ab-69b7-400a-9709-cc37f5a72d24");
pub const IID_IAppxManifestReader3 = &IID_IAppxManifestReader3_Value;
pub const IAppxManifestReader3 = extern struct {
    pub const VTable = extern struct {
        base: IAppxManifestReader2.VTable,
        GetCapabilitiesByCapabilityClass: fn(
            self: *const IAppxManifestReader3,
            capabilityClass: APPX_CAPABILITY_CLASS_TYPE,
            capabilities: ?*?*IAppxManifestCapabilitiesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetTargetDeviceFamilies: fn(
            self: *const IAppxManifestReader3,
            targetDeviceFamilies: ?*?*IAppxManifestTargetDeviceFamiliesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IAppxManifestReader2.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader3_GetCapabilitiesByCapabilityClass(self: *const T, capabilityClass: APPX_CAPABILITY_CLASS_TYPE, capabilities: ?*?*IAppxManifestCapabilitiesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader3.VTable, self.vtable).GetCapabilitiesByCapabilityClass(@ptrCast(*const IAppxManifestReader3, self), capabilityClass, capabilities);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader3_GetTargetDeviceFamilies(self: *const T, targetDeviceFamilies: ?*?*IAppxManifestTargetDeviceFamiliesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader3.VTable, self.vtable).GetTargetDeviceFamilies(@ptrCast(*const IAppxManifestReader3, self), targetDeviceFamilies);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestReader4_Value = Guid.initString("4579bb7c-741d-4161-b5a1-47bd3b78ad9b");
pub const IID_IAppxManifestReader4 = &IID_IAppxManifestReader4_Value;
pub const IAppxManifestReader4 = extern struct {
    pub const VTable = extern struct {
        base: IAppxManifestReader3.VTable,
        GetOptionalPackageInfo: fn(
            self: *const IAppxManifestReader4,
            optionalPackageInfo: ?*?*IAppxManifestOptionalPackageInfo,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IAppxManifestReader3.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader4_GetOptionalPackageInfo(self: *const T, optionalPackageInfo: ?*?*IAppxManifestOptionalPackageInfo) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader4.VTable, self.vtable).GetOptionalPackageInfo(@ptrCast(*const IAppxManifestReader4, self), optionalPackageInfo);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxManifestReader5_Value = Guid.initString("8d7ae132-a690-4c00-b75a-6aae1feaac80");
pub const IID_IAppxManifestReader5 = &IID_IAppxManifestReader5_Value;
pub const IAppxManifestReader5 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetMainPackageDependencies: fn(
            self: *const IAppxManifestReader5,
            mainPackageDependencies: ?*?*IAppxManifestMainPackageDependenciesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader5_GetMainPackageDependencies(self: *const T, mainPackageDependencies: ?*?*IAppxManifestMainPackageDependenciesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader5.VTable, self.vtable).GetMainPackageDependencies(@ptrCast(*const IAppxManifestReader5, self), mainPackageDependencies);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxManifestReader6_Value = Guid.initString("34deaca4-d3c0-4e3e-b312-e42625e3807e");
pub const IID_IAppxManifestReader6 = &IID_IAppxManifestReader6_Value;
pub const IAppxManifestReader6 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetIsNonQualifiedResourcePackage: fn(
            self: *const IAppxManifestReader6,
            isNonQualifiedResourcePackage: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader6_GetIsNonQualifiedResourcePackage(self: *const T, isNonQualifiedResourcePackage: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader6.VTable, self.vtable).GetIsNonQualifiedResourcePackage(@ptrCast(*const IAppxManifestReader6, self), isNonQualifiedResourcePackage);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestReader7_Value = Guid.initString("8efe6f27-0ce0-4988-b32d-738eb63db3b7");
pub const IID_IAppxManifestReader7 = &IID_IAppxManifestReader7_Value;
pub const IAppxManifestReader7 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDriverDependencies: fn(
            self: *const IAppxManifestReader7,
            driverDependencies: ?*?*IAppxManifestDriverDependenciesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetOSPackageDependencies: fn(
            self: *const IAppxManifestReader7,
            osPackageDependencies: ?*?*IAppxManifestOSPackageDependenciesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHostRuntimeDependencies: fn(
            self: *const IAppxManifestReader7,
            hostRuntimeDependencies: ?*?*IAppxManifestHostRuntimeDependenciesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader7_GetDriverDependencies(self: *const T, driverDependencies: ?*?*IAppxManifestDriverDependenciesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader7.VTable, self.vtable).GetDriverDependencies(@ptrCast(*const IAppxManifestReader7, self), driverDependencies);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader7_GetOSPackageDependencies(self: *const T, osPackageDependencies: ?*?*IAppxManifestOSPackageDependenciesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader7.VTable, self.vtable).GetOSPackageDependencies(@ptrCast(*const IAppxManifestReader7, self), osPackageDependencies);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestReader7_GetHostRuntimeDependencies(self: *const T, hostRuntimeDependencies: ?*?*IAppxManifestHostRuntimeDependenciesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestReader7.VTable, self.vtable).GetHostRuntimeDependencies(@ptrCast(*const IAppxManifestReader7, self), hostRuntimeDependencies);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestDriverDependenciesEnumerator_Value = Guid.initString("fe039db2-467f-4755-8404-8f5eb6865b33");
pub const IID_IAppxManifestDriverDependenciesEnumerator = &IID_IAppxManifestDriverDependenciesEnumerator_Value;
pub const IAppxManifestDriverDependenciesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestDriverDependenciesEnumerator,
            driverDependency: ?*?*IAppxManifestDriverDependency,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestDriverDependenciesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestDriverDependenciesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverDependenciesEnumerator_GetCurrent(self: *const T, driverDependency: ?*?*IAppxManifestDriverDependency) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverDependenciesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestDriverDependenciesEnumerator, self), driverDependency);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverDependenciesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverDependenciesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestDriverDependenciesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverDependenciesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverDependenciesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestDriverDependenciesEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestDriverDependency_Value = Guid.initString("1210cb94-5a92-4602-be24-79f318af4af9");
pub const IID_IAppxManifestDriverDependency = &IID_IAppxManifestDriverDependency_Value;
pub const IAppxManifestDriverDependency = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDriverConstraints: fn(
            self: *const IAppxManifestDriverDependency,
            driverConstraints: ?*?*IAppxManifestDriverConstraintsEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverDependency_GetDriverConstraints(self: *const T, driverConstraints: ?*?*IAppxManifestDriverConstraintsEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverDependency.VTable, self.vtable).GetDriverConstraints(@ptrCast(*const IAppxManifestDriverDependency, self), driverConstraints);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestDriverConstraintsEnumerator_Value = Guid.initString("d402b2d1-f600-49e0-95e6-975d8da13d89");
pub const IID_IAppxManifestDriverConstraintsEnumerator = &IID_IAppxManifestDriverConstraintsEnumerator_Value;
pub const IAppxManifestDriverConstraintsEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestDriverConstraintsEnumerator,
            driverConstraint: ?*?*IAppxManifestDriverConstraint,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestDriverConstraintsEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestDriverConstraintsEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverConstraintsEnumerator_GetCurrent(self: *const T, driverConstraint: ?*?*IAppxManifestDriverConstraint) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverConstraintsEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestDriverConstraintsEnumerator, self), driverConstraint);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverConstraintsEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverConstraintsEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestDriverConstraintsEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverConstraintsEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverConstraintsEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestDriverConstraintsEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestDriverConstraint_Value = Guid.initString("c031bee4-bbcc-48ea-a237-c34045c80a07");
pub const IID_IAppxManifestDriverConstraint = &IID_IAppxManifestDriverConstraint_Value;
pub const IAppxManifestDriverConstraint = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetName: fn(
            self: *const IAppxManifestDriverConstraint,
            name: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMinVersion: fn(
            self: *const IAppxManifestDriverConstraint,
            minVersion: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMinDate: fn(
            self: *const IAppxManifestDriverConstraint,
            minDate: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverConstraint_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverConstraint.VTable, self.vtable).GetName(@ptrCast(*const IAppxManifestDriverConstraint, self), name);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverConstraint_GetMinVersion(self: *const T, minVersion: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverConstraint.VTable, self.vtable).GetMinVersion(@ptrCast(*const IAppxManifestDriverConstraint, self), minVersion);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDriverConstraint_GetMinDate(self: *const T, minDate: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDriverConstraint.VTable, self.vtable).GetMinDate(@ptrCast(*const IAppxManifestDriverConstraint, self), minDate);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestOSPackageDependenciesEnumerator_Value = Guid.initString("b84e2fc3-f8ec-4bc1-8ae2-156346f5ffea");
pub const IID_IAppxManifestOSPackageDependenciesEnumerator = &IID_IAppxManifestOSPackageDependenciesEnumerator_Value;
pub const IAppxManifestOSPackageDependenciesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestOSPackageDependenciesEnumerator,
            osPackageDependency: ?*?*IAppxManifestOSPackageDependency,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestOSPackageDependenciesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestOSPackageDependenciesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestOSPackageDependenciesEnumerator_GetCurrent(self: *const T, osPackageDependency: ?*?*IAppxManifestOSPackageDependency) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestOSPackageDependenciesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestOSPackageDependenciesEnumerator, self), osPackageDependency);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestOSPackageDependenciesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestOSPackageDependenciesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestOSPackageDependenciesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestOSPackageDependenciesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestOSPackageDependenciesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestOSPackageDependenciesEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestOSPackageDependency_Value = Guid.initString("154995ee-54a6-4f14-ac97-d8cf0519644b");
pub const IID_IAppxManifestOSPackageDependency = &IID_IAppxManifestOSPackageDependency_Value;
pub const IAppxManifestOSPackageDependency = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetName: fn(
            self: *const IAppxManifestOSPackageDependency,
            name: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetVersion: fn(
            self: *const IAppxManifestOSPackageDependency,
            version: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestOSPackageDependency_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestOSPackageDependency.VTable, self.vtable).GetName(@ptrCast(*const IAppxManifestOSPackageDependency, self), name);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestOSPackageDependency_GetVersion(self: *const T, version: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestOSPackageDependency.VTable, self.vtable).GetVersion(@ptrCast(*const IAppxManifestOSPackageDependency, self), version);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAppxManifestHostRuntimeDependenciesEnumerator_Value = Guid.initString("6427a646-7f49-433e-b1a6-0da309f6885a");
pub const IID_IAppxManifestHostRuntimeDependenciesEnumerator = &IID_IAppxManifestHostRuntimeDependenciesEnumerator_Value;
pub const IAppxManifestHostRuntimeDependenciesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestHostRuntimeDependenciesEnumerator,
            hostRuntimeDependency: ?*?*IAppxManifestHostRuntimeDependency,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestHostRuntimeDependenciesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestHostRuntimeDependenciesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestHostRuntimeDependenciesEnumerator_GetCurrent(self: *const T, hostRuntimeDependency: ?*?*IAppxManifestHostRuntimeDependency) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestHostRuntimeDependenciesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestHostRuntimeDependenciesEnumerator, self), hostRuntimeDependency);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestHostRuntimeDependenciesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestHostRuntimeDependenciesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestHostRuntimeDependenciesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn 
IAppxManifestHostRuntimeDependenciesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestHostRuntimeDependenciesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestHostRuntimeDependenciesEnumerator, self), hasNext); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IAppxManifestHostRuntimeDependency_Value = Guid.initString("3455d234-8414-410d-95c7-7b35255b8391"); pub const IID_IAppxManifestHostRuntimeDependency = &IID_IAppxManifestHostRuntimeDependency_Value; pub const IAppxManifestHostRuntimeDependency = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetName: fn( self: *const IAppxManifestHostRuntimeDependency, name: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPublisher: fn( self: *const IAppxManifestHostRuntimeDependency, publisher: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMinVersion: fn( self: *const IAppxManifestHostRuntimeDependency, minVersion: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestHostRuntimeDependency_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestHostRuntimeDependency.VTable, self.vtable).GetName(@ptrCast(*const IAppxManifestHostRuntimeDependency, self), name); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestHostRuntimeDependency_GetPublisher(self: *const T, publisher: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestHostRuntimeDependency.VTable, self.vtable).GetPublisher(@ptrCast(*const IAppxManifestHostRuntimeDependency, self), publisher); } // NOTE: method is namespaced with interface name to avoid conflicts for 
now pub fn IAppxManifestHostRuntimeDependency_GetMinVersion(self: *const T, minVersion: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestHostRuntimeDependency.VTable, self.vtable).GetMinVersion(@ptrCast(*const IAppxManifestHostRuntimeDependency, self), minVersion); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IAppxManifestHostRuntimeDependency2_Value = Guid.initString("c26f23a8-ee10-4ad6-b898-2b4d7aebfe6a"); pub const IID_IAppxManifestHostRuntimeDependency2 = &IID_IAppxManifestHostRuntimeDependency2_Value; pub const IAppxManifestHostRuntimeDependency2 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetPackageFamilyName: fn( self: *const IAppxManifestHostRuntimeDependency2, packageFamilyName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestHostRuntimeDependency2_GetPackageFamilyName(self: *const T, packageFamilyName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestHostRuntimeDependency2.VTable, self.vtable).GetPackageFamilyName(@ptrCast(*const IAppxManifestHostRuntimeDependency2, self), packageFamilyName); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxManifestOptionalPackageInfo_Value = Guid.initString("2634847d-5b5d-4fe5-a243-002ff95edc7e"); pub const IID_IAppxManifestOptionalPackageInfo = &IID_IAppxManifestOptionalPackageInfo_Value; pub const IAppxManifestOptionalPackageInfo = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetIsOptionalPackage: fn( self: *const IAppxManifestOptionalPackageInfo, isOptionalPackage: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMainPackageName: fn( self: *const 
IAppxManifestOptionalPackageInfo, mainPackageName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestOptionalPackageInfo_GetIsOptionalPackage(self: *const T, isOptionalPackage: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestOptionalPackageInfo.VTable, self.vtable).GetIsOptionalPackage(@ptrCast(*const IAppxManifestOptionalPackageInfo, self), isOptionalPackage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestOptionalPackageInfo_GetMainPackageName(self: *const T, mainPackageName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestOptionalPackageInfo.VTable, self.vtable).GetMainPackageName(@ptrCast(*const IAppxManifestOptionalPackageInfo, self), mainPackageName); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxManifestMainPackageDependenciesEnumerator_Value = Guid.initString("a99c4f00-51d2-4f0f-ba46-7ed5255ebdff"); pub const IID_IAppxManifestMainPackageDependenciesEnumerator = &IID_IAppxManifestMainPackageDependenciesEnumerator_Value; pub const IAppxManifestMainPackageDependenciesEnumerator = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCurrent: fn( self: *const IAppxManifestMainPackageDependenciesEnumerator, mainPackageDependency: ?*?*IAppxManifestMainPackageDependency, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetHasCurrent: fn( self: *const IAppxManifestMainPackageDependenciesEnumerator, hasCurrent: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveNext: fn( self: *const IAppxManifestMainPackageDependenciesEnumerator, hasNext: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; 
vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestMainPackageDependenciesEnumerator_GetCurrent(self: *const T, mainPackageDependency: ?*?*IAppxManifestMainPackageDependency) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestMainPackageDependenciesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestMainPackageDependenciesEnumerator, self), mainPackageDependency); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestMainPackageDependenciesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestMainPackageDependenciesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestMainPackageDependenciesEnumerator, self), hasCurrent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestMainPackageDependenciesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestMainPackageDependenciesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestMainPackageDependenciesEnumerator, self), hasNext); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxManifestMainPackageDependency_Value = Guid.initString("05d0611c-bc29-46d5-97e2-84b9c79bd8ae"); pub const IID_IAppxManifestMainPackageDependency = &IID_IAppxManifestMainPackageDependency_Value; pub const IAppxManifestMainPackageDependency = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetName: fn( self: *const IAppxManifestMainPackageDependency, name: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPublisher: fn( self: *const IAppxManifestMainPackageDependency, 
publisher: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPackageFamilyName: fn( self: *const IAppxManifestMainPackageDependency, packageFamilyName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestMainPackageDependency_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestMainPackageDependency.VTable, self.vtable).GetName(@ptrCast(*const IAppxManifestMainPackageDependency, self), name); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestMainPackageDependency_GetPublisher(self: *const T, publisher: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestMainPackageDependency.VTable, self.vtable).GetPublisher(@ptrCast(*const IAppxManifestMainPackageDependency, self), publisher); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestMainPackageDependency_GetPackageFamilyName(self: *const T, packageFamilyName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestMainPackageDependency.VTable, self.vtable).GetPackageFamilyName(@ptrCast(*const IAppxManifestMainPackageDependency, self), packageFamilyName); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxManifestPackageId_Value = Guid.initString("283ce2d7-7153-4a91-9649-7a0f7240945f"); pub const IID_IAppxManifestPackageId = &IID_IAppxManifestPackageId_Value; pub const IAppxManifestPackageId = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetName: fn( self: *const IAppxManifestPackageId, name: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetArchitecture: fn( self: *const 
IAppxManifestPackageId, architecture: ?*APPX_PACKAGE_ARCHITECTURE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPublisher: fn( self: *const IAppxManifestPackageId, publisher: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVersion: fn( self: *const IAppxManifestPackageId, packageVersion: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetResourceId: fn( self: *const IAppxManifestPackageId, resourceId: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ComparePublisher: fn( self: *const IAppxManifestPackageId, other: ?[*:0]const u16, isSame: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPackageFullName: fn( self: *const IAppxManifestPackageId, packageFullName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPackageFamilyName: fn( self: *const IAppxManifestPackageId, packageFamilyName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, self.vtable).GetName(@ptrCast(*const IAppxManifestPackageId, self), name); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_GetArchitecture(self: *const T, architecture: ?*APPX_PACKAGE_ARCHITECTURE) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, self.vtable).GetArchitecture(@ptrCast(*const IAppxManifestPackageId, self), architecture); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_GetPublisher(self: *const T, publisher: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, 
self.vtable).GetPublisher(@ptrCast(*const IAppxManifestPackageId, self), publisher); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_GetVersion(self: *const T, packageVersion: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, self.vtable).GetVersion(@ptrCast(*const IAppxManifestPackageId, self), packageVersion); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_GetResourceId(self: *const T, resourceId: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, self.vtable).GetResourceId(@ptrCast(*const IAppxManifestPackageId, self), resourceId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_ComparePublisher(self: *const T, other: ?[*:0]const u16, isSame: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, self.vtable).ComparePublisher(@ptrCast(*const IAppxManifestPackageId, self), other, isSame); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_GetPackageFullName(self: *const T, packageFullName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, self.vtable).GetPackageFullName(@ptrCast(*const IAppxManifestPackageId, self), packageFullName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId_GetPackageFamilyName(self: *const T, packageFamilyName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId.VTable, self.vtable).GetPackageFamilyName(@ptrCast(*const IAppxManifestPackageId, self), packageFamilyName); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxManifestPackageId2_Value = 
Guid.initString("2256999d-d617-42f1-880e-0ba4542319d5"); pub const IID_IAppxManifestPackageId2 = &IID_IAppxManifestPackageId2_Value; pub const IAppxManifestPackageId2 = extern struct { pub const VTable = extern struct { base: IAppxManifestPackageId.VTable, GetArchitecture2: fn( self: *const IAppxManifestPackageId2, architecture: ?*APPX_PACKAGE_ARCHITECTURE2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IAppxManifestPackageId.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageId2_GetArchitecture2(self: *const T, architecture: ?*APPX_PACKAGE_ARCHITECTURE2) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageId2.VTable, self.vtable).GetArchitecture2(@ptrCast(*const IAppxManifestPackageId2, self), architecture); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxManifestProperties_Value = Guid.initString("03faf64d-f26f-4b2c-aaf7-8fe7789b8bca"); pub const IID_IAppxManifestProperties = &IID_IAppxManifestProperties_Value; pub const IAppxManifestProperties = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetBoolValue: fn( self: *const IAppxManifestProperties, name: ?[*:0]const u16, value: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetStringValue: fn( self: *const IAppxManifestProperties, name: ?[*:0]const u16, value: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestProperties_GetBoolValue(self: *const T, name: ?[*:0]const u16, value: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestProperties.VTable, 
self.vtable).GetBoolValue(@ptrCast(*const IAppxManifestProperties, self), name, value); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestProperties_GetStringValue(self: *const T, name: ?[*:0]const u16, value: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestProperties.VTable, self.vtable).GetStringValue(@ptrCast(*const IAppxManifestProperties, self), name, value); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IAppxManifestTargetDeviceFamiliesEnumerator_Value = Guid.initString("36537f36-27a4-4788-88c0-733819575017"); pub const IID_IAppxManifestTargetDeviceFamiliesEnumerator = &IID_IAppxManifestTargetDeviceFamiliesEnumerator_Value; pub const IAppxManifestTargetDeviceFamiliesEnumerator = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCurrent: fn( self: *const IAppxManifestTargetDeviceFamiliesEnumerator, targetDeviceFamily: ?*?*IAppxManifestTargetDeviceFamily, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetHasCurrent: fn( self: *const IAppxManifestTargetDeviceFamiliesEnumerator, hasCurrent: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveNext: fn( self: *const IAppxManifestTargetDeviceFamiliesEnumerator, hasNext: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestTargetDeviceFamiliesEnumerator_GetCurrent(self: *const T, targetDeviceFamily: ?*?*IAppxManifestTargetDeviceFamily) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestTargetDeviceFamiliesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestTargetDeviceFamiliesEnumerator, self), targetDeviceFamily); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IAppxManifestTargetDeviceFamiliesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestTargetDeviceFamiliesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestTargetDeviceFamiliesEnumerator, self), hasCurrent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestTargetDeviceFamiliesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestTargetDeviceFamiliesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestTargetDeviceFamiliesEnumerator, self), hasNext); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxManifestTargetDeviceFamily_Value = Guid.initString("9091b09b-c8d5-4f31-8687-a338259faefb"); pub const IID_IAppxManifestTargetDeviceFamily = &IID_IAppxManifestTargetDeviceFamily_Value; pub const IAppxManifestTargetDeviceFamily = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetName: fn( self: *const IAppxManifestTargetDeviceFamily, name: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMinVersion: fn( self: *const IAppxManifestTargetDeviceFamily, minVersion: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMaxVersionTested: fn( self: *const IAppxManifestTargetDeviceFamily, maxVersionTested: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestTargetDeviceFamily_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestTargetDeviceFamily.VTable, self.vtable).GetName(@ptrCast(*const IAppxManifestTargetDeviceFamily, self), name); } // NOTE: 
method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestTargetDeviceFamily_GetMinVersion(self: *const T, minVersion: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestTargetDeviceFamily.VTable, self.vtable).GetMinVersion(@ptrCast(*const IAppxManifestTargetDeviceFamily, self), minVersion); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestTargetDeviceFamily_GetMaxVersionTested(self: *const T, maxVersionTested: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestTargetDeviceFamily.VTable, self.vtable).GetMaxVersionTested(@ptrCast(*const IAppxManifestTargetDeviceFamily, self), maxVersionTested); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxManifestPackageDependenciesEnumerator_Value = Guid.initString("b43bbcf9-65a6-42dd-bac0-8c6741e7f5a4"); pub const IID_IAppxManifestPackageDependenciesEnumerator = &IID_IAppxManifestPackageDependenciesEnumerator_Value; pub const IAppxManifestPackageDependenciesEnumerator = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCurrent: fn( self: *const IAppxManifestPackageDependenciesEnumerator, dependency: ?*?*IAppxManifestPackageDependency, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetHasCurrent: fn( self: *const IAppxManifestPackageDependenciesEnumerator, hasCurrent: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveNext: fn( self: *const IAppxManifestPackageDependenciesEnumerator, hasNext: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageDependenciesEnumerator_GetCurrent(self: *const T, dependency: ?*?*IAppxManifestPackageDependency) 
callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageDependenciesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestPackageDependenciesEnumerator, self), dependency); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageDependenciesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageDependenciesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestPackageDependenciesEnumerator, self), hasCurrent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxManifestPackageDependenciesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxManifestPackageDependenciesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestPackageDependenciesEnumerator, self), hasNext); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_IAppxManifestPackageDependency_Value = Guid.initString("e4946b59-733e-43f0-a724-3bde4c1285a0"); pub const IID_IAppxManifestPackageDependency = &IID_IAppxManifestPackageDependency_Value; pub const IAppxManifestPackageDependency = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetName: fn( self: *const IAppxManifestPackageDependency, name: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPublisher: fn( self: *const IAppxManifestPackageDependency, publisher: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMinVersion: fn( self: *const IAppxManifestPackageDependency, minVersion: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub 
fn IAppxManifestPackageDependency_GetName(self: *const T, name: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestPackageDependency.VTable, self.vtable).GetName(@ptrCast(*const IAppxManifestPackageDependency, self), name);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestPackageDependency_GetPublisher(self: *const T, publisher: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestPackageDependency.VTable, self.vtable).GetPublisher(@ptrCast(*const IAppxManifestPackageDependency, self), publisher);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestPackageDependency_GetMinVersion(self: *const T, minVersion: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestPackageDependency.VTable, self.vtable).GetMinVersion(@ptrCast(*const IAppxManifestPackageDependency, self), minVersion);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
// Extends IAppxManifestPackageDependency (note base is its VTable, and the mixin re-exports its methods)
// with the MaxMajorVersionTested attribute.
const IID_IAppxManifestPackageDependency2_Value = Guid.initString("dda0b713-f3ff-49d3-898a-2786780c5d98");
pub const IID_IAppxManifestPackageDependency2 = &IID_IAppxManifestPackageDependency2_Value;
pub const IAppxManifestPackageDependency2 = extern struct {
    pub const VTable = extern struct {
        base: IAppxManifestPackageDependency.VTable,
        GetMaxMajorVersionTested: fn(
            self: *const IAppxManifestPackageDependency2,
            maxMajorVersionTested: ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IAppxManifestPackageDependency.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestPackageDependency2_GetMaxMajorVersionTested(self: *const T, maxMajorVersionTested: ?*u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestPackageDependency2.VTable, self.vtable).GetMaxMajorVersionTested(@ptrCast(*const IAppxManifestPackageDependency2, self), maxMajorVersionTested);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Separate interface (base is IUnknown, not IAppxManifestPackageDependency) exposing the IsOptional flag.
const IID_IAppxManifestPackageDependency3_Value = Guid.initString("1ac56374-6198-4d6b-92e4-749d5ab8a895");
pub const IID_IAppxManifestPackageDependency3 = &IID_IAppxManifestPackageDependency3_Value;
pub const IAppxManifestPackageDependency3 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetIsOptional: fn(
            self: *const IAppxManifestPackageDependency3,
            isOptional: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestPackageDependency3_GetIsOptional(self: *const T, isOptional: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestPackageDependency3.VTable, self.vtable).GetIsOptional(@ptrCast(*const IAppxManifestPackageDependency3, self), isOptional);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
// Forward-only enumerator over manifest resource strings (each item is a PWSTR).
const IID_IAppxManifestResourcesEnumerator_Value = Guid.initString("de4dfbbd-881a-48bb-858c-d6f2baeae6ed");
pub const IID_IAppxManifestResourcesEnumerator = &IID_IAppxManifestResourcesEnumerator_Value;
pub const IAppxManifestResourcesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestResourcesEnumerator,
            resource: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestResourcesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestResourcesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T:
type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestResourcesEnumerator_GetCurrent(self: *const T, resource: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestResourcesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestResourcesEnumerator, self), resource);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestResourcesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestResourcesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestResourcesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestResourcesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestResourcesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestResourcesEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
// Forward-only enumerator over device-capability names (each item is a PWSTR).
const IID_IAppxManifestDeviceCapabilitiesEnumerator_Value = Guid.initString("30204541-427b-4a1c-bacf-655bf463a540");
pub const IID_IAppxManifestDeviceCapabilitiesEnumerator = &IID_IAppxManifestDeviceCapabilitiesEnumerator_Value;
pub const IAppxManifestDeviceCapabilitiesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestDeviceCapabilitiesEnumerator,
            deviceCapability: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestDeviceCapabilitiesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestDeviceCapabilitiesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDeviceCapabilitiesEnumerator_GetCurrent(self: *const T, deviceCapability: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDeviceCapabilitiesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestDeviceCapabilitiesEnumerator, self), deviceCapability);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDeviceCapabilitiesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDeviceCapabilitiesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestDeviceCapabilitiesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestDeviceCapabilitiesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestDeviceCapabilitiesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestDeviceCapabilitiesEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Forward-only enumerator over capability names (each item is a PWSTR).
const IID_IAppxManifestCapabilitiesEnumerator_Value = Guid.initString("11d22258-f470-42c1-b291-8361c5437e41");
pub const IID_IAppxManifestCapabilitiesEnumerator = &IID_IAppxManifestCapabilitiesEnumerator_Value;
pub const IAppxManifestCapabilitiesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestCapabilitiesEnumerator,
            capability: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestCapabilitiesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
MoveNext: fn(
            self: *const IAppxManifestCapabilitiesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestCapabilitiesEnumerator_GetCurrent(self: *const T, capability: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestCapabilitiesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestCapabilitiesEnumerator, self), capability);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestCapabilitiesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestCapabilitiesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestCapabilitiesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestCapabilitiesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestCapabilitiesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestCapabilitiesEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
// Forward-only enumerator over IAppxManifestApplication items.
const IID_IAppxManifestApplicationsEnumerator_Value = Guid.initString("9eb8a55a-f04b-4d0d-808d-686185d4847a");
pub const IID_IAppxManifestApplicationsEnumerator = &IID_IAppxManifestApplicationsEnumerator_Value;
pub const IAppxManifestApplicationsEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestApplicationsEnumerator,
            application: ?*?*IAppxManifestApplication,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestApplicationsEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestApplicationsEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestApplicationsEnumerator_GetCurrent(self: *const T, application: ?*?*IAppxManifestApplication) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestApplicationsEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestApplicationsEnumerator, self), application);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestApplicationsEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestApplicationsEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestApplicationsEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestApplicationsEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestApplicationsEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestApplicationsEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.0'
// One <Application> element: string attribute lookup by name, plus the application user-model id.
const IID_IAppxManifestApplication_Value = Guid.initString("5da89bf4-3773-46be-b650-7e744863b7e8");
pub const IID_IAppxManifestApplication = &IID_IAppxManifestApplication_Value;
pub const IAppxManifestApplication = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetStringValue: fn(
            self: *const IAppxManifestApplication,
            name: ?[*:0]const u16,
            value: ?*?PWSTR,
        )
callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAppUserModelId: fn(
            self: *const IAppxManifestApplication,
            appUserModelId: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestApplication_GetStringValue(self: *const T, name: ?[*:0]const u16, value: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestApplication.VTable, self.vtable).GetStringValue(@ptrCast(*const IAppxManifestApplication, self), name, value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestApplication_GetAppUserModelId(self: *const T, appUserModelId: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestApplication.VTable, self.vtable).GetAppUserModelId(@ptrCast(*const IAppxManifestApplication, self), appUserModelId);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Forward-only enumerator over IAppxManifestQualifiedResource items.
const IID_IAppxManifestQualifiedResourcesEnumerator_Value = Guid.initString("8ef6adfe-3762-4a8f-9373-2fc5d444c8d2");
pub const IID_IAppxManifestQualifiedResourcesEnumerator = &IID_IAppxManifestQualifiedResourcesEnumerator_Value;
pub const IAppxManifestQualifiedResourcesEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxManifestQualifiedResourcesEnumerator,
            resource: ?*?*IAppxManifestQualifiedResource,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxManifestQualifiedResourcesEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxManifestQualifiedResourcesEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestQualifiedResourcesEnumerator_GetCurrent(self: *const T, resource: ?*?*IAppxManifestQualifiedResource) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestQualifiedResourcesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxManifestQualifiedResourcesEnumerator, self), resource);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestQualifiedResourcesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestQualifiedResourcesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxManifestQualifiedResourcesEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestQualifiedResourcesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestQualifiedResourcesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxManifestQualifiedResourcesEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// A qualified resource: language, scale, and DirectX feature-level qualifiers.
const IID_IAppxManifestQualifiedResource_Value = Guid.initString("3b53a497-3c5c-48d1-9ea3-bb7eac8cd7d4");
pub const IID_IAppxManifestQualifiedResource = &IID_IAppxManifestQualifiedResource_Value;
pub const IAppxManifestQualifiedResource = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetLanguage: fn(
            self: *const IAppxManifestQualifiedResource,
            language: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetScale: fn(
            self: *const IAppxManifestQualifiedResource,
            scale: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDXFeatureLevel: fn(
            self: *const IAppxManifestQualifiedResource,
            dxFeatureLevel: ?*DX_FEATURE_LEVEL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const
VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestQualifiedResource_GetLanguage(self: *const T, language: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestQualifiedResource.VTable, self.vtable).GetLanguage(@ptrCast(*const IAppxManifestQualifiedResource, self), language);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestQualifiedResource_GetScale(self: *const T, scale: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestQualifiedResource.VTable, self.vtable).GetScale(@ptrCast(*const IAppxManifestQualifiedResource, self), scale);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxManifestQualifiedResource_GetDXFeatureLevel(self: *const T, dxFeatureLevel: ?*DX_FEATURE_LEVEL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxManifestQualifiedResource.VTable, self.vtable).GetDXFeatureLevel(@ptrCast(*const IAppxManifestQualifiedResource, self), dxFeatureLevel);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.1'
// Factory for bundle writer / reader / manifest-reader objects, each created over an IStream.
const IID_IAppxBundleFactory_Value = Guid.initString("bba65864-965f-4a5f-855f-f074bdbf3a7b");
pub const IID_IAppxBundleFactory = &IID_IAppxBundleFactory_Value;
pub const IAppxBundleFactory = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateBundleWriter: fn(
            self: *const IAppxBundleFactory,
            outputStream: ?*IStream,
            bundleVersion: u64,
            bundleWriter: ?*?*IAppxBundleWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateBundleReader: fn(
            self: *const IAppxBundleFactory,
            inputStream: ?*IStream,
            bundleReader: ?*?*IAppxBundleReader,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateBundleManifestReader: fn(
            self: *const IAppxBundleFactory,
            inputStream: ?*IStream,
            manifestReader: ?*?*IAppxBundleManifestReader,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleFactory_CreateBundleWriter(self: *const T, outputStream: ?*IStream, bundleVersion: u64, bundleWriter: ?*?*IAppxBundleWriter) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleFactory.VTable, self.vtable).CreateBundleWriter(@ptrCast(*const IAppxBundleFactory, self), outputStream, bundleVersion, bundleWriter);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleFactory_CreateBundleReader(self: *const T, inputStream: ?*IStream, bundleReader: ?*?*IAppxBundleReader) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleFactory.VTable, self.vtable).CreateBundleReader(@ptrCast(*const IAppxBundleFactory, self), inputStream, bundleReader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleFactory_CreateBundleManifestReader(self: *const T, inputStream: ?*IStream, manifestReader: ?*?*IAppxBundleManifestReader) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleFactory.VTable, self.vtable).CreateBundleManifestReader(@ptrCast(*const IAppxBundleFactory, self), inputStream, manifestReader);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.1'
// Writes payload packages into a bundle; Close finalizes the bundle.
const IID_IAppxBundleWriter_Value = Guid.initString("ec446fe8-bfec-4c64-ab4f-49f038f0c6d2");
pub const IID_IAppxBundleWriter = &IID_IAppxBundleWriter_Value;
pub const IAppxBundleWriter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddPayloadPackage: fn(
            self: *const IAppxBundleWriter,
            fileName: ?[*:0]const u16,
            packageStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Close: fn(
            self: *const
IAppxBundleWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter_AddPayloadPackage(self: *const T, fileName: ?[*:0]const u16, packageStream: ?*IStream) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter.VTable, self.vtable).AddPayloadPackage(@ptrCast(*const IAppxBundleWriter, self), fileName, packageStream);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter_Close(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter.VTable, self.vtable).Close(@ptrCast(*const IAppxBundleWriter, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
// Adds a reference to an external (non-payload) package to the bundle being written.
const IID_IAppxBundleWriter2_Value = Guid.initString("6d8fe971-01cc-49a0-b685-233851279962");
pub const IID_IAppxBundleWriter2 = &IID_IAppxBundleWriter2_Value;
pub const IAppxBundleWriter2 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddExternalPackageReference: fn(
            self: *const IAppxBundleWriter2,
            fileName: ?[*:0]const u16,
            inputStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter2_AddExternalPackageReference(self: *const T, fileName: ?[*:0]const u16, inputStream: ?*IStream) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter2.VTable, self.vtable).AddExternalPackageReference(@ptrCast(*const IAppxBundleWriter2, self), fileName, inputStream);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
// Adds package references; its Close variant takes a hash-method string.
const IID_IAppxBundleWriter3_Value = Guid.initString("ad711152-f969-4193-82d5-9ddf2786d21a");
pub const IID_IAppxBundleWriter3 = &IID_IAppxBundleWriter3_Value;
pub const IAppxBundleWriter3 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddPackageReference: fn(
            self: *const IAppxBundleWriter3,
            fileName: ?[*:0]const u16,
            inputStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Close: fn(
            self: *const IAppxBundleWriter3,
            hashMethodString: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter3_AddPackageReference(self: *const T, fileName: ?[*:0]const u16, inputStream: ?*IStream) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter3.VTable, self.vtable).AddPackageReference(@ptrCast(*const IAppxBundleWriter3, self), fileName, inputStream);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter3_Close(self: *const T, hashMethodString: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter3.VTable, self.vtable).Close(@ptrCast(*const IAppxBundleWriter3, self), hashMethodString);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
// Writer variants that additionally flag a package as the default applicable package.
const IID_IAppxBundleWriter4_Value = Guid.initString("9cd9d523-5009-4c01-9882-dc029fbd47a3");
pub const IID_IAppxBundleWriter4 = &IID_IAppxBundleWriter4_Value;
pub const IAppxBundleWriter4 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddPayloadPackage: fn(
            self: *const IAppxBundleWriter4,
            fileName: ?[*:0]const u16,
            packageStream: ?*IStream,
            isDefaultApplicablePackage: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddPackageReference: fn(
self: *const IAppxBundleWriter4,
            fileName: ?[*:0]const u16,
            inputStream: ?*IStream,
            isDefaultApplicablePackage: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddExternalPackageReference: fn(
            self: *const IAppxBundleWriter4,
            fileName: ?[*:0]const u16,
            inputStream: ?*IStream,
            isDefaultApplicablePackage: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter4_AddPayloadPackage(self: *const T, fileName: ?[*:0]const u16, packageStream: ?*IStream, isDefaultApplicablePackage: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter4.VTable, self.vtable).AddPayloadPackage(@ptrCast(*const IAppxBundleWriter4, self), fileName, packageStream, isDefaultApplicablePackage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter4_AddPackageReference(self: *const T, fileName: ?[*:0]const u16, inputStream: ?*IStream, isDefaultApplicablePackage: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter4.VTable, self.vtable).AddPackageReference(@ptrCast(*const IAppxBundleWriter4, self), fileName, inputStream, isDefaultApplicablePackage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleWriter4_AddExternalPackageReference(self: *const T, fileName: ?[*:0]const u16, inputStream: ?*IStream, isDefaultApplicablePackage: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleWriter4.VTable, self.vtable).AddExternalPackageReference(@ptrCast(*const IAppxBundleWriter4, self), fileName, inputStream, isDefaultApplicablePackage);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.1'
// Read access to a bundle: footprint files, block map, manifest, and payload packages.
const IID_IAppxBundleReader_Value = Guid.initString("dd75b8c0-ba76-43b0-ae0f-68656a1dc5c8");
pub const IID_IAppxBundleReader = &IID_IAppxBundleReader_Value;
pub const IAppxBundleReader = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFootprintFile: fn(
            self: *const IAppxBundleReader,
            fileType: APPX_BUNDLE_FOOTPRINT_FILE_TYPE,
            footprintFile: ?*?*IAppxFile,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetBlockMap: fn(
            self: *const IAppxBundleReader,
            blockMapReader: ?*?*IAppxBlockMapReader,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetManifest: fn(
            self: *const IAppxBundleReader,
            manifestReader: ?*?*IAppxBundleManifestReader,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPayloadPackages: fn(
            self: *const IAppxBundleReader,
            payloadPackages: ?*?*IAppxFilesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPayloadPackage: fn(
            self: *const IAppxBundleReader,
            fileName: ?[*:0]const u16,
            payloadPackage: ?*?*IAppxFile,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleReader_GetFootprintFile(self: *const T, fileType: APPX_BUNDLE_FOOTPRINT_FILE_TYPE, footprintFile: ?*?*IAppxFile) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleReader.VTable, self.vtable).GetFootprintFile(@ptrCast(*const IAppxBundleReader, self), fileType, footprintFile);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleReader_GetBlockMap(self: *const T, blockMapReader: ?*?*IAppxBlockMapReader) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleReader.VTable, self.vtable).GetBlockMap(@ptrCast(*const IAppxBundleReader, self), blockMapReader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleReader_GetManifest(self: *const T,
manifestReader: ?*?*IAppxBundleManifestReader) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleReader.VTable, self.vtable).GetManifest(@ptrCast(*const IAppxBundleReader, self), manifestReader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleReader_GetPayloadPackages(self: *const T, payloadPackages: ?*?*IAppxFilesEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleReader.VTable, self.vtable).GetPayloadPackages(@ptrCast(*const IAppxBundleReader, self), payloadPackages);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleReader_GetPayloadPackage(self: *const T, fileName: ?[*:0]const u16, payloadPackage: ?*?*IAppxFile) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleReader.VTable, self.vtable).GetPayloadPackage(@ptrCast(*const IAppxBundleReader, self), fileName, payloadPackage);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.1'
// Reads a bundle manifest: package identity, per-package info items, and the raw manifest stream.
const IID_IAppxBundleManifestReader_Value = Guid.initString("cf0ebbc1-cc99-4106-91eb-e67462e04fb0");
pub const IID_IAppxBundleManifestReader = &IID_IAppxBundleManifestReader_Value;
pub const IAppxBundleManifestReader = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetPackageId: fn(
            self: *const IAppxBundleManifestReader,
            packageId: ?*?*IAppxManifestPackageId,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPackageInfoItems: fn(
            self: *const IAppxBundleManifestReader,
            packageInfoItems: ?*?*IAppxBundleManifestPackageInfoEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetStream: fn(
            self: *const IAppxBundleManifestReader,
            manifestStream: ?*?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestReader_GetPackageId(self: *const T, packageId: ?*?*IAppxManifestPackageId) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleManifestReader.VTable, self.vtable).GetPackageId(@ptrCast(*const IAppxBundleManifestReader, self), packageId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestReader_GetPackageInfoItems(self: *const T, packageInfoItems: ?*?*IAppxBundleManifestPackageInfoEnumerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleManifestReader.VTable, self.vtable).GetPackageInfoItems(@ptrCast(*const IAppxBundleManifestReader, self), packageInfoItems);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestReader_GetStream(self: *const T, manifestStream: ?*?*IStream) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleManifestReader.VTable, self.vtable).GetStream(@ptrCast(*const IAppxBundleManifestReader, self), manifestStream);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
// Extends bundle-manifest reading with enumeration of optional bundles.
const IID_IAppxBundleManifestReader2_Value = Guid.initString("5517df70-033f-4af2-8213-87d766805c02");
pub const IID_IAppxBundleManifestReader2 = &IID_IAppxBundleManifestReader2_Value;
pub const IAppxBundleManifestReader2 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetOptionalBundles: fn(
            self: *const IAppxBundleManifestReader2,
            optionalBundles: ?*?*IAppxBundleManifestOptionalBundleInfoEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestReader2_GetOptionalBundles(self: *const T, optionalBundles: ?*?*IAppxBundleManifestOptionalBundleInfoEnumerator) callconv(.Inline)
HRESULT {
            return @ptrCast(*const IAppxBundleManifestReader2.VTable, self.vtable).GetOptionalBundles(@ptrCast(*const IAppxBundleManifestReader2, self), optionalBundles);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.1'
// Forward-only enumerator over IAppxBundleManifestPackageInfo items.
const IID_IAppxBundleManifestPackageInfoEnumerator_Value = Guid.initString("f9b856ee-49a6-4e19-b2b0-6a2406d63a32");
pub const IID_IAppxBundleManifestPackageInfoEnumerator = &IID_IAppxBundleManifestPackageInfoEnumerator_Value;
pub const IAppxBundleManifestPackageInfoEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IAppxBundleManifestPackageInfoEnumerator,
            packageInfo: ?*?*IAppxBundleManifestPackageInfo,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHasCurrent: fn(
            self: *const IAppxBundleManifestPackageInfoEnumerator,
            hasCurrent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MoveNext: fn(
            self: *const IAppxBundleManifestPackageInfoEnumerator,
            hasNext: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestPackageInfoEnumerator_GetCurrent(self: *const T, packageInfo: ?*?*IAppxBundleManifestPackageInfo) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleManifestPackageInfoEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxBundleManifestPackageInfoEnumerator, self), packageInfo);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestPackageInfoEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleManifestPackageInfoEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxBundleManifestPackageInfoEnumerator, self), hasCurrent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestPackageInfoEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAppxBundleManifestPackageInfoEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxBundleManifestPackageInfoEnumerator, self), hasNext);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows8.1'
// Per-package info from a bundle manifest: type, identity, file name, offset/size, and resources.
const IID_IAppxBundleManifestPackageInfo_Value = Guid.initString("54cd06c1-268f-40bb-8ed2-757a9ebaec8d");
pub const IID_IAppxBundleManifestPackageInfo = &IID_IAppxBundleManifestPackageInfo_Value;
pub const IAppxBundleManifestPackageInfo = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetPackageType: fn(
            self: *const IAppxBundleManifestPackageInfo,
            packageType: ?*APPX_BUNDLE_PAYLOAD_PACKAGE_TYPE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPackageId: fn(
            self: *const IAppxBundleManifestPackageInfo,
            packageId: ?*?*IAppxManifestPackageId,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFileName: fn(
            self: *const IAppxBundleManifestPackageInfo,
            fileName: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetOffset: fn(
            self: *const IAppxBundleManifestPackageInfo,
            offset: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSize: fn(
            self: *const IAppxBundleManifestPackageInfo,
            size: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetResources: fn(
            self: *const IAppxBundleManifestPackageInfo,
            resources: ?*?*IAppxManifestQualifiedResourcesEnumerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAppxBundleManifestPackageInfo_GetPackageType(self: *const T, packageType:
?*APPX_BUNDLE_PAYLOAD_PACKAGE_TYPE) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo.VTable, self.vtable).GetPackageType(@ptrCast(*const IAppxBundleManifestPackageInfo, self), packageType); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo_GetPackageId(self: *const T, packageId: ?*?*IAppxManifestPackageId) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo.VTable, self.vtable).GetPackageId(@ptrCast(*const IAppxBundleManifestPackageInfo, self), packageId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo_GetFileName(self: *const T, fileName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo.VTable, self.vtable).GetFileName(@ptrCast(*const IAppxBundleManifestPackageInfo, self), fileName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo_GetOffset(self: *const T, offset: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo.VTable, self.vtable).GetOffset(@ptrCast(*const IAppxBundleManifestPackageInfo, self), offset); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo_GetSize(self: *const T, size: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo.VTable, self.vtable).GetSize(@ptrCast(*const IAppxBundleManifestPackageInfo, self), size); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo_GetResources(self: *const T, resources: ?*?*IAppxManifestQualifiedResourcesEnumerator) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo.VTable, self.vtable).GetResources(@ptrCast(*const IAppxBundleManifestPackageInfo, self), resources); } };} pub 
usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxBundleManifestPackageInfo2_Value = Guid.initString("44c2acbc-b2cf-4ccb-bbdb-9c6da8c3bc9e"); pub const IID_IAppxBundleManifestPackageInfo2 = &IID_IAppxBundleManifestPackageInfo2_Value; pub const IAppxBundleManifestPackageInfo2 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetIsPackageReference: fn( self: *const IAppxBundleManifestPackageInfo2, isPackageReference: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetIsNonQualifiedResourcePackage: fn( self: *const IAppxBundleManifestPackageInfo2, isNonQualifiedResourcePackage: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetIsDefaultApplicablePackage: fn( self: *const IAppxBundleManifestPackageInfo2, isDefaultApplicablePackage: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo2_GetIsPackageReference(self: *const T, isPackageReference: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo2.VTable, self.vtable).GetIsPackageReference(@ptrCast(*const IAppxBundleManifestPackageInfo2, self), isPackageReference); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo2_GetIsNonQualifiedResourcePackage(self: *const T, isNonQualifiedResourcePackage: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo2.VTable, self.vtable).GetIsNonQualifiedResourcePackage(@ptrCast(*const IAppxBundleManifestPackageInfo2, self), isNonQualifiedResourcePackage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IAppxBundleManifestPackageInfo2_GetIsDefaultApplicablePackage(self: *const T, isDefaultApplicablePackage: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo2.VTable, self.vtable).GetIsDefaultApplicablePackage(@ptrCast(*const IAppxBundleManifestPackageInfo2, self), isDefaultApplicablePackage); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IAppxBundleManifestPackageInfo3_Value = Guid.initString("6ba74b98-bb74-4296-80d0-5f4256a99675"); pub const IID_IAppxBundleManifestPackageInfo3 = &IID_IAppxBundleManifestPackageInfo3_Value; pub const IAppxBundleManifestPackageInfo3 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetTargetDeviceFamilies: fn( self: *const IAppxBundleManifestPackageInfo3, targetDeviceFamilies: ?*?*IAppxManifestTargetDeviceFamiliesEnumerator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo3_GetTargetDeviceFamilies(self: *const T, targetDeviceFamilies: ?*?*IAppxManifestTargetDeviceFamiliesEnumerator) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo3.VTable, self.vtable).GetTargetDeviceFamilies(@ptrCast(*const IAppxBundleManifestPackageInfo3, self), targetDeviceFamilies); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IAppxBundleManifestPackageInfo4_Value = Guid.initString("5da6f13d-a8a7-4532-857c-1393d659371d"); pub const IID_IAppxBundleManifestPackageInfo4 = &IID_IAppxBundleManifestPackageInfo4_Value; pub const IAppxBundleManifestPackageInfo4 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetIsStub: fn( self: *const IAppxBundleManifestPackageInfo4, isStub: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn 
MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestPackageInfo4_GetIsStub(self: *const T, isStub: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestPackageInfo4.VTable, self.vtable).GetIsStub(@ptrCast(*const IAppxBundleManifestPackageInfo4, self), isStub); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxBundleManifestOptionalBundleInfoEnumerator_Value = Guid.initString("9a178793-f97e-46ac-aaca-dd5ba4c177c8"); pub const IID_IAppxBundleManifestOptionalBundleInfoEnumerator = &IID_IAppxBundleManifestOptionalBundleInfoEnumerator_Value; pub const IAppxBundleManifestOptionalBundleInfoEnumerator = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCurrent: fn( self: *const IAppxBundleManifestOptionalBundleInfoEnumerator, optionalBundle: ?*?*IAppxBundleManifestOptionalBundleInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetHasCurrent: fn( self: *const IAppxBundleManifestOptionalBundleInfoEnumerator, hasCurrent: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveNext: fn( self: *const IAppxBundleManifestOptionalBundleInfoEnumerator, hasNext: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestOptionalBundleInfoEnumerator_GetCurrent(self: *const T, optionalBundle: ?*?*IAppxBundleManifestOptionalBundleInfo) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestOptionalBundleInfoEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxBundleManifestOptionalBundleInfoEnumerator, self), optionalBundle); } // NOTE: 
method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestOptionalBundleInfoEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestOptionalBundleInfoEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxBundleManifestOptionalBundleInfoEnumerator, self), hasCurrent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestOptionalBundleInfoEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestOptionalBundleInfoEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxBundleManifestOptionalBundleInfoEnumerator, self), hasNext); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxBundleManifestOptionalBundleInfo_Value = Guid.initString("515bf2e8-bcb0-4d69-8c48-e383147b6e12"); pub const IID_IAppxBundleManifestOptionalBundleInfo = &IID_IAppxBundleManifestOptionalBundleInfo_Value; pub const IAppxBundleManifestOptionalBundleInfo = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetPackageId: fn( self: *const IAppxBundleManifestOptionalBundleInfo, packageId: ?*?*IAppxManifestPackageId, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFileName: fn( self: *const IAppxBundleManifestOptionalBundleInfo, fileName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPackageInfoItems: fn( self: *const IAppxBundleManifestOptionalBundleInfo, packageInfoItems: ?*?*IAppxBundleManifestPackageInfoEnumerator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IAppxBundleManifestOptionalBundleInfo_GetPackageId(self: *const T, packageId: ?*?*IAppxManifestPackageId) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestOptionalBundleInfo.VTable, self.vtable).GetPackageId(@ptrCast(*const IAppxBundleManifestOptionalBundleInfo, self), packageId); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestOptionalBundleInfo_GetFileName(self: *const T, fileName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestOptionalBundleInfo.VTable, self.vtable).GetFileName(@ptrCast(*const IAppxBundleManifestOptionalBundleInfo, self), fileName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxBundleManifestOptionalBundleInfo_GetPackageInfoItems(self: *const T, packageInfoItems: ?*?*IAppxBundleManifestPackageInfoEnumerator) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxBundleManifestOptionalBundleInfo.VTable, self.vtable).GetPackageInfoItems(@ptrCast(*const IAppxBundleManifestOptionalBundleInfo, self), packageInfoItems); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxContentGroupFilesEnumerator_Value = Guid.initString("1a09a2fd-7440-44eb-8c84-848205a6a1cc"); pub const IID_IAppxContentGroupFilesEnumerator = &IID_IAppxContentGroupFilesEnumerator_Value; pub const IAppxContentGroupFilesEnumerator = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCurrent: fn( self: *const IAppxContentGroupFilesEnumerator, file: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetHasCurrent: fn( self: *const IAppxContentGroupFilesEnumerator, hasCurrent: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveNext: fn( self: *const IAppxContentGroupFilesEnumerator, hasNext: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: 
type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupFilesEnumerator_GetCurrent(self: *const T, file: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupFilesEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxContentGroupFilesEnumerator, self), file); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupFilesEnumerator_GetHasCurrent(self: *const T, hasCurrent: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupFilesEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxContentGroupFilesEnumerator, self), hasCurrent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupFilesEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupFilesEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxContentGroupFilesEnumerator, self), hasNext); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxContentGroup_Value = Guid.initString("328f6468-c04f-4e3c-b6fa-6b8d27f3003a"); pub const IID_IAppxContentGroup = &IID_IAppxContentGroup_Value; pub const IAppxContentGroup = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetName: fn( self: *const IAppxContentGroup, groupName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFiles: fn( self: *const IAppxContentGroup, enumerator: ?*?*IAppxContentGroupFilesEnumerator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IAppxContentGroup_GetName(self: *const T, groupName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroup.VTable, self.vtable).GetName(@ptrCast(*const IAppxContentGroup, self), groupName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroup_GetFiles(self: *const T, enumerator: ?*?*IAppxContentGroupFilesEnumerator) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroup.VTable, self.vtable).GetFiles(@ptrCast(*const IAppxContentGroup, self), enumerator); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxContentGroupsEnumerator_Value = Guid.initString("3264e477-16d1-4d63-823e-7d2984696634"); pub const IID_IAppxContentGroupsEnumerator = &IID_IAppxContentGroupsEnumerator_Value; pub const IAppxContentGroupsEnumerator = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCurrent: fn( self: *const IAppxContentGroupsEnumerator, stream: ?*?*IAppxContentGroup, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetHasCurrent: fn( self: *const IAppxContentGroupsEnumerator, hasCurrent: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MoveNext: fn( self: *const IAppxContentGroupsEnumerator, hasNext: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupsEnumerator_GetCurrent(self: *const T, stream: ?*?*IAppxContentGroup) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupsEnumerator.VTable, self.vtable).GetCurrent(@ptrCast(*const IAppxContentGroupsEnumerator, self), stream); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupsEnumerator_GetHasCurrent(self: *const T, 
hasCurrent: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupsEnumerator.VTable, self.vtable).GetHasCurrent(@ptrCast(*const IAppxContentGroupsEnumerator, self), hasCurrent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupsEnumerator_MoveNext(self: *const T, hasNext: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupsEnumerator.VTable, self.vtable).MoveNext(@ptrCast(*const IAppxContentGroupsEnumerator, self), hasNext); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxContentGroupMapReader_Value = Guid.initString("418726d8-dd99-4f5d-9886-157add20de01"); pub const IID_IAppxContentGroupMapReader = &IID_IAppxContentGroupMapReader_Value; pub const IAppxContentGroupMapReader = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetRequiredGroup: fn( self: *const IAppxContentGroupMapReader, requiredGroup: ?*?*IAppxContentGroup, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAutomaticGroups: fn( self: *const IAppxContentGroupMapReader, automaticGroupsEnumerator: ?*?*IAppxContentGroupsEnumerator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupMapReader_GetRequiredGroup(self: *const T, requiredGroup: ?*?*IAppxContentGroup) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupMapReader.VTable, self.vtable).GetRequiredGroup(@ptrCast(*const IAppxContentGroupMapReader, self), requiredGroup); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupMapReader_GetAutomaticGroups(self: *const T, automaticGroupsEnumerator: ?*?*IAppxContentGroupsEnumerator) callconv(.Inline) 
HRESULT { return @ptrCast(*const IAppxContentGroupMapReader.VTable, self.vtable).GetAutomaticGroups(@ptrCast(*const IAppxContentGroupMapReader, self), automaticGroupsEnumerator); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows10.0.10240' const IID_IAppxSourceContentGroupMapReader_Value = Guid.initString("f329791d-540b-4a9f-bc75-3282b7d73193"); pub const IID_IAppxSourceContentGroupMapReader = &IID_IAppxSourceContentGroupMapReader_Value; pub const IAppxSourceContentGroupMapReader = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetRequiredGroup: fn( self: *const IAppxSourceContentGroupMapReader, requiredGroup: ?*?*IAppxContentGroup, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAutomaticGroups: fn( self: *const IAppxSourceContentGroupMapReader, automaticGroupsEnumerator: ?*?*IAppxContentGroupsEnumerator, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxSourceContentGroupMapReader_GetRequiredGroup(self: *const T, requiredGroup: ?*?*IAppxContentGroup) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxSourceContentGroupMapReader.VTable, self.vtable).GetRequiredGroup(@ptrCast(*const IAppxSourceContentGroupMapReader, self), requiredGroup); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxSourceContentGroupMapReader_GetAutomaticGroups(self: *const T, automaticGroupsEnumerator: ?*?*IAppxContentGroupsEnumerator) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxSourceContentGroupMapReader.VTable, self.vtable).GetAutomaticGroups(@ptrCast(*const IAppxSourceContentGroupMapReader, self), automaticGroupsEnumerator); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 
'windows10.0.10240' const IID_IAppxContentGroupMapWriter_Value = Guid.initString("d07ab776-a9de-4798-8c14-3db31e687c78"); pub const IID_IAppxContentGroupMapWriter = &IID_IAppxContentGroupMapWriter_Value; pub const IAppxContentGroupMapWriter = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AddAutomaticGroup: fn( self: *const IAppxContentGroupMapWriter, groupName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddAutomaticFile: fn( self: *const IAppxContentGroupMapWriter, fileName: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Close: fn( self: *const IAppxContentGroupMapWriter, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupMapWriter_AddAutomaticGroup(self: *const T, groupName: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupMapWriter.VTable, self.vtable).AddAutomaticGroup(@ptrCast(*const IAppxContentGroupMapWriter, self), groupName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupMapWriter_AddAutomaticFile(self: *const T, fileName: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupMapWriter.VTable, self.vtable).AddAutomaticFile(@ptrCast(*const IAppxContentGroupMapWriter, self), fileName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxContentGroupMapWriter_Close(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxContentGroupMapWriter.VTable, self.vtable).Close(@ptrCast(*const IAppxContentGroupMapWriter, self)); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IAppxPackagingDiagnosticEventSink_Value = Guid.initString("17239d47-6adb-45d2-80f6-f9cbc3bf059d"); 
pub const IID_IAppxPackagingDiagnosticEventSink = &IID_IAppxPackagingDiagnosticEventSink_Value; pub const IAppxPackagingDiagnosticEventSink = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, ReportContextChange: fn( self: *const IAppxPackagingDiagnosticEventSink, changeType: APPX_PACKAGING_CONTEXT_CHANGE_TYPE, contextId: i32, contextName: ?[*:0]const u8, contextMessage: ?[*:0]const u16, detailsMessage: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ReportError: fn( self: *const IAppxPackagingDiagnosticEventSink, errorMessage: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackagingDiagnosticEventSink_ReportContextChange(self: *const T, changeType: APPX_PACKAGING_CONTEXT_CHANGE_TYPE, contextId: i32, contextName: ?[*:0]const u8, contextMessage: ?[*:0]const u16, detailsMessage: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackagingDiagnosticEventSink.VTable, self.vtable).ReportContextChange(@ptrCast(*const IAppxPackagingDiagnosticEventSink, self), changeType, contextId, contextName, contextMessage, detailsMessage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackagingDiagnosticEventSink_ReportError(self: *const T, errorMessage: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackagingDiagnosticEventSink.VTable, self.vtable).ReportError(@ptrCast(*const IAppxPackagingDiagnosticEventSink, self), errorMessage); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IAppxPackagingDiagnosticEventSinkManager_Value = Guid.initString("369648fa-a7eb-4909-a15d-6954a078f18a"); pub const IID_IAppxPackagingDiagnosticEventSinkManager = &IID_IAppxPackagingDiagnosticEventSinkManager_Value; pub 
const IAppxPackagingDiagnosticEventSinkManager = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, SetSinkForProcess: fn( self: *const IAppxPackagingDiagnosticEventSinkManager, sink: ?*IAppxPackagingDiagnosticEventSink, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IAppxPackagingDiagnosticEventSinkManager_SetSinkForProcess(self: *const T, sink: ?*IAppxPackagingDiagnosticEventSink) callconv(.Inline) HRESULT { return @ptrCast(*const IAppxPackagingDiagnosticEventSinkManager.VTable, self.vtable).SetSinkForProcess(@ptrCast(*const IAppxPackagingDiagnosticEventSinkManager, self), sink); } };} pub usingnamespace MethodMixin(@This()); }; pub const APPX_ENCRYPTED_PACKAGE_SETTINGS = extern struct { keyLength: u32, encryptionAlgorithm: ?[*:0]const u16, useDiffusion: BOOL, blockMapHashAlgorithm: ?*IUri, }; pub const APPX_ENCRYPTED_PACKAGE_OPTIONS = enum(u32) { NONE = 0, DIFFUSION = 1, PAGE_HASHING = 2, _, pub fn initFlags(o: struct { NONE: u1 = 0, DIFFUSION: u1 = 0, PAGE_HASHING: u1 = 0, }) APPX_ENCRYPTED_PACKAGE_OPTIONS { return @intToEnum(APPX_ENCRYPTED_PACKAGE_OPTIONS, (if (o.NONE == 1) @enumToInt(APPX_ENCRYPTED_PACKAGE_OPTIONS.NONE) else 0) | (if (o.DIFFUSION == 1) @enumToInt(APPX_ENCRYPTED_PACKAGE_OPTIONS.DIFFUSION) else 0) | (if (o.PAGE_HASHING == 1) @enumToInt(APPX_ENCRYPTED_PACKAGE_OPTIONS.PAGE_HASHING) else 0) ); } }; pub const APPX_ENCRYPTED_PACKAGE_OPTION_NONE = APPX_ENCRYPTED_PACKAGE_OPTIONS.NONE; pub const APPX_ENCRYPTED_PACKAGE_OPTION_DIFFUSION = APPX_ENCRYPTED_PACKAGE_OPTIONS.DIFFUSION; pub const APPX_ENCRYPTED_PACKAGE_OPTION_PAGE_HASHING = APPX_ENCRYPTED_PACKAGE_OPTIONS.PAGE_HASHING; pub const APPX_ENCRYPTED_PACKAGE_SETTINGS2 = extern struct { keyLength: u32, encryptionAlgorithm: ?[*:0]const u16, blockMapHashAlgorithm: 
?*IUri, options: u32, }; pub const APPX_KEY_INFO = extern struct { keyLength: u32, keyIdLength: u32, key: ?*u8, keyId: ?*u8, }; pub const APPX_ENCRYPTED_EXEMPTIONS = extern struct { count: u32, plainTextFiles: ?*?PWSTR, }; // TODO: this type is limited to platform 'windows10.0.14393' const IID_IAppxEncryptionFactory_Value = Guid.initString("80e8e04d-8c88-44ae-a011-7cadf6fb2e72"); pub const IID_IAppxEncryptionFactory = &IID_IAppxEncryptionFactory_Value; pub const IAppxEncryptionFactory = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, EncryptPackage: fn( self: *const IAppxEncryptionFactory, inputStream: ?*IStream, outputStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DecryptPackage: fn( self: *const IAppxEncryptionFactory, inputStream: ?*IStream, outputStream: ?*IStream, keyInfo: ?*const APPX_KEY_INFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateEncryptedPackageWriter: fn( self: *const IAppxEncryptionFactory, outputStream: ?*IStream, manifestStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, packageWriter: ?*?*IAppxEncryptedPackageWriter, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateEncryptedPackageReader: fn( self: *const IAppxEncryptionFactory, inputStream: ?*IStream, keyInfo: ?*const APPX_KEY_INFO, packageReader: ?*?*IAppxPackageReader, ) callconv(@import("std").os.windows.WINAPI) HRESULT, EncryptBundle: fn( self: *const IAppxEncryptionFactory, inputStream: ?*IStream, outputStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DecryptBundle: fn( self: *const IAppxEncryptionFactory, inputStream: 
?*IStream,
outputStream: ?*IStream,
keyInfo: ?*const APPX_KEY_INFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateEncryptedBundleWriter: fn(
    self: *const IAppxEncryptionFactory,
    outputStream: ?*IStream,
    bundleVersion: u64,
    settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS,
    keyInfo: ?*const APPX_KEY_INFO,
    exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS,
    bundleWriter: ?*?*IAppxEncryptedBundleWriter,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateEncryptedBundleReader: fn(
    self: *const IAppxEncryptionFactory,
    inputStream: ?*IStream,
    keyInfo: ?*const APPX_KEY_INFO,
    bundleReader: ?*?*IAppxBundleReader,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
// Comptime mixin: each wrapper casts `self.vtable` back to this interface's
// VTable and forwards the call through the matching slot. Generated code —
// do not reorder; vtable slot order is ABI.
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_EncryptPackage(self: *const T, inputStream: ?*IStream, outputStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).EncryptPackage(@ptrCast(*const IAppxEncryptionFactory, self), inputStream, outputStream, settings, keyInfo, exemptedFiles);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_DecryptPackage(self: *const T, inputStream: ?*IStream, outputStream: ?*IStream, keyInfo: ?*const APPX_KEY_INFO) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).DecryptPackage(@ptrCast(*const IAppxEncryptionFactory, self), inputStream, outputStream, keyInfo);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_CreateEncryptedPackageWriter(self: *const T, outputStream: ?*IStream, manifestStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, packageWriter: ?*?*IAppxEncryptedPackageWriter) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).CreateEncryptedPackageWriter(@ptrCast(*const IAppxEncryptionFactory, self), outputStream, manifestStream, settings, keyInfo, exemptedFiles, packageWriter);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_CreateEncryptedPackageReader(self: *const T, inputStream: ?*IStream, keyInfo: ?*const APPX_KEY_INFO, packageReader: ?*?*IAppxPackageReader) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).CreateEncryptedPackageReader(@ptrCast(*const IAppxEncryptionFactory, self), inputStream, keyInfo, packageReader);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_EncryptBundle(self: *const T, inputStream: ?*IStream, outputStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).EncryptBundle(@ptrCast(*const IAppxEncryptionFactory, self), inputStream, outputStream, settings, keyInfo, exemptedFiles);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_DecryptBundle(self: *const T, inputStream: ?*IStream, outputStream: ?*IStream, keyInfo: ?*const APPX_KEY_INFO) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).DecryptBundle(@ptrCast(*const IAppxEncryptionFactory, self), inputStream, outputStream, keyInfo);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_CreateEncryptedBundleWriter(self: *const T, outputStream: ?*IStream, bundleVersion: u64, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, bundleWriter: ?*?*IAppxEncryptedBundleWriter) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).CreateEncryptedBundleWriter(@ptrCast(*const IAppxEncryptionFactory, self), outputStream, bundleVersion, settings, keyInfo, exemptedFiles, bundleWriter);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IAppxEncryptionFactory_CreateEncryptedBundleReader(self: *const T, inputStream: ?*IStream, keyInfo: ?*const APPX_KEY_INFO, bundleReader: ?*?*IAppxBundleReader) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory.VTable, self.vtable).CreateEncryptedBundleReader(@ptrCast(*const IAppxEncryptionFactory, self), inputStream, keyInfo, bundleReader);
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxEncryptionFactory2_Value = Guid.initString("c1b11eee-c4ba-4ab2-a55d-d015fe8ff64f");
pub const IID_IAppxEncryptionFactory2 = &IID_IAppxEncryptionFactory2_Value;
// COM interface (IUnknown-derived): single CreateEncryptedPackageWriter slot
// that, compared with IAppxEncryptionFactory's, adds a contentGroupMapStream
// parameter. Generated binding — layout/slot order is ABI.
pub const IAppxEncryptionFactory2 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateEncryptedPackageWriter: fn(
            self: *const IAppxEncryptionFactory2,
            outputStream: ?*IStream,
            manifestStream: ?*IStream,
            contentGroupMapStream: ?*IStream,
            settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS,
            keyInfo: ?*const APPX_KEY_INFO,
            exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS,
            packageWriter: ?*?*IAppxEncryptedPackageWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptionFactory2_CreateEncryptedPackageWriter(self: *const T, outputStream:
?*IStream, manifestStream: ?*IStream, contentGroupMapStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, packageWriter: ?*?*IAppxEncryptedPackageWriter) callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory2.VTable, self.vtable).CreateEncryptedPackageWriter(@ptrCast(*const IAppxEncryptionFactory2, self), outputStream, manifestStream, contentGroupMapStream, settings, keyInfo, exemptedFiles, packageWriter);
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxEncryptionFactory3_Value = Guid.initString("09edca37-cd64-47d6-b7e8-1cb11d4f7e05");
pub const IID_IAppxEncryptionFactory3 = &IID_IAppxEncryptionFactory3_Value;
// COM interface (IUnknown-derived): same operation set as the earlier factory
// slots but taking the extended APPX_ENCRYPTED_PACKAGE_SETTINGS2 struct.
// Generated binding — vtable slot order is ABI; do not reorder.
pub const IAppxEncryptionFactory3 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        EncryptPackage: fn(
            self: *const IAppxEncryptionFactory3,
            inputStream: ?*IStream,
            outputStream: ?*IStream,
            settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2,
            keyInfo: ?*const APPX_KEY_INFO,
            exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateEncryptedPackageWriter: fn(
            self: *const IAppxEncryptionFactory3,
            outputStream: ?*IStream,
            manifestStream: ?*IStream,
            contentGroupMapStream: ?*IStream,
            settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2,
            keyInfo: ?*const APPX_KEY_INFO,
            exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS,
            packageWriter: ?*?*IAppxEncryptedPackageWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EncryptBundle: fn(
            self: *const IAppxEncryptionFactory3,
            inputStream: ?*IStream,
            outputStream: ?*IStream,
            settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2,
            keyInfo: ?*const APPX_KEY_INFO,
            exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateEncryptedBundleWriter: fn(
            self: *const IAppxEncryptionFactory3,
            outputStream: ?*IStream,
            bundleVersion: u64,
            settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2,
            keyInfo: ?*const APPX_KEY_INFO,
            exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS,
            bundleWriter: ?*?*IAppxEncryptedBundleWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptionFactory3_EncryptPackage(self: *const T, inputStream: ?*IStream, outputStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptionFactory3.VTable, self.vtable).EncryptPackage(@ptrCast(*const IAppxEncryptionFactory3, self), inputStream, outputStream, settings, keyInfo, exemptedFiles);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptionFactory3_CreateEncryptedPackageWriter(self: *const T, outputStream: ?*IStream, manifestStream: ?*IStream, contentGroupMapStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, packageWriter: ?*?*IAppxEncryptedPackageWriter) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptionFactory3.VTable, self.vtable).CreateEncryptedPackageWriter(@ptrCast(*const IAppxEncryptionFactory3, self), outputStream, manifestStream, contentGroupMapStream, settings, keyInfo, exemptedFiles, packageWriter);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptionFactory3_EncryptBundle(self: *const T, inputStream: ?*IStream, outputStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptionFactory3.VTable, self.vtable).EncryptBundle(@ptrCast(*const IAppxEncryptionFactory3, self), inputStream, outputStream, settings, keyInfo, exemptedFiles);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptionFactory3_CreateEncryptedBundleWriter(self: *const T, outputStream: ?*IStream, bundleVersion: u64, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, bundleWriter: ?*?*IAppxEncryptedBundleWriter) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptionFactory3.VTable, self.vtable).CreateEncryptedBundleWriter(@ptrCast(*const IAppxEncryptionFactory3, self), outputStream, bundleVersion, settings, keyInfo, exemptedFiles, bundleWriter);
    }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxEncryptionFactory4_Value = Guid.initString("a879611f-12fd-41fe-85d5-06ae779bbaf5");
pub const IID_IAppxEncryptionFactory4 = &IID_IAppxEncryptionFactory4_Value;
// COM interface (IUnknown-derived): EncryptPackage variant with a trailing
// memoryLimit (u64) argument. Generated binding.
pub const IAppxEncryptionFactory4 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        EncryptPackage: fn(
            self: *const IAppxEncryptionFactory4,
            inputStream: ?*IStream,
            outputStream: ?*IStream,
            settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2,
            keyInfo: ?*const APPX_KEY_INFO,
            exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS,
            memoryLimit: u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptionFactory4_EncryptPackage(self: *const T, inputStream: ?*IStream, outputStream: ?*IStream, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2, keyInfo: ?*const APPX_KEY_INFO, exemptedFiles: ?*const APPX_ENCRYPTED_EXEMPTIONS, memoryLimit: u64)
callconv(.Inline) HRESULT {
    return @ptrCast(*const IAppxEncryptionFactory4.VTable, self.vtable).EncryptPackage(@ptrCast(*const IAppxEncryptionFactory4, self), inputStream, outputStream, settings, keyInfo, exemptedFiles, memoryLimit);
}
};}
pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.14393'
const IID_IAppxEncryptedPackageWriter_Value = Guid.initString("f43d0b0b-1379-40e2-9b29-682ea2bf42af");
pub const IID_IAppxEncryptedPackageWriter = &IID_IAppxEncryptedPackageWriter_Value;
// COM interface (IUnknown-derived): two slots — add one payload file to an
// encrypted package, and Close. Generated binding; slot order is ABI.
pub const IAppxEncryptedPackageWriter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddPayloadFileEncrypted: fn(
            self: *const IAppxEncryptedPackageWriter,
            fileName: ?[*:0]const u16,
            compressionOption: APPX_COMPRESSION_OPTION,
            inputStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Close: fn(
            self: *const IAppxEncryptedPackageWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedPackageWriter_AddPayloadFileEncrypted(self: *const T, fileName: ?[*:0]const u16, compressionOption: APPX_COMPRESSION_OPTION, inputStream: ?*IStream) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedPackageWriter.VTable, self.vtable).AddPayloadFileEncrypted(@ptrCast(*const IAppxEncryptedPackageWriter, self), fileName, compressionOption, inputStream);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedPackageWriter_Close(self: *const T) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedPackageWriter.VTable, self.vtable).Close(@ptrCast(*const IAppxEncryptedPackageWriter, self));
    }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.14393'
const IID_IAppxEncryptedPackageWriter2_Value = Guid.initString("3e475447-3a25-40b5-8ad2-f953ae50c92d");
pub const IID_IAppxEncryptedPackageWriter2 = &IID_IAppxEncryptedPackageWriter2_Value;
// COM interface (IUnknown-derived): batch variant — fileCount payload files in
// one call, with a memoryLimit argument. Generated binding.
pub const IAppxEncryptedPackageWriter2 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddPayloadFilesEncrypted: fn(
            self: *const IAppxEncryptedPackageWriter2,
            fileCount: u32,
            payloadFiles: [*]APPX_PACKAGE_WRITER_PAYLOAD_STREAM,
            memoryLimit: u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedPackageWriter2_AddPayloadFilesEncrypted(self: *const T, fileCount: u32, payloadFiles: [*]APPX_PACKAGE_WRITER_PAYLOAD_STREAM, memoryLimit: u64) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedPackageWriter2.VTable, self.vtable).AddPayloadFilesEncrypted(@ptrCast(*const IAppxEncryptedPackageWriter2, self), fileCount, payloadFiles, memoryLimit);
    }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.14393'
const IID_IAppxEncryptedBundleWriter_Value = Guid.initString("80b0902f-7bf0-4117-b8c6-4279ef81ee77");
pub const IID_IAppxEncryptedBundleWriter = &IID_IAppxEncryptedBundleWriter_Value;
// COM interface (IUnknown-derived): add a payload package to an encrypted
// bundle, plus Close. Generated binding; slot order is ABI.
pub const IAppxEncryptedBundleWriter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddPayloadPackageEncrypted: fn(
            self: *const IAppxEncryptedBundleWriter,
            fileName: ?[*:0]const u16,
            packageStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Close: fn(
            self: *const IAppxEncryptedBundleWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedBundleWriter_AddPayloadPackageEncrypted(self: *const T, fileName: ?[*:0]const u16, packageStream: ?*IStream) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedBundleWriter.VTable, self.vtable).AddPayloadPackageEncrypted(@ptrCast(*const IAppxEncryptedBundleWriter, self), fileName, packageStream);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedBundleWriter_Close(self: *const T) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedBundleWriter.VTable, self.vtable).Close(@ptrCast(*const IAppxEncryptedBundleWriter, self));
    }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxEncryptedBundleWriter2_Value = Guid.initString("e644be82-f0fa-42b8-a956-8d1cb48ee379");
pub const IID_IAppxEncryptedBundleWriter2 = &IID_IAppxEncryptedBundleWriter2_Value;
// COM interface (IUnknown-derived): single slot recording a reference to a
// package that lives outside the bundle. Generated binding.
pub const IAppxEncryptedBundleWriter2 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddExternalPackageReference: fn(
            self: *const IAppxEncryptedBundleWriter2,
            fileName: ?[*:0]const u16,
            inputStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedBundleWriter2_AddExternalPackageReference(self: *const T, fileName: ?[*:0]const u16, inputStream: ?*IStream) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedBundleWriter2.VTable, self.vtable).AddExternalPackageReference(@ptrCast(*const IAppxEncryptedBundleWriter2, self), fileName, inputStream);
    }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Single-member enum generated from the Win32 metadata; the alias below
// restores the original C constant name.
pub const APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_OPTION = enum(i32) {
    A = 0,
};
pub const APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_OPTION_APPEND_DELTA =
APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_OPTION.A;
// Bit-flag options (non-exhaustive enum so arbitrary OR-combinations remain
// representable); initFlags builds a combined value from 0/1 selectors.
pub const APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS = enum(u32) {
    NONE = 0,
    SKIP_VALIDATION = 1,
    LOCALIZED = 2,
    _,
    pub fn initFlags(o: struct {
        NONE: u1 = 0,
        SKIP_VALIDATION: u1 = 0,
        LOCALIZED: u1 = 0,
    }) APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS {
        return @intToEnum(APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS,
              (if (o.NONE == 1) @enumToInt(APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS.NONE) else 0)
            | (if (o.SKIP_VALIDATION == 1) @enumToInt(APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS.SKIP_VALIDATION) else 0)
            | (if (o.LOCALIZED == 1) @enumToInt(APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS.LOCALIZED) else 0)
        );
    }
};
// Aliases restoring the original C constant names.
pub const APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTION_NONE = APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS.NONE;
pub const APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTION_SKIP_VALIDATION = APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS.SKIP_VALIDATION;
pub const APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTION_LOCALIZED = APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS.LOCALIZED;

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxEncryptedBundleWriter3_Value = Guid.initString("0d34deb3-5cae-4dd3-977c-504932a51d31");
pub const IID_IAppxEncryptedBundleWriter3 = &IID_IAppxEncryptedBundleWriter3_Value;
// COM interface (IUnknown-derived): bundle-writer slots extended with an
// isDefaultApplicablePackage BOOL. Generated binding; slot order is ABI.
pub const IAppxEncryptedBundleWriter3 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddPayloadPackageEncrypted: fn(
            self: *const IAppxEncryptedBundleWriter3,
            fileName: ?[*:0]const u16,
            packageStream: ?*IStream,
            isDefaultApplicablePackage: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddExternalPackageReference: fn(
            self: *const IAppxEncryptedBundleWriter3,
            fileName: ?[*:0]const u16,
            inputStream: ?*IStream,
            isDefaultApplicablePackage: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedBundleWriter3_AddPayloadPackageEncrypted(self: *const T, fileName: ?[*:0]const u16, packageStream: ?*IStream, isDefaultApplicablePackage: BOOL) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedBundleWriter3.VTable, self.vtable).AddPayloadPackageEncrypted(@ptrCast(*const IAppxEncryptedBundleWriter3, self), fileName, packageStream, isDefaultApplicablePackage);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxEncryptedBundleWriter3_AddExternalPackageReference(self: *const T, fileName: ?[*:0]const u16, inputStream: ?*IStream, isDefaultApplicablePackage: BOOL) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxEncryptedBundleWriter3.VTable, self.vtable).AddExternalPackageReference(@ptrCast(*const IAppxEncryptedBundleWriter3, self), fileName, inputStream, isDefaultApplicablePackage);
    }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows10.0.10240'
const IID_IAppxPackageEditor_Value = Guid.initString("e2adb6dc-5e71-4416-86b6-86e5f5291a6b");
pub const IID_IAppxPackageEditor = &IID_IAppxPackageEditor_Value;
// COM interface (IUnknown-derived): delta-package creation and in-place
// package/manifest update operations. Generated binding; slot order is ABI.
pub const IAppxPackageEditor = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        SetWorkingDirectory: fn(
            self: *const IAppxPackageEditor,
            workingDirectory: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateDeltaPackage: fn(
            self: *const IAppxPackageEditor,
            updatedPackageStream: ?*IStream,
            baselinePackageStream: ?*IStream,
            deltaPackageStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateDeltaPackageUsingBaselineBlockMap: fn(
            self: *const IAppxPackageEditor,
            updatedPackageStream: ?*IStream,
            baselineBlockMapStream: ?*IStream,
            baselinePackageFullName: ?[*:0]const u16,
            deltaPackageStream: ?*IStream,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UpdatePackage: fn(
            self: *const IAppxPackageEditor,
            baselinePackageStream: ?*IStream,
            deltaPackageStream: ?*IStream,
            updateOption: APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_OPTION,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UpdateEncryptedPackage: fn(
            self: *const IAppxPackageEditor,
            baselineEncryptedPackageStream: ?*IStream,
            deltaPackageStream: ?*IStream,
            updateOption: APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_OPTION,
            settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2,
            keyInfo: ?*const APPX_KEY_INFO,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UpdatePackageManifest: fn(
            self: *const IAppxPackageEditor,
            packageStream: ?*IStream,
            updatedManifestStream: ?*IStream,
            isPackageEncrypted: BOOL,
            options: APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxPackageEditor_SetWorkingDirectory(self: *const T, workingDirectory: ?[*:0]const u16) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxPackageEditor.VTable, self.vtable).SetWorkingDirectory(@ptrCast(*const IAppxPackageEditor, self), workingDirectory);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxPackageEditor_CreateDeltaPackage(self: *const T, updatedPackageStream: ?*IStream, baselinePackageStream: ?*IStream, deltaPackageStream: ?*IStream) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxPackageEditor.VTable, self.vtable).CreateDeltaPackage(@ptrCast(*const IAppxPackageEditor, self), updatedPackageStream, baselinePackageStream, deltaPackageStream);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxPackageEditor_CreateDeltaPackageUsingBaselineBlockMap(self: *const T, updatedPackageStream: ?*IStream, baselineBlockMapStream: ?*IStream, baselinePackageFullName: ?[*:0]const u16, deltaPackageStream: ?*IStream) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxPackageEditor.VTable, self.vtable).CreateDeltaPackageUsingBaselineBlockMap(@ptrCast(*const IAppxPackageEditor, self), updatedPackageStream, baselineBlockMapStream, baselinePackageFullName, deltaPackageStream);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxPackageEditor_UpdatePackage(self: *const T, baselinePackageStream: ?*IStream, deltaPackageStream: ?*IStream, updateOption: APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_OPTION) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxPackageEditor.VTable, self.vtable).UpdatePackage(@ptrCast(*const IAppxPackageEditor, self), baselinePackageStream, deltaPackageStream, updateOption);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxPackageEditor_UpdateEncryptedPackage(self: *const T, baselineEncryptedPackageStream: ?*IStream, deltaPackageStream: ?*IStream, updateOption: APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_OPTION, settings: ?*const APPX_ENCRYPTED_PACKAGE_SETTINGS2, keyInfo: ?*const APPX_KEY_INFO) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxPackageEditor.VTable, self.vtable).UpdateEncryptedPackage(@ptrCast(*const IAppxPackageEditor, self), baselineEncryptedPackageStream, deltaPackageStream, updateOption, settings, keyInfo);
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn IAppxPackageEditor_UpdatePackageManifest(self: *const T, packageStream: ?*IStream, updatedManifestStream: ?*IStream, isPackageEncrypted: BOOL, options: APPX_PACKAGE_EDITOR_UPDATE_PACKAGE_MANIFEST_OPTIONS) callconv(.Inline) HRESULT {
        return @ptrCast(*const IAppxPackageEditor.VTable, self.vtable).UpdatePackageManifest(@ptrCast(*const IAppxPackageEditor, self), packageStream, updatedManifestStream, isPackageEncrypted, options);
    }
    };}
    pub usingnamespace MethodMixin(@This());
};

pub const PACKAGE_VERSION =
extern struct {
    // Union overlays the packed u64 with its four u16 components.
    // NOTE(review): component order here is Revision..Major (little-endian
    // layout of the u64) — confirm against the C PACKAGE_VERSION header.
    Anonymous: extern union {
        // WARNING: unable to add field alignment because it's not implemented for unions
        Version: u64,
        Anonymous: extern struct {
            Revision: u16,
            Build: u16,
            Minor: u16,
            Major: u16,
        },
    },
};
pub const PACKAGE_ID = extern struct {
    // WARNING: unable to add field alignment because it's causing a compiler bug
    reserved: u32,
    processorArchitecture: u32,
    version: PACKAGE_VERSION,
    name: ?PWSTR,
    publisher: ?PWSTR,
    resourceId: ?PWSTR,
    publisherId: ?PWSTR,
};
pub const PackagePathType = enum(i32) {
    Install = 0,
    Mutable = 1,
    Effective = 2,
    MachineExternal = 3,
    UserExternal = 4,
    EffectiveExternal = 5,
};
// Aliases restoring the original C constant names.
pub const PackagePathType_Install = PackagePathType.Install;
pub const PackagePathType_Mutable = PackagePathType.Mutable;
pub const PackagePathType_Effective = PackagePathType.Effective;
pub const PackagePathType_MachineExternal = PackagePathType.MachineExternal;
pub const PackagePathType_UserExternal = PackagePathType.UserExternal;
pub const PackagePathType_EffectiveExternal = PackagePathType.EffectiveExternal;
pub const PackageOrigin = enum(i32) {
    Unknown = 0,
    Unsigned = 1,
    Inbox = 2,
    Store = 3,
    DeveloperUnsigned = 4,
    DeveloperSigned = 5,
    LineOfBusiness = 6,
};
pub const PackageOrigin_Unknown = PackageOrigin.Unknown;
pub const PackageOrigin_Unsigned = PackageOrigin.Unsigned;
pub const PackageOrigin_Inbox = PackageOrigin.Inbox;
pub const PackageOrigin_Store = PackageOrigin.Store;
pub const PackageOrigin_DeveloperUnsigned = PackageOrigin.DeveloperUnsigned;
pub const PackageOrigin_DeveloperSigned = PackageOrigin.DeveloperSigned;
pub const PackageOrigin_LineOfBusiness = PackageOrigin.LineOfBusiness;
// Opaque handle body; only ever used behind a pointer.
pub const _PACKAGE_INFO_REFERENCE = extern struct {
    reserved: ?*anyopaque,
};
pub const PACKAGE_INFO = extern struct {
    // WARNING: unable to add field alignment because it's causing a compiler bug
    reserved: u32,
    flags: u32,
    path: ?PWSTR,
    packageFullName: ?PWSTR,
    packageFamilyName: ?PWSTR,
    packageId: PACKAGE_ID,
};
pub const
AppPolicyShowDeveloperDiagnostic.ShowUI;
pub const AppPolicyProcessTerminationMethod = enum(i32) {
    ExitProcess = 0,
    TerminateProcess = 1,
};
pub const AppPolicyProcessTerminationMethod_ExitProcess = AppPolicyProcessTerminationMethod.ExitProcess;
pub const AppPolicyProcessTerminationMethod_TerminateProcess = AppPolicyProcessTerminationMethod.TerminateProcess;
pub const AppPolicyCreateFileAccess = enum(i32) {
    Full = 0,
    Limited = 1,
};
pub const AppPolicyCreateFileAccess_Full = AppPolicyCreateFileAccess.Full;
pub const AppPolicyCreateFileAccess_Limited = AppPolicyCreateFileAccess.Limited;
// Opaque handle body; only ever used behind a pointer.
pub const PACKAGE_VIRTUALIZATION_CONTEXT_HANDLE__ = extern struct {
    unused: i32,
};

//--------------------------------------------------------------------------------
// Section: Functions (63)
//--------------------------------------------------------------------------------
// Raw extern declarations for the AppModel package-query API. All return an
// i32 Win32 error code (0 == ERROR_SUCCESS). The *Length parameters follow
// the usual two-call pattern: in = capacity, out = required/written length.
// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetCurrentPackageId(
    bufferLength: ?*u32,
    // TODO: what to do with BytesParamIndex 0?
    buffer: ?*u8,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetCurrentPackageFullName(
    packageFullNameLength: ?*u32,
    packageFullName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetCurrentPackageFamilyName(
    packageFamilyNameLength: ?*u32,
    packageFamilyName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetCurrentPackagePath(
    pathLength: ?*u32,
    path: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetPackageId(
    hProcess: ?HANDLE,
    bufferLength: ?*u32,
    // TODO: what to do with BytesParamIndex 1?
    buffer: ?*u8,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetPackageFullName(
    hProcess: ?HANDLE,
    packageFullNameLength: ?*u32,
    packageFullName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn GetPackageFullNameFromToken(
    token: ?HANDLE,
    packageFullNameLength: ?*u32,
    packageFullName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetPackageFamilyName(
    hProcess: ?HANDLE,
    packageFamilyNameLength: ?*u32,
    packageFamilyName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn GetPackageFamilyNameFromToken(
    token: ?HANDLE,
    packageFamilyNameLength: ?*u32,
    packageFamilyName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetPackagePath(
    packageId: ?*const PACKAGE_ID,
    reserved: u32,
    pathLength: ?*u32,
    path: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.1'
pub extern "KERNEL32" fn GetPackagePathByFullName(
    packageFullName: ?[*:0]const u16,
    pathLength: ?*u32,
    path: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.1'
pub extern "KERNEL32" fn GetStagedPackagePathByFullName(
    packageFullName: ?[*:0]const u16,
    pathLength: ?*u32,
    path: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "api-ms-win-appmodel-runtime-l1-1-3" fn GetPackagePathByFullName2(
    packageFullName: ?[*:0]const u16,
    packagePathType: PackagePathType,
    pathLength: ?*u32,
    path: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "api-ms-win-appmodel-runtime-l1-1-3" fn GetStagedPackagePathByFullName2(
    packageFullName: ?[*:0]const u16,
    packagePathType: PackagePathType,
    pathLength: ?*u32,
    path: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "api-ms-win-appmodel-runtime-l1-1-3" fn GetCurrentPackageInfo2(
    flags: u32,
    packagePathType: PackagePathType,
    bufferLength: ?*u32,
    // TODO: what to do with BytesParamIndex 2?
    buffer: ?*u8,
    count: ?*u32,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows10.0.10240'
pub extern "api-ms-win-appmodel-runtime-l1-1-3" fn GetCurrentPackagePath2(
    packagePathType: PackagePathType,
    pathLength: ?*u32,
    path: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "KERNEL32" fn GetCurrentApplicationUserModelId(
    applicationUserModelIdLength: ?*u32,
    applicationUserModelId: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "KERNEL32" fn GetApplicationUserModelId(
    hProcess: ?HANDLE,
    applicationUserModelIdLength: ?*u32,
    applicationUserModelId: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn GetApplicationUserModelIdFromToken(
    token: ?HANDLE,
    applicationUserModelIdLength: ?*u32,
    applicationUserModelId: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn VerifyPackageFullName(
    packageFullName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn VerifyPackageFamilyName(
    packageFamilyName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn VerifyPackageId(
    packageId: ?*const PACKAGE_ID,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn VerifyApplicationUserModelId(
    applicationUserModelId: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn VerifyPackageRelativeApplicationId(
    packageRelativeApplicationId: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn PackageIdFromFullName(
    packageFullName: ?[*:0]const u16,
    flags: u32,
    bufferLength: ?*u32,
    // TODO: what to do with BytesParamIndex 2?
    buffer: ?*u8,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn PackageFullNameFromId(
    packageId: ?*const PACKAGE_ID,
    packageFullNameLength: ?*u32,
    packageFullName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn PackageFamilyNameFromId(
    packageId: ?*const PACKAGE_ID,
    packageFamilyNameLength: ?*u32,
    packageFamilyName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn PackageFamilyNameFromFullName(
    packageFullName: ?[*:0]const u16,
    packageFamilyNameLength: ?*u32,
    packageFamilyName: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn PackageNameAndPublisherIdFromFamilyName(
    packageFamilyName: ?[*:0]const u16,
    packageNameLength: ?*u32,
    packageName: ?[*:0]u16,
    packagePublisherIdLength: ?*u32,
    packagePublisherId: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.1'
pub extern "KERNEL32" fn FormatApplicationUserModelId(
    packageFamilyName: ?[*:0]const u16,
    packageRelativeApplicationId: ?[*:0]const u16,
    applicationUserModelIdLength: ?*u32,
    applicationUserModelId: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.1'
pub extern "KERNEL32" fn ParseApplicationUserModelId(
    applicationUserModelId: ?[*:0]const u16,
    packageFamilyNameLength: ?*u32,
    packageFamilyName: ?[*:0]u16,
    packageRelativeApplicationIdLength: ?*u32,
    packageRelativeApplicationId: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetPackagesByPackageFamily(
    packageFamilyName: ?[*:0]const u16,
    count: ?*u32,
    packageFullNames: ?[*]?PWSTR,
    bufferLength: ?*u32,
    buffer: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.1'
pub extern "KERNEL32" fn FindPackagesByPackageFamily(
    packageFamilyName: ?[*:0]const u16,
    packageFilters: u32,
    count: ?*u32,
    packageFullNames: ?[*]?PWSTR,
    bufferLength: ?*u32,
    buffer: ?[*:0]u16,
    packageProperties: ?[*]u32,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.1'
pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn GetStagedPackageOrigin(
    packageFullName: ?[*:0]const u16,
    origin: ?*PackageOrigin,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows8.0'
pub extern "KERNEL32" fn GetCurrentPackageInfo(
    flags: u32,
    bufferLength: ?*u32,
    // TODO: what to do with BytesParamIndex 1?
buffer: ?*u8, count: ?*u32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows8.0' pub extern "KERNEL32" fn OpenPackageInfoByFullName( packageFullName: ?[*:0]const u16, reserved: u32, packageInfoReference: ?*?*_PACKAGE_INFO_REFERENCE, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "api-ms-win-appmodel-runtime-l1-1-1" fn OpenPackageInfoByFullNameForUser( userSid: ?PSID, packageFullName: ?[*:0]const u16, reserved: u32, packageInfoReference: ?*?*_PACKAGE_INFO_REFERENCE, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows8.0' pub extern "KERNEL32" fn ClosePackageInfo( packageInfoReference: ?*_PACKAGE_INFO_REFERENCE, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows8.0' pub extern "KERNEL32" fn GetPackageInfo( packageInfoReference: ?*_PACKAGE_INFO_REFERENCE, flags: u32, bufferLength: ?*u32, // TODO: what to do with BytesParamIndex 2? buffer: ?*u8, count: ?*u32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows8.1' pub extern "KERNEL32" fn GetPackageApplicationIds( packageInfoReference: ?*_PACKAGE_INFO_REFERENCE, bufferLength: ?*u32, // TODO: what to do with BytesParamIndex 1? buffer: ?*u8, count: ?*u32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows10.0.10240' pub extern "api-ms-win-appmodel-runtime-l1-1-3" fn GetPackageInfo2( packageInfoReference: ?*_PACKAGE_INFO_REFERENCE, flags: u32, packagePathType: PackagePathType, bufferLength: ?*u32, // TODO: what to do with BytesParamIndex 3? 
buffer: ?*u8, count: ?*u32, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn CheckIsMSIXPackage( packageFullName: ?[*:0]const u16, isMSIXPackage: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn TryCreatePackageDependency( user: ?PSID, packageFamilyName: ?[*:0]const u16, minVersion: PACKAGE_VERSION, packageDependencyProcessorArchitectures: PackageDependencyProcessorArchitectures, lifetimeKind: PackageDependencyLifetimeKind, lifetimeArtifact: ?[*:0]const u16, options: CreatePackageDependencyOptions, packageDependencyId: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn DeletePackageDependency( packageDependencyId: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn AddPackageDependency( packageDependencyId: ?[*:0]const u16, rank: i32, options: AddPackageDependencyOptions, packageDependencyContext: ?*?*PACKAGEDEPENDENCY_CONTEXT__, packageFullName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn RemovePackageDependency( packageDependencyContext: ?*PACKAGEDEPENDENCY_CONTEXT__, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn GetResolvedPackageFullNameForPackageDependency( packageDependencyId: ?[*:0]const u16, packageFullName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn GetIdForPackageDependencyContext( packageDependencyContext: ?*PACKAGEDEPENDENCY_CONTEXT__, packageDependencyId: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn AppPolicyGetLifecycleManagement( processToken: ?HANDLE, policy: ?*AppPolicyLifecycleManagement, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn AppPolicyGetWindowingModel( processToken: ?HANDLE, policy: ?*AppPolicyWindowingModel, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn 
AppPolicyGetMediaFoundationCodecLoading( processToken: ?HANDLE, policy: ?*AppPolicyMediaFoundationCodecLoading, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn AppPolicyGetClrCompat( processToken: ?HANDLE, policy: ?*AppPolicyClrCompat, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn AppPolicyGetThreadInitializationType( processToken: ?HANDLE, policy: ?*AppPolicyThreadInitializationType, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn AppPolicyGetShowDeveloperDiagnostic( processToken: ?HANDLE, policy: ?*AppPolicyShowDeveloperDiagnostic, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn AppPolicyGetProcessTerminationMethod( processToken: ?HANDLE, policy: ?*AppPolicyProcessTerminationMethod, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn AppPolicyGetCreateFileAccess( processToken: ?HANDLE, policy: ?*AppPolicyCreateFileAccess, ) callconv(@import("std").os.windows.WINAPI) i32; pub extern "KERNEL32" fn CreatePackageVirtualizationContext( packageFamilyName: ?[*:0]const u16, context: ?*?*PACKAGE_VIRTUALIZATION_CONTEXT_HANDLE__, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn ActivatePackageVirtualizationContext( context: ?*PACKAGE_VIRTUALIZATION_CONTEXT_HANDLE__, cookie: ?*usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn ReleasePackageVirtualizationContext( context: ?*PACKAGE_VIRTUALIZATION_CONTEXT_HANDLE__, ) callconv(@import("std").os.windows.WINAPI) void; pub extern "KERNEL32" fn DeactivatePackageVirtualizationContext( cookie: usize, ) callconv(@import("std").os.windows.WINAPI) void; pub extern "KERNEL32" fn DuplicatePackageVirtualizationContext( sourceContext: ?*PACKAGE_VIRTUALIZATION_CONTEXT_HANDLE__, destContext: ?*?*PACKAGE_VIRTUALIZATION_CONTEXT_HANDLE__, ) callconv(@import("std").os.windows.WINAPI) HRESULT; pub extern "KERNEL32" fn 
GetCurrentPackageVirtualizationContext( ) callconv(@import("std").os.windows.WINAPI) ?*PACKAGE_VIRTUALIZATION_CONTEXT_HANDLE__; pub extern "KERNEL32" fn GetProcessesInVirtualizationContext( packageFamilyName: ?[*:0]const u16, count: ?*u32, processes: ?*?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (10) //-------------------------------------------------------------------------------- const Guid = @import("../../zig.zig").Guid; const BOOL = @import("../../foundation.zig").BOOL; const HANDLE = @import("../../foundation.zig").HANDLE; const HRESULT = @import("../../foundation.zig").HRESULT; const IStream = @import("../../system/com.zig").IStream; const IUnknown = @import("../../system/com.zig").IUnknown; const IUri = @import("../../system/com.zig").IUri; const PSID = @import("../../foundation.zig").PSID; const PSTR = @import("../../foundation.zig").PSTR; const PWSTR = @import("../../foundation.zig").PWSTR; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
win32/storage/packaging/appx.zig
const std = @import("std");
usingnamespace @import("c.zig");
const todo = @import("todo.zig");
const misc = @import("misc.zig");

/// Number of week/archive header lines emitted by the last call to
/// drawTodoListViewToWindow; callers can read this to adjust scrolling math.
pub var dateline_count: i32 = 0;

/// Draws the selection pointer (or blank padding) in front of a list row and
/// advances `draw_x` by the 4 cells the pointer column occupies.
fn drawPointer(window: *WINDOW, enable_unicode: bool, selection_index: usize, list_index: usize, draw_x: *i32, draw_y: i32) void {
    if (selection_index == list_index)
        _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(if (enable_unicode) " \u{f061} " else " --> "), 7) // NB: Extra 3 to cover DAY in archived entries
    else
        _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(" "), 4);
    draw_x.* += 4;
}

/// Draws the "not started" state marker (color pair 1) and advances `draw_x` by 5.
fn drawNotStarted(window: *WINDOW, enable_unicode: bool, draw_x: *i32, draw_y: i32) void {
    _ = wattron(window, COLOR_PAIR(1));
    _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(if (enable_unicode) " \u{F62F} " else " --- "), 5);
    _ = wattroff(window, COLOR_PAIR(1));
    draw_x.* += 5;
}

/// Draws the "priority" state marker (color pair 2) and advances `draw_x` by 5.
fn drawPriority(window: *WINDOW, enable_unicode: bool, draw_x: *i32, draw_y: i32) void {
    _ = wattron(window, COLOR_PAIR(2));
    _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(if (enable_unicode) "\u{f96d} \u{e20e} " else " !!! "), 5);
    _ = wattroff(window, COLOR_PAIR(2));
    draw_x.* += 5;
}

/// Draws the "doing" state marker (color pair 4) and advances `draw_x` by 5.
fn drawDoing(window: *WINDOW, enable_unicode: bool, draw_x: *i32, draw_y: i32) void {
    _ = wattron(window, COLOR_PAIR(4));
    _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(if (enable_unicode) " \u{f1ba} " else " -@- "), 5);
    _ = wattroff(window, COLOR_PAIR(4));
    draw_x.* += 5;
}

/// Draws the "done" state marker (color pair 5) and advances `draw_x` by 5.
fn drawDone(window: *WINDOW, enable_unicode: bool, draw_x: *i32, draw_y: i32) void {
    _ = wattron(window, COLOR_PAIR(5));
    _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(if (enable_unicode) " \u{f00c} " else " $$$ "), 5);
    _ = wattroff(window, COLOR_PAIR(5));
    draw_x.* += 5;
}

/// Draws the "in review" state marker (color pair 3) and advances `draw_x` by 5.
fn drawInReview(window: *WINDOW, enable_unicode: bool, draw_x: *i32, draw_y: i32) void {
    _ = wattron(window, COLOR_PAIR(3));
    _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(if (enable_unicode) " \u{f1d8} " else " >>> "), 5);
    _ = wattroff(window, COLOR_PAIR(3));
    draw_x.* += 5;
}

/// Draws the "discarded" state marker (color pair 6) and advances `draw_x` by 5.
fn drawDiscarded(window: *WINDOW, enable_unicode: bool, draw_x: *i32, draw_y: i32) void {
    _ = wattron(window, COLOR_PAIR(6));
    _ = mvwaddnwstr(window, draw_y, draw_x.*, misc.u8ToWideString(if (enable_unicode) " \u{F014} " else " XXX "), 5);
    _ = wattroff(window, COLOR_PAIR(6));
    draw_x.* += 5;
}

/// Draws the English ordinal suffix ("st"/"nd"/"rd"/"th") for `number` at the
/// current position and advances `draw_x` by the 2 cells written.
/// FIX: the previous implementation checked `@mod(number_mod_10, 11) == 0` (and
/// 12, 13) against the already-mod-10 value, which can never be 0, so the teens
/// were rendered as "11st"/"12nd"/"13rd". The irregular 11/12/13 cases must be
/// detected on `number % 100` instead.
pub fn drawNumberSuffix(window: *WINDOW, enable_unicode: bool, number: i32, draw_x: *i32, draw_y: i32) void {
    var to_draw: *wchar_t = undefined;
    const mod_100 = @mod(number, 100);
    // 11th, 12th and 13th are irregular: they take "th" despite ending in 1/2/3.
    if (mod_100 >= 11 and mod_100 <= 13) {
        to_draw = misc.u8ToWideString("th");
    } else switch (@mod(number, 10)) {
        1 => to_draw = misc.u8ToWideString("st"),
        2 => to_draw = misc.u8ToWideString("nd"),
        3 => to_draw = misc.u8ToWideString("rd"),
        else => to_draw = misc.u8ToWideString("th"),
    }
    _ = mvwaddnwstr(window, draw_y, draw_x.*, to_draw, 2);
    draw_x.* += 2;
}

/// Returns the (1-based) week-of-year number of `t`, counting weeks as starting
/// on Sunday. Works by rewinding `t` to the previous Sunday and dividing its
/// day-of-year by 7; a partial first week counts as a full week.
fn get_week_number(t: *tm) i32 {
    var local = t.*;
    local.tm_mday -= local.tm_wday;
    // mktime normalizes the struct (recomputes tm_yday after the mday shift).
    _ = mktime(&local);
    const prev_sunday = local.tm_yday;
    const week_count = @divFloor(prev_sunday, 7);
    const first_week_length = @mod(prev_sunday, 7);
    if (first_week_length > 0) return week_count + 1;
    return week_count;
}

/// Renders the todo list into `window`: one header line per week (or per state
/// change / archive group), followed by one line per visible entry showing the
/// pointer, day-of-week, state marker and entry text. `scrolling` is the index
/// of the first entry to show; the entry at `selection_index` is highlighted.
/// Updates the module-level `dateline_count` with the number of headers drawn.
pub fn drawTodoListViewToWindow(window: *WINDOW, enable_unicode: bool, todoList: todo.List, selection_index: usize, scrolling: usize) void {
    var window_width: c_int = undefined;
    var window_height: c_int = undefined;
    curses_getmaxyx(stdscr, &window_height, &window_width);
    _ = werase(stdscr);
    defer _ = wrefresh(stdscr);
    var draw_x: c_int = 0;
    var draw_y: c_int = 1;
    var list_index: usize = 0;
    var max_x: c_int = undefined;
    var max_y: c_int = undefined;
    curses_getmaxyx(window, &max_y, &max_x);
    // A window-width string of blanks used to clear each row before drawing.
    var space = std.meta.cast(
        [*c]wchar_t,
        calloc(@intCast(usize, max_x + 1), @sizeOf(wchar_t)).?,
    );
    {
        var i: i32 = 0;
        while (i < max_x) : (i += 1) _ = wcscat(space, misc.u8ToWideString(" "));
    }
    var scratch_buffer: [64]wchar_t = undefined;
    var week_tracker_tm = std.mem.zeroes(tm);
    var printed_header = false;
    var prev_state = todo.State.Not_Started;
    dateline_count = 0;
    for (todoList.items) |entry| {
        if (draw_y >= window_height) break;
        if (list_index >= scrolling and (list_index - scrolling) < (window_height - 1)) {
            // Pick the timestamp that is most meaningful for the entry's state.
            var tm_ = std.mem.zeroes(tm);
            switch (entry.state) {
                .Discarded, .Priority, .Doing, .In_Review => _ = localtime_r(&entry.time_started, &tm_),
                .Not_Started => _ = localtime_r(&entry.time_added, &tm_),
                .Done => _ = localtime_r(&entry.time_complete, &tm_),
            }
            // Emit a new header when the week, year or state group changes.
            if (!printed_header or (get_week_number(&tm_) < get_week_number(&week_tracker_tm)) or (tm_.tm_year < week_tracker_tm.tm_year) or (prev_state != entry.state)) {
                prev_state = entry.state;
                week_tracker_tm = tm_;
                week_tracker_tm.tm_mday -= week_tracker_tm.tm_wday;
                const time_at_start_of_week = mktime(&week_tracker_tm);
                printed_header = true;
                dateline_count += 1;
                if (entry.state != .Discarded) {
                    // "2021 Week #12 - March 21st .. March 27th" style header.
                    var date_string_length = wcsftime(&scratch_buffer[0], scratch_buffer.len, misc.u8ToWideString(" %Y Week #%U - %B %e"), &week_tracker_tm);
                    _ = mvwaddnwstr(window, draw_y, draw_x, &scratch_buffer[0], @intCast(c_int, date_string_length));
                    draw_x += @intCast(c_int, date_string_length);
                    drawNumberSuffix(window, enable_unicode, week_tracker_tm.tm_mday, &draw_x, draw_y);
                    const time_at_end_of_week = time_at_start_of_week + 60 * 60 * 24 * 7;
                    var temp_tm: tm = undefined;
                    _ = localtime_r(&time_at_end_of_week, &temp_tm);
                    date_string_length = wcsftime(&scratch_buffer[0], scratch_buffer.len, misc.u8ToWideString(" .. %B %e"), &temp_tm);
                    _ = mvwaddnwstr(window, draw_y, draw_x, &scratch_buffer[0], @intCast(c_int, date_string_length));
                    draw_x += @intCast(c_int, date_string_length);
                    drawNumberSuffix(window, enable_unicode, temp_tm.tm_mday, &draw_x, draw_y);
                } else {
                    _ = mvwaddnwstr(window, draw_y, draw_x, misc.u8ToWideString(" - ARCHIVED - "), " - ARCHIVED - ".len);
                }
                draw_y += 1;
                draw_x = 0;
                if (draw_y >= window_height) break;
            }
            if (selection_index == list_index) _ = wattron(window, WA_STANDOUT);
            // Clear whole line
            _ = mvwaddnwstr(window, draw_y, 0, space, max_x);
            _ = wattron(window, COLOR_PAIR(1));
            drawPointer(window, enable_unicode, selection_index, list_index, &draw_x, draw_y);
            const day_string_length = wcsftime(&scratch_buffer[0], scratch_buffer.len, misc.u8ToWideString("%a"), &tm_);
            if (entry.state != .Discarded) _ = mvwaddnwstr(window, draw_y, draw_x, &scratch_buffer[0], @intCast(c_int, day_string_length));
            _ = wattroff(window, COLOR_PAIR(1));
            draw_x += @intCast(c_int, day_string_length);
            switch (entry.state) {
                .Not_Started => drawNotStarted(window, enable_unicode, &draw_x, draw_y),
                .Priority => drawPriority(window, enable_unicode, &draw_x, draw_y),
                .Doing => drawDoing(window, enable_unicode, &draw_x, draw_y),
                .Done => drawDone(window, enable_unicode, &draw_x, draw_y),
                .In_Review => drawInReview(window, enable_unicode, &draw_x, draw_y),
                .Discarded => drawDiscarded(window, enable_unicode, &draw_x, draw_y),
            }
            _ = mvwaddwstr(window, draw_y, draw_x, entry.text);
            if (selection_index == list_index) _ = wattroff(window, WA_STANDOUT);
            draw_y += 1;
            draw_x = 0;
        }
        list_index += 1;
    }
    _ = free(space);
}
src/draw.zig
const builtin = @import("builtin");
const std = @import("std");
const mem = std.mem;
const testing = std.testing;

const Blob = @import("sqlite.zig").Blob;

/// Text is used to represent a SQLite TEXT value when binding a parameter or reading a column.
pub const Text = struct { data: []const u8 };

/// A single `?` placeholder found in a query, optionally carrying the Zig type
/// spelled in a `?{...}` annotation.
const BindMarker = union(enum) {
    Typed: type,
    Untyped: void,
};

/// A SQL query parsed at compile time: the query text with `?{type}` markers
/// reduced to plain `?`, plus the list of bind markers in order of appearance.
pub const ParsedQuery = struct {
    const Self = @This();

    // Markers in order of appearance; only the first `nb_bind_markers` are valid.
    bind_markers: [128]BindMarker,
    nb_bind_markers: usize,

    // The rewritten query text; only the first `query_size` bytes are valid.
    query: [1024]u8,
    query_size: usize,

    /// Parses `query` at compile time, stripping `?{type}` annotations down to
    /// bare `?` and recording each marker. Emits a compile error on an
    /// unterminated `?{...` annotation or an unknown type name.
    pub fn from(comptime query: []const u8) Self {
        const State = enum {
            Start,
            BindMarker,
            BindMarkerType,
        };

        comptime var buf: [query.len]u8 = undefined;
        comptime var pos = 0;
        // FIX: was `comptime var state = .Start;`, which made `state` an enum
        // *literal* and left the declared `State` enum unused. Typing it makes
        // the switch below exhaustive over a real enum.
        comptime var state: State = .Start;

        comptime var current_bind_marker_type: [256]u8 = undefined;
        comptime var current_bind_marker_type_pos = 0;

        comptime var parsed_query: ParsedQuery = undefined;
        parsed_query.nb_bind_markers = 0;

        // FIX: dropped the unused index capture `|c, i|` from the original.
        inline for (query) |c| {
            switch (state) {
                .Start => switch (c) {
                    '?' => {
                        state = .BindMarker;
                        buf[pos] = c;
                        pos += 1;
                    },
                    else => {
                        buf[pos] = c;
                        pos += 1;
                    },
                },
                .BindMarker => switch (c) {
                    '{' => {
                        state = .BindMarkerType;
                        current_bind_marker_type_pos = 0;
                    },
                    else => {
                        // This is a bind marker without a type.
                        state = .Start;
                        parsed_query.bind_markers[parsed_query.nb_bind_markers] = BindMarker{ .Untyped = {} };
                        parsed_query.nb_bind_markers += 1;
                        buf[pos] = c;
                        pos += 1;
                    },
                },
                .BindMarkerType => switch (c) {
                    '}' => {
                        state = .Start;
                        const typ = parsed_query.parseType(current_bind_marker_type[0..current_bind_marker_type_pos]);
                        parsed_query.bind_markers[parsed_query.nb_bind_markers] = BindMarker{ .Typed = typ };
                        parsed_query.nb_bind_markers += 1;
                    },
                    else => {
                        current_bind_marker_type[current_bind_marker_type_pos] = c;
                        current_bind_marker_type_pos += 1;
                    },
                },
                // NOTE: the original had an `else => @compileError(...)` prong
                // here; with `state` properly typed as `State` all cases are
                // covered and an else prong would itself be a compile error.
            }
        }

        // The last character was ? so this must be an untyped bind marker.
        if (state == .BindMarker) {
            parsed_query.bind_markers[parsed_query.nb_bind_markers] = BindMarker{ .Untyped = {} };
            parsed_query.nb_bind_markers += 1;
        }
        if (state == .BindMarkerType) {
            @compileError("invalid final state " ++ @tagName(state) ++ ", this means you wrote an incomplete bind marker type");
        }

        mem.copy(u8, &parsed_query.query, &buf);
        parsed_query.query_size = pos;

        return parsed_query;
    }

    /// Maps a type name written inside `?{...}` to the corresponding Zig type.
    /// Supports usize/isize, arbitrary-width u*/i* integers, f16..f128, bool,
    /// byte slices, and the SQLite wrappers `text`/`blob`.
    fn parseType(comptime self: *Self, type_info: []const u8) type {
        if (type_info.len <= 0) @compileError("invalid type info " ++ type_info);

        // Integer
        if (mem.eql(u8, "usize", type_info)) return usize;
        if (mem.eql(u8, "isize", type_info)) return isize;

        if (type_info[0] == 'u' or type_info[0] == 'i') {
            // Build e.g. u32/i7 from the digits following the sign letter.
            return @Type(builtin.TypeInfo{
                .Int = builtin.TypeInfo.Int{
                    .signedness = if (type_info[0] == 'i') .signed else .unsigned,
                    .bits = std.fmt.parseInt(usize, type_info[1..type_info.len], 10) catch {
                        @compileError("invalid type info " ++ type_info);
                    },
                },
            });
        }

        // Float
        if (mem.eql(u8, "f16", type_info)) return f16;
        if (mem.eql(u8, "f32", type_info)) return f32;
        if (mem.eql(u8, "f64", type_info)) return f64;
        if (mem.eql(u8, "f128", type_info)) return f128;

        // Bool
        if (mem.eql(u8, "bool", type_info)) return bool;

        // Strings
        if (mem.eql(u8, "[]const u8", type_info) or mem.eql(u8, "[]u8", type_info)) {
            return []const u8;
        }
        if (mem.eql(u8, "text", type_info)) return Text;
        if (mem.eql(u8, "blob", type_info)) return Blob;

        @compileError("invalid type info " ++ type_info);
    }

    /// Returns the rewritten query text (annotations stripped).
    pub fn getQuery(comptime self: *const Self) []const u8 {
        return self.query[0..self.query_size];
    }
};

test "parsed query: query" {
    const testCase = struct {
        query: []const u8,
        expected_query: []const u8,
    };

    const testCases = &[_]testCase{
        .{
            .query = "INSERT INTO user(id, name, age) VALUES(?{usize}, ?{[]const u8}, ?{u32})",
            .expected_query = "INSERT INTO user(id, name, age) VALUES(?, ?, ?)",
        },
        .{
            .query = "SELECT id, name, age FROM user WHER age > ?{u32} AND age < ?{u32}",
            .expected_query = "SELECT id, name, age FROM user WHER age > ? AND age < ?",
        },
        .{
            .query = "SELECT id, name, age FROM user WHER age > ? AND age < ?",
            .expected_query = "SELECT id, name, age FROM user WHER age > ? AND age < ?",
        },
    };

    inline for (testCases) |tc| {
        comptime var parsed_query = ParsedQuery.from(tc.query);
        testing.expectEqualStrings(tc.expected_query, parsed_query.getQuery());
    }
}

test "parsed query: bind markers types" {
    const testCase = struct {
        query: []const u8,
        expected_marker: BindMarker,
    };

    const testCases = &[_]testCase{
        .{
            .query = "foobar ?{usize}",
            .expected_marker = .{ .Typed = usize },
        },
        .{
            .query = "foobar ?{text}",
            .expected_marker = .{ .Typed = Text },
        },
        .{
            .query = "foobar ?{blob}",
            .expected_marker = .{ .Typed = Blob },
        },
        .{
            .query = "foobar ?",
            .expected_marker = .{ .Untyped = {} },
        },
    };

    inline for (testCases) |tc| {
        comptime var parsed_query = ParsedQuery.from(tc.query);
        testing.expectEqual(1, parsed_query.nb_bind_markers);
        const bind_marker = parsed_query.bind_markers[0];
        switch (tc.expected_marker) {
            .Typed => |typ| testing.expectEqual(typ, bind_marker.Typed),
            .Untyped => |typ| testing.expectEqual(typ, bind_marker.Untyped),
        }
    }
}
query.zig
const c = @import("../c_global.zig").c_imp; const std = @import("std"); const za = @import("zalgebra"); // dross-zig const gl = @import("backend/renderer_opengl.zig"); const RendererGl = gl.RendererGl; const app = @import("../core/application.zig"); const Application = app.Application; const TextureId = @import("texture.zig").TextureId; const TextureRegion = @import("texture_region.zig").TextureRegion; const Sprite = @import("sprite.zig").Sprite; const Color = @import("../core/color.zig").Color; const Camera = @import("../renderer/cameras/camera_2d.zig"); const Matrix4 = @import("../core/matrix4.zig").Matrix4; const Vector3 = @import("../core/vector3.zig").Vector3; const Vector2 = @import("../core/vector2.zig").Vector2; const FrameStatistics = @import("../utils/profiling/frame_statistics.zig").FrameStatistics; const String = @import("../utils/strings.zig"); // ----------------------------------------------------------------------------- // ----------------------------------------- // - BackendApi - // ----------------------------------------- /// An enum to keep track of which graphics api is /// being used, so the renderer can be api agnostic. 
pub const BackendApi = enum(u8) {
    OpenGl,
    Vulkan,
    Dx12,
    //Metal, // Will probably never happen as it is such a smaller portion
};

/// The backend the renderer dispatches to; compile-time constant.
pub const api: BackendApi = BackendApi.OpenGl;

// -----------------------------------------
// - RendererErrors -
// -----------------------------------------
pub const RendererErrors = error{
    DuplicateRenderer,
};

// -----------------------------------------
// - PackingMode -
// -----------------------------------------
pub const PackingMode = enum {
    /// Affects the packing of pixel data
    Pack,
    /// Affects the unpacking of pixel data
    Unpack,
};

// -----------------------------------------
// - ByteAlignment -
// -----------------------------------------
pub const ByteAlignment = enum {
    /// Byte-aligned
    One,
    /// Rows aligned to even-numbered bytes
    Two,
    /// Word-aligned
    Four,
    /// Rows start on double-word boundaries
    Eight,
};

// -----------------------------------------
// - Renderer -
// -----------------------------------------
var renderer: *Renderer = undefined;
// FIX: `free` used to test `renderer == undefined`, but comparing a pointer
// against `undefined` is meaningless in Zig (the result is itself undefined).
// Track initialization explicitly instead.
var renderer_initialized: bool = false;

/// The main renderer for the application.
/// Meant to be MOSTLY backend agnostic.
pub const Renderer = struct {
    gl_backend: ?*RendererGl = undefined,

    /// Allocates and builds a Renderer instance
    /// Comments: INTERNAL use only. The Renderer will be the owner of the allocated memory.
    pub fn new(allocator: *std.mem.Allocator) anyerror!void {
        renderer = try allocator.create(Renderer);
        // FIX: don't leak the Renderer allocation if backend setup fails below.
        errdefer allocator.destroy(renderer);
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend = try RendererGl.new(allocator);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
        renderer_initialized = true;
    }

    /// Frees any allocated memory that the Renderer owns. Safe to call even if
    /// `new` was never (successfully) invoked.
    /// Comments: INTERNAL use only.
    pub fn free(allocator: *std.mem.Allocator) void {
        if (!renderer_initialized) return;
        switch (api) {
            BackendApi.OpenGl => {
                RendererGl.free(allocator, renderer.gl_backend.?);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
        allocator.destroy(renderer);
        renderer_initialized = false;
    }

    /// Handles the rendering process: runs the user render loop, then the user
    /// gui loop, and (in debug mode) overlays frame-profiling statistics.
    /// Comments: INTERNAL use only.
    pub fn render(render_loop: fn () anyerror!void, gui_render_loop: fn () anyerror!void) void {
        var camera = Camera.currentCamera();

        // Prepare for the user defined render loop
        Renderer.beginRender(camera.?);

        // Call user-defined render
        _ = render_loop() catch |err| {
            std.debug.print("[Renderer]: Render event encountered an error! {s}\n", .{err});
            @panic("[Renderer]: Error occurred during the user-defined render event!\n");
        };

        // Submit the framebuffer to be renderered
        Renderer.endRender();

        // Prepare the user-defined gui render loop
        Renderer.beginGui();

        // Call user-defined gui render
        _ = gui_render_loop() catch |err| {
            std.debug.print("[Renderer]: Render event encountered an error! {s}\n", .{err});
            @panic("[Renderer]: Error occurred during the user-defined render event!\n");
        };

        // Profiling stats
        if (!app.debug_mode) {
            Renderer.endGui();
            return;
        }

        const window_size = Application.windowSize();
        const string_height = 30.0;
        const top_padding = 0.0;
        const left_padding = 20.0;
        const window_size_y = window_size.y();
        const background_size = Vector3.new(window_size.x() * 0.25, 100 + top_padding + (string_height * 5.0), 0.0);
        var background_color = Color.darkGray();
        const background_opacity = 0.5;
        background_color.a = background_opacity;

        // Draw background window
        Renderer.drawColoredQuadGui(Vector3.new(0.0, window_size_y - background_size.y(), 0.0), background_size, background_color);

        // Populate Stats
        const frame_time: f64 = FrameStatistics.frameTime();
        const update_time: f64 = FrameStatistics.updateTime();
        const draw_time: f64 = FrameStatistics.drawTime();
        var draw_calls: i64 = FrameStatistics.drawCalls();
        var quad_count: i64 = FrameStatistics.quadCount();

        var frame_time_buffer: [128]u8 = undefined;
        var update_time_buffer: [128]u8 = undefined;
        var draw_time_buffer: [128]u8 = undefined;
        var draw_calls_buffer: [128]u8 = undefined;
        var quad_count_buffer: [128]u8 = undefined;

        var frame_time_string = String.format(&frame_time_buffer, "Frame (ms): {d:5}", .{frame_time});
        var update_time_string = String.format(&update_time_buffer, "User Update (ms): {d:5}", .{update_time});
        var draw_time_string = String.format(&draw_time_buffer, "Draw (ms): {d:6}", .{draw_time});

        // Account for the overlay itself: one extra draw call (background quad)
        // and one quad per glyph in each stats string.
        draw_calls += 1;
        quad_count += @intCast(i64, frame_time_string.len);
        quad_count += @intCast(i64, update_time_string.len);
        quad_count += @intCast(i64, draw_time_string.len);

        var draw_calls_string = String.format(&draw_calls_buffer, "Draw Calls: {}", .{draw_calls});
        quad_count += @intCast(i64, draw_calls_string.len);

        var quad_count_string = String.format(&quad_count_buffer, "Quad Count: {}", .{quad_count});
        // FIX: was `draw_calls_string.len` again (copy-paste); count the glyphs
        // of the quad-count string itself.
        quad_count += @intCast(i64, quad_count_string.len);

        // Draw Stats
        Renderer.drawText(frame_time_string, left_padding, window_size_y - top_padding - (string_height * 1.0), 1.0, Color.white());
        Renderer.drawText(update_time_string, left_padding, window_size_y - top_padding - (string_height * 2.0), 1.0, Color.white());
        Renderer.drawText(draw_time_string, left_padding, window_size_y - top_padding - (string_height * 3.0), 1.0, Color.white());
        Renderer.drawText(draw_calls_string, left_padding, window_size_y - top_padding - (string_height * 4.0), 1.0, Color.white());
        Renderer.drawText(quad_count_string, left_padding, window_size_y - top_padding - (string_height * 5.0), 1.0, Color.white());

        // Submit the gui to be renderered
        Renderer.endGui();
    }

    /// Flags and sets up for the start of the user-defined render event
    /// Comments: INTERNAL use only.
    pub fn beginRender(camera: *Camera.Camera2d) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.beginRender(camera);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Flags and sets up for the start of the user-defined gui event
    /// Comments: INTERNAL use only.
    pub fn beginGui() void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.beginGui();
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Handles the clean up for the end of the user-defined render event
    /// Comments: INTERNAL use only.
    pub fn endRender() void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.endRender();
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Handles the clean up for the end of the user-defined gui event
    /// Comments: INTERNAL use only.
    pub fn endGui() void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.endGui();
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Change the color the windows clears to.
    pub fn changeClearColor(color: Color) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.changeClearColor(color);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Sets up renderer to be able to draw a untextured quad.
    pub fn drawQuad(position: Vector3) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.drawQuad(position);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Sets up renderer to be able to draw a untextured, colored quad.
    pub fn drawColoredQuad(position: Vector3, size: Vector3, color: Color) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.drawColoredQuad(position, size, color);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Draws a colored quad in gui (screen) space.
    pub fn drawColoredQuadGui(position: Vector3, size: Vector3, color: Color) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.drawColoredQuadGui(position, size, color);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Sets up renderer to be able to draw a textured quad.
    pub fn drawTexturedQuad(texture_region: *TextureRegion, position: Vector3, scale: Vector2, color: Color, flip_h: bool) void {
        //pub fn drawTexturedQuad(texture_id: TextureId, position: Vector3, scale: Vector2, color: Color) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.drawTexturedQuad(texture_region, position, scale, color, flip_h);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Sets up renderer to be able to draw a Sprite.
    pub fn drawSprite(sprite: *Sprite, position: Vector3) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.drawSprite(sprite, position);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Sets up the renderer to be able to draw text
    pub fn drawText(text: []const u8, x: f32, y: f32, scale: f32, color: Color) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.drawText(text, x, y, scale, color);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Request to disable byte_alignment restriction
    pub fn setByteAlignment(packing_mode: PackingMode, alignment: ByteAlignment) void {
        switch (api) {
            BackendApi.OpenGl => {
                renderer.gl_backend.?.setByteAlignment(packing_mode, alignment);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Clears out the currently bound texture
    pub fn clearBoundTexture() void {
        switch (api) {
            BackendApi.OpenGl => {
                gl.RendererGl.clearBoundTexture();
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Resizes the viewport to the given size and position
    /// Comments: INTERNAL use only.
    pub fn resizeViewport(x: c_int, y: c_int, width: c_int, height: c_int) void {
        switch (api) {
            BackendApi.OpenGl => {
                gl.resizeViewport(x, y, width, height);
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }

    /// Window resize callback for GLFW
    /// Comments: INTERNAL use only.
    pub fn resizeInternal(window: ?*c.GLFWwindow, width: c_int, height: c_int) callconv(.C) void {
        var x_pos: c_int = 0;
        var y_pos: c_int = 0;
        c.glfwGetWindowPos(window, &x_pos, &y_pos);
        switch (api) {
            BackendApi.OpenGl => {
                c.glViewport(x_pos, y_pos, width, height);
                Application.setWindowSize(@intToFloat(f32, width), @intToFloat(f32, height));
            },
            BackendApi.Dx12 => {},
            BackendApi.Vulkan => {},
        }
    }
};
src/renderer/renderer.zig
const std = @import("std");
const panic = std.debug.panic;
const builtin = @import("builtin");
const warn = std.debug.warn;
const join = std.fs.path.join;
usingnamespace @import("c.zig");
const Shader = @import("shader.zig").Shader;

// settings
const SCR_WIDTH: u32 = 1920;
const SCR_HEIGHT: u32 = 1080;

/// Learn-OpenGL "textures" demo: creates a GLFW window with a GL 3.3 core
/// context, uploads one quad (position + color + uv) and two textures, then
/// renders the textured quad until the window is closed.
pub fn main() !void {
    const allocator = std.heap.page_allocator;
    const vertPath = try join(allocator, &[_][]const u8{ "shaders", "1_4_textures.vert" });
    // fix: join() allocates and the result was never freed
    defer allocator.free(vertPath);
    const fragPath = try join(allocator, &[_][]const u8{ "shaders", "1_4_textures.frag" });
    defer allocator.free(fragPath);

    const ok = glfwInit();
    if (ok == 0) {
        panic("Failed to initialise GLFW\n", .{});
    }
    defer glfwTerminate();

    // Request an OpenGL 3.3 core-profile context.
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    if (builtin.os.tag == .macosx) {
        glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
    }

    // glfw: initialize and configure
    var window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "Learn OpenGL", null, null);
    if (window == null) {
        panic("Failed to create GLFW window\n", .{});
    }
    glfwMakeContextCurrent(window);
    // fix: the previous callback returned here was bound to an unused local; discard it
    _ = glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);

    // glad: load all OpenGL function pointers
    if (gladLoadGLLoader(@ptrCast(GLADloadproc, glfwGetProcAddress)) == 0) {
        panic("Failed to initialise GLAD\n", .{});
    }

    // build and compile our shader program
    const ourShader = try Shader.init(allocator, vertPath, fragPath);

    // set up vertex data (and buffer(s)) and configure vertex attributes
    const vertices = [_]f32{
        // positions // colors // texture coords
        0.5, 0.5, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, // top right
        0.5, -0.5, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, // bottom right
        -0.5, -0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, // bottom left
        -0.5, 0.5, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, // top left
    };
    const indices = [_]u32{
        0, 1, 3, // first triangle
        1, 2, 3, // second triangle
    };
    var VAO: c_uint = undefined;
    var VBO: c_uint = undefined;
    var EBO: c_uint = undefined;
    glGenVertexArrays(1, &VAO);
    defer glDeleteVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    defer glDeleteBuffers(1, &VBO);
    glGenBuffers(1, &EBO);
    defer glDeleteBuffers(1, &EBO);

    // bind the Vertex Array Object first, then bind and set vertex buffer(s), and then configure vertex attributes(s).
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, vertices.len * @sizeOf(f32), &vertices, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.len * @sizeOf(u32), &indices, GL_STATIC_DRAW);

    // position attribute: 3 floats at offset 0 of the 8-float stride
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * @sizeOf(f32), null);
    glEnableVertexAttribArray(0);
    // color attribute: 3 floats starting after the position
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * @sizeOf(f32), @intToPtr(*c_void, 3 * @sizeOf(f32)));
    glEnableVertexAttribArray(1);
    // texture coord attribute: 2 floats at the end of the stride
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * @sizeOf(f32), @intToPtr(*c_void, 6 * @sizeOf(f32)));
    glEnableVertexAttribArray(2);

    // load and create a texture
    var texture1: c_uint = undefined;
    var texture2: c_uint = undefined;
    // texture 1
    glGenTextures(1, &texture1);
    glBindTexture(GL_TEXTURE_2D, texture1);
    // set the texture wrapping parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    // set texture filtering parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    // load image, create texture and generate mipmaps
    var width: c_int = undefined;
    var height: c_int = undefined;
    var nrChannels: c_int = undefined;
    stbi_set_flip_vertically_on_load(1); // tell stb_image.h to flip loaded texture's on the y-axis.
    var data = stbi_load("textures/container.jpg", &width, &height, &nrChannels, 0);
    if (data != null) {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
        glGenerateMipmap(GL_TEXTURE_2D);
    } else {
        warn("Failed to load texture\n", .{});
    }
    stbi_image_free(data);

    // texture 2
    glGenTextures(1, &texture2);
    glBindTexture(GL_TEXTURE_2D, texture2);
    // set the texture wrapping parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    // set texture filtering parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    // load image, create texture and generate mipmaps
    data = stbi_load("textures/awesomeface.png", &width, &height, &nrChannels, 0);
    if (data != null) {
        // note that the awesomeface.png has transparency and thus an alpha channel, so make sure to tell OpenGL the data type is of GL_RGBA
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
        glGenerateMipmap(GL_TEXTURE_2D);
    } else {
        warn("Failed to load texture\n", .{});
    }
    stbi_image_free(data);

    // tell opengl for each sampler to which texture unit it belongs to (only has to be done once)
    ourShader.use(); // don't forget to activate/use the shader before setting uniforms!
    // either set it manually like so:
    glUniform1i(glGetUniformLocation(ourShader.id, "texture1"), 0);
    // or set it via the texture class
    ourShader.setInt("texture2", 1);

    // render loop
    while (glfwWindowShouldClose(window) == 0) {
        // input
        processInput(window);

        // render
        glClearColor(0.2, 0.3, 0.3, 1.0);
        glClear(GL_COLOR_BUFFER_BIT);

        // bind textures on corresponding texture units
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, texture1);
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, texture2);

        // render container
        ourShader.use();
        glBindVertexArray(VAO);
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, null);

        // glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
        glfwSwapBuffers(window);
        glfwPollEvents();
    }
}

/// Process all input: query GLFW whether relevant keys are pressed/released
/// this frame and react accordingly (ESC requests window close).
pub fn processInput(window: ?*GLFWwindow) callconv(.C) void {
    if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
        glfwSetWindowShouldClose(window, 1);
}

/// glfw: whenever the window size changed (by OS or user resize) this callback function executes
pub fn framebuffer_size_callback(window: ?*GLFWwindow, width: c_int, height: c_int) callconv(.C) void {
    _ = window;
    // make sure the viewport matches the new window dimensions; note that width and
    // height will be significantly larger than specified on retina displays.
    glViewport(0, 0, width, height);
}
src/1_4_textures.zig
// Georgios user-space shell: prints the message of the day, reads a command
// line from the keyboard, and dispatches to built-in commands or programs
// found in /bin.
const std = @import("std");
const georgios = @import("georgios");
comptime {_ = georgios;}
const system_calls = georgios.system_calls;
const utils = georgios.utils;
const print_string = system_calls.print_string;

// Forward panics to the OS-provided user-space panic handler.
pub fn panic(msg: []const u8, trace: ?*std.builtin.StackTrace) noreturn {
    georgios.panic(msg, trace);
}

// Chunk buffer used to stream the splash image to the VBE framebuffer.
var img_buffer: [2048]u8 align(@alignOf(u64)) = undefined;

// Show the message of the day: if a VBE graphics mode is active, stream
// /files/dragon.img to the top-right corner of the screen, then print
// /etc/motd to the console. All failures are reported and non-fatal.
fn read_motd() void {
    if (system_calls.vbe_res()) |res| {
        var img = georgios.fs.open("/files/dragon.img") catch |e| {
            print_string("shell: open img error: ");
            print_string(@errorName(e));
            print_string("\n");
            return;
        };
        // Image dimensions are hard-coded to match the shipped dragon.img.
        const img_width: u32 = 301;
        // const img_height: u32 = 170;
        const pos = utils.Point{.x = res.x - img_width - 10, .y = 10};
        var last = utils.Point{};
        var got: usize = 1;
        // Read-and-draw loop; `got == 0` means EOF or a read error.
        while (got > 0) {
            if (img.read(img_buffer[0..])) |g| {
                got = g;
            } else |e| {
                print_string("shell: img file.read error: ");
                print_string(@errorName(e));
                print_string("\n");
                got = 0;
            }
            if (got > 0) {
                // NOTE(review): `last` is passed each chunk but never updated here —
                // presumably the syscall tracks draw progress through it; confirm.
                system_calls.vbe_draw_raw_image_chunk(img_buffer[0..got], img_width, pos, last);
            }
        }
        system_calls.vbe_flush_buffer();
        img.close() catch |e| {
            print_string("shell: img file.close error: ");
            print_string(@errorName(e));
            print_string("\n");
            return;
        };
    }
    var file = georgios.fs.open("/etc/motd") catch |e| {
        print_string("motd open error: ");
        print_string(@errorName(e));
        print_string("\n");
        return;
    };
    var buffer: [128]u8 = undefined;
    var got: usize = 1;
    while (got > 0) {
        if (file.read(buffer[0..])) |g| {
            got = g;
        } else |e| {
            print_string("motd file.read error: ");
            print_string(@errorName(e));
            print_string("\n");
            got = 0;
        }
        if (got > 0) {
            print_string(buffer[0..got]);
        }
    }
    file.close() catch |e| {
        print_string("motd file.close error: ");
        print_string(@errorName(e));
        print_string("\n");
    };
}

// Look for "<name>.elf" among the entries of directory `path`. On a match,
// build "<path>/<entry>" in `buffer` and return the filled slice; otherwise
// return null. `buffer` is also used as scratch for the "<name>.elf" probe.
fn check_bin_path(path: []const u8, name: []const u8, buffer: []u8) ?[]const u8 {
    var dir_entry = georgios.DirEntry{.dir = path};
    // next_dir_entry returns true on failure.
    if (system_calls.next_dir_entry(&dir_entry)) {
        return null;
    }
    var pos = utils.memory_copy_truncate(buffer[0..], name);
    pos = pos + utils.memory_copy_truncate(buffer[pos..], ".elf");
    while (!dir_entry.done) {
        if (utils.memory_compare(dir_entry.current_entry, buffer[0..pos])) {
            // Reuse the buffer to assemble the full path to the match.
            pos = 0;
            pos = utils.memory_copy_truncate(buffer, path);
            pos = pos + utils.memory_copy_truncate(buffer[pos..], "/");
            pos = pos + utils.memory_copy_truncate(buffer[pos..], dir_entry.current_entry);
            return buffer[0..pos];
        }
        if (system_calls.next_dir_entry(&dir_entry)) {
            print_string("Failure in middle of check_bin_path?\n");
            return null;
        }
    }
    return null;
}

// Scratch buffer for the current working directory (prompt and pwd/cd).
var cwd_buffer: [128]u8 = undefined;

// Parse and execute one command line. Returns true only when the shell
// should exit (the "exit" built-in).
fn run_command(command: []const u8) bool {
    // Split the line into parts on single spaces.
    // NOTE(review): consecutive spaces produce empty parts, and a line that
    // ends in a space returns early without running anything — confirm
    // whether either is intended.
    var command_parts: [128][]const u8 = undefined;
    var command_part_count: usize = 0;
    var command_part_len: usize = 0;
    for (command) |c, i| {
        if (c == ' ') {
            command_parts[command_part_count] = command[i - command_part_len..i];
            command_part_count += 1;
            command_part_len = 0;
        } else {
            command_part_len += 1;
        }
    }
    if (command_part_len > 0) {
        command_parts[command_part_count] = command[command.len - command_part_len..];
        command_part_count += 1;
    } else {
        return false;
    }
    if (utils.memory_compare(command_parts[0], "exit")) {
        return true;
    } else if (utils.memory_compare(command_parts[0], "reset")) {
        print_string("\x1bc"); // Reset Console
    } else if (utils.memory_compare(command_parts[0], "pwd")) {
        if (system_calls.get_cwd(cwd_buffer[0..])) |dir| {
            print_string(dir);
            print_string("\n");
        } else |e| {
            print_string("Couldn't get current working directory: ");
            print_string(@errorName(e));
            print_string("\n");
        }
    } else if (utils.memory_compare(command_parts[0], "cd")) {
        if (command_part_count != 2) {
            print_string("cd requires exactly one argument\n");
        } else {
            system_calls.set_cwd(command_parts[1]) catch |e| {
                print_string("Couldn't change current working directory to \"");
                print_string(command_parts[1]);
                print_string("\": ");
                print_string(@errorName(e));
                print_string("\n");
            };
        }
    } else if (utils.memory_compare(command_parts[0], "sleep")) {
        if (command_part_count != 2) {
            print_string("sleep requires exactly one argument\n");
        } else {
            if (std.fmt.parseUnsigned(usize, command_parts[1], 10)) |n| {
                system_calls.sleep_seconds(n);
            } else |e| {
                print_string("invalid argument: ");
                print_string(@errorName(e));
                print_string("\n");
            }
        }
    } else if (utils.memory_compare(command_parts[0], "koverflow")) {
        // Debug built-in: deliberately overflow the kernel stack.
        system_calls.overflow_kernel_stack();
    } else if (utils.memory_compare(command_parts[0], "motd")) {
        read_motd();
    } else {
        // Not a built-in: try "<name>.elf" in /bin, else exec the name as given.
        var command_path = command_parts[0];
        var path_buffer: [128]u8 = undefined;
        if (check_bin_path("/bin", command_parts[0], path_buffer[0..])) |path| {
            command_path = path[0..];
        }
        system_calls.exec(&georgios.ProcessInfo{
            .path = command_path,
            .name = command_parts[0],
            .args = command_parts[1..command_part_count],
        }) catch |e| {
            print_string("Command: \"");
            print_string(command);
            print_string("\" failed: ");
            print_string(@errorName(e));
            print_string("\n");
        };
    }
    return false;
}

// Shell entry point: show the MOTD for the first process, then run the
// prompt/read/execute loop until "exit" or Ctrl-D.
pub fn main() void {
    if (system_calls.get_process_id() == 0) {
        read_motd();
    }
    var buffer: [128]u8 = undefined;
    var got: usize = 0;
    var running = true;
    while (running) {
        // Prompt: decorative glyphs plus the cwd in reverse video.
        print_string("░▒▓\x1b[7m");
        if (system_calls.get_cwd(cwd_buffer[0..])) |dir| {
            if (!(dir.len == 1 and dir[0] == '/')) {
                system_calls.print_string(dir);
            }
        } else |e| {
            print_string("get_cwd failed: ");
            print_string(@errorName(e));
            print_string("\n");
        }
        print_string("%\x1b[7m");
        var getline = true;
        // Read one line, handling Ctrl-D (quit), backspace, and overflow.
        while (getline) {
            const key_event = system_calls.get_key(.Blocking).?;
            if (key_event.char) |c| {
                if (key_event.modifiers.control_is_pressed()) {
                    switch (c) {
                        'd' => {
                            // Ctrl-D: discard the line and leave the shell.
                            got = 0;
                            running = false;
                            break;
                        },
                        else => {},
                    }
                }
                var print = true;
                if (c == '\n') {
                    getline = false;
                } else if (c == '\x08') {
                    // Backspace: only erase (and echo) when there is input.
                    if (got > 0) {
                        got -= 1;
                    } else {
                        print = false;
                    }
                } else if ((got + 1) == buffer.len) {
                    // Line buffer full: drop the character silently.
                    print = false;
                } else {
                    buffer[got] = c;
                    got += 1;
                }
                if (print) {
                    // Echo the single byte back to the console.
                    print_string(@ptrCast([*]const u8, &c)[0..1]);
                }
            }
        }
        if (got > 0) {
            if (run_command(buffer[0..got])) {
                break; // exit was run
            }
            got = 0;
        }
    }
    print_string("<shell about to exit>\n");
    system_calls.exit(0);
}
programs/shell/shell.zig
// Auto-generated XCB bindings for the XVideo (Xv) extension: enums, wire
// structs, error/event opcodes, and request/reply records. Do not edit by
// hand; regenerate from the XML protocol description instead.
const xcb = @import("../xcb.zig");

pub const id = xcb.Extension{ .name = "XVideo", .global_id = 0 };

pub const PORT = u32;
pub const ENCODING = u32;

pub const Type = extern enum(c_uint) { @"InputMask" = 1, @"OutputMask" = 2, @"VideoMask" = 4, @"StillMask" = 8, @"ImageMask" = 16, };

pub const ImageFormatInfoType = extern enum(c_uint) { @"RGB" = 0, @"YUV" = 1, };

pub const ImageFormatInfoFormat = extern enum(c_uint) { @"Packed" = 0, @"Planar" = 1, };

pub const AttributeFlag = extern enum(c_uint) { @"Gettable" = 1, @"Settable" = 2, };

pub const VideoNotifyReason = extern enum(c_uint) { @"Started" = 0, @"Stopped" = 1, @"Busy" = 2, @"Preempted" = 3, @"HardError" = 4, };

pub const ScanlineOrder = extern enum(c_uint) { @"TopToBottom" = 0, @"BottomToTop" = 1, };

pub const GrabPortStatus = extern enum(c_uint) { @"Success" = 0, @"BadExtension" = 1, @"AlreadyGrabbed" = 2, @"InvalidTime" = 3, @"BadReply" = 4, @"BadAlloc" = 5, };

/// @brief Rational
pub const Rational = struct { @"numerator": i32, @"denominator": i32, };

/// @brief Format
pub const Format = struct { @"visual": xcb.VISUALID, @"depth": u8, @"pad0": [3]u8, };

/// @brief AdaptorInfo
pub const AdaptorInfo = struct { @"base_id": xcb.xv.PORT, @"name_size": u16, @"num_ports": u16, @"num_formats": u16, @"type": u8, @"pad0": u8, @"name": []u8, @"formats": []xcb.xv.Format, };

/// @brief EncodingInfo
pub const EncodingInfo = struct { @"encoding": xcb.xv.ENCODING, @"name_size": u16, @"width": u16, @"height": u16, @"pad0": [2]u8, @"rate": xcb.xv.Rational, @"name": []u8, };

/// @brief Image
pub const Image = struct { @"id": u32, @"width": u16, @"height": u16, @"data_size": u32, @"num_planes": u32, @"pitches": []u32, @"offsets": []u32, @"data": []u8, };

/// @brief AttributeInfo
pub const AttributeInfo = struct { @"flags": u32, @"min": i32, @"max": i32, @"size": u32, @"name": []u8, };

/// @brief ImageFormatInfo
pub const ImageFormatInfo = struct { @"id": u32, @"type": u8, @"byte_order": u8, @"pad0": [2]u8, @"guid": [16]u8, @"bpp": u8, @"num_planes": u8, @"pad1": [2]u8, @"depth": u8, @"pad2": [3]u8, @"red_mask": u32, @"green_mask": u32, @"blue_mask": u32, @"format": u8, @"pad3": [3]u8, @"y_sample_bits": u32, @"u_sample_bits": u32, @"v_sample_bits": u32, @"vhorz_y_period": u32, @"vhorz_u_period": u32, @"vhorz_v_period": u32, @"vvert_y_period": u32, @"vvert_u_period": u32, @"vvert_v_period": u32, @"vcomp_order": [32]u8, @"vscanline_order": u8, @"pad4": [11]u8, };

// Extension error records.

/// Opcode for BadPort.
pub const BadPortOpcode = 0;

/// @brief BadPortError
pub const BadPortError = struct { @"response_type": u8, @"error_code": u8, @"sequence": u16, };

/// Opcode for BadEncoding.
pub const BadEncodingOpcode = 1;

/// @brief BadEncodingError
pub const BadEncodingError = struct { @"response_type": u8, @"error_code": u8, @"sequence": u16, };

/// Opcode for BadControl.
pub const BadControlOpcode = 2;

/// @brief BadControlError
pub const BadControlError = struct { @"response_type": u8, @"error_code": u8, @"sequence": u16, };

// Extension event records.

/// Opcode for VideoNotify.
pub const VideoNotifyOpcode = 0;

/// @brief VideoNotifyEvent
pub const VideoNotifyEvent = struct { @"response_type": u8, @"reason": u8, @"sequence": u16, @"time": xcb.TIMESTAMP, @"drawable": xcb.DRAWABLE, @"port": xcb.xv.PORT, };

/// Opcode for PortNotify.
pub const PortNotifyOpcode = 1;

/// @brief PortNotifyEvent
pub const PortNotifyEvent = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"time": xcb.TIMESTAMP, @"port": xcb.xv.PORT, @"attribute": xcb.ATOM, @"value": i32, };

// Requests, replies, and cookies (minor_opcode identifies the request).

/// @brief QueryExtensioncookie
pub const QueryExtensioncookie = struct { sequence: c_uint, };

/// @brief QueryExtensionRequest
pub const QueryExtensionRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 0, @"length": u16, };

/// @brief QueryExtensionReply
pub const QueryExtensionReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"major": u16, @"minor": u16, };

/// @brief QueryAdaptorscookie
pub const QueryAdaptorscookie = struct { sequence: c_uint, };

/// @brief QueryAdaptorsRequest
pub const QueryAdaptorsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 1, @"length": u16, @"window": xcb.WINDOW, };

/// @brief QueryAdaptorsReply
pub const QueryAdaptorsReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_adaptors": u16, @"pad1": [22]u8, @"info": []xcb.xv.AdaptorInfo, };

/// @brief QueryEncodingscookie
pub const QueryEncodingscookie = struct { sequence: c_uint, };

/// @brief QueryEncodingsRequest
pub const QueryEncodingsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 2, @"length": u16, @"port": xcb.xv.PORT, };

/// @brief QueryEncodingsReply
pub const QueryEncodingsReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_encodings": u16, @"pad1": [22]u8, @"info": []xcb.xv.EncodingInfo, };

/// @brief GrabPortcookie
pub const GrabPortcookie = struct { sequence: c_uint, };

/// @brief GrabPortRequest
pub const GrabPortRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 3, @"length": u16, @"port": xcb.xv.PORT, @"time": xcb.TIMESTAMP, };

/// @brief GrabPortReply
pub const GrabPortReply = struct { @"response_type": u8, @"result": u8, @"sequence": u16, @"length": u32, };

/// @brief UngrabPortRequest
pub const UngrabPortRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 4, @"length": u16, @"port": xcb.xv.PORT, @"time": xcb.TIMESTAMP, };

/// @brief PutVideoRequest
pub const PutVideoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 5, @"length": u16, @"port": xcb.xv.PORT, @"drawable": xcb.DRAWABLE, @"gc": xcb.GCONTEXT, @"vid_x": i16, @"vid_y": i16, @"vid_w": u16, @"vid_h": u16, @"drw_x": i16, @"drw_y": i16, @"drw_w": u16, @"drw_h": u16, };

/// @brief PutStillRequest
pub const PutStillRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 6, @"length": u16, @"port": xcb.xv.PORT, @"drawable": xcb.DRAWABLE, @"gc": xcb.GCONTEXT, @"vid_x": i16, @"vid_y": i16, @"vid_w": u16, @"vid_h": u16, @"drw_x": i16, @"drw_y": i16, @"drw_w": u16, @"drw_h": u16, };

/// @brief GetVideoRequest
pub const GetVideoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 7, @"length": u16, @"port": xcb.xv.PORT, @"drawable": xcb.DRAWABLE, @"gc": xcb.GCONTEXT, @"vid_x": i16, @"vid_y": i16, @"vid_w": u16, @"vid_h": u16, @"drw_x": i16, @"drw_y": i16, @"drw_w": u16, @"drw_h": u16, };

/// @brief GetStillRequest
pub const GetStillRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 8, @"length": u16, @"port": xcb.xv.PORT, @"drawable": xcb.DRAWABLE, @"gc": xcb.GCONTEXT, @"vid_x": i16, @"vid_y": i16, @"vid_w": u16, @"vid_h": u16, @"drw_x": i16, @"drw_y": i16, @"drw_w": u16, @"drw_h": u16, };

/// @brief StopVideoRequest
pub const StopVideoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 9, @"length": u16, @"port": xcb.xv.PORT, @"drawable": xcb.DRAWABLE, };

/// @brief SelectVideoNotifyRequest
pub const SelectVideoNotifyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 10, @"length": u16, @"drawable": xcb.DRAWABLE, @"onoff": u8, @"pad0": [3]u8, };

/// @brief SelectPortNotifyRequest
pub const SelectPortNotifyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 11, @"length": u16, @"port": xcb.xv.PORT, @"onoff": u8, @"pad0": [3]u8, };

/// @brief QueryBestSizecookie
pub const QueryBestSizecookie = struct { sequence: c_uint, };

/// @brief QueryBestSizeRequest
pub const QueryBestSizeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 12, @"length": u16, @"port": xcb.xv.PORT, @"vid_w": u16, @"vid_h": u16, @"drw_w": u16, @"drw_h": u16, @"motion": u8, @"pad0": [3]u8, };

/// @brief QueryBestSizeReply
pub const QueryBestSizeReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"actual_width": u16, @"actual_height": u16, };

/// @brief SetPortAttributeRequest
pub const SetPortAttributeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 13, @"length": u16, @"port": xcb.xv.PORT, @"attribute": xcb.ATOM, @"value": i32, };

/// @brief GetPortAttributecookie
pub const GetPortAttributecookie = struct { sequence: c_uint, };

/// @brief GetPortAttributeRequest
pub const GetPortAttributeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 14, @"length": u16, @"port": xcb.xv.PORT, @"attribute": xcb.ATOM, };

/// @brief GetPortAttributeReply
pub const GetPortAttributeReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"value": i32, };

/// @brief QueryPortAttributescookie
pub const QueryPortAttributescookie = struct { sequence: c_uint, };

/// @brief QueryPortAttributesRequest
pub const QueryPortAttributesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 15, @"length": u16, @"port": xcb.xv.PORT, };

/// @brief QueryPortAttributesReply
pub const QueryPortAttributesReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_attributes": u32, @"text_size": u32, @"pad1": [16]u8, @"attributes": []xcb.xv.AttributeInfo, };

/// @brief ListImageFormatscookie
pub const ListImageFormatscookie = struct { sequence: c_uint, };

/// @brief ListImageFormatsRequest
pub const ListImageFormatsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 16, @"length": u16, @"port": xcb.xv.PORT, };

/// @brief ListImageFormatsReply
pub const ListImageFormatsReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_formats": u32, @"pad1": [20]u8, @"format": []xcb.xv.ImageFormatInfo, };

/// @brief QueryImageAttributescookie
pub const QueryImageAttributescookie = struct { sequence: c_uint, };

/// @brief QueryImageAttributesRequest
pub const QueryImageAttributesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 17, @"length": u16, @"port": xcb.xv.PORT, @"id": u32, @"width": u16, @"height": u16, };

/// @brief QueryImageAttributesReply
pub const QueryImageAttributesReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_planes": u32, @"data_size": u32, @"width": u16, @"height": u16, @"pad1": [12]u8, @"pitches": []u32, @"offsets": []u32, };

/// @brief PutImageRequest
pub const PutImageRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 18, @"length": u16, @"port": xcb.xv.PORT, @"drawable": xcb.DRAWABLE, @"gc": xcb.GCONTEXT, @"id": u32, @"src_x": i16, @"src_y": i16, @"src_w": u16, @"src_h": u16, @"drw_x": i16, @"drw_y": i16, @"drw_w": u16, @"drw_h": u16, @"width": u16, @"height": u16, @"data": []const u8, };

/// @brief ShmPutImageRequest
pub const ShmPutImageRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 19, @"length": u16, @"port": xcb.xv.PORT, @"drawable": xcb.DRAWABLE, @"gc": xcb.GCONTEXT, @"shmseg": xcb.shm.SEG, @"id": u32, @"offset": u32, @"src_x": i16, @"src_y": i16, @"src_w": u16, @"src_h": u16, @"drw_x": i16, @"drw_y": i16, @"drw_w": u16, @"drw_h": u16, @"width": u16, @"height": u16, @"send_event": u8, @"pad0": [3]u8, };

// Smoke test: make sure every declaration in this file semantically analyzes.
test "" { @import("std").testing.refAllDecls(@This()); }
src/auto/xv.zig
// Dear ImGui based tilemap editor: draws the map, handles painting single
// tiles, box-fill selection with shift, and drag-scrolling with alt/super.
const std = @import("std");
const imgui = @import("imgui");
const colors = @import("../colors.zig");
const Tilemap = @import("tilemap.zig").Tilemap;
const Tileset = @import("tileset.zig").Tileset;

pub const TilemapEditor = struct {
    map: Tilemap,
    tileset: Tileset,
    // True while a shift+drag box selection is in progress.
    shift_dragged: bool = false,
    // True while painting with a held mouse button (enables line interpolation).
    dragged: bool = false,
    // Mouse position of the previous paint frame, used to fill skipped tiles.
    prev_mouse_pos: imgui.ImVec2 = .{},

    pub fn init(map: Tilemap, tileset: Tileset) TilemapEditor {
        return .{ .map = map, .tileset = tileset };
    }

    pub fn deinit(self: @This()) void {_ = self;}

    // Pixel size of the whole map (tiles * tile_size).
    pub fn mapSize(self: @This()) imgui.ImVec2 {
        return .{ .x = @intToFloat(f32, self.map.w * self.tileset.tile_size), .y = @intToFloat(f32, self.map.h * self.tileset.tile_size) };
    }

    // Draw the editor window: background, map layers, input capture button,
    // and a highlight quad over the hovered tile.
    pub fn draw(self: *@This(), name: [*c]const u8) void {
        // if the alt key is down dont allow scrolling with the mouse wheel since we will be zooming with it
        var window_flags = imgui.ImGuiWindowFlags_NoCollapse | imgui.ImGuiWindowFlags_AlwaysHorizontalScrollbar;
        if (imgui.igGetIO().KeyAlt) window_flags |= imgui.ImGuiWindowFlags_NoScrollWithMouse;
        defer imgui.igEnd();
        if (!imgui.igBegin(name, null, window_flags)) return;
        var pos = imgui.ogGetCursorScreenPos();
        var map_size = self.mapSize();
        // NOTE(review): `map_size` is passed where a max-corner looks expected —
        // presumably ogAddRectFilled takes (pos, size); confirm its signature.
        imgui.ogAddRectFilled(imgui.igGetWindowDrawList(), pos, map_size, colors.rgbToU32(0, 0, 0));
        self.drawPostProcessedMap(pos);
        // Invisible button over the map area captures hover/click state.
        _ = imgui.ogInvisibleButton("##input_map_button", map_size, imgui.ImGuiButtonFlags_None);
        const is_hovered = imgui.igIsItemHovered(imgui.ImGuiHoveredFlags_None);
        if (is_hovered) self.handleInput(pos);
        // draw a rect over the current tile
        if (is_hovered and !self.shift_dragged) {
            var tile = tileIndexUnderPos(imgui.igGetIO().MousePos, self.tileset.tile_size, pos);
            const tl = imgui.ImVec2{ .x = pos.x + @intToFloat(f32, tile.x * self.tileset.tile_size), .y = pos.y + @intToFloat(f32, tile.y * self.tileset.tile_size) };
            imgui.ogAddQuad(imgui.igGetWindowDrawList(), tl, @intToFloat(f32, self.tileset.tile_size), colors.rgbToU32(116, 252, 253), 1);
        }
    }

    // Delegate: draw the tileset picker window.
    pub fn drawTileset(self: *@This(), name: [*c]const u8) void {
        self.tileset.drawTileset(name);
    }

    // Draw the layer-list window: selectable rows plus an "Add Layer" button.
    pub fn drawLayers(self: *@This(), name: [*c]const u8) void {
        defer imgui.igEnd();
        if (!imgui.igBegin(name, null, imgui.ImGuiWindowFlags_None)) return;
        for (self.map.layers) |layer, i| {
            imgui.igPushIDInt(@intCast(c_int, i));
            defer imgui.igPopID();
            if (imgui.ogSelectableBool(layer.name.ptr, i == self.map.current_layer, imgui.ImGuiSelectableFlags_None, .{})) {
                self.map.current_layer = i;
            }
        }
        if (imgui.ogButton("Add Layer")) self.map.addLayer();
    }

    // Dispatch mouse input over the map: drag-scroll, shift box-select
    // (preview while dragging, commit on release), and single-tile painting.
    // `origin` is the top-left screen position of the map.
    fn handleInput(self: *@This(), origin: imgui.ImVec2) void {
        // scrolling via drag with alt or super key down
        if (imgui.igIsMouseDragging(imgui.ImGuiMouseButton_Left, 0) and (imgui.igGetIO().KeyAlt or imgui.igGetIO().KeySuper)) {
            var scroll_delta = imgui.ogGetMouseDragDelta(imgui.ImGuiMouseButton_Left, 0);
            imgui.igSetScrollXFloat(imgui.igGetScrollX() - scroll_delta.x);
            imgui.igSetScrollYFloat(imgui.igGetScrollY() - scroll_delta.y);
            imgui.igResetMouseDragDelta(imgui.ImGuiMouseButton_Left);
            return;
        }
        // box selection with left/right mouse + shift
        if (imgui.ogIsAnyMouseDragging() and imgui.igGetIO().KeyShift) {
            var drag_delta = imgui.ogGetAnyMouseDragDelta();
            var tile1 = tileIndexUnderPos(imgui.igGetIO().MousePos, self.tileset.tile_size, origin);
            drag_delta = drag_delta.add(origin);
            var tile2 = tileIndexUnderPos(imgui.igGetIO().MousePos, self.tileset.tile_size, drag_delta);
            const tile_size = @intToFloat(f32, self.tileset.tile_size);
            // NOTE(review): min_y uses max() and max_y uses min() — looks swapped
            // but may be deliberate for the quad's winding; confirm before changing.
            const min_x = @intToFloat(f32, std.math.min(tile1.x, tile2.x)) * tile_size + origin.x;
            const min_y = @intToFloat(f32, std.math.max(tile1.y, tile2.y)) * tile_size + tile_size + origin.y;
            const max_x = @intToFloat(f32, std.math.max(tile1.x, tile2.x)) * tile_size + tile_size + origin.x;
            const max_y = @intToFloat(f32, std.math.min(tile1.y, tile2.y)) * tile_size + origin.y;
            // White preview for a left-button (paint) box, red for right (erase).
            const color = if (imgui.igIsMouseDragging(imgui.ImGuiMouseButton_Left, 0)) colors.rgbToU32(255, 255, 255) else colors.rgbToU32(220, 0, 0);
            imgui.ogImDrawList_AddQuad(imgui.igGetWindowDrawList(), &imgui.ImVec2{ .x = min_x, .y = max_y }, &imgui.ImVec2{ .x = max_x, .y = max_y }, &imgui.ImVec2{ .x = max_x, .y = min_y }, &imgui.ImVec2{ .x = min_x, .y = min_y }, color, 2);
            self.shift_dragged = true;
        } else if ((imgui.igIsMouseReleased(imgui.ImGuiMouseButton_Left) or imgui.igIsMouseReleased(imgui.ImGuiMouseButton_Right)) and self.shift_dragged) {
            // Box-select released: fill the selected rectangle.
            self.shift_dragged = false;
            var drag_delta = if (imgui.igIsMouseReleased(imgui.ImGuiMouseButton_Left)) imgui.ogGetMouseDragDelta(imgui.ImGuiMouseButton_Left, 0) else imgui.ogGetMouseDragDelta(imgui.ImGuiMouseButton_Right, 0);
            var tile1 = tileIndexUnderPos(imgui.igGetIO().MousePos, self.tileset.tile_size, origin);
            drag_delta = drag_delta.add(origin);
            var tile2 = tileIndexUnderPos(imgui.igGetIO().MousePos, self.tileset.tile_size, drag_delta);
            const min_x = std.math.min(tile1.x, tile2.x);
            var min_y = std.math.min(tile1.y, tile2.y);
            const max_x = std.math.max(tile1.x, tile2.x);
            const max_y = std.math.max(tile1.y, tile2.y);
            // either set the tile to a brush or 0 depending on mouse button
            const selected_brush_index: usize = self.tileset.selected; // TODO: brushes
            const tile_value = if (imgui.igIsMouseReleased(imgui.ImGuiMouseButton_Left)) selected_brush_index + 1 else 0;
            // min_y doubles as the row cursor for the fill loop.
            while (min_y <= max_y) : (min_y += 1) {
                var x = min_x;
                while (x <= max_x) : (x += 1) {
                    self.map.setTile(x, min_y, @intCast(u8, tile_value));
                }
            }
        } else if (imgui.ogIsAnyMouseDown() and !imgui.igGetIO().KeyShift) {
            // Plain painting: left button paints selected tile + 1, right erases (0).
            var tile = tileIndexUnderPos(imgui.igGetIO().MousePos, self.tileset.tile_size, origin);
            const brush_index: u8 = if (imgui.igIsMouseDown(imgui.ImGuiMouseButton_Left)) self.tileset.selected + 1 else 0;
            // if the mouse down last frame, get last mouse pos and ensure we dont skip tiles when drawing
            if (self.dragged) {
                self.commitInBetweenTiles(tile.x, tile.y, origin, brush_index);
            }
            self.dragged = true;
            self.prev_mouse_pos = imgui.igGetIO().MousePos;
            self.map.setTile(tile.x, tile.y, brush_index);
        } else if (imgui.igIsMouseReleased(imgui.ImGuiMouseButton_Left) or imgui.igIsMouseReleased(imgui.ImGuiMouseButton_Right)) {
            self.dragged = false;
        }
    }

    // Render every nonzero tile of every layer; tile value 0 means empty and
    // stored values are 1-based indices into the tileset.
    fn drawPostProcessedMap(self: @This(), origin: imgui.ImVec2) void {
        for (self.map.layers) |layer| {
            var y: usize = 0;
            while (y < self.map.h) : (y += 1) {
                var x: usize = 0;
                while (x < self.map.w) : (x += 1) {
                    const tile = layer.data[x + y * self.map.w];
                    if (tile == 0) continue;
                    const offset = imgui.ImVec2.init(@intToFloat(f32, x * self.tileset.tile_size), @intToFloat(f32, y * self.tileset.tile_size));
                    var tl = origin.add(offset);
                    self.drawTile(tl, tile - 1, 1);
                }
            }
        }
    }

    // When a paint-drag moved more than one tile in a frame, fill the gap
    // with a line of tiles from the previous position to the current one.
    fn commitInBetweenTiles(self: *@This(), tile_x: usize, tile_y: usize, origin: imgui.ImVec2, color: u8) void {
        var prev_tile = tileIndexUnderPos(self.prev_mouse_pos, self.tileset.tile_size, origin);
        const abs_x = std.math.absInt(@intCast(i32, tile_x) - @intCast(i32, prev_tile.x)) catch unreachable;
        const abs_y = std.math.absInt(@intCast(i32, tile_y) - @intCast(i32, prev_tile.y)) catch unreachable;
        // Adjacent or same tile: nothing was skipped.
        if (abs_x <= 1 and abs_y <= 1) {
            return;
        }
        self.bresenham(@intToFloat(f32, prev_tile.x), @intToFloat(f32, prev_tile.y), @intToFloat(f32, tile_x), @intToFloat(f32, tile_y), color);
    }

    /// fill in all the tiles between the two mouse positions using bresenham's line algo
    fn bresenham(self: *@This(), in_x1: f32, in_y1: f32, in_x2: f32, in_y2: f32, color: u8) void {
        var x1 = in_x1;
        var y1 = in_y1;
        var x2 = in_x2;
        var y2 = in_y2;
        // Steep lines are traced with x/y transposed so x is always the driver.
        const steep = std.math.absFloat(y2 - y1) > std.math.absFloat(x2 - x1);
        if (steep) {
            std.mem.swap(f32, &x1, &y1);
            std.mem.swap(f32, &x2, &y2);
        }
        if (x1 > x2) {
            std.mem.swap(f32, &x1, &x2);
            std.mem.swap(f32, &y1, &y2);
        }
        const dx: f32 = x2 - x1;
        const dy: f32 = std.math.absFloat(y2 - y1);
        var err: f32 = dx / 2.0;
        var ystep: i32 = if (y1 < y2) 1 else -1;
        var y: i32 = @floatToInt(i32, y1);
        const maxX: i32 = @floatToInt(i32, x2);
        var x: i32 = @floatToInt(i32, x1);
        while (x <= maxX) : (x += 1) {
            if (steep) {
                // Transposed: (x, y) were swapped above.
                self.map.setTile(@intCast(usize, y), @intCast(usize, x), color);
            } else {
                self.map.setTile(@intCast(usize, x), @intCast(usize, y), color);
            }
            err -= dy;
            if (err < 0) {
                y += ystep;
                err += dx;
            }
        }
    }

    // Draw a single tileset tile at screen position `tl`, scaled by `zoom`.
    fn drawTile(self: @This(), tl: imgui.ImVec2, tile: usize, zoom: usize) void {
        var br = tl;
        br.x += @intToFloat(f32, self.tileset.tile_size * zoom);
        br.y += @intToFloat(f32, self.tileset.tile_size * zoom);
        const rect = self.tileset.uvsForTile(tile);
        const uv0 = imgui.ImVec2{ .x = rect.x, .y = rect.y };
        const uv1 = imgui.ImVec2{ .x = rect.x + rect.width, .y = rect.y + rect.height };
        imgui.ogImDrawList_AddImage(imgui.igGetWindowDrawList(), self.tileset.tex.imTextureID(), tl, br, uv0, uv1, 0xffffffff);
    }
};

/// helper to find the tile under the position given a top-left position of the grid (origin) and a grid size
/// NOTE(review): @floatToInt(usize, ...) is checked UB if pos is left/above
/// origin (negative coordinate) — callers appear to gate on hover; confirm.
pub fn tileIndexUnderPos(pos: imgui.ImVec2, rect_size: usize, origin: imgui.ImVec2) struct { x: usize, y: usize } {
    const final_pos = pos.subtract(origin);
    return .{ .x = @divTrunc(@floatToInt(usize, final_pos.x), rect_size), .y = @divTrunc(@floatToInt(usize, final_pos.y), rect_size) };
}
src/tilemaps/editor.zig
//! FUSE client: exposes a remote filesystem by forwarding every FUSE
//! callback over a simple command/reply wire protocol (see comms.zig)
//! carried on a reader/writer pair.
const std = @import("std");
const comms = @import("comms.zig");
const fuse = @cImport({
    @cDefine("FUSE_USE_VERSION", "39");
    @cInclude("fuse3/fuse.h");
    @cInclude("fuse3/fuse_common.h");
});
const fuse_ops = fuse.struct_fuse_operations;

// Single global instance: the FUSE callbacks are plain C function pointers
// and cannot capture state, so they all reach the connection through this.
var client: Client = undefined;

// Hand-written mirror of the C `struct fuse_file_info` layout, used to read
// and write the `fh` (file handle) field without going through translate-c's
// bitfield handling.
const FuseFileInfo = extern struct {
    flags: u32,
    bitfields: [3]u32,
    fh: u64,
    lock_owner: u64,
    poll_events: u32,
};

// Reinterpret a (non-null) fuse_file_info pointer as our extern mirror.
// NOTE(review): `.?` panics when fi is null; `getattr` guards for that but
// `truncate` does not, and libfuse may invoke path-based truncate with a
// null fi — verify against the fuse3 callback contract.
fn do_fi(fi: ?*fuse.fuse_file_info) *FuseFileInfo {
    return @ptrCast(*FuseFileInfo, @alignCast(@alignOf(FuseFileInfo), fi.?));
}

// Convert a host `std.os.Stat` (as received over the wire) into the libfuse
// stat struct. Identity fields (dev/ino/uid/gid/rdev) are deliberately zeroed.
fn to_fuse_stat(st: std.os.Stat) fuse.struct_stat {
    var result = std.mem.zeroes(fuse.struct_stat);
    result.st_dev = 0;
    result.st_ino = 0;
    result.st_nlink = st.nlink;
    result.st_uid = 0;
    result.st_gid = 0;
    result.st_rdev = 0;
    result.st_mode = st.mode;
    result.st_size = st.size;
    result.st_blocks = st.blocks;
    result.st_blksize = st.blksize;
    result.st_atim.tv_sec = st.atim.tv_sec;
    result.st_atim.tv_nsec = st.atim.tv_nsec;
    result.st_mtim.tv_sec = st.mtim.tv_sec;
    result.st_mtim.tv_nsec = st.mtim.tv_nsec;
    result.st_ctim.tv_sec = st.ctim.tv_sec;
    result.st_ctim.tv_nsec = st.ctim.tv_nsec;
    return result;
}

// Connection state plus the fuse_operations table. Each implemented callback
// is an anonymous `struct { fn f(...) }.f` so it gets C calling convention
// and a stable function pointer. Any comms failure aborts via @panic("").
const Client = struct {
    reader: std.fs.File.Reader,
    writer: std.fs.File.Writer,
    // This just decides if we show everything as readable. It doesn't matter
    // since the remote won't accept writing if we're not allowed to, this is
    // just asking it ahead of time it will allow us or not.
    show_as_writeable: bool,
    fuse_ops: fuse_ops = .{
        // stat: send path; reply is an errno-style i32, followed by a Stat
        // payload only on success (0).
        .getattr = struct {
            fn f(path: [*c]const u8, stbuf: [*c]fuse.struct_stat, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                if (fi != null) {
                    std.log.info("Client: stat '{s}' {d}", .{ path, do_fi(fi).fh });
                } else {
                    std.log.info("Client: stat '{s}'", .{path});
                }
                comms.send(client.writer, comms.Command.stat) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                const result = comms.recv(client.reader, i32) catch @panic("");
                if (result == 0) {
                    const st = comms.recv(client.reader, std.os.Stat) catch @panic("");
                    stbuf[0] = to_fuse_stat(st);
                }
                return result;
            }
        }.f,
        .readlink = null,
        .mknod = null,
        .mkdir = struct {
            fn f(path: [*c]const u8, mode: fuse.mode_t) callconv(.C) c_int {
                std.log.info("Client: mkdir '{s}'", .{path});
                comms.send(client.writer, comms.Command.mkdir) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                comms.send(client.writer, @intCast(std.os.mode_t, mode)) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .unlink = struct {
            fn f(path: [*c]const u8) callconv(.C) c_int {
                std.log.info("Client: unlink '{s}'", .{path});
                // Refuse locally when mounted read-only (remote would too).
                if (!client.show_as_writeable) return -std.os.EPERM;
                comms.send(client.writer, comms.Command.unlink) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .rmdir = struct {
            fn f(path: [*c]const u8) callconv(.C) c_int {
                std.log.info("Client: rmdir '{s}'", .{path});
                if (!client.show_as_writeable) return -std.os.EPERM;
                comms.send(client.writer, comms.Command.rmdir) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .symlink = null,
        .rename = struct {
            fn f(p1: [*c]const u8, p2: [*c]const u8, flags: c_uint) callconv(.C) c_int {
                _ = flags; // RENAME_NOREPLACE/EXCHANGE flags are ignored.
                std.log.info("Client: rename '{s}' -> '{s}'", .{ p1, p2 });
                comms.send(client.writer, comms.Command.rename) catch @panic("");
                comms.sendpath(client.writer, p1) catch @panic("");
                comms.sendpath(client.writer, p2) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .link = null,
        .chmod = null,
        .chown = null,
        .truncate = struct {
            // NOTE(review): uses do_fi(fi) unconditionally; a path-based
            // truncate (fi == null) would panic in do_fi — confirm callers.
            fn f(path: [*c]const u8, new_size: c_long, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                std.log.info("Client: truncate '{s}' {d}", .{ path, new_size });
                if (!client.show_as_writeable) return -std.os.EPERM;
                comms.send(client.writer, comms.Command.truncate) catch @panic("");
                comms.send(client.writer, @intCast(i32, do_fi(fi).fh)) catch @panic("");
                comms.send(client.writer, new_size) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .open = struct {
            // A positive reply is the remote file handle; stash it in fi.fh.
            fn f(path: [*c]const u8, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                std.log.info("Client: open '{s}'", .{path});
                comms.send(client.writer, comms.Command.open) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                const result = comms.recv(client.reader, i32) catch @panic("");
                std.log.info("Client: open returned {d}", .{result});
                if (result > 0) {
                    do_fi(fi).fh = @intCast(u32, result);
                    return 0;
                }
                return result;
            }
        }.f,
        .read = struct {
            // Loops issuing read commands until the requested range is
            // filled, the remote reports EOF (0), or an error (<0).
            fn f(path: [*c]const u8, bytes: [*c]u8, bytes_len: usize, foff_c: fuse.off_t, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                std.log.info("Client: read '{s}' {d} {d}", .{ path, do_fi(fi).fh, bytes_len });
                var foff = @intCast(usize, foff_c);
                const end = bytes_len + foff;
                while (foff < end) {
                    comms.send(client.writer, comms.Command.read) catch @panic("");
                    comms.send(client.writer, @intCast(i32, do_fi(fi).fh)) catch @panic("");
                    comms.send(client.writer, @intCast(u32, end - foff)) catch @panic("");
                    comms.send(client.writer, foff) catch @panic("");
                    const result = comms.recv(client.reader, i32) catch @panic("");
                    std.log.info("Client: going to read {d} bytes, out of {d} remaining", .{ result, end - foff });
                    if (result > 0) {
                        // Destination is offset by how much we already read.
                        comms.recvinto(client.reader, (bytes + foff - @intCast(usize, foff_c))[0..@intCast(usize, result)]) catch @panic("");
                        foff += @intCast(usize, result);
                    } else if (result < 0) {
                        return result;
                    } else { // result == 0
                        break;
                    }
                }
                // Total bytes actually read.
                return @intCast(c_int, foff - @intCast(usize, foff_c));
            }
        }.f,
        .write = struct {
            // Chunks the payload to comms.max_write_bytes per command.
            fn f(path: [*c]const u8, bytes: [*c]const u8, bytes_len_c: usize, foff_c: fuse.off_t, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                if (!client.show_as_writeable) return -std.os.EPERM;
                var foff = @intCast(usize, foff_c);
                const end = bytes_len_c + foff;
                std.log.info("Client: write '{s}' {d}", .{ path, do_fi(fi).fh });
                while (foff < end) {
                    var bytes_len = end - foff;
                    if (bytes_len > comms.max_write_bytes) bytes_len = comms.max_write_bytes;
                    comms.send(client.writer, comms.Command.write) catch @panic("");
                    comms.send(client.writer, @intCast(i32, do_fi(fi).fh)) catch @panic("");
                    comms.send(client.writer, @intCast(@TypeOf(comms.max_write_bytes), bytes_len)) catch @panic("");
                    comms.send(client.writer, foff) catch @panic("");
                    comms.sendfrom(client.writer, (bytes + foff - @intCast(usize, foff_c))[0..bytes_len]) catch @panic("");
                    const result = comms.recv(client.reader, i32) catch @panic("");
                    std.log.info("Client: write returned {d}", .{result});
                    if (result > 0) {
                        foff += @intCast(usize, result);
                    } else if (result < 0) {
                        return result;
                    } else { // result == 0
                        break;
                    }
                }
                return @intCast(c_int, foff - @intCast(usize, foff_c));
            }
        }.f,
        .statfs = struct {
            // Entirely synthetic filesystem statistics; nothing is asked of
            // the remote here.
            fn f(path: [*c]const u8, stat_buf: [*c]fuse.struct_statvfs) callconv(.C) c_int {
                _ = path;
                stat_buf.*.f_bsize = 512;
                stat_buf.*.f_frsize = 512;
                stat_buf.*.f_blocks = 0x84848484;
                stat_buf.*.f_bfree = 0x42424242;
                stat_buf.*.f_bavail = 0x42424242;
                stat_buf.*.f_files = 696969 * 2;
                stat_buf.*.f_ffree = 696969;
                stat_buf.*.f_favail = 696969;
                return 0;
            }
        }.f,
        .flush = null,
        .release = struct {
            // NOTE(review): bails out with EPERM before sending `close` when
            // mounted read-only, so remote handles are never released in
            // that mode — verify this leak is intentional.
            fn f(path: [*c]const u8, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                std.log.info("Client: release '{s}' {d}", .{ path, do_fi(fi).fh });
                if (!client.show_as_writeable) return -std.os.EPERM;
                comms.send(client.writer, comms.Command.close) catch @panic("");
                comms.send(client.writer, @intCast(i32, do_fi(fi).fh)) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .fsync = null,
        .setxattr = null,
        .getxattr = null,
        .listxattr = null,
        .removexattr = null,
        .opendir = struct {
            // Same handle convention as open: positive reply is the handle.
            fn f(path: [*c]const u8, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                std.log.info("Client: opendir '{s}'", .{path});
                comms.send(client.writer, comms.Command.opendir) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                const result = comms.recv(client.reader, i32) catch @panic("");
                if (result > 0) {
                    do_fi(fi).fh = @intCast(u32, result);
                    return 0;
                }
                return result;
            }
        }.f,
        .readdir = struct {
            // Streams directory entries: remote sends a nonzero u8 before
            // each (Stat, path) pair; we ack each entry with 1, or 0 to stop
            // early when libfuse's buffer is full.
            fn f(path: [*c]const u8, bytes: ?*c_void, fill: fuse.fuse_fill_dir_t, _: fuse.off_t, fi: ?*fuse.fuse_file_info, flags: fuse.fuse_readdir_flags) callconv(.C) c_int {
                _ = flags;
                std.log.info("Client: readdir '{s}' {d}", .{ path, do_fi(fi).fh });
                comms.send(client.writer, comms.Command.readdir) catch @panic("");
                comms.send(client.writer, @intCast(i32, do_fi(fi).fh)) catch @panic("");
                while (comms.recv(client.reader, u8) catch @panic("") != 0) {
                    const st = comms.recv(client.reader, std.os.Stat) catch @panic("");
                    const f_path = comms.recvpath(client.reader) catch @panic("");
                    const fuse_st = to_fuse_stat(st);
                    std.log.info("Client: readdir: got dent '{s}'", .{f_path.ptr()});
                    if (fill.?(
                        bytes,
                        f_path.ptr(),
                        &fuse_st,
                        0,
                        std.mem.zeroes(fuse.fuse_fill_dir_flags),
                    ) != 0) {
                        std.log.info("Client: readdir: buffer full, not inserting last dent.", .{});
                        comms.send(client.writer, @as(u8, 0)) catch @panic("");
                        break;
                    }
                    comms.send(client.writer, @as(u8, 1)) catch @panic("");
                }
                return 0;
            }
        }.f,
        .releasedir = struct {
            fn f(path: [*c]const u8, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                std.log.info("Client: releasedir {d} ('{s}')", .{ do_fi(fi).fh, path });
                comms.send(client.writer, comms.Command.releasedir) catch @panic("");
                comms.send(client.writer, @intCast(u32, do_fi(fi).fh)) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .fsyncdir = null,
        .init = struct {
            // Runs once at mount time; in read-only mode it strips the
            // mutating callbacks from the live operations table so the
            // kernel rejects writes without a round trip.
            fn f(conn: [*c]fuse.struct_fuse_conn_info, cfg: [*c]fuse.struct_fuse_config) callconv(.C) ?*c_void {
                _ = conn;
                cfg[0].use_ino = 0;
                //cfg[0].entry_timeout = 0;
                //cfg[0].attr_timeout = 0;
                //cfg[0].negative_timeout = 0;
                if (!client.show_as_writeable) {
                    // Disable all writing ops
                    client.fuse_ops.mkdir = null;
                    client.fuse_ops.unlink = null;
                    client.fuse_ops.rmdir = null;
                    client.fuse_ops.rename = null;
                    client.fuse_ops.truncate = null;
                    client.fuse_ops.write = null;
                    client.fuse_ops.create = null;
                }
                return fuse.NULL;
            }
        }.f,
        .destroy = null,
        .access = struct {
            fn f(path: [*c]const u8, flags: c_int) callconv(.C) c_int {
                std.log.info("Client: access 0x{X} '{s}'", .{ flags, path });
                comms.send(client.writer, comms.Command.access) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                comms.send(client.writer, flags) catch @panic("");
                return comms.recv(client.reader, i32) catch @panic("");
            }
        }.f,
        .create = struct {
            fn f(path: [*c]const u8, mode: fuse.mode_t, fi: ?*fuse.fuse_file_info) callconv(.C) c_int {
                std.log.info("Client: create '{s}'", .{path});
                comms.send(client.writer, comms.Command.create) catch @panic("");
                comms.sendpath(client.writer, path) catch @panic("");
                comms.send(client.writer, @intCast(std.os.mode_t, mode)) catch @panic("");
                const result = comms.recv(client.reader, i32) catch @panic("");
                std.log.info("Client: create returned {d}", .{result});
                if (result > 0) {
                    do_fi(fi).fh = @intCast(u32, result);
                    return 0;
                }
                return result;
            }
        }.f,
        .lock = null,
        .utimens = null,
        .bmap = null,
        .ioctl = null,
        .poll = null,
        .write_buf = null,
        .read_buf = null,
        .flock = null,
        .fallocate = null,
        .copy_file_range = null,
        .lseek = null,
    },
};

/// Initialize the global client from the given connection, then mount the
/// remote filesystem at `mount_dir` and run the FUSE event loop until
/// unmount. The first byte read from the peer announces writability.
pub fn mountDirAndRunClient(reader: std.fs.File.Reader, writer: std.fs.File.Writer, mount_dir: [:0]const u8) !void {
    client = .{
        .reader = reader,
        .writer = writer,
        .show_as_writeable = (reader.readIntNative(u8) catch {
            std.log.err("Unable to read writability byte", .{});
            std.os.exit(1);
        }) != 0,
    };
    // Minimal fake argv: libfuse expects at least a program name.
    var progname = [_:0]u8{ 'n', 'o' };
    var fuse_args = fuse.fuse_args{
        .allocated = 0,
        .argc = 1,
        .argv = &[_][*c]u8{
            &progname[0],
        },
    };
    const fuse_inst = fuse.fuse_new(
        &fuse_args,
        &client.fuse_ops,
        @sizeOf(@TypeOf(client.fuse_ops)),
        @intToPtr(*c_void, @ptrToInt(&client)),
    );
    defer fuse.fuse_destroy(fuse_inst);
    if (fuse.fuse_mount(fuse_inst, mount_dir.ptr) != 0) {
        std.log.err("Fuse mount failed", .{});
        return;
    }
    defer fuse.fuse_unmount(fuse_inst);
    // Single-threaded loop; returns when the filesystem is unmounted.
    _ = fuse.fuse_loop(fuse_inst);
}
src/fuse_client.zig
//! Advent of Code 2021, day 4: bingo. Part 1 is the score of the first
//! winning board, part 2 the score of the last board to win.
const std = @import("std");
const Allocator = std.mem.Allocator;
const BitSet = std.bit_set.IntegerBitSet;
const print = std.debug.print;
const data = @embedFile("../inputs/day04.txt");

pub fn main() anyerror!void {
    var gpa_impl = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_impl.deinit();
    const gpa = gpa_impl.allocator();
    return main_with_allocator(gpa);
}

/// Parse the embedded puzzle input, solve both parts, and print the results.
pub fn main_with_allocator(allocator: Allocator) anyerror!void {
    const bingo = try Bingo.parse(allocator, data[0..]);
    defer bingo.deinit();
    const res = try solve(allocator, bingo);
    print("Part 1: {d}\n", .{res.part1});
    print("Part 2: {d}\n", .{res.part2});
}

// Result pair: part1 = first winner's score * winning number,
// part2 = last winner's score * its winning number.
const Solve = struct { part1: usize, part2: usize };

/// Play the drawn numbers against every board in order, recording the first
/// and the last board to complete a row or column. Boards that already won
/// are skipped via the `has_won` bit set (they still get played, but are not
/// re-scored).
// NOTE(review): assumes every board eventually wins and that there are at
// least two boards; with a single board `last_winner_score` stays null and
// the final `.?` would panic — confirm inputs guarantee this.
fn solve(allocator: Allocator, bingo: Bingo) !Solve {
    var winner_score: ?usize = null;
    var winning_number: ?usize = null;
    var last_winner_score: ?usize = null;
    var last_winning_number: ?usize = null;
    var has_won = try std.bit_set.DynamicBitSet.initEmpty(allocator, bingo.boards.len);
    defer has_won.deinit();
    for (bingo.numbers) |n| {
        for (bingo.boards) |*board, board_idx| {
            board.play(n);
            if (!has_won.isSet(board_idx) and board.win()) {
                has_won.set(board_idx);
                if (has_won.count() == 1) {
                    // First board to win: part 1.
                    winner_score = board.score();
                    winning_number = @intCast(usize, n);
                } else if (has_won.count() == bingo.boards.len) {
                    // Last board to win: part 2.
                    last_winner_score = board.score();
                    last_winning_number = @intCast(usize, n);
                }
            }
        }
    }
    return Solve{ .part1 = winner_score.? * winning_number.?, .part2 = last_winner_score.? * last_winning_number.? };
}

// One 5x5 bingo board. Marked cells are tracked redundantly per row and per
// column so a win check is just a popcount.
const Board = struct {
    const Self = @This();
    grid: [5][5]u8,
    // seen_rows[r] bit c set <=> grid[r][c] has been drawn.
    seen_rows: [5]BitSet(5),
    // seen_cols[c] bit r set <=> grid[r][c] has been drawn.
    seen_cols: [5]BitSet(5),

    /// Consume the next five lines from the iterator as a 5x5 grid of
    /// space-separated numbers. Panics (`.?`) on truncated input.
    fn parse(lines: *std.mem.TokenIterator(u8)) !Self {
        var grid: [5][5]u8 = undefined;
        var initBitSet: [5]BitSet(5) = undefined;
        var row: usize = 0;
        while (row < 5) : (row += 1) {
            var numbers = std.mem.tokenize(u8, lines.next().?, " ");
            initBitSet[row] = BitSet(5).initEmpty();
            var col: usize = 0;
            while (col < 5) : (col += 1) {
                const n: u8 = try std.fmt.parseInt(u8, numbers.next().?, 10);
                grid[row][col] = n;
            }
        }
        // Arrays are value types: both fields get independent copies of the
        // all-empty bit sets.
        return Board{
            .grid = grid,
            .seen_rows = initBitSet,
            .seen_cols = initBitSet,
        };
    }

    /// Mark every cell equal to `x` (duplicates on a board are all marked).
    fn play(self: *Self, x: u8) void {
        for (self.grid) |row, row_idx| {
            for (row) |cell, col_idx| {
                if (cell == x) {
                    self.seen_rows[row_idx].set(col_idx);
                    self.seen_cols[col_idx].set(row_idx);
                }
            }
        }
    }

    /// True when any full row or full column is marked.
    fn win(self: *Self) bool {
        for (self.grid) |_, idx| {
            if (self.seen_rows[idx].count() == 5 or self.seen_cols[idx].count() == 5) {
                return true;
            }
        }
        return false;
    }

    // Sum of all the unseen numbers
    fn score(self: *Self) usize {
        var out: usize = 0;
        for (self.grid) |row, row_idx| {
            for (row) |cell, col_idx| {
                if (!self.seen_rows[row_idx].isSet(col_idx)) {
                    out += cell;
                }
            }
        }
        return out;
    }
};

// Parsed puzzle: the drawn numbers and all boards. Owns both slices.
const Bingo = struct {
    const Self = @This();
    allocator: Allocator,
    numbers: []u8,
    boards: []Board,

    fn deinit(self: Self) void {
        self.allocator.free(self.numbers);
        self.allocator.free(self.boards);
    }

    /// Parse the full input: first line is the comma-separated draw order,
    /// followed by blank-line-separated 5x5 boards.
    fn parse(allocator: Allocator, input: []const u8) !Self {
        // NOTE(review): `buffer` is never used beyond init/deinit — looks
        // like dead code left from an earlier revision.
        var buffer = std.ArrayList(u8).init(allocator);
        defer buffer.deinit();
        var lines = std.mem.tokenize(u8, input, "\n");
        const numbers_line = lines.next().?;
        var numbers_str = std.mem.tokenize(u8, numbers_line, ",");
        var numbers = std.ArrayList(u8).init(allocator);
        while (numbers_str.next()) |n_str| {
            const n = try std.fmt.parseInt(u8, n_str, 10);
            try numbers.append(n);
        }
        var boards = std.ArrayList(Board).init(allocator);
        while (lines.rest().len > 0) {
            const board = try Board.parse(&lines);
            try boards.append(board);
        }
        return Bingo{
            .allocator = allocator,
            .numbers = numbers.toOwnedSlice(),
            .boards = boards.toOwnedSlice(),
        };
    }
};

test "bingo" {
    const input =
        \\7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
        \\
        \\22 13 17 11 0
        \\ 8 2 23 4 24
        \\21 9 14 16 7
        \\ 6 10 3 18 5
        \\ 1 12 20 15 19
        \\
        \\ 3 15 0 2 22
        \\ 9 18 13 17 5
        \\19 8 7 25 23
        \\20 11 10 24 4
        \\14 21 16 12 6
        \\
        \\14 21 17 24 4
        \\10 16 15 9 19
        \\18 8 23 26 20
        \\22 11 13 6 5
        \\ 2 0 12 3 7
    ;
    const allocator = std.testing.allocator;
    const bingo = try Bingo.parse(allocator, input[0..]);
    defer bingo.deinit();
    const res = try solve(allocator, bingo);
    try std.testing.expectEqual(@as(usize, 4512), res.part1);
    try std.testing.expectEqual(@as(usize, 1924), res.part2);
}
src/day04.zig
const std = @import("std");
const uefi = std.os.uefi;
const sabaton = @import("root").sabaton;
const root = @import("root");

/// Locate the UEFI Graphics Output Protocol, register the framebuffer it
/// exposes, and fill in sabaton.fb (pixel layout and geometry). If the
/// framebuffer range is not covered by the firmware memory map, map it into
/// `page_root` both identity-mapped and at the upper-half alias.
pub fn init(page_root: *sabaton.paging.Root) callconv(.Inline) void {
    const gop = root.locateProtocol(uefi.protocols.GraphicsOutputProtocol) orelse {
        sabaton.puts("No graphics protocol found!\n");
        return;
    };

    sabaton.add_framebuffer(gop.mode.frame_buffer_base);

    const info = gop.mode.info;

    // Single pass over the pixel format: bits-per-pixel plus the size and
    // shift of each color channel.
    switch (info.pixel_format) {
        .PixelRedGreenBlueReserved8BitPerColor => {
            sabaton.fb.bpp = 32;
            sabaton.fb.red_mask_size = 8;
            sabaton.fb.green_mask_size = 8;
            sabaton.fb.blue_mask_size = 8;
            sabaton.fb.red_mask_shift = 0;
            sabaton.fb.green_mask_shift = 8;
            sabaton.fb.blue_mask_shift = 16;
        },
        .PixelBlueGreenRedReserved8BitPerColor => {
            sabaton.fb.bpp = 32;
            sabaton.fb.red_mask_size = 8;
            sabaton.fb.green_mask_size = 8;
            sabaton.fb.blue_mask_size = 8;
            sabaton.fb.blue_mask_shift = 0;
            sabaton.fb.green_mask_shift = 8;
            sabaton.fb.red_mask_shift = 16;
        },
        .PixelBitMask => {
            const mask = info.pixel_information;
            // 24-bit only when there is no reserved channel at all.
            sabaton.fb.bpp = if (mask.reserved_mask == 0) 24 else 32;
            sabaton.fb.red_mask_shift = @ctz(u32, mask.red_mask);
            sabaton.fb.red_mask_size = @popCount(u32, mask.red_mask);
            sabaton.fb.green_mask_shift = @ctz(u32, mask.green_mask);
            sabaton.fb.green_mask_size = @popCount(u32, mask.green_mask);
            sabaton.fb.blue_mask_shift = @ctz(u32, mask.blue_mask);
            sabaton.fb.blue_mask_size = @popCount(u32, mask.blue_mask);
        },
        else => unreachable,
    }

    sabaton.fb.width = @intCast(u16, info.horizontal_resolution);
    sabaton.fb.height = @intCast(u16, info.vertical_resolution);
    sabaton.fb.pitch = @intCast(u16, info.pixels_per_scan_line * sabaton.fb.bpp / 8);

    if (!root.memmap.containsAddr(sabaton.fb.addr)) {
        // Firmware did not hand us this range; map it ourselves, both at its
        // physical address and in the higher half.
        const fb_bytes = @as(u64, sabaton.fb.pitch) * @as(u64, sabaton.fb.height);
        sabaton.paging.map(sabaton.fb.addr, sabaton.fb.addr, fb_bytes, .rw, .memory, page_root);
        sabaton.paging.map(sabaton.upper_half_phys_base + sabaton.fb.addr, sabaton.fb.addr, fb_bytes, .rw, .memory, page_root);
    }

    sabaton.puts("Mapped framebuffer!\n");
}
src/platform/uefi_aarch64/framebuffer.zig
const std = @import("std"); const mem = std.mem; const os = std.os; const debug = std.debug; const fs = std.fs; const Allocator = mem.Allocator; const page_allocator = std.heap.page_allocator; const ArrayListAligned = std.ArrayListAligned; const builtin = @import("builtin"); const s2 = builtin.zig_backend != .stage1; const ArrayList = std.ArrayList; code: ArrayListAligned(u8, 4096), /// offset of each encoded instruction. Might not be needed /// but useful for debugging. inst_off: ArrayList(u32), inst_dbg: ArrayList(usize), const Self = @This(); /// Registers for pointers and pointer-sized int /// /// smaller sizes will be handled by separate modifiers /// Question: support ah,ch,dh,bh at all? perhaps as separate pseudo-op. pub const IPReg = enum(u4) { // 0 through 15, 64-bit registers. 8-15 are extended. // id is just the int value. rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15, pub fn id(self: @This()) u4 { return @enumToInt(self); } pub fn lowId(self: @This()) u3 { return @truncate(u3, @enumToInt(self)); } pub fn ext(self: @This()) bool { return @enumToInt(self) >= 0x08; } }; pub const AOp = enum(u3) { add, bor, adc, sbb, band, // There is no band! 
sub, xor, cmp, fn off(self: @This()) u8 { return @as(u8, @enumToInt(self)) * 8; } pub fn opx(self: @This()) u3 { return @enumToInt(self); } }; pub const ShiftOp = enum { hl, // logically left ar, // arithmically right hr, // logically right const al = .hl; // arithmically left, same as logically left // pub fn to_pp(self: @This()) PP { return @intToEnum(PP, @enumToInt(self) + 1); } }; pub const Cond = enum(u4) { o, // overflow no, b, // below nb, e, // equal ne, na, a, // above s, // sign ns, p, // parity np, l, // less nl, ng, g, // greater const C = @This(); pub const c = C.b; pub const nc = C.nb; pub const ae = C.nb; pub const be = C.na; pub const ge = C.nl; pub const le = C.ng; fn off(self: @This()) u8 { return @as(u8, @enumToInt(self)); } }; pub const VCmp = enum(u5) { eq, lt, le, unord, neq, nlt, nle, ord, eq_uq, nge, ngt, @"false", neq_oq, ge, gt, @"true", eq_os, lt_oq, le_oq, unord_s, neq_us, nlt_uq, nle_uq, ord_s, eq_us, nge_uq, ngt_uq, false_os, neq_os, ge_oq, gt_oq, true_us, fn val(self: @This()) u8 { return @as(u8, @enumToInt(self)); } }; const PP = enum(u2) { none, h66, F3, F2, fn val(self: @This()) u8 { return @as(u8, @enumToInt(self)); } }; // common floating-point modes of VEX instructions pub const FMode = enum(u3) { ps4, pd2, ss, sd, ps8, pd4, fn pp(self: @This()) PP { return @intToEnum(PP, @truncate(u2, @enumToInt(self))); } fn l(self: @This()) bool { return @enumToInt(self) >= 4; } fn scalar(self: @This()) bool { return self == @This().ss or self == @This().sd; } fn double(self: @This()) bool { return @truncate(u1, @enumToInt(self)) == 1; } }; const MM = enum(u5) { h0F = 1, h0F38 = 2, h0F3A = 3, fn val(self: @This()) u8 { return @as(u8, @enumToInt(self)); } }; pub const IMode = enum(u2) { b, w, d, q, fn off(self: @This()) u8 { return @as(u8, @enumToInt(self)); } }; pub const VMathOp = enum(u3) { add = 0, mul = 1, sub = 4, min = 5, div = 6, max = 7, pub fn off(self: @This()) u8 { return @as(u8, @enumToInt(self)); } }; pub fn init(allocator: 
Allocator) !Self { // TODO: allocate consequtive mprotectable pages return Self{ .code = try ArrayListAligned(u8, 4096).initCapacity(page_allocator, 4096), .inst_off = ArrayList(u32).init(allocator), .inst_dbg = ArrayList(usize).init(allocator), }; } pub fn deinit(self: *Self) void { // TODO: only in debug mode (as clobbers the array, needs r/w) os.mprotect(self.code.items.ptr[0..self.code.capacity], os.PROT.READ | os.PROT.WRITE) catch unreachable; self.code.deinit(); self.inst_off.deinit(); self.inst_dbg.deinit(); } fn new_inst(self: *Self, addr: usize) !void { var size = @intCast(u32, self.get_target()); try self.inst_off.append(size); try self.inst_dbg.append(addr); } // TODO: use appendAssumeCapacity in a smart way like arch/x86_64 pub fn wb(self: *Self, opcode: u8) !void { try self.code.append(opcode); } pub fn wbi(self: *Self, imm: i8) !void { try self.wb(@bitCast(u8, imm)); } pub fn wd(self: *Self, dword: i32) !void { std.mem.writeIntLittle(i32, try self.code.addManyAsArray(4), dword); } pub fn wq(self: *Self, qword: u64) !void { std.mem.writeIntLittle(u64, try self.code.addManyAsArray(8), qword); } pub fn set_align(self: *Self, alignment: u32) !void { var residue = self.get_target() & (alignment - 1); var padding = alignment - residue; if (padding != 0 and padding != alignment) { try self.code.appendNTimes(0x90, padding); } } // encodings pub fn rex_wrxb(self: *Self, w: bool, r: bool, x: bool, b: bool) !void { var value: u8 = 0x40; if (w) value |= 0b1000; if (r) value |= 0b0100; if (x) value |= 0b0010; if (b) value |= 0b0001; if (value != 0x40) { try self.wb(value); } } pub fn modRm(self: *Self, mod: u2, reg_or_opx: u3, rm: u3) !void { try self.wb(@as(u8, mod) << 6 | @as(u8, reg_or_opx) << 3 | rm); } pub fn sib(self: *Self, scale: u2, index: u3, base: u3) !void { try self.wb(@as(u8, scale) << 6 | @as(u8, index) << 3 | base); } pub const EAddr = struct { base: ?IPReg, // null for rip[offset] index: ?IPReg = null, scale: u2 = 0, offset: i32 = 0, pub inline fn 
b(self: @This()) bool { return if (self.base) |base| base.ext() else false; } pub inline fn x(self: @This()) bool { return if (self.index) |index| index.ext() else false; } pub inline fn o(self: @This(), offset: i32) @This() { var newself = self; newself.offset += offset; return newself; } }; pub fn a(reg: IPReg) EAddr { return .{ .base = reg }; } pub fn bo(reg: IPReg, offset: i32) EAddr { return .{ .base = reg, .offset = offset }; } pub fn rel(offset: u32) EAddr { return .{ .base = null, .offset = @bitCast(i32, offset) }; } // index quadword array pub fn qi(base: IPReg, index: IPReg) EAddr { return .{ .base = base, .index = index, .scale = 3 }; } pub fn bi(base: IPReg, index: IPReg) EAddr { return .{ .base = base, .index = index, .scale = 0 }; } pub fn maybe_imm8(imm: i32) ?i8 { var imm8 = @truncate(i8, imm); return if (imm == imm8) imm8 else null; } // write modrm byte + optional SIB + optional offset // caller needs to handle ea.x() and ea.b() for addresses // with extended indices! pub fn modRmEA(self: *Self, reg_or_opx: u3, ea: EAddr) !void { const offset8 = maybe_imm8(ea.offset); const mod: u2 = if (ea.base == null or (ea.offset == 0 and ea.base.? 
!= .rbp)) @as(u2, 0b00) else if (offset8 != null) @as(u2, 0b01) else @as(u2, 0b10); // we allow base == null, index = null to encode RIP+off32, but not // yet index without base ( i e scale*index+off ) if (ea.base == null and ea.index != null) return error.NotImplemented; const rm = if (ea.base) |base| base.lowId() else 0x05; try self.modRm(mod, reg_or_opx, if (ea.index) |_| 0x04 else rm); if (ea.index == null and rm == 0x04) { // no index, but RSP/R12 as base // forces a SIB byte try self.sib(0b00, 0x04, 0x04); } else if (ea.index) |index| { if (index == .rsp) { return error.InvalidIndex; } try self.sib(ea.scale, index.lowId(), rm); } if (ea.base == null) { // rip+off32 try self.wd(ea.offset - (@bitCast(i32, self.get_target()) + 4)); } else if (mod != 0b00) { // TODO: stage2 // try if (offset8) |off| self.wbi(off) else self.wd(ea.offset); if (offset8) |off| (try self.wbi(off)) else (try self.wd(ea.offset)); } } pub fn tibflag(comptime T: type, flag: bool) u8 { return @as(T, @boolToInt(!flag)); } // Note: implements inversion of r, vvvv pub fn vex2(self: *Self, r: bool, vv: u4, l: bool, pp: PP) !void { try self.wb(0xC5); try self.wb(tibflag(u8, r) << 7 | @as(u8, ~vv) << 3 | @as(u8, @boolToInt(l)) << 2 | pp.val()); } // Note: implements inversion of wrxb, vvvv pub fn vex3(self: *Self, w: bool, r: bool, x: bool, b: bool, mm: MM, vv: u4, l: bool, pp: PP) !void { try self.wb(0xC4); try self.wb(tibflag(u8, r) << 7 | tibflag(u8, x) << 6 | tibflag(u8, b) << 5 | mm.val()); try self.wb(tibflag(u8, w) << 7 | @as(u8, ~vv) << 3 | @as(u8, @boolToInt(l)) << 2 | pp.val()); } pub fn vex0fwig(self: *Self, r: bool, x: bool, b: bool, vv: u4, l: bool, pp: PP) !void { // TODO: stage2 // try if (x or b) self.vex3(false, r, x, b, .h0F, vv, l, pp) else self.vex2(r, vv, l, pp); if (x or b) (try self.vex3(false, r, x, b, .h0F, vv, l, pp)) else (try self.vex2(r, vv, l, pp)); } // control flow pub fn ret(self: *Self) !void { try self.new_inst(@returnAddress()); try self.wb(0xC3); } pub fn 
enter(self: *Self) !void { try self.new_inst(@returnAddress()); try self.wb(0x55); // PUSH rbp try self.mov(.rbp, .rsp); } pub fn leave(self: *Self) !void { try self.new_inst(@returnAddress()); try self.mov(.rsp, .rbp); try self.wb(0x5D); // POP rbp } pub fn trap(self: *Self) !void { try self.new_inst(@returnAddress()); // WHEEEEEEEE! try self.wb(0xCC); // INT 03h } // there.. pub fn jfwd(self: *Self, cond: ?Cond) !u32 { try self.new_inst(@returnAddress()); if (cond) |c| { try self.wb(0x70 + c.off()); } else { try self.wb(0xeb); } var pos = @intCast(u32, self.code.items.len); try self.wb(0x00); // placeholder return pos; } pub fn set_target(self: *Self, pos: u32) !void { var off = self.get_target() - (pos + 1); if (off > 0x7f) { return error.InvalidNearJump; } self.code.items[pos] = @intCast(u8, off); } pub fn set_lea_target(self: *Self, pos: u32) void { self.set_lea(pos, self.get_target()); } pub fn set_lea(self: *Self, pos: u32, target: u32) void { var off = target - (pos + 4); self.code.items[pos] = @intCast(u8, off); std.mem.writeIntLittle(u32, self.code.items[pos..][0..4], off); } pub fn get_target(self: *Self) u32 { return @intCast(u32, self.code.items.len); } // .. 
and back again pub fn jbck(self: *Self, cond: ?Cond, target: u32) !void { try self.new_inst(@returnAddress()); var off = @intCast(i32, target) - (@intCast(i32, self.code.items.len) + 2); if (maybe_imm8(off)) |off8| { try self.wb(if (cond) |c| 0x70 + c.off() else 0xEB); try self.wbi(off8); } else { try self.wb(0x0f); try self.wb(if (cond) |c| 0x80 + c.off() else 0xe9); try self.wd(off - 4); // FETING: offset is larger as the jump instruction is larger } } // stack management fn push(self: *Self, src: IPReg) !void { // luring: 64-bit wide is already the default, // extension only needed for r8-r15 registers try self.rex_wrxb(false, false, false, src.ext()); try self.wb(0x50 + @as(u8, src.lowId())); } fn pop(self: *Self, dst: IPReg) !void { try self.rex_wrxb(false, false, false, dst.ext()); try self.wb(0x58 + @as(u8, dst.lowId())); } // mov and arithmetic inline fn op_rr(self: *Self, opcode: u8, dst: IPReg, src: IPReg) !void { try self.new_inst(@returnAddress()); try self.rex_wrxb(true, dst.ext(), false, src.ext()); try self.wb(opcode); // OP reg, \rm try self.modRm(0b11, dst.lowId(), src.lowId()); } pub fn mov(self: *Self, dst: IPReg, src: IPReg) !void { try self.op_rr(0x8b, dst, src); } pub fn arit(self: *Self, op: AOp, dst: IPReg, src: IPReg) !void { try self.op_rr(op.off() + 0b11, dst, src); } pub inline fn op_rm(self: *Self, opcode: u8, reg: IPReg, ea: EAddr) !void { try self.new_inst(@returnAddress()); try self.rex_wrxb(true, reg.ext(), ea.x(), ea.b()); try self.wb(opcode); try self.modRmEA(reg.lowId(), ea); } pub fn movrm(self: *Self, dst: IPReg, src: EAddr) !void { try self.op_rm(0x8b, dst, src); // MOV reg, \rm } pub fn movmr(self: *Self, dst: EAddr, src: IPReg) !void { try self.op_rm(0x89, src, dst); // MOV \rm, reg } pub fn aritrm(self: *Self, op: AOp, dst: IPReg, src: EAddr) !void { try self.op_rm(op.off() + 0b11, dst, src); } pub fn lea(self: *Self, dst: IPReg, src: EAddr) !void { try self.op_rm(0x8d, dst, src); // LEA reg, \rm } // load adress of a 
latter target into a register // this is useful i e to keep a pointer to section // of constants in an ordinary register pub fn lealink(self: *Self, dst: IPReg) !u32 { try self.new_inst(@returnAddress()); try self.rex_wrxb(true, dst.ext(), false, false); try self.wb(0x8d); try self.modRm(0x00, dst.lowId(), 0x05); const pos = self.get_target(); try self.wd(0); // placeholder return pos; } pub fn movri(self: *Self, dst: IPReg, src: i32) !void { try self.new_inst(@returnAddress()); // TODO: w bit should be avoidable in a lot of cases // like "mov rax, 1337" is equivalent to "mov eax, 1337" try self.rex_wrxb(true, false, false, dst.ext()); try self.wb(0xc7); // MOV \rm, imm32 try self.modRm(0b11, 0b000, dst.lowId()); try self.wd(src); } pub fn aritri(self: *Self, op: AOp, dst: IPReg, imm: i32) !void { const imm8 = maybe_imm8(imm); try self.new_inst(@returnAddress()); try self.rex_wrxb(true, false, false, dst.ext()); try self.wb(if (imm8 != null) 0x83 else 0x81); try self.modRm(0b11, op.opx(), dst.lowId()); try if (imm8) |i| self.wbi(i) else self.wd(imm); } pub fn movmi(self: *Self, dst: EAddr, src: i32) !void { try self.new_inst(@returnAddress()); try self.rex_wrxb(true, false, dst.x(), dst.b()); try self.wb(0xc7); // MOV \rm, imm32 try self.modRmEA(0b000, dst); try self.wd(src); } // VEX instructions // note: for now we use VEX for all xmm/ymm operations. // old school SSE forms might be shorter for some 128/scalar ops? 
// VEX-encoded register-register op: dst = op(src1, src2).
// src1 goes into the VEX.vvvv field; src2 is the ModRM r/m operand.
pub inline fn vop_rr(self: *Self, op: u8, fmode: FMode, dst: u4, src1: u4, src2: u4) !void {
    try self.new_inst(@returnAddress());
    try self.vex0fwig(dst > 7, false, src2 > 7, src1, fmode.l(), fmode.pp());
    try self.wb(op);
    try self.modRm(0b11, @truncate(u3, dst), @truncate(u3, src2));
}

// VEX-encoded register-memory op; ea supplies the ModRM/SIB memory operand.
pub inline fn vop_rm(self: *Self, op: u8, fmode: FMode, reg: u4, vreg: u4, ea: EAddr) !void {
    try self.new_inst(@returnAddress());
    try self.vex0fwig(reg > 7, ea.x(), ea.b(), vreg, fmode.l(), fmode.pp());
    try self.wb(op);
    try self.modRmEA(@truncate(u3, reg), ea);
}

// dst[low] = src2[low]; dst[high] = src[high]
// Only valid for the scalar fmodes (the merging form of vmovss/vmovsd).
pub fn vmov2(self: *Self, fmode: FMode, dst: u4, src1: u4, src2: u4) !void {
    if (!fmode.scalar()) {
        return error.InvalidFMode;
    }
    try self.vop_rr(0x10, fmode, dst, src1, src2);
}

// pseudo-instruction for moving register
// vmovsd xmm1, xmm1, xmm2
// vmovupd xmm1, xmm2
pub fn vmovf(self: *Self, fmode: FMode, dst: u4, src: u4) !void {
    try self.vop_rr(0x10, fmode, dst, if (fmode.scalar()) dst else 0, src);
}

// Unaligned load: vmovu* dst, [src] (0x10 form).
pub fn vmovurm(self: *Self, fmode: FMode, dst: u4, src: EAddr) !void {
    try self.vop_rm(0x10, fmode, dst, 0, src);
}

// Unaligned store: vmovu* [dst], src (0x11 form).
pub fn vmovumr(self: *Self, fmode: FMode, dst: EAddr, src: u4) !void {
    try self.vop_rm(0x11, fmode, src, 0, dst);
}

// Aligned load where possible.
pub fn vmovarm(self: *Self, fmode: FMode, dst: u4, src: EAddr) !void {
    // scalar load/store cannot use vmova* encoding, so don't
    const op: u8 = if (fmode.scalar()) 0x10 else 0x28;
    try self.vop_rm(op, fmode, dst, 0, src);
}

// Aligned store where possible (falls back to the scalar 0x11 form).
pub fn vmovamr(self: *Self, fmode: FMode, dst: EAddr, src: u4) !void {
    const op: u8 = if (fmode.scalar()) 0x11 else 0x29;
    try self.vop_rm(op, fmode, src, 0, dst);
}

// Broadcast a scalar from memory into all lanes of dst (vbroadcastss/sd).
// note: not all fmodes make sense (and pd2 is not mentioned in the manual)
pub fn vbroadcast(self: *Self, fmode: FMode, dst: u4, src: EAddr) !void {
    try self.new_inst(@returnAddress());
    try self.vex3(false, dst > 7, src.x(), src.b(), .h0F38, 0, fmode.l(), .h66);
    try self.wb(if (fmode.double()) 0x19 else 0x18);
    try self.modRmEA(@truncate(u3, dst), src);
}

pub fn
vmathf(self: *Self, op: VMathOp, fmode: FMode, dst: u4, src1: u4, src2: u4) !void {
    // Float math: opcode base 0x58 (add) plus the per-op offset.
    try self.vop_rr(0x58 + op.off(), fmode, dst, src1, src2);
}

// Float math with a memory source operand.
pub fn vmathfrm(self: *Self, op: VMathOp, fmode: FMode, dst: u4, src1: u4, src2: EAddr) !void {
    try self.vop_rm(0x58 + op.off(), fmode, dst, src1, src2);
}

// Vector compare: VCMPxx (opcode 0xC2) followed by the predicate immediate.
// NOTE(review): src2 is declared as EAddr but is forwarded to vop_rr, whose
// last parameter is u4 -- this looks like it should be `src2: u4` or a call
// to vop_rm instead; confirm before first use (Zig only analyzes referenced fns).
pub fn vcmp(self: *Self, op: VCmp, fmode: FMode, dst: u4, src1: u4, src2: EAddr) !void {
    if (fmode.scalar()) {
        return error.FEEEEL; // TODO:probably does something useful for scalars?
    }
    try self.vop_rr(0xC2, fmode, dst, src1, src2);
    try self.wb(op.val());
}

// integer vector instructions

// VEX-encoded integer register-register op ("wide" selects 256-bit ymm).
pub inline fn vop_i_rr(self: *Self, op: u8, wide: bool, pp: PP, dst: u4, src1: u4, src2: u4) !void {
    try self.new_inst(@returnAddress());
    try self.vex0fwig(dst > 7, false, src2 > 7, src1, wide, pp);
    try self.wb(op);
    try self.modRm(0b11, @truncate(u3, dst), @truncate(u3, src2));
}

// VEX-encoded integer register-memory op.
pub inline fn vop_i_rm(self: *Self, op: u8, wide: bool, pp: PP, reg: u4, vreg: u4, ea: EAddr) !void {
    try self.new_inst(@returnAddress());
    try self.vex0fwig(reg > 7, ea.x(), ea.b(), vreg, wide, pp);
    try self.wb(op);
    try self.modRmEA(@truncate(u3, reg), ea);
}

// vmovdqa dst, src (register move, 0x6F with 66 prefix).
pub fn vmovdq(self: *Self, wide: bool, dst: u4, src: u4) !void {
    try self.vop_i_rr(0x6f, wide, .h66, dst, 0, src);
}

// Aligned integer load: vmovdqa dst, [src].
pub fn vmovdqarm(self: *Self, wide: bool, dst: u4, src: EAddr) !void {
    try self.vop_i_rm(0x6f, wide, .h66, dst, 0, src);
}

// Aligned integer store: vmovdqa [dst], src.
pub fn vmovdqamr(self: *Self, wide: bool, dst: EAddr, src: u4) !void {
    try self.vop_i_rm(0x7f, wide, .h66, src, 0, dst);
}

// Unaligned integer load: vmovdqu dst, [src] (F3 prefix).
pub fn vmovdqurm(self: *Self, wide: bool, dst: u4, src: EAddr) !void {
    try self.vop_i_rm(0x6f, wide, .F3, dst, 0, src);
}

// Unaligned integer store: vmovdqu [dst], src.
pub fn vmovdqumr(self: *Self, wide: bool, dst: EAddr, src: u4) !void {
    try self.vop_i_rm(0x7f, wide, .F3, src, 0, dst);
}

// Integer vector add (vpaddb/w/d/q); .q uses its own opcode 0xD4,
// the byte/word/dword forms are 0xFC + element-size offset.
pub fn vaddi(self: *Self, wide: bool, imode: IMode, dst: u4, src1: u4, src2: u4) !void {
    const op = if (imode == .q) 0xD4 else (0xFC + imode.off());
    try self.vop_i_rr(op, wide, .h66, dst, src1, src2);
}

// vzeroupper: clear the upper halves of all ymm registers (avoids
// AVX/SSE transition penalties).
pub fn vzeroupper(self: *Self) !void {
    try self.vex2(false,
        0, false, PP.none);
    try self.wb(0x77);
}

// vzeroall: zero all ymm registers entirely (VEX.L = 1 variant of 0x77).
pub fn vzeroall(self: *Self) !void {
    try self.vex2(false, 0, true, PP.none);
    try self.wb(0x77);
}

// BMI instructions: GPR operations coded with VEX

// BMI register-register op; src2 is encoded in the VEX.vvvv field.
pub inline fn bmi_rr(self: *Self, op: u8, wide: bool, pp: PP, dst: IPReg, src1: IPReg, src2: IPReg) !void {
    try self.new_inst(@returnAddress());
    try self.vex3(wide, dst.ext(), src1.ext(), false, .h0F38, src2.id(), false, pp);
    try self.wb(op);
    try self.modRm(0b11, dst.lowId(), src1.lowId());
}

// Variable shift (shlx/shrx/sarx, opcode 0xF7): dst = src1 shifted by src2.
// The pp prefix selects which of the three shift variants is emitted.
pub fn sx(self: *Self, op: ShiftOp, dst: IPReg, src1: IPReg, src2: IPReg) !void {
    try self.bmi_rr(0xf7, true, op.to_pp(), dst, src1, src2);
}

// output functions

// Dump the raw emitted machine code to "test.o" in the current directory
// (for offline inspection, e.g. with ndisasm/objdump).
pub fn dump(self: *Self) !void {
    try fs.cwd().writeFile("test.o", self.code.items);
}

// Disassemble the current code buffer by piping it to an external
// `ndisasm -b 64 -` process; output goes to ndisasm's stdout.
pub fn dbg_nasm(self: *Self, allocator: Allocator) !void {
    var nasm = try std.ChildProcess.init(&[_][]const u8{ "ndisasm", "-b", "64", "-" }, allocator);
    defer nasm.deinit();
    nasm.stdin_behavior = .Pipe;
    _ = try std.io.getStdOut().write("\n");
    try nasm.spawn();
    _ = try nasm.stdin.?.write(self.code.items);
    _ = nasm.stdin.?.close();
    nasm.stdin = null; // prevent a double-close when the child is reaped
    _ = try nasm.wait();
}

// Flip the code buffer to read+execute so it can be called.
// Note: protects the full capacity, not just items.len -- presumably the
// buffer is page-allocated; confirm against the init/allocation code.
pub fn finalize(self: *Self) !void {
    try os.mprotect(self.code.items.ptr[0..self.code.capacity], os.PROT.READ | os.PROT.EXEC);
}

// Reinterpret the code at `target` as a function pointer of type T.
// Caller must have called finalize() first.
pub fn get_ptr(self: *Self, target: u32, comptime T: type) T {
    return @ptrCast(T, self.code.items[target..].ptr);
}

// Test helper: finalize and call the code at offset 0 as fn(usize, usize) -> usize.
pub fn test_call2(self: *Self, arg1: usize, arg2: usize) !usize {
    try self.finalize();
    const FunPtr = fn (arg1: usize, arg2: usize) callconv(.C) usize;
    return self.get_ptr(0, FunPtr)(arg1, arg2);
}

// Test helper: same as test_call2 but with f64 arguments/return (xmm regs).
pub fn test_call2f64(self: *Self, arg1: f64, arg2: f64) !f64 {
    try self.finalize();
    const FunPtr = fn (arg1: f64, arg2: f64) callconv(.C) f64;
    return self.get_ptr(0, FunPtr)(arg1, arg2);
}

// Test helper: generic variant, argument types inferred from the call site.
pub fn test_call2x(self: *Self, comptime T: type, arg1: anytype, arg2: anytype) !T {
    try self.finalize();
    const FunPtr = fn (arg1: @TypeOf(arg1), arg2: @TypeOf(arg2)) callconv(.C) T;
    return self.get_ptr(0, FunPtr)(arg1, arg2);
}

const test_allocator =
std.testing.allocator;
const expectEqual = std.testing.expectEqual;

// for quick debugging change ret to retnasm
pub fn retnasm(self: *Self) !void {
    try self.ret();
    try self.dbg_nasm(test_allocator);
}

// Print, for every recorded instruction, its index and the source location
// of the emit call that produced it (using the captured @returnAddress).
pub fn dbg_test(self: *Self) !void {
    const stderr = std.io.getStdErr().writer();
    const dbginfo = try debug.getSelfDebugInfo();
    const tty_config = debug.detectTTYConfig();
    for (self.inst_dbg.items) |x, i| {
        debug.print("{} {}\n", .{ i, x });
        try debug.printSourceAtAddress(dbginfo, stderr, x, tty_config);
    }
}

// Map an address inside the emitted code buffer back to the return address
// of the emit call that produced the containing instruction; addresses
// outside the buffer are returned unchanged.
pub fn lookup(self: *Self, addr: usize) usize {
    const startaddr: usize = @ptrToInt(self.code.items.ptr);
    const endaddr: usize = startaddr + self.code.items.len;
    if (startaddr <= addr and addr < endaddr) {
        const off = addr - startaddr;
        // find the first instruction whose successor starts beyond `off`
        for (self.inst_dbg.items) |x, i| {
            if (i + 1 >= self.inst_off.items.len or off < self.inst_off.items[i + 1]) {
                return x;
            }
        }
    }
    return addr;
}

test "return first argument" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.mov(.rax, .rdi);
    try cfo.ret();
    try expectEqual(@as(usize, 4), try cfo.test_call2(4, 10));
}

test "return second argument" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.mov(.rax, .rsi);
    try cfo.ret();
    try expectEqual(@as(usize, 10), try cfo.test_call2(4, 10));
}

test "read/write first arg as 64-bit pointer" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // rax = *rdi; *rdi = rsi
    try cfo.movrm(.rax, a(.rdi));
    try cfo.movmr(a(.rdi), .rsi);
    try cfo.ret();

    var someint: u64 = 33;
    var retval = try cfo.test_call2(@ptrToInt(&someint), 10);
    try expectEqual(@as(usize, 33), retval);
    try expectEqual(@as(usize, 10), someint);
}

test "read/write first arg as 64-bit pointer with offsett" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // base+offset addressing: read [rdi+8], write [rdi+16]
    try cfo.movrm(.rax, bo(.rdi, 0x08));
    try cfo.movmr(bo(.rdi, 0x10), .rsi);
    try cfo.ret();

    var someint: [2]u64 = .{ 33, 45 };
    var retval = try cfo.test_call2(@ptrToInt(&someint) - 8, 79);
    try expectEqual(@as(usize, 33), retval);
    try expectEqual(@as(usize, 33), someint[0]);
    try expectEqual(@as(usize, 79), someint[1]);
}

test "RIP-relative read" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // pad with NOPs, then plant a known constant ahead of the entry point
    try cfo.wq(0x9090909090909090);
    const theconst = cfo.get_target();
    try cfo.wq(0x1122334455667788);
    // not needed, but put nasm back in style
    // try cfo.wb(0x00);

    const entry = cfo.get_target();
    try cfo.enter();
    try cfo.movrm(.rax, rel(theconst)); // rax = [rip+disp] -> the constant
    try cfo.leave();
    try cfo.ret();
    try cfo.finalize();
    const fun = cfo.get_ptr(entry, fn () callconv(.C) u64);
    try expectEqual(@as(u64, 0x1122334455667788), fun());
}

test "lealink" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();
    //const OSHA = @import("./OSHA.zig");
    //OSHA.install(&cfo);
    //defer OSHA.clear();

    const entry = cfo.get_target();
    try cfo.enter();
    // rdx points at the constant pool patched in below via set_lea_target
    const link = try cfo.lealink(.rdx);
    try cfo.movrm(.rax, a(.rdx));
    try cfo.movrm(.rcx, a(.rdx).o(8));
    try cfo.movmr(a(.rdi), .rcx);
    try cfo.leave();
    try cfo.ret();
    try cfo.set_align(8);
    cfo.set_lea_target(link);
    try cfo.wq(0x8822883344114422);
    try cfo.wq(0x0104050610405060);
    try cfo.finalize();

    const fun = cfo.get_ptr(entry, fn (*u64) callconv(.C) u64);
    var somemem: u64 = undefined;
    try expectEqual(@as(u64, 0x8822883344114422), fun(&somemem));
    try expectEqual(@as(u64, 0x0104050610405060), somemem);
}

test "return intermediate value" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // try cfo.movri(.rbx, 20);
    // try cfo.movri(.r15, 7);
    try cfo.movri(.rax, 1337);
    try cfo.ret();

    var retval = try cfo.test_call2(7, 8);
    try expectEqual(@as(usize, 1337), retval);
}

test "write intermediate value to 64-bit pointer" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.movmi(a(.rdi), 586);
    try cfo.ret();

    var someint: u64 = 33;
    _ = try cfo.test_call2(@ptrToInt(&someint), 8);
    try expectEqual(@as(usize, 586), someint);
}

test "use r12 for base address" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // r12 is callee-saved.
    // ...so save it (r12 also exercises the SIB-byte special case in ModRM).
    try cfo.mov(.rcx, .r12);
    try cfo.mov(.r12, .rdi);
    try cfo.movmi(a(.r12), 389);
    try cfo.mov(.r12, .rcx);
    try cfo.ret();

    var someint: u64 = 33;
    _ = try cfo.test_call2(@ptrToInt(&someint), 8);
    try expectEqual(@as(usize, 389), someint);
}

test "add arguments" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.mov(.rax, .rdi);
    try cfo.arit(.add, .rax, .rsi);
    try cfo.ret();

    var retval = try cfo.test_call2(1002, 560);
    try expectEqual(@as(usize, 1562), retval);
}

test "add arguments using lea" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // rax = rdi + rsi via base+index addressing
    try cfo.lea(.rax, bi(.rdi, .rsi));
    try cfo.ret();

    var retval = try cfo.test_call2(736, 121);
    try expectEqual(@as(usize, 857), retval);
}

test "add scaled arguments using lea" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // rax = rdi + 8*rsi (scaled index)
    try cfo.lea(.rax, qi(.rdi, .rsi));
    try cfo.ret();

    var retval = try cfo.test_call2(736, 121);
    try expectEqual(@as(usize, 1704), retval);
}

test "subtract arguments" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.mov(.rax, .rdi);
    try cfo.arit(.sub, .rax, .rsi);
    try cfo.ret();

    var retval = try cfo.test_call2(1002, 560);
    try expectEqual(@as(usize, 442), retval);
}

test "add imm8 to argument" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // 64 fits in imm8 -> exercises the short 0x83 form of aritri
    try cfo.mov(.rax, .rdi);
    try cfo.aritri(.add, .rax, 64);
    try cfo.ret();

    var retval = try cfo.test_call2(120, 9204);
    try expectEqual(@as(usize, 184), retval);
}

test "add immediate to argument" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // 137 does not fit in imm8 -> exercises the 0x81 imm32 form
    try cfo.mov(.rax, .rdi);
    try cfo.aritri(.add, .rax, 137);
    try cfo.ret();

    var retval = try cfo.test_call2(100, 560);
    try expectEqual(@as(usize, 237), retval);
}

test "get the maximum of two args" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.mov(.rax, .rdi);
    try cfo.arit(.cmp, .rdi, .rsi);
    const jump = try cfo.jfwd(.g); // forward jump, patched by set_target
    try cfo.mov(.rax, .rsi);
    try cfo.set_target(jump);
    try cfo.ret();

    var retval = try cfo.test_call2(1002, 560);
    try
    expectEqual(@as(usize, 1002), retval);
    retval = try cfo.test_call2(460, 902);
    try expectEqual(@as(usize, 902), retval);
}

test "jump backwards in a loop" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // sum 1..rdi: rax += rdi; rdi -= 1; loop while rdi != 0
    try cfo.arit(.xor, .rax, .rax);
    const loop = cfo.get_target();
    try cfo.arit(.add, .rax, .rdi);
    try cfo.aritri(.sub, .rdi, 1);
    // equal -> zero after the subtraction
    try cfo.jbck(.ne, loop);
    try cfo.ret();

    var retval = try cfo.test_call2(10, 560);
    try expectEqual(@as(usize, 55), retval);
    retval = try cfo.test_call2(20, 560);
    try expectEqual(@as(usize, 210), retval);
}

test "push/pop" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // round-trip a value through the stack via an extended register
    try cfo.push(.rdi);
    try cfo.pop(.r13);
    try cfo.mov(.rax, .r13);
    try cfo.ret();

    var retval = try cfo.test_call2(9009, 560);
    try expectEqual(@as(usize, 9009), retval);
}

test "add scalar double" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // xmm0 = xmm0 + xmm1 (args/result in xmm registers per SysV ABI)
    try cfo.vmathf(.add, .sd, 0, 0, 1);
    try cfo.ret();

    var retval = try cfo.test_call2f64(2.0, 0.5);
    try expectEqual(@as(f64, 2.5), retval);
}

test "max of scalar double" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.vmathf(.max, .sd, 0, 0, 1);
    try cfo.ret();

    var retval = try cfo.test_call2f64(2.0, 5.5);
    try expectEqual(@as(f64, 5.5), retval);
    retval = try cfo.test_call2f64(10.0, 8.5);
    try expectEqual(@as(f64, 10.0), retval);
}

test "move scalar double" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    try cfo.vmovf(.sd, 0, 1);
    try cfo.ret();

    var retval = try cfo.test_call2f64(22.0, 0.75);
    try expectEqual(@as(f64, 0.75), retval);
}

test "read/write scalar double" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // as we are swapping [rdi] and xmm0, use a temp
    try cfo.vmovurm(.sd, 1, a(.rdi));
    try cfo.vmovumr(.sd, a(.rdi), 0);
    try cfo.vmovf(.sd, 0, 1);
    try cfo.ret();

    var thefloat: f64 = 13.5;
    var retval = try cfo.test_call2x(f64, &thefloat, @as(f64, 0.25));
    try expectEqual(@as(f64, 13.5), retval);
    try expectEqual(@as(f64, 0.25), thefloat);
}
test "read/write aligned double vector" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // square each lane of a 4 x f64 ymm vector in place
    try cfo.vmovarm(.pd4, 0, a(.rdi));
    try cfo.vmathf(.mul, .pd4, 0, 0, 0);
    try cfo.vmovamr(.pd4, a(.rdi), 0);
    try cfo.ret();

    // align(32) is required for the aligned vmovapd forms
    var thevec: [4]f64 align(32) = .{ 13.5, 25.125, 4552.0, -50.5 };
    try cfo.test_call2x(void, &thevec, @as(u64, 0));
    try expectEqual(@as(f64, 182.25), thevec[0]);
    try expectEqual(@as(f64, 2550.25), thevec[3]);
}

test "add scalar double from memory" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();
    // on failure, dump a disassembly to aid debugging
    errdefer cfo.dbg_nasm(test_allocator) catch unreachable;

    // xmm0 = xmm0 + [rdi]
    try cfo.vmathfrm(.add, .sd, 0, 0, a(.rdi));
    try cfo.ret();

    var thefloat: f64 = 6.5;
    var retval = try cfo.test_call2x(f64, &thefloat, @as(f64, 0.125));
    try expectEqual(@as(f64, 6.625), retval);
    try expectEqual(@as(f64, 6.5), thefloat); // memory operand unmodified
}

test "shlx (shift left)" {
    var cfo = try init(test_allocator);
    defer cfo.deinit();

    // rax = rdi << rsi (BMI2 variable shift)
    try cfo.sx(.hl, .rax, .rdi, .rsi);
    try cfo.ret();

    var retval = try cfo.test_call2(17, 3);
    try expectEqual(@as(usize, 136), retval);
}
src/CFO.zig
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const zwin32 = @import("zwin32");
const w32 = zwin32.base;
const d3d12 = zwin32.d3d12;
const hrPanic = zwin32.hrPanic;
const hrPanicOnFail = zwin32.hrPanicOnFail;
const zd3d12 = @import("zd3d12");
const common = @import("common");
const GuiRenderer = common.GuiRenderer;
const c = common.c;
const zm = @import("zmath");
const zbt = @import("zbullet");
const zmesh = @import("zmesh");

// Exported so the D3D12 Agility SDK loader picks up the bundled runtime.
pub export const D3D12SDKVersion: u32 = 4;
pub export const D3D12SDKPath: [*:0]const u8 = ".\\d3d12\\";

const content_dir = @import("build_options").content_dir;

const window_name = "zig-gamedev: intro 6";
const window_width = 1920;
const window_height = 1080;

// Per-draw constant buffer layout; must match the HLSL cbuffer.
const Pso_DrawConst = struct {
    object_to_world: [16]f32,
};

// Per-frame constant buffer layout; must match the HLSL cbuffer.
const Pso_FrameConst = struct {
    world_to_clip: [16]f32,
};

// Vertex layout; must match the input layout declared in init().
const Pso_Vertex = struct {
    position: [3]f32,
    normal: [3]f32,
};

// All mutable application state for this demo.
const DemoState = struct {
    gctx: zd3d12.GraphicsContext,
    guir: GuiRenderer,
    frame_stats: common.FrameStats,

    simple_pso: zd3d12.PipelineHandle,
    physics_debug_pso: zd3d12.PipelineHandle,

    vertex_buffer: zd3d12.ResourceHandle,
    index_buffer: zd3d12.ResourceHandle,

    depth_texture: zd3d12.ResourceHandle,
    depth_texture_dsv: d3d12.CPU_DESCRIPTOR_HANDLE,

    mesh_num_vertices: u32,
    mesh_num_indices: u32,

    // cooldown timer for shooting; starts expired-ish at 1.0s
    keyboard_delay: f32 = 1.0,

    physics: struct {
        world: *const zbt.World,
        shapes: std.ArrayList(*const zbt.Shape),
        debug: *zbt.DebugDrawer,
    },

    camera: struct {
        position: [3]f32 = .{ 0.0, 10.0, -10.0 },
        forward: [3]f32 = .{ 0.0, 0.0, 1.0 },
        pitch: f32 = 0.15 * math.pi,
        yaw: f32 = 0.0,
    } = .{},

    mouse: struct {
        cursor_prev_x: i32 = 0,
        cursor_prev_y: i32 = 0,
    } = .{},
};

// Create the window, GPU resources, mesh data and physics world.
// Caller must pair with deinit().
fn init(allocator: std.mem.Allocator) !DemoState {
    const window = try common.initWindow(allocator, window_name, window_width, window_height);

    // arena for transient init-time allocations (shader blobs, mesh arrays)
    var arena_allocator_state = std.heap.ArenaAllocator.init(allocator);
    defer arena_allocator_state.deinit();
    const arena_allocator = arena_allocator_state.allocator();

    var gctx =
    zd3d12.GraphicsContext.init(allocator, window);

    // Enable vsync.
    gctx.present_flags = 0;
    gctx.present_interval = 1;

    // Pipeline for drawing the textured/lit cubes.
    const simple_pso = blk: {
        const input_layout_desc = [_]d3d12.INPUT_ELEMENT_DESC{
            d3d12.INPUT_ELEMENT_DESC.init("POSITION", 0, .R32G32B32_FLOAT, 0, 0, .PER_VERTEX_DATA, 0),
            d3d12.INPUT_ELEMENT_DESC.init("_Normal", 0, .R32G32B32_FLOAT, 0, 12, .PER_VERTEX_DATA, 0),
        };

        var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
        pso_desc.InputLayout = .{
            .pInputElementDescs = &input_layout_desc,
            .NumElements = input_layout_desc.len,
        };
        pso_desc.RTVFormats[0] = .R8G8B8A8_UNORM;
        pso_desc.NumRenderTargets = 1;
        pso_desc.DSVFormat = .D32_FLOAT;
        pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf;
        pso_desc.PrimitiveTopologyType = .TRIANGLE;

        break :blk gctx.createGraphicsShaderPipeline(
            arena_allocator,
            &pso_desc,
            content_dir ++ "shaders/simple.vs.cso",
            content_dir ++ "shaders/simple.ps.cso",
        );
    };

    // Pipeline for Bullet's wireframe debug lines (no vertex input layout --
    // the vertex shader reads from a shader resource view instead).
    const physics_debug_pso = blk: {
        var pso_desc = d3d12.GRAPHICS_PIPELINE_STATE_DESC.initDefault();
        pso_desc.RTVFormats[0] = .R8G8B8A8_UNORM;
        pso_desc.NumRenderTargets = 1;
        pso_desc.DSVFormat = .D32_FLOAT;
        pso_desc.BlendState.RenderTarget[0].RenderTargetWriteMask = 0xf;
        pso_desc.PrimitiveTopologyType = .LINE;

        break :blk gctx.createGraphicsShaderPipeline(
            arena_allocator,
            &pso_desc,
            content_dir ++ "shaders/physics_debug.vs.cso",
            content_dir ++ "shaders/physics_debug.ps.cso",
        );
    };

    // Load a mesh from file and store the data in temporary arrays.
    var mesh_indices = std.ArrayList(u32).init(arena_allocator);
    var mesh_positions = std.ArrayList([3]f32).init(arena_allocator);
    var mesh_normals = std.ArrayList([3]f32).init(arena_allocator);
    {
        // zmesh/cgltf is only needed during loading
        zmesh.init(arena_allocator);
        defer zmesh.deinit();

        const data = try zmesh.gltf.parseAndLoadFile(content_dir ++ "cube.gltf");
        defer zmesh.gltf.freeData(data);

        zmesh.gltf.appendMeshPrimitive(data, 0, 0, &mesh_indices, &mesh_positions, &mesh_normals, null, null);
    }
    const mesh_num_indices = @intCast(u32, mesh_indices.items.len);
    const mesh_num_vertices = @intCast(u32, mesh_positions.items.len);

    // GPU vertex buffer; created in COPY_DEST, filled via upload heap below.
    const vertex_buffer = gctx.createCommittedResource(
        .DEFAULT,
        d3d12.HEAP_FLAG_NONE,
        &d3d12.RESOURCE_DESC.initBuffer(mesh_num_vertices * @sizeOf(Pso_Vertex)),
        d3d12.RESOURCE_STATE_COPY_DEST,
        null,
    ) catch |err| hrPanic(err);

    // GPU index buffer, same lifecycle as the vertex buffer.
    const index_buffer = gctx.createCommittedResource(
        .DEFAULT,
        d3d12.HEAP_FLAG_NONE,
        &d3d12.RESOURCE_DESC.initBuffer(mesh_num_indices * @sizeOf(u32)),
        d3d12.RESOURCE_STATE_COPY_DEST,
        null,
    ) catch |err| hrPanic(err);

    // Depth buffer sized to the swapchain viewport.
    const depth_texture = gctx.createCommittedResource(
        .DEFAULT,
        d3d12.HEAP_FLAG_NONE,
        &blk: {
            var desc = d3d12.RESOURCE_DESC.initTex2d(.D32_FLOAT, gctx.viewport_width, gctx.viewport_height, 1);
            desc.Flags = d3d12.RESOURCE_FLAG_ALLOW_DEPTH_STENCIL | d3d12.RESOURCE_FLAG_DENY_SHADER_RESOURCE;
            break :blk desc;
        },
        d3d12.RESOURCE_STATE_DEPTH_WRITE,
        &d3d12.CLEAR_VALUE.initDepthStencil(.D32_FLOAT, 1.0, 0),
    ) catch |err| hrPanic(err);

    const depth_texture_dsv = gctx.allocateCpuDescriptors(.DSV, 1);
    gctx.device.CreateDepthStencilView(
        gctx.lookupResource(depth_texture).?,
        null,
        depth_texture_dsv,
    );

    // Bullet physics world + wireframe debug drawer. The drawer is heap
    // allocated so DemoState can keep a stable pointer to it.
    const physics_world = zbt.World.init(.{});

    var physics_debug = allocator.create(zbt.DebugDrawer) catch unreachable;
    physics_debug.* = zbt.DebugDrawer.init(allocator);

    physics_world.debugSetDrawer(&physics_debug.getDebugDraw());
    physics_world.debugSetMode(zbt.dbgmode_draw_wireframe);

    // Collision shapes are shared between bodies and owned by this list.
    const physics_shapes = blk: {
        var shapes = std.ArrayList(*const zbt.Shape).init(allocator);
        // slightly oversized box so stacked cubes visibly touch
        const box_shape = zbt.BoxShape.init(&.{ 1.05, 1.05, 1.05 });
        try shapes.append(box_shape.asShape());

        // large flat box acting as the ground plane
        const ground_shape = zbt.BoxShape.init(&.{ 50.0, 0.2, 50.0 });
        try shapes.append(ground_shape.asShape());

        const box_body = zbt.Body.init(
            1.0, // mass
            &zm.mat43ToArray(zm.translation(0.0, 3.0, 0.0)),
            box_shape.asShape(),
        );
        physics_world.addBody(box_body);

        const ground_body = zbt.Body.init(
            0.0, // static body
            &zm.mat43ToArray(zm.identity()),
            ground_shape.asShape(),
        );
        physics_world.addBody(ground_body);

        break :blk shapes;
    };

    // Record upload commands on the frame's command list.
    gctx.beginFrame();

    var guir = GuiRenderer.init(arena_allocator, &gctx, 1, content_dir);

    // Fill vertex buffer with vertex data.
    {
        const verts = gctx.allocateUploadBufferRegion(Pso_Vertex, mesh_num_vertices);
        for (mesh_positions.items) |_, i| {
            verts.cpu_slice[i].position = mesh_positions.items[i];
            verts.cpu_slice[i].normal = mesh_normals.items[i];
        }

        gctx.cmdlist.CopyBufferRegion(
            gctx.lookupResource(vertex_buffer).?,
            0,
            verts.buffer,
            verts.buffer_offset,
            verts.cpu_slice.len * @sizeOf(@TypeOf(verts.cpu_slice[0])),
        );
    }

    // Fill index buffer with index data.
    {
        const indices = gctx.allocateUploadBufferRegion(u32, mesh_num_indices);
        for (mesh_indices.items) |_, i| {
            indices.cpu_slice[i] = mesh_indices.items[i];
        }

        gctx.cmdlist.CopyBufferRegion(
            gctx.lookupResource(index_buffer).?,
            0,
            indices.buffer,
            indices.buffer_offset,
            indices.cpu_slice.len * @sizeOf(@TypeOf(indices.cpu_slice[0])),
        );
    }

    // Transition buffers from COPY_DEST to their draw-time states.
    gctx.addTransitionBarrier(vertex_buffer, d3d12.RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
    gctx.addTransitionBarrier(index_buffer, d3d12.RESOURCE_STATE_INDEX_BUFFER);
    gctx.flushResourceBarriers();

    gctx.endFrame();
    // Block until the uploads complete so the arena-backed staging data
    // can be freed when this function returns.
    gctx.finishGpuCommands();

    return DemoState{
        .gctx = gctx,
        .guir = guir,
        .frame_stats = common.FrameStats.init(),
        .simple_pso = simple_pso,
        .physics_debug_pso = physics_debug_pso,
        .vertex_buffer = vertex_buffer,
        .index_buffer = index_buffer,
        .depth_texture = depth_texture,
        .depth_texture_dsv = depth_texture_dsv,
        .mesh_num_vertices = mesh_num_vertices,
        .mesh_num_indices = mesh_num_indices,
        .physics = .{
            .world = physics_world,
            .shapes = physics_shapes,
            .debug = physics_debug,
        },
    };
}

// Tear down everything created by init(), in reverse dependency order.
fn deinit(demo: *DemoState, allocator: std.mem.Allocator) void {
    demo.gctx.finishGpuCommands();
    {
        // remove and destroy bodies back-to-front
        // NOTE(review): assumes getNumBodies() returns a signed integer --
        // with an unsigned type, `i >= 0` never fails and `i -= 1` would
        // underflow on the last iteration; confirm against the zbt API.
        var i = demo.physics.world.getNumBodies() - 1;
        while (i >= 0) : (i -= 1) {
            const body = demo.physics.world.getBody(i);
            demo.physics.world.removeBody(body);
            body.deinit();
        }
    }
    for (demo.physics.shapes.items) |shape| shape.deinit();
    demo.physics.shapes.deinit();
    demo.physics.debug.deinit();
    allocator.destroy(demo.physics.debug);
    demo.physics.world.deinit();
    demo.guir.deinit(&demo.gctx);
    demo.gctx.deinit(allocator);
    common.deinitWindow(allocator);
    demo.* = undefined;
}

// Per-frame simulation + input handling (no rendering).
fn update(demo: *DemoState) void {
    demo.frame_stats.update(demo.gctx.window, window_name);
    const dt = demo.frame_stats.delta_time;

    // step Bullet with the measured frame delta
    _ = demo.physics.world.stepSimulation(dt, .{});

    common.newImGuiFrame(dt);

    c.igSetNextWindowPos(
        .{ .x = @intToFloat(f32, demo.gctx.viewport_width) - 600.0 - 20, .y = 20.0 },
        c.ImGuiCond_FirstUseEver,
        .{ .x = 0.0, .y = 0.0 },
    );
    c.igSetNextWindowSize(.{ .x =
        600.0, .y = -1 }, c.ImGuiCond_Always);

    _ = c.igBegin(
        "Demo Settings",
        null,
        c.ImGuiWindowFlags_NoMove | c.ImGuiWindowFlags_NoResize | c.ImGuiWindowFlags_NoSavedSettings,
    );

    // Help text: each row is bullet + highlighted key + description.
    c.igBulletText("", "");
    c.igSameLine(0, -1);
    c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "Right Mouse Button + drag", "");
    c.igSameLine(0, -1);
    c.igText(" : rotate camera", "");

    c.igBulletText("", "");
    c.igSameLine(0, -1);
    c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "W, A, S, D", "");
    c.igSameLine(0, -1);
    c.igText(" : move camera", "");

    c.igBulletText("", "");
    c.igSameLine(0, -1);
    c.igTextColored(.{ .x = 0, .y = 0.8, .z = 0, .w = 1 }, "SPACE", "");
    c.igSameLine(0, -1);
    c.igText(" : shoot", "");

    c.igEnd();

    // Handle camera rotation with mouse.
    {
        var pos: w32.POINT = undefined;
        _ = w32.GetCursorPos(&pos);
        const delta_x = @intToFloat(f32, pos.x) - @intToFloat(f32, demo.mouse.cursor_prev_x);
        const delta_y = @intToFloat(f32, pos.y) - @intToFloat(f32, demo.mouse.cursor_prev_y);
        demo.mouse.cursor_prev_x = pos.x;
        demo.mouse.cursor_prev_y = pos.y;

        if (w32.GetAsyncKeyState(w32.VK_RBUTTON) < 0) {
            demo.camera.pitch += 0.0025 * delta_y;
            demo.camera.yaw += 0.0025 * delta_x;
            // clamp pitch short of straight up/down and keep yaw wrapped
            demo.camera.pitch = math.min(demo.camera.pitch, 0.48 * math.pi);
            demo.camera.pitch = math.max(demo.camera.pitch, -0.48 * math.pi);
            demo.camera.yaw = zm.modAngle(demo.camera.yaw);
        }
    }

    // Handle camera movement with 'WASD' keys.
    {
        const speed = zm.f32x4s(10.0);
        const delta_time = zm.f32x4s(demo.frame_stats.delta_time);
        // camera orientation from pitch/yaw
        const transform = zm.mul(zm.rotationX(demo.camera.pitch), zm.rotationY(demo.camera.yaw));
        var forward = zm.normalize3(zm.mul(zm.f32x4(0.0, 0.0, 1.0, 0.0), transform));

        zm.store(demo.camera.forward[0..], forward, 3);

        const right = speed * delta_time * zm.normalize3(zm.cross3(zm.f32x4(0.0, 1.0, 0.0, 0.0), forward));
        forward = speed * delta_time * forward;

        var cpos = zm.load(demo.camera.position[0..], zm.Vec, 3);

        if (w32.GetAsyncKeyState('W') < 0) {
            cpos += forward;
        } else if (w32.GetAsyncKeyState('S') < 0) {
            cpos -= forward;
        }
        if (w32.GetAsyncKeyState('D') < 0) {
            cpos += right;
        } else if (w32.GetAsyncKeyState('A') < 0) {
            cpos -= right;
        }

        zm.store(demo.camera.position[0..], cpos, 3);
    }

    // Shooting.
    {
        demo.keyboard_delay += dt;
        // rate-limit to one shot per 0.5s
        if (w32.GetAsyncKeyState(w32.VK_SPACE) < 0 and demo.keyboard_delay >= 0.5) {
            demo.keyboard_delay = 0.0;

            const transform = zm.translationV(zm.load(demo.camera.position[0..], zm.Vec, 3));
            const impulse = zm.f32x4s(50.0) * zm.load(demo.camera.forward[0..], zm.Vec, 3);

            // spawn a new dynamic cube at the camera, reusing the shared box shape
            const body = zbt.Body.init(
                1.0,
                &zm.mat43ToArray(transform),
                demo.physics.shapes.items[0],
            );
            body.setFriction(2.1);
            body.applyCentralImpulse(&zm.vec3ToArray(impulse));
            demo.physics.world.addBody(body);
        }
    }
}

// Record and submit all rendering for one frame.
fn draw(demo: *DemoState) void {
    var gctx = &demo.gctx;

    const cam_world_to_view = zm.lookToLh(
        zm.load(demo.camera.position[0..], zm.Vec, 3),
        zm.load(demo.camera.forward[0..], zm.Vec, 3),
        zm.f32x4(0.0, 1.0, 0.0, 0.0),
    );
    const cam_view_to_clip = zm.perspectiveFovLh(
        0.25 * math.pi,
        @intToFloat(f32, gctx.viewport_width) / @intToFloat(f32, gctx.viewport_height),
        0.01,
        200.0,
    );
    const cam_world_to_clip = zm.mul(cam_world_to_view, cam_view_to_clip);

    gctx.beginFrame();

    const back_buffer = gctx.getBackBuffer();
    gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_RENDER_TARGET);
    gctx.flushResourceBarriers();

    gctx.cmdlist.OMSetRenderTargets(
        1,
        &[_]d3d12.CPU_DESCRIPTOR_HANDLE{back_buffer.descriptor_handle},
        w32.TRUE,
        &demo.depth_texture_dsv,
    );
    gctx.cmdlist.ClearRenderTargetView(
        back_buffer.descriptor_handle,
        &.{ 0.1, 0.1, 0.1, 1.0 },
        0,
        null,
    );
    gctx.cmdlist.ClearDepthStencilView(demo.depth_texture_dsv, d3d12.CLEAR_FLAG_DEPTH, 1.0, 0, 0, null);

    // Set input assembler (IA) state.
    gctx.cmdlist.IASetPrimitiveTopology(.TRIANGLELIST);
    gctx.cmdlist.IASetVertexBuffers(0, 1, &[_]d3d12.VERTEX_BUFFER_VIEW{.{
        .BufferLocation = gctx.lookupResource(demo.vertex_buffer).?.GetGPUVirtualAddress(),
        .SizeInBytes = demo.mesh_num_vertices * @sizeOf(Pso_Vertex),
        .StrideInBytes = @sizeOf(Pso_Vertex),
    }});
    gctx.cmdlist.IASetIndexBuffer(&.{
        .BufferLocation = gctx.lookupResource(demo.index_buffer).?.GetGPUVirtualAddress(),
        .SizeInBytes = demo.mesh_num_indices * @sizeOf(u32),
        .Format = .R32_UINT,
    });

    gctx.setCurrentPipeline(demo.simple_pso);

    // Upload per-frame constant data (camera xform).
    {
        const mem = gctx.allocateUploadMemory(Pso_FrameConst, 1);
        // HLSL expects column-major, hence the transpose
        zm.storeMat(mem.cpu_slice[0].world_to_clip[0..], zm.transpose(cam_world_to_clip));

        gctx.cmdlist.SetGraphicsRootConstantBufferView(1, mem.gpu_base);
    }

    // For each object, upload per-draw constant data (object to world xform) and draw.
    {
        const num_bodies = demo.physics.world.getNumBodies();
        var body_index: i32 = 0;
        while (body_index < num_bodies) : (body_index += 1) {
            const body = demo.physics.world.getBody(body_index);

            // static bodies (mass 0, e.g. the ground) are not drawn as cubes
            if (body.getMass() == 0.0) continue;

            // Get transform matrix from the physics simulator.
            const object_to_world = blk: {
                var transform: [12]f32 = undefined;
                body.getGraphicsWorldTransform(&transform);
                break :blk zm.loadMat43(transform[0..]);
            };

            const mem = gctx.allocateUploadMemory(Pso_DrawConst, 1);
            zm.storeMat(mem.cpu_slice[0].object_to_world[0..], zm.transpose(object_to_world));

            gctx.cmdlist.SetGraphicsRootConstantBufferView(0, mem.gpu_base);
            gctx.cmdlist.DrawIndexedInstanced(demo.mesh_num_indices, 1, 0, 0, 0);
        }
    }

    // Draw physics debug data.
    demo.physics.world.debugDrawAll();
    if (demo.physics.debug.lines.items.len > 0) {
        gctx.setCurrentPipeline(demo.physics_debug_pso);
        gctx.cmdlist.IASetPrimitiveTopology(.LINELIST);
        {
            const mem = gctx.allocateUploadMemory(Pso_FrameConst, 1);
            zm.storeMat(mem.cpu_slice[0].world_to_clip[0..], zm.transpose(cam_world_to_clip));

            gctx.cmdlist.SetGraphicsRootConstantBufferView(0, mem.gpu_base);
        }
        const num_vertices = @intCast(u32, demo.physics.debug.lines.items.len);
        {
            // line vertices are fed to the VS through an SRV, not a vertex buffer
            const mem = gctx.allocateUploadMemory(zbt.DebugDrawer.Vertex, num_vertices);
            for (demo.physics.debug.lines.items) |p, i| {
                mem.cpu_slice[i] = p;
            }
            gctx.cmdlist.SetGraphicsRootShaderResourceView(1, mem.gpu_base);
        }
        gctx.cmdlist.DrawInstanced(num_vertices, 1, 0, 0);
        demo.physics.debug.lines.clearRetainingCapacity();
    }

    demo.guir.draw(gctx);

    gctx.addTransitionBarrier(back_buffer.resource_handle, d3d12.RESOURCE_STATE_PRESENT);
    gctx.flushResourceBarriers();

    gctx.endFrame();
}

// Entry point: init libraries + demo state, then run the frame loop
// until the window is closed.
pub fn main() !void {
    common.init();
    defer common.deinit();

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // Init zbullet library.
    zbt.init(allocator);
    defer zbt.deinit();

    var demo = try init(allocator);
    defer deinit(&demo, allocator);

    while (common.handleWindowEvents()) {
        update(&demo);
        draw(&demo);
    }
}
samples/intro/src/intro6.zig
const std = @import("std");
const Allocator = std.mem.Allocator;

const Data = @This();

/// Tagged payload. Left `undefined` until the creator sets it.
value: union(Type) {
    bool: bool,
    num: f64,
    str: String,
    array: Array,
    map: Map,
    nulled: void,
} = undefined,
/// Allocator that owns any heap memory behind `value` (str/array/map).
allocator: Allocator = undefined,

pub const String = std.ArrayListUnmanaged(u8);
pub const Array = std.ArrayListUnmanaged(Data);
pub const Map = std.StringHashMapUnmanaged(Data);

pub const Type = enum(u8) {
    bool,
    num,
    str,
    array,
    map,
    nulled,
};

/// Shared constant representing the null value.
pub const null_data = Data{ .value = .{ .nulled = .{} } };

/// Recursively frees all heap memory owned by this value.
pub fn deinit(self: *Data) void {
    switch (self.value) {
        .str => self.value.str.deinit(self.allocator),
        .array => {
            for (self.value.array.items) |item| {
                // copy into a mutable local so deinit can take *Data.
                var i = item;
                i.deinit();
            }
            self.value.array.deinit(self.allocator);
        },
        .map => {
            var iter = self.value.map.valueIterator();
            while (iter.next()) |value| {
                value.*.deinit();
            }
            // note: map keys are not freed here (they are never duplicated).
            self.value.map.deinit(self.allocator);
        },
        else => {},
    }
}

/// True if the active tag equals `t`.
pub fn is(self: *const Data, t: Type) bool {
    return self.value == t;
}

/// Returns the payload for tag `t`. Illegal if `t` is not the active tag.
pub fn get(self: *const Data, comptime t: Type) switch (t) {
    .bool => bool,
    .num => f64,
    .str => String,
    .array => Array,
    .map => Map,
    .nulled => @compileError("cannot use Data.get(.nulled)"),
} {
    return @field(self.value, @tagName(t));
}

/// Map lookup without the "_" privacy filter; asserts this is a map.
pub fn findEx(self: *const Data, name: []const u8) Data {
    return switch (self.value) {
        .map => if (self.value.map.get(name)) |data| data else Data.null_data,
        else => unreachable,
    };
}

/// Map lookup; keys starting with "_" are hidden and yield null_data.
pub fn find(self: *const Data, name: []const u8) Data {
    if (std.mem.startsWith(u8, name, "_")) return Data.null_data;
    return self.findEx(name);
}

/// Indexes a str (returns a new 1-char string) or an array (returns a deep
/// copy of the element). Out-of-range indices yield null_data.
/// Caller owns the returned Data (free with deinit).
pub fn index(self: *const Data, in: usize) !Data {
    return switch (self.value) {
        .str => blk: {
            // BUGFIX: `>=`, not `>` — `in == len` is already out of bounds.
            if (in >= self.get(.str).items.len) break :blk Data.null_data;
            var data = Data{ .value = .{ .str = .{} }, .allocator = self.allocator };
            try data.value.str.append(data.allocator, self.get(.str).items[in]);
            break :blk data;
        },
        .array => blk: {
            // BUGFIX: `>=`, not `>` — `in == len` is already out of bounds.
            if (in >= self.get(.array).items.len) break :blk Data.null_data;
            break :blk try self.get(.array).items[in].copy(self.allocator);
        },
        else => unreachable,
    };
}

// Unsafe, check for matching tags separately
pub fn eql(self: *const Data, data: *const Data) bool {
    return switch (self.value) {
        .bool => self.value.bool == data.value.bool,
        .num => self.value.num == data.value.num,
        .str => std.mem.eql(u8, self.value.str.items, data.value.str.items),
        .array => false, // TODO
        .map => false, // TODO
        .nulled => true,
    };
}

/// Deep copy using `allocator`; caller owns the result (free with deinit).
pub fn copy(self: *const Data, allocator: Allocator) Allocator.Error!Data {
    switch (self.value) {
        .bool, .num, .nulled => return self.*,
        .str => {
            var data = Data{ .value = .{ .str = .{} }, .allocator = allocator };
            for (self.value.str.items) |item| {
                try data.value.str.append(allocator, item);
            }
            return data;
        },
        .array => {
            var data = Data{ .value = .{ .array = .{} }, .allocator = allocator };
            for (self.value.array.items) |item| {
                try data.value.array.append(allocator, try item.copy(allocator));
            }
            return data;
        },
        .map => {
            var data = Data{ .value = .{ .map = .{} }, .allocator = allocator };
            var iter = self.value.map.iterator();
            while (iter.next()) |entry| {
                const key = entry.key_ptr.*;
                const value = entry.value_ptr.*;
                // note: the key slice is shared with the source map, not duplicated.
                try data.value.map.put(allocator, key, try value.copy(allocator));
            }
            return data;
        },
    }
}

/// Writes a JSON-like rendering of this value followed by a newline.
pub fn serialize(self: *Data, indent: usize, writer: std.fs.File.Writer) std.os.WriteError!void {
    try self.serializeInternal(0, indent, writer);
    try writer.print("\n", .{});
}

/// `start` is the current absolute indentation; `indent` is the per-level step.
fn serializeInternal(self: *Data, start: usize, indent: usize, writer: std.fs.File.Writer) std.os.WriteError!void {
    switch (self.value) {
        .bool => try writer.print("{}", .{self.value.bool}),
        .num => try writer.print("{}", .{self.value.num}),
        .nulled => try writer.writeAll("null"),
        .str => try writer.print("\"{s}\"", .{self.value.str.items}),
        .array => {
            const arr = self.value.array;
            var id: usize = 0;
            _ = try writer.write("[");
            while (id < arr.items.len) : (id += 1) {
                try arr.items[id].serializeInternal(start, indent, writer);
                if (id != arr.items.len - 1) _ = try writer.write(", ");
            }
            _ = try writer.write("]");
        },
        .map => {
            const map = self.value.map;
            var iter = map.iterator();
            var id: usize = 0;
            _ = try writer.write("{\n");
            while (iter.next()) |entry| : (id += 1) {
                const key = entry.key_ptr.*;
                // keys starting with "_" are private and are not serialized.
                if (std.mem.startsWith(u8, key, "_")) continue;
                // Write key
                _ = try writer.writeByteNTimes(' ', start + indent);
                try writer.print("{s} : ", .{key});
                // Write value
                try entry.value_ptr.*.serializeInternal(start + indent, indent, writer);
                _ = try writer.write("\n");
            }
            _ = try writer.writeByteNTimes(' ', start);
            _ = try writer.write("}");
        },
    }
}
src/Data.zig
// NOTE(review): this file targets a very old Zig dialect (`builtin.os` as a
// value, `c_void`, `extern fn` with a body, two-argument `@ptrCast`) — do not
// modernize without retargeting the compiler version.
const std = @import("std.zig");
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
const expect = std.testing.expect;
const windows = std.os.windows;

/// Lock may be held only once. If the same thread
/// tries to acquire the same mutex twice, it deadlocks.
/// This type is intended to be initialized statically. If you don't
/// require static initialization, use std.Mutex.
/// On Windows, this mutex allocates resources when it is
/// first used, and the resources cannot be freed.
/// On Linux, this is an alias of std.Mutex.
pub const StaticallyInitializedMutex = switch (builtin.os) {
    builtin.Os.linux => std.Mutex,
    builtin.Os.windows => struct {
        // Lazily initialized via `init_once`; see initCriticalSection below.
        lock: windows.CRITICAL_SECTION,
        init_once: windows.RTL_RUN_ONCE,

        /// Token proving the lock is held; release() unlocks it.
        pub const Held = struct {
            mutex: *StaticallyInitializedMutex,

            pub fn release(self: Held) void {
                windows.kernel32.LeaveCriticalSection(&self.mutex.lock);
            }
        };

        pub fn init() StaticallyInitializedMutex {
            return StaticallyInitializedMutex{
                // `lock` stays undefined until the first acquire/deinit runs
                // the one-time initializer.
                .lock = undefined,
                .init_once = windows.INIT_ONCE_STATIC_INIT,
            };
        }

        // One-time callback invoked by InitOnceExecuteOnce; `Parameter` is
        // the address of the CRITICAL_SECTION to initialize.
        extern fn initCriticalSection(
            InitOnce: *windows.RTL_RUN_ONCE,
            Parameter: ?*c_void,
            Context: ?*c_void,
        ) windows.BOOL {
            const lock = @ptrCast(*windows.CRITICAL_SECTION, @alignCast(@alignOf(windows.CRITICAL_SECTION), Parameter));
            windows.kernel32.InitializeCriticalSection(lock);
            return windows.TRUE;
        }

        /// TODO: once https://github.com/ziglang/zig/issues/287 is solved and std.Mutex has a better
        /// implementation of a runtime initialized mutex, remove this function.
        pub fn deinit(self: *StaticallyInitializedMutex) void {
            // Ensure the critical section was initialized before deleting it,
            // so deinit is valid even if acquire was never called.
            windows.InitOnceExecuteOnce(&self.init_once, initCriticalSection, &self.lock, null);
            windows.kernel32.DeleteCriticalSection(&self.lock);
        }

        /// Blocks until the lock is available. Caller must call release()
        /// on the returned Held. Not reentrant: see type-level docs.
        pub fn acquire(self: *StaticallyInitializedMutex) Held {
            windows.InitOnceExecuteOnce(&self.init_once, initCriticalSection, &self.lock, null);
            windows.kernel32.EnterCriticalSection(&self.lock);
            return Held{ .mutex = self };
        }
    },
    else => std.Mutex,
};

test "std.StaticallyInitializedMutex" {
    const TestContext = struct {
        data: i128,

        const TestContext = @This();
        const incr_count = 10000;

        // Statically initialized: exercises the lazy one-time init path.
        var mutex = StaticallyInitializedMutex.init();

        fn worker(ctx: *TestContext) void {
            var i: usize = 0;
            while (i != TestContext.incr_count) : (i += 1) {
                const held = mutex.acquire();
                defer held.release();
                ctx.data += 1;
            }
        }
    };

    var plenty_of_memory = try std.heap.direct_allocator.alloc(u8, 300 * 1024);
    defer std.heap.direct_allocator.free(plenty_of_memory);

    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
    // NOTE(review): `a` appears unused below — Thread.spawn here takes no
    // allocator. Possibly leftover scaffolding; confirm before removing.
    var a = &fixed_buffer_allocator.allocator;

    var context = TestContext{ .data = 0 };

    if (builtin.single_threaded) {
        TestContext.worker(&context);
        expect(context.data == TestContext.incr_count);
    } else {
        const thread_count = 10;
        var threads: [thread_count]*std.Thread = undefined;
        for (threads) |*t| {
            t.* = try std.Thread.spawn(&context, TestContext.worker);
        }
        for (threads) |t| t.wait();
        expect(context.data == thread_count * TestContext.incr_count);
    }
}
lib/std/statically_initialized_mutex.zig
// Zig bindings for the wlroots session API. The extern structs below mirror
// the C layout field-for-field — do not reorder or add fields.
const os = @import("std").os;
const wayland = @import("wayland");
const wl = wayland.server.wl;

/// Mirror of wlr_device: an opened DRM/input device owned by the session.
pub const Device = extern struct {
    fd: c_int,
    device_id: c_int,
    dev: os.dev_t,
    /// Session.devices
    link: wl.list.Link,

    events: extern struct {
        change: wl.Signal(void),
        remove: wl.Signal(void),
    },
};

/// Mirror of wlr_session.
pub const Session = extern struct {
    pub const event = struct {
        pub const Add = extern struct {
            path: [*:0]const u8,
        };
    };

    active: bool,
    vtnr: c_uint,
    seat: [256]u8,

    // TODO: do we need libudev bindings?
    // Opaque pointers: the udev types are intentionally not bound.
    udev: *opaque {},
    udev_monitor: *opaque {},
    udev_event: *wl.EventSource,

    seat_handle: *opaque {},
    libseat_event: *wl.EventSource,

    devices: wl.list.Head(Device, "link"),

    server: *wl.Server,
    server_destroy: wl.Listener(*wl.Server),

    events: extern struct {
        active: wl.Signal(void),
        add_drm_card: wl.Signal(*event.Add),
        destroy: wl.Signal(*Session),
    },

    extern fn wlr_session_create(server: *wl.Server) ?*Session;
    /// Wraps wlr_session_create; null from C becomes a Zig error.
    pub fn create(server: *wl.Server) !*Session {
        return wlr_session_create(server) orelse error.SessionCreateFailed;
    }

    extern fn wlr_session_destroy(session: *Session) void;
    pub const destroy = wlr_session_destroy;

    extern fn wlr_session_open_file(session: *Session, path: [*:0]const u8) ?*Device;
    /// Opens a device file through the session; null from C becomes an error.
    pub fn openFile(session: *Session, path: [*:0]const u8) !*Device {
        return wlr_session_open_file(session, path) orelse error.SessionOpenFileFailed;
    }

    extern fn wlr_session_close_file(session: *Session, device: *Device) void;
    pub const closeFile = wlr_session_close_file;

    extern fn wlr_session_change_vt(session: *Session, vt: c_uint) bool;
    /// Switches virtual terminal; false from C becomes error.ChangeVtFailed.
    pub fn changeVt(session: *Session, vt: c_uint) !void {
        if (!wlr_session_change_vt(session, vt)) return error.ChangeVtFailed;
    }

    extern fn wlr_session_find_gpus(session: *Session, ret_len: usize, ret: [*]*Device) isize;
    // NOTE(review): returns isize — presumably the number of GPUs written, with
    // a negative value on error; confirm against the wlroots headers.
    pub const findGpus = wlr_session_find_gpus;
};
src/backend/session.zig
const std = @import("std"); const ArrayList = std.ArrayList; const core = @import("../index.zig"); const Coord = core.geometry.Coord; const isCardinalDirection = core.geometry.isCardinalDirection; const isScaledCardinalDirection = core.geometry.isScaledCardinalDirection; const directionToCardinalIndex = core.geometry.directionToCardinalIndex; const makeCoord = core.geometry.makeCoord; const zero_vector = makeCoord(0, 0); const Action = core.protocol.Action; const Species = core.protocol.Species; const Floor = core.protocol.Floor; const Wall = core.protocol.Wall; const PerceivedFrame = core.protocol.PerceivedFrame; const ThingPosition = core.protocol.ThingPosition; const PerceivedThing = core.protocol.PerceivedThing; const PerceivedActivity = core.protocol.PerceivedActivity; const TerrainSpace = core.protocol.TerrainSpace; const StatusConditions = core.protocol.StatusConditions; const view_distance = core.game_logic.view_distance; const isOpenSpace = core.game_logic.isOpenSpace; const getHeadPosition = core.game_logic.getHeadPosition; const getAllPositions = core.game_logic.getAllPositions; const applyMovementToPosition = core.game_logic.applyMovementToPosition; const getInertiaIndex = core.game_logic.getInertiaIndex; const game_model = @import("./game_model.zig"); const GameState = game_model.GameState; const Individual = game_model.Individual; const StateDiff = game_model.StateDiff; const IdMap = game_model.IdMap; const CoordMap = game_model.CoordMap; const Terrain = game_model.Terrain; const oob_terrain = game_model.oob_terrain; /// Allocates and then calls `init(allocator)` on the new object. 
pub fn createInit(allocator: *std.mem.Allocator, comptime T: type) !*T {
    var x = try allocator.create(T);
    x.* = T.init(allocator);
    return x;
}

/// Returns the first id >= cursor not present in usedIds, then advances the
/// cursor past it (the defer runs after the return value is computed).
fn findAvailableId(cursor: *u32, usedIds: IdMap(*Individual)) u32 {
    while (usedIds.contains(cursor.*)) {
        cursor.* += 1;
    }
    defer cursor.* += 1;
    return cursor.*;
}

pub const GameEngine = struct {
    allocator: *std.mem.Allocator,

    pub fn init(self: *GameEngine, allocator: *std.mem.Allocator) void {
        self.* = GameEngine{
            .allocator = allocator,
        };
    }

    /// Validates the geometry of an action (e.g. moves must be cardinal).
    pub fn validateAction(self: *const GameEngine, action: Action) bool {
        switch (action) {
            .wait => return true,
            .move => |move_delta| return isCardinalDirection(move_delta),
            .fast_move => |move_delta| return isScaledCardinalDirection(move_delta, 2),
            .attack => |direction| return isCardinalDirection(direction),
            .kick => |direction| return isCardinalDirection(direction),
        }
    }

    /// Result of one turn: per-individual perceived animation frames plus the
    /// diffs to apply to the authoritative game state.
    pub const Happenings = struct {
        individual_to_perception: IdMap([]PerceivedFrame),
        state_changes: []StateDiff,
    };

    /// Computes what would happen to the state of the game.
    /// This is the entry point for all game rules.
    /// Phases, in order: moves, kicks (which can force moves), status
    /// conditions, attacks, lava, traps (polymorph), then diff building.
    pub fn computeHappenings(self: *const GameEngine, game_state: *GameState, actions: IdMap(Action)) !Happenings {
        // cache the set of keys so iterator is easier.
        var everybody = try self.allocator.alloc(u32, game_state.individuals.count());
        {
            var iterator = game_state.individuals.iterator();
            for (everybody) |*x| {
                x.* = iterator.next().?.key;
            }
            std.debug.assert(iterator.next() == null);
        }
        // `everybody` shrinks as individuals die; this copy keeps everyone.
        const everybody_including_dead = try std.mem.dupe(self.allocator, u32, everybody);

        var budges_at_all = IdMap(void).init(self.allocator);

        var individual_to_perception = IdMap(*MutablePerceivedHappening).init(self.allocator);
        for (everybody_including_dead) |id| {
            try individual_to_perception.putNoClobber(id, try createInit(self.allocator, MutablePerceivedHappening));
        }

        // Working copies of positions and statuses; the real state is only
        // mutated at the end via applyStateChanges.
        var current_positions = IdMap(ThingPosition).init(self.allocator);
        for (everybody) |id| {
            try current_positions.putNoClobber(id, game_state.individuals.get(id).?.abs_position);
        }

        var current_status_conditions = IdMap(StatusConditions).init(self.allocator);
        for (everybody) |id| {
            try current_status_conditions.putNoClobber(id, game_state.individuals.get(id).?.status_conditions);
        }

        var total_deaths = IdMap(void).init(self.allocator);

        // Voluntary moves.
        {
            var intended_moves = IdMap(Coord).init(self.allocator);
            for (everybody) |id| {
                const move_delta: Coord = switch (actions.get(id).?) {
                    .move, .fast_move => |move_delta| move_delta,
                    else => continue,
                };
                // Limping individuals cannot move this turn.
                if (0 != current_status_conditions.get(id).? & core.protocol.StatusCondition_limping) {
                    // nope.avi
                    continue;
                }
                try intended_moves.putNoClobber(id, move_delta);
            }

            try self.doMovementAndCollisions(
                game_state,
                &everybody,
                &individual_to_perception,
                &current_positions,
                &intended_moves,
                &budges_at_all,
                &total_deaths,
            );
        }

        // Kicks
        {
            var kicks = IdMap(Coord).init(self.allocator);
            // Forced moves imparted on kicked individuals.
            var intended_moves = IdMap(Coord).init(self.allocator);
            var kicked_too_much = IdMap(void).init(self.allocator);
            for (everybody) |id| {
                const kick_direction: Coord = switch (actions.get(id).?) {
                    .kick => |direction| direction,
                    else => continue,
                };
                const attacker_coord = getHeadPosition(current_positions.get(id).?);
                const kick_position = attacker_coord.plus(kick_direction);
                for (everybody) |other_id| {
                    const position = current_positions.get(other_id).?;
                    for (getAllPositions(&position)) |coord, i| {
                        if (!coord.equals(kick_position)) continue;
                        // gotchya
                        // Heavier (higher-inertia) individuals cannot be kicked.
                        if (getInertiaIndex(game_state.individuals.get(other_id).?.species) > 0) {
                            // Your kick is not stronk enough.
                            continue;
                        }
                        if (try intended_moves.fetchPut(other_id, kick_direction)) |_| {
                            // kicked multiple times at once!
                            _ = try kicked_too_much.put(other_id, {});
                        }
                    }
                }
                try kicks.putNoClobber(id, kick_direction);
            }
            if (kicks.count() > 0) {
                for (everybody) |id| {
                    try self.observeFrame(
                        game_state,
                        id,
                        individual_to_perception.get(id).?,
                        &current_positions,
                        Activities{
                            .kicks = &kicks,
                        },
                    );
                }

                // for now, multiple kicks at once just fail.
                var iterator = kicked_too_much.iterator();
                while (iterator.next()) |kv| {
                    intended_moves.removeAssertDiscard(kv.key);
                }

                try self.doMovementAndCollisions(
                    game_state,
                    &everybody,
                    &individual_to_perception,
                    &current_positions,
                    &intended_moves,
                    &budges_at_all,
                    &total_deaths,
                );
            }
        }

        // Check status conditions
        for (everybody) |id| {
            const status_conditions = &current_status_conditions.getEntry(id).?.value;
            if (game_state.terrainAt(getHeadPosition(current_positions.get(id).?)).floor == .marble) {
                // level transitions remedy all status ailments.
                status_conditions.* = 0;
            } else if (!budges_at_all.contains(id)) {
                // you held still, so you are free of any limping status.
                status_conditions.* &= ~core.protocol.StatusCondition_limping;
            } else if (0 != status_conditions.* & core.protocol.StatusCondition_wounded_leg) {
                // you moved while wounded. now you limp.
                status_conditions.* |= core.protocol.StatusCondition_limping;
            }
        }

        // Attacks
        var attacks = IdMap(Activities.Attack).init(self.allocator);
        var attack_deaths = IdMap(void).init(self.allocator);
        for (everybody) |id| {
            var attack_direction: Coord = switch (actions.get(id).?) {
                .attack => |direction| direction,
                else => continue,
            };
            var attacker_coord = getHeadPosition(current_positions.get(id).?);
            var attack_distance: i32 = 1;
            const range = core.game_logic.getAttackRange(game_state.individuals.get(id).?.species);
            // Scan outward along the attack direction; stop at the first hit.
            range_loop: while (attack_distance <= range) : (attack_distance += 1) {
                var damage_position = attacker_coord.plus(attack_direction.scaled(attack_distance));
                for (everybody) |other_id| {
                    const position = current_positions.get(other_id).?;
                    for (getAllPositions(&position)) |coord, i| {
                        if (!coord.equals(damage_position)) continue;
                        // hit something.
                        const other = game_state.individuals.get(other_id).?;
                        const is_effective = blk: {
                            // innate defense
                            if (!core.game_logic.isAffectedByAttacks(other.species, i)) break :blk false;
                            // shield blocks arrows
                            if (range > 1 and other.has_shield) break :blk false;
                            break :blk true;
                        };
                        if (is_effective) {
                            // get wrecked
                            const other_status_conditions = &current_status_conditions.getEntry(other_id).?.value;
                            if (other_status_conditions.* & core.protocol.StatusCondition_wounded_leg == 0) {
                                // first hit is a wound
                                other_status_conditions.* |= core.protocol.StatusCondition_wounded_leg;
                            } else {
                                // second hit. you ded.
                                _ = try attack_deaths.put(other_id, {});
                            }
                        }
                        break :range_loop;
                    }
                }
            }
            try attacks.putNoClobber(id, Activities.Attack{
                .direction = attack_direction,
                .distance = attack_distance,
            });
        }
        // Lava
        for (everybody) |id| {
            const position = current_positions.get(id).?;
            for (getAllPositions(&position)) |coord| {
                if (game_state.terrainAt(coord).floor == .lava) {
                    _ = try attack_deaths.put(id, {});
                }
            }
        }
        // Perception of Attacks and Death
        for (everybody) |id| {
            if (attacks.count() != 0) {
                try self.observeFrame(
                    game_state,
                    id,
                    individual_to_perception.get(id).?,
                    &current_positions,
                    Activities{ .attacks = &attacks },
                );
            }
            if (attack_deaths.count() != 0) {
                try self.observeFrame(
                    game_state,
                    id,
                    individual_to_perception.get(id).?,
                    &current_positions,
                    Activities{ .deaths = &attack_deaths },
                );
            }
        }
        try flushDeaths(&total_deaths, &attack_deaths, &everybody);

        // Traps
        var polymorphs = IdMap(Species).init(self.allocator);
        for (everybody) |id| {
            const position = current_positions.get(id).?;
            for (getAllPositions(&position)) |coord| {
                if (game_state.terrainAt(coord).wall == .centaur_transformer and game_state.individuals.get(id).?.species != .centaur) {
                    try polymorphs.putNoClobber(id, .centaur);
                }
            }
        }
        if (polymorphs.count() != 0) {
            for (everybody) |id| {
                try self.observeFrame(
                    game_state,
                    id,
                    individual_to_perception.get(id).?,
                    &current_positions,
                    Activities{ .polymorphs = &polymorphs },
                );
            }
        }

        // NOTE(review): appears unused in this function — possibly leftover
        // from a spawn feature; confirm before removing.
        var new_id_cursor: u32 = @intCast(u32, game_state.individuals.count());

        // build state changes
        var state_changes = ArrayList(StateDiff).init(self.allocator);
        for (everybody_including_dead) |id| {
            switch (game_state.individuals.get(id).?.abs_position) {
                .small => |coord| {
                    const from = coord;
                    const to = current_positions.get(id).?.small;
                    if (to.equals(from)) continue;
                    const delta = to.minus(from);
                    try state_changes.append(StateDiff{
                        .small_move = .{
                            .id = id,
                            .coord = delta,
                        },
                    });
                },
                .large => |coords| {
                    const from = coords;
                    const to = current_positions.get(id).?.large;
                    if (to[0].equals(from[0]) and to[1].equals(from[1])) continue;
                    try state_changes.append(StateDiff{
                        .large_move = .{
                            .id = id,
                            .coords = .{
                                to[0].minus(from[0]),
                                to[1].minus(from[1]),
                            },
                        },
                    });
                },
            }
        }
        {
            var iterator = polymorphs.iterator();
            while (iterator.next()) |kv| {
                try state_changes.append(StateDiff{
                    .polymorph = .{
                        .id = kv.key,
                        .from = game_state.individuals.get(kv.key).?.species,
                        .to = kv.value,
                    },
                });
            }
        }
        for (everybody_including_dead) |id| {
            const old = game_state.individuals.get(id).?.status_conditions;
            const new = current_status_conditions.get(id).?;
            if (old != new) {
                try state_changes.append(StateDiff{
                    .status_condition_diff = .{
                        .id = id,
                        .from = old,
                        .to = new,
                    },
                });
            }
        }
        {
            var iterator = total_deaths.iterator();
            while (iterator.next()) |kv| {
                const id = kv.key;
                try state_changes.append(StateDiff{
                    .despawn = blk: {
                        var individual = game_state.individuals.get(id).?.*;
                        // record the final (post-movement) position in the diff.
                        individual.abs_position = current_positions.get(id).?;
                        break :blk .{
                            .id = id,
                            .individual = individual,
                        };
                    },
                });
            }
        }

        // final observations
        try game_state.applyStateChanges(state_changes.items);
        current_positions.clearRetainingCapacity();
        for (everybody) |id| {
            try self.observeFrame(
                game_state,
                id,
                individual_to_perception.get(id).?,
                null,
                Activities.static_state,
            );
        }

        return Happenings{
            .individual_to_perception = blk: {
                var ret = IdMap([]PerceivedFrame).init(self.allocator);
                for (everybody_including_dead) |id| {
                    var frame_list = individual_to_perception.get(id).?.frames;
                    // remove empty frames, except the last one
                    var i: usize = 0;
                    // wrapping ops let `i` go to maxInt(usize) and back to 0
                    // when the frame at index 0 is removed.
                    frameLoop: while (i + 1 < frame_list.items.len) : (i +%= 1) {
                        const frame = frame_list.items[i];
                        if (frame.self.activity != PerceivedActivity.none) continue :frameLoop;
                        for (frame.others) |other| {
                            if (other.activity != PerceivedActivity.none) continue :frameLoop;
                        }
                        // delete this frame
                        _ = frame_list.orderedRemove(i);
                        i -%= 1;
                    }
                    try ret.putNoClobber(id, frame_list.toOwnedSlice());
                }
                break :blk ret;
            },
            .state_changes = state_changes.items,
        };
    }

    /// Resolves one round of movement: clamps moves at walls, detects
    /// collisions per destination coord, picks winners, unwinds failed conga
    /// lines, records trample deaths, and updates current_positions.
    /// Mutates: everybody (via flushDeaths), intended_moves (clamped),
    /// budges_at_all, total_deaths.
    fn doMovementAndCollisions(
        self: *const GameEngine,
        game_state: *GameState,
        everybody: *[]u32,
        individual_to_perception: *IdMap(*MutablePerceivedHappening),
        current_positions: *IdMap(ThingPosition),
        intended_moves: *IdMap(Coord),
        budges_at_all: *IdMap(void),
        total_deaths: *IdMap(void),
    ) !void {
        var next_positions = IdMap(ThingPosition).init(self.allocator);
        for (everybody.*) |id| {
            // seek forward and stop at any wall.
            const initial_head_coord = getHeadPosition(current_positions.get(id).?);
            const move_delta = intended_moves.get(id) orelse continue;
            const move_unit = move_delta.signumed();
            const move_magnitude = move_delta.magnitudeDiag();
            var distance: i32 = 1;
            while (true) : (distance += 1) {
                const new_head_coord = initial_head_coord.plus(move_unit.scaled(distance));
                if (!isOpenSpace(game_state.terrainAt(new_head_coord).wall)) {
                    // bonk
                    distance -= 1;
                    break;
                }
                if (distance >= move_magnitude) break;
            }
            if (distance == 0) {
                // no move for you
                continue;
            }
            // found an open space
            const adjusted_move_delta = move_unit.scaled(distance);
            // key already exists, so put cannot fail.
            _ = intended_moves.put(id, adjusted_move_delta) catch unreachable;
            try next_positions.putNoClobber(id, applyMovementToPosition(current_positions.get(id).?, adjusted_move_delta));
        }

        var trample_deaths = IdMap(void).init(self.allocator);

        // Collision detection and resolution.
        {
            // Everyone arriving at (or staying on) a coord, bucketed by
            // inertia tier and approach direction/speed.
            const Collision = struct {
                /// this has at most one entry
                inertia_index_to_stationary_id: [2]?u32 = [_]?u32{null} ** 2,
                inertia_index_to_cardinal_index_to_enterer: [2][4]?u32 = [_][4]?u32{[_]?u32{null} ** 4} ** 2,
                inertia_index_to_cardinal_index_to_fast_enterer: [2][4]?u32 = [_][4]?u32{[_]?u32{null} ** 4} ** 2,
                winner_id: ?u32 = null,
            };

            // Collect collision information.
            var coord_to_collision = CoordMap(Collision).init(self.allocator);
            for (everybody.*) |id| {
                const inertia_index = getInertiaIndex(game_state.individuals.get(id).?.species);
                const old_position = current_positions.get(id).?;
                const new_position = next_positions.get(id) orelse old_position;
                for (getAllPositions(&new_position)) |new_coord, i| {
                    const old_coord = getAllPositions(&old_position)[i];
                    const delta = new_coord.minus(old_coord);
                    var collision = coord_to_collision.get(new_coord) orelse Collision{};
                    if (delta.equals(zero_vector)) {
                        collision.inertia_index_to_stationary_id[inertia_index] = id;
                    } else if (isCardinalDirection(delta)) {
                        collision.inertia_index_to_cardinal_index_to_enterer[inertia_index][directionToCardinalIndex(delta)] = id;
                    } else if (isScaledCardinalDirection(delta, 2)) {
                        collision.inertia_index_to_cardinal_index_to_fast_enterer[inertia_index][directionToCardinalIndex(delta.scaledDivTrunc(2))] = id;
                    } else unreachable;
                    _ = try coord_to_collision.put(new_coord, collision);
                }
            }

            // Determine who wins each collision
            {
                var iterator = coord_to_collision.iterator();
                while (iterator.next()) |kv| {
                    var collision: *Collision = &kv.value;
                    var is_trample = false;
                    // higher inertia trumps any lower inertia
                    for ([_]u1{ 1, 0 }) |inertia_index| {
                        if (is_trample) {
                            // You can't win. You can only get trampled.
                            for ([_]?u32{
                                collision.inertia_index_to_stationary_id[inertia_index],
                                collision.inertia_index_to_cardinal_index_to_enterer[inertia_index][0],
                                collision.inertia_index_to_cardinal_index_to_enterer[inertia_index][1],
                                collision.inertia_index_to_cardinal_index_to_enterer[inertia_index][2],
                                collision.inertia_index_to_cardinal_index_to_enterer[inertia_index][3],
                                collision.inertia_index_to_cardinal_index_to_fast_enterer[inertia_index][0],
                                collision.inertia_index_to_cardinal_index_to_fast_enterer[inertia_index][1],
                                collision.inertia_index_to_cardinal_index_to_fast_enterer[inertia_index][2],
                                collision.inertia_index_to_cardinal_index_to_fast_enterer[inertia_index][3],
                            }) |maybe_id| {
                                if (maybe_id) |trampled_id| {
                                    try trample_deaths.putNoClobber(trampled_id, {});
                                }
                            }
                        } else {
                            // Bit layout: bit 0 = stationary, bits 1-4 = slow
                            // enterers by cardinal index, bits 5-8 = fast.
                            var incoming_vector_set: u9 = 0;
                            if (collision.inertia_index_to_stationary_id[inertia_index] != null) incoming_vector_set |= 1 << 0;
                            for (collision.inertia_index_to_cardinal_index_to_enterer[inertia_index]) |maybe_id, i| {
                                if (maybe_id != null) incoming_vector_set |= @as(u9, 1) << (1 + @as(u4, @intCast(u2, i)));
                            }
                            for (collision.inertia_index_to_cardinal_index_to_fast_enterer[inertia_index]) |maybe_id, i| {
                                if (maybe_id != null) incoming_vector_set |= @as(u9, 1) << (5 + @as(u4, @intCast(u2, i)));
                            }
                            if (incoming_vector_set & 1 != 0) {
                                // Stationary entities always win.
                                collision.winner_id = collision.inertia_index_to_stationary_id[inertia_index];
                                // Standing still doesn't trample.
                            } else if (cardinalIndexBitSetToCollisionWinnerIndex(@intCast(u4, (incoming_vector_set & 0b111100000) >> 5))) |index| {
                                // fast bois beat slow bois.
                                collision.winner_id = collision.inertia_index_to_cardinal_index_to_fast_enterer[inertia_index][index].?;
                                is_trample = inertia_index > 0;
                            } else if (cardinalIndexBitSetToCollisionWinnerIndex(@intCast(u4, (incoming_vector_set & 0b11110) >> 1))) |index| {
                                // a slow boi wins.
                                collision.winner_id = collision.inertia_index_to_cardinal_index_to_enterer[inertia_index][index].?;
                                is_trample = inertia_index > 0;
                            } else {
                                // nobody wins. winner stays null.
                            }
                        }
                    }
                }
            }
            // Cancel the moves of everyone who lost their collision.
            id_loop: for (everybody.*) |id| {
                if (trample_deaths.contains(id)) {
                    // The move succeeds so that we see where the squashing happens.
                    continue;
                }
                const next_position = next_positions.get(id) orelse continue;
                for (getAllPositions(&next_position)) |coord| {
                    var collision = coord_to_collision.get(coord).?;
                    // TODO: https://github.com/ziglang/zig/issues/1332 if (collision.winner_id != id)
                    if (!(collision.winner_id != null and collision.winner_id.? == id)) {
                        // i lose.
                        next_positions.removeAssertDiscard(id);
                        continue :id_loop;
                    }
                }
            }

            // Check for unsuccessful conga lines.
            // for example:
            //   🐕 -> 🐕
            //         V
            //   🐕 <- 🐕
            // This conga line would be unsuccessful because the head of the line isn't successfully moving.
            for (everybody.*) |head_id| {
                // anyone who fails to move could be the head of a sad conga line.
                if (next_positions.contains(head_id)) continue;
                if (trample_deaths.contains(head_id)) {
                    // You're dead to me.
                    continue;
                }
                var id = head_id;
                // Walk backwards along the chain of followers, cancelling
                // each one's move.
                conga_loop: while (true) {
                    const position = current_positions.get(id).?;
                    for (getAllPositions(&position)) |coord| {
                        const follower_id = (coord_to_collision.remove(coord) orelse continue).value.winner_id orelse continue;
                        if (follower_id == id) continue; // your tail is always allowed to follow your head.
                        // conga line is botched.
                        if (getInertiaIndex(game_state.individuals.get(follower_id).?.species) > getInertiaIndex(game_state.individuals.get(head_id).?.species)) {
                            // Outta my way!
                            try trample_deaths.putNoClobber(head_id, {});
                            // Party don't stop for this pushover's failure! Viva la conga!
                            break :conga_loop;
                        }
                        _ = next_positions.remove(follower_id);
                        // and now you get to pass on the bad news.
                        id = follower_id;
                        continue :conga_loop;
                    }
                    // no one wants to move into this space.
                    break;
                }
            }
        }

        // Observe movement.
        for (everybody.*) |id| {
            try self.observeFrame(
                game_state,
                id,
                individual_to_perception.get(id).?,
                current_positions,
                Activities{
                    .movement = .{
                        .intended_moves = intended_moves,
                        .next_positions = &next_positions,
                    },
                },
            );
        }

        // Update positions from movement.
        for (everybody.*) |id| {
            const next_position = next_positions.get(id) orelse continue;
            current_positions.getEntry(id).?.value = next_position;
        }

        for (everybody.*) |id| {
            try self.observeFrame(
                game_state,
                id,
                individual_to_perception.get(id).?,
                current_positions,
                Activities{ .deaths = &trample_deaths },
            );
        }
        try flushDeaths(total_deaths, &trample_deaths, everybody);

        // Anyone who attempted to move at all "budged" (used for limping).
        var iterator = intended_moves.iterator();
        while (iterator.next()) |kv| {
            const id = kv.key;
            _ = try budges_at_all.put(id, {});
        }
    }

    /// What is being animated in one perceived frame.
    const Activities = union(enum) {
        static_state,
        movement: Movement,
        attacks: *const IdMap(Attack),
        kicks: *const IdMap(Coord),
        polymorphs: *const IdMap(Species),
        deaths: *const IdMap(void),

        const Movement = struct {
            intended_moves: *const IdMap(Coord),
            next_positions: *const IdMap(ThingPosition),
        };
        const Attack = struct {
            direction: Coord,
            distance: i32,
        };
    };

    /// Appends one perceived frame for `my_id` to its perception accumulator.
    fn observeFrame(
        self: *const GameEngine,
        game_state: *const GameState,
        my_id: u32,
        perception: *MutablePerceivedHappening,
        maybe_current_positions: ?*const IdMap(ThingPosition),
        activities: Activities,
    ) !void {
        try perception.frames.append(try getPerceivedFrame(
            self,
            game_state,
            my_id,
            maybe_current_positions,
            activities,
        ));
    }

    /// Renders the current committed game state from one individual's view.
    pub fn getStaticPerception(self: *const GameEngine, game_state: GameState, individual_id: u32) !PerceivedFrame {
        return getPerceivedFrame(
            self,
            &game_state,
            individual_id,
            null,
            Activities.static_state,
        );
    }

    /// Builds one frame as seen by `my_id`: visible individuals (positions
    /// relative to own head, limited by view_distance), terrain chunk, and
    /// the winning score (null as soon as another species is seen).
    fn getPerceivedFrame(
        self: *const GameEngine,
        game_state: *const GameState,
        my_id: u32,
        maybe_current_positions: ?*const IdMap(ThingPosition),
        activities: Activities,
    ) !PerceivedFrame {
        const your_coord = getHeadPosition(if (maybe_current_positions) |current_positions|
            current_positions.get(my_id).?
        else
            game_state.individuals.get(my_id).?.abs_position);
        var yourself: ?PerceivedThing = null;
        var others = ArrayList(PerceivedThing).init(self.allocator);

        {
            var iterator = game_state.individuals.iterator();
            while (iterator.next()) |kv| {
                const id = kv.key;
                // What this individual appears to be doing this frame.
                const activity = switch (activities) {
                    .movement => |data| if (data.intended_moves.get(id)) |move_delta|
                        if (data.next_positions.contains(id))
                            PerceivedActivity{ .movement = move_delta }
                        else
                            PerceivedActivity{ .failed_movement = move_delta }
                    else
                        PerceivedActivity{ .none = {} },

                    .attacks => |data| if (data.get(id)) |attack|
                        PerceivedActivity{
                            .attack = PerceivedActivity.Attack{
                                .direction = attack.direction,
                                .distance = attack.distance,
                            },
                        }
                    else
                        PerceivedActivity{ .none = {} },

                    .kicks => |data| if (data.get(id)) |coord|
                        PerceivedActivity{ .kick = coord }
                    else
                        PerceivedActivity{ .none = {} },

                    .polymorphs => |data| if (data.get(id)) |species|
                        PerceivedActivity{ .polymorph = species }
                    else
                        PerceivedActivity{ .none = {} },

                    .deaths => |data| if (data.get(id)) |_|
                        PerceivedActivity{ .death = {} }
                    else
                        PerceivedActivity{ .none = {} },

                    .static_state => PerceivedActivity{ .none = {} },
                };

                var abs_position = if (maybe_current_positions) |current_positions|
                    current_positions.get(id).?
                else
                    game_state.individuals.get(id).?.abs_position;
                var rel_position: ThingPosition = undefined;
                switch (abs_position) {
                    .small => |coord| {
                        rel_position = .{ .small = coord.minus(your_coord) };
                    },
                    .large => |coords| {
                        rel_position = .{
                            .large = .{
                                coords[0].minus(your_coord),
                                coords[1].minus(your_coord),
                            },
                        };
                    },
                }
                // if any position is within view, we can see all of it.
                const within_view = blk: for (getAllPositions(&rel_position)) |delta| {
                    if (delta.magnitudeDiag() <= view_distance) break :blk true;
                } else false;
                if (!within_view) continue;

                const actual_thing = game_state.individuals.get(id).?;
                const thing = PerceivedThing{
                    .species = actual_thing.species,
                    .rel_position = rel_position,
                    .status_conditions = actual_thing.status_conditions,
                    .has_shield = actual_thing.has_shield,
                    .activity = activity,
                };
                if (id == my_id) {
                    yourself = thing;
                } else {
                    try others.append(thing);
                }
            }
        }

        // Copy the visible square of terrain; out-of-bounds cells keep the
        // oob_terrain fill.
        const view_size = view_distance * 2 + 1;
        var terrain_chunk = core.protocol.TerrainChunk{
            .rel_position = makeCoord(-view_distance, -view_distance),
            .matrix = try Terrain.initFill(self.allocator, view_size, view_size, oob_terrain),
        };
        const view_origin = your_coord.minus(makeCoord(view_distance, view_distance));
        var cursor = Coord{ .x = undefined, .y = 0 };
        while (cursor.y < view_size) : (cursor.y += 1) {
            cursor.x = 0;
            while (cursor.x < view_size) : (cursor.x += 1) {
                if (game_state.terrain.getCoord(cursor.plus(view_origin))) |cell| {
                    terrain_chunk.matrix.atCoord(cursor).?.* = cell;
                }
            }
        }

        // Score is the count of remaining individuals, but only when they
        // are all the same species as the viewer; otherwise null.
        var winning_score: ?i32 = 0;
        {
            const my_species = game_state.individuals.get(my_id).?.species;
            var iterator = game_state.individuals.iterator();
            while (iterator.next()) |kv| {
                if (my_species != kv.value.species) {
                    winning_score = null;
                    break;
                }
                winning_score.? += 1;
            }
        }

        return PerceivedFrame{
            .self = yourself.?,
            .others = others.toOwnedSlice(),
            .terrain = terrain_chunk,
            .winning_score = winning_score,
        };
    }
};

/// Growable list of frames accumulated for one individual during a turn.
const MutablePerceivedHappening = struct {
    frames: ArrayList(PerceivedFrame),
    pub fn init(allocator: *std.mem.Allocator) MutablePerceivedHappening {
        return MutablePerceivedHappening{
            .frames = ArrayList(PerceivedFrame).init(allocator),
        };
    }
};

/// Merges local_deaths into total_deaths and swap-removes each newly dead id
/// from `everybody` (shrinking the slice in place). Clears local_deaths.
fn flushDeaths(total_deaths: *IdMap(void), local_deaths: *IdMap(void), everybody: *[]u32) !void {
    var iterator = local_deaths.iterator();
    while (iterator.next()) |kv| {
        const id = kv.key;
        // write local deaths to total deaths
        if (null == try total_deaths.fetchPut(id, {})) {
            // actually removed.
            const index = std.mem.indexOfScalar(u32, everybody.*, id).?;
            // swap remove
            everybody.*[index] = everybody.*[everybody.len - 1];
            everybody.len -= 1;
        }
    }
    local_deaths.clearRetainingCapacity();
}

/// See geometry for cardinal index definition.
/// e.g. 0b0001: just right, 0b1100: left and up.
/// Note that the directions are the movement directions, not the "from" directions.
/// Returning null means no winner.
fn cardinalIndexBitSetToCollisionWinnerIndex(cardinal_index_bit_set: u4) ?u2 {
    switch (cardinal_index_bit_set) {
        // nobody here.
        0b0000 => return null,
        // only one person
        0b0001 => return 0,
        0b0010 => return 1,
        0b0100 => return 2,
        0b1000 => return 3,
        // 2-way head-on collision
        0b0101 => return null,
        0b1010 => return null,
        // 2-way right-angle collision.
        // give right of way to the one on the right.
        0b0011 => return 0,
        0b0110 => return 1,
        0b1100 => return 2,
        0b1001 => return 3,
        // 3-way collision. the middle one wins.
        0b1011 => return 0,
        0b0111 => return 1,
        0b1110 => return 2,
        0b1101 => return 3,
        // 4-way collision
        0b1111 => return null,
    }
}
src/server/game_engine.zig
//! ACPI table discovery/parsing and the host callbacks required by the
//! LAI (Lightweight ACPI Implementation) library.
usingnamespace @import("root").preamble;

// Scoped kernel logger for this subsystem; `.filter = .info` suppresses
// lower-severity messages.
const log = lib.output.log.scoped(.{
    .prefix = "platform/acpi",
    .filter = .info,
}).write;

const paging = os.memory.paging;
const pci = os.platform.pci;
const libalign = lib.util.libalign;
const range = lib.util.range;

/// Root System Description Pointer, as laid out in memory by the firmware.
/// `packed` so field offsets match the on-disk/in-memory ACPI layout exactly.
/// The `extended_*`/`xsdt_addr` fields are only meaningful for revision >= 2.
const RSDP = packed struct {
    signature: [8]u8,
    checksum: u8,
    oemid: [6]u8,
    revision: u8,
    rsdt_addr: u32,
    // ACPI 2.0+ only:
    extended_length: u32,
    xsdt_addr: u64,
    extended_checksum: u8,
};

/// Common header shared by every System Description Table.
const SDTHeader = packed struct {
    signature: [4]u8,
    len: u32,
    revision: u8,
    checksum: u8,
    oem: [6]u8,
    oem_table: [8]u8,
    oem_revison: u32,
    creator_id: u32,
    creator_revision: u32,
};

/// ACPI Generic Address Structure (GAS), used by the FADT's `x_*` fields.
const GenericAddrStructure = packed struct {
    addrspace: u8,
    bit_width: u8,
    bit_offset: u8,
    access_size: u8,
    base: u64,
};

/// Fixed ACPI Description Table. Field offsets are ABI; see the `comptime`
/// asserts below that pin `dsdt` and `x_dsdt` to their spec offsets.
const FADT = packed struct {
    header: SDTHeader,
    firmware_control: u32,
    dsdt: u32,
    res0: u8,
    profile: u8,
    sci_irq: u16,
    smi_command_port: u32,
    acpi_enable: u8,
    acpi_disable: u8,
    s4bios_req: u8,
    pstate_control: u8,
    pm1a_event_block: u32,
    pm1b_event_block: u32,
    pm1a_control_block: u32,
    pm1b_control_block: u32,
    pm2_control_block: u32,
    pm_timer_block: u32,
    gpe0_block: u32,
    gpe1_block: u32,
    pm1_event_length: u8,
    pm1_control_length: u8,
    pm2_control_length: u8,
    pm_timer_length: u8,
    gpe0_length: u8,
    gpe1_length: u8,
    gpe1_base: u8,
    cstate_control: u8,
    worst_c2_latency: u16,
    worst_c3_latency: u16,
    flush_size: u16,
    flush_stride: u16,
    duty_offset: u8,
    duty_width: u8,

    // cmos registers
    day_alarm: u8,
    month_alarm: u8,
    century: u8,

    // ACPI 2.0 fields
    iapc_boot_flags: u16,
    reserved2: u8,
    flags: u32,
    reset_register: GenericAddrStructure,
    reset_command: u8,
    arm_boot_flags: u16,
    minor_version: u8,
    x_firmware_control: u64,
    x_dsdt: u64,
    x_pm1a_event_block: GenericAddrStructure,
    x_pm1b_event_block: GenericAddrStructure,
    x_pm1a_control_block: GenericAddrStructure,
    x_pm1b_control_block: GenericAddrStructure,
    x_pm2_control_block: GenericAddrStructure,
    x_pm_timer_block: GenericAddrStructure,
    x_gpe0_block: GenericAddrStructure,
    x_gpe1_block: GenericAddrStructure,
};

// Guard the packed layout: these offsets come from the ACPI spec and the
// LAI code below (`laihost_scan` for "DSDT") depends on them being right.
comptime {
    std.debug.assert(@offsetOf(FADT, "dsdt") == 40);
    std.debug.assert(@offsetOf(FADT, "x_dsdt") == 140);
}

const lai = os.kernel.lai;

// Physical location of the RSDP as reported by the bootloader (if any),
// and the mapped pointer derived from it during `init_acpi`.
var rsdp_phys: ?os.platform.phys_ptr([*]u8) = null;
var rsdp: *RSDP = undefined;

/// Called by boot code to hand us the RSDP physical address.
pub fn register_rsdp(rsdp_in: os.platform.phys_ptr([*]u8)) void {
    rsdp_phys = rsdp_in;
}

/// Fallback RSDP search (e.g. scanning the BIOS area); not implemented yet,
/// so callers must treat `null` as "no ACPI available".
fn locate_rsdp() ?os.platform.phys_ptr([*]u8) {
    // @TODO
    return null;
}

/// Parse the MCFG table: each 16-byte entry (starting at byte 44) describes
/// an ECAM region covering a PCI bus range. Each bus gets 1 MiB (1 << 20)
/// of config space, hence the address stride per bus below.
fn parse_MCFG(sdt: []u8) void {
    var offset: usize = 44;
    while (offset + 16 <= sdt.len) : (offset += 16) {
        var addr = std.mem.readIntNative(u64, sdt[offset..][0..8]);
        var lo_bus = sdt[offset + 10];
        const hi_bus = sdt[offset + 11];
        while (true) {
            // Registration failure is logged but non-fatal; remaining buses
            // are still attempted.
            pci.register_mmio(lo_bus, addr) catch |err| {
                log(.err, "ACPI: Unable to register PCI mmio: {e}", .{@errorName(err)});
            };
            if (lo_bus == hi_bus) break;
            addr += 1 << 20;
            lo_bus += 1;
        }
    }
}

/// Pack a 4-byte table signature into a native-endian u32 so signatures can
/// be switched on as integers.
fn signature_value(sdt: anytype) u32 {
    return std.mem.readIntNative(u32, sdt[0..4]);
}

/// Map an SDT at `addr`: first map just the 8-byte prefix to read the
/// table's `len` field (bytes 4..8 of the header), then return the full
/// write-back-mapped slice of that length.
fn get_sdt(addr: u64) []u8 {
    var result = os.platform.phys_slice(u8).init(addr, 8);
    result.len = std.mem.readIntNative(u32, result.to_slice_writeback()[4..8]);
    return result.to_slice_writeback();
}

/// Dispatch a single SDT by signature. Most tables are recognized but
/// intentionally ignored for now; only APIC (MADT) and MCFG are consumed.
fn parse_sdt(addr: usize) void {
    const sdt = get_sdt(addr);
    switch (signature_value(sdt)) {
        signature_value("FACP") => {}, // Ignore for now
        signature_value("SSDT") => {}, // Ignore for now
        signature_value("DMAR") => {}, // Ignore for now
        signature_value("ECDT") => {}, // Ignore for now
        signature_value("SBST") => {}, // Ignore for now
        signature_value("HPET") => {}, // Ignore for now
        signature_value("WAET") => {}, // Ignore for now
        signature_value("SPCR") => {}, // Ignore for now
        signature_value("GTDT") => {}, // Ignore for now
        signature_value("APIC") => {
            // MADT: interrupt controller topology, only handled on x86_64.
            switch (os.platform.arch) {
                .x86_64 => @import("x86_64/apic.zig").handle_madt(sdt),
                else => log(.notice, "ACPI: MADT found on unsupported architecture!", .{}),
            }
        },
        signature_value("MCFG") => {
            parse_MCFG(sdt);
        },
        else => log(.warn, "ACPI: Unknown SDT: '{s}' with size {d} bytes", .{ @as([]u8, sdt[0..4]), sdt.len }),
    }
}

/// Walk the RSDT (T == u32) or XSDT (T == u64): pointers to child SDTs
/// start at byte 36 and are 4 or 8 bytes wide respectively.
fn parse_root_sdt(comptime T: type, addr: usize) void {
    const sdt = get_sdt(addr);
    var offset: u64 = 36;
    while (offset + @sizeOf(T) <= sdt.len) : (offset += @sizeOf(T)) {
        parse_sdt(std.mem.readIntNative(T, sdt[offset..][0..@sizeOf(T)]));
    }
}

/// LAI host hook: route LAI's log output into our kernel logger.
export fn laihost_log(kind: c_int, str: [*:0]const u8) void {
    switch (kind) {
        lai.LAI_WARN_LOG => log(.warn, "LAI: {s}", .{str}),
        lai.LAI_DEBUG_LOG => log(.debug, "LAI: {s}", .{str}),
        else => log(null, "UNK: LAI {s}", .{str}),
    }
}

/// Check one table at `addr` against `name`; `index` counts down so the
/// caller can select the N-th table with a matching signature.
fn impl_laihost_scan_table(addr: usize, name: *const [4]u8, index: *c_int) ?*c_void {
    const table = get_sdt(addr);
    if (std.mem.eql(u8, table[0..4], name)) {
        if (index.* == 0)
            return @ptrCast(*c_void, table.ptr);
        index.* -= 1;
    }
    return null;
}

/// Scan every entry of the root SDT (RSDT/XSDT depending on T) for the
/// `index`-th table named `name`. Returns lai.NULL when not found.
fn impl_laihost_scan_root(comptime T: type, addr: usize, name: *const [4]u8, index_c: c_int) ?*c_void {
    const sdt = get_sdt(addr);
    var index = index_c;
    var offset: u64 = 36;
    while (offset + @sizeOf(T) <= sdt.len) : (offset += @sizeOf(T)) {
        const paddr = std.mem.readIntNative(T, sdt[offset..][0..@sizeOf(T)]);
        if (impl_laihost_scan_table(paddr, name, &index)) |result|
            return result;
    }
    return lai.NULL;
}

/// LAI host hook: find the `index`-th table with the given signature.
/// "RSDT"/"XSDT"/"DSDT" are special-cased (the DSDT is reached through the
/// FADT's `dsdt`/`x_dsdt` pointers rather than the root SDT).
export fn laihost_scan(name: *const [4]u8, index: c_int) ?*c_void {
    if (index == 0) {
        if (std.mem.eql(u8, name, "RSDT")) return @ptrCast(*c_void, get_sdt(rsdp.rsdt_addr).ptr);
        if (std.mem.eql(u8, name, "XSDT")) return @ptrCast(*c_void, get_sdt(rsdp.xsdt_addr).ptr);
        if (std.mem.eql(u8, name, "DSDT")) {
            // `*align(1)` because the FADT pointer comes from a packed table
            // with no alignment guarantee.
            const fadt = @ptrCast(*align(1) FADT, laihost_scan("FACP", 0) orelse return lai.NULL);
            if (fadt.dsdt != 0) return @ptrCast(*c_void, get_sdt(fadt.dsdt).ptr);
            if (fadt.x_dsdt != 0) return @ptrCast(*c_void, get_sdt(fadt.x_dsdt).ptr);
            return lai.NULL;
        }
    }
    // Revision 0 firmware exposes a 32-bit RSDT; revision 2+ a 64-bit XSDT.
    // Other revisions were rejected in `init_acpi`, hence `unreachable`.
    switch (rsdp.revision) {
        0 => return impl_laihost_scan_root(u32, rsdp.rsdt_addr, name, index),
        2 => return impl_laihost_scan_root(u64, rsdp.xsdt_addr, name, index),
        else => unreachable,
    }
}

/// LAI host hook: fatal AML/ACPI error. Marks ACPI as unusable before
/// panicking so other subsystems can query `has_lai_acpi`.
export fn laihost_panic(err: [*:0]const u8) noreturn {
    has_lai_acpi = false;
    log(.emerg, "LAI: {s}", .{err});
    @panic("LAI PANIC");
}
export fn laihost_map(addr: usize, size: usize) ?*c_void { return os.platform.phys_ptr(*c_void).from_int(addr).get_uncached(); } export fn laihost_unmap(ptr: *c_void, size: usize) void {} export fn laihost_handle_amldebug(ptr: *c_void) void {} export fn laihost_sleep(some_unit_of_time: u64) void { @panic("laihost_sleep"); } export fn laihost_sync_wait(state: *lai.lai_sync_state, value: u32, deadline: u64) void { @panic("laihost_sync_wait"); } export fn laihost_sync_wake(state: *lai.lai_sync_state) void { @panic("laihost_sync_wake"); } var has_lai_acpi = false; pub fn init_acpi() !void { if (rsdp_phys == null) { log(.notice, "No RSDP registered... Looking for it ourselves", .{}); rsdp_phys = locate_rsdp() orelse { log(.err, "Unable to locate RSDP ourselves", .{}); return; }; } log(.debug, "Using RSDP {} for acpi", .{rsdp_phys}); rsdp = rsdp_phys.?.cast(*RSDP).get_writeback(); log(.debug, "Revision: {d}", .{rsdp.revision}); switch (rsdp.revision) { 0 => parse_root_sdt(u32, rsdp.rsdt_addr), 2 => parse_root_sdt(u64, rsdp.xsdt_addr), else => return error.UnknownACPIRevision, } lai.lai_set_acpi_revision(rsdp.revision); lai.lai_create_namespace(); has_lai_acpi = true; }
subprojects/flork/src/platform/acpi.zig
//! Map persistence: a little-endian binary save/load format plus JSON and
//! Tiled exporters. IMPORTANT: the exact order of write/read statements in
//! `save`/`load` (and the helpers) IS the file format — keep them in sync.
const std = @import("std");
const upaya = @import("upaya");
const Writer = std.fs.File.Writer;
const Reader = std.fs.File.Reader;
const AppState = @import("app_state.zig").AppState;
const data = @import("map.zig");
const Map = data.Map;
const RuleSet = data.RuleSet;
const Rule = data.Rule;
const RuleTile = data.RuleTile;
const Tag = data.Tag;
const TileDefinitions = data.TileDefinitions;
const Object = data.Object;
const Animation = data.Animation;

/// Serialize `map` to `file` (created/truncated) in the binary format that
/// `load` reads back. Variable-length lists are written as a usize count
/// followed by that many elements.
pub fn save(map: Map, file: []const u8) !void {
    var handle = try std.fs.cwd().createFile(file, .{});
    defer handle.close();
    const out = handle.writer();

    // header: dimensions + tileset image
    try out.writeIntLittle(usize, map.w);
    try out.writeIntLittle(usize, map.h);
    try out.writeIntLittle(usize, map.tile_size);
    try out.writeIntLittle(usize, map.tile_spacing);
    try out.writeIntLittle(usize, map.image.len);
    try out.writeAll(map.image);

    // raw tile data
    const data_bytes = std.mem.sliceAsBytes(map.data);
    try out.writeIntLittle(usize, data_bytes.len);
    try out.writeAll(data_bytes);

    // RuleSet
    try writeRuleSet(out, map.ruleset);

    // groups
    try out.writeIntLittle(usize, map.ruleset_groups.count());
    var iter = map.ruleset_groups.iterator();
    while (iter.next()) |entry| {
        try out.writeIntLittle(u8, entry.key);
        try writeFixedSliceZ(out, entry.value);
    }

    // pre RuleSets
    try out.writeIntLittle(usize, map.pre_rulesets.items.len);
    for (map.pre_rulesets.items) |ruleset| {
        try writeRuleSet(out, ruleset);
    }

    // tags. `tiles` is a fixed-capacity list: iterate the backing array but
    // stop at the live length so only `len` entries are written.
    try out.writeIntLittle(usize, map.tags.items.len);
    for (map.tags.items) |tag| {
        try writeFixedSliceZ(out, &tag.name);
        try out.writeIntLittle(usize, tag.tiles.len);
        for (tag.tiles.items) |tile, i| {
            if (i == tag.tiles.len) break;
            try out.writeIntLittle(u8, tile);
        }
    }

    // tile definitions: one fixed-capacity tile list per struct field,
    // written in declaration order (load relies on the same field order).
    inline for (@typeInfo(TileDefinitions).Struct.fields) |field| {
        var list = @field(map.tile_definitions, field.name);
        try out.writeIntLittle(usize, list.len);
        for (list.items) |tile, i| {
            if (i == list.len) break;
            try out.writeIntLittle(u8, tile);
        }
    }

    // objects
    try out.writeIntLittle(usize, map.objects.items.len);
    for (map.objects.items) |obj| {
        try out.writeIntLittle(u8, obj.id);
        try writeFixedSliceZ(out, &obj.name);
        try out.writeIntLittle(usize, obj.x);
        try out.writeIntLittle(usize, obj.y);
        try out.writeIntLittle(usize, obj.props.items.len);
        for (obj.props.items) |prop| {
            try writeFixedSliceZ(out, &prop.name);
            try writeUnion(out, prop.value);
        }
    }

    // animations
    try out.writeIntLittle(usize, map.animations.items.len);
    for (map.animations.items) |anim| {
        try out.writeIntLittle(u8, anim.tile);
        try out.writeIntLittle(u16, anim.rate);
        try out.writeIntLittle(usize, anim.tiles.len);
        for (anim.tiles.items) |tile, i| {
            if (i == anim.tiles.len) break;
            try out.writeIntLittle(u8, tile);
        }
    }
}

/// Serialize one RuleSet; mirrored by `readIntoRuleSet`/`readRule`.
fn writeRuleSet(out: Writer, ruleset: RuleSet) !void {
    try out.writeIntLittle(u64, ruleset.seed);
    try out.writeIntLittle(u8, ruleset.repeat);
    try out.writeIntLittle(usize, ruleset.rules.items.len);
    for (ruleset.rules.items) |rule| {
        try writeFixedSliceZ(out, &rule.name);
        try out.writeIntLittle(u8, rule.group);
        // rule_tiles is a fixed-size array: every slot is written, no count.
        for (rule.rule_tiles) |rule_tile| {
            try out.writeIntLittle(usize, rule_tile.tile);
            try out.writeIntLittle(u8, @enumToInt(rule_tile.state));
        }
        try out.writeIntLittle(u8, rule.chance);
        try out.writeIntLittle(usize, rule.result_tiles.len);
        for (rule.result_tiles.items) |result_tiles, i| {
            if (i == rule.result_tiles.len) break;
            try out.writeIntLittle(u8, result_tiles);
        }
    }
}

/// Deserialize a Map previously written by `save`. All long-lived
/// allocations come from `upaya.mem.allocator`; caller owns the Map.
pub fn load(file: []const u8) !Map {
    var handle = try std.fs.cwd().openFile(file, .{});
    defer handle.close();

    var map = Map{
        .data = undefined,
        .ruleset = RuleSet.init(),
        .ruleset_groups = std.AutoHashMap(u8, []const u8).init(upaya.mem.allocator),
        .pre_rulesets = std.ArrayList(RuleSet).init(upaya.mem.allocator),
        .tags = std.ArrayList(Tag).init(upaya.mem.allocator),
        .tile_definitions = .{},
        .objects = std.ArrayList(Object).init(upaya.mem.allocator),
        .animations = std.ArrayList(Animation).init(upaya.mem.allocator),
    };

    const in = handle.reader();
    map.w = try in.readIntLittle(usize);
    map.h = try in.readIntLittle(usize);
    map.tile_size = try in.readIntLittle(usize);
    map.tile_spacing = try in.readIntLittle(usize);

    // NOTE(review): when image_len == 0, map.image is left at the Map
    // default from the struct literal above — confirm Map declares one.
    var image_len = try in.readIntLittle(usize);
    if (image_len > 0) {
        const buffer = try upaya.mem.allocator.alloc(u8, image_len);
        _ = try in.readAll(buffer);
        map.image = buffer;
    }

    // map data
    const data_len = try in.readIntLittle(usize);
    map.data = try upaya.mem.allocator.alloc(u8, data_len);
    _ = try in.readAll(map.data);

    // RuleSet
    try readIntoRuleSet(in, &map.ruleset);

    // groups
    const group_len = try in.readIntLittle(usize);
    var i: usize = 0;
    while (i < group_len) : (i += 1) {
        const key = try in.readIntLittle(u8);
        const len = try in.readIntLittle(usize);
        std.debug.assert(len != 0);
        const value = try upaya.mem.allocator.alloc(u8, len);
        _ = try in.readAll(value);
        map.ruleset_groups.put(key, value) catch unreachable;
    }

    // pre RuleSets
    const pre_ruleset_count = try in.readIntLittle(usize);
    _ = try map.pre_rulesets.ensureCapacity(pre_ruleset_count);
    i = 0;
    while (i < pre_ruleset_count) : (i += 1) {
        var ruleset = RuleSet.init();
        try readIntoRuleSet(in, &ruleset);
        _ = try map.pre_rulesets.append(ruleset);
    }

    // tags
    const tag_cnt = try in.readIntLittle(usize);
    _ = try map.tags.ensureCapacity(tag_cnt);
    i = 0;
    while (i < tag_cnt) : (i += 1) {
        var tag = Tag.init();
        try readFixedSliceZ(in, &tag.name);
        var tile_len = try in.readIntLittle(usize);
        while (tile_len > 0) : (tile_len -= 1) {
            tag.tiles.append(try in.readIntLittle(u8));
        }
        try map.tags.append(tag);
    }

    // tile definitions (same field order as `save`)
    inline for (@typeInfo(TileDefinitions).Struct.fields) |field| {
        var list = &@field(map.tile_definitions, field.name);
        var tile_len = try in.readIntLittle(usize);
        while (tile_len > 0) : (tile_len -= 1) {
            list.append(try in.readIntLittle(u8));
        }
    }

    // objects
    const obj_cnt = try in.readIntLittle(usize);
    _ = try map.objects.ensureCapacity(obj_cnt);
    i = 0;
    while (i < obj_cnt) : (i += 1) {
        var obj = Object.init(try in.readIntLittle(u8));
        try readFixedSliceZ(in, &obj.name);
        obj.x = try in.readIntLittle(usize);
        obj.y = try in.readIntLittle(usize);
        var props_len = try in.readIntLittle(usize);
        try obj.props.ensureCapacity(props_len);
        while (props_len > 0) : (props_len -= 1) {
            var prop = Object.Prop.init();
            try readFixedSliceZ(in, &prop.name);
            try readUnionInto(in, &prop.value);
            obj.props.appendAssumeCapacity(prop);
        }
        map.objects.appendAssumeCapacity(obj);
    }

    // animations
    const anim_cnt = try in.readIntLittle(usize);
    _ = try map.animations.ensureCapacity(anim_cnt);
    i = 0;
    while (i < anim_cnt) : (i += 1) {
        var anim = Animation.init(try in.readIntLittle(u8));
        anim.rate = try in.readIntLittle(u16);
        var tile_len = try in.readIntLittle(usize);
        while (tile_len > 0) : (tile_len -= 1) {
            anim.tiles.append(try in.readIntLittle(u8));
        }
        try map.animations.append(anim);
    }

    return map;
}

/// Read a RuleSet written by `writeRuleSet` into `ruleset`.
fn readIntoRuleSet(in: Reader, ruleset: *RuleSet) !void {
    ruleset.seed = try in.readIntLittle(u64);
    ruleset.repeat = try in.readIntLittle(u8);
    const rule_count = try in.readIntLittle(usize);
    _ = try ruleset.rules.ensureCapacity(rule_count);
    var i: usize = 0;
    while (i < rule_count) : (i += 1) {
        try ruleset.rules.append(try readRule(in));
    }
}

/// Read a single Rule; mirrors the field order in `writeRuleSet`.
fn readRule(in: Reader) !Rule {
    var rule = Rule.init();
    try readFixedSliceZ(in, &rule.name);
    rule.group = try in.readIntLittle(u8);
    for (rule.rule_tiles) |*rule_tile| {
        rule_tile.tile = try in.readIntLittle(usize);
        rule_tile.state = @intToEnum(RuleTile.RuleState, @intCast(u4, try in.readIntLittle(u8)));
    }
    rule.chance = try in.readIntLittle(u8);
    const result_tiles_len = try in.readIntLittle(usize);
    // sanity check against corrupt files: result_tiles is a small fixed list
    if (result_tiles_len > 100) return error.WTF;
    var i: usize = 0;
    while (i < result_tiles_len) : (i += 1) {
        rule.result_tiles.append(try in.readIntLittle(u8));
    }
    return rule;
}

// generic write helpers

/// Write a zero-terminated fixed buffer as (usize length, bytes): only the
/// text before the first NUL (or the whole slice if none) is stored.
fn writeFixedSliceZ(out: Writer, slice: []const u8) !void {
    const sentinel_index = std.mem.indexOfScalar(u8, slice, 0) orelse slice.len;
    const txt = slice[0..sentinel_index];
    try out.writeIntLittle(usize, txt.len);
    if (txt.len > 0) {
        try out.writeAll(txt);
    }
}

/// Write a tagged union as (tag, active payload). Untagged unions are
/// silently skipped (the `if` has no else).
fn writeUnion(out: Writer, value: anytype) !void {
    const info = @typeInfo(@TypeOf(value)).Union;
    if (info.tag_type) |TagType| {
        const active_tag = std.meta.activeTag(value);
        try writeValue(out, active_tag);
        inline for (std.meta.fields(TagType)) |field_info| {
            if (field_info.value == @enumToInt(active_tag)) {
                const name = field_info.name;
                try writeValue(out, @field(value, name));
            }
        }
    }
}

/// Write a scalar/enum/indexable value little-endian. Floats are stored by
/// bit-casting to i32, so only f32 payloads round-trip (see readValueInto).
fn writeValue(out: Writer, value: anytype) !void {
    const T = comptime @TypeOf(value);
    if (comptime std.meta.trait.isIndexable(T)) {
        for (value) |v| try writeValue(out, v);
        return;
    }
    switch (@typeInfo(T)) {
        .Int => try out.writeIntLittle(T, value),
        .Float => try out.writeIntLittle(i32, @bitCast(i32, value)), // zig pre-0.7 try out.writeIntLittle(T, value)
        .Enum => try writeValue(out, @enumToInt(value)),
        else => unreachable,
    }
}

// generic read helpers

/// Read a tagged union written by `writeUnion` into `ptr`.
fn readUnionInto(in: Reader, ptr: anytype) !void {
    const C = comptime std.meta.Child(@TypeOf(ptr));
    const info = @typeInfo(C).Union;
    if (info.tag_type) |TagType| {
        // this is technically a u2 but we read it as a u8 because there is no bit packing in this reader/writer
        const tag = try in.readIntLittle(u8);
        inline for (std.meta.fields(TagType)) |field_info| {
            if (field_info.value == tag) {
                const name = field_info.name;
                ptr.* = @unionInit(C, name, undefined);
                try readValueInto(in, &@field(ptr, name));
            }
        }
    }
}

/// Read a value written by `writeValue` into the pointee of `ptr`.
/// Floats are recovered by bit-casting the stored i32 back to f32.
fn readValueInto(in: Reader, ptr: anytype) !void {
    const T = @TypeOf(ptr);
    comptime std.debug.assert(std.meta.trait.is(.Pointer)(T));
    if (comptime std.meta.trait.isSlice(T) or std.meta.trait.isPtrTo(.Array)(T)) {
        for (ptr) |*v| try readValueInto(in, v);
        return;
    }
    comptime std.debug.assert(std.meta.trait.isSingleItemPtr(T));
    const C = comptime std.meta.Child(T);
    const child_ti = @typeInfo(C);
    switch (child_ti) {
        .Int => ptr.* = try in.readIntLittle(C),
        .Float => ptr.* = @bitCast(f32, try in.readIntLittle(i32)), // zig pre-0.7 ptr.* = try in.readIntLittle(C)
        else => unreachable,
    }
}

/// Read a length-prefixed string into the fixed buffer `dst`.
/// NOTE(review): assumes len <= dst.len — std.mem.copy asserts otherwise,
/// so a corrupt/oversized record will trip a safety check here.
fn readFixedSliceZ(in: Reader, dst: []u8) !void {
    const len = try in.readIntLittle(usize);
    if (len > 0) {
        const buffer = try upaya.mem.tmp_allocator.alloc(u8, len);
        _ = try in.readAll(buffer);
        std.mem.copy(u8, dst, buffer);
    }
}

// export

/// Export the map (with pre-resolved `map_data` tile indices) as JSON.
/// Fixed name buffers are trimmed at their first NUL before emission.
pub fn exportJson(map: Map, map_data: []u8, file: []const u8) !void {
    var handle = try std.fs.cwd().createFile(file, .{});
    defer handle.close();
    const out_stream = handle.writer();
    var jw = std.json.writeStream(out_stream, 10);
    {
        try jw.beginObject();
        defer jw.endObject() catch unreachable;

        try jw.objectField("w");
        try jw.emitNumber(map.w);
        try jw.objectField("h");
        try jw.emitNumber(map.h);
        try jw.objectField("tile_size");
        try jw.emitNumber(map.tile_size);
        try jw.objectField("tile_spacing");
        try jw.emitNumber(map.tile_spacing);
        try jw.objectField("image");
        try jw.emitString(map.image);

        // tags
        try jw.objectField("tags");
        try jw.beginArray();
        {
            defer jw.endArray() catch unreachable;
            for (map.tags.items) |tag| {
                try jw.arrayElem();
                try jw.beginObject();
                const sentinel_index = std.mem.indexOfScalar(u8, &tag.name, 0) orelse tag.name.len;
                const name = tag.name[0..sentinel_index];
                try jw.objectField("name");
                try jw.emitString(name);
                try jw.objectField("tiles");
                try jw.beginArray();
                for (tag.tiles.items) |tile, i| {
                    if (i == tag.tiles.len) break;
                    try jw.arrayElem();
                    try jw.emitNumber(tile);
                }
                try jw.endArray();
                try jw.endObject();
            }
        }

        // tile definitions
        try jw.objectField("tile_definitions");
        try jw.beginArray();
        {
            defer jw.endArray() catch unreachable;
            inline for (@typeInfo(TileDefinitions).Struct.fields) |field| {
                try jw.arrayElem();
                try jw.beginObject();
                try jw.objectField(field.name);
                var list = @field(map.tile_definitions, field.name);
                try jw.beginArray();
                for (list.items) |tile, i| {
                    if (i == list.len) break;
                    try jw.arrayElem();
                    try jw.emitNumber(tile);
                }
                try jw.endArray();
                try jw.endObject();
            }
        }

        // objects
        try jw.objectField("objects");
        try jw.beginArray();
        {
            defer jw.endArray() catch unreachable;
            for (map.objects.items) |obj| {
                try jw.arrayElem();
                try jw.beginObject();
                try jw.objectField("id");
                try jw.emitNumber(obj.id);
                const sentinel_index = std.mem.indexOfScalar(u8, &obj.name, 0) orelse obj.name.len;
                try jw.objectField("name");
                try jw.emitString(obj.name[0..sentinel_index]);
                try jw.objectField("x");
                try jw.emitNumber(obj.x);
                try jw.objectField("y");
                try jw.emitNumber(obj.y);
                // custom properties are flattened into the object itself
                for (obj.props.items) |prop| {
                    const prop_sentinel_index = std.mem.indexOfScalar(u8, &prop.name, 0) orelse prop.name.len;
                    try jw.objectField(prop.name[0..prop_sentinel_index]);
                    switch (prop.value) {
                        .string => |str| {
                            const prop_value_sentinel_index = std.mem.indexOfScalar(u8, &str, 0) orelse str.len;
                            try jw.emitString(str[0..prop_value_sentinel_index]);
                        },
                        .int => |int| try jw.emitNumber(int),
                        .float => |float| try jw.emitNumber(float),
                        .link => |link| try jw.emitNumber(link),
                    }
                }
                try jw.endObject();
            }
        }

        // animations
        try jw.objectField("animations");
        try jw.beginArray();
        {
            defer jw.endArray() catch unreachable;
            for (map.animations.items) |anim| {
                try jw.arrayElem();
                try jw.beginObject();
                try jw.objectField("tile");
                try jw.emitNumber(anim.tile);
                try jw.objectField("rate");
                try jw.emitNumber(anim.rate);
                try jw.objectField("tiles");
                try jw.beginArray();
                for (anim.tiles.items) |tile, i| {
                    if (i == anim.tiles.len) break;
                    try jw.arrayElem();
                    try jw.emitNumber(tile);
                }
                try jw.endArray();
                try jw.endObject();
            }
        }

        // map data
        try jw.objectField("data");
        try jw.beginArray();
        for (map_data) |d| {
            try jw.arrayElem();
            try jw.emitNumber(d);
        }
        try jw.endArray();
    }
}

/// Export the current map as a Tiled (mapeditor.org) JSON map with a single
/// tileset and a single tile layer built from `state.final_map_data`.
pub fn exportTiled(state: *AppState, file: []const u8) !void {
    var handle = try std.fs.cwd().createFile(file, .{});
    defer handle.close();
    const out_stream = handle.writer();
    var jw = std.json.writeStream(out_stream, 10);
    {
        try jw.beginObject();
        defer jw.endObject() catch unreachable;

        try jw.objectField("width");
        try jw.emitNumber(state.map.w);
        try jw.objectField("height");
        try jw.emitNumber(state.map.h);
        try jw.objectField("tilewidth");
        try jw.emitNumber(state.map.tile_size);
        try jw.objectField("tileheight");
        try jw.emitNumber(state.map.tile_size);
        try jw.objectField("infinite");
        try jw.emitBool(false);
        try jw.objectField("version");
        try jw.emitNumber(1.2);
        try jw.objectField("tiledversion");
        try jw.emitString("1.3.2");
        try jw.objectField("orientation");
        try jw.emitString("orthogonal");
        try jw.objectField("renderorder");
        try jw.emitString("right-down");
        try jw.objectField("type");
        try jw.emitString("map");
        try jw.objectField("nextlayerid");
        try jw.emitNumber(2);
        try jw.objectField("nextobjectid");
        try jw.emitNumber(1);

        // tileset
        try jw.objectField("tilesets");
        try jw.beginArray();
        {
            defer jw.endArray() catch unreachable;
            try jw.arrayElem();
            try jw.beginObject();
            try jw.objectField("image");
            try jw.emitString(state.map.image);
            try jw.objectField("margin");
            try jw.emitNumber(state.map.tile_spacing);
            try jw.objectField("spacing");
            try jw.emitNumber(state.map.tile_spacing);
            try jw.objectField("imagewidth");
            try jw.emitNumber(state.texture.width);
            // NOTE(review): imageheight is emitted from texture.width —
            // looks like it should be texture.height; confirm upstream.
            try jw.objectField("imageheight");
            try jw.emitNumber(state.texture.width);
            try jw.objectField("tilewidth");
            try jw.emitNumber(state.map.tile_size);
            try jw.objectField("tileheight");
            try jw.emitNumber(state.map.tile_size);
            try jw.objectField("columns");
            try jw.emitNumber(state.tilesPerCol());
            try jw.objectField("firstgid");
            try jw.emitNumber(1);
            try jw.objectField("tiles");
            try jw.beginArray();
            try jw.endArray();
            try jw.endObject();
        }

        // layer
        try jw.objectField("layers");
        try jw.beginArray();
        {
            defer jw.endArray() catch unreachable;
            try jw.arrayElem();
            try jw.beginObject();

            // map data
            try jw.objectField("data");
            try jw.beginArray();
            for (state.final_map_data) |d| {
                try jw.arrayElem();
                try jw.emitNumber(d);
            }
            try jw.endArray();

            try jw.objectField("width");
            try jw.emitNumber(state.map.w);
            try jw.objectField("height");
            try jw.emitNumber(state.map.h);
            try jw.objectField("id");
            try jw.emitNumber(1);
            try jw.objectField("name");
            try jw.emitString("main");
            try jw.objectField("type");
            try jw.emitString("tilelayer");
            try jw.objectField("visible");
            try jw.emitBool(true);
            try jw.objectField("opacity");
            try jw.emitNumber(1);
            try jw.objectField("x");
            try jw.emitNumber(0);
            try jw.objectField("y");
            try jw.emitNumber(0);
            try jw.endObject();
        }
    }
}
tilescript/persistence.zig
//! The top-level wasm linker state: this file *is* the `Wasm` struct
//! (fields at container level), driving object resolution and emission.
const Wasm = @This();

const std = @import("std");
const Atom = @import("Atom.zig");
const Object = @import("Object.zig");
const types = @import("types.zig");
const Symbol = @import("Symbol.zig");
const sections = @import("sections.zig");
const leb = std.leb;
const fs = std.fs;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.zwld);

/// The binary file that we will write the final binary data to
file: fs.File,
/// Configuration of the linker provided by the user
options: Options,
/// A list with references to objects we link to during `flush()`
objects: std.ArrayListUnmanaged(Object) = .{},
/// A map of global names to their symbol location in an object file
global_symbols: std.StringArrayHashMapUnmanaged(SymbolWithLoc) = .{},
/// Contains all atoms that have been created, used to clean up
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
/// Maps atoms to their segment index
atoms: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
/// All symbols created by the linker, rather than through
/// object files will be inserted in this list to manage them.
synthetic_symbols: std.StringArrayHashMapUnmanaged(Symbol) = .{},
/// List of all symbol locations which have been resolved by the linker
/// and will be emit into the final binary.
resolved_symbols: std.AutoArrayHashMapUnmanaged(SymbolWithLoc, void) = .{},
/// Maps discarded symbols and their positions to the location of the symbol
/// it was resolved to.
discarded: std.AutoHashMapUnmanaged(SymbolWithLoc, SymbolWithLoc) = .{},

// OUTPUT SECTIONS //
/// Output function signature types
types: sections.Types = .{},
/// Output import section
imports: sections.Imports = .{},
/// Output function section
functions: sections.Functions = .{},
/// Output table section
tables: sections.Tables = .{},
/// Output memory section, this will only be used when `options.import_memory`
/// is set to false. The limits will be set, based on the total data section size
/// and other configuration options.
memories: std.wasm.Memory = .{ .limits = .{ .min = 0, .max = null } },
/// Output global section
globals: sections.Globals = .{},
/// Output export section
exports: sections.Exports = .{},
/// Output element section
elements: sections.Elements = .{},
/// Index to a function defining the entry of the wasm file
entry: ?u32 = null,
/// Output data section, keyed by the segment name
/// Represents non-synthetic section entries
/// Used for code, data and custom sections.
segments: std.ArrayListUnmanaged(Segment) = .{},
/// Maps a data segment key (such as .rodata) to the index into `segments`
data_segments: std.StringArrayHashMapUnmanaged(u32) = .{},
/// Index into `atoms` that represents the code section
code_section_index: ?u32 = null,

/// Layout of one output segment: alignment, byte size and the offset at
/// which it was placed in the output section.
pub const Segment = struct {
    alignment: u32,
    size: u32,
    offset: u32,
};

/// Describes the location of a symbol
pub const SymbolWithLoc = struct {
    /// Symbol entry index within the object/binary file
    sym_index: u32,
    /// When file is `null`, this symbol refers to a synthetic symbol.
    file: ?u16,

    /// From a given location, find the corresponding symbol in the wasm binary.
    /// Follows the `discarded` chain first, so a location that was replaced
    /// during resolution transparently yields its replacement's symbol.
    pub fn getSymbol(self: SymbolWithLoc, wasm: *const Wasm) *Symbol {
        if (wasm.discarded.get(self)) |new_loc| return new_loc.getSymbol(wasm);
        if (self.file) |file_index| {
            const object = wasm.objects.items[file_index];
            return &object.symtable[self.sym_index];
        }
        return &wasm.synthetic_symbols.values()[self.sym_index];
    }
};

/// Options to pass to our linker which affects
/// the end result and tells the linker how to build the final binary.
pub const Options = struct {
    /// When the entry name is different than `_start`
    entry_name: ?[]const u8 = null,
    /// Points to where the global data will start
    global_base: ?u32 = null,
    /// Tells the linker we will import memory from the host environment
    import_memory: bool = false,
    /// Tells the linker we will import the function table from the host environment
    import_table: bool = false,
    /// Sets the initial memory of the data section
    /// Providing a value too low will result in a linking error.
    initial_memory: ?u32 = null,
    /// Sets the max memory for the data section.
    /// Will result in a linking error when it's smaller than `initial_memory`m
    /// or when the initial memory calculated by the linker is larger than the given maximum memory.
    max_memory: ?u32 = null,
    /// Tell the linker to merge data segments
    /// i.e. all '.rodata' will be merged into a .rodata segment.
    merge_data_segments: bool = true,
    /// Tell the linker we do not require a starting entry
    no_entry: bool = false,
    /// Tell the linker to put the stack first, instead of after the data
    stack_first: bool = false,
    /// Specifies the size of the stack in bytes
    stack_size: ?u32 = null,
};

/// Initializes a new wasm binary file at the given path.
/// Will overwrite any existing file at said path.
pub fn openPath(path: []const u8, options: Options) !Wasm {
    const file = try fs.cwd().createFile(path, .{
        .truncate = true,
        .read = true,
    });
    errdefer file.close();

    return Wasm{ .file = file, .options = options };
}

/// Releases any resources that is owned by `Wasm`,
/// usage after calling deinit is illegal behaviour.
pub fn deinit(self: *Wasm, gpa: Allocator) void {
    // Close and free every input object before the container lists.
    for (self.objects.items) |*object| {
        object.file.?.close();
        object.deinit(gpa);
    }
    for (self.managed_atoms.items) |atom| {
        atom.deinit(gpa);
    }
    self.synthetic_symbols.deinit(gpa);
    self.discarded.deinit(gpa);
    self.resolved_symbols.deinit(gpa);
    self.managed_atoms.deinit(gpa);
    self.atoms.deinit(gpa);
    self.data_segments.deinit(gpa);
    self.segments.deinit(gpa);
    self.global_symbols.deinit(gpa);
    self.objects.deinit(gpa);
    self.functions.deinit(gpa);
    self.types.deinit(gpa);
    self.imports.deinit(gpa);
    self.globals.deinit(gpa);
    self.exports.deinit(gpa);
    self.elements.deinit(gpa);
    self.tables.deinit(gpa);
    self.file.close();
    self.* = undefined;
}

/// Parses objects from the given paths as well as append them to `self`
pub fn addObjects(self: *Wasm, gpa: Allocator, file_paths: []const []const u8) !void {
    for (file_paths) |path| {
        const file = try fs.cwd().openFile(path, .{});
        errdefer file.close();
        var object = try Object.init(gpa, file, path);
        errdefer object.deinit(gpa);
        try self.objects.append(gpa, object);
    }
}

/// Returns the data section entry count, skipping the .bss section
/// (.bss only counts when memory is imported from the host).
pub fn dataCount(self: Wasm) u32 {
    var i: u32 = 0;
    for (self.data_segments.keys()) |key| {
        if (std.mem.eql(u8, key, ".bss") and !self.options.import_memory) continue;
        i += 1;
    }
    return i;
}

/// Flushes the `Wasm` construct into a final wasm binary by linking
/// the objects, ensuring the final binary file has no collisions.
pub fn flush(self: *Wasm, gpa: Allocator) !void {
    // Linker-defined symbols (e.g. __stack_pointer) must exist before
    // object symbols are resolved against them.
    try self.setupLinkerSymbols(gpa);
    for (self.objects.items) |_, obj_idx| {
        try self.resolveSymbolsInObject(gpa, @intCast(u16, obj_idx));
    }
    for (self.objects.items) |*object, obj_idx| {
        try object.parseIntoAtoms(gpa, @intCast(u16, obj_idx), self);
    }
    try self.setupStart();
    try self.mergeImports(gpa);
    try self.allocateAtoms();
    try self.setupMemory();
    try self.mergeSections(gpa);
    try self.mergeTypes(gpa);
    try self.setupExports(gpa);
    try self.relocateAtoms();

    try @import("emit_wasm.zig").emit(self, gpa);
}

/// Resolves the symbols of the object at `object_index` against the global
/// symbol table, applying the usual linker rules: locals stay private,
/// defined symbols beat undefined ones, weak symbols never overwrite weak
/// symbols, and two strong definitions collide.
fn resolveSymbolsInObject(self: *Wasm, gpa: Allocator, object_index: u16) !void {
    const object: Object = self.objects.items[object_index];
    log.debug("resolving symbols in {s}", .{object.name});

    for (object.symtable) |*symbol, i| {
        const sym_idx = @intCast(u32, i);
        const location: SymbolWithLoc = .{
            .file = object_index,
            .sym_index = sym_idx,
        };

        if (symbol.isLocal()) {
            if (symbol.isUndefined()) {
                log.err("Local symbols are not allowed to reference imports", .{});
                log.err(" symbol '{s}' defined in '{s}'", .{ symbol.name, object.name });
                return error.undefinedLocal;
            }
            try self.resolved_symbols.putNoClobber(gpa, location, {});
            continue;
        }

        // TODO: Store undefined symbols so we can verify at the end if they've all been found
        // if not, emit an error (unless --allow-undefined is enabled).
        const maybe_existing = try self.global_symbols.getOrPut(gpa, symbol.name);
        if (!maybe_existing.found_existing) {
            // First time we see this global: it wins by default.
            maybe_existing.value_ptr.* = location;
            try self.resolved_symbols.putNoClobber(gpa, location, {});
            continue;
        }

        const existing_loc = maybe_existing.value_ptr.*;
        const existing_sym: *Symbol = existing_loc.getSymbol(self);

        if (!existing_sym.isUndefined()) {
            if (!symbol.isUndefined()) {
                log.info("Overwriting symbol '{s}'", .{symbol.name});
                log.info(" first definition in '{s}'", .{self.objects.items[existing_loc.file.?].name});
                log.info(" next definition in '{s}'", .{object.name});
                return error.SymbolCollision;
            }
            continue; // Do not overwrite defined symbols with undefined symbols
        }

        // when both symbols are weak, we skip overwriting
        if (existing_sym.isWeak() and symbol.isWeak()) {
            continue;
        }

        // simply overwrite with the new symbol
        log.debug("Overwriting symbol '{s}'", .{symbol.name});
        log.debug(" old definition in '{s}'", .{self.objects.items[existing_loc.file.?].name});
        log.debug(" new definition in '{s}'", .{object.name});
        try self.discarded.putNoClobber(gpa, maybe_existing.value_ptr.*, location);
        maybe_existing.value_ptr.* = location;
        try self.global_symbols.put(gpa, symbol.name, location);
        try self.resolved_symbols.put(gpa, location, {});
        std.debug.assert(self.resolved_symbols.swapRemove(existing_loc));
    }
}

/// Calculates the new indexes for symbols and their respective sections,
/// appending each resolved function/global/table into the final output lists.
fn mergeSections(self: *Wasm, gpa: Allocator) !void {
    // first append the indirect function table if initialized
    if (self.global_symbols.get("__indirect_function_table")) |sym_with_loc| {
        log.debug("Appending indirect function table", .{});
        const object: Object = self.objects.items[sym_with_loc.file.?];
        const symbol = sym_with_loc.getSymbol(self);
        const imp = object.findImport(.table, object.symtable[sym_with_loc.sym_index].index);
        symbol.index = try self.tables.append(gpa, self.imports.tableCount(), imp.kind.table);
    }

    log.debug("Merging sections", .{});
    for (self.resolved_symbols.keys()) |sym_with_loc| {
        const object = self.objects.items[sym_with_loc.file orelse continue]; // synthetic symbols do not need to be merged
        const symbol: *Symbol = &object.symtable[sym_with_loc.sym_index];
        if (symbol.isUndefined()) continue; // skip imports
        switch (symbol.tag) {
            .function => {
                // `symbol.index` counts imports first; subtract the import
                // count to index into the object's own definitions.
                const offset = object.importedCountByKind(.function);
                const original_func = object.functions[symbol.index - offset];
                symbol.index = try self.functions.append(
                    gpa,
                    self.imports.functionCount(),
                    original_func,
                );
            },
            .global => {
                const offset = object.importedCountByKind(.global);
                const original_global = object.globals[symbol.index - offset];
                symbol.index = try self.globals.append(
                    gpa,
                    self.imports.globalCount(),
                    original_global,
                );
            },
            .table => {
                const offset = object.importedCountByKind(.table);
                const original_table = object.tables[symbol.index - offset];
                symbol.index = try self.tables.append(
                    gpa,
                    self.imports.tableCount(),
                    original_table,
                );
            },
            else => {},
        }
    }
    log.debug("Merged ({d}) functions", .{self.functions.count()});
    log.debug("Merged ({d}) globals", .{self.globals.count()});
    log.debug("Merged ({d}) tables", .{self.tables.count()});
}

/// Deduplicates function signatures into the final type section and rewrites
/// each (imported or defined) function's type index to the merged table.
fn mergeTypes(self: *Wasm, gpa: Allocator) !void {
    log.debug("Merging types", .{});
    for (self.resolved_symbols.keys()) |sym_with_loc| {
        const object = self.objects.items[sym_with_loc.file orelse continue]; // synthetic symbols do not need to be merged
        const symbol: Symbol = object.symtable[sym_with_loc.sym_index];
        if (symbol.tag == .function) {
            if (symbol.isUndefined()) {
                log.debug("Adding type from extern function '{s}'", .{symbol.name});
                const value = &self.imports.imported_functions.values()[symbol.index];
                value.type = try self.types.append(gpa, object.types[value.type]);
                continue;
            }
            log.debug("Adding type from function '{s}'", .{symbol.name});
            const func = &self.functions.items.items[symbol.index - self.imports.functionCount()];
            func.type_index = try self.types.append(gpa, object.types[func.type_index]);
        }
    }
    log.debug("Completed building types. Total count: ({d})", .{self.types.count()});
}

/// Builds the export section from all exported symbols. Only function
/// exports are supported so far; memory is exported unless it is imported.
fn setupExports(self: *Wasm, gpa: Allocator) !void {
    log.debug("Building exports from symbols", .{});

    // When importing memory option is false,
    // we export the memory.
    if (!self.options.import_memory) {
        try self.exports.append(gpa, .{ .name = "memory", .kind = .memory, .index = 0 });
    }

    var symbol_it = SymbolIterator.init(self);
    while (symbol_it.next()) |entry| {
        const symbol = entry.symbol;
        if (!symbol.isExported()) continue;

        var name: []const u8 = symbol.name;
        var exported: std.wasm.Export = undefined;
        if (symbol.tag == .function) {
            exported = .{ .name = name, .kind = .function, .index = symbol.index };
        } else {
            log.warn("TODO: Export non-functions type({s}) name={s}", .{
                @tagName(symbol.tag),
                name,
            });
            continue;
        }

        log.debug("Appending export from symbol '{s}' using name: '{s}' index: {d}", .{
            symbol.name, name, symbol.index,
        });
        try self.exports.append(gpa, exported);
        try self.exports.appendSymbol(gpa, entry.symbol);
    }
    log.debug("Completed building exports. Total count: ({d})", .{self.exports.count()});
}

/// Creates symbols that are made by the linker, rather than the compiler/object file
fn setupLinkerSymbols(self: *Wasm, gpa: Allocator) !void {
    var symbol: Symbol = .{
        .flags = 0,
        .name = "__stack_pointer",
        .tag = .global,
        .index = 0,
    };

    // The stack pointer is a mutable i32 global; its init value is patched
    // later by `setupMemory` once the stack location is known.
    const global: std.wasm.Global = .{
        .init = .{ .i32_const = 0 },
        .global_type = .{ .valtype = .i32, .mutable = true },
    };

    symbol.index = try self.globals.append(gpa, 0, global);

    const sym_index = @intCast(u32, self.synthetic_symbols.count());
    // `.file = null` marks the symbol as synthetic (no backing object).
    const loc: SymbolWithLoc = .{ .sym_index = sym_index, .file = null };
    try self.synthetic_symbols.putNoClobber(gpa, symbol.name, symbol);
    try self.resolved_symbols.putNoClobber(gpa, loc, {});
    try self.global_symbols.putNoClobber(gpa, symbol.name, loc);
}

/// Iterates over every symbol of every parsed object, in object order.
const SymbolIterator = struct {
    symbol_index: u32,
    file_index: u16,
    wasm: *Wasm,

    const Entry = struct {
        sym_index: u32,
        file_index: u16,
        symbol: *Symbol,
    };

    fn init(wasm_bin: *Wasm) SymbolIterator {
        return .{ .symbol_index = 0, .file_index = 0, .wasm = wasm_bin };
    }

    /// Returns the next symbol, or null once all objects are exhausted.
    fn next(self: *SymbolIterator) ?Entry {
        if (self.file_index >= self.wasm.objects.items.len) return null;
        const object: *Object = &self.wasm.objects.items[self.file_index];
        if (self.symbol_index >= object.symtable.len) {
            // Current object exhausted; move to the next one.
            self.file_index += 1;
            self.symbol_index = 0;
            return self.next();
        }

        const symbol = &object.symtable[self.symbol_index];
        defer self.symbol_index += 1;
        return Entry{
            .sym_index = self.symbol_index,
            .file_index = self.file_index,
            .symbol = symbol,
        };
    }
};

/// Builds the import section from every resolved symbol that requires an
/// import. The indirect function table is only imported when --import-table
/// is set, and is an error to be missing in that case.
fn mergeImports(self: *Wasm, gpa: Allocator) !void {
    const maybe_func_table = self.global_symbols.get("__indirect_function_table");
    if (self.options.import_table) {
        const sym_with_loc = maybe_func_table orelse {
            log.err("Required import __indirect_function_table is missing from object files", .{});
            return error.MissingSymbol;
        };
        try self.imports.appendSymbol(gpa, self, sym_with_loc);
    }

    for (self.resolved_symbols.keys()) |sym_with_loc| {
        const symbol = sym_with_loc.getSymbol(self);
        if (symbol.tag != .data) {
            if (!symbol.requiresImport()) {
                continue;
            }
            if (std.mem.eql(u8, symbol.name, "__indirect_function_table")) {
                continue; // already handled above when --import-table is set
            }
            log.debug("Symbol '{s}' will be imported", .{symbol.name});
            try self.imports.appendSymbol(gpa, self, sym_with_loc);
        }
    }
}

/// Sets up the memory section of the wasm module, as well as the stack.
fn setupMemory(self: *Wasm) !void {
    log.debug("Setting up memory layout", .{});
    const page_size = 64 * 1024;
    const stack_size = self.options.stack_size orelse page_size * 1;
    const stack_alignment = 16;
    const stack_first = self.options.stack_first;
    var memory_ptr: u32 = 0;
    if (!stack_first and self.options.global_base != null) {
        memory_ptr = self.options.global_base.?;
    }

    if (stack_first) {
        memory_ptr = std.mem.alignForwardGeneric(u32, memory_ptr, stack_alignment);
        memory_ptr += stack_size;
        // set stack value on global
        if (self.synthetic_symbols.get("__stack_pointer")) |stack_pointer| {
            const global: *std.wasm.Global = &self.globals.items.items[stack_pointer.index];
            global.init = .{ .i32_const = @bitCast(i32, memory_ptr) };
        }
    }

    // Lay out every data segment after the (optional) stack region.
    var offset: u32 = memory_ptr;
    for (self.segments.items) |*segment, i| {
        // skip 'code' segments
        if (self.code_section_index) |index| {
            if (index == i) continue;
        }
        memory_ptr = std.mem.alignForwardGeneric(u32, memory_ptr, segment.alignment);
        memory_ptr += segment.size;
        segment.offset = offset;
        offset += segment.size;
    }

    if (!stack_first) {
        memory_ptr = std.mem.alignForwardGeneric(u32, memory_ptr, stack_alignment);
        memory_ptr += stack_size;
        // set stack value on global
        if (self.synthetic_symbols.get("__stack_pointer")) |stack_pointer| {
            const global: *std.wasm.Global = &self.globals.items.items[stack_pointer.index];
            global.init = .{ .i32_const = @bitCast(i32, memory_ptr) };
        }
    }

    // Setup the max amount of pages
    const max_memory_allowed: u32 = (1 << 32) - 1;

    if (self.options.initial_memory) |initial_memory| {
        if (!std.mem.isAligned(initial_memory, page_size)) {
            log.err("Initial memory must be {d}-byte aligned", .{page_size});
            return error.MissAlignment;
        }
        if (memory_ptr > initial_memory) {
            log.err("Initial memory too small, must be at least {d} bytes", .{memory_ptr});
            return error.MemoryTooSmall;
        }
        if (initial_memory > max_memory_allowed) {
            log.err("Initial memory exceeds maximum memory {d}", .{max_memory_allowed});
            return error.MemoryTooBig;
        }
        memory_ptr = initial_memory;
    }

    // In case we do not import memory, but define it ourselves,
    // set the minimum amount of pages on the memory section.
    self.memories.limits.min = std.mem.alignForwardGeneric(u32, memory_ptr, page_size) / page_size;
    log.debug("Total memory pages: {d}", .{self.memories.limits.min});

    if (self.options.max_memory) |max_memory| {
        if (!std.mem.isAligned(max_memory, page_size)) {
            log.err("Maximum memory must be {d}-byte aligned", .{page_size});
            return error.MissAlignment;
        }
        if (memory_ptr > max_memory) {
            // Fixed typo in user-facing message ("Maxmimum" -> "Maximum").
            log.err("Maximum memory too small, must be at least {d} bytes", .{memory_ptr});
            return error.MemoryTooSmall;
        }
        if (max_memory > max_memory_allowed) {
            // Fixed typo in user-facing message ("maxmium" -> "maximum").
            log.err("Maximum memory exceeds maximum amount {d}", .{max_memory_allowed});
            return error.MemoryTooBig;
        }
        self.memories.limits.max = max_memory / page_size;
        log.debug("Maximum memory pages: {d}", .{self.memories.limits.max});
    }
}

/// From a given object's index and the index of the segment, returns the corresponding
/// index of the segment within the final data section. When the segment does not yet
/// exist, a new one will be initialized and appended. The new index will be returned in that case.
pub fn getMatchingSegment(self: *Wasm, gpa: Allocator, object_index: u16, relocatable_index: u32) !u32 {
    const object: Object = self.objects.items[object_index];
    const relocatable_data = object.relocatable_data[relocatable_index];
    // Index a newly created segment would get (appended at the end).
    const index = @intCast(u32, self.segments.items.len);
    switch (relocatable_data.type) {
        .data => {
            const segment_info = object.segment_info[relocatable_data.index];
            // With --merge-data-segments, segments sharing an output name
            // (per `outputName`) map to a single final segment.
            const segment_name = if (self.options.merge_data_segments)
                segment_info.outputName()
            else
                segment_info.name;
            const result = try self.data_segments.getOrPut(gpa, segment_name);
            if (!result.found_existing) {
                result.value_ptr.* = index;
                try self.segments.append(gpa, .{
                    .alignment = 1,
                    .size = 0,
                    .offset = 0,
                });
                return index;
            } else return result.value_ptr.*;
        },
        // All code shares a single segment, created on first use.
        .code => return self.code_section_index orelse blk: {
            self.code_section_index = index;
            try self.segments.append(gpa, .{
                .alignment = 1,
                .size = 0,
                .offset = 0,
            });
            break :blk index;
        },
        .custom => @panic("TODO: Custom section relocation"),
    }
}

// Walks each segment's atom chain, assigning every atom an aligned offset
// within its segment and recording the segment's final (aligned) size.
fn allocateAtoms(self: *Wasm) !void {
    var it = self.atoms.iterator();
    while (it.next()) |entry| {
        const segment_index = entry.key_ptr.*;
        const segment: *Segment = &self.segments.items[segment_index];
        var atom: *Atom = entry.value_ptr.*.getFirst();
        log.debug("Allocating atoms for segment '{d}'", .{segment_index});
        var offset: u32 = 0;
        while (true) {
            // Offsets are relative to the start of the segment.
            offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment);
            atom.offset = offset;
            const object: *Object = &self.objects.items[atom.file];
            const symbol = &object.symtable[atom.sym_index];
            log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
                symbol.name,
                offset,
                offset + atom.size,
                atom.size,
            });
            offset += atom.size;
            atom = atom.next orelse break;
        }
        segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
    }
}

// Applies relocations to every atom's data. Must run after symbols have
// been resolved and atoms have been allocated their final offsets.
fn relocateAtoms(self: *Wasm) !void {
    var it = self.atoms.valueIterator();
    while (it.next()) |next_atom| {
        var atom: *Atom = next_atom.*.getFirst();
        while (true) {
            // First perform relocations to rewrite the binary data
            try atom.resolveRelocs(self);
            atom = atom.next orelse break;
        }
    }
}

// Locates the entry-point symbol (default "_start") and marks it exported.
// Errors if it is missing (unless --no-entry) or is not a function.
fn setupStart(self: *Wasm) !void {
    if (self.options.no_entry) return;
    const entry_name = self.options.entry_name orelse "_start";
    const symbol_with_loc: SymbolWithLoc = self.global_symbols.get(entry_name) orelse {
        log.err("Entry symbol '{s}' does not exist, use '--no-entry' to suppress", .{entry_name});
        return error.MissingSymbol;
    };
    const symbol = symbol_with_loc.getSymbol(self);
    if (symbol.tag != .function) {
        log.err("Entry symbol '{s}' is not a function", .{entry_name});
        return error.InvalidEntryKind;
    }

    // Simply export the symbol as the start function is reserved
    // for synthetic symbols such as __wasm_start, __wasm_init_memory, and
    // __wasm_apply_global_relocs
    symbol.setFlag(.WASM_SYM_EXPORTED);
}
src/Wasm.zig
const std = @import("std");

//--------------------------------------------------------------------------------------------------
/// Book-keeping for one letter pair of the polymer (AoC 2021 day 14).
const Entry = struct {
    // number of times this pair currently occurs in the polymer
    total_count: u64,
    // pairs spawned into this entry during the current expansion pass
    spawn_count: u64,
    // pair formed by (left letter, inserted letter)
    spawn_pair_left: [2]u8,
    // pair formed by (inserted letter, right letter)
    spawn_pair_right: [2]u8,
};
//--------------------------------------------------------------------------------------------------
pub fn main() anyerror!void {
    var timer = try std.time.Timer.start();

    const file = std.fs.cwd().openFile("data/day14_input.txt", .{}) catch |err| {
        // Bug fix: the previous code fell back to using the stderr handle as
        // the input file, which can never contain puzzle input. Fail fast.
        std.debug.print("unable to open file: {e}\n", .{err});
        return err;
    };
    defer file.close();

    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    var allocator = arena.allocator();

    // Maps every known pair "XY" to its counts and the two pairs an
    // insertion rule spawns from it.
    var map = std.AutoHashMap([2]u8, Entry).init(allocator);
    defer {
        map.deinit();
    }

    var template_buf: [20]u8 = undefined;
    var template_slice: ?[]u8 = undefined;
    {
        var reader = std.io.bufferedReader(file.reader());
        var istream = reader.reader();
        var buf: [20]u8 = undefined;

        // Read template
        template_slice = try istream.readUntilDelimiterOrEof(&template_buf, '\n');
        std.debug.assert(template_slice != null);

        // Read insertion rules ("XY -> Z")
        while (try istream.readUntilDelimiterOrEof(&buf, '\n')) |line| {
            if (line.len == 0) {
                continue;
            }
            var it = std.mem.split(u8, line, " -> ");
            const pattern = it.next().?;
            const insertion = it.next().?;

            // Add all rules into the map; "XY -> Z" spawns "XZ" and "ZY".
            const entry = Entry{
                .total_count = 0,
                .spawn_count = 0,
                .spawn_pair_left = [2]u8{ pattern[0], insertion[0] },
                .spawn_pair_right = [2]u8{ insertion[0], pattern[1] },
            };
            var key = [2]u8{ pattern[0], pattern[1] };
            try map.put(key, entry);
        }
    }

    // Prime map with initial entries from the template
    // NOTE: The second iteration will create a pair that duplicates the last letter
    // from the first iteration e.g. CFBV -> CF FB BV
    // This means every letter is duplicated by the pairing except the first and last.
    // This needs to be accounted for during the counting stage.
    {
        var key: [2]u8 = undefined;
        var i: usize = 1;
        while (i < template_slice.?.len) : (i += 1) {
            key[0] = template_slice.?[i - 1];
            key[1] = template_slice.?[i];
            map.getPtr(key).?.total_count += 1;
        }
    }

    // Apply steps
    {
        const part1_iterations: u32 = 10;
        const part2_iterations: u32 = 40;
        var i: usize = 0;
        while (i < part2_iterations) : (i += 1) {
            if (i == part1_iterations) {
                std.log.info("Part 1: {d}", .{calc_min_max_diff(map, template_slice.?)});
            }

            // Expand each `current` pair of letters to spawn 2 new pairs
            // These 2 new pairs will replace the current pair on the next iteration
            // NOTE: this means from 2 letters we spawn 4 letters (not just 3)
            // e.g. CF FB BV -> CxxF FyyB BzzV
            // Therefore we continue to have double the amount of letters (excluding first/last).
            {
                var it = map.iterator();
                while (it.next()) |pair| {
                    const value_ptr = pair.value_ptr;
                    const left = &value_ptr.spawn_pair_left;
                    const right = &value_ptr.spawn_pair_right;
                    map.getPtr(left.*).?.spawn_count += value_ptr.total_count;
                    map.getPtr(right.*).?.spawn_count += value_ptr.total_count;
                }
            }

            // Now spawn all new pairs we found in the last pass
            {
                var it = map.iterator();
                while (it.next()) |pair| {
                    const value_ptr = pair.value_ptr;
                    // Replace `current` pairs with only the pairs that were spawned
                    // in this iteration.
                    value_ptr.total_count = value_ptr.spawn_count;
                    value_ptr.spawn_count = 0;
                }
            }
        }
    }

    std.log.info("Part 2: {d}", .{calc_min_max_diff(map, template_slice.?)});
    std.log.info("Completed in {d:.2}ms\n", .{@intToFloat(f32, timer.lap()) / 1.0e+6});
}
//--------------------------------------------------------------------------------------------------
/// Returns (most common letter count) - (least common letter count) for the
/// polymer described by `map`, given the original `template`.
/// Assumes all letters are uppercase 'A'..'Z' (26-letter histogram).
fn calc_min_max_diff(map: std.AutoHashMap([2]u8, Entry), template: []u8) u64 {
    var letters = [_]u64{0} ** 27;

    // As we have double counted the letters except for the first and last,
    // we should increase the count of the first and last letters, so that
    // everything is double counted (for now).
    letters[template[0] - 'A'] += 1;
    letters[template[template.len - 1] - 'A'] += 1;

    // For all the `current` pairs, count the individual letters
    var it = map.iterator();
    while (it.next()) |pair| {
        const key = pair.key_ptr.*;
        const value_ptr = pair.value_ptr;
        letters[key[0] - 'A'] += value_ptr.total_count;
        letters[key[1] - 'A'] += value_ptr.total_count;
    }

    var min: u64 = std.math.maxInt(u64);
    var max: u64 = 0;

    // Find the max and mins, ignoring letters that never occur
    for (letters) |count| {
        if (count != 0 and min > count) {
            min = count;
        }
        if (max < count) {
            max = count;
        }
    }

    // NOTE: half the counts because we double counted everything.
    return (max - min) / 2;
}
//--------------------------------------------------------------------------------------------------
src/day14.zig
const std = @import("std");
const mem = std.mem;
const ArrayList = std.ArrayList;
const StringHashMap = std.hash_map.StringHashMap;
const LinearFifo = std.fifo.LinearFifo;

const Token = @import("token.zig").Token;
const ParseError = @import("parse_error.zig").ParseError;
const buildNamedCharacterReferenceTable = @import("namedCharacterReference.zig").buildNamedCharacterReferenceTable;

/// Represents the state of the HTML tokenizer as described
/// [here](https://html.spec.whatwg.org/multipage/parsing.html#tokenization)
pub const Tokenizer = struct {
    const Self = @This();

    /// The current state of the parser.
    /// One variant per tokenizer state in the WHATWG HTML spec, in spec order.
    pub const State = enum {
        Data,
        RCDATA,
        RAWTEXT,
        ScriptData,
        PLAINTEXT,
        TagOpen,
        EndTagOpen,
        TagName,
        RCDATALessThanSign,
        RCDATAEndTagOpen,
        RCDATAEndTagName,
        RAWTEXTLessThanSign,
        RAWTEXTEndTagOpen,
        RAWTEXTEndTagName,
        ScriptDataLessThanSign,
        ScriptDataEndTagOpen,
        ScriptDataEndTagName,
        ScriptDataEscapeStart,
        ScriptDataEscapeStartDash,
        ScriptDataEscaped,
        ScriptDataEscapedDash,
        ScriptDataEscapedDashDash,
        ScriptDataEscapedLessThanSign,
        ScriptDataEscapedEndTagOpen,
        ScriptDataEscapedEndTagName,
        ScriptDataDoubleEscapeStart,
        ScriptDataDoubleEscaped,
        ScriptDataDoubleEscapedDash,
        ScriptDataDoubleEscapedDashDash,
        ScriptDataDoubleEscapedLessThanSign,
        ScriptDataDoubleEscapeEnd,
        BeforeAttributeName,
        AttributeName,
        AfterAttributeName,
        BeforeAttributeValue,
        AttributeValueDoubleQuoted,
        AttributeValueSingleQuoted,
        AttributeValueUnquoted,
        AfterAttributeValueQuoted,
        SelfClosingStartTag,
        BogusComment,
        MarkupDeclarationOpen,
        CommentStart,
        CommentStartDash,
        Comment,
        CommentLessThanSign,
        CommentLessThanSignBang,
        CommentLessThanSignBangDash,
        CommentLessThanSignBangDashDash,
        CommentEndDash,
        CommentEnd,
        CommentEndBang,
        DOCTYPE,
        BeforeDOCTYPEName,
        DOCTYPEName,
        AfterDOCTYPEName,
        AfterDOCTYPEPublicKeyword,
        BeforeDOCTYPEPublicIdentifier,
        DOCTYPEPublicIdentifierDoubleQuoted,
        DOCTYPEPublicIdentifierSingleQuoted,
        AfterDOCTYPEPublicIdentifier,
        BetweenDOCTYPEPublicAndSystemIdentifiers,
        AfterDOCTYPESystemKeyword,
        BeforeDOCTYPESystemIdentifier,
        DOCTYPESystemIdentifierDoubleQuoted,
        DOCTYPESystemIdentifierSingleQuoted,
        AfterDOCTYPESystemIdentifier,
        BogusDOCTYPE,
        CDATASection,
        CDATASectionBracket,
        CDATASectionEnd,
        CharacterReference,
        NamedCharacterReference,
        AmbiguousAmpersand,
        NumericCharacterReference,
        HexadecimalCharacterReferenceStart,
        DecimalCharacterReferenceStart,
        HexadecimalCharacterReference,
        DecimalCharacterReference,
        NumericCharacterReferenceEnd,
    };

    // Intermediate type necessary to be able to store ParseError's in a LinearFifo
    // See https://github.com/ziglang/zig/issues/5820
    // NOTE(review): std.meta.IntType was deprecated in later Zig versions in
    // favor of std.meta.Int — confirm against the Zig version this targets.
    const ParseErrorIntType = std.meta.IntType(false, @sizeOf(anyerror) * 8);

    allocator: *mem.Allocator,
    // Current tokenizer state machine position.
    state: State = .Data,
    // State to return to after a character-reference detour.
    returnState: ?State = null,
    // TODO: This could potentially use .Static if we can guarantee some maximum number of tokens emitted at a time
    // Tokens emitted but not yet handed out by nextToken.
    backlog: LinearFifo(Token, .Dynamic),
    // Parse errors queued for reporting (stored as ints, see above).
    errorQueue: LinearFifo(ParseErrorIntType, .Dynamic),
    // denotes if contents have been heap allocated (from a file)
    allocated: bool,
    filename: []const u8,
    // The full input being tokenized.
    contents: []const u8,
    // Current position within `contents` (1-based line, column; byte index).
    line: usize,
    column: usize,
    index: usize,
    // When true, the next read re-consumes the current character.
    reconsume: bool = false,
    temporaryBuffer: ArrayList(u8),
    lastEmittedStartTag: ?Token = null,
    currentToken: IncompleteToken,
    // Accumulator for numeric character references.
    characterReferenceCode: usize = 0,
    // Maps named references (e.g. "amp;") to their code points.
    namedCharacterReferenceTable: StringHashMap(u21),

    /// Create a new {{Tokenizer}} instance using a file.
pub fn initWithFile(allocator: *mem.Allocator, filename: []const u8) !Tokenizer { var contents = try std.fs.cwd().readFileAlloc(allocator, filename, std.math.maxInt(usize)); var tokenizer = try Tokenizer.initWithString(allocator, contents); tokenizer.backlog = LinearFifo(Token, .Dynamic).init(allocator); tokenizer.errorQueue = LinearFifo(ParseErrorIntType, .Dynamic).init(allocator); tokenizer.filename = filename; tokenizer.allocated = true; tokenizer.temporaryBuffer = ArrayList(u8).init(allocator); tokenizer.namedCharacterReferenceTable = buildNamedCharacterReferenceTable(allocator); return tokenizer; } /// Create a new {{Tokenizer}} instance using a string. pub fn initWithString(allocator: *mem.Allocator, str: []const u8) !Tokenizer { return Tokenizer{ .allocator = allocator, .allocated = false, .backlog = LinearFifo(Token, .Dynamic).init(allocator), .errorQueue = LinearFifo(ParseErrorIntType, .Dynamic).init(allocator), .temporaryBuffer = ArrayList(u8).init(allocator), .namedCharacterReferenceTable = buildNamedCharacterReferenceTable(allocator), .currentToken = IncompleteToken.init(allocator), .filename = "", .contents = str, .line = 1, .column = 0, .index = 0, }; } pub fn deinit(self: Self) void { if (self.allocated) { self.allocator.free(self.contents); } } pub fn reset(self: *Self) void { self.line = 1; self.column = 0; self.index = 0; self.deinit(); } /// null being returned always signifies EOF pub fn nextToken(self: *Self) ParseError!Token { // Clear out any backlog before continuing if (self.hasQueuedErrorOrToken()) { return self.popQueuedErrorOrToken(); } while (true) { switch (self.state) { // 12.2.5.1 Data state .Data => { if (self.nextChar()) |next_char| { switch (next_char) { '&' => { self.returnState = .Data; self.state = .CharacterReference; }, '<' => { self.state = .TagOpen; }, 0x00 => { self.emitToken(Token { .Character = .{ .data = 0x00 } }); return ParseError.UnexpectedNullCharacter; }, else => { self.emitToken(Token { .Character = .{ .data = 
next_char } }); return self.popQueuedErrorOrToken(); } } } else { self.emitToken(Token.EndOfFile); return self.popQueuedErrorOrToken(); } }, // 12.2.5.2 RCDATA state .RCDATA => { if (self.nextChar()) |next_char| { switch (next_char) { '&' => { self.returnState = .RCDATA; self.state = .CharacterReference; }, '<' => { self.state = .RCDATALessThanSign; }, 0x00 => { self.emitToken(Token { .Character = .{ .data = '�' } }); return ParseError.UnexpectedNullCharacter; }, else => { self.emitToken(Token { .Character = .{ .data = next_char } }); return self.popQueuedErrorOrToken(); } } } else { self.emitToken(Token.EndOfFile); return self.popQueuedErrorOrToken(); } }, // 12.2.5.3 RAWTEXT state .RAWTEXT => { if (self.nextChar()) |next_char| { switch (next_char) { '<' => { self.state = .RAWTEXTLessThanSign; }, 0x00 => { self.emitToken(Token { .Character = .{ .data = '�' } }); return ParseError.UnexpectedNullCharacter; }, else => { self.emitToken(Token { .Character = .{ .data = next_char } }); return self.popQueuedErrorOrToken(); } } } else { self.emitToken(Token.EndOfFile); return self.popQueuedErrorOrToken(); } }, // 12.2.5.4 Script data state .ScriptData => { if (self.nextChar()) |next_char| { switch (next_char) { '<' => { self.state = .ScriptDataLessThanSign; }, 0x00 => { self.emitToken(Token { .Character = .{ .data = '�' } }); return ParseError.UnexpectedNullCharacter; }, else => { self.emitToken(Token { .Character = .{ .data = next_char } }); return self.popQueuedErrorOrToken(); } } } else { self.emitToken(Token.EndOfFile); return self.popQueuedErrorOrToken(); } }, // 12.2.5.5 PLAINTEXT state .PLAINTEXT => { if (self.nextChar()) |next_char| { switch (next_char) { 0x00 => { self.emitToken(Token { .Character = .{ .data = '�' } }); return ParseError.UnexpectedNullCharacter; }, else => { self.emitToken(Token { .Character = .{ .data = next_char } }); return self.popQueuedErrorOrToken(); } } } else { self.emitToken(Token.EndOfFile); return self.popQueuedErrorOrToken(); } }, // 
12.2.5.6 Tag open state .TagOpen => { if (self.nextChar()) |next_char| { switch (next_char) { '!' => { self.state = .MarkupDeclarationOpen; }, '/' => { self.state = .EndTagOpen; }, '?' => { self.currentToken.create(.Comment); self.state = .BogusComment; self.reconsume = true; return ParseError.UnexpectedQuestionMarkInsteadOfTagName; }, else => { if (std.ascii.isAlpha(next_char)) { self.currentToken.create(.StartTag); self.state = .TagName; self.reconsume = true; } else { self.state = .Data; self.reconsume = true; self.emitToken(Token { .Character = .{ .data = '<' } }); return ParseError.InvalidFirstCharacterOfTagName; } } } } else { self.emitToken(Token{ .Character = .{ .data = '<' } }); self.emitToken(Token.EndOfFile); return ParseError.EofBeforeTagName; } }, // 12.2.5.7 End tag open state .EndTagOpen => { if (self.nextChar()) |next_char| { if (next_char == '>') { self.state = .Data; return ParseError.MissingEndTagName; } else if (std.ascii.isAlpha(next_char)) { self.currentToken.create(.EndTag); self.state = .TagName; self.reconsume = true; } else { self.currentToken.create(.Comment); self.reconsume = true; self.state = .BogusComment; return ParseError.InvalidFirstCharacterOfTagName; } } else { self.emitToken(Token{ .Character = .{ .data = '<' } }); self.emitToken(Token{ .Character = .{ .data = '/' } }); self.emitToken(Token.EndOfFile); return ParseError.EofBeforeTagName; } }, // 12.2.5.8 Tag name state .TagName => { if (self.nextChar()) |next_char| { switch (next_char) { '\t', 0x0A, 0x0C, ' ' => { self.state = .BeforeAttributeName; }, '/' => { self.state = .SelfClosingStartTag; }, '>' => { self.state = .Data; self.emitToken(self.currentToken.complete(self)); return self.popQueuedErrorOrToken(); }, 0x00 => { self.currentToken.tokenData.appendSlice("�") catch unreachable; return ParseError.UnexpectedNullCharacter; }, else => { var lowered = std.ascii.toLower(next_char); self.currentToken.tokenData.append(lowered) catch unreachable; } } } else { 
self.emitToken(Token.EndOfFile); return ParseError.EofInTag; } }, // 172.16.58.3 RCDATA less-than sign state .RCDATALessThanSign => { var next_char = self.nextChar(); if (next_char != null and next_char.? == '/') { self.temporaryBuffer.shrink(0); self.state = .RCDATAEndTagOpen; } else { self.reconsume = true; self.state = .RCDATA; self.emitToken(Token { .Character = .{ .data = '<' } }); return self.popQueuedErrorOrToken(); } }, // 172.16.31.100 RCDATA end tag open state .RCDATAEndTagOpen => { var next_char = self.nextChar(); if (next_char != null and std.ascii.isAlpha(next_char.?)) { self.currentToken.create(.EndTag); self.reconsume = true; self.state = .RCDATA; } else { self.emitToken(Token { .Character = .{ .data = '<' } }); self.emitToken(Token { .Character = .{ .data = '/' } }); self.reconsume = true; self.state = .RCDATA; return self.popQueuedErrorOrToken(); } }, // 172.16.17.32 RCDATA end tag name state .RCDATAEndTagName => { if (self.nextChar()) |next_char| { switch(next_char) { '\t', 0x0A, 0x0C, ' ' => { if (self.currentToken.isAppropriateEndTag(self)) { self.state = .BeforeAttributeName; continue; } // else fallthrough to anything else }, '/' => { if (self.currentToken.isAppropriateEndTag(self)) { self.state = .SelfClosingStartTag; continue; } // else fallthrough to anything else }, '>' => { if (self.currentToken.isAppropriateEndTag(self)) { self.state = .Data; continue; } // else fallthrough to anything else }, 'A'...'Z' => { self.currentToken.tokenData.append(std.ascii.toLower(next_char)) catch unreachable; self.temporaryBuffer.append(next_char) catch unreachable; continue; }, 'a'...'z' => { self.currentToken.tokenData.append(next_char) catch unreachable; self.temporaryBuffer.append(next_char) catch unreachable; continue; }, else => {}, // fallthrough } } // anything else self.emitToken(Token{ .Character = .{ .data = '<' } }); self.emitToken(Token{ .Character = .{ .data = '/' } }); for (self.temporaryBuffer.items) |char| { self.emitToken(Token{ 
.Character = .{ .data = char } });
            }
            self.reconsume = true;
            self.state = .RCDATA;
            return self.popQueuedErrorOrToken();
        },
        // 12.2.5.12 RAWTEXT less-than sign state
        .RAWTEXTLessThanSign => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.? == '/') {
                self.temporaryBuffer.shrink(0);
                self.state = .RAWTEXTEndTagOpen;
            } else {
                self.reconsume = true;
                self.state = .RAWTEXT;
                self.emitToken(Token { .Character = .{ .data = '<' } });
                return self.popQueuedErrorOrToken();
            }
        },
        // 12.2.5.13 RAWTEXT end tag open state
        .RAWTEXTEndTagOpen => {
            var next_char = self.nextChar();
            if (next_char != null and std.ascii.isAlpha(next_char.?)) {
                self.currentToken.create(.EndTag);
                self.reconsume = true;
                self.state = .RAWTEXTEndTagName;
            } else {
                self.reconsume = true;
                self.state = .RAWTEXT;
                self.emitToken(Token { .Character = .{ .data = '<' } });
                self.emitToken(Token { .Character = .{ .data = '/' } });
                return self.popQueuedErrorOrToken();
            }
        },
        // 12.2.5.14 RAWTEXT end tag name state
        .RAWTEXTEndTagName => {
            if (self.nextChar()) |next_char| {
                switch(next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .BeforeAttributeName;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    '/' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .SelfClosingStartTag;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    '>' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .Data;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    'A'...'Z' => {
                        self.currentToken.tokenData.append(std.ascii.toLower(next_char)) catch unreachable;
                        self.temporaryBuffer.append(next_char) catch unreachable;
                        continue;
                    },
                    'a'...'z' => {
                        self.currentToken.tokenData.append(next_char) catch unreachable;
                        self.temporaryBuffer.append(next_char) catch unreachable;
                        continue;
                    },
                    else => {}, // fallthrough
                }
            }
            // anything else
            self.emitToken(Token{ .Character = .{ .data = '<' } });
            self.emitToken(Token{ .Character = .{ .data = '/' } });
            for (self.temporaryBuffer.items) |char| {
                self.emitToken(Token{ .Character = .{ .data = char } });
            }
            self.reconsume = true;
            self.state = .RAWTEXT;
            return self.popQueuedErrorOrToken();
        },
        // 12.2.5.15 Script data less-than sign state
        .ScriptDataLessThanSign => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '/' => {
                        self.temporaryBuffer.shrink(0);
                        self.state = .ScriptDataEndTagOpen;
                        continue;
                    },
                    '!' => {
                        self.state = .ScriptDataEscapeStart;
                        self.emitToken(Token { .Character = .{ .data = '<' } });
                        self.emitToken(Token { .Character = .{ .data = '!' } });
                        return self.popQueuedErrorOrToken();
                    },
                    else => {}, // fallthrough
                }
            }
            // anything else
            self.reconsume = true;
            self.state = .ScriptData;
            self.emitToken(Token { .Character = .{ .data = '<' } });
            return self.popQueuedErrorOrToken();
        },
        // 12.2.5.16 Script data end tag open state
        .ScriptDataEndTagOpen => {
            var next_char = self.nextChar();
            if (next_char != null and std.ascii.isAlpha(next_char.?)) {
                self.currentToken.create(.EndTag);
                self.reconsume = true;
                self.state = .ScriptDataEndTagName;
            } else {
                self.reconsume = true;
                self.state = .ScriptData;
                self.emitToken(Token { .Character = .{ .data = '<' } });
                self.emitToken(Token { .Character = .{ .data = '/' } });
                return self.popQueuedErrorOrToken();
            }
        },
        // 12.2.5.17 Script data end tag name state
        .ScriptDataEndTagName => {
            if (self.nextChar()) |next_char| {
                switch(next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .BeforeAttributeName;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    '/' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .SelfClosingStartTag;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    '>' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .Data;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    'A'...'Z' => {
                        self.currentToken.tokenData.append(std.ascii.toLower(next_char)) catch unreachable;
                        self.temporaryBuffer.append(next_char) catch unreachable;
                        continue;
                    },
                    'a'...'z' => {
                        self.currentToken.tokenData.append(next_char) catch unreachable;
                        self.temporaryBuffer.append(next_char) catch unreachable;
                        continue;
                    },
                    else => {}, // fallthrough
                }
            }
            // anything else
            self.emitToken(Token{ .Character = .{ .data = '<' } });
            self.emitToken(Token{ .Character = .{ .data = '/' } });
            for (self.temporaryBuffer.items) |char| {
                self.emitToken(Token{ .Character = .{ .data = char } });
            }
            self.reconsume = true;
            self.state = .ScriptData;
            return self.popQueuedErrorOrToken();
        },
        // 12.2.5.18 Script data escape start state
        .ScriptDataEscapeStart => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.? == '-') {
                self.state = .ScriptDataEscapeStartDash;
                self.emitToken(Token { .Character = .{ .data = '-' } });
                return self.popQueuedErrorOrToken();
            } else {
                self.reconsume = true;
                self.state = .ScriptData;
            }
        },
        // 12.2.5.19 Script data escape start dash state
        .ScriptDataEscapeStartDash => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.?
== '-') {
                self.state = .ScriptDataEscapedDashDash;
                self.emitToken(Token { .Character = .{ .data = '-' } });
                return self.popQueuedErrorOrToken();
            } else {
                self.reconsume = true;
                self.state = .ScriptData;
            }
        },
        // 12.2.5.20 Script data escaped state
        .ScriptDataEscaped => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.state = .ScriptDataEscapedDash;
                        self.emitToken(Token { .Character = .{ .data = '-' } });
                        return self.popQueuedErrorOrToken();
                    },
                    '<' => {
                        self.state = .ScriptDataEscapedLessThanSign;
                    },
                    0x00 => {
                        self.emitToken(Token { .Character = .{ .data = '�' } });
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInScriptHTMLCommentLikeText;
            }
        },
        // 12.2.5.21 Script data escaped dash state
        .ScriptDataEscapedDash => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.state = .ScriptDataEscapedDashDash;
                        self.emitToken(Token { .Character = .{ .data = '-' } });
                        return self.popQueuedErrorOrToken();
                    },
                    '<' => {
                        self.state = .ScriptDataEscapedLessThanSign;
                    },
                    0x00 => {
                        self.emitToken(Token { .Character = .{ .data = '�' } });
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.state = .ScriptDataEscaped;
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInScriptHTMLCommentLikeText;
            }
        },
        // 12.2.5.22 Script data escaped dash dash state
        .ScriptDataEscapedDashDash => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.emitToken(Token { .Character = .{ .data = '-' } });
                        return self.popQueuedErrorOrToken();
                    },
                    '<' => {
                        self.state = .ScriptDataEscapedLessThanSign;
                    },
                    '>' => {
                        self.state = .ScriptData;
                        self.emitToken(Token { .Character = .{ .data = '>' } });
                    },
                    0x00 => {
                        self.state = .ScriptDataEscaped;
                        self.emitToken(Token { .Character = .{ .data = '�' } });
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.state = .ScriptDataEscaped;
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInScriptHTMLCommentLikeText;
            }
        },
        // 12.2.5.23 Script data escaped less-than sign state
        .ScriptDataEscapedLessThanSign => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.? == '/') {
                self.temporaryBuffer.shrink(0);
                self.state = .ScriptDataEscapedEndTagOpen;
            } else if (next_char != null and std.ascii.isAlpha(next_char.?)) {
                self.temporaryBuffer.shrink(0);
                self.reconsume = true;
                self.state = .ScriptDataDoubleEscapeStart;
                self.emitToken(Token { .Character = .{ .data = '<' } });
                return self.popQueuedErrorOrToken();
            } else {
                self.reconsume = true;
                self.state = .ScriptDataEscaped;
                self.emitToken(Token { .Character = .{ .data = '<' } });
                return self.popQueuedErrorOrToken();
            }
        },
        // 12.2.5.24 Script data escaped end tag open state
        .ScriptDataEscapedEndTagOpen => {
            var next_char = self.nextChar();
            if (next_char != null and std.ascii.isAlpha(next_char.?)) {
                self.currentToken.create(.EndTag);
                self.reconsume = true;
                self.state = .ScriptDataEscapedEndTagName;
            } else {
                self.reconsume = true;
                self.state = .ScriptDataEscaped;
                self.emitToken(Token { .Character = .{ .data = '<' } });
                self.emitToken(Token { .Character = .{ .data = '/' } });
                return self.popQueuedErrorOrToken();
            }
        },
        // 12.2.5.25 Script data escaped end tag name state
        .ScriptDataEscapedEndTagName => {
            if (self.nextChar()) |next_char| {
                switch(next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .BeforeAttributeName;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    '/' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .SelfClosingStartTag;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    '>' => {
                        if (self.currentToken.isAppropriateEndTag(self)) {
                            self.state = .Data;
                            continue;
                        }
                        // else fallthrough to anything else
                    },
                    'A'...'Z' => {
                        self.currentToken.tokenData.append(std.ascii.toLower(next_char)) catch unreachable;
                        self.temporaryBuffer.append(next_char) catch unreachable;
                        continue;
                    },
                    'a'...'z' => {
                        self.currentToken.tokenData.append(next_char) catch unreachable;
                        self.temporaryBuffer.append(next_char) catch unreachable;
                        continue;
                    },
                    else => {}, // fallthrough
                }
            }
            // anything else
            self.emitToken(Token{ .Character = .{ .data = '<' } });
            self.emitToken(Token{ .Character = .{ .data = '/' } });
            for (self.temporaryBuffer.items) |char| {
                self.emitToken(Token{ .Character = .{ .data = char } });
            }
            self.reconsume = true;
            self.state = .ScriptDataEscaped;
            return self.popQueuedErrorOrToken();
        },
        // 12.2.5.26 Script data double escape start state
        .ScriptDataDoubleEscapeStart => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ', '/', '>' => {
                        if (mem.eql(u8, self.temporaryBuffer.items, "script")) {
                            self.state = .ScriptDataDoubleEscaped;
                        } else {
                            self.state = .ScriptDataEscaped;
                        }
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    },
                    else => if (std.ascii.isAlpha(next_char)) {
                        var lowered = std.ascii.toLower(next_char);
                        self.temporaryBuffer.append(lowered) catch unreachable;
                        self.emitToken(Token { .Character = .{ .data = lowered } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            }
            // anything else
            self.reconsume = true;
            self.state = .ScriptDataEscaped;
        },
        // 12.2.5.27 Script data double escaped state
        .ScriptDataDoubleEscaped => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.state = .ScriptDataDoubleEscapedDash;
                        self.emitToken(Token { .Character = .{ .data = '-' } });
                        return self.popQueuedErrorOrToken();
                    },
                    '<' => {
                        self.state = .ScriptDataDoubleEscapedLessThanSign;
                        self.emitToken(Token { .Character = .{ .data = '<' } });
                        return self.popQueuedErrorOrToken();
                    },
                    0x00 => {
                        self.emitToken(Token { .Character = .{ .data = '�' } });
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInScriptHTMLCommentLikeText;
            }
        },
        // 12.2.5.28 Script data double escaped dash state
        .ScriptDataDoubleEscapedDash => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.state = .ScriptDataDoubleEscapedDashDash;
                        self.emitToken(Token { .Character = .{ .data = '-' } });
                        return self.popQueuedErrorOrToken();
                    },
                    '<' => {
                        self.state = .ScriptDataDoubleEscapedLessThanSign;
                        self.emitToken(Token { .Character = .{ .data = '<' } });
                        return self.popQueuedErrorOrToken();
                    },
                    0x00 => {
                        self.state = .ScriptDataDoubleEscaped;
                        self.emitToken(Token { .Character = .{ .data = '�' } });
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.state = .ScriptDataDoubleEscaped;
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInScriptHTMLCommentLikeText;
            }
        },
        // 12.2.5.29 Script data double escaped dash dash state
        .ScriptDataDoubleEscapedDashDash => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.emitToken(Token { .Character = .{ .data = '-' } });
                        return self.popQueuedErrorOrToken();
                    },
                    '<' => {
                        self.state = .ScriptDataDoubleEscapedLessThanSign;
                        self.emitToken(Token { .Character = .{ .data = '<' } });
                        return self.popQueuedErrorOrToken();
                    },
                    '>' => {
                        self.state = .ScriptData;
                        self.emitToken(Token { .Character = .{ .data = '>' } });
                        return self.popQueuedErrorOrToken();
                    },
                    0x00 => {
                        self.state = .ScriptDataDoubleEscaped;
                        self.emitToken(Token { .Character = .{ .data = '�' } });
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.state = .ScriptDataDoubleEscaped;
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return
self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInScriptHTMLCommentLikeText;
            }
        },
        // 12.2.5.30 Script data double escaped less-than sign state
        .ScriptDataDoubleEscapedLessThanSign => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.? == '/') {
                self.temporaryBuffer.shrink(0);
                self.state = .ScriptDataDoubleEscapeEnd;
                self.emitToken(Token { .Character = .{ .data = '/' } });
                return self.popQueuedErrorOrToken();
            } else {
                self.reconsume = true;
                self.state = .ScriptDataDoubleEscaped;
            }
        },
        // 12.2.5.31 Script data double escape end state
        .ScriptDataDoubleEscapeEnd => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ', '/', '>' => {
                        if (mem.eql(u8, self.temporaryBuffer.items, "script")) {
                            self.state = .ScriptDataEscaped;
                        } else {
                            self.state = .ScriptDataDoubleEscaped;
                        }
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    },
                    else => if (std.ascii.isAlpha(next_char)) {
                        var lowered = std.ascii.toLower(next_char);
                        self.temporaryBuffer.append(lowered) catch unreachable;
                        self.emitToken(Token { .Character = .{ .data = lowered } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            }
            // anything else
            self.reconsume = true;
            self.state = .ScriptDataDoubleEscaped;
        },
        // 12.2.5.32 Before attribute name state
        .BeforeAttributeName => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing.
                    },
                    '/', '>' => {
                        self.state = .AfterAttributeName;
                        self.reconsume = true;
                    },
                    '=' => {
                        self.currentToken.startNewAttribute(self) catch unreachable;
                        self.currentToken.currentAttributeName.append(next_char) catch unreachable;
                        self.state = .AttributeName;
                        return ParseError.UnexpectedEqualsSignBeforeAttributeName;
                    },
                    else => {
                        self.currentToken.startNewAttribute(self) catch unreachable;
                        self.state = .AttributeName;
                        self.reconsume = true;
                        // startNewAttribute can emit an error
                        if (self.hasQueuedErrorOrToken()) return self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.reconsume = true;
                self.state = .AfterAttributeName;
            }
        },
        // 12.2.5.33 Attribute name state
        .AttributeName => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ', '/', '>' => {
                        self.state = .AfterAttributeName;
                        self.reconsume = true;
                    },
                    '=' => {
                        self.state = .BeforeAttributeValue;
                    },
                    0x00 => {
                        self.currentToken.currentAttributeName.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    '"', '\'', '<' => {
                        self.currentToken.currentAttributeName.append(next_char) catch unreachable;
                        return ParseError.UnexpectedCharacterInAttributeName;
                    },
                    else => {
                        self.currentToken.currentAttributeName.append(std.ascii.toLower(next_char)) catch unreachable;
                    }
                }
            } else {
                self.reconsume = true;
                self.state = .AfterAttributeName;
            }
        },
        // 12.2.5.34 After attribute name state
        .AfterAttributeName => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing.
                    },
                    '/' => {
                        self.state = .SelfClosingStartTag;
                    },
                    '=' => {
                        self.state = .BeforeAttributeValue;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    else => {
                        self.currentToken.startNewAttribute(self) catch unreachable;
                        self.state = .AttributeName;
                        self.reconsume = true;
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInTag;
            }
        },
        // 12.2.5.35 Before attribute value state
        .BeforeAttributeValue => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing.
                        continue;
                    },
                    '"' => {
                        self.state = .AttributeValueDoubleQuoted;
                        continue;
                    },
                    '\'' => {
                        self.state = .AttributeValueSingleQuoted;
                        continue;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.MissingAttributeValue;
                    },
                    else => {}, // fallthrough
                }
            }
            // anything else
            self.reconsume = true;
            self.state = .AttributeValueUnquoted;
        },
        // 12.2.5.36 Attribute value (double-quoted) state
        .AttributeValueDoubleQuoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '"' => {
                        self.state = .AfterAttributeValueQuoted;
                    },
                    '&' => {
                        self.returnState = .AttributeValueDoubleQuoted;
                        self.state = .CharacterReference;
                    },
                    0x00 => {
                        self.currentToken.currentAttributeValue.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.currentToken.currentAttributeValue.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInTag;
            }
        },
        // 12.2.5.37 Attribute value (single-quoted) state
        .AttributeValueSingleQuoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\'' => {
                        self.state = .AfterAttributeValueQuoted;
                    },
                    '&' => {
                        self.returnState = .AttributeValueSingleQuoted;
                        self.state = .CharacterReference;
                    },
                    0x00 => {
                        self.currentToken.currentAttributeValue.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.currentToken.currentAttributeValue.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInTag;
            }
        },
        // 12.2.5.38 Attribute value (unquoted) state
        .AttributeValueUnquoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.state = .BeforeAttributeName;
                    },
                    '&' => {
                        self.returnState = .AttributeValueUnquoted;
                        self.state = .CharacterReference;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    0x00 => {
                        self.currentToken.currentAttributeValue.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    '"', '\'', '<', '=', '`' => {
                        self.currentToken.currentAttributeValue.append(next_char) catch unreachable;
                        return ParseError.UnexpectedCharacterInUnquotedAttributeValue;
                    },
                    else => {
                        self.currentToken.currentAttributeValue.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInTag;
            }
        },
        // 12.2.5.39 After attribute value (quoted) state
        .AfterAttributeValueQuoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.state = .BeforeAttributeName;
                    },
                    '/' => {
                        self.state = .SelfClosingStartTag;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    else => {
                        self.state = .BeforeAttributeName;
                        self.reconsume = true;
                        return ParseError.MissingWhitespaceBetweenAttributes;
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInTag;
            }
        },
        // 12.2.5.40 Self-closing start tag state
        .SelfClosingStartTag => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '>' => {
                        self.state = .Data;
                        self.currentToken.selfClosing = true;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    else => {
                        self.reconsume = true;
                        self.state = .BeforeAttributeName;
                        return
ParseError.UnexpectedSolidusInTag;
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInTag;
            }
        },
        // 12.2.5.41 Bogus comment state
        .BogusComment => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    0x00 => {
                        self.currentToken.commentData.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.currentToken.commentData.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return self.popQueuedErrorOrToken();
            }
        },
        // 12.2.5.42 Markup declaration open state
        .MarkupDeclarationOpen => {
            var next_seven = self.peekN(7);
            if (next_seven.len >= 2 and mem.eql(u8, next_seven[0..2], "--")) {
                self.index += 2;
                self.column += 2;
                self.currentToken.create(.Comment);
                self.state = .CommentStart;
            } else if (std.ascii.eqlIgnoreCase(next_seven, "DOCTYPE")) {
                self.index += 7;
                self.column += 7;
                self.state = .DOCTYPE;
            } else if (mem.eql(u8, next_seven, "[CDATA[")) {
                // FIXME: Consume those characters. If there is an adjusted current node and it is not
                // an element in the HTML namespace, then switch to the CDATA section state.
                self.index += 7;
                self.column += 7;
                self.currentToken.create(.Comment);
                self.currentToken.commentData.appendSlice("[CDATA[") catch unreachable;
                self.state = .BogusComment;
                return ParseError.CDATAInHtmlContent;
            } else {
                self.state = .BogusComment;
                self.currentToken.create(.Comment);
                return ParseError.IncorrectlyOpenedComment;
            }
        },
        // 12.2.5.43 Comment start state
        .CommentStart => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.state = .CommentStartDash;
                        continue;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.AbruptClosingOfEmptyComment;
                    },
                    else => {}, // fallthrough
                }
            }
            // anything else
            self.reconsume = true;
            self.state = .Comment;
        },
        // 12.2.5.44 Comment start dash state
        .CommentStartDash => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.state = .CommentEnd;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.AbruptClosingOfEmptyComment;
                    },
                    else => {
                        self.currentToken.commentData.append('-') catch unreachable;
                        self.reconsume = true;
                        self.state = .Comment;
                    }
                }
            } else {
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInComment;
            }
        },
        // 12.2.5.45 Comment state
        .Comment => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '<' => {
                        self.currentToken.commentData.append(next_char) catch unreachable;
                        self.state = .CommentLessThanSign;
                    },
                    '-' => {
                        self.state = .CommentEndDash;
                    },
                    0x00 => {
                        self.currentToken.commentData.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.currentToken.commentData.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInComment;
            }
        },
        // 12.2.5.46 Comment less-than sign state
        .CommentLessThanSign => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '!' => {
                        self.currentToken.commentData.append('!') catch unreachable;
                        self.state = .CommentLessThanSignBang;
                        continue;
                    },
                    '<' => {
                        self.currentToken.commentData.append(next_char) catch unreachable;
                        continue;
                    },
                    else => {}, // fallthrough
                }
            }
            // anything else
            self.reconsume = true;
            self.state = .Comment;
        },
        // 12.2.5.47 Comment less-than sign bang state
        .CommentLessThanSignBang => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.? == '-') {
                self.state = .CommentLessThanSignBangDash;
            } else {
                self.reconsume = true;
                self.state = .Comment;
            }
        },
        // 12.2.5.48 Comment less-than sign bang dash state
        .CommentLessThanSignBangDash => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.? == '-') {
                self.state = .CommentLessThanSignBangDashDash;
            } else {
                self.reconsume = true;
                self.state = .CommentEndDash;
            }
        },
        // 12.2.5.49 Comment less-than sign bang dash dash state
        .CommentLessThanSignBangDashDash => {
            var next_char = self.nextChar();
            if (next_char == null or next_char.? == '>') {
                self.reconsume = true;
                self.state = .CommentEnd;
            } else {
                self.reconsume = true;
                self.state = .CommentEnd;
                return ParseError.NestedComment;
            }
        },
        // 12.2.5.50 Comment end dash state
        .CommentEndDash => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.? == '-') {
                self.state = .CommentEnd;
            } else if (next_char == null) {
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInComment;
            } else {
                self.currentToken.commentData.append('-') catch unreachable;
                self.reconsume = true;
                self.state = .Comment;
            }
        },
        // 12.2.5.51 Comment end state
        .CommentEnd => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    '!'
=> {
                        self.state = .CommentEndBang;
                    },
                    '-' => {
                        self.currentToken.commentData.append(next_char) catch unreachable;
                    },
                    else => {
                        self.currentToken.commentData.appendSlice("--") catch unreachable;
                        self.reconsume = true;
                        self.state = .Comment;
                    }
                }
            } else {
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInComment;
            }
        },
        // 12.2.5.52 Comment end bang state
        .CommentEndBang => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '-' => {
                        self.currentToken.commentData.appendSlice("--!") catch unreachable;
                        self.state = .CommentEndDash;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.IncorrectlyClosedComment;
                    },
                    else => {
                        self.currentToken.commentData.appendSlice("--!") catch unreachable;
                        self.reconsume = true;
                        self.state = .Comment;
                    }
                }
            } else {
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInComment;
            }
        },
        // 12.2.5.53 DOCTYPE state
        .DOCTYPE => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.state = .BeforeDOCTYPEName;
                    },
                    '>' => {
                        self.state = .BeforeDOCTYPEName;
                        self.reconsume = true;
                    },
                    else => {
                        self.state = .BeforeDOCTYPEName;
                        self.reconsume = true;
                        return ParseError.MissingWhitespaceBeforeDoctypeName;
                    }
                }
            } else {
                self.emitToken(Token { .DOCTYPE = .{ .forceQuirks = true } });
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.54 Before DOCTYPE name state
        .BeforeDOCTYPEName => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing.
                    },
                    0x00 => {
                        self.currentToken.create(.DOCTYPE);
                        self.currentToken.tokenData.appendSlice("�") catch unreachable;
                        self.state = .DOCTYPEName;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(Token { .DOCTYPE = .{ .forceQuirks = true } });
                        return ParseError.MissingDoctypeName;
                    },
                    else => {
                        self.currentToken.create(.DOCTYPE);
                        self.currentToken.tokenData.append(std.ascii.toLower(next_char)) catch unreachable;
                        self.state = .DOCTYPEName;
                    }
                }
            } else {
                self.emitToken(Token { .DOCTYPE = .{ .forceQuirks = true } });
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.55 DOCTYPE name state
        .DOCTYPEName => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.state = .AfterDOCTYPEName;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    0x00 => {
                        self.currentToken.tokenData.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        self.currentToken.tokenData.append(std.ascii.toLower(next_char)) catch unreachable;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.56 After DOCTYPE name state
        .AfterDOCTYPEName => {
            // delay consuming for the 'anything else' case
            if (self.peekChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.index += 1; // consume
                        self.column += 1;
                    },
                    '>' => {
                        self.index += 1; // consume
                        self.column += 1;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    else => {
                        var next_six = self.peekN(6);
                        if (std.ascii.eqlIgnoreCase(next_six, "PUBLIC")) {
                            self.index += 6;
                            self.column += 6;
                            self.state = .AfterDOCTYPEPublicKeyword;
                        } else if (std.ascii.eqlIgnoreCase(next_six, "SYSTEM")) {
                            self.index += 6;
                            self.column += 6;
                            self.state = .AfterDOCTYPESystemKeyword;
                        } else {
                            // reconsume, but since we peek'd to begin with, no need to actually set reconsume here
                            self.currentToken.forceQuirks = true;
                            self.state = .BogusDOCTYPE;
                            return ParseError.InvalidCharacterSequenceAfterDoctypeName;
                        }
                    }
                }
            } else {
                self.index += 1; // consume
                self.column += 1;
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.57 After DOCTYPE public keyword state
        .AfterDOCTYPEPublicKeyword => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.state = .BeforeDOCTYPEPublicIdentifier;
                    },
                    '"' => {
                        self.currentToken.publicIdentifier.shrink(0);
                        self.currentToken.publicIdentifierMissing = false;
                        self.state = .DOCTYPEPublicIdentifierDoubleQuoted;
                        return ParseError.MissingWhitespaceAfterDoctypePublicKeyword;
                    },
                    '\'' => {
                        self.currentToken.publicIdentifier.shrink(0);
                        self.currentToken.publicIdentifierMissing = false;
                        self.state = .DOCTYPEPublicIdentifierSingleQuoted;
                        return ParseError.MissingWhitespaceAfterDoctypePublicKeyword;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.MissingDoctypePublicIdentifier;
                    },
                    else => {
                        self.currentToken.forceQuirks = true;
                        self.reconsume = true;
                        self.state = .BogusDOCTYPE;
                        return ParseError.MissingQuoteBeforeDoctypePublicIdentifier;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.58 Before DOCTYPE public identifier state
        .BeforeDOCTYPEPublicIdentifier => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing
                    },
                    '"' => {
                        self.currentToken.publicIdentifier.shrink(0);
                        self.currentToken.publicIdentifierMissing = false;
                        self.state = .DOCTYPEPublicIdentifierDoubleQuoted;
                    },
                    '\'' => {
                        self.currentToken.publicIdentifier.shrink(0);
                        self.currentToken.publicIdentifierMissing = false;
                        self.state = .DOCTYPEPublicIdentifierSingleQuoted;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.MissingDoctypePublicIdentifier;
                    },
                    else => {
                        self.currentToken.forceQuirks = true;
                        self.reconsume = true;
                        self.state = .BogusDOCTYPE;
                        return ParseError.MissingQuoteBeforeDoctypePublicIdentifier;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.59 DOCTYPE public identifier (double-quoted) state
        .DOCTYPEPublicIdentifierDoubleQuoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '"' => {
                        self.state = .AfterDOCTYPEPublicIdentifier;
                    },
                    0x00 => {
                        self.currentToken.publicIdentifier.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.AbruptDoctypePublicIdentifier;
                    },
                    else => {
                        self.currentToken.publicIdentifier.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.60 DOCTYPE public identifier (single-quoted) state
        .DOCTYPEPublicIdentifierSingleQuoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\'' => {
                        self.state = .AfterDOCTYPEPublicIdentifier;
                    },
                    0x00 => {
                        self.currentToken.publicIdentifier.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.AbruptDoctypePublicIdentifier;
                    },
                    else => {
self.currentToken.publicIdentifier.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.61 After DOCTYPE public identifier state
        .AfterDOCTYPEPublicIdentifier => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.state = .BetweenDOCTYPEPublicAndSystemIdentifiers;
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    '"' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierDoubleQuoted;
                        return ParseError.MissingWhitespaceBetweenDoctypePublicAndSystemIdentifiers;
                    },
                    '\'' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierSingleQuoted;
                        return ParseError.MissingWhitespaceBetweenDoctypePublicAndSystemIdentifiers;
                    },
                    else => {
                        self.currentToken.forceQuirks = true;
                        self.reconsume = true;
                        self.state = .BogusDOCTYPE;
                        return ParseError.MissingQuoteBeforeDoctypeSystemIdentifier;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.62 Between DOCTYPE public and system identifiers state
        .BetweenDOCTYPEPublicAndSystemIdentifiers => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    '"' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierDoubleQuoted;
                    },
                    '\'' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierSingleQuoted;
                    },
                    else => {
                        self.currentToken.forceQuirks = true;
                        self.reconsume = true;
                        self.state = .BogusDOCTYPE;
                        return ParseError.MissingQuoteBeforeDoctypeSystemIdentifier;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.63 After DOCTYPE system keyword state
        .AfterDOCTYPESystemKeyword => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        self.state = .BeforeDOCTYPESystemIdentifier;
                    },
                    '"' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierDoubleQuoted;
                        return ParseError.MissingWhitespaceAfterDoctypeSystemKeyword;
                    },
                    '\'' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierSingleQuoted;
                        return ParseError.MissingWhitespaceAfterDoctypeSystemKeyword;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.MissingDoctypeSystemIdentifier;
                    },
                    else => {
                        self.currentToken.forceQuirks = true;
                        self.reconsume = true;
                        self.state = .BogusDOCTYPE;
                        return ParseError.MissingQuoteBeforeDoctypeSystemIdentifier;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.64 Before DOCTYPE system identifier state
        .BeforeDOCTYPESystemIdentifier => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing
                    },
                    '"' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierDoubleQuoted;
                    },
                    '\'' => {
                        self.currentToken.systemIdentifier.shrink(0);
                        self.currentToken.systemIdentifierMissing = false;
                        self.state = .DOCTYPESystemIdentifierSingleQuoted;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    else => {
                        self.currentToken.forceQuirks = true;
                        self.reconsume = true;
                        self.state = .BogusDOCTYPE;
                        return ParseError.MissingQuoteBeforeDoctypeSystemIdentifier;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.65 DOCTYPE system identifier (double-quoted) state
        .DOCTYPESystemIdentifierDoubleQuoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '"' => {
                        self.state = .AfterDOCTYPESystemIdentifier;
                    },
                    0x00 => {
                        self.currentToken.systemIdentifier.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.AbruptDoctypeSystemIdentifier;
                    },
                    else => {
                        self.currentToken.systemIdentifier.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.66 DOCTYPE system identifier (single-quoted) state
        .DOCTYPESystemIdentifierSingleQuoted => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\'' => {
                        self.state = .AfterDOCTYPESystemIdentifier;
                    },
                    0x00 => {
                        self.currentToken.systemIdentifier.appendSlice("�") catch unreachable;
                        return ParseError.UnexpectedNullCharacter;
                    },
                    '>' => {
                        self.currentToken.forceQuirks = true;
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return ParseError.AbruptDoctypeSystemIdentifier;
                    },
                    else => {
                        self.currentToken.systemIdentifier.append(next_char) catch unreachable;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.67 After DOCTYPE system identifier state
        .AfterDOCTYPESystemIdentifier => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '\t', 0x0A, 0x0C, ' ' => {
                        // Ignore and do nothing
                    },
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    else => {
                        self.reconsume = true;
                        self.state = .BogusDOCTYPE;
                        return ParseError.UnexpectedCharacterAfterDoctypeSystemIdentifier;
                    }
                }
            } else {
                self.currentToken.forceQuirks = true;
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInDOCTYPE;
            }
        },
        // 12.2.5.68 Bogus DOCTYPE state
        .BogusDOCTYPE => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '>' => {
                        self.state = .Data;
                        self.emitToken(self.currentToken.complete(self));
                        return self.popQueuedErrorOrToken();
                    },
                    0x00 => {
                        return ParseError.UnexpectedNullCharacter;
                    },
                    else => {
                        // Ignore and do nothing
                    }
                }
            } else {
                self.emitToken(self.currentToken.complete(self));
                self.emitToken(Token.EndOfFile);
                return self.popQueuedErrorOrToken();
            }
        },
        // 12.2.5.69 CDATA section state
        .CDATASection => {
            if (self.nextChar()) |next_char| {
                switch (next_char) {
                    '[' => {
                        self.state = .CDATASectionBracket;
                    },
                    else => {
                        self.emitToken(Token { .Character = .{ .data = next_char } });
                        return self.popQueuedErrorOrToken();
                    }
                }
            } else {
                self.emitToken(Token.EndOfFile);
                return ParseError.EofInCDATA;
            }
        },
        // 12.2.5.70 CDATA section bracket state
        .CDATASectionBracket => {
            var next_char = self.nextChar();
            if (next_char != null and next_char.?
== ']') { self.state = .CDATASectionEnd; } else { self.emitToken(Token { .Character = .{ .data = ']' } }); self.reconsume = true; self.state = .CDATASection; return self.popQueuedErrorOrToken(); } }, // 172.16.17.32 CDATA section end state .CDATASectionEnd => { if (self.nextChar()) |next_char| { switch (next_char) { ']' => { self.emitToken(Token { .Character = .{ .data = ']' } }); return self.popQueuedErrorOrToken(); }, '>' => { self.state = .Data; continue; }, else => {}, // fallthrough } } // anything else self.emitToken(Token { .Character = .{ .data = ']' } }); self.emitToken(Token { .Character = .{ .data = ']' } }); self.reconsume = true; self.state = .CDATASection; return self.popQueuedErrorOrToken(); }, // 12.2.5.72 Character reference state .CharacterReference => { self.temporaryBuffer.shrink(0); // self.temporaryBuffer.append('&') catch unreachable; var next_char = self.nextChar(); if (next_char != null and std.ascii.isAlNum(next_char.?)) { self.reconsume = true; self.state = .NamedCharacterReference; } else if (next_char != null and next_char.? == '#') { self.temporaryBuffer.append(next_char.?) catch unreachable; self.state = .NumericCharacterReference; } else { self.flushTemporaryBufferAsCharacterReference(); self.reconsume = true; self.state = self.returnState.?; if (self.hasQueuedErrorOrToken()) return self.popQueuedErrorOrToken(); } }, // 12.2.5.73 Named character reference state .NamedCharacterReference => { var next_char = self.nextChar().?; // TODO: handle EOF const peeked = self.peekChar().?; // TODO: handle EOF // Read input characters until the next_char is not alpha-numeric // TODO: This should actually only be matching prefixes of valid character references // See the example in 12.2.5.73 while (std.ascii.isAlNum(next_char)) : (next_char = self.nextChar().?) 
// TODO: handle EOF self.temporaryBuffer.append(next_char) catch unreachable; self.temporaryBuffer.append(next_char) catch unreachable; const collected = self.temporaryBuffer.items; if (self.namedCharacterReferenceTable.contains(collected)) { if (self.inAttributeState() and next_char != ';' and (std.ascii.isAlNum(peeked) or peeked == '=')) { self.flushTemporaryBufferAsCharacterReference(); self.state = self.returnState.?; if (self.hasQueuedErrorOrToken()) return self.popQueuedErrorOrToken(); } else { const ncr = self.temporaryBuffer.toOwnedSlice(); const codepoint = self.namedCharacterReferenceTable.get(ncr).?; self.flushCodepointAsCharacterReference(codepoint); self.state = self.returnState.?; if (self.currentChar().? != ';') // TODO: handle EOF return ParseError.MissingSemicolonAfterCharacterReference; if (self.hasQueuedErrorOrToken()) return self.popQueuedErrorOrToken(); } } else { self.flushTemporaryBufferAsCharacterReference(); self.state = .AmbiguousAmpersand; if (self.hasQueuedErrorOrToken()) return self.popQueuedErrorOrToken(); } }, // 172.16.31.104 Ambiguous ampersand state .AmbiguousAmpersand => { var next_char = self.nextChar(); if (next_char != null and std.ascii.isAlNum(next_char.?)) { if (self.inAttributeState()) { self.currentToken.currentAttributeValue.append(next_char.?) catch unreachable; } else { self.emitToken(Token { .Character = .{ .data = next_char.? } }); return self.popQueuedErrorOrToken(); } } else if (next_char != null and next_char.? == ';') { self.reconsume = true; self.state = self.returnState.?; return ParseError.UnknownNamedCharacterReference; } else { self.reconsume = true; self.state = self.returnState.?; } }, // 172.16.31.105 Numeric character reference state .NumericCharacterReference => { self.characterReferenceCode = 0; var next_char = self.nextChar(); if (next_char != null and (next_char.? == 'X' or next_char.? == 'x')) { self.temporaryBuffer.append(next_char.?) 
catch unreachable; self.state = .HexadecimalCharacterReference; } else { self.reconsume = true; self.state = .DecimalCharacterReferenceStart; } }, // 12.2.5.76 Hexadecimal character reference start state .HexadecimalCharacterReferenceStart => { var next_char = self.nextChar(); if (next_char != null and std.ascii.isDigit(next_char.?)) { self.reconsume = true; self.state = .HexadecimalCharacterReference; } else { self.temporaryBuffer.shrink(0); self.reconsume = true; self.state = self.returnState.?; return ParseError.AbsenceOfDigitsInNumericCharacterReference; } }, // 12.2.5.77 Decimal character reference start state .DecimalCharacterReferenceStart => { var next_char = self.nextChar(); if (next_char != null and std.ascii.isDigit(next_char.?)) { self.reconsume = true; self.state = .DecimalCharacterReference; } else { self.temporaryBuffer.shrink(0); self.reconsume = true; self.state = self.returnState.?; return ParseError.AbsenceOfDigitsInNumericCharacterReference; } }, // 12.2.5.78 Hexadecimal character reference state .HexadecimalCharacterReference => { if (self.nextChar()) |next_char| { if (std.ascii.isDigit(next_char)) { self.characterReferenceCode *= 16; self.characterReferenceCode += (next_char - 0x0030); continue; } else if (std.ascii.isXDigit(next_char)) { self.characterReferenceCode *= 16; if (std.ascii.isUpper(next_char)) { self.characterReferenceCode += (next_char - 0x0037); } else { self.characterReferenceCode += (next_char - 0x0057); } continue; } else if (next_char == ';') { self.state = .NumericCharacterReferenceEnd; continue; } } // anything else self.reconsume = true; self.state = .NumericCharacterReferenceEnd; return ParseError.MissingSemicolonAfterCharacterReference; }, // 12.2.5.79 Decimal character reference state .DecimalCharacterReference => { var next_char = self.nextChar(); if (next_char != null and std.ascii.isDigit(next_char.?)) { self.characterReferenceCode *= 10; self.characterReferenceCode += (next_char.? 
- 0x0030); } else if (next_char != null and next_char.? == ';') { self.state = .NumericCharacterReferenceEnd; } else { self.temporaryBuffer.shrink(0); self.reconsume = true; self.state = .NumericCharacterReferenceEnd; return ParseError.MissingSemicolonAfterCharacterReference; } }, // 12.2.5.80 Numeric character reference end state .NumericCharacterReferenceEnd => { var err: ?ParseError = null; switch (self.characterReferenceCode) { 0x00 => { self.characterReferenceCode = 0xFFFD; err = ParseError.NullCharacterReference; }, 0xD800...0xDFFF => { self.characterReferenceCode = 0xFFFD; err = ParseError.SurrogateCharacterReference; }, 0xFDD0...0xFDEF, 0xFFFE, 0xFFFF, 0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF => { err = ParseError.NoncharacterCharacterReference; }, 0x0001...0x001F, 0x007F...0x009F => { // TODO: Match against the control character reference code. 
err = ParseError.ControlCharacterReference; }, else => { if (self.characterReferenceCode > 0x10FFFF) { self.characterReferenceCode = 0xFFFD; err = ParseError.CharacterReferenceOutsideUnicodeRange; } } } const codepoint = @intCast(u21, self.characterReferenceCode); self.flushCodepointAsCharacterReference(codepoint); self.state = self.returnState.?; if (err != null) return err.?; if (self.hasQueuedErrorOrToken()) return self.popQueuedErrorOrToken(); } } } unreachable; } /// Returns true if that last char consumed is EOF pub fn eof(self: Self) bool { if (self.contents.len == 0) { return true; } return self.index > self.contents.len; } fn hasQueuedErrorOrToken(self: *Self) bool { return self.errorQueue.count > 0 or self.backlog.count > 0; } /// Must be certain that an error or token exists in the queue, see hasQueuedErrorOrToken fn popQueuedErrorOrToken(self: *Self) ParseError!Token { // check errors first if (self.errorQueue.readItem()) |err_int| { return @errSetCast(ParseError, @intToError(err_int)); } if (self.backlog.readItem()) |token| { return token; } unreachable; } pub fn emitToken(self: *Self, token: Token) void { if (token == .EndTag) { if (token.EndTag.attributes.items().len > 0) { self.emitError(ParseError.EndTagWithAttributes); } if (token.EndTag.selfClosing) { self.emitError(ParseError.EndTagWithTrailingSolidus); } } if (token == .StartTag) { // FIXME: lastEmittedStartTag does not own its own 'name' memory. It should // probably copy the 'name' memory instead of re-using the emitted token's memory. self.lastEmittedStartTag = token; } self.backlog.writeItem(token) catch unreachable; } pub fn emitError(self: *Self, err: ParseError) void { self.errorQueue.writeItem(@errorToInt(err)) catch unreachable; } fn inAttributeState(self: Self) bool { return switch (self.returnState.?) 
{ .AttributeValueDoubleQuoted, .AttributeValueSingleQuoted, .AttributeValueUnquoted => true, else => false, }; } fn flushTemporaryBufferAsCharacterReference(self: *Self) void { const characterReference = self.temporaryBuffer.toOwnedSlice(); if (self.inAttributeState()) { // FIXME: This is a hack because we don't append & to the temporary buffer self.currentToken.currentAttributeValue.append('&') catch unreachable; self.currentToken.currentAttributeValue.appendSlice(characterReference) catch unreachable; } else { var i: usize = characterReference.len - 1; while (i >= 0) { self.emitToken(Token { .Character = .{ .data = characterReference[i] } }); if (i == 0) break; i -= 1; } } } fn flushCodepointAsCharacterReference(self: *Self, codepoint: u21) void { if (self.inAttributeState()) { var char: [4]u8 = undefined; var len = std.unicode.utf8Encode(codepoint, char[0..]) catch unreachable; self.temporaryBuffer.appendSlice(char[0..len]) catch unreachable; self.currentToken.currentAttributeValue.appendSlice(self.temporaryBuffer.toOwnedSlice()) catch unreachable; } else { self.temporaryBuffer.shrink(0); self.emitToken(Token { .Character = .{ .data = codepoint } }); } } /// Returns null on EOF fn nextChar(self: *Self) ?u8 { if (self.reconsume) { self.reconsume = false; return self.currentChar(); } if (self.index + 1 > self.contents.len) { self.index = self.contents.len + 1; // consume the EOF // TODO: handle column increment return null; // EOF } var c = self.contents[self.index]; if (c == '\n') { self.line += 1; self.column = 0; } self.index += 1; self.column += 1; return c; } fn currentChar(self: *Self) ?u8 { if (self.eof()) { return null; } else if (self.index == 0) { return self.contents[self.index]; } else { return self.contents[self.index - 1]; } } /// Returns null on EOF fn peekChar(self: *Self) ?u8 { if (self.reconsume) { return self.currentChar(); } if (self.index + 1 > self.contents.len) { return null; // EOF } return self.contents[self.index]; } /// Can return less 
than the requested `n` characters if EOF is reached fn peekN(self: *Self, n: usize) []const u8 { if (self.eof()) { return self.contents[0..0]; } const start = if (self.reconsume) self.index - 1 else self.index; const end = std.math.min(self.contents.len, start + n); return self.contents[start..end]; } fn getIndex(self: *Self) usize { return self.index; } }; /// A token that is still being constructed and has yet to be emitted. /// Contains temporary buffers necessary for the construction of a token during tokenization. pub const IncompleteToken = struct { const Self = @This(); tokenType: ?@TagType(Token) = null, forceQuirks: bool = false, selfClosing: bool = false, tokenData: ArrayList(u8), publicIdentifier: ArrayList(u8), publicIdentifierMissing: bool = true, systemIdentifier: ArrayList(u8), systemIdentifierMissing: bool = true, commentData: ArrayList(u8), currentAttributeName: ArrayList(u8), currentAttributeValue: ArrayList(u8), attributes: StringHashMap([]const u8), allocator: *mem.Allocator, // TODO: Might be nice to take a *Tokenizer instead, but that would require // https://github.com/ziglang/zig/issues/2765 because the Tokenizer.init fn's // would need to be able to get a pointer to the struct value that will // be returned. This would allow us to remove the *Tokenizer parameters from // the IncompleteToken functions that currently take one. 
pub fn init(allocator: *mem.Allocator) Self { return Self { .allocator = allocator, .tokenData = ArrayList(u8).init(allocator), .publicIdentifier = ArrayList(u8).init(allocator), .systemIdentifier = ArrayList(u8).init(allocator), .commentData = ArrayList(u8).init(allocator), .currentAttributeName = ArrayList(u8).init(allocator), .currentAttributeValue = ArrayList(u8).init(allocator), .attributes = StringHashMap([]const u8).init(allocator), }; } pub fn deinit(self: *Self) void { // TODO: Handle deinit of ArrayList's, currently we can't deinit them // because we use toOwnedSlice self.attributes.deinit(); } pub fn create(self: *Self, tokenType: @TagType(Token)) void { std.debug.assert(self.tokenType == null); self.reset(); self.tokenType = tokenType; } pub fn startNewAttribute(self: *Self, tokenizer: *Tokenizer) !void { try self.flushAttribute(tokenizer); self.currentAttributeName.shrink(0); self.currentAttributeValue.shrink(0); } fn flushAttribute(self: *Self, tokenizer: *Tokenizer) !void { if (self.currentAttributeName.items.len == 0) return; const isDuplicate = self.attributes.get(self.currentAttributeName.items) != null; if (isDuplicate) { tokenizer.emitError(ParseError.DuplicateAttribute); } else { try self.attributes.putNoClobber(self.currentAttributeName.toOwnedSlice(), self.currentAttributeValue.toOwnedSlice()); } } pub fn isAppropriateEndTag(self: *Self, tokenizer: *Tokenizer) bool { std.debug.assert(self.tokenType.? == .EndTag); if (tokenizer.lastEmittedStartTag) |last| { return mem.eql(u8, self.tokenData.items, last.EndTag.name.?); } else { return false; } } /// Return a finished Token and reset state pub fn complete(self: *Self, tokenizer: *Tokenizer) Token { var token: Token = undefined; switch(self.tokenType.?) 
{ .DOCTYPE => { token = Token{ .DOCTYPE = .{ .name = self.tokenData.toOwnedSlice(), .publicIdentifier = if (!self.publicIdentifierMissing) self.publicIdentifier.toOwnedSlice() else null, .systemIdentifier = if (!self.systemIdentifierMissing) self.systemIdentifier.toOwnedSlice() else null, .forceQuirks = self.forceQuirks, } }; }, .StartTag => { self.flushAttribute(tokenizer) catch unreachable; token = Token{ .StartTag = .{ .name = self.tokenData.toOwnedSlice(), .selfClosing = self.selfClosing, .attributes = self.attributes.clone() catch unreachable, } }; }, .EndTag => { self.flushAttribute(tokenizer) catch unreachable; token = Token{ .EndTag = .{ .name = self.tokenData.toOwnedSlice(), .selfClosing = self.selfClosing, .attributes = self.attributes.clone() catch unreachable, } }; }, .Comment => { token = Token{ .Comment = .{ .data = self.commentData.toOwnedSlice(), } }; }, .Character => unreachable, .EndOfFile => unreachable, } self.reset(); return token; } fn reset(self: *Self) void { self.tokenType = null; self.forceQuirks = false; self.selfClosing = false; self.tokenData.shrink(0); self.publicIdentifier.shrink(0); self.publicIdentifierMissing = true; self.systemIdentifier.shrink(0); self.systemIdentifierMissing = true; self.commentData.shrink(0); self.currentAttributeName.shrink(0); self.currentAttributeValue.shrink(0); self.attributes.clearAndFree(); } }; test "nextChar, currentChar, peekChar, peekN" { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); var tokenizer = try Tokenizer.initWithString(&arena.allocator, "abcdefghijklmnop"); defer tokenizer.deinit(); // before consuming anything, current/peek/next should all get the first char std.testing.expectEqual(@as(u8, 'a'), tokenizer.currentChar().?); std.testing.expectEqual(@as(u8, 'a'), tokenizer.currentChar().?); std.testing.expectEqual(@as(u8, 'a'), tokenizer.peekChar().?); std.testing.expectEqual(@as(u8, 'a'), tokenizer.currentChar().?); std.testing.expectEqual(@as(u8, 
'a'), tokenizer.nextChar().?); // after the first consume, current should give the last consumed char std.testing.expectEqual(@as(u8, 'a'), tokenizer.currentChar().?); // peek should give the next std.testing.expectEqual(@as(u8, 'b'), tokenizer.peekChar().?); std.testing.expectEqualSlices(u8, "b", tokenizer.peekN(1)); std.testing.expectEqualSlices(u8, "bcdef", tokenizer.peekN(5)); std.testing.expectEqualSlices(u8, "bcdefghijklmnop", tokenizer.peekN(100)); // go to second-to-last char while (tokenizer.nextChar()) |c| { if (c == 'o') break; } std.testing.expectEqual(false, tokenizer.eof()); std.testing.expectEqual(@as(u8, 'o'), tokenizer.currentChar().?); std.testing.expectEqual(@as(u8, 'p'), tokenizer.peekChar().?); std.testing.expectEqualSlices(u8, "p", tokenizer.peekN(100)); std.testing.expectEqual(@as(u8, 'p'), tokenizer.nextChar().?); std.testing.expectEqual(false, tokenizer.eof()); std.testing.expectEqual(@as(u8, 'p'), tokenizer.currentChar().?); // next should be EOF std.testing.expect(null == tokenizer.peekChar()); std.testing.expectEqualSlices(u8, "", tokenizer.peekN(100)); // reconsume the last char tokenizer.reconsume = true; std.testing.expectEqual(@as(u8, 'p'), tokenizer.currentChar().?); std.testing.expectEqual(@as(u8, 'p'), tokenizer.peekChar().?); std.testing.expectEqualSlices(u8, "p", tokenizer.peekN(100)); std.testing.expectEqual(@as(u8, 'p'), tokenizer.nextChar().?); // consume EOF std.testing.expect(null == tokenizer.nextChar()); std.testing.expectEqual(true, tokenizer.eof()); // current, next, and peek should be eof std.testing.expect(null == tokenizer.currentChar()); std.testing.expect(null == tokenizer.nextChar()); std.testing.expect(null == tokenizer.peekChar()); std.testing.expectEqualSlices(u8, "", tokenizer.peekN(100)); // reconsume still gives us EOF tokenizer.reconsume = true; std.testing.expectEqual(true, tokenizer.eof()); std.testing.expect(null == tokenizer.currentChar()); std.testing.expect(null == tokenizer.nextChar()); }
src/tokenizer.zig
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const window = @import("Window.zig");
const img = @import("Image.zig");
const Texture2D = img.Texture2D;
const MinFilter = img.MinFilter;
const ArrayList = std.ArrayList;
const c_allocator = std.heap.c_allocator;
const c = @import("c.zig").c;
const expect = std.testing.expect;
const ReferenceCounter = @import("../RefCount.zig").ReferenceCounter;

/// Returns the expected byte size of the pixel data for a texture with the
/// given dimensions, layer count and image type.
/// Returns 0 for image types not handled here.
fn image_data_size(w: usize, h: usize, layers: u32, imgType: img.ImageType) usize {
    var expectedDataSize: usize = 0;
    if (imgType == img.ImageType.RGBA) {
        expectedDataSize = w * h * layers * 4;
    } else if (imgType == img.ImageType.RG) {
        assert(w * 2 % 4 == 0); // Ensure rows are a multiple of 4 bytes
        expectedDataSize = w * h * layers * 2;
    } else if (imgType == img.ImageType.R) {
        assert(w % 4 == 0); // Ensure rows are a multiple of 4 bytes
        expectedDataSize = w * h * layers;
    }
    return expectedDataSize;
}

/// A GL_TEXTURE_2D_ARRAY texture plus (optionally) framebuffer objects that
/// render into its layers. All functions that touch GL state must be called
/// with a current OpenGL context.
pub const Texture2DArray = struct {
    ref_count: ReferenceCounter = ReferenceCounter{},

    // OpenGL texture object name; 0 means "not created / freed".
    id: u32,
    width: u32,
    height: u32,
    layers: u32,
    imageType: img.ImageType,

    // createFramebuffers()
    // One framebuffer per texture layer
    frame_buffer_ids: ArrayList(u32),

    // createFramebuffer()
    // One framebuffer with each texture layer attached
    frame_buffer_id: u32,

    /// Creates the GL texture object. Dimensions stay 0 until upload() is called.
    pub fn init(smooth_when_magnified: bool, min_filter: MinFilter) !Texture2DArray {
        return Texture2DArray{
            .id = try img.createTexture(c.GL_TEXTURE_2D_ARRAY, smooth_when_magnified, min_filter),
            .width = 0,
            .height = 0,
            .layers = 0,
            .imageType = img.ImageType.RGBA,
            .frame_buffer_ids = ArrayList(u32).init(c_allocator),
            .frame_buffer_id = 0,
        };
    }

    /// Grows frame_buffer_ids so there is one framebuffer object per layer.
    /// Framebuffers that already exist are kept; only the missing ones are created.
    fn createFrameBufferIds(self: *Texture2DArray) !void {
        const start = self.frame_buffer_ids.items.len;
        // NOTE(review): assumes self.layers >= existing framebuffer count;
        // this subtraction underflows otherwise — confirm callers guarantee it.
        const numToCreate = self.layers - start;
        if (numToCreate < 1) {
            return;
        }
        try self.frame_buffer_ids.resize(self.layers);
        for (self.frame_buffer_ids.items[start..]) |*id| {
            id.* = 0;
        }
        c.glGenFramebuffers(@intCast(c_int, numToCreate), @ptrCast([*c]c_uint, self.frame_buffer_ids.items[start..].ptr));

        // Verify that every newly requested framebuffer name was actually generated.
        var allIdsNon0: bool = true;
        for (self.frame_buffer_ids.items[start..]) |id| {
            if (id == 0) {
                allIdsNon0 = false;
                break;
            }
        }
        if (!allIdsNon0) {
            // Delete only the framebuffers generated in this call (numToCreate of
            // them); the previous count (self.layers) over-ran the slice.
            c.glDeleteFramebuffers(@intCast(c_int, numToCreate), @ptrCast([*c]c_uint, self.frame_buffer_ids.items[start..].ptr));
            // Drop the bogus (possibly zero) ids so the list only ever holds
            // valid framebuffer names.
            self.frame_buffer_ids.shrink(start);
            return error.OpenGLError;
        }
    }

    /// Creates one framebuffer per layer, each with that single layer attached
    /// as GL_COLOR_ATTACHMENT0. Use bindFrameBuffer(index) to render to a layer.
    pub fn createFrameBuffers(self: *Texture2DArray) !void {
        if (self.id == 0) {
            assert(false);
            return error.InvalidState;
        }
        try self.createFrameBufferIds();
        for (self.frame_buffer_ids.items) |id, i| {
            c.glBindFramebuffer(c.GL_FRAMEBUFFER, id);
            c.glFramebufferTextureLayer(c.GL_FRAMEBUFFER, c.GL_COLOR_ATTACHMENT0, self.id, 0, @intCast(c_int, i));

            // Configure framebuffer
            var drawBuffers: [1]c_uint = [1]c_uint{c.GL_COLOR_ATTACHMENT0};
            c.glDrawBuffers(1, drawBuffers[0..].ptr);

            // Validate framebuffer
            if (c.glCheckFramebufferStatus(c.GL_FRAMEBUFFER) != c.GL_FRAMEBUFFER_COMPLETE) {
                assert(false);
                return error.OpenGLError;
            }
            c.glBindFramebuffer(c.GL_FRAMEBUFFER, 0);
        }
    }

    /// Creates a single framebuffer with every layer attached as a separate
    /// colour attachment (so a shader can write all layers in one pass), and an
    /// optional depth texture. Limited to 8 layers by the attachment count.
    /// Use bindFrameBuffer2() to bind it.
    pub fn createFrameBuffer(self: *Texture2DArray, depth_texture: ?*Texture2D) !void {
        if (self.id == 0 or self.width <= 0 or self.height <= 0 or self.layers <= 0) {
            assert(false);
            return error.InvalidState;
        }
        if (self.layers > 8) {
            return error.TooManyLayers;
        }

        // Create FBO
        c.glGenFramebuffers(1, @ptrCast([*c]c_uint, &self.frame_buffer_id));
        if (self.frame_buffer_id == 0) {
            return error.OpenGLError;
        }
        errdefer {
            c.glDeleteFramebuffers(1, @ptrCast([*c]c_uint, &self.frame_buffer_id));
            self.frame_buffer_id = 0;
        }

        // Bind and configure FBO
        c.glBindFramebuffer(c.GL_FRAMEBUFFER, self.frame_buffer_id);
        const drawBuffers = [8]c_uint{
            c.GL_COLOR_ATTACHMENT0,
            c.GL_COLOR_ATTACHMENT1,
            c.GL_COLOR_ATTACHMENT2,
            c.GL_COLOR_ATTACHMENT3,
            c.GL_COLOR_ATTACHMENT4,
            c.GL_COLOR_ATTACHMENT5,
            c.GL_COLOR_ATTACHMENT6,
            c.GL_COLOR_ATTACHMENT7,
        };
        c.glDrawBuffers(@intCast(c_int, self.layers), drawBuffers[0..].ptr);

        // Attach layer i of the texture as colour attachment i.
        var i: u32 = 0;
        while (i < self.layers) : (i += 1) {
            c.glFramebufferTextureLayer(c.GL_FRAMEBUFFER, c.GL_COLOR_ATTACHMENT0 + i, self.id, 0, @intCast(c_int, i));
        }
        if (depth_texture != null) {
            c.glFramebufferTexture2D(c.GL_FRAMEBUFFER, c.GL_DEPTH_ATTACHMENT, c.GL_TEXTURE_2D, depth_texture.?.id, 0);
        }

        // Validate framebuffer
        if (c.glCheckFramebufferStatus(c.GL_FRAMEBUFFER) != c.GL_FRAMEBUFFER_COMPLETE) {
            assert(false);
            return error.OpenGLError;
        }
        c.glBindFramebuffer(c.GL_FRAMEBUFFER, 0);
    }

    /// Binds the all-layers framebuffer created by createFrameBuffer() and sets
    /// the viewport to the texture's dimensions.
    pub fn bindFrameBuffer2(self: Texture2DArray) !void {
        if (self.frame_buffer_id == 0) {
            assert(false);
            return error.InvalidState;
        }
        c.glBindFramebuffer(c.GL_FRAMEBUFFER, self.frame_buffer_id);
        c.glViewport(0, 0, @intCast(c_int, self.width), @intCast(c_int, self.height));
    }

    /// Binds the per-layer framebuffer for the given layer (see
    /// createFrameBuffers()) and sets the viewport to the texture's dimensions.
    pub fn bindFrameBuffer(self: Texture2DArray, index: u32) !void {
        if (index >= self.layers) {
            assert(false);
            return error.InvalidParameter;
        }
        c.glBindFramebuffer(c.GL_FRAMEBUFFER, self.frame_buffer_ids.at(index));
        c.glViewport(0, 0, @intCast(c_int, self.width), @intCast(c_int, self.height));
    }

    /// Binds the texture to the given texture image unit for sampling.
    pub fn bindToUnit(self: *Texture2DArray, unit: u32) !void {
        if (self.width == 0 or self.height == 0 or self.layers == 0 or self.id == 0) {
            assert(false);
            return error.InvalidState;
        }
        if (unit >= window.maximumNumTextureImageUnits()) {
            return error.InvalidParameter;
        }
        c.glActiveTexture(c.GL_TEXTURE0 + unit);
        c.glBindTexture(c.GL_TEXTURE_2D_ARRAY, self.id);
    }

    /// Binds the texture to texture unit 0.
    pub fn bind(self: *Texture2DArray) !void {
        try self.bindToUnit(0);
    }

    /// Replaces the texture's data (dimensions and depth can change).
    /// If `data` is null the storage is allocated but left undefined.
    /// When `data` is non-null its length must match image_data_size().
    pub fn upload(self: *Texture2DArray, w: u32, h: u32, lyrs: u32, imgType: img.ImageType, data: ?[]const u8) !void {
        if (w == 0 or h == 0 or lyrs == 0 or w > 32768 or h > 32768) {
            assert(false);
            return error.InvalidParameter;
        }
        if (data != null) {
            const expectedDataSize = image_data_size(w, h, lyrs, imgType);
            if (data.?.len != expectedDataSize) {
                assert(false);
                return error.InvalidParameter;
            }
        }
        self.width = w;
        self.height = h;
        self.layers = lyrs;
        self.imageType = imgType;

        const internalFormat: u32 = img.image_type_sized_internal_formats[@enumToInt(imgType)];
        try self.bind();

        // Null data pointer => glTexImage3D only allocates storage.
        var ptr: [*c]const u8 = 0;
        if (data != null) {
            ptr = data.?.ptr;
        }
        c.glTexImage3D(c.GL_TEXTURE_2D_ARRAY, 0, @intCast(c_int, internalFormat), @intCast(c_int, w), @intCast(c_int, h), @intCast(c_int, lyrs), 0, img.image_type_base_internal_formats[@enumToInt(imgType)], c.GL_UNSIGNED_BYTE, ptr);
    }

    /// Downloads the entire texture (all layers). `outputBuffer` must be
    /// exactly image_data_size() bytes.
    pub fn download(self: *Texture2DArray, outputBuffer: []u8) !void {
        const expectedDataSize = image_data_size(self.width, self.height, self.layers, self.imageType);
        if (outputBuffer.len != expectedDataSize) {
            assert(false);
            return error.InvalidParameter;
        }
        try self.bind();
        c.glGetTexImage(c.GL_TEXTURE_2D_ARRAY, 0, img.image_type_base_internal_formats[@enumToInt(self.imageType)], c.GL_UNSIGNED_BYTE, outputBuffer.ptr);
    }

    /// Deletes all framebuffers and the texture object. The object must not be
    /// used afterwards (id is reset to 0).
    pub fn free(self: *Texture2DArray) void {
        if (self.id == 0) {
            assert(false);
            return;
        }
        self.ref_count.deinit();
        if (self.frame_buffer_ids.items.len > 0) {
            c.glDeleteFramebuffers(@intCast(c_int, self.frame_buffer_ids.items.len), @ptrCast([*c]const c_uint, self.frame_buffer_ids.items.ptr));
        }
        self.frame_buffer_ids.deinit();
        c.glDeleteTextures(1, @ptrCast([*c]const c_uint, &self.id));
        self.id = 0;
    }

    /// Clears one layer of the texture to the given RGBA colour.
    /// Uses glClearTexSubImage when ARB_clear_texture is available, otherwise
    /// falls back to clearing via the layer's framebuffer (requires
    /// createFrameBuffers() to have been called).
    pub fn fill(self: *Texture2DArray, index: u32, colour: [4]f32) !void {
        if (index >= self.layers) {
            assert(false);
            return error.InvalidParameter;
        }
        if (c.GL_ARB_clear_texture != 0) {
            c.glClearTexSubImage(self.id, 0, 0, 0, @intCast(c_int, index), @intCast(c_int, self.width), @intCast(c_int, self.height), 1, c.GL_RGBA, c.GL_FLOAT, colour[0..4].ptr);
        } else if (self.frame_buffer_ids.items.len > 0) {
            try self.bindFrameBuffer(index);
            window.setClearColour(colour[0], colour[1], colour[2], colour[3]);
            window.clear(true, false);
        } else {
            return error.InvalidState;
        }
    }

    /// Copies from the currently bound framebuffer into the given layer.
    /// Assumes the bound frame buffer is the same size as the texture.
    pub fn copyFromFrameBuffer(self: *Texture2DArray, index: u32) !void {
        try self.bind();
        // GL_TEXTURE_2D_ARRAY (0x8C1A) — previously a magic number.
        c.glCopyTexSubImage3D(c.GL_TEXTURE_2D_ARRAY, 0, 0, 0, @intCast(c_int, index), 0, 0, @intCast(c_int, self.width), @intCast(c_int, self.height));
    }
};

test "2d texture array" {
    try window.createWindow(false, 200, 200, "test", true, 0);
    var texture: Texture2DArray = try Texture2DArray.init(false, MinFilter.Nearest);
    const dataIn: []const u8 = &[8]u8{ 127, 127, 127, 127, 33, 33, 33, 33 };
    try texture.upload(1, 1, 2, img.ImageType.RGBA, dataIn);
    expect(texture.width == 1);
    expect(texture.height == 1);
    expect(texture.layers == 2);
    expect(texture.imageType == img.ImageType.RGBA);
    try texture.createFrameBuffers();
    var data: [8]u8 = undefined;
    try texture.download(&data);
    expect(mem.eql(u8, data[0..], dataIn));
    try texture.bind();
    texture.free();
    window.closeWindow();
}
src/WindowGraphicsInput/ArrayTexture.zig
const std = @import("std");

const server = &@import("../main.zig").server;

const Error = @import("../command.zig").Error;
const Seat = @import("../Seat.zig");

/// Switch focus to the passed tags.
/// Saves the previously focused tags so `focus-previous-tags` can restore
/// them; no-op when the requested tags are already focused.
pub fn setFocusedTags(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!void {
    const tags = try parseTags(allocator, args, out);
    if (seat.focused_output.pending.tags != tags) {
        seat.focused_output.previous_tags = seat.focused_output.pending.tags;
        seat.focused_output.pending.tags = tags;
        // Re-layout, refocus, then commit the pending state atomically.
        seat.focused_output.arrangeViews();
        seat.focus(null);
        server.root.startTransaction();
    }
}

/// Set the spawn tagmask
/// Newly spawned views on this output will be assigned these tags.
pub fn spawnTagmask(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!void {
    const tags = try parseTags(allocator, args, out);
    seat.focused_output.spawn_tagmask = tags;
}

/// Set the tags of the focused view.
/// Does nothing when nothing (or a non-view) has focus.
pub fn setViewTags(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!void {
    const tags = try parseTags(allocator, args, out);
    if (seat.focused == .view) {
        const view = seat.focused.view;
        view.pending.tags = tags;
        seat.focus(null);
        view.applyPending();
    }
}

/// Toggle focus of the passed tags.
/// XOR-toggles the given tags in the output's focused set, refusing the
/// change if it would leave zero tags focused.
pub fn toggleFocusedTags(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!void {
    const tags = try parseTags(allocator, args, out);
    const output = seat.focused_output;
    const new_focused_tags = output.pending.tags ^ tags;
    // Never allow an empty focused tag set.
    if (new_focused_tags != 0) {
        output.previous_tags = output.pending.tags;
        output.pending.tags = new_focused_tags;
        output.arrangeViews();
        seat.focus(null);
        server.root.startTransaction();
    }
}

/// Toggle the passed tags of the focused view
/// Refuses a toggle that would leave the view with no tags.
pub fn toggleViewTags(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!void {
    const tags = try parseTags(allocator, args, out);
    if (seat.focused == .view) {
        const new_tags = seat.focused.view.pending.tags ^ tags;
        if (new_tags != 0) {
            const view = seat.focused.view;
            view.pending.tags = new_tags;
            seat.focus(null);
            view.applyPending();
        }
    }
}

/// Switch focus to tags that were selected previously
/// NOTE(review): unlike the commands above, this takes `[]const []const u8`
/// (not sentinel-terminated) and silently ignores `allocator`/`args`/`out` —
/// confirm whether extra arguments should be rejected here too.
pub fn focusPreviousTags(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const []const u8,
    out: *?[]const u8,
) Error!void {
    const previous_tags = seat.focused_output.previous_tags;
    if (seat.focused_output.pending.tags != previous_tags) {
        // Swap current and previous so the command is its own inverse.
        seat.focused_output.previous_tags = seat.focused_output.pending.tags;
        seat.focused_output.pending.tags = previous_tags;
        seat.focused_output.arrangeViews();
        seat.focus(null);
        server.root.startTransaction();
    }
}

/// Set the tags of the focused view to the tags that were selected previously
/// NOTE(review): same parameter-type inconsistency as focusPreviousTags.
pub fn sendToPreviousTags(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const []const u8,
    out: *?[]const u8,
) Error!void {
    const previous_tags = seat.focused_output.previous_tags;
    if (seat.focused == .view) {
        const view = seat.focused.view;
        view.pending.tags = previous_tags;
        seat.focus(null);
        view.applyPending();
    }
}

/// Parse the single tagmask argument shared by the commands above.
/// Expects exactly one argument after the command name: a base-10 u32
/// bitmask. A mask of 0 is rejected with a user-visible message via `out`.
fn parseTags(
    allocator: *std.mem.Allocator,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!u32 {
    if (args.len < 2) return Error.NotEnoughArguments;
    if (args.len > 2) return Error.TooManyArguments;

    const tags = try std.fmt.parseInt(u32, args[1], 10);

    if (tags == 0) {
        out.* = try std.fmt.allocPrint(allocator, "tags may not be 0", .{});
        return Error.Other;
    }

    return tags;
}
source/river-0.1.0/river/command/tags.zig
const std = @import("std");

/// A 2D float vector with in-place (`Scale`, `ClampTo...`) and
/// value-returning (`GetScaled`, `GetClampedTo...`) variants of most
/// operations. Methods that cannot produce a meaningful result for a
/// zero-length vector @panic.
pub const Vec2 = packed struct {
    x: f32 = 0.0,
    y: f32 = 0.0,

    /// Multiplies both components in place.
    pub fn Scale(self: *Vec2, scalar: f32) void {
        self.x *= scalar;
        self.y *= scalar;
    }

    /// Returns a scaled copy; `self` is unchanged.
    pub fn GetScaled(self: *const Vec2, scalar: f32) Vec2 {
        return Vec2{
            .x = self.x * scalar,
            .y = self.y * scalar,
        };
    }

    /// Component-wise sum.
    pub fn Add(self: *const Vec2, rhs: Vec2) Vec2 {
        return Vec2{
            .x = self.x + rhs.x,
            .y = self.y + rhs.y,
        };
    }

    /// Component-wise difference (self - rhs).
    pub fn Sub(self: *const Vec2, rhs: Vec2) Vec2 {
        return Vec2{
            .x = self.x - rhs.x,
            .y = self.y - rhs.y,
        };
    }

    /// NOTE(review): despite the name this returns the component-wise
    /// (Hadamard) product, not the scalar dot product. Kept as-is because
    /// changing the return type would break callers; the true dot product
    /// is `Dot(rhs).x + Dot(rhs).y`.
    pub fn Dot(self: *const Vec2, rhs: Vec2) Vec2 {
        return Vec2{
            .x = self.x * rhs.x,
            .y = self.y * rhs.y,
        };
    }

    // equals with a default tolerance of f32_epsilon
    /// Exact component equality.
    pub fn Equals(self: *const Vec2, rhs: *const Vec2) bool {
        return self.x == rhs.x and self.y == rhs.y;
    }

    /// Approximate equality: true when the distance between the vectors
    /// is at most `tolerance`.
    /// Fixes: `tolerance: comptime f32` was invalid parameter syntax, and
    /// `Sub` takes a `Vec2` by value, so the pointer must be dereferenced.
    pub fn EqualsT(self: *const Vec2, rhs: *const Vec2, tolerance: f32) bool {
        return self.Sub(rhs.*).LengthSqrd() <= tolerance * tolerance;
    }

    /// Squared magnitude; avoids the sqrt when only comparing lengths.
    pub fn LengthSqrd(self: *const Vec2) f32 {
        return self.x * self.x + self.y * self.y;
    }

    /// Euclidean magnitude.
    /// Fix: `LengthSqrd` was referenced without calling it.
    pub fn Length(self: *const Vec2) f32 {
        return std.math.sqrt(self.LengthSqrd());
    }

    /// Squared distance to `rhs`.
    pub fn DistSqrd(self: *const Vec2, rhs: Vec2) f32 {
        return self.Sub(rhs).LengthSqrd();
    }

    /// Euclidean distance to `rhs`.
    pub fn Dist(self: *const Vec2, rhs: Vec2) f32 {
        return self.Sub(rhs).Length();
    }

    //TODO panic checks in debug build only maybe?

    /// Grows the vector in place so its length is at least `size`.
    /// Panics on a zero-length vector (no direction to grow along).
    pub fn ClampToMinSize(self: *Vec2, size: f32) void {
        const lengthSqrd = self.LengthSqrd();
        const sizeSqrd = size * size;
        if (lengthSqrd < sizeSqrd) {
            if (lengthSqrd == 0.0) @panic("Clamping vector with length 0");
            const inv = size / std.math.sqrt(lengthSqrd);
            self.x *= inv;
            self.y *= inv;
        }
    }

    /// Shrinks the vector in place so its length is at most `size`.
    pub fn ClampToMaxSize(self: *Vec2, size: f32) void {
        const lengthSqrd = self.LengthSqrd();
        const sizeSqrd = size * size;
        if (lengthSqrd > sizeSqrd) {
            if (lengthSqrd == 0.0) @panic("Clamping vector with length 0");
            const inv = size / std.math.sqrt(lengthSqrd);
            self.x *= inv;
            self.y *= inv;
        }
    }

    /// Returns a copy whose length is at least `size`.
    /// Fix: the non-clamping path had no `return`, which does not compile.
    pub fn GetClampedToMinSize(self: *const Vec2, size: f32) Vec2 {
        const lengthSqrd = self.LengthSqrd();
        const sizeSqrd = size * size;
        if (lengthSqrd < sizeSqrd) {
            if (lengthSqrd == 0.0) @panic("Clamping vector with length 0");
            const inv = size / std.math.sqrt(lengthSqrd);
            return Vec2{
                .x = self.x * inv,
                .y = self.y * inv,
            };
        }
        // Already long enough: return an unchanged copy.
        return self.*;
    }

    /// Returns a copy whose length is at most `size`.
    /// Fix: the non-clamping path had no `return`, which does not compile.
    pub fn GetClampedToMaxSize(self: *const Vec2, size: f32) Vec2 {
        const lengthSqrd = self.LengthSqrd();
        const sizeSqrd = size * size;
        if (lengthSqrd > sizeSqrd) {
            if (lengthSqrd == 0.0) @panic("Clamping vector with length 0");
            const inv = size / std.math.sqrt(lengthSqrd);
            return Vec2{
                .x = self.x * inv,
                .y = self.y * inv,
            };
        }
        // Already short enough: return an unchanged copy.
        return self.*;
    }

    /// Rescales the vector in place to exactly `size`.
    pub fn ScaleToSize(self: *Vec2, size: f32) void {
        const length = self.Length();
        if (length == 0.0) @panic("Trying to scale up a vector with length 0");
        const scaleAmount = size / length;
        self.Scale(scaleAmount);
    }

    /// Returns a copy rescaled to exactly `size`.
    pub fn GetScaledToSize(self: *Vec2, size: f32) Vec2 {
        const length = self.Length();
        if (length == 0.0) @panic("Trying to scale up a vector with length 0");
        const scaleAmount = size / length;
        return self.GetScaled(scaleAmount);
    }

    /// Returns a unit-length copy.
    pub fn Normalized(self: *Vec2) Vec2 {
        const length = self.Length();
        if (length == 0.0) @panic("Normalizing vector with length 0");
        return Vec2{
            .x = self.x / length,
            .y = self.y / length,
        };
    }

    /// Normalizes in place to unit length.
    pub fn NormalizeSelf(self: *Vec2) void {
        const length = self.Length();
        if (length == 0.0) @panic("Normalizing vector with length 0");
        self.x /= length;
        self.y /= length;
    }
};

//TODO testing
src/math/Vec2.zig
const std = @import("std");
const traits = @import("./traits.zig");

/// Serializes commands and single arguments into the RESP wire format
/// (`*<count>\r\n` array header followed by `$<len>\r\n<bytes>\r\n` bulk
/// strings) by writing to any `msg` that exposes `print(fmt, args)`.
pub const CommandSerializer = struct {
    /// Serializes an entire command.
    ///
    /// Callers can expect this function to:
    ///   1. Write the number of arguments in the command
    ///      (optionally using the Redis.Arguments trait)
    ///   2. Write each argument
    ///      (optionally using the Redis.Arguments trait)
    ///
    /// `command` can be:
    ///   1. RedisCommand trait
    ///   2. RedisArguments trait
    ///   3. Zig Tuple
    ///   4. Array / Slice
    ///
    /// Redis.Command types can call this function in order to delegate
    /// simple serialization scenarios, the only requirement being that
    /// they pass a Zig Tuple or an Array/Slice, and not another reference
    /// to themselves (as that would loop forever).
    /// As an example, the `commands.GET` command calls this function
    /// passing `.{"GET", self.key}` as argument.
    pub fn serializeCommand(msg: anytype, command: anytype) !void {
        const CmdT = @TypeOf(command);

        if (comptime traits.isCommand(CmdT)) {
            return CmdT.RedisCommand.serialize(command, CommandSerializer, msg);
        }

        // TODO: decide if this should be removed.
        // Why would someone use Arguments directly?
        if (comptime traits.isArguments(CmdT)) {
            // Fix: `print` takes its arguments as a tuple; the count was
            // previously passed bare, unlike every other call site below.
            try msg.print("*{}\r\n", .{CmdT.RedisArguments.count(command)});
            return CmdT.RedisArguments.serialize(command, CommandSerializer, msg);
        }

        switch (@typeInfo(CmdT)) {
            else => {
                @compileLog(CmdT);
                @compileError("unsupported");
            },
            .Struct => {
                // Since we already handled structs that implement the
                // Command trait, the expectation here is that this struct
                // is in fact a Zig Tuple.
                if (!(comptime std.meta.trait.isTuple(CmdT))) {
                    @compileError("Only Zig tuples and Redis.Command types are allowed as argument to send.");
                }

                // Count the number of arguments. Slices/arrays of non-u8
                // elements flatten into one argument per element; byte
                // sequences count as a single bulk string.
                var argNum: usize = 0;
                inline for (std.meta.fields(CmdT)) |field| {
                    const arg = @field(command, field.name);
                    const ArgT = @TypeOf(arg);
                    if (comptime traits.isArguments(ArgT)) {
                        argNum += ArgT.RedisArguments.count(arg);
                    } else {
                        argNum += switch (@typeInfo(ArgT)) {
                            .Array => |arr| if (arr.child != u8) arg.len else 1,
                            .Pointer => |ptr| switch (ptr.size) {
                                .Slice => if (ptr.child != u8) arg.len else 1,
                                .One => switch (@typeInfo(ptr.child)) {
                                    .Array => |arr| if (arr.child != u8) arg.len else 1,
                                    else => @compileError("unsupported"),
                                },
                                else => @compileError("unsupported"),
                            },
                            else => 1,
                        };
                    }
                }

                // Write the RESP array header with the argument count.
                try msg.print("*{}\r\n", .{argNum});

                // Serialize each argument, delegating to the Arguments
                // trait where implemented, flattening sequences otherwise.
                inline for (std.meta.fields(CmdT)) |field| {
                    const arg = @field(command, field.name);
                    const ArgT = @TypeOf(arg);

                    if (comptime traits.isArguments(ArgT)) {
                        try ArgT.RedisArguments.serialize(arg, CommandSerializer, msg);
                    } else {
                        switch (@typeInfo(ArgT)) {
                            .Array => |arr| if (arr.child != u8) {
                                for (arg) |elem| {
                                    if (comptime traits.isArguments(arr.child)) {
                                        try arr.child.RedisArguments.serialize(elem, CommandSerializer, msg);
                                    } else {
                                        try serializeArgument(msg, arr.child, elem);
                                    }
                                }
                            } else {
                                try serializeArgument(msg, ArgT, arg);
                            },
                            .Pointer => |ptr| switch (ptr.size) {
                                .Slice => {
                                    if (ptr.child != u8) {
                                        for (arg) |elem| {
                                            if (comptime traits.isArguments(ptr.child)) {
                                                try ptr.child.RedisArguments.serialize(elem, CommandSerializer, msg);
                                            } else {
                                                try serializeArgument(msg, ptr.child, elem);
                                            }
                                        }
                                    } else {
                                        try serializeArgument(msg, ArgT, arg);
                                    }
                                },
                                .One => switch (@typeInfo(ptr.child)) {
                                    .Array => |arr| {
                                        if (arr.child != u8) {
                                            for (arg) |elem| {
                                                if (comptime traits.isArguments(arr.child)) {
                                                    try arr.child.RedisArguments.serialize(elem, CommandSerializer, msg);
                                                } else {
                                                    try serializeArgument(msg, arr.child, elem);
                                                }
                                            }
                                        } else {
                                            // Pointer to byte array: serialize the
                                            // pointed-to array as one bulk string.
                                            try serializeArgument(msg, ptr.child, arg.*);
                                        }
                                    },
                                    else => @compileError("unsupported"),
                                },
                                else => @compileError("unsupported"),
                            },
                            else => try serializeArgument(msg, ArgT, arg),
                        }
                    }
                }
            },
        }
    }

    /// Serializes a single argument as a RESP bulk string.
    /// Supports:
    ///   1. Strings (byte arrays and slices)
    ///   2. Numbers (formatted as decimal text)
    ///
    /// Redis.Argument types can use this function in their implementation,
    /// passing a basic type (mirroring how Redis.Command types delegate to
    /// serializeCommand).
    pub fn serializeArgument(msg: anytype, comptime T: type, val: T) !void {
        switch (@typeInfo(T)) {
            .Int,
            .Float,
            .ComptimeInt,
            => {
                // TODO: write a better method
                var buf: [100]u8 = undefined;
                var res = try std.fmt.bufPrint(buf[0..], "{}", .{val});
                try msg.print("${}\r\n{s}\r\n", .{ res.len, res });
            },
            .ComptimeFloat => {
                // Materialize as f64 so bufPrint formats a concrete runtime
                // type. TODO: write a better method, avoid duplication?
                var buf: [100]u8 = undefined;
                var res = try std.fmt.bufPrint(buf[0..], "{}", .{@as(f64, val)});
                try msg.print("${}\r\n{s}\r\n", .{ res.len, res });
            },
            .Array => {
                try msg.print("${}\r\n{s}\r\n", .{ val.len, val });
            },
            .Pointer => |ptr| switch (ptr.size) {
                .Slice => {
                    try msg.print("${}\r\n{s}\r\n", .{ val.len, val });
                },
                else => {
                    // Fixes: the old guard `(ptr.size != .Slice or ptr.size
                    // != .One)` was vacuously true, and `"..." ++ T` tried to
                    // concatenate a type with a string. Any pointer shape
                    // reaching here is unsupported, unconditionally.
                    @compileLog(ptr.size);
                    @compileLog(ptr.child);
                    @compileError("Type " ++ @typeName(T) ++ " is not supported.");
                },
            },
            else => @compileError("Type " ++ @typeName(T) ++ " is not supported."),
        }
    }
};
src/serializer.zig
// Tests for liu.packed_asset: Spec generation from Zig types, encoding into
// fixed-size chunks via tempEncode, and decoding via parse. Each test saves
// and restores liu.TempMark so temporary allocations don't leak across tests.
const std = @import("std");
const builtin = @import("builtin");

const liu = @import("./lib.zig");

const Spec = liu.packed_asset.Spec;
const TypeInfo = liu.packed_asset.TypeInfo;
const tempEncode = liu.packed_asset.tempEncode;
const parse = liu.packed_asset.parse;
const U32Slice = liu.packed_asset.U32Slice;

// An extern struct using U32Slice and a plain struct using a native slice
// must produce identical specs (fields reordered by the spec generator).
test "Packed Asset: spec generation" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const TestE = extern struct {
        data: U32Slice(u8),
        field: u8,
    };

    const Test = struct {
        field: u8,
        data: []u8,
    };

    const spec = Spec.fromType(TestE);
    const spec2 = Spec.fromType(Test);

    try std.testing.expectEqualSlices(TypeInfo, spec.typeInfo(), spec2.typeInfo());
    try std.testing.expectEqualSlices(TypeInfo, spec.typeInfo(), &.{
        .ustruct_open_4,
        .uslice_of_next,
        .pu8,
        .pu8,
        .ustruct_close_4,
    });
}

// Round-trip of a nested struct small enough to fit one chunk
// (tempEncode(t, null) => no extra chunks, data lives in `last`).
test "Packed Asset: spec encode/decode simple" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const Test = struct {
        field2: struct { asdf: u8, wasd: u8 },
        field: u64,
    };

    const spec = Spec.fromType(Test);

    try std.testing.expectEqualSlices(TypeInfo, spec.typeInfo(), &.{
        .ustruct_open_8,
        .pu64,
        .ustruct_open_1,
        .pu8,
        .pu8,
        .ustruct_close_1,
        .ustruct_close_8,
    });

    const t: Test = .{ .field = 120303113, .field2 = .{ .asdf = 100, .wasd = 255 } };
    const encoded = try tempEncode(t, null);

    try std.testing.expect(encoded.chunks.len == 0);

    const value = try parse(Test, encoded.last);

    try std.testing.expectEqual(value.field, t.field);
    try std.testing.expectEqual(value.field2.asdf, t.field2.asdf);
    try std.testing.expectEqual(value.field2.wasd, t.field2.wasd);
}

// An extern struct encodes with the same spec as its reordered plain
// counterpart; a 24-byte chunk limit forces exactly one extra chunk.
test "Packed Asset: encode/decode extern" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const TestE = extern struct {
        data: u64,
        field: u8,
    };

    const Test = struct {
        field: u8,
        data: u64,
    };

    const spec = Spec.fromType(Test);

    try std.testing.expectEqualSlices(TypeInfo, spec.typeInfo(), &.{
        .ustruct_open_8,
        .pu64,
        .pu8,
        .ustruct_close_8,
    });

    const t: TestE = .{ .field = 123, .data = 12398145 };
    const encoded = try tempEncode(t, 24);
    try std.testing.expect(encoded.chunks.len == 1);

    const bytes = try encoded.copyContiguous(liu.Temp);

    const value = try parse(TestE, bytes);
    try std.testing.expectEqual(value.*, t);
}

// 160 u64 elements with a 32-byte chunk limit must spill into 41 chunks;
// after copyContiguous the decoded slice must point inside `bytes`.
test "Packed Asset: spec encode/decode multiple chunks" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const Test = struct {
        field: u16,
        data: []const u64,
    };

    const spec = Spec.fromType(Test);
    const type_info = spec.typeInfo();
    try std.testing.expectEqualSlices(TypeInfo, type_info, &.{
        .ustruct_open_4,
        .uslice_of_next,
        .pu64,
        .pu16,
        .ustruct_close_4,
    });

    const t: Test = .{
        .field = 16,
        .data = &.{
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
        },
    };

    const encoded = try tempEncode(t, 32);

    try std.testing.expectEqual(encoded.chunks.len, 41);

    const bytes = try encoded.copyContiguous(liu.Temp);

    const value = try parse(Test, bytes);

    try std.testing.expectEqual(t.field, value.field);

    const slice = value.data.slice();
    const begin = @ptrToInt(slice.ptr);
    const end = @ptrToInt(slice.ptr + slice.len);
    try std.testing.expect(begin > @ptrToInt(bytes.ptr));
    try std.testing.expect(end <= @ptrToInt(bytes.ptr + bytes.len));

    try std.testing.expectEqualSlices(u64, t.data, slice);
}

// TypeInfo.alignment() must match the natural alignment encoded in each tag.
test "Packed Asset: alignment" {
    const aligned_1: []const TypeInfo = &.{
        .pu8,
        .pi8,
        .ustruct_open_1, // struct alignment comes from trailing number
        .ustruct_close_1,
    };
    for (aligned_1) |t| {
        try std.testing.expectEqual(t.alignment(), 1);
    }

    const aligned_2: []const TypeInfo = &.{
        .pu16,
        .pi16,
        .ustruct_open_2,
        .ustruct_close_2,
    };
    for (aligned_2) |t| {
        try std.testing.expectEqual(t.alignment(), 2);
    }

    const aligned_4: []const TypeInfo = &.{
        .pu32,
        .pi32,
        .pf32,
        .uslice_of_next, // align 4, size 8
        .ustruct_open_4,
        .ustruct_close_4,
    };
    for (aligned_4) |t| {
        try std.testing.expectEqual(t.alignment(), 4);
    }

    const aligned_8: []const TypeInfo = &.{
        .pu64,
        .pi64,
        .pf64,
        .ustruct_open_8,
        .ustruct_close_8,
    };
    for (aligned_8) |t| {
        try std.testing.expectEqual(t.alignment(), 8);
    }
}

// Small byte-slice payload fits in a single chunk; decoded slice must alias
// the contiguous buffer, not the original input.
test "Packed Asset: spec encode/decode slices" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const Test = struct {
        field: u16,
        data: []const u8,
    };

    const spec = Spec.fromType(Test);
    const type_info = spec.typeInfo();
    try std.testing.expectEqualSlices(TypeInfo, type_info, &.{
        .ustruct_open_4,
        .uslice_of_next,
        .pu8,
        .pu16,
        .ustruct_close_4,
    });

    const t: Test = .{
        .field = 16,
        .data = &.{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
    };

    const encoded = try tempEncode(t, 1024);

    try std.testing.expect(encoded.chunks.len == 0);

    const bytes = try encoded.copyContiguous(liu.Temp);

    const value = try parse(Test, bytes);

    try std.testing.expectEqual(t.field, value.field);

    const slice = value.data.slice();
    const begin = @ptrToInt(slice.ptr);
    const end = @ptrToInt(slice.ptr + slice.len);
    try std.testing.expect(begin > @ptrToInt(bytes.ptr));
    try std.testing.expect(end <= @ptrToInt(bytes.ptr + bytes.len));

    try std.testing.expectEqualSlices(u8, t.data, slice);
}

// Regression guard for comptime branch-quota exhaustion in Spec.fromType:
// 56 fields (wrapped once) is the largest size known to compile; the
// commented fields mark where the quota was exceeded.
test "Packed Asset: spec branch quota" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const Test = struct {
        field1: u64,
        field2: u64,
        field3: u64,
        field4: u64,
        field5: u64,
        field6: u64,
        field7: u64,
        field8: u64,
        field9: u64,
        field10: u64,
        field11: u64,
        field12: u64,
        field13: u64,
        field14: u64,
        field15: u64,
        field16: u64,
        field17: u64,
        field18: u64,
        field19: u64,
        field20: u64,
        field21: u64,
        field22: u64,
        field23: u64,
        field24: u64,
        field25: u64,
        field26: u64,
        field27: u64,
        field28: u64,
        field29: u64,
        field30: u64,
        field31: u64,
        field32: u64,
        field33: u64,
        field34: u64,
        field35: u64,
        field36: u64,
        field37: u64,
        field38: u64,
        field39: u64,
        field40: u64,
        field41: u64,
        field42: u64,
        field43: u64,
        field44: u64,
        field45: u64,
        field46: u64,
        field47: u64,
        field48: u64,
        field49: u64,
        field50: u64,
        field51: u64,
        field52: u64,
        field53: u64,
        field54: u64,
        field55: u64,
        field56: u64,

        // Boundary
        // field57: u64,
        // field58: u64,
        // field59: u64,
        // field60: u64,
        // field61: u64,
        // field62: u64,
        // field63: u64,
        // field64: u64,
        // field65: u64,
        // field66: u64,
        // field67: u64,
        // field68: u64,
        // field69: u64,
        // field70: u64,
        // field71: u64,
        // field72: u64,
        // field73: u64,
        // field74: u64,
        // field75: u64,
        // field76: u64,
        // field77: u64,
        // field78: u64,
        // field79: u64,
        // field80: u64,
        // field81: u64,
        // field82: u64,
        // field83: u64,
        // field84: u64,
        // field85: u64,
        // field86: u64,
        // field87: u64,
        // field88: u64,
        // field89: u64,
        // field90: u64,
        // field91: u64,
        // field92: u64,
        // field93: u64,
        // field94: u64,
        // field95: u64,
        // field96: u64,
        // field97: u64,
        // field98: u64,
        // field99: u64,
        // field100: u64,
    };

    const Wrap = struct {
        field: Test,
    };

    const spec = Spec.fromType(Wrap);
    _ = spec;
}

// Fixed-size arrays expand to a nested struct of N identical entries.
test "Packed Asset: arrays" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const Test = struct { data: [10]u64 };

    const spec = Spec.fromType(Test);
    const type_info = spec.typeInfo();

    const header: []const TypeInfo = &.{ .ustruct_open_8, .ustruct_open_8 };
    const footer: []const TypeInfo = &.{ .ustruct_close_8, .ustruct_close_8 };

    try std.testing.expectEqualSlices(TypeInfo, type_info, header ++ &[_]TypeInfo{
        .pu64,
        .pu64,
        .pu64,
        .pu64,
        .pu64,
        .pu64,
        .pu64,
        .pu64,
        .pu64,
        .pu64,
    } ++ footer);

    const t: Test = .{ .data = .{
        1230,
        23450923641,
        33450913542,
        4345893,
        5234523458,
        513459,
        62312347,
        712389477,
        81203498,
        91203948,
    } };

    const encoded = try tempEncode(t, 1024);

    try std.testing.expect(encoded.chunks.len == 0);

    const bytes = try encoded.copyContiguous(liu.Temp);

    const value = try parse(Test, bytes);

    try std.testing.expectEqualSlices(u64, &t.data, &value.data);
}

// Realistic shape: two arrays of five string slices (word lists) round-trip.
test "Packed Asset: wordle" {
    const mark = liu.TempMark;
    defer liu.TempMark = mark;

    const Test = struct {
        words: [5][]const u8,
        wordles: [5][]const u8,
    };

    const spec = Spec.fromType(Test);
    const type_info = spec.typeInfo();

    try std.testing.expectEqualSlices(TypeInfo, type_info, &[_]TypeInfo{
        .ustruct_open_4,
        .ustruct_open_4,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .ustruct_close_4,
        .ustruct_open_4,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .uslice_of_next,
        .pu8,
        .ustruct_close_4,
        .ustruct_close_4,
    });

    const t: Test = .{
        .words = .{ "asdf", "asdf;lk", "wertn", "sdoi", "wernt" },
        .wordles = .{ "bsdf", "bsdf;lk", "wedtn", "swoi", "wgrnt" },
    };

    const encoded = try tempEncode(t, 1024);

    try std.testing.expect(encoded.chunks.len == 0);

    const bytes = try encoded.copyContiguous(liu.Temp);

    const value = try parse(Test, bytes);

    for (t.words) |w, i| {
        try std.testing.expectEqualSlices(u8, w, value.words[i].slice());
    }
    for (t.wordles) |w, i| {
        try std.testing.expectEqualSlices(u8, w, value.wordles[i].slice());
    }
}
src/liu/test_packed_asset.zig
// Translates raw 16-bit frame pixels into Minecraft map palette indices via
// an embedded lookup table, fanning the work out over a thread pool when the
// output spans more than one 128x128 map item.
const std = @import("std");

// One palette byte per possible 16-bit input pixel value.
pub const ColorLookupTable: []const u8 align(32) = @embedFile("lut.dat");
// Bytes per Minecraft map item (128x128 palette indices).
pub const MapByteSize: usize = (128 * 128);

usingnamespace @import("threadpool");

const PartitionPool = ThreadPool(processMapPartition, null);

// Lazily initialized on the first toMinecraftColors call.
var coreCount: usize = undefined;
var pool: ?*PartitionPool = null;

pub const NativeMapContext = struct {
    // Destination: mapWidth * mapHeight * MapByteSize bytes of palette data.
    buffer: [*]u8,
    // Output dimensions measured in map items (not pixels).
    mapWidth: u16,
    mapHeight: u16,
};

// Converts `frameData` (interpreted as 16-bit pixels) into palette bytes in
// `context.buffer`. First call creates the shared thread pool.
// NOTE(review): `context.mapWidth * context.mapHeight` multiplies two u16s
// before widening — confirm large map grids cannot overflow u16 here.
pub fn toMinecraftColors(frameData: [*c]const u8, contextNullable: ?*NativeMapContext) void {
    if (pool == null) {
        coreCount = std.Thread.getCpuCount() catch 1;
        pool = PartitionPool.init(std.heap.c_allocator, coreCount) catch {
            @panic("Failed to create color translation thread pool");
        };
    }

    const context = contextNullable.?;
    const len = context.mapWidth * context.mapHeight * MapByteSize;
    const dst = context.buffer[0..len];

    // do hacky pointer casting - note: we don't particularly care about endian yet
    // we handle endianness within mc.zig
    const casted = @ptrCast([*]const u16, @alignCast(@alignOf(u16), frameData))[0..len];

    // check if we're just doing a single map - if we are, no fancy partitioning
    // needed, otherwise, we need to do some math to figure out where to put stuff
    if (context.mapWidth | context.mapHeight == 1) {
        // Single 1x1 map: source and destination share the same layout, so a
        // straight table lookup per pixel suffices.
        for (dst) |*out, index| {
            out.* = ColorLookupTable[casted[index]];
        }
    } else {
        // TODO: figure out how to do this with strides instead of 1 at a time
        // the performance benefit is highly likely to be negligible though, reason being: the point in which we'd
        // benefit from striding casted is the same point in which we're likely running into the brick wall of
        // minecraft's limitations - each map item takes up 128*128 bytes, which can result in >=100mb/s bandwidth
        // thoughput if we're doing 1080p _equivalent_ at 60fps (unlikely scenario, but within reason).
        // for now, ~720p @ 24-30fps is the sweet spot for not encountering it
        // hypothetically you could go ~1080p @ 30fps, but we'd be hoping the video hits cache a lot

        // disregard everything above - attempting to vectorize this isn't possible because of the fact that we use
        // a lookup table to figure out what color is what palette index. ideally we would be able to use VGATHERDPS
        // but this has the significant drawback of actually being worse in performance compared to scalar; it is
        // only better on skylake+ cpus and is just... bad on ryzen. we'd just need more raw IPC.

        // another avenue to explore is possible multi-threading our code, but as far as i remember, multithreading
        // on zig isn't exactly easy, nor does it provide exceptional performance improvements, at the cost of
        // having to rework the entire codebase to prevent race conditions, deadlocks, etc.
        const width = @intCast(usize, context.mapWidth);
        // Split the frame into coreCount contiguous pixel ranges.
        const step = len / coreCount;
        var index: usize = 0;
        while (index < len) : (index += step) {
            pool.?.submitTask(.{
                casted,
                dst,
                index,
                step,
                width,
            }) catch @panic("Failed to submit color translation task");
        }
        // error.Forced is the pool's normal "all tasks done" signal here;
        // anything else is unexpected.
        pool.?.awaitTermination() catch |err| {
            if (err == error.Forced) {
                return;
            } else {
                @panic("awaitTermination error: not forced?");
            }
        };
    }
}

// Worker: translates pixels [start, start+len) of the source frame, scattering
// each pixel into the per-map-item tiled layout of `dst`.
fn processMapPartition(src: []const u16, dst: []u8, start: usize, len: usize, width: usize) void {
    // Pixels per full row of map items.
    const linelength = width * 128;
    const _UNROLL_LIM = 16;
    // basically do the same partial loop unrolling seen in sdl-main.zig
    var srcIndex: usize = start;
    while (srcIndex < start + len) : (srcIndex += _UNROLL_LIM) {
        // Frame-space coordinates, then which map item they land in.
        const y = srcIndex / linelength;
        const x = srcIndex % linelength;
        const mapY = y / 128;
        const mapX = x / 128;
        // it feels like we could do better than this messy math
        const mapOffset = (mapY * width + mapX) * MapByteSize;
        const dstIndex = mapOffset + ((y % 128) * 128) + (x % 128);
        comptime var _unroll_index: usize = 0;
        inline while (_unroll_index < _UNROLL_LIM) : (_unroll_index += 1) {
            const i = srcIndex + _unroll_index;
            const j = dstIndex + _unroll_index;
            dst[j] = ColorLookupTable[src[i]];
        }
    }
}
nativemap/src/mc.zig
const std = @import("std");
const testing = std.testing;
const Allocator = std.mem.Allocator;

/// QueryParameters is an alias for a String HashMap
pub const QueryParameters = std.StringHashMap([]const u8);

/// Possible errors when parsing query parameters
const QueryError = error{ MalformedUrl, OutOfMemory, InvalidCharacter };

pub const Url = struct {
    path: []const u8,
    raw_path: []const u8,
    raw_query: []const u8,

    /// Builds a new URL from a given path.
    /// `raw_query` includes the leading '?' (or is "" when absent);
    /// `path` is everything before it. No allocation: all fields alias `path`.
    pub fn init(path: []const u8) Url {
        const query = if (std.mem.indexOf(u8, path, "?")) |index|
            path[index..]
        else
            "";

        return Url{
            .path = path[0 .. path.len - query.len],
            .raw_path = path,
            .raw_query = query,
        };
    }

    /// Builds query parameters from url's `raw_query`
    /// Memory is owned by caller
    /// Note: For now, each key/value pair needs to be freed manually
    /// NOTE(review): unescape() may return a slice aliasing the input when
    /// nothing needed decoding, so "freed manually" only applies to pairs
    /// that actually contained escapes — confirm intended ownership model.
    pub fn queryParameters(self: Url, allocator: *Allocator) QueryError!QueryParameters {
        var queries = QueryParameters.init(allocator);
        errdefer queries.deinit();

        var query = self.raw_query;
        if (std.mem.startsWith(u8, query, "?")) {
            query = query[1..];
        }

        while (query.len > 0) {
            // Take the next `key[=value]` segment, delimited by '&'.
            var key = query;
            if (std.mem.indexOfAny(u8, key, "&")) |index| {
                query = key[index + 1 ..];
                key = key[0..index];
            } else {
                query = "";
            }
            if (key.len == 0) continue;

            // Fix: `value` was left `undefined` when the segment has no '=',
            // and was then passed to unescape() — default to "".
            var value: []const u8 = "";
            if (std.mem.indexOfAny(u8, key, "=")) |index| {
                value = key[index + 1 ..];
                key = key[0..index];
            }

            key = try unescape(allocator, key);
            errdefer allocator.free(key);
            value = try unescape(allocator, value);
            errdefer allocator.free(value);

            try queries.put(key, value);
        }

        return queries;
    }
};

/// Unescapes the given string literal by decoding the %hex number into ascii
/// memory is owned & freed by caller (only when decoding occurred; the input
/// is returned as-is when it contains no '%' or '+').
fn unescape(allocator: *Allocator, value: []const u8) QueryError![]const u8 {
    var perc_counter: usize = 0;
    var has_plus: bool = false;

    // First pass: validate every escape and count them so we can size the
    // output buffer exactly.
    var i: usize = 0;
    while (i < value.len) : (i += 1) {
        switch (value[i]) {
            '%' => {
                perc_counter += 1;
                // Fix: the old check used `i + 2 > value.len`, which let a
                // trailing "%x" read one byte past the end of `value`.
                if (i + 2 >= value.len or !isHex(value[i + 1]) or !isHex(value[i + 2])) {
                    return QueryError.MalformedUrl;
                }
                i += 2;
            },
            '+' => {
                has_plus = true;
            },
            else => {},
        }
    }

    // Nothing to decode: return the input unchanged.
    if (perc_counter == 0 and !has_plus) return value;

    // Each "%xx" (3 bytes) collapses to 1 byte; '+' maps 1:1 to ' '.
    var buffer = try allocator.alloc(u8, value.len - 2 * perc_counter);
    errdefer allocator.free(buffer);

    // Second pass: decode. Fix: the old loop used a single index for both
    // `value` and `buffer`, so any '%' escape desynchronized reads from
    // writes and produced garbage output. Use separate read/write cursors.
    var read: usize = 0;
    var write: usize = 0;
    while (write < buffer.len) : (write += 1) {
        switch (value[read]) {
            '%' => {
                const a = try std.fmt.charToDigit(value[read + 1], 16);
                const b = try std.fmt.charToDigit(value[read + 2], 16);
                buffer[write] = a << 4 | b;
                read += 3;
            },
            '+' => {
                buffer[write] = ' ';
                read += 1;
            },
            else => {
                buffer[write] = value[read];
                read += 1;
            },
        }
    }
    return buffer;
}

/// Sanitizes the given `path` by removing '..' etc.
/// This returns a slice from a static buffer and therefore requires no allocations
/// Fix: the buffer was a stack local, so the returned slice dangled after
/// return. It now has static (container-scope) storage — which also means
/// sanitize() is NOT reentrant or thread-safe: each call overwrites the
/// previous result.
pub fn sanitize(path: []const u8) []const u8 {
    const static = struct {
        var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
    };
    const buffer = &static.buffer;

    if (path.len == 0) {
        buffer[0] = '.';
        return buffer[0..1][0..];
    }

    const rooted = path[0] == '/';
    const len = path.len;
    std.mem.copy(u8, buffer, path);
    var out = BufferUtil.init(buffer, path);

    var i: usize = 0;
    var dot: usize = 0;

    if (rooted) {
        out.append('/');
        i = 1;
        dot = 1;
    }

    while (i < len) {
        if (path[i] == '/') {
            // empty path element
            i += 1;
            continue;
        }

        if (path[i] == '.' and (i + 1 == len or path[i + 1] == '/')) {
            // . element
            i += 1;
            continue;
        }

        if (path[i] == '.' and path[i + 1] == '.' and (i + 2 == len or path[i + 2] == '/')) {
            // .. element, remove '..' bits till last '/'
            i += 2;

            if (out.index > dot) {
                // Can backtrack: pop the previous element.
                out.index -= 1;
                while (out.index > dot and out.char() != '/') : (out.index -= 1) {}
                continue;
            }

            if (!rooted) {
                // Cannot backtrack an unrooted path: keep the ".." literally.
                if (out.index > 0) out.append('/');
                out.append('.');
                out.append('.');
                dot = out.index;
                continue;
            }
        }

        if (rooted and out.index != 1 or !rooted and out.index != 0) out.append('/');

        while (i < len and path[i] != '/') : (i += 1) {
            out.append(path[i]);
        }
    }

    if (out.index == 0) {
        buffer[0] = '.';
        return buffer[0..1][0..];
    }

    return out.result();
}

/// Write cursor over `buffer`; skips the actual store when the byte copied
/// from `path` is already in place (buffer was pre-filled with `path`).
const BufferUtil = struct {
    buffer: []u8,
    index: usize,
    path: []const u8,

    fn init(buffer: []u8, path: []const u8) BufferUtil {
        return .{ .buffer = buffer, .index = 0, .path = path };
    }

    fn append(self: *BufferUtil, c: u8) void {
        std.debug.assert(self.index < self.buffer.len);

        if (self.index < self.path.len and self.path[self.index] == c) {
            self.index += 1;
            return;
        }

        self.buffer[self.index] = c;
        self.index += 1;
    }

    fn char(self: BufferUtil) u8 {
        return self.buffer[self.index];
    }

    fn result(self: BufferUtil) []const u8 {
        return self.buffer[0..self.index][0..];
    }
};

/// Escapes a string by encoding symbols so it can be safely used inside an URL
fn escape(value: []const u8) []const u8 {
    @compileError("TODO: Implement escape()");
}

/// Returns true if the given byte is heximal
fn isHex(c: u8) bool {
    return switch (c) {
        '0'...'9', 'a'...'f', 'A'...'F' => true,
        else => false,
    };
}

test "Basic raw query" {
    const path = "/example?name=value";
    const url: Url = Url.init(path);

    testing.expectEqualSlices(u8, "?name=value", url.raw_query);
}

test "Retrieve query parameters" {
    const path = "/example?name=value";
    const url: Url = Url.init(path);

    var query_params = try url.queryParameters(testing.allocator);
    defer query_params.deinit();

    testing.expect(query_params.contains("name"));
    testing.expectEqualStrings("value", query_params.get("name") orelse " ");
}

test "Sanitize paths" {
    const cases = .{
        // Already clean
        .{ .input = "", .expected = "." },
        .{ .input = "abc", .expected = "abc" },
        .{ .input = "abc/def", .expected = "abc/def" },
        .{ .input = "a/b/c", .expected = "a/b/c" },
        .{ .input = ".", .expected = "." },
        .{ .input = "..", .expected = ".." },
        .{ .input = "../..", .expected = "../.." },
        .{ .input = "../../abc", .expected = "../../abc" },
        .{ .input = "/abc", .expected = "/abc" },
        .{ .input = "/", .expected = "/" },

        // Remove trailing slash
        .{ .input = "abc/", .expected = "abc" },
        .{ .input = "abc/def/", .expected = "abc/def" },
        .{ .input = "a/b/c/", .expected = "a/b/c" },
        .{ .input = "./", .expected = "." },
        .{ .input = "../", .expected = ".." },
        .{ .input = "../../", .expected = "../.." },
        .{ .input = "/abc/", .expected = "/abc" },

        // Remove doubled slash
        .{ .input = "abc//def//ghi", .expected = "abc/def/ghi" },
        .{ .input = "//abc", .expected = "/abc" },
        .{ .input = "///abc", .expected = "/abc" },
        .{ .input = "//abc//", .expected = "/abc" },
        .{ .input = "abc//", .expected = "abc" },

        // Remove . elements
        .{ .input = "abc/./def", .expected = "abc/def" },
        .{ .input = "/./abc/def", .expected = "/abc/def" },
        .{ .input = "abc/.", .expected = "abc" },

        // Remove .. elements
        .{ .input = "abc/def/ghi/../jkl", .expected = "abc/def/jkl" },
        .{ .input = "abc/def/../ghi/../jkl", .expected = "abc/jkl" },
        .{ .input = "abc/def/..", .expected = "abc" },
        .{ .input = "abc/def/../..", .expected = "." },
        .{ .input = "/abc/def/../..", .expected = "/" },
        .{ .input = "abc/def/../../..", .expected = ".." },
        .{ .input = "/abc/def/../../..", .expected = "/" },
        .{ .input = "abc/def/../../../ghi/jkl/../../../mno", .expected = "../../mno" },

        // Combinations
        .{ .input = "abc/./../def", .expected = "def" },
        .{ .input = "abc//./../def", .expected = "def" },
        .{ .input = "abc/../../././../def", .expected = "../../def" },
    };

    inline for (cases) |case| {
        testing.expectEqualStrings(case.expected, sanitize(case.input));
    }
}
src/url.zig
const std = @import("std");
const fs = std.fs;
const heap = std.heap;
const math = std.math;
const mem = std.mem;
const process = std.process;
const os = std.os;
const ChildProcess = std.ChildProcess;

/// Spawn a clipboard-writing child process (`xclip` or `xsel`, whichever is
/// found first on PATH). The caller writes the data to copy into the child's
/// stdin pipe. Returns `error.NoCopyCommand` when neither tool is installed.
pub fn copy(allocator: *mem.Allocator) !*ChildProcess {
    var env = try process.getEnvMap(allocator);
    defer env.deinit();
    const path = env.get("PATH") orelse "./";

    var buf: [os.PATH_MAX]u8 = undefined;
    if (getPathToExe(&buf, path, "xclip")) |p| {
        return try exec(allocator, &[_][]const u8{ p, "-selection", "clipboard" }, .Pipe, .Ignore);
    } else if (getPathToExe(&buf, path, "xsel")) |p| {
        return try exec(allocator, &[_][]const u8{ p, "-b" }, .Pipe, .Ignore);
    }
    return error.NoCopyCommand;
}

/// Spawn a clipboard-reading child process (`xclip -o` or `xsel`). The caller
/// reads the clipboard contents from the child's stdout pipe.
/// NOTE(review): this returns `error.NoCopyCommand` (not a paste-specific
/// error); kept as-is for backward compatibility with existing callers.
pub fn paste(allocator: *mem.Allocator) !*ChildProcess {
    var env = try process.getEnvMap(allocator);
    defer env.deinit();
    const path = env.get("PATH") orelse "./";

    var buf: [os.PATH_MAX]u8 = undefined;
    if (getPathToExe(&buf, path, "xclip")) |p| {
        return try exec(allocator, &[_][]const u8{ p, "-selection", "clipboard", "-o" }, .Ignore, .Pipe);
    } else if (getPathToExe(&buf, path, "xsel")) |p| {
        return try exec(allocator, &[_][]const u8{ p, "-b" }, .Ignore, .Pipe);
    }
    return error.NoCopyCommand;
}

/// Spawn `argv` with the requested stdin/stdout behaviors; stderr is ignored.
/// Caller owns the returned ChildProcess.
fn exec(
    allocator: *mem.Allocator,
    argv: []const []const u8,
    stdin: ChildProcess.StdIo,
    stdout: ChildProcess.StdIo,
) !*ChildProcess {
    const p = try ChildProcess.init(argv, allocator);
    p.stdin_behavior = stdin;
    p.stdout_behavior = stdout;
    p.stderr_behavior = ChildProcess.StdIo.Ignore;
    try p.spawn();
    return p;
}

/// Search each `:`-separated directory in `path_list` for an accessible
/// executable named `exe`. Returns a path slice backed by `buf`, or null if
/// the executable is not found anywhere on the path.
fn getPathToExe(buf: *[os.PATH_MAX]u8, path_list: []const u8, exe: []const u8) ?[]u8 {
    var iter = mem.tokenize(path_list, ":");
    while (iter.next()) |path| {
        // BUGFIX: the allocator must outlive the `join` call. The original
        // code took the address of a field inside a temporary
        // (`&heap.FixedBufferAllocator.init(buf).allocator`), leaving a
        // dangling pointer by the time `join` ran. Keep the
        // FixedBufferAllocator in a named local instead.
        var fba = heap.FixedBufferAllocator.init(buf);
        const file = fs.path.join(&fba.allocator, &[_][]const u8{ path, exe }) catch continue;
        fs.cwd().access(file, .{}) catch continue;
        return file;
    }
    return null;
}
src/core/clipboard.zig
const std = @import("std");
const utils = @import("utils.zig");
const Vec3 = @import("vec.zig").Vec3;
const Ray = @import("ray.zig").Ray;
const HitRecord = @import("sphere.zig").HitRecord;

/// Tagged union over the supported surface materials. The `lambertian`,
/// `metal` and `dielectric` helpers are convenience constructors for each tag.
pub const Material = union(enum) {
    Lambertian: Lambertian,
    Metal: Metal,
    Dielectric: Dielectric,

    /// Construct a diffuse material with the given albedo (color).
    pub fn lambertian(albedo: Vec3) Material {
        return Material{ .Lambertian = Lambertian{ .albedo = albedo } };
    }

    /// Construct a reflective material; `fuzz` perturbs the reflection.
    pub fn metal(albedo: Vec3, fuzz: f32) Material {
        return Material{ .Metal = Metal{ .albedo = albedo, .fuzz = fuzz } };
    }

    /// Construct a refractive material with index of refraction `ior`.
    pub fn dielectric(ior: f32) Material {
        return Material{ .Dielectric = Dielectric{ .ior = ior } };
    }
};

/// Diffuse (matte) material: scatters rays in a random direction biased
/// around the surface normal.
pub const Lambertian = struct {
    albedo: Vec3,

    /// Scatter the incoming ray at hit `rec`; writes the attenuation and the
    /// new ray in place. Always scatters (returns true).
    pub fn scatter(
        self: *const @This(),
        rec: *const HitRecord,
        atten: *Vec3,
        out: *Ray,
        random: *std.rand.Random,
    ) bool {
        var scatter_dir = rec.normal.add(utils.randomUnitVec(random));
        // Catch degenerate scatter direction (random unit vector nearly
        // opposite the normal would sum to ~zero).
        if (scatter_dir.approxEqAbs(Vec3.zero, 10 * std.math.epsilon(f32)))
            scatter_dir = rec.normal;
        out.origin = rec.point;
        out.dir = scatter_dir;
        atten.* = self.albedo;
        return true;
    }
};

/// Reflective material; `fuzz` adds a random offset to the mirror reflection.
pub const Metal = struct {
    albedo: Vec3,
    fuzz: f32,

    /// Scatter by mirror-reflecting `in` about the surface normal, fuzzed by
    /// a random in-sphere vector. Returns false when the fuzzed reflection
    /// points into the surface (ray absorbed).
    pub fn scatter(
        self: *const @This(),
        in: *const Ray,
        rec: *const HitRecord,
        atten: *Vec3,
        out: *Ray,
        random: *std.rand.Random,
    ) bool {
        const reflect_dir = Vec3.reflect(in.dir.normalize(), rec.normal);
        out.origin = rec.point;
        out.dir = reflect_dir
            .add(utils.randomVecInUnitSphere(random).scale(self.fuzz));
        atten.* = self.albedo;
        return out.dir.dot(rec.normal) > 0;
    }
};

/// Refractive (glass-like) material with index of refraction `ior`.
pub const Dielectric = struct {
    ior: f32,

    /// Scatter by refracting or reflecting `in` per Snell's law, choosing
    /// reflection on total internal reflection or probabilistically per
    /// Schlick's reflectance. Attenuation is always 1 (no absorption).
    pub fn scatter(
        self: *const @This(),
        in: *const Ray,
        rec: *const HitRecord,
        atten: *Vec3,
        out: *Ray,
        random: *std.rand.Random,
    ) bool {
        // Air has an index of refraction of 1, so the ratio flips depending
        // on whether we are entering or exiting the surface.
        const ior_ratio = if (rec.front_face) 1 / self.ior else self.ior;
        const unit_dir = in.dir.normalize();
        const cos_theta = @minimum(unit_dir.neg().dot(rec.normal), 1.0);
        const sin_theta = @sqrt(1 - cos_theta * cos_theta);
        const refl = reflectance(cos_theta, ior_ratio);
        // Consider total internal reflection (sin > 1 has no refraction
        // solution), and reflect probabilistically by Schlick's approximation.
        out.dir = if (ior_ratio * sin_theta > 1 or refl > random.float(f32))
            Vec3.reflect(unit_dir, rec.normal)
        else
            Vec3.refract(unit_dir, rec.normal, ior_ratio);
        out.origin = rec.point;
        atten.* = Vec3.initAll(1);
        return true;
    }

    // Calculate reflectance using Schlick's approximation.
    fn reflectance(cosine: f32, ior: f32) f32 {
        var r0 = (1 - ior) / (1 + ior);
        r0 = r0 * r0;
        return r0 + (1 - r0) * std.math.pow(f32, 1 - cosine, 5);
    }
};
src/materials.zig
const std = @import("std");
const fmt = std.fmt;
const algods = @import("algods");
const compiler = @import("main.zig");

/// The classes of token the tokenizer can produce.
const TokenKind = enum {
    TK_IDENT, // Identifiers
    TK_PUNCT, // Punctuators
    TK_NUM, // Numeric literals
    TK_EOF, // End-of-file markers
    TK_KEYWORD, // Keywords
};

/// A single lexed token plus its position in the input stream.
pub const Token = struct {
    kind: TokenKind, // Token kind
    value: Value,
    location: usize, // location of Token in input stream

    pub const Value = union {
        num_value: u64, // If kind is TK_NUM, its value
        ident_name: []const u8, // if kind is TK_IDENT/TK_KEYWORD/TK_PUNCT, its text
    };
};

const Tokenizer = @This();

pub const TokenList = algods.linked_list.SinglyCircularList(Token);

const Error = error{
    TerminalMismatch,
    TokenNotANumber,
    InvalidToken,
};

/// Report an out-of-memory condition and abort the process.
/// `std.debug.panic` is `noreturn`, so nothing may follow it: the original
/// trailing `std.process.exit(-1)` was both unreachable and invalid, since
/// `process.exit` takes a `u8` exit status and `-1` does not fit. Removed.
pub fn OOMhandler() noreturn {
    std.log.err("allocator has run out of memory", .{});
    std.debug.panic("Out of Memory condition", .{});
}

tokens: TokenList, // List of tokens

/// Create a tokenizer whose token list allocates from `allocator`.
pub fn init(allocator: std.mem.Allocator) Tokenizer {
    return Tokenizer{
        .tokens = TokenList.init(allocator),
    };
}

// Create a new token.
// Append `token` to the token list, aborting the process on OOM.
fn addToken(self: *Tokenizer, token: Token) void {
    self.tokens.append(token) catch OOMhandler();
}

// Print a caret-annotated diagnostic pointing at `error_slice` inside the
// global token stream, then exit with status 2. Never returns.
fn reportTokenizerError(error_slice: []const u8, comptime msg: []const u8, args: anytype) noreturn {
    const token_index = std.mem.indexOf(u8, compiler.TOKEN_STREAM, error_slice).?;
    // NOTE(review): `position_of_stream_in_error_fmt` below is hand-counted
    // against the exact bytes of this format string (and the string seems to
    // be missing a closing quote after the second `{s}`) — do not change one
    // without re-deriving the other.
    const error_fmt = "\nError: Invalid Token '{s}' in '{s} at {d}\n";
    const position_of_stream_in_error_fmt = 28;
    const error_token_start_location = error_slice.len + position_of_stream_in_error_fmt;
    const actual_location = error_token_start_location + token_index;
    std.debug.print(error_fmt, .{ error_slice, compiler.TOKEN_STREAM, token_index });
    // Pad with spaces so the caret lands under the offending token.
    std.debug.print("{[spaces]s:>[width]}", .{ .spaces = " ", .width = actual_location });
    const format_msg = "^ " ++ msg ++ "\n";
    std.debug.print(format_msg, args);
    std.process.exit(2);
}

// The two-character punctuators recognized by the tokenizer.
const PUNCTUATOR = [_][]const u8{
    "==",
    "<=",
    ">=",
    "!=",
};

// Returns true if `punct` is one of the recognized multi-char punctuators.
fn isPunct(punct: []const u8) bool {
    for (PUNCTUATOR) |punctuation| {
        if (std.mem.eql(u8, punctuation, punct)) return true;
    }
    return false;
}

// Returns true if char is valid as the first character of an identifier.
fn isValidIdentifier1stChar(char: u8) bool {
    if (std.ascii.isAlpha(char) or char == '_') {
        return true;
    }
    return false;
}

// Returns true if char is valid as a character in an identifier.
fn isValidIdentifierChar(char: u8) bool {
    if (isValidIdentifier1stChar(char) or std.ascii.isDigit(char)) {
        return true;
    }
    return false;
}

// Reserved words that lex as TK_KEYWORD rather than TK_IDENT.
const KEYWORDS = [_][]const u8{
    "return",
    "if",
    "else",
    "for",
    "while",
};

// Returns true if `identifier` is a reserved keyword.
fn isKeyword(identifier: []const u8) bool {
    for (KEYWORDS) |keyword| {
        if (std.mem.eql(u8, keyword, identifier)) {
            return true;
        }
    }
    return false;
}

// Tokenize `peek` and returns new tokens.
/// Lex the global `compiler.TOKEN_STREAM` into `self.tokens` and return an
/// iterator over the list. Recognizes numbers, punctuators (1- and 2-char),
/// identifiers/keywords, and a final TK_EOF; any other byte aborts via
/// `reportTokenizerError`.
pub fn tokenize(self: *Tokenizer) !TokenList.Iterator {
    const peek = compiler.TOKEN_STREAM;
    var index: usize = 0;
    // condition to prevent out of bound index but allow processing of last token
    while (index < peek.len) : ({
        index += 1;
    }) {
        const value = peek[index];
        // Numeric literal
        if (std.ascii.isDigit(value)) {
            const current_index = index;
            // since value is a digit we need to look past the current index
            // for additional digits in the stream
            var digit_index = current_index + 1;
            // convert ascii char to number
            const RADIX = 10;
            var digits = fmt.charToDigit(value, RADIX) catch unreachable;
            // look from digit_index for additional digits
            for (peek[digit_index..]) |digit| {
                if (std.ascii.isDigit(digit)) {
                    // accumulate multiple digit characters into one value
                    digits = (digits * 10) + (fmt.charToDigit(digit, RADIX) catch unreachable);
                    digit_index += 1;
                    continue;
                }
                // not a digit: the literal ends here
                break;
            }
            // -1 because our anticipated next digit token wasn't a digit;
            // the loop header's `index += 1` then lands on the next token.
            const next_token_index = digit_index - 1;
            index = next_token_index;
            self.addToken(Token{
                .kind = .TK_NUM,
                .value = Token.Value{ .num_value = digits },
                .location = current_index,
            });
            continue;
        }
        // Skip whitespace characters.
        if (std.ascii.isSpace(value)) {
            continue;
        }
        // Operator
        if (std.ascii.isPunct(value)) {
            const current_index = index;
            const current_punct = value;
            // check whether this starts a two-char operator: == != <= or >=
            if (current_index + 1 < peek.len) {
                if (std.ascii.isPunct(peek[current_index + 1])) {
                    const next_punct = peek[current_index + 1];
                    const max_punct_char = 2;
                    var buf: [max_punct_char]u8 = undefined;
                    const operator = try std.fmt.bufPrint(&buf, "{c}{c}", .{ current_punct, next_punct });
                    if (isPunct(operator)) {
                        // consume the second character of the operator
                        index = current_index + 1;
                        self.addToken(Token{
                            .kind = .TK_PUNCT,
                            .value = Token.Value{ .ident_name = try fmt.allocPrint(self.tokens.allocator, "{s}", .{operator}) },
                            .location = current_index,
                        });
                        continue;
                    }
                }
            }
            // single-character punctuator
            self.addToken(Token{
                .kind = .TK_PUNCT,
                .value = Token.Value{ .ident_name = try fmt.allocPrint(self.tokens.allocator, "{c}", .{current_punct}) },
                .location = index,
            });
            continue;
        }
        // Identifier or Keywords
        if (isValidIdentifier1stChar(value)) {
            const current_index = index;
            const next_index = index + 1;
            // NOTE(review): identifiers longer than 124 bytes would overflow
            // this fixed buffer — confirm an upstream limit exists.
            const max_identifier_length = 124;
            var identifier_buf: [max_identifier_length]u8 = undefined;
            identifier_buf[0] = value;
            const next_ident_offset = 1;
            var identifier_len: usize = 1;
            for (peek[next_index..]) |identifier_char, ident_index| {
                if (isValidIdentifierChar(identifier_char)) {
                    identifier_len += next_ident_offset;
                    // move index to end of identifier
                    index += next_ident_offset;
                    identifier_buf[ident_index + next_ident_offset] = identifier_char;
                } else {
                    // break if char isn't a valid identifier character
                    break;
                }
            }
            const identifier = identifier_buf[0..identifier_len];
            if (isKeyword(identifier)) {
                self.addToken(Token{
                    .kind = .TK_KEYWORD,
                    .value = Token.Value{ .ident_name = try fmt.allocPrint(self.tokens.allocator, "{s}", .{identifier}) },
                    .location = current_index,
                });
            } else {
                self.addToken(Token{
                    .kind = .TK_IDENT,
                    .value = Token.Value{ .ident_name = try fmt.allocPrint(self.tokens.allocator, "{s}", .{identifier}) },
                    .location = current_index,
                });
            }
            continue;
        }
        // Unrecognized byte: report and abort (reportTokenizerError is
        // noreturn, so the `return` below is never reached).
        const bad_token = &[_]u8{value};
        reportTokenizerError(bad_token, "expected number or punctuation or identifier but found {s}", .{bad_token});
        return error.InvalidToken;
    }
    // Terminate the stream with an explicit EOF token.
    self.addToken(Token{
        .kind = .TK_EOF,
        .value = Token.Value{ .ident_name = "EOF" },
        .location = index,
    });
    return self.tokens.iterator();
}
src/tokenizer.zig
const std = @import("std");
const input = @embedFile("data/input12");
// Provides `lines` and `print` helpers used below.
usingnamespace @import("util.zig");

/// Advent of Code 2020 day 12: run both parts over the embedded input.
pub fn main() !void {
    const part1 = findManhattanDistance(input, .one);
    const part2 = findManhattanDistance(input, .two);
    print("[Part1] Distance: {}", .{part1});
    print("[Part2] Distance: {}", .{part2});
}

// Ship state: position, heading (part 1), and waypoint offset (part 2).
const Ship = struct {
    x: isize = 0,
    y: isize = 0,
    facing: u8 = 'E',
    waypoint_x: isize = 10,
    waypoint_y: isize = 1,
};

const Part = enum { one, two };

/// Execute every instruction line ("<letter><number>") against a fresh ship
/// and return the Manhattan distance of the final position from the origin.
fn findManhattanDistance(inputStr: []const u8, comptime part: Part) usize {
    var ship: Ship = .{};
    var reader = lines(inputStr);
    while (reader.next()) |line| {
        const inst = line[0];
        const value = std.fmt.parseUnsigned(isize, line[1..], 10) catch unreachable;
        move(&ship, inst, value, part);
    }
    const x = std.math.absInt(ship.x) catch unreachable;
    const y = std.math.absInt(ship.y) catch unreachable;
    return @intCast(usize, x + y);
}

// Dispatch one instruction to the movement rules for the selected part.
fn move(ship: *Ship, inst: u8, value: isize, comptime part: Part) void {
    switch (part) {
        .one => moveV1(ship, inst, value),
        .two => moveV2(ship, inst, value),
    }
}

// Part 1 rules: N/S/E/W translate the ship, L/R rotate its heading,
// F moves in the current heading (recursive call with the facing letter).
fn moveV1(ship: *Ship, inst: u8, value: isize) void {
    switch (inst) {
        'N' => ship.y += value,
        'S' => ship.y -= value,
        'E' => ship.x += value,
        'W' => ship.x -= value,
        'L' => ship.facing = rotate('L', ship.facing, value),
        'R' => ship.facing = rotate('R', ship.facing, value),
        'F' => moveV1(ship, ship.facing, value),
        else => unreachable,
    }
}

// Part 2 rules: N/S/E/W move the waypoint, L/R rotate the waypoint about the
// ship, F moves the ship toward the waypoint `value` times.
fn moveV2(ship: *Ship, inst: u8, value: isize) void {
    switch (inst) {
        'N' => ship.waypoint_y += value,
        'S' => ship.waypoint_y -= value,
        'E' => ship.waypoint_x += value,
        'W' => ship.waypoint_x -= value,
        'L' => rotateWaypoint('L', &ship.waypoint_x, &ship.waypoint_y, value),
        'R' => rotateWaypoint('R', &ship.waypoint_x, &ship.waypoint_y, value),
        'F' => {
            ship.x += ship.waypoint_x * value;
            ship.y += ship.waypoint_y * value;
        },
        else => unreachable,
    }
}

// Rotate a compass heading by `degrees` (multiple of 90). The directions
// array is ordered counter-clockwise, so L advances and R retreats the index.
fn rotate(comptime turn_dir: u8, facing: u8, degrees: isize) u8 {
    const steps = @intCast(usize, @divExact(degrees, 90)) % 4;
    const directions = [_]u8{ 'E', 'N', 'W', 'S' };
    const index = std.mem.indexOf(u8, &directions, &[_]u8{facing}) orelse unreachable;
    const newIndex = switch (turn_dir) {
        'L' => index + steps,
        // add directions.len first so the subtraction cannot underflow
        'R' => (index + directions.len) - steps,
        else => unreachable,
    } % directions.len;
    return directions[newIndex];
}

// Rotate the waypoint (x, y) about the origin by `degrees` (multiple of 90),
// expressed as 0-3 quarter-turns to the left.
fn rotateWaypoint(comptime turn_dir: u8, x: *isize, y: *isize, degrees: isize) void {
    const num_steps = @intCast(usize, @divExact(degrees, 90)) % 4;
    const steps_to_the_left = if (turn_dir == 'L') num_steps else (4 - num_steps) % 4;
    const x_val = x.*;
    const y_val = y.*;
    switch (steps_to_the_left) {
        0 => {},
        1 => {
            x.* = -y_val;
            y.* = x_val;
        },
        2 => {
            x.* = -x_val;
            y.* = -y_val;
        },
        3 => {
            x.* = y_val;
            y.* = -x_val;
        },
        else => unreachable,
    }
}

const expectEqual = std.testing.expectEqual;

test "findManhattanDistance" {
    expectEqual(@as(usize, 25), findManhattanDistance(
        \\F10
        \\N3
        \\F7
        \\R90
        \\F11
    , .one));
    expectEqual(@as(usize, 286), findManhattanDistance(
        \\F10
        \\N3
        \\F7
        \\R90
        \\F11
    , .two));
}
src/day12.zig
const std = @import("std");
const assert = std.debug.assert;

/// Real (OS-backed) time source exposing monotonic and wall-clock timestamps.
pub const Time = struct {
    const Self = @This();

    /// Hardware and/or software bugs can mean that the monotonic clock may regress.
    /// One example (of many): https://bugzilla.redhat.com/show_bug.cgi?id=448449
    /// We crash the process for safety if this ever happens, to protect against infinite loops.
    /// It's better to crash and come back with a valid monotonic clock than get stuck forever.
    monotonic_guard: u64 = 0,

    /// A timestamp to measure elapsed time, meaningful only on the same system, not across reboots.
    /// Always use a monotonic timestamp if the goal is to measure elapsed time.
    /// This clock is not affected by discontinuous jumps in the system time, for example if the
    /// system administrator manually changes the clock.
    /// Returns nanoseconds.
    pub fn monotonic(self: *Self) u64 {
        // The true monotonic clock on Linux is not in fact CLOCK_MONOTONIC:
        // CLOCK_MONOTONIC excludes elapsed time while the system is suspended (e.g. VM migration).
        // CLOCK_BOOTTIME is the same as CLOCK_MONOTONIC but includes elapsed time during a suspend.
        // For more detail and why CLOCK_MONOTONIC_RAW is even worse than CLOCK_MONOTONIC,
        // see https://github.com/ziglang/zig/pull/933#discussion_r656021295.
        var ts: std.os.timespec = undefined;
        std.os.clock_gettime(std.os.CLOCK_BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required");
        const m = @intCast(u64, ts.tv_sec) * std.time.ns_per_s + @intCast(u64, ts.tv_nsec);
        // "Oops!...I Did It Again" — crash rather than ever report time going backwards.
        if (m < self.monotonic_guard) @panic("a hardware/kernel bug regressed the monotonic clock");
        self.monotonic_guard = m;
        return m;
    }

    /// A timestamp to measure real (i.e. wall clock) time, meaningful across systems, and reboots.
    /// This clock is affected by discontinuous jumps in the system time.
    /// Returns nanoseconds since the epoch.
    pub fn realtime(self: *Self) i64 {
        var ts: std.os.timespec = undefined;
        std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts) catch unreachable;
        return @as(i64, ts.tv_sec) * std.time.ns_per_s + ts.tv_nsec;
    }

    /// No-op for the real time source; exists so Time and DeterministicTime
    /// share the same interface.
    pub fn tick(self: *Self) void {}
};

/// Simulated time source for deterministic tests: time advances only when
/// `tick()` is called, by `resolution` nanoseconds per tick.
pub const DeterministicTime = struct {
    const Self = @This();

    /// The duration of a single tick in nanoseconds.
    resolution: u64,

    /// The number of ticks elapsed since initialization.
    ticks: u64 = 0,

    /// The instant in time chosen as the origin of this time source.
    epoch: i64 = 0,

    /// Elapsed simulated nanoseconds since initialization.
    pub fn monotonic(self: *Self) u64 {
        return self.ticks * self.resolution;
    }

    /// Simulated wall-clock time: epoch plus elapsed simulated time.
    pub fn realtime(self: *Self) i64 {
        return self.epoch + @intCast(i64, self.monotonic());
    }

    /// Advance simulated time by one tick.
    pub fn tick(self: *Self) void {
        self.ticks += 1;
    }
};
src/time.zig
const MessageHeader = @import("message.zig").MessageHeader;
const futex_wait = @import("futex.zig").futex_wait;
const futex_wake = @import("futex.zig").futex_wake;

const std = @import("std");
const assert = std.debug.assert;
const warn = std.debug.warn;

const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;

/// SignalContext is Os specific
pub const SignalContext = switch (builtin.os) {
    builtin.Os.linux => u32,
    else => @compileError("Unsupported OS"),
};

/// A Many producer, single consumer queue of MessageHeaders.
/// Based on std.atomic.Queue
///
/// Uses a spinlock to protect get() and put() and thus is non-blocking
pub fn MessageQueue() type {
    return struct {
        pub const Self = @This();

        // Intrusive singly-linked list through MessageHeader.pNext.
        head: ?*MessageHeader,
        tail: ?*MessageHeader,
        // 0 = unlocked, 1 = locked; acquired via atomic exchange (spinlock).
        lock: u8,
        // Invoked (if both are non-null) when a message is put on an empty queue.
        signalFn: ?fn (pSignalContext: *SignalContext) void,
        pSignalContext: ?*SignalContext,

        /// Initialize an MessageQueue with optional signalFn and signalContext.
        /// When the first message is added to an empty queue signalFn is invoked
        /// if it and a signalContext are available. If either are null then the
        /// signalFn will never be invoked.
        /// TODO: Consider signalFn taking an opaque usize as a parameter?
        pub fn init(signalFn: ?fn (context: *SignalContext) void, pSignalContext: ?*SignalContext) Self {
            warn("MessageQueue.init: {*}:&signal_context={*}\n", signalFn, pSignalContext);
            return Self{
                .head = null,
                .tail = null,
                .lock = 0,
                .signalFn = signalFn,
                .pSignalContext = pSignalContext,
            };
        }

        /// Append `mh` to the tail of the queue. Signals the consumer when
        /// the queue transitions from empty to non-empty.
        pub fn put(pSelf: *Self, mh: *MessageHeader) void {
            mh.pNext = null;

            // Spin until we own the lock; release it on every exit path.
            while (@atomicRmw(u8, &pSelf.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
            defer assert(@atomicRmw(u8, &pSelf.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);

            const opt_tail = pSelf.tail;
            pSelf.tail = mh;
            if (opt_tail) |tail| {
                // Non-empty: link after the old tail.
                tail.pNext = mh;
            } else {
                // Was empty so wakeup any waiters.
                assert(pSelf.head == null);
                pSelf.head = mh;
                if (pSelf.signalFn) |sigFn| {
                    if (pSelf.pSignalContext) |pSigCtx| {
                        // Note: called while still holding the spinlock.
                        sigFn(pSigCtx);
                    }
                }
            }
        }

        /// Pop and return the head of the queue, or null when empty.
        pub fn get(pSelf: *Self) ?*MessageHeader {
            while (@atomicRmw(u8, &pSelf.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
            defer assert(@atomicRmw(u8, &pSelf.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);

            const head = pSelf.head orelse {
                return null;
            };
            pSelf.head = head.pNext;
            if (head.pNext == null) {
                // Removed the last entry; the queue is now empty.
                pSelf.tail = null;
            }
            return head;
        }

        /// std.fmt integration (old-Zig formatting protocol): prints the raw
        /// pointer/lock state of the queue.
        pub fn format(
            pSelf: *const Self,
            comptime fmt: []const u8,
            context: var,
            comptime FmtError: type,
            output: fn (@typeOf(context), []const u8) FmtError!void,
        ) FmtError!void {
            try std.fmt.format(context, FmtError, output, "{{head=0x{x} tail=0x{x} lock=0x{x} signalFn=0x{x} pSignalContext=0x{x}}}", if (pSelf.head) |pH| @ptrToInt(pH) else 0, if (pSelf.tail) |pT| @ptrToInt(pT) else 0, pSelf.lock, if (pSelf.signalFn) |sFn| @ptrToInt(sFn) else 0, if (pSelf.pSignalContext) |pCtx| @ptrToInt(pCtx) else 0);
        }
    };
}

// Shared state threaded through the producer/consumer test below.
const Context = struct {
    allocator: *std.mem.Allocator,
    queue: *MessageQueue(),
    put_sum: u64,
    get_sum: u64,
    get_count: usize,
    puts_done: u8, // TODO make this a bool
};

// **************************************
// The code below is from zig/std/atomic/queue.zig
// **************************************

// TODO add lazy evaluated build options and then put puts_per_thread behind
// some option such as: "AggressiveMultithreadedFuzzTest".
// In the AppVeyor CI we would use a less aggressive setting since at 1 core,
// while we still want this test to pass, we need a smaller value since there
// is so much thrashing; we would also use a less aggressive setting when
// running in valgrind.
const puts_per_thread = 500;
const put_thread_count = 3;

test "MessageQueue.multi-threaded" {
    var direct_allocator = std.heap.DirectAllocator.init();
    defer direct_allocator.deinit();

    var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
    defer direct_allocator.allocator.free(plenty_of_memory);

    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
    var a = &fixed_buffer_allocator.allocator;

    signal_count = 0;
    var queue = MessageQueue().init(signaler, &signal);
    var context = Context{
        .allocator = a,
        .queue = &queue,
        .put_sum = 0,
        .get_sum = 0,
        .puts_done = 0,
        .get_count = 0,
    };

    // Spawn the producers and the single consumer.
    var putters: [put_thread_count]*std.os.Thread = undefined;
    for (putters) |*t| {
        t.* = try std.os.spawnThread(&context, startPuts);
    }
    var getter = try std.os.spawnThread(&context, startGetter);

    // Wait for producers, then flag completion and wake the consumer.
    for (putters) |t| t.wait();
    _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
    warn("putters are done, signal getter context.puts_done={}\n", context.puts_done);
    signaler(&signal);
    getter.wait();

    // Everything put must have been got, exactly once.
    if (context.put_sum != context.get_sum) {
        std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
    }
    if (context.get_count != puts_per_thread * put_thread_count) {
        std.debug.panic(
            "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
            context.get_count,
            u32(puts_per_thread),
            u32(put_thread_count),
        );
    }
}

// Producer: put `puts_per_thread` messages carrying random payloads,
// accumulating the payload sum for the final consistency check.
fn startPuts(ctx: *Context) u8 {
    var put_count: usize = puts_per_thread;
    var r = std.rand.DefaultPrng.init(0xdeadbeef);
    while (put_count != 0) : (put_count -= 1) {
        std.os.time.sleep(1); // let the os scheduler be our fuzz
        const x = r.random.scalar(u64);
        const mh = ctx.allocator.create(MessageHeader{
            .pNext = null,
            .pAllocator = null,
            .pSrcActor = null,
            .pDstActor = null,
            .cmd = x,
        }) catch unreachable;
        ctx.queue.put(mh);
        _ = @atomicRmw(u64, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
    }
    return 0;
}

// Consumer: drain the queue, then either exit (producers finished) or block
// on the futex until `signaler` wakes us.
fn startGetter(ctx: *Context) u8 {
    while (true) {
        while (ctx.queue.get()) |mh| {
            std.os.time.sleep(1); // let the os scheduler be our fuzz
            _ = @atomicRmw(u64, &ctx.get_sum, builtin.AtomicRmwOp.Add, mh.cmd, builtin.AtomicOrder.SeqCst);
            _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
        }
        if (@atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1) {
            warn("startGetter: puts_done=1\n");
            return 0;
        } else {
            warn("startGetter: waiting on signal\n");
            futex_wait(&signal, 0);
            warn("startGetter: wokeup\n");
        }
    }
}

// Futex word the queue signals on, plus a counter of signals delivered.
var signal: u32 = 0;
var signal_count: u32 = 0;

// Signal callback passed to MessageQueue.init: counts and wakes one waiter.
fn signaler(pSignalContext: *SignalContext) void {
    _ = @atomicRmw(u32, &signal_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
    futex_wake(pSignalContext, 1);
}

test "MessageQueue.single-threaded" {
    var queue = MessageQueue().init(signaler, &signal);
    signal_count = 0;

    // Putting on an empty queue signals; putting on a non-empty one does not.
    var mh_0: MessageHeader = undefined;
    mh_0.initEmpty();
    queue.put(&mh_0);
    assert(signal_count == 1);
    assert(queue.get().?.cmd == 0);
    assert(signal_count == 1);
    queue.put(&mh_0);
    assert(signal_count == 2);
    var mh_1: MessageHeader = undefined;
    mh_1.initEmpty();
    mh_1.cmd = 1;
    queue.put(&mh_1);
    assert(signal_count == 2);
    assert(queue.get().?.cmd == 0);
    assert(signal_count == 2);

    // FIFO order is preserved across mixed initialization styles.
    var mh_2: MessageHeader = undefined;
    mh_2.init(null, null, null, 2);
    queue.put(&mh_2);
    var mh_3 = MessageHeader{
        .pNext = null,
        .pAllocator = null,
        .pSrcActor = null,
        .pDstActor = null,
        .cmd = 3,
    };
    queue.put(&mh_3);
    assert(queue.get().?.cmd == 1);
    assert(queue.get().?.cmd == 2);
    var mh_4: MessageHeader = undefined;
    mh_4.initEmpty();
    mh_4.cmd = 4;
    queue.put(&mh_4);
    assert(queue.get().?.cmd == 3);
    assert(queue.get().?.cmd == 4);
    assert(queue.get() == null);
}
message_queue.zig
const std = @import("std");
const testing = std.testing;
const match_path = @import("match.zig");
const warn = std.debug.warn;

/// One table-driven case: matching `pattern` against `s` should yield
/// `match`, or fail with `err` when `err` is non-null.
const MatchTest = struct {
    pattern: []const u8,
    s: []const u8,
    match: bool,
    err: ?match_path.MatchError,

    fn init(
        pattern: []const u8,
        s: []const u8,
        match: bool,
        err: ?match_path.MatchError,
    ) MatchTest {
        return MatchTest{
            .pattern = pattern,
            .s = s,
            .match = match,
            .err = err,
        };
    }
};

// Test table ported from Go's path.Match tests: wildcards, character
// classes/ranges, escapes, '/'-boundary behavior, and malformed patterns.
const sample = [_]MatchTest{
    MatchTest.init("abc", "abc", true, null),
    MatchTest.init("*", "abc", true, null),
    MatchTest.init("*c", "abc", true, null),
    MatchTest.init("a*", "a", true, null),
    MatchTest.init("a*", "abc", true, null),
    MatchTest.init("a*", "ab/c", false, null),
    MatchTest.init("a*/b", "abc/b", true, null),
    MatchTest.init("a*/b", "a/c/b", false, null),
    MatchTest.init("a*b*c*d*e*/f", "axbxcxdxe/f", true, null),
    MatchTest.init("a*b*c*d*e*/f", "axbxcxdxexxx/f", true, null),
    MatchTest.init("a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, null),
    MatchTest.init("a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, null),
    MatchTest.init("a*b?c*x", "abxbbxdbxebxczzx", true, null),
    MatchTest.init("a*b?c*x", "abxbbxdbxebxczzy", false, null),
    MatchTest.init("ab[c]", "abc", true, null),
    MatchTest.init("ab[b-d]", "abc", true, null),
    MatchTest.init("ab[e-g]", "abc", false, null),
    MatchTest.init("ab[^c]", "abc", false, null),
    MatchTest.init("ab[^b-d]", "abc", false, null),
    MatchTest.init("ab[^e-g]", "abc", true, null),
    MatchTest.init("a\\*b", "a*b", true, null),
    MatchTest.init("a\\*b", "ab", false, null),
    MatchTest.init("a?b", "a☺b", true, null),
    MatchTest.init("a[^a]b", "a☺b", true, null),
    MatchTest.init("a???b", "a☺b", false, null),
    MatchTest.init("a[^a][^a][^a]b", "a☺b", false, null),
    MatchTest.init("[a-ζ]*", "α", true, null),
    MatchTest.init("*[a-ζ]", "A", false, null),
    MatchTest.init("a?b", "a/b", false, null),
    MatchTest.init("a*b", "a/b", false, null),
    MatchTest.init("[\\]a]", "]", true, null),
    MatchTest.init("[\\-]", "-", true, null),
    MatchTest.init("[x\\-]", "x", true, null),
    MatchTest.init("[x\\-]", "-", true, null),
    MatchTest.init("[x\\-]", "z", false, null),
    MatchTest.init("[\\-x]", "x", true, null),
    MatchTest.init("[\\-x]", "-", true, null),
    MatchTest.init("[\\-x]", "a", false, null),
    MatchTest.init("[]a]", "]", false, error.BadPattern),
    MatchTest.init("[-]", "-", false, error.BadPattern),
    MatchTest.init("[x-]", "x", false, error.BadPattern),
    MatchTest.init("[x-]", "-", false, error.BadPattern),
    MatchTest.init("[x-]", "z", false, error.BadPattern),
    MatchTest.init("[-x]", "x", false, error.BadPattern),
    MatchTest.init("[-x]", "-", false, error.BadPattern),
    MatchTest.init("[-x]", "a", false, error.BadPattern),
    MatchTest.init("\\", "a", false, error.BadPattern),
    MatchTest.init("[a-b-c]", "a", false, error.BadPattern),
    MatchTest.init("[", "a", false, error.BadPattern),
    MatchTest.init("[^", "a", false, error.BadPattern),
    MatchTest.init("[^bc", "a", false, error.BadPattern),
    MatchTest.init("a[", "a", false, null),
    MatchTest.init("a[", "ab", false, error.BadPattern),
    MatchTest.init("*x", "xxx", true, null),
};

test "match" {
    for (sample) |ts, i| {
        if (ts.err) |err| {
            testing.expectError(err, match_path.match(ts.pattern, ts.s));
        } else {
            const ok = try match_path.match(ts.pattern, ts.s);
            testing.expectEqual(ok, ts.match);
        }
    }
}
src/path/match_test.zig
const serial = @import("../../debug/serial.zig");

// Byte offsets of each selector within the GDT (entry index * 8).
pub const KERNEL_CODE = 0x08;
pub const KERNEL_DATA = 0x10;
pub const USER_CODE = 0x20;
pub const USER_DATA = 0x28;
pub const OVMF_DATA = 0x30;
pub const OVMF_CODE = 0x38;
pub const TSS_LOW = 0x40;
pub const TSS_HIGH = 0x48;

// Requested privilege levels for selectors.
pub const KERNEL_RPL = 0b00;
pub const USER_RPL = 0b11;

// Access-byte templates (present bit + DPL) and segment type bits.
const KERNEL = 0x90;
const OVMF = 0x90;
const USER = 0xF0;
const CODE = 0x0A;
const DATA = 0x02;
const TSS_ACCESS = 0x89;

// Flag-nibble bits of a descriptor.
const LONGMODE = (1 << 1);
const PROTECTED = (1 << 2);
const BLOCKS_4K = (1 << 3);

// One 8-byte GDT descriptor, bit-for-bit.
const GDTEntry = packed struct {
    limit_low: u16,
    base_low: u16,
    base_mid: u8,
    access: u8,
    limit_high: u4,
    flags: u4,
    base_high: u8
};

// Operand of lgdt/sgdt: limit plus linear base address of the table.
const GDTRegister = packed struct {
    limit: u16,
    base: *const GDTEntry,
};

// 64-bit Task State Segment layout.
const TSS = packed struct {
    reserved0: u32 = undefined,
    rsp0: u64 = undefined, // Stack to use when coming to ring 0 from ring > 0
    rsp1: u64 = undefined,
    rsp2: u64 = undefined,
    reserved1: u64 = undefined,
    ist1: u64 = undefined,
    ist2: u64 = undefined,
    ist3: u64 = undefined,
    ist4: u64 = undefined,
    ist5: u64 = undefined,
    ist6: u64 = undefined,
    ist7: u64 = undefined,
    reserved2: u64 = undefined,
    reserved3: u16 = undefined,
    iopb_offset: u16 = undefined
};

// Split base/limit/access/flags into the scattered fields of a descriptor.
fn makeEntry(base: usize, limit: usize, access: u8, flags: u4) GDTEntry {
    return GDTEntry{
        .limit_low = @truncate(u16, limit),
        .base_low = @truncate(u16, base),
        .base_mid = @truncate(u8, base >> 16),
        .access = @truncate(u8, access),
        .limit_high = @truncate(u4, limit >> 16),
        .flags = @truncate(u4, flags),
        .base_high = @truncate(u8, base >> 24),
    };
}

// The table itself; TSS slots are patched at runtime in initialize().
var gdt align(4) = [_]GDTEntry{
    makeEntry(0, 0, 0, 0), // Null descriptor (mandatory).
    // Kernel base selector
    makeEntry(0, 0xFFFFF, KERNEL | CODE, LONGMODE | BLOCKS_4K),
    makeEntry(0, 0xFFFFF, KERNEL | DATA, LONGMODE | BLOCKS_4K),
    makeEntry(0, 0, 0, 0),
    // User base selector
    makeEntry(0, 0xFFFFF, USER | CODE, LONGMODE | BLOCKS_4K),
    makeEntry(0, 0xFFFFF, USER | DATA, LONGMODE | BLOCKS_4K),
    makeEntry(0, 0xFFFFF, OVMF | DATA, LONGMODE | BLOCKS_4K),
    makeEntry(0, 0xFFFFF, OVMF | CODE, LONGMODE | BLOCKS_4K),
    makeEntry(0, 0, 0, 0), // TSS low
    makeEntry(0, 0, 0, 0), // TSS high
};

// NOTE(review): the GDT limit is conventionally `size - 1`; this stores the
// full size — confirm against the Intel SDM / loadGDT expectations.
var gdtr = GDTRegister{
    .limit = @as(u16, @sizeOf(@TypeOf(gdt))),
    .base = &gdt[0],
};

var tss = TSS{};

// During interruption of (driver|user)mode.
pub fn setKernelStack(rsp0: usize) void {
    tss.rsp0 = rsp0;
}

extern fn loadGDT(gdtr: *const GDTRegister) void;

// Load a new Task Register
pub fn ltr(desc: u16) void {
    asm volatile ("ltr %[desc]"
        :
        : [desc] "r" (desc)
    );
}

// Load the GDT, set the task register, and reload the data segment registers.
// NOTE(review): the final `mov`/`push %%rax` of KERNEL_CODE looks like an
// unfinished far-return sequence (no lretq follows, and the pushed value is
// never popped) — confirm this is intentional.
pub fn loadGDTAndTSS(gdt_ptr: *const GDTRegister) void {
    asm volatile (
        \\lgdt (%[gptr])
        \\mov %[tss], %%ax
        \\ltr %%ax
        \\mov %[kernel_data_segment], %%ax
        \\mov %%ax, %%ds
        \\mov %%ax, %%es
        \\mov %%ax, %%fs
        \\mov %%ax, %%gs
        \\mov %%ax, %%ss
        \\mov %[kernel_code_segment], %%rax
        \\push %%rax
        :
        : [gptr] "r" (gdt_ptr),
          [tss] "r" (@as(u16, TSS_LOW)),
          [kernel_data_segment] "i" (@as(u16, KERNEL_DATA)),
          [kernel_code_segment] "i" (@as(u16, KERNEL_CODE))
        : "memory"
    );
}

// Read back the currently loaded GDT register (sgdt).
pub fn readGDT() GDTRegister {
    var gdtr_buffer: GDTRegister = undefined;
    asm volatile ("sgdt %[input]"
        : [input] "=m" (gdtr_buffer)
    );
    return gdtr_buffer;
}

// Zero the TSS, patch its two descriptor slots into the GDT, load everything,
// and run the runtime self-checks.
pub fn initialize() void {
    serial.writeText("gdt: GDT initialization...\n");
    @memset(@ptrCast([*]u8, &tss), 0, @sizeOf(TSS));
    serial.writeText("gdt: TSS zeroed.\n");
    // Initialize TSS.
    const tssBase = @ptrToInt(&tss);
    const lowTSSEntry = makeEntry(tssBase, @sizeOf(TSS) - 1, TSS_ACCESS, PROTECTED);
    // NOTE(review): the high slot packs base bits 48..63 into limit_low and
    // bits 32..47 into base_low; the SDM's high-qword layout puts bits 32..47
    // first — verify the intended word order here.
    const highTSSEntry = makeEntry(@truncate(u16, tssBase >> 48), @truncate(u16, tssBase >> 32), 0, 0);
    gdt[TSS_LOW / @sizeOf(GDTEntry)] = lowTSSEntry;
    gdt[TSS_HIGH / @sizeOf(GDTEntry)] = highTSSEntry;
    serial.writeText("gdt: TSS ready.\n");
    // Load the TSS segment.
    loadGDTAndTSS(&gdtr);
    serial.writeText("gdt: GDT and TSS loaded.\n");
    runtimeTests();
}

fn runtimeTests() void {
    rt_properlyLoadedGDT();
}

// Self-check: the GDT the CPU reports must match the one we built.
fn rt_properlyLoadedGDT() void {
    const loadedGDT = readGDT();
    if (gdtr.limit != loadedGDT.limit) {
        @panic("Fatal error: GDT limit is not properly set, loading failure.\n");
    }
    if (gdtr.base != loadedGDT.base) {
        @panic("Fatal error: GDT base is not properly set, loading failure.\n");
    }
    serial.writeText("Runtime tests: GDT loading successful.\n");
}
src/kernel/arch/x86/gdt.zig
const std = @import("std");
const ztBuild = @import("src/deps/ZT/build.zig");

// ZLS kinda freaks out about not having a build file to parse, so this is
// a dummy build.
/// Dummy build entry point: compiles a fake executable so tooling has a build
/// graph to analyze. Downstream projects are expected to call `link` from
/// their own build.zig instead of building this target.
pub fn build(b: *std.build.Builder) void {
    const target = b.standardTargetOptions(.{});
    const mode = b.standardReleaseOptions();
    const exe = b.addExecutable("fake", "./fakeMain.zig");
    exe.linkLibC();
    exe.setTarget(target);
    exe.setBuildMode(mode);
    link(exe);
    exe.install();
}

/// Returns this build file's directory with a trailing path separator,
/// resolved at comptime from @src(), so package paths stay correct when this
/// file is imported from another project's build.zig.
fn getRelativePath() []const u8 {
    comptime var src: std.builtin.SourceLocation = @src();
    return std.fs.path.dirname(src.file).? ++ std.fs.path.sep_str;
}

/// Wires the `sling` package (plus its ZT dependency packages) into `exe`;
/// on Windows it additionally links the FMOD core/studio libraries and
/// installs their DLLs into the output `bin/` directory.
pub fn link(exe: *std.build.LibExeObjStep) void {
    comptime var path = getRelativePath();
    ztBuild.link(exe.builder, exe, exe.target);
    const slingPkg = std.build.Pkg{
        .name = "sling",
        .path = .{
            .path = path ++ "src/sling.zig",
        },
        .dependencies = &[_]std.build.Pkg{
            ztBuild.ztPkg,
            ztBuild.glfwPkg,
            ztBuild.glPkg,
            ztBuild.imguiPkg,
            ztBuild.stbPkg,
        }
    };
    exe.addPackage(slingPkg);
    // TODO: Linux
    if (exe.target.isWindows()) {
        // FMOD is only shipped as x64 binaries here, so force the arch.
        exe.target.cpu_arch = .x86_64;
        if (exe.builder.is_release) {
            // Release builds wont want to include the commandline.
            exe.subsystem = .Windows;
        }
        // Core
        exe.addIncludeDir("C:/Program Files (x86)/FMOD SoundSystem/FMOD Studio API Windows/api/core/inc");
        exe.addObjectFile("C:/Program Files (x86)/FMOD SoundSystem/FMOD Studio API Windows/api/core/lib/x64/fmod_vc.lib");
        // Studio
        exe.addIncludeDir("C:/Program Files (x86)/FMOD SoundSystem/FMOD Studio API Windows/api/studio/inc");
        exe.addObjectFile("C:/Program Files (x86)/FMOD SoundSystem/FMOD Studio API Windows/api/studio/lib/x64/fmodstudio_vc.lib");
        // NOTE(review): the variable named `core` installs the *studio* DLL and
        // `studio` installs the *core* DLL. Both DLLs do get installed to the
        // right destinations, but the variable names look swapped — confirm.
        var core = exe.builder.addInstallFile(.{ .path = "C:/Program Files (x86)/FMOD SoundSystem/FMOD Studio API Windows/api/studio/lib/x64/fmodstudio.dll" }, "bin/fmodstudio.dll");
        var studio = exe.builder.addInstallFile(.{ .path = "C:/Program Files (x86)/FMOD SoundSystem/FMOD Studio API Windows/api/core/lib/x64/fmod.dll" }, "bin/fmod.dll");
        exe.step.dependOn(&core.step);
        exe.step.dependOn(&studio.step);
    }
}

// Re-export ZT's asset-embedding helper so consumers only import this file.
pub const addBinaryContent = ztBuild.addBinaryContent;
build.zig
const std = @import("std");
const testing = std.testing;
const SYS = std.os.SYS;

const syspect = @import("syspect");
const generic = @import("generic.zig");
const utils = @import("utils.zig");

// Pre-built child binary that forks and exchanges signals between the
// parent and the forked child.
const target_argv = [_][]const u8{"zig-cache/bin/tests/example-child_signals"};
const allocator = std.testing.allocator;

test "generic pid tracking" {
    try generic.ensure_pid_properly_tracked(target_argv[0..]);
}

/// Traces the example child through fork + signal exchange and checks that
/// every intercepted syscall is attributed to the correct pid.
test "main test" {
    const tracked_syscalls = &[_]SYS{
        .fork,
        .gettid,
        .rt_sigprocmask,
        .kill,
        .wait4,
        .rt_sigtimedwait,
    };

    var inspector = syspect.Inspector.init(allocator, tracked_syscalls, .{});
    defer inspector.deinit();

    const thread_leader_pid = try inspector.spawn_process(allocator, target_argv[0..]);

    // Ensure we call sigprocmask and gettid
    {
        const expected_calls = [_]generic.Syscall{
            .{ .id = .rt_sigprocmask },
            .{ .id = .gettid },
        };
        try generic.test_some_calls(&inspector, expected_calls[0..]);
    }

    // Gather child pid from the fork result
    // (pre_call = syscall entry, post_call = syscall exit; the fork return
    // value in the parent's registers is the new child's pid).
    const child_pid = fork: {
        const pre_call = (try inspector.next_syscall()).?.pre_call;
        utils.expectEnumEqual(SYS, SYS.fork, pre_call.registers.syscall);
        try inspector.resume_tracee(pre_call.pid);

        const post_call = (try inspector.next_syscall()).?.post_call;
        utils.expectEnumEqual(SYS, SYS.fork, post_call.registers.syscall);
        testing.expectEqual(thread_leader_pid, post_call.pid);
        try inspector.resume_tracee(post_call.pid);

        break :fork @intCast(std.os.pid_t, post_call.registers.result);
    };

    // These calls could happen in almost any order, use ooo_call_tracking to test with that in mind.
    var syscalls = [_]generic.Syscall{
        .{ .id = .kill, .pid = thread_leader_pid },
        .{ .id = .wait4, .pid = thread_leader_pid },
        .{ .id = .gettid, .pid = child_pid },
        .{ .id = .rt_sigtimedwait, .pid = child_pid },
    };
    try generic.ooo_call_tracking(&inspector, syscalls[0..]);

    // Final sanity test for gettid.
    // The thread leader's tid equals its pid, so gettid's result must match.
    gettid: {
        const pre_call = (try inspector.next_syscall()).?.pre_call;
        utils.expectEnumEqual(SYS, SYS.gettid, pre_call.registers.syscall);
        try inspector.resume_tracee(pre_call.pid);

        const post_call = (try inspector.next_syscall()).?.post_call;
        utils.expectEnumEqual(SYS, SYS.gettid, post_call.registers.syscall);
        testing.expectEqual(thread_leader_pid, post_call.pid);
        testing.expectEqual(@intCast(syspect.c.regT, post_call.pid), post_call.registers.result);
        try inspector.resume_tracee(post_call.pid);
    }

    // Expect program has exited.
    testing.expectEqual(try inspector.next_syscall(), null);
}
tests/src/child_signals.zig
const std = @import("std");
const ray = @import("ray.zig");
const scene = @import("scene.zig");
const camera = @import("camera.zig");
const config = @import("config.zig");
const sphere = @import("sphere.zig");
const vector = @import("vector.zig");
const material = @import("material.zig");

const Vec3 = config.Vec3;

/// A ray with an origin and a (presumably normalized — confirm at call sites)
/// direction, both in scene space.
pub const Ray = struct {
    origin: Vec3,
    direction: Vec3,

    /// Returns origin + direction * t for the parametric distance
    /// `ray_scale_factor` along the ray.
    pub fn computeHitPoint(self: Ray, ray_scale_factor: f64) Vec3 {
        return self.origin + self.direction * @splat(config.SCENE_DIMS, ray_scale_factor);
    }
};

/// Traces a single path through the scene for up to MAX_BOUNCES bounces and
/// returns the accumulated radiance.
///
/// - `x_sphere_sample`/`y_sphere_sample`: the first pair of 2D samples used
///   to pick scattered directions; subsequent bounces draw fresh pairs from
///   `samples` at random indices.
/// - `samples`: interleaved (x, y) sample pairs, SAMPLES_PER_PIXEL pairs.
/// - Emissive light is only added on "direct" hits (primary ray or mirror
///   chains) because diffuse/glossy bounces already account for lights via
///   explicit sampleLights calls — avoids double counting.
pub fn tracePath(cur_ray: Ray, cur_scene: *scene.Scene, x_sphere_sample: f64, y_sphere_sample: f64, samples: [config.SAMPLES_PER_PIXEL * config.SCREEN_DIMS]f64, rng: *std.rand.Random) Vec3 {
    var is_direct = true;
    var bounce: usize = 0;
    var traced_ray = cur_ray;
    var ray_color = config.ZERO_VECTOR;
    var cur_x_sphere_sample = x_sphere_sample;
    var cur_y_sphere_sample = y_sphere_sample;
    // Product of per-bounce surface colors (path throughput).
    var color_bleeding_factor = config.IDENTITY_VECTOR;
    while (bounce < config.MAX_BOUNCES) : (bounce += 1) {
        const hit = cur_scene.intersect(traced_ray);
        if (hit.object_idx) |object_idx| {
            const object = cur_scene.objects.items[object_idx];
            const cur_material = object.material;
            if (is_direct) {
                ray_color += cur_material.emissive * color_bleeding_factor;
            }
            var diffuse = cur_material.diffuse;
            const max_diffuse = vector.getMaxComponent(diffuse);
            // Russian roulette: after MIN_BOUNCES (or for near-black surfaces)
            // terminate the path with probability 1 - max_diffuse and rescale
            // the surviving path's diffuse term to stay unbiased.
            if (bounce > config.MIN_BOUNCES or max_diffuse < std.math.f64_epsilon) {
                if (rng.float(f64) > max_diffuse) {
                    break;
                }
                diffuse /= @splat(config.SCENE_DIMS, max_diffuse);
            }
            const hit_point = traced_ray.computeHitPoint(hit.ray_scale_factor);
            // Sphere normal; division by radius normalizes it.
            var normal = (hit_point - object.center) / @splat(config.SCENE_DIMS, object.radius);
            // Flip the normal to face the incoming ray (handles hits from inside).
            if (vector.dot_product(normal, traced_ray.direction) >= 0.0) {
                normal = -normal;
            }
            switch (cur_material.material_type) {
                .DIFFUSE => {
                    is_direct = false;
                    // Explicit light sampling (next-event estimation).
                    const direct_light = color_bleeding_factor * scene.sampleLights(cur_scene, hit_point, normal, traced_ray.direction, cur_material);
                    ray_color += direct_light;
                    traced_ray = material.interreflectDiffuse(normal, hit_point, cur_x_sphere_sample, cur_y_sphere_sample);
                    color_bleeding_factor *= diffuse;
                },
                .GLOSSY => {
                    is_direct = false;
                    const direct_light = color_bleeding_factor * scene.sampleLights(cur_scene, hit_point, normal, traced_ray.direction, cur_material);
                    ray_color += direct_light;
                    // Randomly choose the diffuse or specular lobe, weighted by
                    // their maximum components, and divide by the pick
                    // probability to keep the estimator unbiased.
                    const max_specular = vector.getMaxComponent(cur_material.specular);
                    const specular_probability = max_specular / (max_specular + max_diffuse);
                    const specular_factor = 1.0 / specular_probability;
                    if (rng.float(f64) > specular_probability) {
                        traced_ray = material.interreflectDiffuse(normal, hit_point, cur_x_sphere_sample, cur_y_sphere_sample);
                        const dscale = @splat(config.SCENE_DIMS, (1.0 / (1.0 - 1.0 / specular_factor)));
                        const color = diffuse * dscale;
                        color_bleeding_factor *= color;
                    } else {
                        traced_ray = material.interreflectSpecular(normal, hit_point, cur_x_sphere_sample, cur_y_sphere_sample, cur_material.specular_exponent, traced_ray);
                        const color = cur_material.specular * @splat(config.SCENE_DIMS, specular_factor);
                        color_bleeding_factor *= color;
                    }
                },
                .MIRROR => {
                    // Perfect mirror: keep is_direct so emissive surfaces seen
                    // in the reflection still contribute directly.
                    const view_direction = -traced_ray.direction;
                    const reflected_direction = vector.normalize(vector.reflect(view_direction, normal));
                    traced_ray = .{ .origin = hit_point, .direction = reflected_direction };
                    color_bleeding_factor *= diffuse;
                },
            }
            // Draw a fresh 2D sample pair for the next bounce.
            const sample_idx = rng.intRangeAtMost(usize, 0, config.SAMPLES_PER_PIXEL - 1);
            cur_x_sphere_sample = samples[sample_idx * config.SCREEN_DIMS];
            cur_y_sphere_sample = samples[sample_idx * config.SCREEN_DIMS + 1];
        } else {
            // Ray escaped the scene.
            break;
        }
    }
    return ray_color;
}
src/ray.zig
const std = @import("std");
const csig = @cImport({
    @cInclude("signal.h");
});
const unistd = @cImport({
    @cInclude("unistd.h");
});
const config = @import("config.zig");
const fs = std.fs;
const fmt = std.fmt;
const mem = std.mem;
const os = std.os;
const libc = std.c;
const time = std.time;

/// Human-readable name for the two signals this module sends.
fn signalToString(signal: u8) []const u8 {
    return switch (signal) {
        csig.SIGTERM => "SIGTERM",
        csig.SIGKILL => "SIGKILL",
        else => "unknown"
    };
}

/// A process observed through /proc. `buffer` is a caller-owned scratch
/// buffer shared by all the /proc reads below — the results of comm()/vmRss()
/// etc. alias it, so each call clobbers the previous call's result.
pub const Process = struct {
    pid: u32,
    oom_score: i16,
    buffer: []u8,

    const Self = @This();
    const ProcessError = error{ MalformedOomScore, MalformedOomScoreAdj, MalformedVmRss };

    /// Builds a Process by reading /proc/<pid>/oom_score once.
    fn fromPid(pid: u32, buffer: []u8) !Self {
        const oom_score = try oomScoreFromPid(pid, buffer);
        return Self{ .pid = pid, .oom_score = oom_score, .buffer = buffer };
    }

    /// Reads and parses /proc/<pid>/oom_score (trailing newline stripped).
    fn oomScoreFromPid(pid: u32, buffer: []u8) !i16 {
        const path = try fmt.bufPrint(buffer, "/proc/{}/oom_score", .{pid});
        // The file descriptor for the oom_score file of this process
        const oom_score_fd = try os.open(path, os.O.RDONLY, 0);
        defer os.close(oom_score_fd);
        const bytes_read = try os.read(oom_score_fd, buffer);
        const oom_score = parse(i16, buffer[0 .. bytes_read - 1]) orelse return error.MalformedOomScore;
        return oom_score;
    }

    /// Reads and parses /proc/<pid>/oom_score_adj.
    pub fn oomScoreAdj(self: *const Self) !i16 {
        const path = try fmt.bufPrint(self.buffer, "/proc/{}/oom_score_adj", .{self.pid});
        // The file descriptor for the oom_score file of this process
        const oom_score_adj_fd = try os.open(path, os.O.RDONLY, 0);
        defer os.close(oom_score_adj_fd);
        const bytes_read = try os.read(oom_score_adj_fd, self.buffer);
        const oom_score_adj = parse(i16, self.buffer[0 .. bytes_read - 1]) orelse return error.MalformedOomScoreAdj;
        return oom_score_adj;
    }

    /// Returns the process name from /proc/<pid>/comm.
    /// The returned slice aliases self.buffer and is invalidated by the next
    /// /proc read through this struct.
    pub fn comm(self: *const Self) ![]u8 {
        const path = try fmt.bufPrint(self.buffer, "/proc/{}/comm", .{self.pid});
        // The file descriptor for the oom_score file of this process
        const comm_fd = try os.open(path, os.O.RDONLY, 0);
        defer os.close(comm_fd);
        const bytes_read = try os.read(comm_fd, self.buffer);
        return self.buffer[0 .. bytes_read - 1];
    }

    /// True while getpgid() succeeds, i.e. the pid still exists.
    pub fn isAlive(self: *const Self) bool {
        const group_id = unistd.getpgid(@intCast(c_int, self.pid));
        return group_id > 0;
    }

    /// Resident set size in KiB, from the second field of /proc/<pid>/statm
    /// (which is given in pages).
    pub fn vmRss(self: *const Self) !usize {
        var filename = try fmt.bufPrint(self.buffer, "/proc/{}/statm", .{self.pid});
        var statm_file = try fs.cwd().openFile(filename, .{});
        defer statm_file.close();
        var statm_reader = statm_file.reader();

        // Skip first field (total program size)
        try statm_reader.skipUntilDelimiterOrEof(' ');
        var rss_str = try statm_reader.readUntilDelimiter(self.buffer, ' ');
        var ret = parse(usize, rss_str) orelse return error.MalformedVmRss;
        return (ret * std.mem.page_size) / 1024;
    }

    /// Sends `signal` to this process and logs the outcome.
    pub fn signalSelf(self: *const Self, signal: u8) void {
        // Don't warn `kill` failure if the process is no longer alive
        if (0 != libc.kill(@intCast(i32, self.pid), signal) and self.isAlive()) {
            std.log.warn("Failed to send {s} to process {}", .{signalToString(signal), self.pid});
        } else {
            std.log.warn("Successfully sent {s} to process {}", .{signalToString(signal), self.pid});
        }
    }

    /// Sends SIGTERM, then polls every 250 ms (up to 20 times) for exit.
    /// NOTE(review): SIGKILL is sent inside the loop, so escalation happens
    /// after the FIRST 250 ms wait and then repeats every iteration. The
    /// comment placement suggests it may have been meant to run once after
    /// the loop — confirm the intended grace period.
    pub fn terminateSelf(self: Self) !void {
        const quarter_sec_in_ns: u64 = 250000000;
        self.signalSelf(csig.SIGTERM);
        var attempt: u8 = 0;
        while (attempt < 20) : (attempt += 1) {
            time.sleep(quarter_sec_in_ns);
            if (!self.isAlive()) {
                std.log.warn("Process {} has exited.", .{self.pid});
                return;
            }
            // Escalate to sigkill
            self.signalSelf(csig.SIGKILL);
        }
    }
};

/// Wrapper over fmt.parseInt which returns null
/// in failure instead of an error
fn parse(comptime T: type, buf: []const u8) ?T {
    return fmt.parseInt(T, buf, 10) catch null;
}

/// Used to try to give LLVM hints on branch prediction.
///
/// I have no idea how effective this is in practice.
fn coldNoOp() void {
    @setCold(true);
}

/// Searches for a process to kill in order to
/// free up memory.
/// Scans /proc, preferring the highest oom_score and, on ties, the largest
/// RSS; respects oom_score_adj == -1000 like the kernel OOM killer.
/// `buffer` is the shared scratch buffer threaded through every Process.
pub fn findVictimProcess(buffer: []u8) !Process {
    var victim: Process = undefined;
    var victim_vm_rss: usize = undefined;
    var victim_is_undefined = true;
    const timer = try time.Timer.start();
    var proc_dir = try fs.cwd().openDir("/proc", .{ .access_sub_paths = false, .iterate = true });
    var proc_it = proc_dir.iterate();

    while (try proc_it.next()) |proc_entry| {
        // We're only interested in directories of /proc
        if (proc_entry.kind != .Directory) {
            continue;
        } else {
            // `/proc` usually has much more directories than it has files
            coldNoOp();
        }
        // But we're not interested in files that don't relate to a PID
        const pid = parse(u32, proc_entry.name) orelse continue;
        // Don't consider killing the init system
        if (pid <= 1) {
            coldNoOp();
            continue;
        }

        const process = try Process.fromPid(pid, buffer);
        if (victim_is_undefined) {
            // We're still reading the first process so a victim hasn't been chosen
            coldNoOp();
            victim = process;
            victim_vm_rss = try victim.vmRss();
            victim_is_undefined = false;
            std.log.info("First victim set", .{});
        }
        if (victim.oom_score > process.oom_score) {
            // Our current victim is less innocent than the process being analysed
            continue;
        }

        // NOTE(review): this reads the *victim's* comm, but the comment (and
        // the apparent intent of skipping a candidate) suggests it should be
        // the candidate's comm (`process.comm()`); as written, an unkillable
        // current victim causes all further candidates to be skipped. Confirm.
        const victim_comm = try victim.comm();
        if (config.unkillables.get(victim_comm) != null) {
            // The current process was set as unkillable
            continue;
        }

        const current_vm_rss = try process.vmRss();
        if (current_vm_rss == 0) {
            // Current process is a kernel thread
            continue;
        }
        // TODO: recheck this
        if (process.oom_score == victim.oom_score and current_vm_rss <= victim_vm_rss) {
            continue;
        }
        const current_oom_score_adj = process.oomScoreAdj() catch {
            std.log.warn("Failed to read adj. OOM score for PID {}. Continuing.", .{process.pid});
            continue;
        };
        if (current_oom_score_adj == -1000) {
            // Follow the behaviour of the standard OOM killer: don't kill processes with oom_score_adj equals to -1000
            continue;
        }
        victim = process;
        victim_vm_rss = current_vm_rss;
    }

    const ns_elapsed = timer.read();
    std.debug.print("Victim found in {} ns.: {s} with PID {} and OOM score {}\n", .{ ns_elapsed, try victim.comm(), victim.pid, victim.oom_score });
    return victim;
}
src/process.zig
// SPDX-License-Identifier: MIT
// This file is part of the Termelot project under the MIT license.

//! Ziro is a super-simple terminal text editor written in Zig.
//! Ziro is inspired by [kilo](https://github.com/antirez/kilo),
//! and is intended to provide an example of using the Termelot
//! library.

const std = @import("std");
const termelot = @import("termelot");
const style = termelot.style;
const event = termelot.event;
const Rune = termelot.Rune;
const Position = termelot.Position;
const Cell = termelot.Cell;
const Style = style.Style;
const Color = style.Color;
const ColorNamed16 = style.ColorNamed16;
const Decorations = style.Decorations;

/// Root log override: forwards all std.log output to Termelot's logger so
/// messages don't corrupt the alternate-screen terminal UI.
pub fn log(
    comptime level: std.log.Level,
    comptime scope: @TypeOf(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    termelot.log(level, scope, format, args);
}

/// Enters raw mode on the alternate screen, draws one styled 'X' cell,
/// presents the frame, and exits after 4 seconds.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const config = termelot.Config{
        .raw_mode = true,
        .alternate_screen = true,
        .initial_buffer_size = .{ .rows = 800, .cols = 800 },
    };
    var term: termelot.Termelot = undefined;
    // NOTE(review): `init` is invoked as a method on a still-undefined `term`
    // and its result reassigned to it — presumably the library's intended
    // pattern for this Zig version, but confirm against Termelot's `init`
    // signature (a plain `termelot.Termelot.init(...)` call would be clearer).
    term = try term.init(&gpa.allocator, config);
    defer _ = term.deinit();

    term.setCell(
        Position{ .row = 5, .col = 3 },
        Cell{
            .rune = 'X',
            .style = Style{
                .fg_color = Color{ .Named16 = ColorNamed16.BrightRed },
                .bg_color = Color{ .Named16 = ColorNamed16.Blue },
                .decorations = Decorations{
                    .italic = false,
                    // Only request bold when the backend reports support.
                    .bold = term.supported_features.decorations.bold,
                    .underline = false,
                    .blinking = false,
                },
            },
        },
    );
    // term.setCell(
    //     Position{ .row = 0, .col = 1 },
    //     Cell{
    //         .rune = 'X',
    //         .style = term.screen_buffer.default_style,
    //     },
    // );
    try term.drawScreen();
    std.time.sleep(4 * std.time.ns_per_s);
}
examples/ziro.zig
const cc_bake = @import("cc_bake");
const cc_gfx = @import("cc_gfx");
const cc_res = @import("cc_res");
const cc_wnd = @import("cc_wnd");
const cc_wnd_gfx = @import("cc_wnd_gfx");

/// Everything the triangle sample needs to render a frame.
const Example = struct {
    window: cc_wnd.Window,
    gctx: cc_gfx.Context,
    render_pipeline: cc_gfx.RenderPipeline,
};

/// Creates the window, the graphics context, and the triangle pipeline.
pub fn init() !Example {
    var win = try cc_wnd.Window.init(.{ .width = 800, .height = 600, .title = "tri" });
    var ctx = try cc_gfx.Context.init(cc_wnd_gfx.getContextDesc(win));

    // Shader modules are only needed while the pipeline is being built, so
    // they are released before returning.
    const vs_bake = try cc_res.load(cc_bake.tri_vert_shader, .{});
    var vs_module = try ctx.device.initShader(vs_bake.bytes);
    defer ctx.device.deinitShader(&vs_module);

    const fs_bake = try cc_res.load(cc_bake.tri_frag_shader, .{});
    var fs_module = try ctx.device.initShader(fs_bake.bytes);
    defer ctx.device.deinitShader(&fs_module);

    var pipeline_desc = cc_gfx.RenderPipelineDesc{};
    pipeline_desc.setVertexState(.{
        .module = &vs_module,
        .entry_point = "vs_main",
    });
    pipeline_desc.setFragmentState(.{
        .module = &fs_module,
        .entry_point = "fs_main",
        // todo: zig #7607
        .targets = &[_]cc_gfx.ColorTargetState{.{ .format = ctx.swapchain_format }},
    });
    const pipeline = try ctx.device.initRenderPipeline(pipeline_desc);

    return Example{ .window = win, .gctx = ctx, .render_pipeline = pipeline };
}

/// Renders one frame: clear, draw the 3-vertex triangle, present.
pub fn loop(ex: *Example) !void {
    // Nothing to do while the window is hidden.
    if (!ex.window.isVisible()) {
        return;
    }

    const backbuffer_view = try ex.gctx.swapchain.getCurrentTextureView();
    var encoder = try ex.gctx.device.initCommandEncoder();

    var pass_desc = cc_gfx.RenderPassDesc{};
    // todo: zig #7607
    pass_desc.setColorAttachments(&[_]cc_gfx.ColorAttachment{.{
        .view = &backbuffer_view,
        .load_op = .clear,
        .clear_value = ex.gctx.clear_color,
        .store_op = .store,
    }});

    var pass = try encoder.beginRenderPass(pass_desc);
    try pass.setPipeline(&ex.render_pipeline);
    try pass.draw(3, 1, 0, 0);
    try pass.end();

    try ex.gctx.device.getQueue().submit(&.{try encoder.finish()});
    try ex.gctx.swapchain.present();
}

/// Tears everything down in reverse creation order.
pub fn deinit(ex: *Example) !void {
    ex.gctx.device.deinitRenderPipeline(&ex.render_pipeline);
    ex.gctx.deinit();
    ex.window.deinit();
}
ex/tri/tri.zig
const Compilation = @This(); const std = @import("std"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const log = std.log.scoped(.compilation); const Target = std.Target; const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const target_util = @import("target.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const trace = @import("tracy.zig").trace; const liveness = @import("liveness.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const glibc = @import("glibc.zig"); const musl = @import("musl.zig"); const mingw = @import("mingw.zig"); const libunwind = @import("libunwind.zig"); const libcxx = @import("libcxx.zig"); const fatal = @import("main.zig").fatal; const Module = @import("Module.zig"); const Cache = @import("Cache.zig"); const stage1 = @import("stage1.zig"); const translate_c = @import("translate_c.zig"); const c_codegen = @import("codegen/c.zig"); const c_link = @import("link/C.zig"); const ThreadPool = @import("ThreadPool.zig"); const WaitGroup = @import("WaitGroup.zig"); /// General-purpose allocator. Used for both temporary and long-term storage. gpa: *Allocator, /// Arena-allocated memory used during initialization. Should be untouched until deinit. arena_state: std.heap.ArenaAllocator.State, bin_file: *link.File, c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{}, c_object_cache_digest_set: std.AutoHashMapUnmanaged(Cache.BinDigest, void) = .{}, stage1_lock: ?Cache.Lock = null, stage1_cache_manifest: *Cache.Manifest = undefined, link_error_flags: link.File.ErrorFlags = .{}, work_queue: std.fifo.LinearFifo(Job, .Dynamic), /// These jobs are to invoke the Clang compiler to create an object file, which /// gets linked with the Compilation. 
c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic), /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator. /// This data is accessed by multiple threads and is protected by `mutex`. failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *ErrorMsg) = .{}, keep_source_files_loaded: bool, use_clang: bool, sanitize_c: bool, /// When this is `true` it means invoking clang as a sub-process is expected to inherit /// stdin, stdout, stderr, and if it returns non success, to forward the exit code. /// Otherwise we attempt to parse the error messages and expose them via the Compilation API. /// This is `true` for `zig cc`, `zig c++`, and `zig translate-c`. clang_passthrough_mode: bool, clang_preprocessor_mode: ClangPreprocessorMode, /// Whether to print clang argvs to stdout. verbose_cc: bool, verbose_tokenize: bool, verbose_ast: bool, verbose_ir: bool, verbose_llvm_ir: bool, verbose_cimport: bool, verbose_llvm_cpu_features: bool, disable_c_depfile: bool, time_report: bool, stack_report: bool, c_source_files: []const CSourceFile, clang_argv: []const []const u8, cache_parent: *Cache, /// Path to own executable for invoking `zig clang`. self_exe_path: ?[]const u8, zig_lib_directory: Directory, local_cache_directory: Directory, global_cache_directory: Directory, libc_include_dir_list: []const []const u8, thread_pool: *ThreadPool, /// Populated when we build the libc++ static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). libcxx_static_lib: ?CRTFile = null, /// Populated when we build the libc++abi static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). libcxxabi_static_lib: ?CRTFile = null, /// Populated when we build the libunwind static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). 
libunwind_static_lib: ?CRTFile = null, /// Populated when we build the libssp static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). libssp_static_lib: ?CRTFile = null, /// Populated when we build the libc static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). libc_static_lib: ?CRTFile = null, /// Populated when we build the libcompiler_rt static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). compiler_rt_static_lib: ?CRTFile = null, /// Populated when we build the compiler_rt_obj object. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). compiler_rt_obj: ?CRTFile = null, glibc_so_files: ?glibc.BuiltSharedObjects = null, /// For example `Scrt1.o` and `libc_nonshared.a`. These are populated after building libc from source, /// The set of needed CRT (C runtime) files differs depending on the target and compilation settings. /// The key is the basename, and the value is the absolute path to the completed build artifact. crt_files: std.StringHashMapUnmanaged(CRTFile) = .{}, /// Keeping track of this possibly open resource so we can close it later. owned_link_dir: ?std.fs.Dir, /// This is for stage1 and should be deleted upon completion of self-hosting. /// Don't use this for anything other than stage1 compatibility. color: @import("main.zig").Color = .auto, /// This mutex guards all `Compilation` mutable state. 
mutex: std.Mutex = .{}, test_filter: ?[]const u8, test_name_prefix: ?[]const u8, test_evented_io: bool, emit_asm: ?EmitLoc, emit_llvm_ir: ?EmitLoc, emit_analysis: ?EmitLoc, emit_docs: ?EmitLoc, c_header: ?c_link.Header, pub const InnerError = Module.InnerError; pub const CRTFile = struct { lock: Cache.Lock, full_object_path: []const u8, fn deinit(self: *CRTFile, gpa: *Allocator) void { self.lock.release(); gpa.free(self.full_object_path); self.* = undefined; } }; /// For passing to a C compiler. pub const CSourceFile = struct { src_path: []const u8, extra_flags: []const []const u8 = &[0][]const u8{}, }; const Job = union(enum) { /// Write the machine code for a Decl to the output file. codegen_decl: *Module.Decl, /// The Decl needs to be analyzed and possibly export itself. /// It may have already be analyzed, or it may have been determined /// to be outdated; in this case perform semantic analysis again. analyze_decl: *Module.Decl, /// The source file containing the Decl has been updated, and so the /// Decl may need its line number information updated in the debug info. update_line_number: *Module.Decl, /// one of the glibc static objects glibc_crt_file: glibc.CRTFile, /// all of the glibc shared objects glibc_shared_objects, /// one of the musl static objects musl_crt_file: musl.CRTFile, /// one of the mingw-w64 static objects mingw_crt_file: mingw.CRTFile, /// libunwind.a, usually needed when linking libc libunwind: void, libcxx: void, libcxxabi: void, libssp: void, compiler_rt_lib: void, compiler_rt_obj: void, /// needed when not linking libc and using LLVM for code generation because it generates /// calls to, for example, memcpy and memset. zig_libc: void, /// Generate builtin.zig source code and write it into the correct place. generate_builtin_zig: void, /// Use stage1 C++ code to compile zig code into an object file. stage1_module: void, /// The value is the index into `link.File.Options.system_libs`. 
windows_import_lib: usize, }; pub const CObject = struct { /// Relative to cwd. Owned by arena. src: CSourceFile, status: union(enum) { new, success: struct { /// The outputted result. Owned by gpa. object_path: []u8, /// This is a file system lock on the cache hash manifest representing this /// object. It prevents other invocations of the Zig compiler from interfering /// with this object until released. lock: Cache.Lock, }, /// There will be a corresponding ErrorMsg in Compilation.failed_c_objects. failure, }, /// Returns if there was failure. pub fn clearStatus(self: *CObject, gpa: *Allocator) bool { switch (self.status) { .new => return false, .failure => { self.status = .new; return true; }, .success => |*success| { gpa.free(success.object_path); success.lock.release(); self.status = .new; return false; }, } } pub fn destroy(self: *CObject, gpa: *Allocator) void { _ = self.clearStatus(gpa); gpa.destroy(self); } }; pub const AllErrors = struct { arena: std.heap.ArenaAllocator.State, list: []const Message, pub const Message = union(enum) { src: struct { src_path: []const u8, line: usize, column: usize, byte_offset: usize, msg: []const u8, }, plain: struct { msg: []const u8, }, pub fn renderToStdErr(self: Message) void { switch (self) { .src => |src| { std.debug.print("{s}:{d}:{d}: error: {s}\n", .{ src.src_path, src.line + 1, src.column + 1, src.msg, }); }, .plain => |plain| { std.debug.print("error: {s}\n", .{plain.msg}); }, } } }; pub fn deinit(self: *AllErrors, gpa: *Allocator) void { self.arena.promote(gpa).deinit(); } fn add( arena: *std.heap.ArenaAllocator, errors: *std.ArrayList(Message), sub_file_path: []const u8, source: []const u8, simple_err_msg: ErrorMsg, ) !void { const loc = std.zig.findLineColumn(source, simple_err_msg.byte_offset); try errors.append(.{ .src = .{ .src_path = try arena.allocator.dupe(u8, sub_file_path), .msg = try arena.allocator.dupe(u8, simple_err_msg.msg), .byte_offset = simple_err_msg.byte_offset, .line = loc.line, .column = 
loc.column, }, }); } fn addPlain( arena: *std.heap.ArenaAllocator, errors: *std.ArrayList(Message), msg: []const u8, ) !void { try errors.append(.{ .plain = .{ .msg = msg } }); } }; pub const Directory = struct { /// This field is redundant for operations that can act on the open directory handle /// directly, but it is needed when passing the directory to a child process. /// `null` means cwd. path: ?[]const u8, handle: std.fs.Dir, pub fn join(self: Directory, allocator: *Allocator, paths: []const []const u8) ![]u8 { if (self.path) |p| { // TODO clean way to do this with only 1 allocation const part2 = try std.fs.path.join(allocator, paths); defer allocator.free(part2); return std.fs.path.join(allocator, &[_][]const u8{ p, part2 }); } else { return std.fs.path.join(allocator, paths); } } }; pub const EmitLoc = struct { /// If this is `null` it means the file will be output to the cache directory. /// When provided, both the open file handle and the path name must outlive the `Compilation`. directory: ?Compilation.Directory, /// This may not have sub-directories in it. basename: []const u8, }; pub const ClangPreprocessorMode = enum { no, /// This means we are doing `zig cc -E -o <path>`. yes, /// This means we are doing `zig cc -E`. stdout, }; pub const InitOptions = struct { zig_lib_directory: Directory, local_cache_directory: Directory, global_cache_directory: Directory, target: Target, root_name: []const u8, root_pkg: ?*Package, output_mode: std.builtin.OutputMode, thread_pool: *ThreadPool, dynamic_linker: ?[]const u8 = null, /// `null` means to not emit a binary file. emit_bin: ?EmitLoc, /// `null` means to not emit a C header file. emit_h: ?EmitLoc = null, /// `null` means to not emit assembly. emit_asm: ?EmitLoc = null, /// `null` means to not emit LLVM IR. emit_llvm_ir: ?EmitLoc = null, /// `null` means to not emit semantic analysis JSON. emit_analysis: ?EmitLoc = null, /// `null` means to not emit docs. 
emit_docs: ?EmitLoc = null, link_mode: ?std.builtin.LinkMode = null, dll_export_fns: ?bool = false, /// Normally when using LLD to link, Zig uses a file named "lld.id" in the /// same directory as the output binary which contains the hash of the link /// operation, allowing Zig to skip linking when the hash would be unchanged. /// In the case that the output binary is being emitted into a directory which /// is externally modified - essentially anything other than zig-cache - then /// this flag would be set to disable this machinery to avoid false positives. disable_lld_caching: bool = false, object_format: ?std.builtin.ObjectFormat = null, optimize_mode: std.builtin.Mode = .Debug, keep_source_files_loaded: bool = false, clang_argv: []const []const u8 = &[0][]const u8{}, lld_argv: []const []const u8 = &[0][]const u8{}, lib_dirs: []const []const u8 = &[0][]const u8{}, rpath_list: []const []const u8 = &[0][]const u8{}, c_source_files: []const CSourceFile = &[0]CSourceFile{}, link_objects: []const []const u8 = &[0][]const u8{}, framework_dirs: []const []const u8 = &[0][]const u8{}, frameworks: []const []const u8 = &[0][]const u8{}, system_libs: []const []const u8 = &[0][]const u8{}, link_libc: bool = false, link_libcpp: bool = false, want_pic: ?bool = null, /// This means that if the output mode is an executable it will be a /// Position Independent Executable. If the output mode is not an /// executable this field is ignored. 
want_pie: ?bool = null, want_sanitize_c: ?bool = null, want_stack_check: ?bool = null, want_valgrind: ?bool = null, want_compiler_rt: ?bool = null, use_llvm: ?bool = null, use_lld: ?bool = null, use_clang: ?bool = null, rdynamic: bool = false, strip: bool = false, single_threaded: bool = false, function_sections: bool = false, is_native_os: bool, is_native_abi: bool, time_report: bool = false, stack_report: bool = false, link_eh_frame_hdr: bool = false, link_emit_relocs: bool = false, linker_script: ?[]const u8 = null, version_script: ?[]const u8 = null, soname: ?[]const u8 = null, linker_gc_sections: ?bool = null, linker_allow_shlib_undefined: ?bool = null, linker_bind_global_refs_locally: ?bool = null, each_lib_rpath: ?bool = null, disable_c_depfile: bool = false, linker_z_nodelete: bool = false, linker_z_defs: bool = false, clang_passthrough_mode: bool = false, verbose_cc: bool = false, verbose_link: bool = false, verbose_tokenize: bool = false, verbose_ast: bool = false, verbose_ir: bool = false, verbose_llvm_ir: bool = false, verbose_cimport: bool = false, verbose_llvm_cpu_features: bool = false, is_test: bool = false, test_evented_io: bool = false, is_compiler_rt_or_libc: bool = false, parent_compilation_link_libc: bool = false, stack_size_override: ?u64 = null, image_base_override: ?u64 = null, self_exe_path: ?[]const u8 = null, version: ?std.builtin.Version = null, libc_installation: ?*const LibCInstallation = null, machine_code_model: std.builtin.CodeModel = .default, clang_preprocessor_mode: ClangPreprocessorMode = .no, /// This is for stage1 and should be deleted upon completion of self-hosting. 
color: @import("main.zig").Color = .auto,
test_filter: ?[]const u8 = null,
test_name_prefix: ?[]const u8 = null,
subsystem: ?std.Target.SubSystem = null,
};

/// Recursively folds every package in `pkg_table` into `hash`, sorting by
/// package name first so the resulting hash is deterministic regardless of
/// hashmap iteration order. `hash_type` selects what is hashed per package:
/// `.path_bytes` hashes only the path strings, while `.files` additionally
/// registers each package's root source file with the given cache manifest.
fn addPackageTableToCacheHash(
    hash: *Cache.HashHelper,
    arena: *std.heap.ArenaAllocator,
    pkg_table: Package.Table,
    hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
) (error{OutOfMemory} || std.os.GetCwdError)!void {
    const allocator = &arena.allocator;

    // Snapshot the hashmap entries into a slice so they can be sorted.
    const packages = try allocator.alloc(Package.Table.Entry, pkg_table.count());
    {
        // Copy over the hashmap entries to our slice
        var table_it = pkg_table.iterator();
        var idx: usize = 0;
        while (table_it.next()) |entry| : (idx += 1) {
            packages[idx] = entry.*;
        }
    }
    // Sort the slice by package name
    std.sort.sort(Package.Table.Entry, packages, {}, struct {
        fn lessThan(_: void, lhs: Package.Table.Entry, rhs: Package.Table.Entry) bool {
            return std.mem.lessThan(u8, lhs.key, rhs.key);
        }
    }.lessThan);

    for (packages) |pkg| {
        // Finally insert the package name and path to the cache hash.
        hash.addBytes(pkg.key);
        switch (hash_type) {
            .path_bytes => {
                hash.addBytes(pkg.value.root_src_path);
                hash.addOptionalBytes(pkg.value.root_src_directory.path);
            },
            .files => |man| {
                const pkg_zig_file = try pkg.value.root_src_directory.join(allocator, &[_][]const u8{
                    pkg.value.root_src_path,
                });
                _ = try man.addFile(pkg_zig_file, null);
            },
        }
        // Recurse to handle the package's dependencies
        try addPackageTableToCacheHash(hash, arena, pkg.value.table, hash_type);
    }
}

/// Allocates and fully initializes a `Compilation` from the given options:
/// decides backend/linker/libc configuration, sets up the cache and artifact
/// directories, optionally creates a `Module` for Zig source, opens the output
/// file, and queues initial work items. The `Compilation` lives in its own
/// arena; destroy it with `Compilation.destroy`.
pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
    const is_dyn_lib = switch (options.output_mode) {
        .Obj, .Exe => false,
        .Lib => (options.link_mode orelse .Static) == .Dynamic,
    };
    const is_exe_or_dyn_lib = switch (options.output_mode) {
        .Obj => false,
        .Lib => is_dyn_lib,
        .Exe => true,
    };
    // Outputs that can be "complete" artifacts need libc-style support symbols.
    const needs_c_symbols = !options.is_compiler_rt_or_libc and
        (is_exe_or_dyn_lib or (options.target.isWasm() and options.output_mode != .Obj));

    const comp: *Compilation = comp: {
        // For allocations that have the same lifetime as Compilation. This arena is used only during this
        // initialization and then is freed in deinit().
        var arena_allocator = std.heap.ArenaAllocator.init(gpa);
        errdefer arena_allocator.deinit();
        const arena = &arena_allocator.allocator;

        // We put the `Compilation` itself in the arena. Freeing the arena will free the module.
        // It's initialized later after we prepare the initialization options.
        const comp = try arena.create(Compilation);
        const root_name = try arena.dupe(u8, options.root_name);

        const ofmt = options.object_format orelse options.target.getObjectFormat();

        // Make a decision on whether to use LLVM or our own backend.
        const use_llvm = if (options.use_llvm) |explicit| explicit else blk: {
            // If we have no zig code to compile, no need for LLVM.
            if (options.root_pkg == null) break :blk false;

            // If we are the stage1 compiler, we depend on the stage1 c++ llvm backend
            // to compile zig code.
            if (build_options.is_stage1) break :blk true;

            // We would want to prefer LLVM for release builds when it is available, however
            // we don't have an LLVM backend yet :)
            // We would also want to prefer LLVM for architectures that we don't have self-hosted support for too.
            break :blk false;
        };
        // Only the LLVM backend honors a non-default code model.
        if (!use_llvm and options.machine_code_model != .default) {
            return error.MachineCodeModelNotSupported;
        }

        // Make a decision on whether to use LLD or our own linker.
        const use_lld = if (options.use_lld) |explicit| explicit else blk: {
            if (!build_options.have_llvm)
                break :blk false;

            if (ofmt == .c)
                break :blk false;

            // Our linker can't handle objects or most advanced options yet.
            if (options.link_objects.len != 0 or
                options.c_source_files.len != 0 or
                options.frameworks.len != 0 or
                options.system_libs.len != 0 or
                options.link_libc or options.link_libcpp or
                options.link_eh_frame_hdr or
                options.link_emit_relocs or
                options.output_mode == .Lib or
                options.lld_argv.len != 0 or
                options.image_base_override != null or
                options.linker_script != null or options.version_script != null)
            {
                break :blk true;
            }

            if (use_llvm) {
                // If stage1 generates an object file, self-hosted linker is not
                // yet sophisticated enough to handle that.
                break :blk options.root_pkg != null;
            }

            break :blk false;
        };

        const DarwinOptions = struct {
            syslibroot: ?[]const u8 = null,
            system_linker_hack: bool = false,
        };

        // SDK sysroot / system-linker workarounds only apply when building
        // natively on a Darwin host for a Darwin target with LLD.
        const darwin_options: DarwinOptions = if (build_options.have_llvm and comptime std.Target.current.isDarwin()) outer: {
            const opts: DarwinOptions = if (use_lld and options.is_native_os and options.target.isDarwin()) inner: {
                // TODO Revisit this targeting versions lower than macOS 11 when LLVM 12 is out.
                // See https://github.com/ziglang/zig/issues/6996
                const at_least_big_sur = options.target.os.getVersionRange().semver.min.major >= 11;
                const syslibroot = if (at_least_big_sur) try std.zig.system.getSDKPath(arena) else null;
                const system_linker_hack = std.os.getenv("ZIG_SYSTEM_LINKER_HACK") != null;
                break :inner .{
                    .syslibroot = syslibroot,
                    .system_linker_hack = system_linker_hack,
                };
            } else .{};
            break :outer opts;
        } else .{};

        // Some OSes (e.g. Darwin) cannot be targeted without linking libc.
        const link_libc = options.link_libc or target_util.osRequiresLibC(options.target);

        const must_dynamic_link = dl: {
            if (target_util.cannotDynamicLink(options.target))
                break :dl false;
            if (is_exe_or_dyn_lib and link_libc and
                (options.target.isGnuLibC() or target_util.osRequiresLibC(options.target)))
            {
                // glibc and OS-provided libcs can only be linked dynamically.
                break :dl true;
            }
            const any_dyn_libs: bool = x: {
                if (options.system_libs.len != 0)
                    break :x true;
                for (options.link_objects) |obj| {
                    switch (classifyFileExt(obj)) {
                        .shared_library => break :x true,
                        else => continue,
                    }
                }
                break :x false;
            };
            if (any_dyn_libs) {
                // When creating a executable that links to system libraries,
                // we require dynamic linking, but we must not link static libraries
                // or object files dynamically!
                break :dl (options.output_mode == .Exe);
            }
            break :dl false;
        };

        const default_link_mode: std.builtin.LinkMode = blk: {
            if (must_dynamic_link) {
                break :blk .Dynamic;
            } else if (is_exe_or_dyn_lib and link_libc and
                options.is_native_abi and options.target.abi.isMusl())
            {
                // If targeting the system's native ABI and the system's
                // libc is musl, link dynamically by default.
break :blk .Dynamic; } else { break :blk .Static; } }; const link_mode: std.builtin.LinkMode = if (options.link_mode) |lm| blk: { if (lm == .Static and must_dynamic_link) { return error.UnableToStaticLink; } break :blk lm; } else default_link_mode; const dll_export_fns = if (options.dll_export_fns) |explicit| explicit else is_dyn_lib; const libc_dirs = try detectLibCIncludeDirs( arena, options.zig_lib_directory.path.?, options.target, options.is_native_os, link_libc, options.libc_installation, ); const must_pie = target_util.requiresPIE(options.target); const pie = if (options.want_pie) |explicit| pie: { if (!explicit and must_pie) { return error.TargetRequiresPIE; } break :pie explicit; } else must_pie; const must_pic: bool = b: { if (target_util.requiresPIC(options.target, link_libc)) break :b true; break :b link_mode == .Dynamic; }; const pic = if (options.want_pic) |explicit| pic: { if (!explicit) { if (must_pic) { return error.TargetRequiresPIC; } if (pie) { return error.PIERequiresPIC; } } break :pic explicit; } else pie or must_pic; // Make a decision on whether to use Clang for translate-c and compiling C files. const use_clang = if (options.use_clang) |explicit| explicit else blk: { if (build_options.have_llvm) { // Can't use it if we don't have it! break :blk false; } // It's not planned to do our own translate-c or C compilation. 
break :blk true; }; const is_safe_mode = switch (options.optimize_mode) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; const sanitize_c = options.want_sanitize_c orelse is_safe_mode; const stack_check: bool = b: { if (!target_util.supportsStackProbing(options.target)) break :b false; break :b options.want_stack_check orelse is_safe_mode; }; const valgrind: bool = b: { if (!target_util.hasValgrindSupport(options.target)) break :b false; break :b options.want_valgrind orelse (options.optimize_mode == .Debug); }; const include_compiler_rt = options.want_compiler_rt orelse needs_c_symbols; const single_threaded = options.single_threaded or target_util.isSingleThreaded(options.target); const llvm_cpu_features: ?[*:0]const u8 = if (build_options.have_llvm and use_llvm) blk: { var buf = std.ArrayList(u8).init(arena); for (options.target.cpu.arch.allFeaturesList()) |feature, index_usize| { const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize); const is_enabled = options.target.cpu.features.isEnabled(index); if (feature.llvm_name) |llvm_name| { const plus_or_minus = "-+"[@boolToInt(is_enabled)]; try buf.ensureCapacity(buf.items.len + 2 + llvm_name.len); buf.appendAssumeCapacity(plus_or_minus); buf.appendSliceAssumeCapacity(llvm_name); buf.appendSliceAssumeCapacity(","); } } assert(mem.endsWith(u8, buf.items, ",")); buf.items[buf.items.len - 1] = 0; buf.shrink(buf.items.len); break :blk buf.items[0 .. buf.items.len - 1 :0].ptr; } else null; const strip = options.strip or !target_util.hasDebugInfo(options.target); // We put everything into the cache hash that *cannot be modified during an incremental update*. // For example, one cannot change the target between updates, but one can change source files, // so the target goes into the cache hash, but source files do not. This is so that we can // find the same binary and incrementally update it even if there are modified source files. 
        // We do this even if outputting to the current directory because we need somewhere to store
        // incremental compilation metadata.
        const cache = try arena.create(Cache);
        cache.* = .{
            .gpa = gpa,
            // "h" is the manifest ("hash") subdirectory of the local cache.
            .manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}),
        };
        errdefer cache.manifest_dir.close();

        // This is shared hasher state common to zig source and all C source files.
        cache.hash.addBytes(build_options.version);
        cache.hash.addBytes(options.zig_lib_directory.path orelse ".");
        cache.hash.add(options.optimize_mode);
        cache.hash.add(options.target.cpu.arch);
        cache.hash.addBytes(options.target.cpu.model.name);
        cache.hash.add(options.target.cpu.features.ints);
        cache.hash.add(options.target.os.tag);
        cache.hash.add(options.target.os.getVersionRange());
        cache.hash.add(options.is_native_os);
        cache.hash.add(options.target.abi);
        cache.hash.add(ofmt);
        cache.hash.add(pic);
        cache.hash.add(pie);
        cache.hash.add(stack_check);
        cache.hash.add(link_mode);
        cache.hash.add(options.function_sections);
        cache.hash.add(strip);
        cache.hash.add(link_libc);
        cache.hash.add(options.link_libcpp);
        cache.hash.add(options.output_mode);
        cache.hash.add(options.machine_code_model);
        cache.hash.addOptionalEmitLoc(options.emit_bin);
        cache.hash.addBytes(options.root_name);
        // TODO audit this and make sure everything is in it

        // A Module only exists when there is Zig source to compile.
        const module: ?*Module = if (options.root_pkg) |root_pkg| blk: {
            // Options that are specific to zig source files, that cannot be
            // modified between incremental updates.
            var hash = cache.hash;

            // Here we put the root source file path name, but *not* with addFile. We want the
            // hash to be the same regardless of the contents of the source file, because
            // incremental compilation will handle it, but we do want to namespace different
            // source file names because they are likely different compilations and therefore this
            // would be likely to cause cache hits.
            hash.addBytes(root_pkg.root_src_path);
            hash.addOptionalBytes(root_pkg.root_src_directory.path);
            {
                // Only path bytes are hashed here; file contents are tracked
                // by incremental compilation, not by this hash.
                var local_arena = std.heap.ArenaAllocator.init(gpa);
                defer local_arena.deinit();
                try addPackageTableToCacheHash(&hash, &local_arena, root_pkg.table, .path_bytes);
            }
            hash.add(valgrind);
            hash.add(single_threaded);
            hash.add(dll_export_fns);
            hash.add(options.is_test);
            hash.add(options.is_compiler_rt_or_libc);
            hash.add(options.parent_compilation_link_libc);

            // The digest names the per-compilation artifact directory, "o/<digest>".
            const digest = hash.final();
            const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
            var artifact_dir = try options.local_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
            errdefer artifact_dir.close();
            const zig_cache_artifact_directory: Directory = .{
                .handle = artifact_dir,
                .path = if (options.local_cache_directory.path) |p|
                    try std.fs.path.join(arena, &[_][]const u8{ p, artifact_sub_dir })
                else
                    artifact_sub_dir,
            };

            // TODO when we implement serialization and deserialization of incremental compilation metadata,
            // this is where we would load it. We have open a handle to the directory where
            // the output either already is, or will be.
            // However we currently do not have serialization of such metadata, so for now
            // we set up an empty Module that does the entire compilation fresh.
const root_scope = rs: { if (mem.endsWith(u8, root_pkg.root_src_path, ".zig")) { const struct_payload = try gpa.create(Type.Payload.EmptyStruct); const root_scope = try gpa.create(Module.Scope.File); struct_payload.* = .{ .scope = &root_scope.root_container }; root_scope.* = .{ // TODO this is duped so it can be freed in Container.deinit .sub_file_path = try gpa.dupe(u8, root_pkg.root_src_path), .source = .{ .unloaded = {} }, .contents = .{ .not_available = {} }, .status = .never_loaded, .pkg = root_pkg, .root_container = .{ .file_scope = root_scope, .decls = .{}, .ty = Type.initPayload(&struct_payload.base), }, }; break :rs &root_scope.base; } else if (mem.endsWith(u8, root_pkg.root_src_path, ".zir")) { const root_scope = try gpa.create(Module.Scope.ZIRModule); root_scope.* = .{ .sub_file_path = root_pkg.root_src_path, .source = .{ .unloaded = {} }, .contents = .{ .not_available = {} }, .status = .never_loaded, .decls = .{}, }; break :rs &root_scope.base; } else { unreachable; } }; const module = try arena.create(Module); module.* = .{ .gpa = gpa, .comp = comp, .root_pkg = root_pkg, .root_scope = root_scope, .zig_cache_artifact_directory = zig_cache_artifact_directory, }; break :blk module; } else null; errdefer if (module) |zm| zm.deinit(); const error_return_tracing = !strip and switch (options.optimize_mode) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; // For resource management purposes. 
        var owned_link_dir: ?std.fs.Dir = null;
        errdefer if (owned_link_dir) |*dir| dir.close();

        // Decide where the output binary goes: the explicitly requested
        // directory, the module's artifact directory, or (for pure C/object
        // compilations) a cache directory derived from the inputs.
        const bin_file_emit: ?link.Emit = blk: {
            const emit_bin = options.emit_bin orelse break :blk null;
            if (emit_bin.directory) |directory| {
                break :blk link.Emit{
                    .directory = directory,
                    .sub_path = emit_bin.basename,
                };
            }
            if (module) |zm| {
                break :blk link.Emit{
                    .directory = zm.zig_cache_artifact_directory,
                    .sub_path = emit_bin.basename,
                };
            }
            // We could use the cache hash as is no problem, however, we increase
            // the likelihood of cache hits by adding the first C source file
            // path name (not contents) to the hash. This way if the user is compiling
            // foo.c and bar.c as separate compilations, they get different cache
            // directories.
            var hash = cache.hash;
            if (options.c_source_files.len >= 1) {
                hash.addBytes(options.c_source_files[0].src_path);
            } else if (options.link_objects.len >= 1) {
                hash.addBytes(options.link_objects[0]);
            }

            const digest = hash.final();
            const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
            var artifact_dir = try options.local_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
            owned_link_dir = artifact_dir;
            const link_artifact_directory: Directory = .{
                .handle = artifact_dir,
                .path = try options.local_cache_directory.join(arena, &[_][]const u8{artifact_sub_dir}),
            };
            break :blk link.Emit{
                .directory = link_artifact_directory,
                .sub_path = emit_bin.basename,
            };
        };

        // De-duplicate the requested system libraries (insertion order kept).
        var system_libs: std.StringArrayHashMapUnmanaged(void) = .{};
        errdefer system_libs.deinit(gpa);
        try system_libs.ensureCapacity(gpa, options.system_libs.len);
        for (options.system_libs) |lib_name| {
            system_libs.putAssumeCapacity(lib_name, {});
        }

        // Open the output file / linker state with all decisions made above.
        const bin_file = try link.File.openPath(gpa, .{
            .emit = bin_file_emit,
            .root_name = root_name,
            .module = module,
            .target = options.target,
            .dynamic_linker = options.dynamic_linker,
            .output_mode = options.output_mode,
            .link_mode = link_mode,
            .object_format = ofmt,
            .optimize_mode = options.optimize_mode,
            .use_lld = use_lld,
            .use_llvm = use_llvm,
            .system_linker_hack = darwin_options.system_linker_hack,
            .link_libc = link_libc,
            .link_libcpp = options.link_libcpp,
            .objects = options.link_objects,
            .frameworks = options.frameworks,
            .framework_dirs = options.framework_dirs,
            .system_libs = system_libs,
            .syslibroot = darwin_options.syslibroot,
            .lib_dirs = options.lib_dirs,
            .rpath_list = options.rpath_list,
            .strip = strip,
            .is_native_os = options.is_native_os,
            .is_native_abi = options.is_native_abi,
            .function_sections = options.function_sections,
            .allow_shlib_undefined = options.linker_allow_shlib_undefined,
            .bind_global_refs_locally = options.linker_bind_global_refs_locally orelse false,
            .z_nodelete = options.linker_z_nodelete,
            .z_defs = options.linker_z_defs,
            .stack_size_override = options.stack_size_override,
            .image_base_override = options.image_base_override,
            .include_compiler_rt = include_compiler_rt,
            .linker_script = options.linker_script,
            .version_script = options.version_script,
            .gc_sections = options.linker_gc_sections,
            .eh_frame_hdr = options.link_eh_frame_hdr,
            .emit_relocs = options.link_emit_relocs,
            .rdynamic = options.rdynamic,
            .extra_lld_args = options.lld_argv,
            .soname = options.soname,
            .version = options.version,
            .libc_installation = libc_dirs.libc_installation,
            .pic = pic,
            .pie = pie,
            .valgrind = valgrind,
            .stack_check = stack_check,
            .single_threaded = single_threaded,
            .verbose_link = options.verbose_link,
            .machine_code_model = options.machine_code_model,
            .dll_export_fns = dll_export_fns,
            .error_return_tracing = error_return_tracing,
            .llvm_cpu_features = llvm_cpu_features,
            .is_compiler_rt_or_libc = options.is_compiler_rt_or_libc,
            .parent_compilation_link_libc = options.parent_compilation_link_libc,
            .each_lib_rpath = options.each_lib_rpath orelse options.is_native_os,
            .disable_lld_caching = options.disable_lld_caching,
            .subsystem = options.subsystem,
            .is_test = options.is_test,
        });
        errdefer bin_file.destroy();

        // Finally populate the Compilation that was arena-allocated above.
        comp.* = .{
            .gpa = gpa,
            .arena_state = arena_allocator.state,
.zig_lib_directory = options.zig_lib_directory, .local_cache_directory = options.local_cache_directory, .global_cache_directory = options.global_cache_directory, .bin_file = bin_file, .c_header = if (!use_llvm and options.emit_h != null) c_link.Header.init(gpa, options.emit_h) else null, .emit_asm = options.emit_asm, .emit_llvm_ir = options.emit_llvm_ir, .emit_analysis = options.emit_analysis, .emit_docs = options.emit_docs, .work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa), .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .keep_source_files_loaded = options.keep_source_files_loaded, .use_clang = use_clang, .clang_argv = options.clang_argv, .c_source_files = options.c_source_files, .cache_parent = cache, .self_exe_path = options.self_exe_path, .libc_include_dir_list = libc_dirs.libc_include_dir_list, .sanitize_c = sanitize_c, .thread_pool = options.thread_pool, .clang_passthrough_mode = options.clang_passthrough_mode, .clang_preprocessor_mode = options.clang_preprocessor_mode, .verbose_cc = options.verbose_cc, .verbose_tokenize = options.verbose_tokenize, .verbose_ast = options.verbose_ast, .verbose_ir = options.verbose_ir, .verbose_llvm_ir = options.verbose_llvm_ir, .verbose_cimport = options.verbose_cimport, .verbose_llvm_cpu_features = options.verbose_llvm_cpu_features, .disable_c_depfile = options.disable_c_depfile, .owned_link_dir = owned_link_dir, .color = options.color, .time_report = options.time_report, .stack_report = options.stack_report, .test_filter = options.test_filter, .test_name_prefix = options.test_name_prefix, .test_evented_io = options.test_evented_io, }; break :comp comp; }; errdefer comp.destroy(); if (comp.bin_file.options.module) |mod| { try comp.work_queue.writeItem(.{ .generate_builtin_zig = {} }); } // Add a `CObject` for each `c_source_files`. 
    try comp.c_object_table.ensureCapacity(gpa, options.c_source_files.len);
    for (options.c_source_files) |c_source_file| {
        const c_object = try gpa.create(CObject);
        errdefer gpa.destroy(c_object);

        c_object.* = .{
            .status = .{ .new = {} },
            .src = c_source_file,
        };
        comp.c_object_table.putAssumeCapacityNoClobber(c_object, {});
    }

    // Queue libc/runtime build jobs, but only when actually producing an
    // artifact and not when this Compilation *is* compiler-rt/libc itself.
    if (comp.bin_file.options.emit != null and !comp.bin_file.options.is_compiler_rt_or_libc) {
        // If we need to build glibc for the target, add work items for it.
        // We go through the work queue so that building can be done in parallel.
        if (comp.wantBuildGLibCFromSource()) {
            try comp.addBuildingGLibCJobs();
        }
        if (comp.wantBuildMuslFromSource()) {
            // Worst case: crti, crtn, crt1, scrt1, rcrt1, and libc itself.
            try comp.work_queue.ensureUnusedCapacity(6);
            if (target_util.libc_needs_crti_crtn(comp.getTarget())) {
                comp.work_queue.writeAssumeCapacity(&[_]Job{
                    .{ .musl_crt_file = .crti_o }, .{ .musl_crt_file = .crtn_o },
                });
            }
            comp.work_queue.writeAssumeCapacity(&[_]Job{
                .{ .musl_crt_file = .crt1_o },
                .{ .musl_crt_file = .scrt1_o },
                .{ .musl_crt_file = .rcrt1_o },
                switch (comp.bin_file.options.link_mode) {
                    .Static => .{ .musl_crt_file = .libc_a },
                    .Dynamic => .{ .musl_crt_file = .libc_so },
                },
            });
        }
        if (comp.wantBuildMinGWFromSource()) {
            const static_lib_jobs = [_]Job{
                .{ .mingw_crt_file = .mingw32_lib },
                .{ .mingw_crt_file = .msvcrt_os_lib },
                .{ .mingw_crt_file = .mingwex_lib },
                .{ .mingw_crt_file = .uuid_lib },
            };
            const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o };
            try comp.work_queue.ensureUnusedCapacity(static_lib_jobs.len + 1);
            comp.work_queue.writeAssumeCapacity(&static_lib_jobs);
            comp.work_queue.writeItemAssumeCapacity(crt_job);

            // When linking mingw-w64 there are some import libs we always need.
            for (mingw.always_link_libs) |name| {
                try comp.bin_file.options.system_libs.put(comp.gpa, name, .{});
            }
        }
        // Generate Windows import libs.
        if (comp.getTarget().os.tag == .windows) {
            const count = comp.bin_file.options.system_libs.count();
            try comp.work_queue.ensureUnusedCapacity(count);
            var i: usize = 0;
            while (i < count) : (i += 1) {
                comp.work_queue.writeItemAssumeCapacity(.{ .windows_import_lib = i });
            }
        }
        if (comp.wantBuildLibUnwindFromSource()) {
            try comp.work_queue.writeItem(.{ .libunwind = {} });
        }
        if (build_options.have_llvm and comp.bin_file.options.output_mode != .Obj and comp.bin_file.options.link_libcpp) {
            try comp.work_queue.writeItem(.libcxx);
            try comp.work_queue.writeItem(.libcxxabi);
        }

        // The `is_stage1` condition is here only because stage2 cannot yet build compiler-rt.
        // Once it is capable this condition should be removed.
        if (build_options.is_stage1) {
            if (comp.bin_file.options.include_compiler_rt) {
                if (is_exe_or_dyn_lib or comp.getTarget().isWasm()) {
                    try comp.work_queue.writeItem(.{ .compiler_rt_lib = {} });
                } else {
                    try comp.work_queue.writeItem(.{ .compiler_rt_obj = {} });
                    if (comp.bin_file.options.object_format != .elf and comp.bin_file.options.output_mode == .Obj) {
                        // For ELF we can rely on using -r to link multiple objects together into one,
                        // but to truly support `build-obj -fcompiler-rt` will require virtually
                        // injecting `_ = @import("compiler_rt.zig")` into the root source file of
                        // the compilation.
                        fatal("Embedding compiler-rt into {s} objects is not yet implemented.", .{
                            @tagName(comp.bin_file.options.object_format),
                        });
                    }
                }
            }
            if (needs_c_symbols) {
                // MinGW provides no libssp, use our own implementation.
if (comp.getTarget().isMinGW()) { try comp.work_queue.writeItem(.{ .libssp = {} }); } if (!comp.bin_file.options.link_libc) { try comp.work_queue.writeItem(.{ .zig_libc = {} }); } } } } if (build_options.is_stage1 and comp.bin_file.options.use_llvm) { try comp.work_queue.writeItem(.{ .stage1_module = {} }); } return comp; } fn releaseStage1Lock(comp: *Compilation) void { if (comp.stage1_lock) |*lock| { lock.release(); comp.stage1_lock = null; } } pub fn destroy(self: *Compilation) void { const optional_module = self.bin_file.options.module; self.bin_file.destroy(); if (optional_module) |module| module.deinit(); self.releaseStage1Lock(); const gpa = self.gpa; self.work_queue.deinit(); { var it = self.crt_files.iterator(); while (it.next()) |entry| { gpa.free(entry.key); entry.value.deinit(gpa); } self.crt_files.deinit(gpa); } if (self.libunwind_static_lib) |*crt_file| { crt_file.deinit(gpa); } if (self.libcxx_static_lib) |*crt_file| { crt_file.deinit(gpa); } if (self.libcxxabi_static_lib) |*crt_file| { crt_file.deinit(gpa); } if (self.compiler_rt_static_lib) |*crt_file| { crt_file.deinit(gpa); } if (self.libssp_static_lib) |*crt_file| { crt_file.deinit(gpa); } if (self.libc_static_lib) |*crt_file| { crt_file.deinit(gpa); } for (self.c_object_table.items()) |entry| { entry.key.destroy(gpa); } self.c_object_table.deinit(gpa); self.c_object_cache_digest_set.deinit(gpa); for (self.failed_c_objects.items()) |entry| { entry.value.destroy(gpa); } self.failed_c_objects.deinit(gpa); if (self.c_header) |*header| { header.deinit(); } self.cache_parent.manifest_dir.close(); if (self.owned_link_dir) |*dir| dir.close(); // This destroys `self`. self.arena_state.promote(gpa).deinit(); } pub fn getTarget(self: Compilation) Target { return self.bin_file.options.target; } /// Detect changes to source files, perform semantic analysis, and update the output files. 
pub fn update(self: *Compilation) !void {
    const tracy = trace(@src());
    defer tracy.end();

    self.c_object_cache_digest_set.clearRetainingCapacity();

    // For compiling C objects, we rely on the cache hash system to avoid duplicating work.
    // Add a Job for each C object.
    try self.c_object_work_queue.ensureUnusedCapacity(self.c_object_table.items().len);
    for (self.c_object_table.items()) |entry| {
        self.c_object_work_queue.writeItemAssumeCapacity(entry.key);
    }

    // When stage1 is driving the Zig compilation, the self-hosted semantic
    // analysis below is skipped.
    const use_stage1 = build_options.is_stage1 and self.bin_file.options.use_llvm;
    if (!use_stage1) {
        if (self.bin_file.options.module) |module| {
            module.generation += 1;

            // TODO Detect which source files changed.
            // Until then we simulate a full cache miss. Source files could have been loaded for any reason;
            // to force a refresh we unload now.
            if (module.root_scope.cast(Module.Scope.File)) |zig_file| {
                zig_file.unload(module.gpa);
                module.failed_root_src_file = null;
                module.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
                    error.AnalysisFail => {
                        // AnalysisFail means errors were already recorded.
                        assert(self.totalErrorCount() != 0);
                    },
                    error.OutOfMemory => return error.OutOfMemory,
                    else => |e| {
                        // Remember the root-file read failure for error reporting.
                        module.failed_root_src_file = e;
                    },
                };
            } else if (module.root_scope.cast(Module.Scope.ZIRModule)) |zir_module| {
                zir_module.unload(module.gpa);
                module.analyzeRootZIRModule(zir_module) catch |err| switch (err) {
                    error.AnalysisFail => {
                        assert(self.totalErrorCount() != 0);
                    },
                    else => |e| return e,
                };
            }

            // TODO only analyze imports if they are still referenced
            for (module.import_table.items()) |entry| {
                entry.value.unload(module.gpa);
                module.analyzeContainer(&entry.value.root_container) catch |err| switch (err) {
                    error.AnalysisFail => {
                        assert(self.totalErrorCount() != 0);
                    },
                    else => |e| return e,
                };
            }
        }
    }

    try self.performAllTheWork();

    if (!use_stage1) {
        if (self.bin_file.options.module) |module| {
            // Process the deletion set: delete decls that no longer have any
            // dependants; keep (and un-flag) those that gained some back.
            while (module.deletion_set.popOrNull()) |decl| {
                if (decl.dependants.items().len != 0) {
                    decl.deletion_flag = false;
                    continue;
                }
                try module.deleteDecl(decl);
            }
        }
    }

    if (self.totalErrorCount() != 0) {
        // Skip flushing.
        self.link_error_flags = .{};
        return;
    }

    // This is needed before reading the error flags.
    try self.bin_file.flush(self);

    self.link_error_flags = self.bin_file.errorFlags();

    // If there are any errors, we anticipate the source files being loaded
    // to report error messages. Otherwise we unload all source files to save memory.
    if (self.totalErrorCount() == 0 and !self.keep_source_files_loaded) {
        if (self.bin_file.options.module) |module| {
            module.root_scope.unload(self.gpa);
        }
    }

    // If we've chosen to emit a C header, flush the header to the disk.
    if (self.c_header) |header| {
        const header_path = header.emit_loc.?;
        // If a directory has been provided, write the header there. Otherwise, just write it to the
        // cache directory.
        const header_dir = if (header_path.directory) |dir|
            dir.handle
        else
            self.local_cache_directory.handle;
        const header_file = try header_dir.createFile(header_path.basename, .{});
        defer header_file.close();
        try header.flush(header_file.writer());
    }
}

/// Having the file open for writing is problematic as far as executing the
/// binary is concerned. This will remove the write flag, or close the file,
/// or whatever is needed so that it can be executed.
/// After this, one must call `makeBinFileWritable` before calling `update`.
pub fn makeBinFileExecutable(self: *Compilation) !void {
    return self.bin_file.makeExecutable();
}

pub fn makeBinFileWritable(self: *Compilation) !void {
    return self.bin_file.makeWritable();
}

/// Counts every recorded failure: C objects, decls, exports, files, and the
/// root source file. The linker's "no entry point found" flag only counts
/// when nothing else failed.
pub fn totalErrorCount(self: *Compilation) usize {
    var total: usize = self.failed_c_objects.items().len;

    if (self.bin_file.options.module) |module| {
        total += module.failed_decls.items().len +
            module.failed_exports.items().len +
            module.failed_files.items().len +
            @boolToInt(module.failed_root_src_file != null);
    }

    // The "no entry point found" error only counts if there are no other errors.
    if (total == 0) {
        return @boolToInt(self.link_error_flags.no_entry_point_found);
    }

    return total;
}

/// Collects all recorded errors into an `AllErrors` backed by its own arena.
/// Caller owns the result (deinit via its arena state).
pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
    var arena = std.heap.ArenaAllocator.init(self.gpa);
    errdefer arena.deinit();

    var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
    defer errors.deinit();

    for (self.failed_c_objects.items()) |entry| {
        const c_object = entry.key;
        const err_msg = entry.value;
        try AllErrors.add(&arena, &errors, c_object.src.src_path, "", err_msg.*);
    }
    if (self.bin_file.options.module) |module| {
        for (module.failed_files.items()) |entry| {
            const scope = entry.key;
            const err_msg = entry.value;
            const source = try scope.getSource(module);
            try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
        }
        for (module.failed_decls.items()) |entry| {
            const decl = entry.key;
            const err_msg = entry.value;
            const source = try decl.scope.getSource(module);
            try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
        }
        for (module.failed_exports.items()) |entry| {
            const decl = entry.key.owner_decl;
            const err_msg = entry.value;
            const source = try decl.scope.getSource(module);
            try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
        }
        if (module.failed_root_src_file) |err| {
            const file_path = try module.root_pkg.root_src_directory.join(&arena.allocator, &[_][]const u8{
                module.root_pkg.root_src_path,
            });
            const msg = try std.fmt.allocPrint(&arena.allocator, "unable to read {s}: {s}", .{
                file_path, @errorName(err),
            });
            try AllErrors.addPlain(&arena, &errors, msg);
        }
    }

    if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
        try errors.append(.{
            .plain = .{
                .msg = try std.fmt.allocPrint(&arena.allocator, "no entry point found", .{}),
            },
        });
    }

    // Sanity check: the collected list must agree with the counter.
    assert(errors.items.len == self.totalErrorCount());

    return AllErrors{
        .list = try arena.allocator.dupe(AllErrors.Message, errors.items),
        .arena = arena.state,
    };
}

/// Drains both work queues: C objects are compiled in parallel on the thread
/// pool, while Zig decl jobs and CRT/runtime build jobs run serially here.
pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void {
    var progress: std.Progress = .{};
    var main_progress_node = try progress.start("", 0);
    defer main_progress_node.end();
    if (self.color == .off) progress.terminal = null;

    var c_comp_progress_node = main_progress_node.start("Compile C Objects", self.c_source_files.len);
    defer c_comp_progress_node.end();

    var arena = std.heap.ArenaAllocator.init(self.gpa);
    defer arena.deinit();

    // Wait for all spawned C-object workers before returning.
    var wg = WaitGroup{};
    defer wg.wait();

    while (self.c_object_work_queue.readItem()) |c_object| {
        wg.start();
        try self.thread_pool.spawn(workerUpdateCObject, .{
            self, c_object, &c_comp_progress_node, &wg,
        });
    }

    while (self.work_queue.readItem()) |work_item| switch (work_item) {
        .codegen_decl => |decl| switch (decl.analysis) {
            .unreferenced => unreachable,
            .in_progress => unreachable,
            .outdated => unreachable,

            .sema_failure,
            .codegen_failure,
            .dependency_failure,
            .sema_failure_retryable,
            => continue,

            .complete, .codegen_failure_retryable => {
                const module = self.bin_file.options.module.?;
                if (decl.typed_value.most_recent.typed_value.val.cast(Value.Payload.Function)) |payload| {
                    switch (payload.func.analysis) {
                        .queued => module.analyzeFnBody(decl, payload.func) catch |err| switch (err) {
                            error.AnalysisFail => {
                                assert(payload.func.analysis != .in_progress);
                                continue;
                            },
                            error.OutOfMemory => return error.OutOfMemory,
                        },
                        .in_progress => unreachable,
                        .sema_failure, .dependency_failure => continue,
                        .success =>
{}, } // Here we tack on additional allocations to the Decl's arena. The allocations are // lifetime annotations in the ZIR. var decl_arena = decl.typed_value.most_recent.arena.?.promote(module.gpa); defer decl.typed_value.most_recent.arena.?.* = decl_arena.state; log.debug("analyze liveness of {}\n", .{decl.name}); try liveness.analyze(module.gpa, &decl_arena.allocator, payload.func.analysis.success); } assert(decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits()); self.bin_file.updateDecl(module, decl) catch |err| { switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .dependency_failure; }, else => { try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.items().len + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( module.gpa, decl.src(), "unable to codegen: {}", .{@errorName(err)}, )); decl.analysis = .codegen_failure_retryable; }, } return; }; if (self.c_header) |*header| { c_codegen.generateHeader(&arena, module, &header.*, decl) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .dependency_failure; }, else => { try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.items().len + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( module.gpa, decl.src(), "unable to generate C header: {}", .{@errorName(err)}, )); decl.analysis = .codegen_failure_retryable; }, }; } }, }, .analyze_decl => |decl| { const module = self.bin_file.options.module.?; module.ensureDeclAnalyzed(decl) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => continue, }; }, .update_line_number => |decl| { const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.items().len + 1); 
module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( module.gpa, decl.src(), "unable to update line number: {}", .{@errorName(err)}, )); decl.analysis = .codegen_failure_retryable; }; }, .glibc_crt_file => |crt_file| { glibc.buildCRTFile(self, crt_file) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build glibc CRT file: {}", .{@errorName(err)}); }; }, .glibc_shared_objects => { glibc.buildSharedObjects(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build glibc shared objects: {}", .{@errorName(err)}); }; }, .musl_crt_file => |crt_file| { musl.buildCRTFile(self, crt_file) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build musl CRT file: {}", .{@errorName(err)}); }; }, .mingw_crt_file => |crt_file| { mingw.buildCRTFile(self, crt_file) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build mingw-w64 CRT file: {}", .{@errorName(err)}); }; }, .windows_import_lib => |index| { const link_lib = self.bin_file.options.system_libs.items()[index].key; mingw.buildImportLib(self, link_lib) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to generate DLL import .lib file: {}", .{@errorName(err)}); }; }, .libunwind => { libunwind.buildStaticLib(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build libunwind: {}", .{@errorName(err)}); }; }, .libcxx => { libcxx.buildLibCXX(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build libcxx: {}", .{@errorName(err)}); }; }, .libcxxabi => { libcxx.buildLibCXXABI(self) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. 
fatal("unable to build libcxxabi: {}", .{@errorName(err)}); }; }, .compiler_rt_lib => { self.buildOutputFromZig("compiler_rt.zig", .Lib, &self.compiler_rt_static_lib) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build compiler_rt: {s}", .{@errorName(err)}); }; }, .compiler_rt_obj => { self.buildOutputFromZig("compiler_rt.zig", .Obj, &self.compiler_rt_obj) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build compiler_rt: {s}", .{@errorName(err)}); }; }, .libssp => { self.buildOutputFromZig("ssp.zig", .Lib, &self.libssp_static_lib) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build libssp: {}", .{@errorName(err)}); }; }, .zig_libc => { self.buildOutputFromZig("c.zig", .Lib, &self.libc_static_lib) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to build zig's multitarget libc: {}", .{@errorName(err)}); }; }, .generate_builtin_zig => { // This Job is only queued up if there is a zig module. self.updateBuiltinZigFile(self.bin_file.options.module.?) catch |err| { // TODO Expose this as a normal compile error rather than crashing here. fatal("unable to update builtin.zig file: {}", .{@errorName(err)}); }; }, .stage1_module => { if (!build_options.is_stage1) unreachable; self.updateStage1Module(main_progress_node) catch |err| { fatal("unable to build stage1 zig object: {}", .{@errorName(err)}); }; }, }; } pub fn obtainCObjectCacheManifest(comp: *const Compilation) Cache.Manifest { var man = comp.cache_parent.obtain(); // Only things that need to be added on top of the base hash, and only things // that apply both to @cImport and compiling C objects. No linking stuff here! // Also nothing that applies only to compiling .zig code. 
    // (Continuation of obtainCObjectCacheManifest.)
    // Hash inputs common to @cImport and C object compilation.
    man.hash.add(comp.sanitize_c);
    man.hash.addListOfBytes(comp.clang_argv);
    man.hash.add(comp.bin_file.options.link_libcpp);
    man.hash.addListOfBytes(comp.libc_include_dir_list);

    return man;
}

test "cImport" {
    _ = cImport;
}

// Result of translating C source via clang: either the path to the generated
// .zig file, or the clang error messages (in which case out_zig_path is "").
const CImportResult = struct {
    out_zig_path: []u8,
    errors: []translate_c.ClangErrMsg,
};

/// Caller owns returned memory.
/// This API is currently coupled pretty tightly to stage1's needs; it will need to be reworked
/// a bit when we want to start using it from self-hosted.
pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
    if (!build_options.have_llvm)
        return error.ZigCompilerNotBuiltWithLLVMExtensions;

    const tracy = trace(@src());
    defer tracy.end();

    const cimport_zig_basename = "cimport.zig";

    var man = comp.obtainCObjectCacheManifest();
    defer man.deinit();

    man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects
    man.hash.addBytes(c_src);

    // If the previous invocation resulted in clang errors, we will see a hit
    // here with 0 files in the manifest, in which case it is actually a miss.
    // We need to "unhit" in this case, to keep the digests matching.
    const prev_hash_state = man.hash.peekBin();
    const actual_hit = hit: {
        // NOTE(review): `is_hit` is discarded below; the `try` is what matters
        // here — confirm whether `break :hit is_hit;` was intended.
        const is_hit = try man.hit();
        if (man.files.items.len == 0) {
            man.unhit(prev_hash_state, 0);
            break :hit false;
        }
        break :hit true;
    };
    const digest = if (!actual_hit) digest: {
        // Cache miss: write the C source to a temp dir, run translate-c on it,
        // render the resulting tree to o/<digest>/cimport.zig.
        var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
        defer arena_allocator.deinit();
        const arena = &arena_allocator.allocator;

        const tmp_digest = man.hash.peek();
        const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest });
        var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
        defer zig_cache_tmp_dir.close();
        const cimport_basename = "cimport.h";
        const out_h_path = try comp.local_cache_directory.join(arena, &[_][]const u8{
            tmp_dir_sub_path, cimport_basename,
        });
        const out_dep_path = try std.fmt.allocPrint(arena, "{}.d", .{out_h_path});

        try zig_cache_tmp_dir.writeFile(cimport_basename, c_src);
        if (comp.verbose_cimport) {
            log.info("C import source: {}", .{out_h_path});
        }

        var argv = std.ArrayList([]const u8).init(comp.gpa);
        defer argv.deinit();

        try comp.addTranslateCCArgs(arena, &argv, .c, out_dep_path);

        try argv.append(out_h_path);

        if (comp.verbose_cc) {
            dump_argv(argv.items);
        }

        // Convert to null terminated args.
        const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
        new_argv_with_sentinel[argv.items.len] = null;
        const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
        for (argv.items) |arg, i| {
            new_argv[i] = try arena.dupeZ(u8, arg);
        }

        const c_headers_dir_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"include"});
        const c_headers_dir_path_z = try arena.dupeZ(u8, c_headers_dir_path);
        var clang_errors: []translate_c.ClangErrMsg = &[0]translate_c.ClangErrMsg{};
        const tree = translate_c.translate(
            comp.gpa,
            new_argv.ptr,
            new_argv.ptr + new_argv.len,
            &clang_errors,
            c_headers_dir_path_z,
        ) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            error.ASTUnitFailure => {
                log.warn("clang API returned errors but due to a clang bug, it is not exposing the errors for zig to see. For more details: https://github.com/ziglang/zig/issues/4455", .{});
                return error.ASTUnitFailure;
            },
            error.SemanticAnalyzeFail => {
                // Clang diagnostics are handed to the caller via `errors`.
                return CImportResult{
                    .out_zig_path = "",
                    .errors = clang_errors,
                };
            },
        };
        defer tree.deinit();

        if (comp.verbose_cimport) {
            log.info("C import .d file: {}", .{out_dep_path});
        }

        // Register the dep file's contents with both this manifest and the
        // stage1 manifest so the cache tracks the headers that were included.
        const dep_basename = std.fs.path.basename(out_dep_path);
        try man.addDepFilePost(zig_cache_tmp_dir, dep_basename);
        try comp.stage1_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);

        const digest = man.final();
        const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
        var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
        defer o_dir.close();

        var out_zig_file = try o_dir.createFile(cimport_zig_basename, .{});
        defer out_zig_file.close();

        var bos = std.io.bufferedOutStream(out_zig_file.writer());
        _ = try std.zig.render(comp.gpa, bos.writer(), tree);
        try bos.flush();

        // A failed manifest write only degrades caching; it is not fatal.
        man.writeManifest() catch |err| {
            log.warn("failed to write cache manifest for C import: {}", .{@errorName(err)});
        };

        break :digest digest;
    } else man.final();

    const out_zig_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{
        "o", &digest, cimport_zig_basename,
    });
    if (comp.verbose_cimport) {
        log.info("C import output: {}\n", .{out_zig_path});
    }
    return CImportResult{
        .out_zig_path = out_zig_path,
        .errors = &[0]translate_c.ClangErrMsg{},
    };
}

// Thread-pool entry point wrapping updateCObject. Errors other than
// AnalysisFail are recorded into failed_c_objects under the mutex;
// AnalysisFail means the failure was already recorded by the callee.
fn workerUpdateCObject(
    comp: *Compilation,
    c_object: *CObject,
    progress_node: *std.Progress.Node,
    wg: *WaitGroup,
) void {
    defer wg.stop();

    comp.updateCObject(c_object, progress_node) catch |err| switch (err) {
        error.AnalysisFail => return,
        else => {
            {
                const lock = comp.mutex.acquire();
                defer lock.release();
                comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.items().len + 1) catch {
                    fatal("TODO handle this by setting c_object.status = oom failure", .{});
                };
                comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, ErrorMsg.create(
                    comp.gpa,
                    0,
                    "unable to build C object: {s}",
                    .{@errorName(err)},
                ) catch {
                    fatal("TODO handle this by setting c_object.status = oom failure", .{});
                });
            }
            c_object.status = .{ .failure = {} };
        },
    };
}

// Compile (or fetch from cache) a single C source file into an object file,
// updating c_object.status on success. Returns error.AnalysisFail after
// recording a user-facing error via failCObj.
fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: *std.Progress.Node) !void {
    if (!build_options.have_llvm) {
        return comp.failCObj(c_object, "clang not available: compiler built without LLVM extensions", .{});
    }
    const self_exe_path = comp.self_exe_path orelse
        return comp.failCObj(c_object, "clang compilation disabled", .{});

    const tracy = trace(@src());
    defer tracy.end();

    if (c_object.clearStatus(comp.gpa)) {
        // There was previous failure.
        const lock = comp.mutex.acquire();
        defer lock.release();
        comp.failed_c_objects.removeAssertDiscard(c_object);
    }

    var man = comp.obtainCObjectCacheManifest();
    defer man.deinit();

    man.hash.add(comp.clang_preprocessor_mode);

    _ = try man.addFile(c_object.src.src_path, null);
    {
        // Hash the extra flags, with special care to call addFile for file parameters.
        // TODO this logic can likely be improved by utilizing clang_options_data.zig.
        // (Continuation of updateCObject: hashing extra flags.)
        // Flags in file_args take a following file argument that must be
        // tracked by the cache as a file, not just as bytes.
        const file_args = [_][]const u8{"-include"};
        var arg_i: usize = 0;
        while (arg_i < c_object.src.extra_flags.len) : (arg_i += 1) {
            const arg = c_object.src.extra_flags[arg_i];
            man.hash.addBytes(arg);
            for (file_args) |file_arg| {
                if (mem.eql(u8, file_arg, arg) and arg_i + 1 < c_object.src.extra_flags.len) {
                    arg_i += 1;
                    _ = try man.addFile(c_object.src.extra_flags[arg_i], null);
                }
            }
        }
    }

    {
        // Detect the same source file being added twice with identical flags:
        // both would race to produce the same cache artifact.
        const is_collision = blk: {
            const bin_digest = man.hash.peekBin();

            const lock = comp.mutex.acquire();
            defer lock.release();

            const gop = try comp.c_object_cache_digest_set.getOrPut(comp.gpa, bin_digest);
            break :blk gop.found_existing;
        };
        if (is_collision) {
            return comp.failCObj(
                c_object,
                "the same source file was already added to the same compilation with the same flags",
                .{},
            );
        }
    }

    var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
    defer arena_allocator.deinit();
    const arena = &arena_allocator.allocator;

    const c_source_basename = std.fs.path.basename(c_object.src.src_path);

    c_comp_progress_node.activate();
    var child_progress_node = c_comp_progress_node.start(c_source_basename, 0);
    child_progress_node.activate();
    defer child_progress_node.end();

    // Special case when doing build-obj for just one C file. When there are more than one object
    // file and building an object we need to link them together, but with just one it should go
    // directly to the output file.
    const direct_o = comp.c_source_files.len == 1 and comp.bin_file.options.module == null and
        comp.bin_file.options.output_mode == .Obj and comp.bin_file.options.objects.len == 0;
    const o_basename_noext = if (direct_o)
        comp.bin_file.options.root_name
    else
        // Strip the source extension; the target's object extension is appended below.
        c_source_basename[0 .. c_source_basename.len - std.fs.path.extension(c_source_basename).len];
    const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, comp.getTarget().oFileExt() });

    // On cache hit the digest is final; otherwise invoke clang via our own exe.
    const digest = if (!comp.disable_c_depfile and try man.hit()) man.final() else blk: {
        var argv = std.ArrayList([]const u8).init(comp.gpa);
        defer argv.deinit();

        // We can't know the digest until we do the C compiler invocation, so we need a temporary filename.
        const out_obj_path = try comp.tmpFilePath(arena, o_basename);
        var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
        defer zig_cache_tmp_dir.close();

        try argv.appendSlice(&[_][]const u8{ self_exe_path, "clang" });

        const ext = classifyFileExt(c_object.src.src_path);
        const out_dep_path: ?[]const u8 = if (comp.disable_c_depfile or !ext.clangSupportsDepFile())
            null
        else
            try std.fmt.allocPrint(arena, "{s}.d", .{out_obj_path});
        try comp.addCCArgs(arena, &argv, ext, out_dep_path);

        try argv.ensureCapacity(argv.items.len + 3);
        switch (comp.clang_preprocessor_mode) {
            .no => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-c", "-o", out_obj_path }),
            .yes => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-E", "-o", out_obj_path }),
            .stdout => argv.appendAssumeCapacity("-E"),
        }

        try argv.append(c_object.src.src_path);
        try argv.appendSlice(c_object.src.extra_flags);

        if (comp.verbose_cc) {
            dump_argv(argv.items);
        }

        const child = try std.ChildProcess.init(argv.items, arena);
        defer child.deinit();

        if (comp.clang_passthrough_mode) {
            // Passthrough: clang talks directly to the user's terminal and this
            // process exits with clang's status.
            child.stdin_behavior = .Inherit;
            child.stdout_behavior = .Inherit;
            child.stderr_behavior = .Inherit;

            const term = child.spawnAndWait() catch |err| {
                return comp.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], @errorName(err) });
            };
            switch (term) {
                .Exited => |code| {
                    if (code != 0) {
                        // TODO https://github.com/ziglang/zig/issues/6342
                        std.process.exit(1);
                    }
                    if (comp.clang_preprocessor_mode == .stdout)
                        std.process.exit(0);
                },
                else => std.process.abort(),
            }
        } else {
            // Normal mode: capture stderr so failures can be reported.
            child.stdin_behavior = .Ignore;
            child.stdout_behavior = .Ignore;
            child.stderr_behavior = .Pipe;

            try child.spawn();

            const stderr_reader = child.stderr.?.reader();

            // TODO https://github.com/ziglang/zig/issues/6343
            // Please uncomment and use stdout once this issue is fixed
            // const stdout = try stdout_reader.readAllAlloc(arena, std.math.maxInt(u32));
            const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);

            const term = child.wait() catch |err| {
                return comp.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], @errorName(err) });
            };

            switch (term) {
                .Exited => |code| {
                    if (code != 0) {
                        // TODO parse clang stderr and turn it into an error message
                        // and then call failCObjWithOwnedErrorMsg
                        log.err("clang failed with stderr: {}", .{stderr});
                        return comp.failCObj(c_object, "clang exited with code {}", .{code});
                    }
                },
                else => {
                    log.err("clang terminated with stderr: {}", .{stderr});
                    return comp.failCObj(c_object, "clang terminated unexpectedly", .{});
                },
            }
        }

        if (out_dep_path) |dep_file_path| {
            const dep_basename = std.fs.path.basename(dep_file_path);
            // Add the files depended on to the cache system.
            try man.addDepFilePost(zig_cache_tmp_dir, dep_basename);
            // Just to save disk space, we delete the file because it is never needed again.
            zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| {
                log.warn("failed to delete '{}': {}", .{ dep_file_path, @errorName(err) });
            };
        }

        // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
        if (comp.disable_c_depfile) _ = try man.hit();

        // Rename into place.
        const digest = man.final();
        const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
        var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
        defer o_dir.close();
        const tmp_basename = std.fs.path.basename(out_obj_path);
        try std.fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename);

        man.writeManifest() catch |err| {
            log.warn("failed to write cache manifest when compiling '{}': {}", .{ c_object.src.src_path, @errorName(err) });
        };
        break :blk digest;
    };

    // Record the final artifact path; the manifest lock is transferred to the
    // CObject so the cache entry stays locked while it is in use.
    c_object.status = .{
        .success = .{
            .object_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{
                "o", &digest, o_basename,
            }),
            .lock = man.toOwnedLock(),
        },
    };
}

/// Build a path of the form `<local-cache>/tmp/<random-hex>-<suffix>`
/// (or just `tmp/...` when the cache directory has no path). The random
/// component makes concurrent invocations collision-free. Result is
/// allocated in `arena`.
pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
    const s = std.fs.path.sep_str;
    const rand_int = std.crypto.random.int(u64);
    if (comp.local_cache_directory.path) |p| {
        return std.fmt.allocPrint(arena, "{}" ++ s ++ "tmp" ++ s ++ "{x}-{s}", .{ p, rand_int, suffix });
    } else {
        return std.fmt.allocPrint(arena, "tmp" ++ s ++ "{x}-{s}", .{ rand_int, suffix });
    }
}

/// Append the clang arguments used for translate-c: the common C args plus
/// `-x c` and detailed preprocessing records.
pub fn addTranslateCCArgs(
    comp: *Compilation,
    arena: *Allocator,
    argv: *std.ArrayList([]const u8),
    ext: FileExt,
    out_dep_path: ?[]const u8,
) !void {
    try argv.appendSlice(&[_][]const u8{ "-x", "c" });
    try comp.addCCArgs(arena, argv, ext, out_dep_path);
    // This gives us access to preprocessing entities, presumably at the cost of performance.
    try argv.appendSlice(&[_][]const u8{ "-Xclang", "-detailed-preprocessing-record" });
}

/// Add common C compiler args between translate-c and C object compilation.
pub fn addCCArgs(
    comp: *const Compilation,
    arena: *Allocator,
    argv: *std.ArrayList([]const u8),
    ext: FileExt,
    out_dep_path: ?[]const u8,
) !void {
    const target = comp.getTarget();

    if (ext == .cpp) {
        try argv.append("-nostdinc++");
    }

    // We don't ever put `-fcolor-diagnostics` or `-fno-color-diagnostics` because in passthrough mode
    // we want Clang to infer it, and in normal mode we always want it off, which will be true since
    // clang will detect stderr as a pipe rather than a terminal.
    if (!comp.clang_passthrough_mode) {
        // Make stderr more easily parseable.
        try argv.append("-fno-caret-diagnostics");
    }

    if (comp.bin_file.options.function_sections) {
        try argv.append("-ffunction-sections");
    }

    try argv.ensureCapacity(argv.items.len + comp.bin_file.options.framework_dirs.len * 2);
    for (comp.bin_file.options.framework_dirs) |framework_dir| {
        argv.appendAssumeCapacity("-iframework");
        argv.appendAssumeCapacity(framework_dir);
    }

    if (comp.bin_file.options.link_libcpp) {
        // Point clang at the bundled libc++/libc++abi headers.
        const libcxx_include_path = try std.fs.path.join(arena, &[_][]const u8{
            comp.zig_lib_directory.path.?, "libcxx", "include",
        });
        const libcxxabi_include_path = try std.fs.path.join(arena, &[_][]const u8{
            comp.zig_lib_directory.path.?, "libcxxabi", "include",
        });

        try argv.append("-isystem");
        try argv.append(libcxx_include_path);

        try argv.append("-isystem");
        try argv.append(libcxxabi_include_path);

        if (target.abi.isMusl()) {
            try argv.append("-D_LIBCPP_HAS_MUSL_LIBC");
        }
        try argv.append("-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS");
        try argv.append("-D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS");
    }

    const llvm_triple = try @import("codegen/llvm.zig").targetTriple(arena, target);
    try argv.appendSlice(&[_][]const u8{ "-target", llvm_triple });

    switch (ext) {
        .c, .cpp, .h => {
            try argv.appendSlice(&[_][]const u8{
                "-nostdinc",
                "-fno-spell-checking",
            });

            // According to <NAME> libc headers are supposed to go before C language headers.
            // However as noted by @dimenus, appending libc headers before c_headers breaks intrinsics
            // and other compiler specific items.
            const c_headers_dir = try std.fs.path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, "include" });
            try argv.append("-isystem");
            try argv.append(c_headers_dir);

            for (comp.libc_include_dir_list) |include_dir| {
                try argv.append("-isystem");
                try argv.append(include_dir);
            }

            if (target.cpu.model.llvm_name) |llvm_name| {
                try argv.appendSlice(&[_][]const u8{
                    "-Xclang", "-target-cpu", "-Xclang", llvm_name,
                });
            }

            // It would be really nice if there was a more compact way to communicate this info to Clang.
            // Each CPU feature becomes "-Xclang -target-feature -Xclang [+-]<name>".
            const all_features_list = target.cpu.arch.allFeaturesList();
            try argv.ensureCapacity(argv.items.len + all_features_list.len * 4);
            for (all_features_list) |feature, index_usize| {
                const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
                const is_enabled = target.cpu.features.isEnabled(index);

                if (feature.llvm_name) |llvm_name| {
                    argv.appendSliceAssumeCapacity(&[_][]const u8{ "-Xclang", "-target-feature", "-Xclang" });
                    // Index 0 is '-' (disabled), index 1 is '+' (enabled).
                    const plus_or_minus = "-+"[@boolToInt(is_enabled)];
                    const arg = try std.fmt.allocPrint(arena, "{c}{s}", .{ plus_or_minus, llvm_name });
                    argv.appendAssumeCapacity(arg);
                }
            }
            const mcmodel = comp.bin_file.options.machine_code_model;
            if (mcmodel != .default) {
                try argv.append(try std.fmt.allocPrint(arena, "-mcmodel={}", .{@tagName(mcmodel)}));
            }

            switch (target.os.tag) {
                .windows => {
                    // windows.h has files such as pshpack1.h which do #pragma packing,
                    // triggering a clang warning. So for this target, we disable this warning.
                    if (target.abi.isGnu()) {
                        try argv.append("-Wno-pragma-pack");
                    }
                },
                .macos => {
                    // Pass the proper -m<os>-version-min argument for darwin.
                    const ver = target.os.version_range.semver.min;
                    try argv.append(try std.fmt.allocPrint(arena, "-mmacos-version-min={d}.{d}.{d}", .{
                        ver.major, ver.minor, ver.patch,
                    }));
                },
                .ios, .tvos, .watchos => switch (target.cpu.arch) {
                    // Pass the proper -m<os>-version-min argument for darwin.
                    // i386/x86_64 here means the simulator variant of these OSes.
                    .i386, .x86_64 => {
                        const ver = target.os.version_range.semver.min;
                        try argv.append(try std.fmt.allocPrint(
                            arena,
                            "-m{s}-simulator-version-min={d}.{d}.{d}",
                            .{ @tagName(target.os.tag), ver.major, ver.minor, ver.patch },
                        ));
                    },
                    else => {
                        const ver = target.os.version_range.semver.min;
                        try argv.append(try std.fmt.allocPrint(arena, "-m{s}-version-min={d}.{d}.{d}", .{
                            @tagName(target.os.tag), ver.major, ver.minor, ver.patch,
                        }));
                    },
                },
                else => {},
            }

            if (!comp.bin_file.options.strip) {
                try argv.append("-g");
                switch (comp.bin_file.options.object_format) {
                    .coff, .pe => try argv.append("-gcodeview"),
                    else => {},
                }
            }

            if (comp.haveFramePointer()) {
                try argv.append("-fno-omit-frame-pointer");
            } else {
                try argv.append("-fomit-frame-pointer");
            }

            if (comp.sanitize_c) {
                try argv.append("-fsanitize=undefined");
                try argv.append("-fsanitize-trap=undefined");
            }

            switch (comp.bin_file.options.optimize_mode) {
                .Debug => {
                    // windows c runtime requires -D_DEBUG if using debug libraries
                    try argv.append("-D_DEBUG");
                    try argv.append("-Og");

                    if (comp.bin_file.options.link_libc) {
                        try argv.append("-fstack-protector-strong");
                        try argv.append("--param");
                        try argv.append("ssp-buffer-size=4");
                    } else {
                        try argv.append("-fno-stack-protector");
                    }
                },
                .ReleaseSafe => {
                    // See the comment in the BuildModeFastRelease case for why we pass -O2 rather
                    // than -O3 here.
                    try argv.append("-O2");
                    if (comp.bin_file.options.link_libc) {
                        try argv.append("-D_FORTIFY_SOURCE=2");
                        try argv.append("-fstack-protector-strong");
                        try argv.append("--param");
                        try argv.append("ssp-buffer-size=4");
                    } else {
                        try argv.append("-fno-stack-protector");
                    }
                },
                .ReleaseFast => {
                    try argv.append("-DNDEBUG");
                    // Here we pass -O2 rather than -O3 because, although we do the equivalent of
                    // -O3 in Zig code, the justification for the difference here is that Zig
                    // has better detection and prevention of undefined behavior, so -O3 is safer for
                    // Zig code than it is for C code. Also, C programmers are used to their code
                    // running in -O2 and thus the -O3 path has been tested less.
                    try argv.append("-O2");
                    try argv.append("-fno-stack-protector");
                },
                .ReleaseSmall => {
                    try argv.append("-DNDEBUG");
                    try argv.append("-Os");
                    try argv.append("-fno-stack-protector");
                },
            }

            if (target_util.supports_fpic(target) and comp.bin_file.options.pic) {
                try argv.append("-fPIC");
            }
        },
        .shared_library, .assembly, .ll, .bc, .unknown, .static_library, .object, .zig, .zir => {},
    }
    if (out_dep_path) |p| {
        try argv.appendSlice(&[_][]const u8{ "-MD", "-MV", "-MF", p });
    }
    // Argh, why doesn't the assembler accept the list of CPU features?!
    // I don't see a way to do this other than hard coding everything.
    switch (target.cpu.arch) {
        .riscv32, .riscv64 => {
            if (std.Target.riscv.featureSetHas(target.cpu.features, .relax)) {
                try argv.append("-mrelax");
            } else {
                try argv.append("-mno-relax");
            }
        },
        else => {
            // TODO
        },
    }

    if (target.os.tag == .freestanding) {
        try argv.append("-ffreestanding");
    }

    // User-provided clang args go last so they can override the above.
    try argv.appendSlice(comp.clang_argv);
}

// Record a formatted build failure for a C object and return AnalysisFail.
// The message is prefixed with "unable to build C object: ".
fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) InnerError {
    @setCold(true);
    const err_msg = try ErrorMsg.create(comp.gpa, 0, "unable to build C object: " ++ format, args);
    return comp.failCObjWithOwnedErrorMsg(c_object, err_msg);
}

// Takes ownership of err_msg. Stores it in failed_c_objects under the mutex
// (destroying it if the table cannot grow), marks the CObject as failed, and
// always returns error.AnalysisFail.
fn failCObjWithOwnedErrorMsg(comp: *Compilation, c_object: *CObject, err_msg: *ErrorMsg) InnerError {
    {
        const lock = comp.mutex.acquire();
        defer lock.release();
        {
            errdefer err_msg.destroy(comp.gpa);
            try comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.items().len + 1);
        }
        comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
    }
    c_object.status = .failure;
    return error.AnalysisFail;
}

/// A heap-allocated error message tied to a byte offset in some source.
pub const ErrorMsg = struct {
    byte_offset: usize,
    msg: []const u8,

    /// Allocate and initialize an ErrorMsg; caller frees via `destroy`.
    pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg {
        const self = try gpa.create(ErrorMsg);
        errdefer gpa.destroy(self);
        self.* = try init(gpa, byte_offset, format, args);
        return self;
    }

    /// Assumes the ErrorMsg struct and msg were both allocated with allocator.
    // (Continuation of ErrorMsg, whose declaration begins before this chunk.)
    pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void {
        self.deinit(gpa);
        gpa.destroy(self);
    }

    /// Initialize in place; the message text is allocated with `gpa`.
    pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !ErrorMsg {
        return ErrorMsg{
            .byte_offset = byte_offset,
            .msg = try std.fmt.allocPrint(gpa, format, args),
        };
    }

    /// Frees the message text only (not the struct itself).
    pub fn deinit(self: *ErrorMsg, gpa: *Allocator) void {
        gpa.free(self.msg);
        self.* = undefined;
    }
};

/// Classification of an input file by its extension.
pub const FileExt = enum {
    c,
    cpp,
    h,
    ll,
    bc,
    assembly,
    shared_library,
    object,
    static_library,
    zig,
    zir,
    unknown,

    /// Whether clang can emit a .d dependency file for this kind of input
    /// (only C-family sources).
    pub fn clangSupportsDepFile(ext: FileExt) bool {
        return switch (ext) {
            .c, .cpp, .h => true,

            .ll,
            .bc,
            .assembly,
            .shared_library,
            .object,
            .static_library,
            .zig,
            .zir,
            .unknown,
            => false,
        };
    }
};

/// True for object file extensions (.o, .obj).
pub fn hasObjectExt(filename: []const u8) bool {
    return mem.endsWith(u8, filename, ".o") or mem.endsWith(u8, filename, ".obj");
}

/// True for static library extensions (.a, .lib).
pub fn hasStaticLibraryExt(filename: []const u8) bool {
    return mem.endsWith(u8, filename, ".a") or mem.endsWith(u8, filename, ".lib");
}

/// True for C source files (.c only; capital .C counts as C++).
pub fn hasCExt(filename: []const u8) bool {
    return mem.endsWith(u8, filename, ".c");
}

/// True for C++ source files (.C, .cc, .cpp, .cxx).
pub fn hasCppExt(filename: []const u8) bool {
    return mem.endsWith(u8, filename, ".C") or
        mem.endsWith(u8, filename, ".cc") or
        mem.endsWith(u8, filename, ".cpp") or
        mem.endsWith(u8, filename, ".cxx");
}

/// True for assembly files (.s, and .S which conventionally is preprocessed asm).
pub fn hasAsmExt(filename: []const u8) bool {
    return mem.endsWith(u8, filename, ".s") or mem.endsWith(u8, filename, ".S");
}

/// True for shared library names: plain .so/.dll/.dylib/.tbd, or versioned
/// ELF names of the form name.so.X[.Y[.Z]] with numeric version components.
pub fn hasSharedLibraryExt(filename: []const u8) bool {
    if (mem.endsWith(u8, filename, ".so") or
        mem.endsWith(u8, filename, ".dll") or
        mem.endsWith(u8, filename, ".dylib") or
        mem.endsWith(u8, filename, ".tbd"))
    {
        return true;
    }
    // Look for .so.X, .so.X.Y, .so.X.Y.Z
    var it = mem.split(filename, ".");
    _ = it.next().?;
    var so_txt = it.next() orelse return false;
    while (!mem.eql(u8, so_txt, "so")) {
        so_txt = it.next() orelse return false;
    }
    const n1 = it.next() orelse return false;
    const n2 = it.next();
    const n3 = it.next();

    // Every present version component must parse as an unsigned integer,
    // and nothing may follow the last one.
    _ = std.fmt.parseInt(u32, n1, 10) catch return false;
    if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
    if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
    if (it.next() != null) return false;

    return true;
}

/// Map a filename to a FileExt. Note the check order: C/C++/ll/bc/asm/.h/.zig/.zir
/// are tested before shared/static/object extensions; anything else is .unknown.
pub fn classifyFileExt(filename: []const u8) FileExt {
    if (hasCExt(filename)) {
        return .c;
    } else if (hasCppExt(filename)) {
        return .cpp;
    } else if (mem.endsWith(u8, filename, ".ll")) {
        return .ll;
    } else if (mem.endsWith(u8, filename, ".bc")) {
        return .bc;
    } else if (hasAsmExt(filename)) {
        return .assembly;
    } else if (mem.endsWith(u8, filename, ".h")) {
        return .h;
    } else if (mem.endsWith(u8, filename, ".zig")) {
        return .zig;
    } else if (mem.endsWith(u8, filename, ".zir")) {
        return .zir;
    } else if (hasSharedLibraryExt(filename)) {
        return .shared_library;
    } else if (hasStaticLibraryExt(filename)) {
        return .static_library;
    } else if (hasObjectExt(filename)) {
        return .object;
    } else {
        return .unknown;
    }
}

test "classifyFileExt" {
    std.testing.expectEqual(FileExt.cpp, classifyFileExt("foo.cc"));
    std.testing.expectEqual(FileExt.unknown, classifyFileExt("foo.nim"));
    std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so"));
    std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so.1"));
    std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so.1.2"));
    std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so.1.2.3"));
    std.testing.expectEqual(FileExt.unknown, classifyFileExt("foo.so.1.2.3~"));
    std.testing.expectEqual(FileExt.zig, classifyFileExt("foo.zig"));
    std.testing.expectEqual(FileExt.zir, classifyFileExt("foo.zir"));
}

fn haveFramePointer(comp: *const Compilation) bool {
    // If you complicate this logic make sure you update the parent cache hash.
    // Right now it's not in the cache hash because the value depends on optimize_mode
    // and strip which are both already part of the hash.
    // (Body of haveFramePointer, whose signature is on the previous line.)
    // Keep the frame pointer whenever debug info is wanted.
    return switch (comp.bin_file.options.optimize_mode) {
        .Debug, .ReleaseSafe => !comp.bin_file.options.strip,
        .ReleaseSmall, .ReleaseFast => false,
    };
}

// Result of libc detection: the include search path list plus the native
// installation it came from, if any.
const LibCDirs = struct {
    libc_include_dir_list: []const []const u8,
    libc_installation: ?*const LibCInstallation,
};

/// Determine the libc include directories for `target`:
/// - not linking libc: empty list;
/// - an explicit installation was provided: use it;
/// - Zig can build libc for the target: use the bundled headers
///   (arch-os-abi, generic-<libc>, arch-os-any, any-os-any, in that order);
/// - otherwise, on the native OS, probe for a system installation.
/// All results are allocated in `arena`.
fn detectLibCIncludeDirs(
    arena: *Allocator,
    zig_lib_dir: []const u8,
    target: Target,
    is_native_os: bool,
    link_libc: bool,
    libc_installation: ?*const LibCInstallation,
) !LibCDirs {
    if (!link_libc) {
        return LibCDirs{
            .libc_include_dir_list = &[0][]u8{},
            .libc_installation = null,
        };
    }

    if (libc_installation) |lci| {
        return detectLibCFromLibCInstallation(arena, target, lci);
    }

    if (target_util.canBuildLibC(target)) {
        const generic_name = target_util.libCGenericName(target);
        // Some architectures are handled by the same set of headers.
        const arch_name = if (target.abi.isMusl()) target_util.archMuslName(target.cpu.arch) else @tagName(target.cpu.arch);
        const os_name = @tagName(target.os.tag);
        // Musl's headers are ABI-agnostic and so they all have the "musl" ABI name.
        const abi_name = if (target.abi.isMusl()) "musl" else @tagName(target.abi);
        const s = std.fs.path.sep_str;
        const arch_include_dir = try std.fmt.allocPrint(
            arena,
            "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-{}-{}",
            .{ zig_lib_dir, arch_name, os_name, abi_name },
        );
        const generic_include_dir = try std.fmt.allocPrint(
            arena,
            "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "generic-{}",
            .{ zig_lib_dir, generic_name },
        );
        const arch_os_include_dir = try std.fmt.allocPrint(
            arena,
            "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{}-{}-any",
            .{ zig_lib_dir, @tagName(target.cpu.arch), os_name },
        );
        const generic_os_include_dir = try std.fmt.allocPrint(
            arena,
            "{}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "any-{}-any",
            .{ zig_lib_dir, os_name },
        );

        // Most-specific directories first so they shadow the generic ones.
        const list = try arena.alloc([]const u8, 4);
        list[0] = arch_include_dir;
        list[1] = generic_include_dir;
        list[2] = arch_os_include_dir;
        list[3] = generic_os_include_dir;

        return LibCDirs{
            .libc_include_dir_list = list,
            .libc_installation = null,
        };
    }

    if (is_native_os) {
        const libc = try arena.create(LibCInstallation);
        libc.* = try LibCInstallation.findNative(.{ .allocator = arena });
        return detectLibCFromLibCInstallation(arena, target, libc);
    }

    return LibCDirs{
        .libc_include_dir_list = &[0][]u8{},
        .libc_installation = null,
    };
}

// Build the include dir list from a concrete libc installation: include dir,
// sys include dir (if distinct), and on Windows the SDK's "um" and "shared"
// sibling directories.
fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
    var list = std.ArrayList([]const u8).init(arena);
    try list.ensureCapacity(4);

    list.appendAssumeCapacity(lci.include_dir.?);

    const is_redundant = mem.eql(u8, lci.sys_include_dir.?, lci.include_dir.?);
    if (!is_redundant) list.appendAssumeCapacity(lci.sys_include_dir.?);

    if (target.os.tag == .windows) {
        if (std.fs.path.dirname(lci.include_dir.?)) |include_dir_parent| {
            const um_dir = try std.fs.path.join(arena, &[_][]const u8{ include_dir_parent, "um" });
            list.appendAssumeCapacity(um_dir);

            const shared_dir = try std.fs.path.join(arena, &[_][]const u8{ include_dir_parent, "shared" });
            list.appendAssumeCapacity(shared_dir);
        }
    }

    return LibCDirs{
        .libc_include_dir_list = list.items,
        .libc_installation = lci,
    };
}

/// Resolve the path to a libc CRT file: when libc is being built from source
/// the artifact comes from crt_files (asserts it was already built); otherwise
/// the path is joined from the libc installation's CRT directory.
pub fn get_libc_crt_file(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
    if (comp.wantBuildGLibCFromSource() or
        comp.wantBuildMuslFromSource() or
        comp.wantBuildMinGWFromSource())
    {
        return comp.crt_files.get(basename).?.full_object_path;
    }
    const lci = comp.bin_file.options.libc_installation orelse return error.LibCInstallationNotAvailable;
    const crt_dir_path = lci.crt_dir orelse return error.LibCInstallationMissingCRTDir;
    const full_path = try std.fs.path.join(arena, &[_][]const u8{ crt_dir_path, basename });
    return full_path;
}

// Queue the jobs required to build glibc from source.
fn addBuildingGLibCJobs(comp: *Compilation) !void {
    try comp.work_queue.write(&[_]Job{
        .{ .glibc_crt_file = .crti_o },
        .{ .glibc_crt_file = .crtn_o },
        .{ .glibc_crt_file = .scrt1_o },
        .{ .glibc_crt_file = .libc_nonshared_a },
        .{ .glibc_shared_objects = {} },
    });
}

// libc is built from source only for executables/dynamic libraries that link
// libc and have no explicit libc installation configured.
fn wantBuildLibCFromSource(comp: Compilation) bool {
    const is_exe_or_dyn_lib = switch (comp.bin_file.options.output_mode) {
        .Obj => false,
        .Lib => comp.bin_file.options.link_mode == .Dynamic,
        .Exe => true,
    };
    return comp.bin_file.options.link_libc and is_exe_or_dyn_lib and
        comp.bin_file.options.libc_installation == null;
}

fn wantBuildGLibCFromSource(comp: Compilation) bool {
    return comp.wantBuildLibCFromSource() and comp.getTarget().isGnuLibC();
}

fn wantBuildMuslFromSource(comp: Compilation) bool {
    return comp.wantBuildLibCFromSource() and comp.getTarget().isMusl() and
        !comp.getTarget().isWasm();
}

fn wantBuildMinGWFromSource(comp: Compilation) bool {
    return comp.wantBuildLibCFromSource() and comp.getTarget().isMinGW();
}

// (Cut off at the end of this chunk; the return expression continues beyond it.)
fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
    const is_exe_or_dyn_lib = switch (comp.bin_file.options.output_mode) {
        .Obj => false,
        .Lib => comp.bin_file.options.link_mode == .Dynamic,
        .Exe => true,
    };
    return comp.bin_file.options.link_libc and is_exe_or_dyn_lib and
comp.bin_file.options.libc_installation == null and target_util.libcNeedsLibUnwind(comp.getTarget()); } fn updateBuiltinZigFile(comp: *Compilation, mod: *Module) !void { const tracy = trace(@src()); defer tracy.end(); const source = try comp.generateBuiltinZigSource(comp.gpa); defer comp.gpa.free(source); try mod.zig_cache_artifact_directory.handle.writeFile("builtin.zig", source); } pub fn dump_argv(argv: []const []const u8) void { for (argv[0 .. argv.len - 1]) |arg| { std.debug.print("{} ", .{arg}); } std.debug.print("{}\n", .{argv[argv.len - 1]}); } pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) ![]u8 { const tracy = trace(@src()); defer tracy.end(); var buffer = std.ArrayList(u8).init(allocator); defer buffer.deinit(); const target = comp.getTarget(); const generic_arch_name = target.cpu.arch.genericName(); @setEvalBranchQuota(4000); try buffer.writer().print( \\usingnamespace @import("std").builtin; \\/// Deprecated \\pub const arch = Target.current.cpu.arch; \\/// Deprecated \\pub const endian = Target.current.cpu.arch.endian(); \\pub const output_mode = OutputMode.{}; \\pub const link_mode = LinkMode.{}; \\pub const is_test = {}; \\pub const single_threaded = {}; \\pub const abi = Abi.{}; \\pub const cpu: Cpu = Cpu{{ \\ .arch = .{}, \\ .model = &Target.{}.cpu.{}, \\ .features = Target.{}.featureSet(&[_]Target.{}.Feature{{ \\ , .{ @tagName(comp.bin_file.options.output_mode), @tagName(comp.bin_file.options.link_mode), comp.bin_file.options.is_test, comp.bin_file.options.single_threaded, @tagName(target.abi), @tagName(target.cpu.arch), generic_arch_name, target.cpu.model.name, generic_arch_name, generic_arch_name, }); for (target.cpu.arch.allFeaturesList()) |feature, index_usize| { const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize); const is_enabled = target.cpu.features.isEnabled(index); if (is_enabled) { // TODO some kind of "zig identifier escape" function rather than // unconditionally using @"" syntax try 
buffer.appendSlice(" .@\""); try buffer.appendSlice(feature.name); try buffer.appendSlice("\",\n"); } } try buffer.writer().print( \\ }}), \\}}; \\pub const os = Os{{ \\ .tag = .{}, \\ .version_range = .{{ , .{@tagName(target.os.tag)}, ); switch (target.os.getVersionRange()) { .none => try buffer.appendSlice(" .none = {} }\n"), .semver => |semver| try buffer.outStream().print( \\ .semver = .{{ \\ .min = .{{ \\ .major = {}, \\ .minor = {}, \\ .patch = {}, \\ }}, \\ .max = .{{ \\ .major = {}, \\ .minor = {}, \\ .patch = {}, \\ }}, \\ }}}}, \\ , .{ semver.min.major, semver.min.minor, semver.min.patch, semver.max.major, semver.max.minor, semver.max.patch, }), .linux => |linux| try buffer.outStream().print( \\ .linux = .{{ \\ .range = .{{ \\ .min = .{{ \\ .major = {}, \\ .minor = {}, \\ .patch = {}, \\ }}, \\ .max = .{{ \\ .major = {}, \\ .minor = {}, \\ .patch = {}, \\ }}, \\ }}, \\ .glibc = .{{ \\ .major = {}, \\ .minor = {}, \\ .patch = {}, \\ }}, \\ }}}}, \\ , .{ linux.range.min.major, linux.range.min.minor, linux.range.min.patch, linux.range.max.major, linux.range.max.minor, linux.range.max.patch, linux.glibc.major, linux.glibc.minor, linux.glibc.patch, }), .windows => |windows| try buffer.outStream().print( \\ .windows = .{{ \\ .min = {s}, \\ .max = {s}, \\ }}}}, \\ , .{ windows.min, windows.max }, ), } try buffer.appendSlice("};\n"); // This is so that compiler_rt and libc.zig libraries know whether they // will eventually be linked with libc. They make different decisions // about what to export depending on whether another libc will be linked // in. For example, compiler_rt will not export the __chkstk symbol if it // knows libc will provide it, and likewise c.zig will not export memcpy. 
const link_libc = comp.bin_file.options.link_libc or (comp.bin_file.options.is_compiler_rt_or_libc and comp.bin_file.options.parent_compilation_link_libc); try buffer.writer().print( \\pub const object_format = ObjectFormat.{}; \\pub const mode = Mode.{}; \\pub const link_libc = {}; \\pub const link_libcpp = {}; \\pub const have_error_return_tracing = {}; \\pub const valgrind_support = {}; \\pub const position_independent_code = {}; \\pub const position_independent_executable = {}; \\pub const strip_debug_info = {}; \\pub const code_model = CodeModel.{}; \\ , .{ @tagName(comp.bin_file.options.object_format), @tagName(comp.bin_file.options.optimize_mode), link_libc, comp.bin_file.options.link_libcpp, comp.bin_file.options.error_return_tracing, comp.bin_file.options.valgrind, comp.bin_file.options.pic, comp.bin_file.options.pie, comp.bin_file.options.strip, @tagName(comp.bin_file.options.machine_code_model), }); if (comp.bin_file.options.is_test) { try buffer.appendSlice( \\pub var test_functions: []TestFn = undefined; // overwritten later \\ ); if (comp.test_evented_io) { try buffer.appendSlice( \\pub const test_io_mode = .evented; \\ ); } else { try buffer.appendSlice( \\pub const test_io_mode = .blocking; \\ ); } } return buffer.toOwnedSlice(); } pub fn updateSubCompilation(sub_compilation: *Compilation) !void { try sub_compilation.update(); // Look for compilation errors in this sub_compilation var errors = try sub_compilation.getAllErrorsAlloc(); defer errors.deinit(sub_compilation.gpa); if (errors.list.len != 0) { for (errors.list) |full_err_msg| { switch (full_err_msg) { .src => |src| { log.err("{s}:{d}:{d}: {s}\n", .{ src.src_path, src.line + 1, src.column + 1, src.msg, }); }, .plain => |plain| { log.err("{s}", .{plain.msg}); }, } } return error.BuildingLibCObjectFailed; } } fn buildOutputFromZig( comp: *Compilation, src_basename: []const u8, output_mode: std.builtin.OutputMode, out: *?CRTFile, ) !void { const tracy = trace(@src()); defer tracy.end(); 
std.debug.assert(output_mode != .Exe); const special_sub = "std" ++ std.fs.path.sep_str ++ "special"; const special_path = try comp.zig_lib_directory.join(comp.gpa, &[_][]const u8{special_sub}); defer comp.gpa.free(special_path); var special_dir = try comp.zig_lib_directory.handle.openDir(special_sub, .{}); defer special_dir.close(); var root_pkg: Package = .{ .root_src_directory = .{ .path = special_path, .handle = special_dir, }, .root_src_path = src_basename, }; const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len]; const target = comp.getTarget(); const fixed_output_mode = if (target.cpu.arch.isWasm()) .Obj else output_mode; const bin_basename = try std.zig.binNameAlloc(comp.gpa, .{ .root_name = root_name, .target = target, .output_mode = fixed_output_mode, }); defer comp.gpa.free(bin_basename); const emit_bin = Compilation.EmitLoc{ .directory = null, // Put it in the cache directory. .basename = bin_basename, }; const optimize_mode: std.builtin.Mode = blk: { if (comp.bin_file.options.is_test) break :blk comp.bin_file.options.optimize_mode; switch (comp.bin_file.options.optimize_mode) { .Debug, .ReleaseFast, .ReleaseSafe => break :blk .ReleaseFast, .ReleaseSmall => break :blk .ReleaseSmall, } }; const sub_compilation = try Compilation.create(comp.gpa, .{ .global_cache_directory = comp.global_cache_directory, .local_cache_directory = comp.global_cache_directory, .zig_lib_directory = comp.zig_lib_directory, .target = target, .root_name = root_name, .root_pkg = &root_pkg, .output_mode = fixed_output_mode, .thread_pool = comp.thread_pool, .libc_installation = comp.bin_file.options.libc_installation, .emit_bin = emit_bin, .optimize_mode = optimize_mode, .link_mode = .Static, .function_sections = true, .want_sanitize_c = false, .want_stack_check = false, .want_valgrind = false, .want_pic = comp.bin_file.options.pic, .want_pie = comp.bin_file.options.pie, .emit_h = null, .strip = comp.bin_file.options.strip, .is_native_os = 
comp.bin_file.options.is_native_os, .is_native_abi = comp.bin_file.options.is_native_abi, .self_exe_path = comp.self_exe_path, .verbose_cc = comp.verbose_cc, .verbose_link = comp.bin_file.options.verbose_link, .verbose_tokenize = comp.verbose_tokenize, .verbose_ast = comp.verbose_ast, .verbose_ir = comp.verbose_ir, .verbose_llvm_ir = comp.verbose_llvm_ir, .verbose_cimport = comp.verbose_cimport, .verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features, .clang_passthrough_mode = comp.clang_passthrough_mode, .is_compiler_rt_or_libc = true, .parent_compilation_link_libc = comp.bin_file.options.link_libc, }); defer sub_compilation.destroy(); try sub_compilation.updateSubCompilation(); assert(out.* == null); out.* = Compilation.CRTFile{ .full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{ sub_compilation.bin_file.options.emit.?.sub_path, }), .lock = sub_compilation.bin_file.toOwnedLock(), }; } fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); const arena = &arena_allocator.allocator; // Here we use the legacy stage1 C++ compiler to compile Zig code. const mod = comp.bin_file.options.module.?; const directory = mod.zig_cache_artifact_directory; // Just an alias to make it shorter to type. const main_zig_file = try mod.root_pkg.root_src_directory.join(arena, &[_][]const u8{ mod.root_pkg.root_src_path, }); const zig_lib_dir = comp.zig_lib_directory.path.?; const builtin_zig_path = try directory.join(arena, &[_][]const u8{"builtin.zig"}); const target = comp.getTarget(); const id_symlink_basename = "stage1.id"; const libs_txt_basename = "libs.txt"; // We are about to obtain this lock, so here we give other processes a chance first. 
comp.releaseStage1Lock(); // Unlike with the self-hosted Zig module, stage1 does not support incremental compilation, // so we input all the zig source files into the cache hash system. We're going to keep // the artifact directory the same, however, so we take the same strategy as linking // does where we have a file which specifies the hash of the output directory so that we can // skip the expensive compilation step if the hash matches. var man = comp.cache_parent.obtain(); defer man.deinit(); _ = try man.addFile(main_zig_file, null); { var local_arena = std.heap.ArenaAllocator.init(comp.gpa); defer local_arena.deinit(); try addPackageTableToCacheHash(&man.hash, &local_arena, mod.root_pkg.table, .{ .files = &man }); } man.hash.add(comp.bin_file.options.valgrind); man.hash.add(comp.bin_file.options.single_threaded); man.hash.add(target.os.getVersionRange()); man.hash.add(comp.bin_file.options.dll_export_fns); man.hash.add(comp.bin_file.options.function_sections); man.hash.add(comp.bin_file.options.is_test); man.hash.add(comp.bin_file.options.emit != null); man.hash.add(comp.c_header != null); if (comp.c_header) |header| { man.hash.addEmitLoc(header.emit_loc.?); } man.hash.addOptionalEmitLoc(comp.emit_asm); man.hash.addOptionalEmitLoc(comp.emit_llvm_ir); man.hash.addOptionalEmitLoc(comp.emit_analysis); man.hash.addOptionalEmitLoc(comp.emit_docs); man.hash.add(comp.test_evented_io); man.hash.addOptionalBytes(comp.test_filter); man.hash.addOptionalBytes(comp.test_name_prefix); // Capture the state in case we come back from this branch where the hash doesn't match. const prev_hash_state = man.hash.peekBin(); const input_file_count = man.files.items.len; if (try man.hit()) { const digest = man.final(); // We use an extra hex-encoded byte here to store some flags. 
var prev_digest_buf: [digest.len + 2]u8 = undefined; const prev_digest: []u8 = Cache.readSmallFile( directory.handle, id_symlink_basename, &prev_digest_buf, ) catch |err| blk: { log.debug("stage1 {} new_digest={} error: {}", .{ mod.root_pkg.root_src_path, digest, @errorName(err) }); // Handle this as a cache miss. break :blk prev_digest_buf[0..0]; }; if (prev_digest.len >= digest.len + 2) hit: { if (!mem.eql(u8, prev_digest[0..digest.len], &digest)) break :hit; log.debug("stage1 {} digest={} match - skipping invocation", .{ mod.root_pkg.root_src_path, digest }); var flags_bytes: [1]u8 = undefined; _ = std.fmt.hexToBytes(&flags_bytes, prev_digest[digest.len..]) catch { log.warn("bad cache stage1 digest: '{s}'", .{prev_digest}); break :hit; }; if (directory.handle.readFileAlloc(comp.gpa, libs_txt_basename, 10 * 1024 * 1024)) |libs_txt| { var it = mem.tokenize(libs_txt, "\n"); while (it.next()) |lib_name| { try comp.stage1AddLinkLib(lib_name); } } else |err| switch (err) { error.FileNotFound => {}, // That's OK, it just means 0 libs. else => { log.warn("unable to read cached list of link libs: {s}", .{@errorName(err)}); break :hit; }, } comp.stage1_lock = man.toOwnedLock(); mod.stage1_flags = @bitCast(@TypeOf(mod.stage1_flags), flags_bytes[0]); return; } log.debug("stage1 {} prev_digest={} new_digest={}", .{ mod.root_pkg.root_src_path, prev_digest, digest }); man.unhit(prev_hash_state, input_file_count); } // We are about to change the output file to be different, so we invalidate the build hash now. directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) { error.FileNotFound => {}, else => |e| return e, }; const stage2_target = try arena.create(stage1.Stage2Target); stage2_target.* = .{ .arch = @enumToInt(target.cpu.arch) + 1, // skip over ZigLLVM_UnknownArch .os = @enumToInt(target.os.tag), .abi = @enumToInt(target.abi), .is_native_os = comp.bin_file.options.is_native_os, .is_native_cpu = false, // Only true when bootstrapping the compiler. 
.llvm_cpu_name = if (target.cpu.model.llvm_name) |s| s.ptr else null, .llvm_cpu_features = comp.bin_file.options.llvm_cpu_features.?, }; comp.stage1_cache_manifest = &man; const main_pkg_path = mod.root_pkg.root_src_directory.path orelse ""; const stage1_module = stage1.create( @enumToInt(comp.bin_file.options.optimize_mode), main_pkg_path.ptr, main_pkg_path.len, main_zig_file.ptr, main_zig_file.len, zig_lib_dir.ptr, zig_lib_dir.len, stage2_target, comp.bin_file.options.is_test, ) orelse return error.OutOfMemory; const emit_bin_path = if (comp.bin_file.options.emit != null) blk: { const bin_basename = try std.zig.binNameAlloc(arena, .{ .root_name = comp.bin_file.options.root_name, .target = target, .output_mode = .Obj, }); break :blk try directory.join(arena, &[_][]const u8{bin_basename}); } else ""; if (comp.c_header != null) { log.warn("-femit-h is not available in the stage1 backend; no .h file will be produced", .{}); } const emit_h_path = try stage1LocPath(arena, if (comp.c_header) |header| header.emit_loc else null, directory); const emit_asm_path = try stage1LocPath(arena, comp.emit_asm, directory); const emit_llvm_ir_path = try stage1LocPath(arena, comp.emit_llvm_ir, directory); const emit_analysis_path = try stage1LocPath(arena, comp.emit_analysis, directory); const emit_docs_path = try stage1LocPath(arena, comp.emit_docs, directory); const stage1_pkg = try createStage1Pkg(arena, "root", mod.root_pkg, null); const test_filter = comp.test_filter orelse ""[0..0]; const test_name_prefix = comp.test_name_prefix orelse ""[0..0]; const subsystem = if (comp.bin_file.options.subsystem) |s| @intToEnum(stage1.TargetSubsystem, @enumToInt(s)) else stage1.TargetSubsystem.Auto; stage1_module.* = .{ .root_name_ptr = comp.bin_file.options.root_name.ptr, .root_name_len = comp.bin_file.options.root_name.len, .emit_o_ptr = emit_bin_path.ptr, .emit_o_len = emit_bin_path.len, .emit_h_ptr = emit_h_path.ptr, .emit_h_len = emit_h_path.len, .emit_asm_ptr = emit_asm_path.ptr, 
.emit_asm_len = emit_asm_path.len, .emit_llvm_ir_ptr = emit_llvm_ir_path.ptr, .emit_llvm_ir_len = emit_llvm_ir_path.len, .emit_analysis_json_ptr = emit_analysis_path.ptr, .emit_analysis_json_len = emit_analysis_path.len, .emit_docs_ptr = emit_docs_path.ptr, .emit_docs_len = emit_docs_path.len, .builtin_zig_path_ptr = builtin_zig_path.ptr, .builtin_zig_path_len = builtin_zig_path.len, .test_filter_ptr = test_filter.ptr, .test_filter_len = test_filter.len, .test_name_prefix_ptr = test_name_prefix.ptr, .test_name_prefix_len = test_name_prefix.len, .userdata = @ptrToInt(comp), .root_pkg = stage1_pkg, .code_model = @enumToInt(comp.bin_file.options.machine_code_model), .subsystem = subsystem, .err_color = @enumToInt(comp.color), .pic = comp.bin_file.options.pic, .pie = comp.bin_file.options.pie, .link_libc = comp.bin_file.options.link_libc, .link_libcpp = comp.bin_file.options.link_libcpp, .strip = comp.bin_file.options.strip, .is_single_threaded = comp.bin_file.options.single_threaded, .dll_export_fns = comp.bin_file.options.dll_export_fns, .link_mode_dynamic = comp.bin_file.options.link_mode == .Dynamic, .valgrind_enabled = comp.bin_file.options.valgrind, .function_sections = comp.bin_file.options.function_sections, .enable_stack_probing = comp.bin_file.options.stack_check, .enable_time_report = comp.time_report, .enable_stack_report = comp.stack_report, .test_is_evented = comp.test_evented_io, .verbose_tokenize = comp.verbose_tokenize, .verbose_ast = comp.verbose_ast, .verbose_ir = comp.verbose_ir, .verbose_llvm_ir = comp.verbose_llvm_ir, .verbose_cimport = comp.verbose_cimport, .verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features, .main_progress_node = main_progress_node, .have_c_main = false, .have_winmain = false, .have_wwinmain = false, .have_winmain_crt_startup = false, .have_wwinmain_crt_startup = false, .have_dllmain_crt_startup = false, }; const inferred_lib_start_index = comp.bin_file.options.system_libs.count(); stage1_module.build_object(); if 
(comp.bin_file.options.system_libs.count() > inferred_lib_start_index) { // We need to save the inferred link libs to the cache, otherwise if we get a cache hit // next time we will be missing these libs. var libs_txt = std.ArrayList(u8).init(arena); for (comp.bin_file.options.system_libs.items()[inferred_lib_start_index..]) |entry| { try libs_txt.writer().print("{s}\n", .{entry.key}); } try directory.handle.writeFile(libs_txt_basename, libs_txt.items); } mod.stage1_flags = .{ .have_c_main = stage1_module.have_c_main, .have_winmain = stage1_module.have_winmain, .have_wwinmain = stage1_module.have_wwinmain, .have_winmain_crt_startup = stage1_module.have_winmain_crt_startup, .have_wwinmain_crt_startup = stage1_module.have_wwinmain_crt_startup, .have_dllmain_crt_startup = stage1_module.have_dllmain_crt_startup, }; stage1_module.destroy(); const digest = man.final(); // Update the small file with the digest. If it fails we can continue; it only // means that the next invocation will have an unnecessary cache miss. const stage1_flags_byte = @bitCast(u8, mod.stage1_flags); log.debug("stage1 {} final digest={} flags={x}", .{ mod.root_pkg.root_src_path, digest, stage1_flags_byte, }); var digest_plus_flags: [digest.len + 2]u8 = undefined; digest_plus_flags[0..digest.len].* = digest; assert(std.fmt.formatIntBuf(digest_plus_flags[digest.len..], stage1_flags_byte, 16, false, .{ .width = 2, .fill = '0', }) == 2); log.debug("saved digest + flags: '{s}' (byte = {}) have_winmain_crt_startup={}", .{ digest_plus_flags, stage1_flags_byte, mod.stage1_flags.have_winmain_crt_startup, }); Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest_plus_flags) catch |err| { log.warn("failed to save stage1 hash digest file: {}", .{@errorName(err)}); }; // Failure here only means an unnecessary cache miss. 
man.writeManifest() catch |err| { log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)}); }; // We hang on to this lock so that the output file path can be used without // other processes clobbering it. comp.stage1_lock = man.toOwnedLock(); } fn stage1LocPath(arena: *Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 { const loc = opt_loc orelse return ""; const directory = loc.directory orelse cache_directory; return directory.join(arena, &[_][]const u8{loc.basename}); } fn createStage1Pkg( arena: *Allocator, name: []const u8, pkg: *Package, parent_pkg: ?*stage1.Pkg, ) error{OutOfMemory}!*stage1.Pkg { const child_pkg = try arena.create(stage1.Pkg); const pkg_children = blk: { var children = std.ArrayList(*stage1.Pkg).init(arena); var it = pkg.table.iterator(); while (it.next()) |entry| { try children.append(try createStage1Pkg(arena, entry.key, entry.value, child_pkg)); } break :blk children.items; }; const src_path = try pkg.root_src_directory.join(arena, &[_][]const u8{pkg.root_src_path}); child_pkg.* = .{ .name_ptr = name.ptr, .name_len = name.len, .path_ptr = src_path.ptr, .path_len = src_path.len, .children_ptr = pkg_children.ptr, .children_len = pkg_children.len, .parent = parent_pkg, }; return child_pkg; } pub fn build_crt_file( comp: *Compilation, root_name: []const u8, output_mode: std.builtin.OutputMode, c_source_files: []const Compilation.CSourceFile, ) !void { const tracy = trace(@src()); defer tracy.end(); const target = comp.getTarget(); const basename = try std.zig.binNameAlloc(comp.gpa, .{ .root_name = root_name, .target = target, .output_mode = output_mode, }); errdefer comp.gpa.free(basename); // TODO: This is extracted into a local variable to work around a stage1 miscompilation. const emit_bin = Compilation.EmitLoc{ .directory = null, // Put it in the cache directory. 
.basename = basename, }; const sub_compilation = try Compilation.create(comp.gpa, .{ .local_cache_directory = comp.global_cache_directory, .global_cache_directory = comp.global_cache_directory, .zig_lib_directory = comp.zig_lib_directory, .target = target, .root_name = root_name, .root_pkg = null, .output_mode = output_mode, .thread_pool = comp.thread_pool, .libc_installation = comp.bin_file.options.libc_installation, .emit_bin = emit_bin, .optimize_mode = comp.bin_file.options.optimize_mode, .want_sanitize_c = false, .want_stack_check = false, .want_valgrind = false, .want_pic = comp.bin_file.options.pic, .want_pie = comp.bin_file.options.pie, .emit_h = null, .strip = comp.bin_file.options.strip, .is_native_os = comp.bin_file.options.is_native_os, .is_native_abi = comp.bin_file.options.is_native_abi, .self_exe_path = comp.self_exe_path, .c_source_files = c_source_files, .verbose_cc = comp.verbose_cc, .verbose_link = comp.bin_file.options.verbose_link, .verbose_tokenize = comp.verbose_tokenize, .verbose_ast = comp.verbose_ast, .verbose_ir = comp.verbose_ir, .verbose_llvm_ir = comp.verbose_llvm_ir, .verbose_cimport = comp.verbose_cimport, .verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features, .clang_passthrough_mode = comp.clang_passthrough_mode, .is_compiler_rt_or_libc = true, .parent_compilation_link_libc = comp.bin_file.options.link_libc, }); defer sub_compilation.destroy(); try sub_compilation.updateSubCompilation(); try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1); comp.crt_files.putAssumeCapacityNoClobber(basename, .{ .full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{ sub_compilation.bin_file.options.emit.?.sub_path, }), .lock = sub_compilation.bin_file.toOwnedLock(), }); } pub fn stage1AddLinkLib(comp: *Compilation, lib_name: []const u8) !void { // Avoid deadlocking on building import libs such as kernel32.lib // This can happen when the user uses `build-exe foo.obj -lkernel32` 
and then // when we create a sub-Compilation for zig libc, it also tries to build kernel32.lib. if (comp.bin_file.options.is_compiler_rt_or_libc) return; // This happens when an `extern "foo"` function is referenced by the stage1 backend. // If we haven't seen this library yet and we're targeting Windows, we need to queue up // a work item to produce the DLL import library for this. const gop = try comp.bin_file.options.system_libs.getOrPut(comp.gpa, lib_name); if (!gop.found_existing and comp.getTarget().os.tag == .windows) { try comp.work_queue.writeItem(.{ .windows_import_lib = comp.bin_file.options.system_libs.count() - 1, }); } }
// NOTE(review): stray filename marker from file concatenation ("src/Compilation.zig");
// the code above belongs to that file, the code below belongs to a vector behavior
// test file. Commented out so it is no longer a syntax error.
// Behavior tests for SIMD vectors (std.meta.Vector / @Vector): element access,
// array<->vector coercion, arithmetic/bitwise/comparison operators, and @splat.
// Each test runs its body both at runtime and at comptime so both evaluation
// paths are exercised. All tests are currently gated to the stage1 backend.
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const math = std.math;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectApproxEqRel = std.testing.expectApproxEqRel;
const Vector = std.meta.Vector;

test "implicit cast vector to array - bool" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            const a: Vector(4, bool) = [_]bool{ true, false, true, false };
            const result_array: [4]bool = a;
            try expect(mem.eql(bool, &result_array, &[4]bool{ true, false, true, false }));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

// Wrapping operators (+%, -%, *%, -%x) must wrap per-lane at the i32 boundaries.
test "vector wrap operators" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 };
            var x: Vector(4, i32) = [4]i32{ 1, 2147483647, 3, 4 };
            try expect(mem.eql(i32, &@as([4]i32, v +% x), &[4]i32{ -2147483648, 2147483645, 33, 44 }));
            try expect(mem.eql(i32, &@as([4]i32, v -% x), &[4]i32{ 2147483646, 2147483647, 27, 36 }));
            try expect(mem.eql(i32, &@as([4]i32, v *% x), &[4]i32{ 2147483647, 2, 90, 160 }));
            var z: Vector(4, i32) = [4]i32{ 1, 2, 3, -2147483648 };
            try expect(mem.eql(i32, &@as([4]i32, -%z), &[4]i32{ -1, -2, -3, -2147483648 }));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

// Comparison operators on vectors yield per-lane bool vectors.
test "vector bin compares with mem.eql" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 };
            var x: Vector(4, i32) = [4]i32{ 1, 2147483647, 30, 4 };
            try expect(mem.eql(bool, &@as([4]bool, v == x), &[4]bool{ false, false, true, false }));
            try expect(mem.eql(bool, &@as([4]bool, v != x), &[4]bool{ true, true, false, true }));
            try expect(mem.eql(bool, &@as([4]bool, v < x), &[4]bool{ false, true, false, false }));
            try expect(mem.eql(bool, &@as([4]bool, v > x), &[4]bool{ true, false, false, true }));
            try expect(mem.eql(bool, &@as([4]bool, v <= x), &[4]bool{ false, true, true, false }));
            try expect(mem.eql(bool, &@as([4]bool, v >= x), &[4]bool{ true, false, true, true }));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "vector int operators" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, i32) = [4]i32{ 10, 20, 30, 40 };
            var x: Vector(4, i32) = [4]i32{ 1, 2, 3, 4 };
            try expect(mem.eql(i32, &@as([4]i32, v + x), &[4]i32{ 11, 22, 33, 44 }));
            try expect(mem.eql(i32, &@as([4]i32, v - x), &[4]i32{ 9, 18, 27, 36 }));
            try expect(mem.eql(i32, &@as([4]i32, v * x), &[4]i32{ 10, 40, 90, 160 }));
            try expect(mem.eql(i32, &@as([4]i32, -v), &[4]i32{ -10, -20, -30, -40 }));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "vector float operators" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, f32) = [4]f32{ 10, 20, 30, 40 };
            var x: Vector(4, f32) = [4]f32{ 1, 2, 3, 4 };
            try expect(mem.eql(f32, &@as([4]f32, v + x), &[4]f32{ 11, 22, 33, 44 }));
            try expect(mem.eql(f32, &@as([4]f32, v - x), &[4]f32{ 9, 18, 27, 36 }));
            try expect(mem.eql(f32, &@as([4]f32, v * x), &[4]f32{ 10, 40, 90, 160 }));
            try expect(mem.eql(f32, &@as([4]f32, -x), &[4]f32{ -1, -2, -3, -4 }));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "vector bit operators" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, u8) = [4]u8{ 0b10101010, 0b10101010, 0b10101010, 0b10101010 };
            var x: Vector(4, u8) = [4]u8{ 0b11110000, 0b00001111, 0b10101010, 0b01010101 };
            try expect(mem.eql(u8, &@as([4]u8, v ^ x), &[4]u8{ 0b01011010, 0b10100101, 0b00000000, 0b11111111 }));
            try expect(mem.eql(u8, &@as([4]u8, v | x), &[4]u8{ 0b11111010, 0b10101111, 0b10101010, 0b11111111 }));
            try expect(mem.eql(u8, &@as([4]u8, v & x), &[4]u8{ 0b10100000, 0b00001010, 0b10101010, 0b00000000 }));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "implicit cast vector to array" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var a: Vector(4, i32) = [_]i32{ 1, 2, 3, 4 };
            var result_array: [4]i32 = a;
            result_array = a;
            try expect(mem.eql(i32, &result_array, &[4]i32{ 1, 2, 3, 4 }));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "array to vector" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    var foo: f32 = 3.14;
    var arr = [4]f32{ foo, 1.5, 0.0, 0.0 };
    var vec: Vector(4, f32) = arr;
    _ = vec;
}

// Element types narrower than a byte (u3, u2, u1, bool) must round-trip
// between vector and array forms.
test "vector casts of sizes not divisible by 8" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            {
                var v: Vector(4, u3) = [4]u3{ 5, 2, 3, 0 };
                var x: [4]u3 = v;
                try expect(mem.eql(u3, &x, &@as([4]u3, v)));
            }
            {
                var v: Vector(4, u2) = [4]u2{ 1, 2, 3, 0 };
                var x: [4]u2 = v;
                try expect(mem.eql(u2, &x, &@as([4]u2, v)));
            }
            {
                var v: Vector(4, u1) = [4]u1{ 1, 0, 1, 0 };
                var x: [4]u1 = v;
                try expect(mem.eql(u1, &x, &@as([4]u1, v)));
            }
            {
                var v: Vector(4, bool) = [4]bool{ false, false, true, false };
                var x: [4]bool = v;
                try expect(mem.eql(bool, &x, &@as([4]bool, v)));
            }
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "vector @splat" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        // Splats a scalar into an N-lane vector and checks type and every lane.
        fn testForT(comptime N: comptime_int, v: anytype) !void {
            const T = @TypeOf(v);
            var vec = @splat(N, v);
            try expectEqual(Vector(N, T), @TypeOf(vec));
            var as_array = @as([N]T, vec);
            for (as_array) |elem| try expectEqual(v, elem);
        }
        fn doTheTest() !void {
            // Splats with multiple-of-8 bit types that fill a 128bit vector.
            try testForT(16, @as(u8, 0xEE));
            try testForT(8, @as(u16, 0xBEEF));
            try testForT(4, @as(u32, 0xDEADBEEF));
            try testForT(2, @as(u64, 0xCAFEF00DDEADBEEF));
            try testForT(8, @as(f16, 3.1415));
            try testForT(4, @as(f32, 3.1415));
            try testForT(2, @as(f64, 3.1415));
            // Same but fill more than 128 bits.
            try testForT(16 * 2, @as(u8, 0xEE));
            try testForT(8 * 2, @as(u16, 0xBEEF));
            try testForT(4 * 2, @as(u32, 0xDEADBEEF));
            try testForT(2 * 2, @as(u64, 0xCAFEF00DDEADBEEF));
            try testForT(8 * 2, @as(f16, 3.1415));
            try testForT(4 * 2, @as(f32, 3.1415));
            try testForT(2 * 2, @as(f64, 3.1415));
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "load vector elements via comptime index" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, i32) = [_]i32{ 1, 2, 3, undefined };
            try expect(v[0] == 1);
            try expect(v[1] == 2);
            try expect(loadv(&v[2]) == 3);
        }
        fn loadv(ptr: anytype) i32 {
            return ptr.*;
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "store vector elements via comptime index" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, i32) = [_]i32{ 1, 5, 3, undefined };
            v[2] = 42;
            try expect(v[1] == 5);
            v[3] = -364;
            try expect(v[2] == 42);
            try expect(-364 == v[3]);
            storev(&v[0], 100);
            try expect(v[0] == 100);
        }
        fn storev(ptr: anytype, x: i32) void {
            ptr.* = x;
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "load vector elements via runtime index" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, i32) = [_]i32{ 1, 2, 3, undefined };
            var i: u32 = 0;
            try expect(v[i] == 1);
            i += 1;
            try expect(v[i] == 2);
            i += 1;
            try expect(v[i] == 3);
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "store vector elements via runtime index" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var v: Vector(4, i32) = [_]i32{ 1, 5, 3, undefined };
            var i: u32 = 2;
            v[i] = 1;
            try expect(v[1] == 5);
            try expect(v[2] == 1);
            i += 1;
            v[i] = -364;
            try expect(-364 == v[3]);
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "initialize vector which is a struct field" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const Vec4Obj = struct {
        data: Vector(4, f32),
    };
    const S = struct {
        fn doTheTest() !void {
            var foo = Vec4Obj{
                .data = [_]f32{ 1, 2, 3, 4 },
            };
            _ = foo;
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

test "vector comparison operators" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            {
                const v1: Vector(4, bool) = [_]bool{ true, false, true, false };
                const v2: Vector(4, bool) = [_]bool{ false, true, false, true };
                try expectEqual(@splat(4, true), v1 == v1);
                try expectEqual(@splat(4, false), v1 == v2);
                try expectEqual(@splat(4, true), v1 != v2);
                try expectEqual(@splat(4, false), v2 != v2);
            }
            {
                const v1 = @splat(4, @as(u32, 0xc0ffeeee));
                const v2: Vector(4, c_uint) = v1;
                const v3 = @splat(4, @as(u32, 0xdeadbeef));
                try expectEqual(@splat(4, true), v1 == v2);
                try expectEqual(@splat(4, false), v1 == v3);
                try expectEqual(@splat(4, true), v1 != v3);
                try expectEqual(@splat(4, false), v1 != v2);
            }
            {
                // Comptime-known LHS/RHS
                var v1: @Vector(4, u32) = [_]u32{ 2, 1, 2, 1 };
                const v2 = @splat(4, @as(u32, 2));
                const v3: @Vector(4, bool) = [_]bool{ true, false, true, false };
                try expectEqual(v3, v1 == v2);
                try expectEqual(v3, v2 == v1);
            }
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}

// NOTE(review): this final test is cut off by the end of the chunk; its body
// is reproduced verbatim up to the cut and must not be closed here.
test "vector division operators" {
    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTestDiv(comptime T: type, x: Vector(4, T), y: Vector(4, T)) !void {
            if (!comptime std.meta.trait.isSignedInt(T)) {
                const d0 = x / y;
                for (@as([4]T, d0)) |v, i| {
                    try expectEqual(x[i] / y[i], v);
                }
            }
            const d1 = @divExact(x, y);
            for (@as([4]T, d1)) |v, i| {
                try expectEqual(@divExact(x[i], y[i]), v);
            }
            const d2 = @divFloor(x, y);
            for (@as([4]T, d2)) |v, i| {
                try expectEqual(@divFloor(x[i], y[i]), v);
            }
            const d3 = @divTrunc(x, y);
            for (@as([4]T, d3)) |v, i| {
                try expectEqual(@divTrunc(x[i], y[i]), v);
            }
        }
        fn doTheTestMod(comptime T: type, x: Vector(4, T), y: Vector(4, T)) !void {
            if ((!comptime std.meta.trait.isSignedInt(T)) and @typeInfo(T) != .Float) {
                const r0 = x % y;
                for (@as([4]T, r0)) |v, i| {
                    try expectEqual(x[i] % y[i], v);
                }
            }
            const r1 = @mod(x, y);
            for (@as([4]T, r1)) |v, i| {
                try expectEqual(@mod(x[i], y[i]), v);
            }
            const r2 = @rem(x, y);
            for (@as([4]T, r2)) |v, i| {
                try expectEqual(@rem(x[i], y[i]), v);
            }
        }
        fn doTheTest() !void {
            // https://github.com/ziglang/zig/issues/4952
            if (builtin.target.os.tag != .windows) {
                try doTheTestDiv(f16, [4]f16{ 4.0, -4.0, 4.0, -4.0 }, [4]f16{ 1.0, 2.0, -1.0, -2.0 });
            }
            try doTheTestDiv(f32, [4]f32{ 4.0, -4.0, 4.0, -4.0 }, [4]f32{ 1.0, 2.0, -1.0, -2.0 });
            try doTheTestDiv(f64, [4]f64{ 4.0, -4.0, 4.0, -4.0 }, [4]f64{ 1.0, 2.0, -1.0, -2.0 });
            // https://github.com/ziglang/zig/issues/4952
            if (builtin.target.os.tag != .windows) {
                try doTheTestMod(f16, [4]f16{ 4.0, -4.0, 4.0, -4.0 }, [4]f16{ 1.0, 2.0, 0.5, 3.0 });
            }
            try doTheTestMod(f32, [4]f32{ 4.0, -4.0, 4.0, -4.0 }, [4]f32{ 1.0, 2.0, 0.5, 3.0 });
            try doTheTestMod(f64, [4]f64{ 4.0, -4.0, 4.0, -4.0 }, [4]f64{ 1.0, 2.0, 0.5, 3.0 });
            try doTheTestDiv(i8, [4]i8{ 4, -4, 4, -4 }, [4]i8{ 1, 2, -1, -2 });
            try doTheTestDiv(i16, [4]i16{ 4, -4, 4, -4 }, [4]i16{ 1, 2, -1, -2 });
            try doTheTestDiv(i32, [4]i32{ 4, -4, 4, -4 }, [4]i32{ 1, 2, -1, -2 });
            try doTheTestDiv(i64, [4]i64{ 4, -4, 4, -4 }, [4]i64{ 1, 2, -1, -2 });
            try doTheTestMod(i8, [4]i8{ 4, -4, 4, -4 }, [4]i8{ 1, 2, 4, 8 });
            try doTheTestMod(i16, [4]i16{ 4, -4, 4, -4 }, [4]i16{ 1, 2, 4, 8 });
            try doTheTestMod(i32, [4]i32{ 4, -4, 4, -4 }, [4]i32{ 1, 2, 4, 8 });
            try doTheTestMod(i64, [4]i64{ 4, -4, 4, -4 }, [4]i64{ 1, 2, 4, 8 });
            try doTheTestDiv(u8,
[4]u8{ 1, 2, 4, 8 }, [4]u8{ 1, 1, 2, 4 }); try doTheTestDiv(u16, [4]u16{ 1, 2, 4, 8 }, [4]u16{ 1, 1, 2, 4 }); try doTheTestDiv(u32, [4]u32{ 1, 2, 4, 8 }, [4]u32{ 1, 1, 2, 4 }); try doTheTestDiv(u64, [4]u64{ 1, 2, 4, 8 }, [4]u64{ 1, 1, 2, 4 }); try doTheTestMod(u8, [4]u8{ 1, 2, 4, 8 }, [4]u8{ 1, 1, 2, 4 }); try doTheTestMod(u16, [4]u16{ 1, 2, 4, 8 }, [4]u16{ 1, 1, 2, 4 }); try doTheTestMod(u32, [4]u32{ 1, 2, 4, 8 }, [4]u32{ 1, 1, 2, 4 }); try doTheTestMod(u64, [4]u64{ 1, 2, 4, 8 }, [4]u64{ 1, 1, 2, 4 }); } }; try S.doTheTest(); comptime try S.doTheTest(); } test "vector bitwise not operator" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO const S = struct { fn doTheTestNot(comptime T: type, x: Vector(4, T)) !void { var y = ~x; for (@as([4]T, y)) |v, i| { try expectEqual(~x[i], v); } } fn doTheTest() !void { try doTheTestNot(u8, [_]u8{ 0, 2, 4, 255 }); try doTheTestNot(u16, [_]u16{ 0, 2, 4, 255 }); try doTheTestNot(u32, [_]u32{ 0, 2, 4, 255 }); try doTheTestNot(u64, [_]u64{ 0, 2, 4, 255 }); try doTheTestNot(u8, [_]u8{ 0, 2, 4, 255 }); try doTheTestNot(u16, [_]u16{ 0, 2, 4, 255 }); try doTheTestNot(u32, [_]u32{ 0, 2, 4, 255 }); try doTheTestNot(u64, [_]u64{ 0, 2, 4, 255 }); } }; try S.doTheTest(); comptime try S.doTheTest(); } test "vector shift operators" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO const S = struct { fn doTheTestShift(x: anytype, y: anytype) !void { const N = @typeInfo(@TypeOf(x)).Array.len; const TX = @typeInfo(@TypeOf(x)).Array.child; const TY = @typeInfo(@TypeOf(y)).Array.child; var xv = @as(Vector(N, TX), x); var yv = @as(Vector(N, TY), y); var z0 = xv >> yv; for (@as([N]TX, z0)) |v, i| { try expectEqual(x[i] >> y[i], v); } var z1 = xv << yv; for (@as([N]TX, z1)) |v, i| { try expectEqual(x[i] << y[i], v); } } fn doTheTestShiftExact(x: anytype, y: anytype, dir: enum { Left, Right }) !void { const N = @typeInfo(@TypeOf(x)).Array.len; const TX = @typeInfo(@TypeOf(x)).Array.child; const TY = 
@typeInfo(@TypeOf(y)).Array.child; var xv = @as(Vector(N, TX), x); var yv = @as(Vector(N, TY), y); var z = if (dir == .Left) @shlExact(xv, yv) else @shrExact(xv, yv); for (@as([N]TX, z)) |v, i| { const check = if (dir == .Left) x[i] << y[i] else x[i] >> y[i]; try expectEqual(check, v); } } fn doTheTest() !void { try doTheTestShift([_]u8{ 0, 2, 4, math.maxInt(u8) }, [_]u3{ 2, 0, 2, 7 }); try doTheTestShift([_]u16{ 0, 2, 4, math.maxInt(u16) }, [_]u4{ 2, 0, 2, 15 }); try doTheTestShift([_]u24{ 0, 2, 4, math.maxInt(u24) }, [_]u5{ 2, 0, 2, 23 }); try doTheTestShift([_]u32{ 0, 2, 4, math.maxInt(u32) }, [_]u5{ 2, 0, 2, 31 }); try doTheTestShift([_]u64{ 0xfe, math.maxInt(u64) }, [_]u6{ 0, 63 }); try doTheTestShift([_]i8{ 0, 2, 4, math.maxInt(i8) }, [_]u3{ 2, 0, 2, 7 }); try doTheTestShift([_]i16{ 0, 2, 4, math.maxInt(i16) }, [_]u4{ 2, 0, 2, 7 }); try doTheTestShift([_]i24{ 0, 2, 4, math.maxInt(i24) }, [_]u5{ 2, 0, 2, 7 }); try doTheTestShift([_]i32{ 0, 2, 4, math.maxInt(i32) }, [_]u5{ 2, 0, 2, 7 }); try doTheTestShift([_]i64{ 0xfe, math.maxInt(i64) }, [_]u6{ 0, 63 }); try doTheTestShiftExact([_]u8{ 0, 1, 1 << 7, math.maxInt(u8) ^ 1 }, [_]u3{ 4, 0, 7, 1 }, .Right); try doTheTestShiftExact([_]u16{ 0, 1, 1 << 15, math.maxInt(u16) ^ 1 }, [_]u4{ 4, 0, 15, 1 }, .Right); try doTheTestShiftExact([_]u24{ 0, 1, 1 << 23, math.maxInt(u24) ^ 1 }, [_]u5{ 4, 0, 23, 1 }, .Right); try doTheTestShiftExact([_]u32{ 0, 1, 1 << 31, math.maxInt(u32) ^ 1 }, [_]u5{ 4, 0, 31, 1 }, .Right); try doTheTestShiftExact([_]u64{ 1 << 63, 1 }, [_]u6{ 63, 0 }, .Right); try doTheTestShiftExact([_]u8{ 0, 1, 1, math.maxInt(u8) ^ (1 << 7) }, [_]u3{ 4, 0, 7, 1 }, .Left); try doTheTestShiftExact([_]u16{ 0, 1, 1, math.maxInt(u16) ^ (1 << 15) }, [_]u4{ 4, 0, 15, 1 }, .Left); try doTheTestShiftExact([_]u24{ 0, 1, 1, math.maxInt(u24) ^ (1 << 23) }, [_]u5{ 4, 0, 23, 1 }, .Left); try doTheTestShiftExact([_]u32{ 0, 1, 1, math.maxInt(u32) ^ (1 << 31) }, [_]u5{ 4, 0, 31, 1 }, .Left); try doTheTestShiftExact([_]u64{ 1 << 
63, 1 }, [_]u6{ 0, 63 }, .Left); } }; switch (builtin.target.cpu.arch) { .i386, .aarch64, .aarch64_be, .aarch64_32, .arm, .armeb, .thumb, .thumbeb, .mips, .mipsel, .mips64, .mips64el, .riscv64, .sparcv9, => { // LLVM miscompiles on this architecture // https://github.com/ziglang/zig/issues/4951 return error.SkipZigTest; }, else => {}, } try S.doTheTest(); comptime try S.doTheTest(); } test "vector reduce operation" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn testReduce(comptime op: std.builtin.ReduceOp, x: anytype, expected: anytype) !void { const N = @typeInfo(@TypeOf(x)).Array.len; const TX = @typeInfo(@TypeOf(x)).Array.child; var r = @reduce(op, @as(@Vector(N, TX), x)); switch (@typeInfo(TX)) { .Int, .Bool => try expect(expected == r), .Float => { const expected_nan = math.isNan(expected); const got_nan = math.isNan(r); if (expected_nan and got_nan) { // Do this check explicitly as two NaN values are never // equal. 
} else { const F = @TypeOf(expected); const tolerance = @sqrt(math.epsilon(TX)); try expect(std.math.approxEqRel(F, expected, r, tolerance)); } }, else => unreachable, } } fn doTheTest() !void { try testReduce(.Add, [4]i16{ -9, -99, -999, -9999 }, @as(i32, -11106)); try testReduce(.Add, [4]u16{ 9, 99, 999, 9999 }, @as(u32, 11106)); try testReduce(.Add, [4]i32{ -9, -99, -999, -9999 }, @as(i32, -11106)); try testReduce(.Add, [4]u32{ 9, 99, 999, 9999 }, @as(u32, 11106)); try testReduce(.Add, [4]i64{ -9, -99, -999, -9999 }, @as(i64, -11106)); try testReduce(.Add, [4]u64{ 9, 99, 999, 9999 }, @as(u64, 11106)); try testReduce(.Add, [4]i128{ -9, -99, -999, -9999 }, @as(i128, -11106)); try testReduce(.Add, [4]u128{ 9, 99, 999, 9999 }, @as(u128, 11106)); try testReduce(.Add, [4]f16{ -1.9, 5.1, -60.3, 100.0 }, @as(f16, 42.9)); try testReduce(.Add, [4]f32{ -1.9, 5.1, -60.3, 100.0 }, @as(f32, 42.9)); try testReduce(.Add, [4]f64{ -1.9, 5.1, -60.3, 100.0 }, @as(f64, 42.9)); try testReduce(.And, [4]bool{ true, false, true, true }, @as(bool, false)); try testReduce(.And, [4]u1{ 1, 0, 1, 1 }, @as(u1, 0)); try testReduce(.And, [4]u16{ 0xffff, 0xff55, 0xaaff, 0x1010 }, @as(u16, 0x10)); try testReduce(.And, [4]u32{ 0xffffffff, 0xffff5555, 0xaaaaffff, 0x10101010 }, @as(u32, 0x1010)); try testReduce(.And, [4]u64{ 0xffffffff, 0xffff5555, 0xaaaaffff, 0x10101010 }, @as(u64, 0x1010)); try testReduce(.Min, [4]i16{ -1, 2, 3, 4 }, @as(i16, -1)); try testReduce(.Min, [4]u16{ 1, 2, 3, 4 }, @as(u16, 1)); try testReduce(.Min, [4]i32{ 1234567, -386, 0, 3 }, @as(i32, -386)); try testReduce(.Min, [4]u32{ 99, 9999, 9, 99999 }, @as(u32, 9)); // LLVM 11 ERROR: Cannot select type // https://github.com/ziglang/zig/issues/7138 if (builtin.target.cpu.arch != .aarch64) { try testReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386)); try testReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9)); } try testReduce(.Min, [4]i128{ 1234567, -386, 0, 3 }, @as(i128, -386)); try testReduce(.Min, [4]u128{ 99, 
9999, 9, 99999 }, @as(u128, 9)); try testReduce(.Min, [4]f16{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f16, -100.0)); try testReduce(.Min, [4]f32{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f32, -100.0)); try testReduce(.Min, [4]f64{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f64, -100.0)); try testReduce(.Max, [4]i16{ -1, 2, 3, 4 }, @as(i16, 4)); try testReduce(.Max, [4]u16{ 1, 2, 3, 4 }, @as(u16, 4)); try testReduce(.Max, [4]i32{ 1234567, -386, 0, 3 }, @as(i32, 1234567)); try testReduce(.Max, [4]u32{ 99, 9999, 9, 99999 }, @as(u32, 99999)); // LLVM 11 ERROR: Cannot select type // https://github.com/ziglang/zig/issues/7138 if (builtin.target.cpu.arch != .aarch64) { try testReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567)); try testReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999)); } try testReduce(.Max, [4]i128{ 1234567, -386, 0, 3 }, @as(i128, 1234567)); try testReduce(.Max, [4]u128{ 99, 9999, 9, 99999 }, @as(u128, 99999)); try testReduce(.Max, [4]f16{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f16, 10.0e9)); try testReduce(.Max, [4]f32{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f32, 10.0e9)); try testReduce(.Max, [4]f64{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f64, 10.0e9)); try testReduce(.Mul, [4]i16{ -1, 2, 3, 4 }, @as(i16, -24)); try testReduce(.Mul, [4]u16{ 1, 2, 3, 4 }, @as(u16, 24)); try testReduce(.Mul, [4]i32{ -9, -99, -999, 999 }, @as(i32, -889218891)); try testReduce(.Mul, [4]u32{ 1, 2, 3, 4 }, @as(u32, 24)); try testReduce(.Mul, [4]i64{ 9, 99, 999, 9999 }, @as(i64, 8900199891)); try testReduce(.Mul, [4]u64{ 9, 99, 999, 9999 }, @as(u64, 8900199891)); try testReduce(.Mul, [4]i128{ -9, -99, -999, 9999 }, @as(i128, -8900199891)); try testReduce(.Mul, [4]u128{ 9, 99, 999, 9999 }, @as(u128, 8900199891)); try testReduce(.Mul, [4]f16{ -1.9, 5.1, -60.3, 100.0 }, @as(f16, 58430.7)); try testReduce(.Mul, [4]f32{ -1.9, 5.1, -60.3, 100.0 }, @as(f32, 58430.7)); try testReduce(.Mul, [4]f64{ -1.9, 5.1, -60.3, 100.0 }, @as(f64, 58430.7)); try testReduce(.Or, [4]bool{ false, true, false, 
false }, @as(bool, true)); try testReduce(.Or, [4]u1{ 0, 1, 0, 0 }, @as(u1, 1)); try testReduce(.Or, [4]u16{ 0xff00, 0xff00, 0xf0, 0xf }, ~@as(u16, 0)); try testReduce(.Or, [4]u32{ 0xffff0000, 0xff00, 0xf0, 0xf }, ~@as(u32, 0)); try testReduce(.Or, [4]u64{ 0xffff0000, 0xff00, 0xf0, 0xf }, @as(u64, 0xffffffff)); try testReduce(.Or, [4]u128{ 0xffff0000, 0xff00, 0xf0, 0xf }, @as(u128, 0xffffffff)); try testReduce(.Xor, [4]bool{ true, true, true, false }, @as(bool, true)); try testReduce(.Xor, [4]u1{ 1, 1, 1, 0 }, @as(u1, 1)); try testReduce(.Xor, [4]u16{ 0x0000, 0x3333, 0x8888, 0x4444 }, ~@as(u16, 0)); try testReduce(.Xor, [4]u32{ 0x00000000, 0x33333333, 0x88888888, 0x44444444 }, ~@as(u32, 0)); try testReduce(.Xor, [4]u64{ 0x00000000, 0x33333333, 0x88888888, 0x44444444 }, @as(u64, 0xffffffff)); try testReduce(.Xor, [4]u128{ 0x00000000, 0x33333333, 0x88888888, 0x44444444 }, @as(u128, 0xffffffff)); // Test the reduction on vectors containing NaNs. const f16_nan = math.nan(f16); const f32_nan = math.nan(f32); const f64_nan = math.nan(f64); try testReduce(.Add, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan); try testReduce(.Add, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan); try testReduce(.Add, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan); // LLVM 11 ERROR: Cannot select type // https://github.com/ziglang/zig/issues/7138 if (false) { try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan); try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan); try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan); try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan); try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan); try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan); } try testReduce(.Mul, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan); try testReduce(.Mul, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan); try testReduce(.Mul, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan); } }; try S.doTheTest(); comptime 
try S.doTheTest(); } test "mask parameter of @shuffle is comptime scope" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO const __v4hi = std.meta.Vector(4, i16); var v4_a = __v4hi{ 0, 0, 0, 0 }; var v4_b = __v4hi{ 0, 0, 0, 0 }; var shuffled: __v4hi = @shuffle(i16, v4_a, v4_b, std.meta.Vector(4, i32){ std.zig.c_translation.shuffleVectorIndex(0, @typeInfo(@TypeOf(v4_a)).Vector.len), std.zig.c_translation.shuffleVectorIndex(0, @typeInfo(@TypeOf(v4_a)).Vector.len), std.zig.c_translation.shuffleVectorIndex(0, @typeInfo(@TypeOf(v4_a)).Vector.len), std.zig.c_translation.shuffleVectorIndex(0, @typeInfo(@TypeOf(v4_a)).Vector.len), }); _ = shuffled; } test "saturating add" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { const u8x3 = std.meta.Vector(3, u8); try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 255, 254, 1 } +| u8x3{ 1, 2, 255 })); const i8x3 = std.meta.Vector(3, i8); try expectEqual(i8x3{ 127, 127, 127 }, (i8x3{ 127, 126, 1 } +| i8x3{ 1, 2, 127 })); } }; try S.doTheTest(); comptime try S.doTheTest(); } test "saturating subtraction" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { const u8x3 = std.meta.Vector(3, u8); try expectEqual(u8x3{ 0, 0, 0 }, (u8x3{ 0, 0, 0 } -| u8x3{ 255, 255, 255 })); } }; try S.doTheTest(); comptime try S.doTheTest(); } test "saturating multiplication" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO // TODO: once #9660 has been solved, remove this line if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest; const S = struct { fn doTheTest() !void { const u8x3 = std.meta.Vector(3, u8); try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 2, 2, 2 } *| u8x3{ 255, 255, 255 })); } }; try S.doTheTest(); comptime try S.doTheTest(); } test "saturating shift-left" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() 
!void { const u8x3 = std.meta.Vector(3, u8); try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 255, 255, 255 } <<| u8x3{ 1, 1, 1 })); } }; try S.doTheTest(); comptime try S.doTheTest(); }
test/behavior/vector.zig
const std = @import("std");
const concepts = @import("../../lib.zig").concepts;
const assert = std.debug.assert;

/// Comptime constructor for the `getty.de.Visitor` interface.
///
/// `Context` is the implementing type and `Value` is the Zig value the
/// visitor ultimately produces. Each `visit*` parameter is a comptime
/// function whose required signature is pinned by the
/// `@TypeOf(struct { fn f(...) ... }.f)` trick: the anonymous `f` is never
/// called (its body is `unreachable`); only its *type* is used so the
/// caller's implementation is signature-checked at comptime.
pub fn Visitor(
    comptime Context: type,
    comptime Value: type,
    // Invoked with a boolean input.
    comptime visitBool: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type, _: bool) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked with an enum or enum-literal input (checked in the wrapper).
    comptime visitEnum: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type, _: anytype) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked with a floating-point input (checked in the wrapper).
    comptime visitFloat: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type, _: anytype) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked with an integer input (checked in the wrapper).
    comptime visitInt: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type, _: anytype) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked with a map access object (concept-checked in the wrapper's return type).
    comptime visitMap: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type, _: anytype) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked when the input is null; takes no input value.
    comptime visitNull: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked with a sequence access object (concept-checked in the wrapper's return type).
    comptime visitSeq: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type, _: anytype) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked with a string input (checked in the wrapper).
    comptime visitString: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type, _: anytype) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
    // Invoked with a deserializer when an optional is present; note this one
    // takes the deserializer *instance*, not a Deserializer type.
    comptime visitSome: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, deserializer: anytype) @TypeOf(deserializer).Error!Value {
            unreachable;
        }
    }.f),
    // Invoked for void input; takes no input value.
    comptime visitVoid: @TypeOf(struct {
        fn f(_: Context, _: ?std.mem.Allocator, comptime Deserializer: type) Deserializer.Error!Value {
            unreachable;
        }
    }.f),
) type {
    return struct {
        pub const @"getty.de.Visitor" = struct {
            context: Context,

            const Self = @This();

            // Re-export the comptime parameter so users can query the
            // visitor's produced type as `Visitor.Value`.
            pub const Value = Value;

            // NOTE(review): every method below shares its name with the
            // comptime function parameter it forwards to (e.g. `visitBool`
            // calls `visitBool`). Confirm on the targeted Zig version that
            // the identifier resolves to the outer parameter rather than to
            // the method itself (which would be self-recursion/ambiguity).

            pub fn visitBool(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type, input: bool) Deserializer.Error!Value {
                return try visitBool(self.context, allocator, Deserializer, input);
            }

            pub fn visitEnum(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type, input: anytype) Deserializer.Error!Value {
                // Reject non-enum inputs with a readable compile error.
                comptime {
                    switch (@typeInfo(@TypeOf(input))) {
                        .Enum, .EnumLiteral => {},
                        else => @compileError("expected enum or enum literal, found `" ++ @typeName(@TypeOf(input)) ++ "`"),
                    }
                }

                return try visitEnum(self.context, allocator, Deserializer, input);
            }

            pub fn visitFloat(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type, input: anytype) Deserializer.Error!Value {
                // Reject non-float inputs with a readable compile error.
                comptime {
                    switch (@typeInfo(@TypeOf(input))) {
                        .Float, .ComptimeFloat => {},
                        else => @compileError("expected floating-point, found `" ++ @typeName(@TypeOf(input)) ++ "`"),
                    }
                }

                return try visitFloat(self.context, allocator, Deserializer, input);
            }

            pub fn visitInt(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type, input: anytype) Deserializer.Error!Value {
                // Reject non-integer inputs with a readable compile error.
                comptime {
                    switch (@typeInfo(@TypeOf(input))) {
                        .Int, .ComptimeInt => {},
                        else => @compileError("expected integer, found `" ++ @typeName(@TypeOf(input)) ++ "`"),
                    }
                }

                return try visitInt(self.context, allocator, Deserializer, input);
            }

            // The labeled-block return type runs the `getty.de.Map` concept
            // check on `map` before yielding the real return type.
            pub fn visitMap(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type, map: anytype) blk: {
                concepts.@"getty.de.Map"(@TypeOf(map));
                break :blk Deserializer.Error!Value;
            } {
                return try visitMap(self.context, allocator, Deserializer, map);
            }

            pub fn visitNull(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type) Deserializer.Error!Value {
                return try visitNull(self.context, allocator, Deserializer);
            }

            /// The visitor is responsible for visiting the entire sequence. Note
            /// that this implies that `seq` must be able to identify
            /// the end of a sequence when it is encountered.
            pub fn visitSeq(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type, seq: anytype) blk: {
                concepts.@"getty.de.Seq"(@TypeOf(seq));
                break :blk Deserializer.Error!Value;
            } {
                return try visitSeq(self.context, allocator, Deserializer, seq);
            }

            // Takes the deserializer instance directly; the concept check in
            // the return type verifies it satisfies `getty.Deserializer`.
            pub fn visitSome(self: Self, allocator: ?std.mem.Allocator, deserializer: anytype) blk: {
                concepts.@"getty.Deserializer"(@TypeOf(deserializer));
                break :blk @TypeOf(deserializer).Error!Value;
            } {
                return try visitSome(self.context, allocator, deserializer);
            }

            /// The visitor is responsible for visiting the entire slice.
            pub fn visitString(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type, input: anytype) Deserializer.Error!Value {
                // Accept anything std.meta.trait considers a Zig string.
                comptime {
                    if (!std.meta.trait.isZigString(@TypeOf(input))) {
                        @compileError("expected string, found `" ++ @typeName(@TypeOf(input)) ++ "`");
                    }
                }

                return try visitString(self.context, allocator, Deserializer, input);
            }

            pub fn visitVoid(self: Self, allocator: ?std.mem.Allocator, comptime Deserializer: type) Deserializer.Error!Value {
                return try visitVoid(self.context, allocator, Deserializer);
            }
        };

        /// Wrap a context value into the interface struct.
        pub fn visitor(ctx: Context) @"getty.de.Visitor" {
            return .{ .context = ctx };
        }
    };
}
src/de/interface/visitor.zig
// Entire file is intentionally disabled: nothing below ever compiles.
// NOTE(review): a bare builtin call at container scope may itself be a parse
// error on some Zig versions — confirm, or wrap in `comptime { ... }`.
@compileError("abandoned prototype playground - unused, untested");
const std = @import("std");

// specialises on type like f32, i32 etc.
// NOTE(review): parameter is missing a type annotation — should be
// `comptime val_type: type` for this to parse on current Zig.
pub fn typed(comptime val_type) type {
    return struct {
        // helper for detecting generic struct type
        const StructType = enum {
            column_vector,
            row_matrix,
            column_matrix,
        };

        // column vector
        pub fn Vector(comptime len : usize) type {
            // `len` is usize, so `<= 0` can only ever mean `== 0`.
            if (len <= 0) @compileError("Invalid vector length!");
            return struct {
                const Self = @This();
                pub const ValType = val_type;
                pub const Type = StructType.column_vector;
                // NOTE(review): decl `len` shadows the function parameter
                // `len` — self-referential; rename one of them.
                pub const len = len;
                pub const rows = len;
                pub const cols = 1;
                data : std.meta.Vector(len, ValType) = undefined,
                //data : [len]ValType,
                pub fn add(a : Self, b : Self) Self {
                    var res = a;
                    // NOTE(review): `add` multiplies instead of adding, and
                    // `res.b` does not exist — presumably meant
                    // `res.data += b.data;`.
                    res.data *= res.b;
                    return res;
                }
            };
        }

        // row major matrix
        pub fn Matrix(comptime rows_ : usize, comptime cols_ : usize) type {
            //if (rows_ <= 0 or cols <= 0) @compileError("Matrix can't be of size 0!");
            return struct {
                const Self = @This();
                pub const ValType = val_type;
                pub const Type = StructType.row_matrix;
                pub const rows = rows_;
                // NOTE(review): bug — assigns `rows_`; should be `cols_`.
                pub const cols = rows_;
                pub const len = rows * cols;
                // Matches memory layout of opengl & directx.
                // matrix:
                // 00 01 02
                // 10 11 12
                // is memory as such: { 00, 01, 02, 10, 11, 12 }
                // therefore it is indexed row major, however see vector is indexed COLUMN major
                // NOTE(review): field `rows` collides with the container decl
                // `rows` above — one of them must be renamed.
                rows : [cols]std.meta.Vector(rows, ValType) = undefined,
                pub fn mult(a : Self, b : anytype) Self {
                    // NOTE(review): `a.cols`/`b.rows` access decls through
                    // instances, `b.type` is not a valid field/decl access
                    // (presumably `@TypeOf(b).Type`), `comptime switch` wraps
                    // runtime logic, and `for (rows) |*row|` iterates a
                    // comptime integer rather than the row array. This whole
                    // function needs a rewrite before the file is revived.
                    comptime if (a.cols != b.rows) @compileError("Matrix multiplication requires that columns in A equals the number of rows in B!");
                    comptime switch (b.type) {
                        .column_vector => {
                            var result = a;
                            for (rows) |*row| {
                                row.* *= b.data;
                            }
                            return result;
                        },
                        .row_matrix => {
                            // TODO: read, learn, implement : https://www.cs.utexas.edu/users/pingali/CS378/2008sp/papers/gotoPaper.pdf
                            // NOTE(review): "jet" typo in the message below is
                            // inside a string literal, left untouched here.
                            @compileError("Not jet implemented!");
                        },
                    }
                }
            };
        }

        pub fn ColumnMatrix(comptime rows_ : usize, comptime cols_ : usize) type {
            return struct {
                const Self = @This();
                pub const ValType = val_type;
                // NOTE(review): a *column* matrix tagged `row_matrix` —
                // presumably should be `StructType.column_matrix`.
                pub const Type = StructType.row_matrix;
                pub const rows = rows_;
                // NOTE(review): same copy/paste bug as Matrix — should be `cols_`.
                pub const cols = rows_;
                pub const len = rows * cols;
                cols : [rows]std.meta.Vector(cols, ValType) = undefined,
            };
        }
    };
}
src/vecmath.zig
const std = @import("std");

/// Indentation-aware output helper with a small custom comptime format
/// language, used for emitting generated code.
///
/// Recognized specifiers: `{auto}`, `{bool}`, `{usize}`, `{str}`,
/// `{number_literal}`; anything else is dispatched to
/// `outer_self.printArgValue`. `{{` and `}}` emit literal braces.
/// Indentation is driven by the format strings themselves: a trailing `{\n`
/// indents, and a line whose next format string starts with `}` dedents.
///
/// NOTE(review): `var` as a parameter type is pre-0.6 Zig syntax (now
/// `anytype`), and `std.io.StreamSource.OutStream` is a similarly old API —
/// this file targets an old compiler; confirm before modernizing.
pub const PrintHelper = struct {
    // Destination stream for all output.
    out: std.io.StreamSource.OutStream,
    // Current indent depth, in indent units (not spaces).
    indentation: usize,
    // True when the previous print ended with '\n', meaning the next print
    // must first emit leading indentation.
    indent_next: bool,

    pub fn init(out: std.io.StreamSource.OutStream) PrintHelper {
        return .{
            .out = out,
            .indentation = 0,
            .indent_next = false,
        };
    }

    // this should only be called when no error has happened (an error might
    // leave the indentation at a positive value), so don't use it with defer
    pub fn finish(self: *PrintHelper) void {
        std.debug.assert(self.indentation == 0);
    }

    /// Format `fmt` with `args` to the output stream, handling indentation.
    /// `outer_self` supplies `printArgValue` for caller-defined specifiers.
    /// The format string is parsed entirely at comptime via `inline while`.
    pub fn print(self: *PrintHelper, outer_self: var, comptime fmt: []const u8, args: var) !void {
        // Emit pending indentation (and apply a dedent if this chunk starts
        // with a closing brace) before writing anything.
        if (self.indent_next) {
            self.indent_next = false;
            if (fmt.len > 0 and fmt[0] == '}') {
                self.indentation -= 1;
            }
            if (fmt.len > 0 and fmt[0] == '\n') {
                // don't indent blank lines
            } else {
                var i: usize = 0;
                while (i < self.indentation) : (i += 1) {
                    try self.out.print(" ", .{});
                }
            }
        }
        comptime var arg_index: usize = 0;
        comptime var i: usize = 0;
        // Comptime scan of the format string; each branch advances `i`.
        inline while (i < fmt.len) {
            // `}}` escape -> literal '}'.
            if (fmt[i] == '}' and i + 1 < fmt.len and fmt[i + 1] == '}') {
                try self.out.writeByte('}');
                i += 2;
                continue;
            }
            if (fmt[i] == '{') {
                i += 1;
                // `{{` escape -> literal '{'.
                if (i < fmt.len and fmt[i] == '{') {
                    try self.out.writeByte('{');
                    i += 1;
                    continue;
                }
                // find the closing brace
                const start = i;
                inline while (i < fmt.len) : (i += 1) {
                    if (fmt[i] == '}') break;
                }
                if (i == fmt.len) {
                    @compileError("`{` must be followed by `}`");
                }
                const arg_format = fmt[start..i];
                i += 1;
                // Consume the next positional argument for this specifier.
                const arg = args[arg_index];
                arg_index += 1;
                if (comptime std.mem.eql(u8, arg_format, "auto")) {
                    try self.out.print("{}", .{arg});
                } else if (comptime std.mem.eql(u8, arg_format, "bool")) {
                    try self.out.print("{}", .{@as(bool, arg)});
                } else if (comptime std.mem.eql(u8, arg_format, "usize")) {
                    try self.out.print("{}", .{@as(usize, arg)});
                } else if (comptime std.mem.eql(u8, arg_format, "str")) {
                    try self.out.writeAll(arg);
                } else if (comptime std.mem.eql(u8, arg_format, "number_literal")) {
                    try self.out.writeAll(arg.verbatim);
                    // ensure a decimal is present, so that in generated zig code it's interpreted as a float
                    // (otherwise expressions like '1 / 10' would mistakenly do integer division).
                    // first check if this is actually a number literal, because builtin constants go
                    // into `verbatim` by name (e.g. `std.math.pi`).
                    if (arg.verbatim[0] >= '0' and arg.verbatim[0] <= '9') {
                        if (std.mem.indexOfScalar(u8, arg.verbatim, '.') == null) {
                            try self.out.writeAll(".0");
                        }
                    }
                } else {
                    // Unknown specifier: defer to the caller's own printer.
                    try outer_self.printArgValue(arg_format, arg);
                }
            } else {
                try self.out.writeByte(fmt[i]);
                i += 1;
            }
        }
        // A trailing newline arms indentation for the next print; a trailing
        // `{` before it opens a new indented scope.
        if (fmt.len >= 1 and fmt[fmt.len - 1] == '\n') {
            self.indent_next = true;
            if (fmt.len >= 2 and fmt[fmt.len - 2] == '{') {
                self.indentation += 1;
            }
        }
    }
};
src/zangscript/print_helper.zig
// Direct3D 9 constants — these appear to be machine-generated Win32 API
// bindings (values presumably mirror the D3D9 SDK headers such as
// d3d9types.h/d3d9caps.h — TODO confirm against the generator/SDK).
pub const D3DRTYPECOUNT = @as(u32, 8);
// Clip-status plane flags.
pub const D3DCS_LEFT = @as(i32, 1);
pub const D3DCS_RIGHT = @as(i32, 2);
pub const D3DCS_TOP = @as(i32, 4);
pub const D3DCS_BOTTOM = @as(i32, 8);
pub const D3DCS_FRONT = @as(i32, 16);
pub const D3DCS_BACK = @as(i32, 32);
pub const D3DCS_PLANE0 = @as(i32, 64);
pub const D3DCS_PLANE1 = @as(i32, 128);
pub const D3DCS_PLANE2 = @as(i32, 256);
pub const D3DCS_PLANE3 = @as(i32, 512);
pub const D3DCS_PLANE4 = @as(i32, 1024);
pub const D3DCS_PLANE5 = @as(i32, 2048);
pub const D3D_MAX_SIMULTANEOUS_RENDERTARGETS = @as(u32, 4);
pub const D3DWRAP_W = @as(i32, 4);
// Sampler/texture-stage constants.
pub const D3DDMAPSAMPLER = @as(u32, 256);
pub const D3DVERTEXTEXTURESAMPLER0 = @as(u32, 257);
pub const D3DVERTEXTEXTURESAMPLER1 = @as(u32, 258);
pub const D3DVERTEXTEXTURESAMPLER2 = @as(u32, 259);
pub const D3DVERTEXTEXTURESAMPLER3 = @as(u32, 260);
pub const D3DTSS_TCI_SPHEREMAP = @as(u32, 262144);
pub const D3DTA_TEMP = @as(u32, 5);
pub const D3DTA_CONSTANT = @as(u32, 6);
// Flexible vertex format (FVF) flags.
pub const D3DFVF_XYZW = @as(u32, 16386);
pub const D3DFVF_PSIZE = @as(u32, 32);
pub const D3DFVF_LASTBETA_UBYTE4 = @as(u32, 4096);
pub const D3DFVF_LASTBETA_D3DCOLOR = @as(u32, 32768);
// Vertex declaration limits.
pub const MAXD3DDECLUSAGEINDEX = @as(u32, 15);
pub const MAXD3DDECLLENGTH = @as(u32, 64);
pub const D3DSTREAMSOURCE_INDEXEDDATA = @as(u32, 1073741824);
pub const D3DSTREAMSOURCE_INSTANCEDATA = @as(u32, 2147483648);
// Shader instruction-token bit layout (masks and shift amounts).
pub const D3DSI_OPCODE_MASK = @as(u32, 65535);
pub const D3DSI_INSTLENGTH_MASK = @as(u32, 251658240);
pub const D3DSI_INSTLENGTH_SHIFT = @as(u32, 24);
pub const D3DSI_COISSUE = @as(u32, 1073741824);
pub const D3DSP_OPCODESPECIFICCONTROL_MASK = @as(u32, 16711680);
pub const D3DSP_OPCODESPECIFICCONTROL_SHIFT = @as(u32, 16);
pub const D3DSHADER_COMPARISON_SHIFT = @as(u32, 16);
pub const D3DSP_DCL_USAGE_SHIFT = @as(u32, 0);
pub const D3DSP_DCL_USAGE_MASK = @as(u32, 15);
pub const D3DSP_DCL_USAGEINDEX_SHIFT = @as(u32, 16);
pub const D3DSP_DCL_USAGEINDEX_MASK = @as(u32, 983040);
pub const D3DSP_TEXTURETYPE_SHIFT = @as(u32, 27);
pub const D3DSP_TEXTURETYPE_MASK = @as(u32, 2013265920);
pub const D3DSP_REGNUM_MASK = @as(u32, 2047);
pub const D3DSP_WRITEMASK_0 = @as(u32, 65536);
pub const D3DSP_WRITEMASK_1 = @as(u32, 131072);
pub const D3DSP_WRITEMASK_2 = @as(u32, 262144);
pub const D3DSP_WRITEMASK_3 = @as(u32, 524288);
pub const D3DSP_WRITEMASK_ALL = @as(u32, 983040);
pub const D3DSP_DSTMOD_SHIFT = @as(u32, 20);
pub const D3DSP_DSTMOD_MASK = @as(u32, 15728640);
pub const D3DSP_DSTSHIFT_SHIFT = @as(u32, 24);
pub const D3DSP_DSTSHIFT_MASK = @as(u32, 251658240);
pub const D3DSP_REGTYPE_SHIFT = @as(u32, 28);
pub const D3DSP_REGTYPE_SHIFT2 = @as(u32, 8);
pub const D3DSP_REGTYPE_MASK = @as(u32, 1879048192);
pub const D3DSP_REGTYPE_MASK2 = @as(u32, 6144);
pub const D3DVS_ADDRESSMODE_SHIFT = @as(u32, 13);
pub const D3DSHADER_ADDRESSMODE_SHIFT = @as(u32, 13);
pub const D3DVS_SWIZZLE_SHIFT = @as(u32, 16);
pub const D3DVS_SWIZZLE_MASK = @as(u32, 16711680);
pub const D3DSP_SWIZZLE_SHIFT = @as(u32, 16);
pub const D3DSP_SWIZZLE_MASK = @as(u32, 16711680);
pub const D3DSP_SRCMOD_SHIFT = @as(u32, 24);
pub const D3DSP_SRCMOD_MASK = @as(u32, 251658240);
pub const D3DSP_MIN_PRECISION_SHIFT = @as(u32, 14);
pub const D3DSP_MIN_PRECISION_MASK = @as(u32, 49152);
pub const D3DSI_COMMENTSIZE_SHIFT = @as(u32, 16);
pub const D3DSI_COMMENTSIZE_MASK = @as(u32, 2147418112);
// Presentation parameters/flags.
pub const D3DPRESENT_RATE_DEFAULT = @as(u32, 0);
pub const D3DPRESENTFLAG_LOCKABLE_BACKBUFFER = @as(u32, 1);
pub const D3DPRESENTFLAG_DISCARD_DEPTHSTENCIL = @as(u32, 2);
pub const D3DPRESENTFLAG_DEVICECLIP = @as(u32, 4);
pub const D3DPRESENTFLAG_VIDEO = @as(u32, 16);
pub const D3DPRESENTFLAG_NOAUTOROTATE = @as(u32, 32);
pub const D3DPRESENTFLAG_UNPRUNEDMODE = @as(u32, 64);
pub const D3DPRESENTFLAG_OVERLAY_LIMITEDRGB = @as(u32, 128);
pub const D3DPRESENTFLAG_OVERLAY_YCbCr_BT709 = @as(u32, 256);
pub const D3DPRESENTFLAG_OVERLAY_YCbCr_xvYCC = @as(u32, 512);
pub const D3DPRESENTFLAG_RESTRICTED_CONTENT = @as(u32, 1024);
pub const D3DPRESENTFLAG_RESTRICT_SHARED_RESOURCE_DRIVER = @as(u32, 2048);
// Resource usage flags (note: i32, unlike most flags above).
pub const D3DUSAGE_RENDERTARGET = @as(i32, 1);
pub const D3DUSAGE_DEPTHSTENCIL = @as(i32, 2);
pub const D3DUSAGE_DYNAMIC = @as(i32, 512);
pub const D3DUSAGE_NONSECURE = @as(i32, 8388608);
pub const D3DUSAGE_AUTOGENMIPMAP = @as(i32, 1024);
pub const D3DUSAGE_DMAP = @as(i32, 16384);
pub const D3DUSAGE_QUERY_LEGACYBUMPMAP = @as(i32, 32768);
pub const D3DUSAGE_QUERY_SRGBREAD = @as(i32, 65536);
pub const D3DUSAGE_QUERY_FILTER = @as(i32, 131072);
pub const D3DUSAGE_QUERY_SRGBWRITE = @as(i32, 262144);
pub const D3DUSAGE_QUERY_POSTPIXELSHADER_BLENDING = @as(i32, 524288);
pub const D3DUSAGE_QUERY_VERTEXTEXTURE = @as(i32, 1048576);
pub const D3DUSAGE_QUERY_WRAPANDMIP = @as(i32, 2097152);
pub const D3DUSAGE_WRITEONLY = @as(i32, 8);
pub const D3DUSAGE_SOFTWAREPROCESSING = @as(i32, 16);
pub const D3DUSAGE_DONOTCLIP = @as(i32, 32);
pub const D3DUSAGE_POINTS = @as(i32, 64);
pub const D3DUSAGE_RTPATCHES = @as(i32, 128);
pub const D3DUSAGE_NPATCHES = @as(i32, 256);
pub const D3DUSAGE_TEXTAPI = @as(i32, 268435456);
pub const D3DUSAGE_RESTRICTED_CONTENT = @as(i32, 2048);
pub const D3DUSAGE_RESTRICT_SHARED_RESOURCE = @as(i32, 8192);
pub const D3DUSAGE_RESTRICT_SHARED_RESOURCE_DRIVER = @as(i32, 4096);
// Resource lock flags.
pub const D3DLOCK_READONLY = @as(i32, 16);
pub const D3DLOCK_DISCARD = @as(i32, 8192);
pub const D3DLOCK_NOOVERWRITE = @as(i32, 4096);
pub const D3DLOCK_NOSYSLOCK = @as(i32, 2048);
pub const D3DLOCK_DONOTWAIT = @as(i32, 16384);
pub const D3DLOCK_NO_DIRTY_UPDATE = @as(i32, 32768);
// Misc device/query limits.
pub const MAX_DEVICE_IDENTIFIER_STRING = @as(u32, 512);
pub const D3DISSUE_END = @as(u32, 1);
pub const D3DISSUE_BEGIN = @as(u32, 2);
pub const D3DGETDATA_FLUSH = @as(u32, 1);
pub const D3DCOMPOSERECTS_MAXNUMRECTS = @as(u32, 65535);
pub const D3DCONVOLUTIONMONO_MAXWIDTH = @as(u32, 7);
pub const D3DCONVOLUTIONMONO_MAXHEIGHT = @as(u32, 7);
pub const D3DFMT_A1_SURFACE_MAXWIDTH = @as(u32, 8192);
pub const D3DFMT_A1_SURFACE_MAXHEIGHT = @as(u32, 2048);
// Resource priority values.
pub const D3D9_RESOURCE_PRIORITY_MINIMUM = @as(u32, 671088640);
pub const D3D9_RESOURCE_PRIORITY_LOW = @as(u32, 1342177280);
pub const D3D9_RESOURCE_PRIORITY_NORMAL = @as(u32, 2013265920);
pub const D3D9_RESOURCE_PRIORITY_HIGH = @as(u32, 2684354560);
pub const D3D9_RESOURCE_PRIORITY_MAXIMUM = @as(u32, 3355443200);

pub const D3D_OMAC_SIZE = @as(u32, 16);

// Authenticated-channel query GUIDs (D3DAUTHENTICATEDQUERY_*).
pub const D3DAUTHENTICATEDQUERY_PROTECTION = Guid.initString("a84eb584-c495-48aa-b94d-8bd2d6fbce05");
pub const D3DAUTHENTICATEDQUERY_CHANNELTYPE = Guid.initString("bc1b18a5-b1fb-42ab-bd94-b5828b4bf7be");
pub const D3DAUTHENTICATEDQUERY_DEVICEHANDLE = Guid.initString("ec1c539d-8cff-4e2a-bcc4-f5692f99f480");
pub const D3DAUTHENTICATEDQUERY_CRYPTOSESSION = Guid.initString("2634499e-d018-4d74-ac17-7f724059528d");
pub const D3DAUTHENTICATEDQUERY_RESTRICTEDSHAREDRESOURCEPROCESSCOUNT = Guid.initString("0db207b3-9450-46a6-82de-1b96d44f9cf2");
pub const D3DAUTHENTICATEDQUERY_RESTRICTEDSHAREDRESOURCEPROCESS = Guid.initString("649bbadb-f0f4-4639-a15b-24393fc3abac");
pub const D3DAUTHENTICATEDQUERY_UNRESTRICTEDPROTECTEDSHAREDRESOURCECOUNT = Guid.initString("012f0bd6-e662-4474-befd-aa53e5143c6d");
pub const D3DAUTHENTICATEDQUERY_OUTPUTIDCOUNT = Guid.initString("2c042b5e-8c07-46d5-aabe-8f75cbad4c31");
pub const D3DAUTHENTICATEDQUERY_OUTPUTID = Guid.initString("839ddca3-9b4e-41e4-b053-892bd2a11ee7");
pub const D3DAUTHENTICATEDQUERY_ACCESSIBILITYATTRIBUTES = Guid.initString("6214d9d2-432c-4abb-9fce-216eea269e3b");
pub const D3DAUTHENTICATEDQUERY_ENCRYPTIONWHENACCESSIBLEGUIDCOUNT = Guid.initString("b30f7066-203c-4b07-93fc-ceaafd61241e");
pub const D3DAUTHENTICATEDQUERY_ENCRYPTIONWHENACCESSIBLEGUID = Guid.initString("f83a5958-e986-4bda-beb0-411f6a7a01b7");
pub const D3DAUTHENTICATEDQUERY_CURRENTENCRYPTIONWHENACCESSIBLE = Guid.initString("ec1791c7-dad3-4f15-9ec3-faa93d60d4f0");

// Authenticated-channel configure GUIDs (D3DAUTHENTICATEDCONFIGURE_*).
pub const D3DAUTHENTICATEDCONFIGURE_INITIALIZE = Guid.initString("06114bdb-3523-470a-8dca-fbc2845154f0");
pub const D3DAUTHENTICATEDCONFIGURE_PROTECTION = Guid.initString("50455658-3f47-4362-bf99-bfdfcde9ed29");
pub const D3DAUTHENTICATEDCONFIGURE_CRYPTOSESSION = Guid.initString("6346cc54-2cfc-4ad4-8224-d15837de7700");
pub const D3DAUTHENTICATEDCONFIGURE_SHAREDRESOURCE = Guid.initString("0772d047-1b40-48e8-9ca6-b5f510de9f01");
pub const D3DAUTHENTICATEDCONFIGURE_ENCRYPTIONWHENACCESSIBLE = Guid.initString("41fff286-6ae0-4d43-9d55-a46e9efd158a");

// Vertex shader 2.0 capability bits and limits.
pub const D3DVS20CAPS_PREDICATION = @as(u32, 1);
pub const D3DVS20_MAX_DYNAMICFLOWCONTROLDEPTH = @as(u32, 24);
pub const D3DVS20_MIN_DYNAMICFLOWCONTROLDEPTH = @as(u32, 0);
pub const D3DVS20_MAX_NUMTEMPS = @as(u32, 32);
pub const D3DVS20_MIN_NUMTEMPS = @as(u32, 12);
pub const D3DVS20_MAX_STATICFLOWCONTROLDEPTH = @as(u32, 4);
pub const D3DVS20_MIN_STATICFLOWCONTROLDEPTH = @as(u32, 1);

// Pixel shader 2.0 capability bits and limits.
// Note: PS 2.0 MIN_STATICFLOWCONTROLDEPTH is 0, unlike VS 2.0 where it is 1.
pub const D3DPS20CAPS_ARBITRARYSWIZZLE = @as(u32, 1);
pub const D3DPS20CAPS_GRADIENTINSTRUCTIONS = @as(u32, 2);
pub const D3DPS20CAPS_PREDICATION = @as(u32, 4);
pub const D3DPS20CAPS_NODEPENDENTREADLIMIT = @as(u32, 8);
pub const D3DPS20CAPS_NOTEXINSTRUCTIONLIMIT = @as(u32, 16);
pub const D3DPS20_MAX_DYNAMICFLOWCONTROLDEPTH = @as(u32, 24);
pub const D3DPS20_MIN_DYNAMICFLOWCONTROLDEPTH = @as(u32, 0);
pub const D3DPS20_MAX_NUMTEMPS = @as(u32, 32);
pub const D3DPS20_MIN_NUMTEMPS = @as(u32, 12);
pub const D3DPS20_MAX_STATICFLOWCONTROLDEPTH = @as(u32, 4);
pub const D3DPS20_MIN_STATICFLOWCONTROLDEPTH = @as(u32, 0);
pub const D3DPS20_MAX_NUMINSTRUCTIONSLOTS = @as(u32, 512);
pub const D3DPS20_MIN_NUMINSTRUCTIONSLOTS = @as(u32, 96);

// Shader model 3.0 instruction-slot bounds.
pub const D3DMIN30SHADERINSTRUCTIONS = @as(u32, 512);
pub const D3DMAX30SHADERINSTRUCTIONS = @as(u32, 32768);

// Overlay capability flags.
pub const D3DOVERLAYCAPS_FULLRANGERGB = @as(u32, 1);
pub const D3DOVERLAYCAPS_LIMITEDRANGERGB = @as(u32, 2);
pub const D3DOVERLAYCAPS_YCbCr_BT601 = @as(u32, 4);
pub const D3DOVERLAYCAPS_YCbCr_BT709 = @as(u32, 8);
pub const D3DOVERLAYCAPS_YCbCr_BT601_xvYCC = @as(u32, 16);
pub const D3DOVERLAYCAPS_YCbCr_BT709_xvYCC = @as(u32, 32);
pub const D3DOVERLAYCAPS_STRETCHX = @as(u32, 64);
pub const D3DOVERLAYCAPS_STRETCHY = @as(u32, 128);

// Content-protection capability flags.
pub const D3DCPCAPS_SOFTWARE = @as(u32, 1);
pub const D3DCPCAPS_HARDWARE = @as(u32, 2);
pub const D3DCPCAPS_PROTECTIONALWAYSON = @as(u32, 4);
pub const D3DCPCAPS_PARTIALDECRYPTION = @as(u32, 8);
pub const D3DCPCAPS_CONTENTKEY = @as(u32, 16);
pub const D3DCPCAPS_FRESHENSESSIONKEY = @as(u32, 32);
pub const D3DCPCAPS_ENCRYPTEDREADBACK = @as(u32, 64);
pub const D3DCPCAPS_ENCRYPTEDREADBACKKEY = @as(u32, 128);
pub const D3DCPCAPS_SEQUENTIAL_CTR_IV = @as(u32, 256);
pub const D3DCPCAPS_ENCRYPTSLICEDATAONLY = @as(u32, 512);

// Crypto-session type and key-exchange GUIDs.
pub const D3DCRYPTOTYPE_AES128_CTR = Guid.initString("9b6bd711-4f74-41c9-9e7b-0be2d7d93b4f");
pub const D3DCRYPTOTYPE_PROPRIETARY = Guid.initString("ab4e9afd-1d1c-46e6-a72f-0869917b0de8");
pub const D3DKEYEXCHANGE_RSAES_OAEP = Guid.initString("c1949895-d72a-4a1d-8e5d-ed857d171520");
pub const D3DKEYEXCHANGE_DXVA = Guid.initString("43d3775c-38e5-4924-8d86-d3fccf153e9b");

// Device capability flags (Caps, Caps2, Caps3).
pub const D3DCAPS_OVERLAY = @as(i32, 2048);
pub const D3DCAPS_READ_SCANLINE = @as(i32, 131072);
pub const D3DCAPS2_FULLSCREENGAMMA = @as(i32, 131072);
pub const D3DCAPS2_CANCALIBRATEGAMMA = @as(i32, 1048576);
pub const D3DCAPS2_RESERVED = @as(i32, 33554432);
pub const D3DCAPS2_CANMANAGERESOURCE = @as(i32, 268435456);
pub const D3DCAPS2_DYNAMICTEXTURES = @as(i32, 536870912);
pub const D3DCAPS2_CANAUTOGENMIPMAP = @as(i32, 1073741824);
pub const D3DCAPS2_CANSHARERESOURCE = @as(i32, -2147483648);
pub const D3DCAPS3_RESERVED = @as(i32, -2147483617);
pub const D3DCAPS3_ALPHA_FULLSCREEN_FLIP_OR_DISCARD = @as(i32, 32);
pub const D3DCAPS3_LINEAR_TO_SRGB_PRESENTATION = @as(i32, 128);
pub const D3DCAPS3_COPY_TO_VIDMEM = @as(i32, 256);
pub const D3DCAPS3_COPY_TO_SYSTEMMEM = @as(i32, 512);
pub const D3DCAPS3_DXVAHD = @as(i32, 1024);
pub const D3DCAPS3_DXVAHD_LIMITED = @as(i32, 2048);

// Presentation interval flags.
pub const D3DPRESENT_INTERVAL_DEFAULT = @as(i32, 0);
pub const D3DPRESENT_INTERVAL_ONE = @as(i32, 1);
pub const D3DPRESENT_INTERVAL_TWO = @as(i32, 2);
pub const D3DPRESENT_INTERVAL_THREE = @as(i32, 4);
pub const D3DPRESENT_INTERVAL_FOUR = @as(i32, 8);
pub const D3DPRESENT_INTERVAL_IMMEDIATE = @as(i32, -2147483648);

// Cursor capability flags.
pub const D3DCURSORCAPS_COLOR = @as(i32, 1);
pub const D3DCURSORCAPS_LOWRES = @as(i32, 2);

// Device capability flags (DevCaps).
pub const D3DDEVCAPS_PUREDEVICE = @as(i32, 1048576);
pub const D3DDEVCAPS_QUINTICRTPATCHES = @as(i32, 2097152);
pub const D3DDEVCAPS_RTPATCHES = @as(i32, 4194304);
pub const D3DDEVCAPS_RTPATCHHANDLEZERO = @as(i32, 8388608);
pub const D3DDEVCAPS_NPATCHES = @as(i32, 16777216);

// Miscellaneous primitive capability flags.
pub const D3DPMISCCAPS_COLORWRITEENABLE = @as(i32, 128);
pub const D3DPMISCCAPS_CLIPPLANESCALEDPOINTS = @as(i32, 256);
pub const D3DPMISCCAPS_CLIPTLVERTS = @as(i32, 512);
pub const D3DPMISCCAPS_TSSARGTEMP = @as(i32, 1024);
pub const D3DPMISCCAPS_BLENDOP = @as(i32, 2048);
pub const D3DPMISCCAPS_NULLREFERENCE = @as(i32, 4096);
pub const D3DPMISCCAPS_INDEPENDENTWRITEMASKS = @as(i32, 16384);
pub const D3DPMISCCAPS_PERSTAGECONSTANT = @as(i32, 32768);
pub const D3DPMISCCAPS_FOGANDSPECULARALPHA = @as(i32, 65536);
pub const D3DPMISCCAPS_SEPARATEALPHABLEND = @as(i32, 131072);
pub const D3DPMISCCAPS_MRTINDEPENDENTBITDEPTHS = @as(i32, 262144);
pub const D3DPMISCCAPS_MRTPOSTPIXELSHADERBLENDING = @as(i32, 524288);
pub const D3DPMISCCAPS_FOGVERTEXCLAMPED = @as(i32, 1048576);
pub const D3DPMISCCAPS_POSTBLENDSRGBCONVERT = @as(i32, 2097152);

// Line-drawing capability flags.
pub const D3DLINECAPS_TEXTURE = @as(i32, 1);
pub const D3DLINECAPS_ZTEST = @as(i32, 2);
pub const D3DLINECAPS_BLEND = @as(i32, 4);
pub const D3DLINECAPS_ALPHACMP = @as(i32, 8);
pub const D3DLINECAPS_FOG = @as(i32, 16);
pub const D3DLINECAPS_ANTIALIAS = @as(i32, 32);

// Rasterizer capability flags.
pub const D3DPRASTERCAPS_COLORPERSPECTIVE = @as(i32, 4194304);
pub const D3DPRASTERCAPS_SCISSORTEST = @as(i32, 16777216);
pub const D3DPRASTERCAPS_SLOPESCALEDEPTHBIAS = @as(i32, 33554432);
pub const D3DPRASTERCAPS_DEPTHBIAS = @as(i32, 67108864);
pub const D3DPRASTERCAPS_MULTISAMPLE_TOGGLE = @as(i32, 134217728);

// Blend capability flag (continued on the next group).
pub const D3DPBLENDCAPS_BLENDFACTOR = @as(i32, 8192);
// Blend capability flags.
pub const D3DPBLENDCAPS_SRCCOLOR2 = @as(i32, 16384);
pub const D3DPBLENDCAPS_INVSRCCOLOR2 = @as(i32, 32768);

// Texture capability flags.
pub const D3DPTEXTURECAPS_VOLUMEMAP = @as(i32, 8192);
pub const D3DPTEXTURECAPS_MIPMAP = @as(i32, 16384);
pub const D3DPTEXTURECAPS_MIPVOLUMEMAP = @as(i32, 32768);
pub const D3DPTEXTURECAPS_MIPCUBEMAP = @as(i32, 65536);
pub const D3DPTEXTURECAPS_CUBEMAP_POW2 = @as(i32, 131072);
pub const D3DPTEXTURECAPS_VOLUMEMAP_POW2 = @as(i32, 262144);
pub const D3DPTEXTURECAPS_NOPROJECTEDBUMPENV = @as(i32, 2097152);

// Texture filter capability flags.
pub const D3DPTFILTERCAPS_MINFPYRAMIDALQUAD = @as(i32, 2048);
pub const D3DPTFILTERCAPS_MINFGAUSSIANQUAD = @as(i32, 4096);
pub const D3DPTFILTERCAPS_CONVOLUTIONMONO = @as(i32, 262144);
pub const D3DPTFILTERCAPS_MAGFPYRAMIDALQUAD = @as(i32, 134217728);
pub const D3DPTFILTERCAPS_MAGFGAUSSIANQUAD = @as(i32, 268435456);

// Texture addressing / stencil / texture-op / FVF / vertex-processing caps.
pub const D3DPTADDRESSCAPS_MIRRORONCE = @as(i32, 32);
pub const D3DSTENCILCAPS_TWOSIDED = @as(i32, 256);
pub const D3DTEXOPCAPS_MULTIPLYADD = @as(i32, 16777216);
pub const D3DTEXOPCAPS_LERP = @as(i32, 33554432);
pub const D3DFVFCAPS_PSIZE = @as(i32, 1048576);
pub const D3DVTXPCAPS_TWEENING = @as(i32, 64);
pub const D3DVTXPCAPS_TEXGEN_SPHEREMAP = @as(i32, 256);
pub const D3DVTXPCAPS_NO_TEXGEN_NONLOCALVIEWER = @as(i32, 512);

// Device capability flags (DevCaps2).
pub const D3DDEVCAPS2_STREAMOFFSET = @as(i32, 1);
pub const D3DDEVCAPS2_DMAPNPATCH = @as(i32, 2);
pub const D3DDEVCAPS2_ADAPTIVETESSRTPATCH = @as(i32, 4);
pub const D3DDEVCAPS2_ADAPTIVETESSNPATCH = @as(i32, 8);
pub const D3DDEVCAPS2_CAN_STRETCHRECT_FROM_TEXTURES = @as(i32, 16);
pub const D3DDEVCAPS2_PRESAMPLEDDMAPNPATCH = @as(i32, 32);
pub const D3DDEVCAPS2_VERTEXELEMENTSCANSHARESTREAMOFFSET = @as(i32, 64);

// Declaration-type capability flags.
pub const D3DDTCAPS_UBYTE4 = @as(i32, 1);
pub const D3DDTCAPS_UBYTE4N = @as(i32, 2);
pub const D3DDTCAPS_SHORT2N = @as(i32, 4);
pub const D3DDTCAPS_SHORT4N = @as(i32, 8);
pub const D3DDTCAPS_USHORT2N = @as(i32, 16);
pub const D3DDTCAPS_USHORT4N = @as(i32, 32);
pub const D3DDTCAPS_UDEC3 = @as(i32, 64);
pub const D3DDTCAPS_DEC3N = @as(i32, 128);
pub const D3DDTCAPS_FLOAT16_2 = @as(i32, 256);
pub const D3DDTCAPS_FLOAT16_4 = @as(i32, 512);

pub const D3DSPD_IUNKNOWN = @as(i32, 1);

// Device creation flags (D3DCREATE_*).
pub const D3DCREATE_FPU_PRESERVE = @as(i32, 2);
pub const D3DCREATE_MULTITHREADED = @as(i32, 4);
pub const D3DCREATE_PUREDEVICE = @as(i32, 16);
pub const D3DCREATE_SOFTWARE_VERTEXPROCESSING = @as(i32, 32);
pub const D3DCREATE_HARDWARE_VERTEXPROCESSING = @as(i32, 64);
pub const D3DCREATE_MIXED_VERTEXPROCESSING = @as(i32, 128);
pub const D3DCREATE_DISABLE_DRIVER_MANAGEMENT = @as(i32, 256);
pub const D3DCREATE_ADAPTERGROUP_DEVICE = @as(i32, 512);

// Adapter / enumeration / back-buffer constants.
pub const D3DADAPTER_DEFAULT = @as(u32, 0);
pub const D3DENUM_NO_DRIVERVERSION = @as(i32, 4);
pub const D3DPRESENT_BACK_BUFFERS_MAX = @as(i32, 3);

// Gamma-ramp (D3DSGR_*) and cursor flags.
pub const D3DSGR_NO_CALIBRATION = @as(i32, 0);
pub const D3DSGR_CALIBRATE = @as(i32, 1);
pub const D3DCURSOR_IMMEDIATE_UPDATE = @as(i32, 1);

// Present flags.
pub const D3DPRESENT_DONOTWAIT = @as(i32, 1);
pub const D3DPRESENT_LINEAR_CONTENT = @as(i32, 2);

// HRESULT facility value and SDK version numbers.
pub const _FACD3D = @as(u32, 2166);
pub const D3D_SDK_VERSION = @as(u32, 32);
pub const D3D9b_SDK_VERSION = @as(u32, 31);

// Extended device creation flags.
pub const D3DCREATE_DISABLE_DRIVER_MANAGEMENT_EX = @as(i32, 1024);
pub const D3DCREATE_NOWINDOWCHANGES = @as(i32, 2048);
pub const D3DCREATE_DISABLE_PSGP_THREADING = @as(i32, 8192);
pub const D3DCREATE_ENABLE_PRESENTSTATS = @as(i32, 16384);
pub const D3DCREATE_DISABLE_PRINTSCREEN = @as(i32, 32768);
pub const D3DCREATE_SCREENSAVER = @as(i32, 268435456);

pub const D3DENUM_WHQL_LEVEL = @as(i32, 2);
pub const D3DPRESENT_BACK_BUFFERS_MAX_EX = @as(i32, 30);

// Extended present flags.
pub const D3DPRESENT_DONOTFLIP = @as(i32, 4);
pub const D3DPRESENT_FLIPRESTART = @as(i32, 8);
pub const D3DPRESENT_VIDEO_RESTRICT_TO_MONITOR = @as(i32, 16);
pub const D3DPRESENT_UPDATEOVERLAYONLY = @as(i32, 32);
pub const D3DPRESENT_HIDEOVERLAY = @as(i32, 64);
pub const D3DPRESENT_UPDATECOLORKEY = @as(i32, 128);
pub const D3DPRESENT_FORCEIMMEDIATE = @as(i32, 256);
//-------------------------------------------------------------------------------- // Section: Types (151) //-------------------------------------------------------------------------------- pub const D3DCOLORVALUE = extern struct { r: f32, g: f32, b: f32, a: f32, }; pub const D3DRECT = extern struct { x1: i32, y1: i32, x2: i32, y2: i32, }; pub const D3DVIEWPORT9 = extern struct { X: u32, Y: u32, Width: u32, Height: u32, MinZ: f32, MaxZ: f32, }; pub const D3DCLIPSTATUS9 = extern struct { ClipUnion: u32, ClipIntersection: u32, }; pub const D3DMATERIAL9 = extern struct { Diffuse: D3DCOLORVALUE, Ambient: D3DCOLORVALUE, Specular: D3DCOLORVALUE, Emissive: D3DCOLORVALUE, Power: f32, }; pub const D3DLIGHTTYPE = enum(i32) { POINT = 1, SPOT = 2, DIRECTIONAL = 3, FORCE_DWORD = 2147483647, }; pub const D3DLIGHT_POINT = D3DLIGHTTYPE.POINT; pub const D3DLIGHT_SPOT = D3DLIGHTTYPE.SPOT; pub const D3DLIGHT_DIRECTIONAL = D3DLIGHTTYPE.DIRECTIONAL; pub const D3DLIGHT_FORCE_DWORD = D3DLIGHTTYPE.FORCE_DWORD; pub const D3DLIGHT9 = extern struct { Type: D3DLIGHTTYPE, Diffuse: D3DCOLORVALUE, Specular: D3DCOLORVALUE, Ambient: D3DCOLORVALUE, Position: D3DVECTOR, Direction: D3DVECTOR, Range: f32, Falloff: f32, Attenuation0: f32, Attenuation1: f32, Attenuation2: f32, Theta: f32, Phi: f32, }; pub const D3DSHADEMODE = enum(i32) { FLAT = 1, GOURAUD = 2, PHONG = 3, FORCE_DWORD = 2147483647, }; pub const D3DSHADE_FLAT = D3DSHADEMODE.FLAT; pub const D3DSHADE_GOURAUD = D3DSHADEMODE.GOURAUD; pub const D3DSHADE_PHONG = D3DSHADEMODE.PHONG; pub const D3DSHADE_FORCE_DWORD = D3DSHADEMODE.FORCE_DWORD; pub const D3DFILLMODE = enum(i32) { POINT = 1, WIREFRAME = 2, SOLID = 3, FORCE_DWORD = 2147483647, }; pub const D3DFILL_POINT = D3DFILLMODE.POINT; pub const D3DFILL_WIREFRAME = D3DFILLMODE.WIREFRAME; pub const D3DFILL_SOLID = D3DFILLMODE.SOLID; pub const D3DFILL_FORCE_DWORD = D3DFILLMODE.FORCE_DWORD; pub const D3DBLEND = enum(u32) { ZERO = 1, ONE = 2, SRCCOLOR = 3, INVSRCCOLOR = 4, SRCALPHA = 5, INVSRCALPHA = 
6, DESTALPHA = 7, INVDESTALPHA = 8, DESTCOLOR = 9, INVDESTCOLOR = 10, SRCALPHASAT = 11, BOTHSRCALPHA = 12, BOTHINVSRCALPHA = 13, BLENDFACTOR = 14, INVBLENDFACTOR = 15, SRCCOLOR2 = 16, INVSRCCOLOR2 = 17, FORCE_DWORD = 2147483647, }; pub const D3DBLEND_ZERO = D3DBLEND.ZERO; pub const D3DBLEND_ONE = D3DBLEND.ONE; pub const D3DBLEND_SRCCOLOR = D3DBLEND.SRCCOLOR; pub const D3DBLEND_INVSRCCOLOR = D3DBLEND.INVSRCCOLOR; pub const D3DBLEND_SRCALPHA = D3DBLEND.SRCALPHA; pub const D3DBLEND_INVSRCALPHA = D3DBLEND.INVSRCALPHA; pub const D3DBLEND_DESTALPHA = D3DBLEND.DESTALPHA; pub const D3DBLEND_INVDESTALPHA = D3DBLEND.INVDESTALPHA; pub const D3DBLEND_DESTCOLOR = D3DBLEND.DESTCOLOR; pub const D3DBLEND_INVDESTCOLOR = D3DBLEND.INVDESTCOLOR; pub const D3DBLEND_SRCALPHASAT = D3DBLEND.SRCALPHASAT; pub const D3DBLEND_BOTHSRCALPHA = D3DBLEND.BOTHSRCALPHA; pub const D3DBLEND_BOTHINVSRCALPHA = D3DBLEND.BOTHINVSRCALPHA; pub const D3DBLEND_BLENDFACTOR = D3DBLEND.BLENDFACTOR; pub const D3DBLEND_INVBLENDFACTOR = D3DBLEND.INVBLENDFACTOR; pub const D3DBLEND_SRCCOLOR2 = D3DBLEND.SRCCOLOR2; pub const D3DBLEND_INVSRCCOLOR2 = D3DBLEND.INVSRCCOLOR2; pub const D3DBLEND_FORCE_DWORD = D3DBLEND.FORCE_DWORD; pub const D3DBLENDOP = enum(u32) { ADD = 1, SUBTRACT = 2, REVSUBTRACT = 3, MIN = 4, MAX = 5, FORCE_DWORD = 2147483647, }; pub const D3DBLENDOP_ADD = D3DBLENDOP.ADD; pub const D3DBLENDOP_SUBTRACT = D3DBLENDOP.SUBTRACT; pub const D3DBLENDOP_REVSUBTRACT = D3DBLENDOP.REVSUBTRACT; pub const D3DBLENDOP_MIN = D3DBLENDOP.MIN; pub const D3DBLENDOP_MAX = D3DBLENDOP.MAX; pub const D3DBLENDOP_FORCE_DWORD = D3DBLENDOP.FORCE_DWORD; pub const D3DTEXTUREADDRESS = enum(i32) { WRAP = 1, MIRROR = 2, CLAMP = 3, BORDER = 4, MIRRORONCE = 5, FORCE_DWORD = 2147483647, }; pub const D3DTADDRESS_WRAP = D3DTEXTUREADDRESS.WRAP; pub const D3DTADDRESS_MIRROR = D3DTEXTUREADDRESS.MIRROR; pub const D3DTADDRESS_CLAMP = D3DTEXTUREADDRESS.CLAMP; pub const D3DTADDRESS_BORDER = D3DTEXTUREADDRESS.BORDER; pub const D3DTADDRESS_MIRRORONCE 
= D3DTEXTUREADDRESS.MIRRORONCE; pub const D3DTADDRESS_FORCE_DWORD = D3DTEXTUREADDRESS.FORCE_DWORD; pub const D3DCULL = enum(u32) { NONE = 1, CW = 2, CCW = 3, FORCE_DWORD = 2147483647, }; pub const D3DCULL_NONE = D3DCULL.NONE; pub const D3DCULL_CW = D3DCULL.CW; pub const D3DCULL_CCW = D3DCULL.CCW; pub const D3DCULL_FORCE_DWORD = D3DCULL.FORCE_DWORD; pub const D3DCMPFUNC = enum(i32) { NEVER = 1, LESS = 2, EQUAL = 3, LESSEQUAL = 4, GREATER = 5, NOTEQUAL = 6, GREATEREQUAL = 7, ALWAYS = 8, FORCE_DWORD = 2147483647, }; pub const D3DCMP_NEVER = D3DCMPFUNC.NEVER; pub const D3DCMP_LESS = D3DCMPFUNC.LESS; pub const D3DCMP_EQUAL = D3DCMPFUNC.EQUAL; pub const D3DCMP_LESSEQUAL = D3DCMPFUNC.LESSEQUAL; pub const D3DCMP_GREATER = D3DCMPFUNC.GREATER; pub const D3DCMP_NOTEQUAL = D3DCMPFUNC.NOTEQUAL; pub const D3DCMP_GREATEREQUAL = D3DCMPFUNC.GREATEREQUAL; pub const D3DCMP_ALWAYS = D3DCMPFUNC.ALWAYS; pub const D3DCMP_FORCE_DWORD = D3DCMPFUNC.FORCE_DWORD; pub const D3DSTENCILOP = enum(u32) { KEEP = 1, ZERO = 2, REPLACE = 3, INCRSAT = 4, DECRSAT = 5, INVERT = 6, INCR = 7, DECR = 8, FORCE_DWORD = 2147483647, }; pub const D3DSTENCILOP_KEEP = D3DSTENCILOP.KEEP; pub const D3DSTENCILOP_ZERO = D3DSTENCILOP.ZERO; pub const D3DSTENCILOP_REPLACE = D3DSTENCILOP.REPLACE; pub const D3DSTENCILOP_INCRSAT = D3DSTENCILOP.INCRSAT; pub const D3DSTENCILOP_DECRSAT = D3DSTENCILOP.DECRSAT; pub const D3DSTENCILOP_INVERT = D3DSTENCILOP.INVERT; pub const D3DSTENCILOP_INCR = D3DSTENCILOP.INCR; pub const D3DSTENCILOP_DECR = D3DSTENCILOP.DECR; pub const D3DSTENCILOP_FORCE_DWORD = D3DSTENCILOP.FORCE_DWORD; pub const D3DFOGMODE = enum(i32) { NONE = 0, EXP = 1, EXP2 = 2, LINEAR = 3, FORCE_DWORD = 2147483647, }; pub const D3DFOG_NONE = D3DFOGMODE.NONE; pub const D3DFOG_EXP = D3DFOGMODE.EXP; pub const D3DFOG_EXP2 = D3DFOGMODE.EXP2; pub const D3DFOG_LINEAR = D3DFOGMODE.LINEAR; pub const D3DFOG_FORCE_DWORD = D3DFOGMODE.FORCE_DWORD; pub const D3DZBUFFERTYPE = enum(i32) { FALSE = 0, TRUE = 1, USEW = 2, FORCE_DWORD = 
2147483647, }; pub const D3DZB_FALSE = D3DZBUFFERTYPE.FALSE; pub const D3DZB_TRUE = D3DZBUFFERTYPE.TRUE; pub const D3DZB_USEW = D3DZBUFFERTYPE.USEW; pub const D3DZB_FORCE_DWORD = D3DZBUFFERTYPE.FORCE_DWORD; pub const D3DPRIMITIVETYPE = enum(i32) { POINTLIST = 1, LINELIST = 2, LINESTRIP = 3, TRIANGLELIST = 4, TRIANGLESTRIP = 5, TRIANGLEFAN = 6, FORCE_DWORD = 2147483647, }; pub const D3DPT_POINTLIST = D3DPRIMITIVETYPE.POINTLIST; pub const D3DPT_LINELIST = D3DPRIMITIVETYPE.LINELIST; pub const D3DPT_LINESTRIP = D3DPRIMITIVETYPE.LINESTRIP; pub const D3DPT_TRIANGLELIST = D3DPRIMITIVETYPE.TRIANGLELIST; pub const D3DPT_TRIANGLESTRIP = D3DPRIMITIVETYPE.TRIANGLESTRIP; pub const D3DPT_TRIANGLEFAN = D3DPRIMITIVETYPE.TRIANGLEFAN; pub const D3DPT_FORCE_DWORD = D3DPRIMITIVETYPE.FORCE_DWORD; pub const D3DTRANSFORMSTATETYPE = enum(i32) { VIEW = 2, PROJECTION = 3, TEXTURE0 = 16, TEXTURE1 = 17, TEXTURE2 = 18, TEXTURE3 = 19, TEXTURE4 = 20, TEXTURE5 = 21, TEXTURE6 = 22, TEXTURE7 = 23, FORCE_DWORD = 2147483647, }; pub const D3DTS_VIEW = D3DTRANSFORMSTATETYPE.VIEW; pub const D3DTS_PROJECTION = D3DTRANSFORMSTATETYPE.PROJECTION; pub const D3DTS_TEXTURE0 = D3DTRANSFORMSTATETYPE.TEXTURE0; pub const D3DTS_TEXTURE1 = D3DTRANSFORMSTATETYPE.TEXTURE1; pub const D3DTS_TEXTURE2 = D3DTRANSFORMSTATETYPE.TEXTURE2; pub const D3DTS_TEXTURE3 = D3DTRANSFORMSTATETYPE.TEXTURE3; pub const D3DTS_TEXTURE4 = D3DTRANSFORMSTATETYPE.TEXTURE4; pub const D3DTS_TEXTURE5 = D3DTRANSFORMSTATETYPE.TEXTURE5; pub const D3DTS_TEXTURE6 = D3DTRANSFORMSTATETYPE.TEXTURE6; pub const D3DTS_TEXTURE7 = D3DTRANSFORMSTATETYPE.TEXTURE7; pub const D3DTS_FORCE_DWORD = D3DTRANSFORMSTATETYPE.FORCE_DWORD; pub const D3DRENDERSTATETYPE = enum(i32) { ZENABLE = 7, FILLMODE = 8, SHADEMODE = 9, ZWRITEENABLE = 14, ALPHATESTENABLE = 15, LASTPIXEL = 16, SRCBLEND = 19, DESTBLEND = 20, CULLMODE = 22, ZFUNC = 23, ALPHAREF = 24, ALPHAFUNC = 25, DITHERENABLE = 26, ALPHABLENDENABLE = 27, FOGENABLE = 28, SPECULARENABLE = 29, FOGCOLOR = 34, FOGTABLEMODE = 
35, FOGSTART = 36, FOGEND = 37, FOGDENSITY = 38, RANGEFOGENABLE = 48, STENCILENABLE = 52, STENCILFAIL = 53, STENCILZFAIL = 54, STENCILPASS = 55, STENCILFUNC = 56, STENCILREF = 57, STENCILMASK = 58, STENCILWRITEMASK = 59, TEXTUREFACTOR = 60, WRAP0 = 128, WRAP1 = 129, WRAP2 = 130, WRAP3 = 131, WRAP4 = 132, WRAP5 = 133, WRAP6 = 134, WRAP7 = 135, CLIPPING = 136, LIGHTING = 137, AMBIENT = 139, FOGVERTEXMODE = 140, COLORVERTEX = 141, LOCALVIEWER = 142, NORMALIZENORMALS = 143, DIFFUSEMATERIALSOURCE = 145, SPECULARMATERIALSOURCE = 146, AMBIENTMATERIALSOURCE = 147, EMISSIVEMATERIALSOURCE = 148, VERTEXBLEND = 151, CLIPPLANEENABLE = 152, POINTSIZE = 154, POINTSIZE_MIN = 155, POINTSPRITEENABLE = 156, POINTSCALEENABLE = 157, POINTSCALE_A = 158, POINTSCALE_B = 159, POINTSCALE_C = 160, MULTISAMPLEANTIALIAS = 161, MULTISAMPLEMASK = 162, PATCHEDGESTYLE = 163, DEBUGMONITORTOKEN = 165, POINTSIZE_MAX = 166, INDEXEDVERTEXBLENDENABLE = 167, COLORWRITEENABLE = 168, TWEENFACTOR = 170, BLENDOP = 171, POSITIONDEGREE = 172, NORMALDEGREE = 173, SCISSORTESTENABLE = 174, SLOPESCALEDEPTHBIAS = 175, ANTIALIASEDLINEENABLE = 176, MINTESSELLATIONLEVEL = 178, MAXTESSELLATIONLEVEL = 179, ADAPTIVETESS_X = 180, ADAPTIVETESS_Y = 181, ADAPTIVETESS_Z = 182, ADAPTIVETESS_W = 183, ENABLEADAPTIVETESSELLATION = 184, TWOSIDEDSTENCILMODE = 185, CCW_STENCILFAIL = 186, CCW_STENCILZFAIL = 187, CCW_STENCILPASS = 188, CCW_STENCILFUNC = 189, COLORWRITEENABLE1 = 190, COLORWRITEENABLE2 = 191, COLORWRITEENABLE3 = 192, BLENDFACTOR = 193, SRGBWRITEENABLE = 194, DEPTHBIAS = 195, WRAP8 = 198, WRAP9 = 199, WRAP10 = 200, WRAP11 = 201, WRAP12 = 202, WRAP13 = 203, WRAP14 = 204, WRAP15 = 205, SEPARATEALPHABLENDENABLE = 206, SRCBLENDALPHA = 207, DESTBLENDALPHA = 208, BLENDOPALPHA = 209, FORCE_DWORD = 2147483647, }; pub const D3DRS_ZENABLE = D3DRENDERSTATETYPE.ZENABLE; pub const D3DRS_FILLMODE = D3DRENDERSTATETYPE.FILLMODE; pub const D3DRS_SHADEMODE = D3DRENDERSTATETYPE.SHADEMODE; pub const D3DRS_ZWRITEENABLE = 
D3DRENDERSTATETYPE.ZWRITEENABLE; pub const D3DRS_ALPHATESTENABLE = D3DRENDERSTATETYPE.ALPHATESTENABLE; pub const D3DRS_LASTPIXEL = D3DRENDERSTATETYPE.LASTPIXEL; pub const D3DRS_SRCBLEND = D3DRENDERSTATETYPE.SRCBLEND; pub const D3DRS_DESTBLEND = D3DRENDERSTATETYPE.DESTBLEND; pub const D3DRS_CULLMODE = D3DRENDERSTATETYPE.CULLMODE; pub const D3DRS_ZFUNC = D3DRENDERSTATETYPE.ZFUNC; pub const D3DRS_ALPHAREF = D3DRENDERSTATETYPE.ALPHAREF; pub const D3DRS_ALPHAFUNC = D3DRENDERSTATETYPE.ALPHAFUNC; pub const D3DRS_DITHERENABLE = D3DRENDERSTATETYPE.DITHERENABLE; pub const D3DRS_ALPHABLENDENABLE = D3DRENDERSTATETYPE.ALPHABLENDENABLE; pub const D3DRS_FOGENABLE = D3DRENDERSTATETYPE.FOGENABLE; pub const D3DRS_SPECULARENABLE = D3DRENDERSTATETYPE.SPECULARENABLE; pub const D3DRS_FOGCOLOR = D3DRENDERSTATETYPE.FOGCOLOR; pub const D3DRS_FOGTABLEMODE = D3DRENDERSTATETYPE.FOGTABLEMODE; pub const D3DRS_FOGSTART = D3DRENDERSTATETYPE.FOGSTART; pub const D3DRS_FOGEND = D3DRENDERSTATETYPE.FOGEND; pub const D3DRS_FOGDENSITY = D3DRENDERSTATETYPE.FOGDENSITY; pub const D3DRS_RANGEFOGENABLE = D3DRENDERSTATETYPE.RANGEFOGENABLE; pub const D3DRS_STENCILENABLE = D3DRENDERSTATETYPE.STENCILENABLE; pub const D3DRS_STENCILFAIL = D3DRENDERSTATETYPE.STENCILFAIL; pub const D3DRS_STENCILZFAIL = D3DRENDERSTATETYPE.STENCILZFAIL; pub const D3DRS_STENCILPASS = D3DRENDERSTATETYPE.STENCILPASS; pub const D3DRS_STENCILFUNC = D3DRENDERSTATETYPE.STENCILFUNC; pub const D3DRS_STENCILREF = D3DRENDERSTATETYPE.STENCILREF; pub const D3DRS_STENCILMASK = D3DRENDERSTATETYPE.STENCILMASK; pub const D3DRS_STENCILWRITEMASK = D3DRENDERSTATETYPE.STENCILWRITEMASK; pub const D3DRS_TEXTUREFACTOR = D3DRENDERSTATETYPE.TEXTUREFACTOR; pub const D3DRS_WRAP0 = D3DRENDERSTATETYPE.WRAP0; pub const D3DRS_WRAP1 = D3DRENDERSTATETYPE.WRAP1; pub const D3DRS_WRAP2 = D3DRENDERSTATETYPE.WRAP2; pub const D3DRS_WRAP3 = D3DRENDERSTATETYPE.WRAP3; pub const D3DRS_WRAP4 = D3DRENDERSTATETYPE.WRAP4; pub const D3DRS_WRAP5 = D3DRENDERSTATETYPE.WRAP5; pub const 
D3DRS_WRAP6 = D3DRENDERSTATETYPE.WRAP6; pub const D3DRS_WRAP7 = D3DRENDERSTATETYPE.WRAP7; pub const D3DRS_CLIPPING = D3DRENDERSTATETYPE.CLIPPING; pub const D3DRS_LIGHTING = D3DRENDERSTATETYPE.LIGHTING; pub const D3DRS_AMBIENT = D3DRENDERSTATETYPE.AMBIENT; pub const D3DRS_FOGVERTEXMODE = D3DRENDERSTATETYPE.FOGVERTEXMODE; pub const D3DRS_COLORVERTEX = D3DRENDERSTATETYPE.COLORVERTEX; pub const D3DRS_LOCALVIEWER = D3DRENDERSTATETYPE.LOCALVIEWER; pub const D3DRS_NORMALIZENORMALS = D3DRENDERSTATETYPE.NORMALIZENORMALS; pub const D3DRS_DIFFUSEMATERIALSOURCE = D3DRENDERSTATETYPE.DIFFUSEMATERIALSOURCE; pub const D3DRS_SPECULARMATERIALSOURCE = D3DRENDERSTATETYPE.SPECULARMATERIALSOURCE; pub const D3DRS_AMBIENTMATERIALSOURCE = D3DRENDERSTATETYPE.AMBIENTMATERIALSOURCE; pub const D3DRS_EMISSIVEMATERIALSOURCE = D3DRENDERSTATETYPE.EMISSIVEMATERIALSOURCE; pub const D3DRS_VERTEXBLEND = D3DRENDERSTATETYPE.VERTEXBLEND; pub const D3DRS_CLIPPLANEENABLE = D3DRENDERSTATETYPE.CLIPPLANEENABLE; pub const D3DRS_POINTSIZE = D3DRENDERSTATETYPE.POINTSIZE; pub const D3DRS_POINTSIZE_MIN = D3DRENDERSTATETYPE.POINTSIZE_MIN; pub const D3DRS_POINTSPRITEENABLE = D3DRENDERSTATETYPE.POINTSPRITEENABLE; pub const D3DRS_POINTSCALEENABLE = D3DRENDERSTATETYPE.POINTSCALEENABLE; pub const D3DRS_POINTSCALE_A = D3DRENDERSTATETYPE.POINTSCALE_A; pub const D3DRS_POINTSCALE_B = D3DRENDERSTATETYPE.POINTSCALE_B; pub const D3DRS_POINTSCALE_C = D3DRENDERSTATETYPE.POINTSCALE_C; pub const D3DRS_MULTISAMPLEANTIALIAS = D3DRENDERSTATETYPE.MULTISAMPLEANTIALIAS; pub const D3DRS_MULTISAMPLEMASK = D3DRENDERSTATETYPE.MULTISAMPLEMASK; pub const D3DRS_PATCHEDGESTYLE = D3DRENDERSTATETYPE.PATCHEDGESTYLE; pub const D3DRS_DEBUGMONITORTOKEN = D3DRENDERSTATETYPE.DEBUGMONITORTOKEN; pub const D3DRS_POINTSIZE_MAX = D3DRENDERSTATETYPE.POINTSIZE_MAX; pub const D3DRS_INDEXEDVERTEXBLENDENABLE = D3DRENDERSTATETYPE.INDEXEDVERTEXBLENDENABLE; pub const D3DRS_COLORWRITEENABLE = D3DRENDERSTATETYPE.COLORWRITEENABLE; pub const D3DRS_TWEENFACTOR = 
D3DRENDERSTATETYPE.TWEENFACTOR; pub const D3DRS_BLENDOP = D3DRENDERSTATETYPE.BLENDOP; pub const D3DRS_POSITIONDEGREE = D3DRENDERSTATETYPE.POSITIONDEGREE; pub const D3DRS_NORMALDEGREE = D3DRENDERSTATETYPE.NORMALDEGREE; pub const D3DRS_SCISSORTESTENABLE = D3DRENDERSTATETYPE.SCISSORTESTENABLE; pub const D3DRS_SLOPESCALEDEPTHBIAS = D3DRENDERSTATETYPE.SLOPESCALEDEPTHBIAS; pub const D3DRS_ANTIALIASEDLINEENABLE = D3DRENDERSTATETYPE.ANTIALIASEDLINEENABLE; pub const D3DRS_MINTESSELLATIONLEVEL = D3DRENDERSTATETYPE.MINTESSELLATIONLEVEL; pub const D3DRS_MAXTESSELLATIONLEVEL = D3DRENDERSTATETYPE.MAXTESSELLATIONLEVEL; pub const D3DRS_ADAPTIVETESS_X = D3DRENDERSTATETYPE.ADAPTIVETESS_X; pub const D3DRS_ADAPTIVETESS_Y = D3DRENDERSTATETYPE.ADAPTIVETESS_Y; pub const D3DRS_ADAPTIVETESS_Z = D3DRENDERSTATETYPE.ADAPTIVETESS_Z; pub const D3DRS_ADAPTIVETESS_W = D3DRENDERSTATETYPE.ADAPTIVETESS_W; pub const D3DRS_ENABLEADAPTIVETESSELLATION = D3DRENDERSTATETYPE.ENABLEADAPTIVETESSELLATION; pub const D3DRS_TWOSIDEDSTENCILMODE = D3DRENDERSTATETYPE.TWOSIDEDSTENCILMODE; pub const D3DRS_CCW_STENCILFAIL = D3DRENDERSTATETYPE.CCW_STENCILFAIL; pub const D3DRS_CCW_STENCILZFAIL = D3DRENDERSTATETYPE.CCW_STENCILZFAIL; pub const D3DRS_CCW_STENCILPASS = D3DRENDERSTATETYPE.CCW_STENCILPASS; pub const D3DRS_CCW_STENCILFUNC = D3DRENDERSTATETYPE.CCW_STENCILFUNC; pub const D3DRS_COLORWRITEENABLE1 = D3DRENDERSTATETYPE.COLORWRITEENABLE1; pub const D3DRS_COLORWRITEENABLE2 = D3DRENDERSTATETYPE.COLORWRITEENABLE2; pub const D3DRS_COLORWRITEENABLE3 = D3DRENDERSTATETYPE.COLORWRITEENABLE3; pub const D3DRS_BLENDFACTOR = D3DRENDERSTATETYPE.BLENDFACTOR; pub const D3DRS_SRGBWRITEENABLE = D3DRENDERSTATETYPE.SRGBWRITEENABLE; pub const D3DRS_DEPTHBIAS = D3DRENDERSTATETYPE.DEPTHBIAS; pub const D3DRS_WRAP8 = D3DRENDERSTATETYPE.WRAP8; pub const D3DRS_WRAP9 = D3DRENDERSTATETYPE.WRAP9; pub const D3DRS_WRAP10 = D3DRENDERSTATETYPE.WRAP10; pub const D3DRS_WRAP11 = D3DRENDERSTATETYPE.WRAP11; pub const D3DRS_WRAP12 = 
D3DRENDERSTATETYPE.WRAP12; pub const D3DRS_WRAP13 = D3DRENDERSTATETYPE.WRAP13; pub const D3DRS_WRAP14 = D3DRENDERSTATETYPE.WRAP14; pub const D3DRS_WRAP15 = D3DRENDERSTATETYPE.WRAP15; pub const D3DRS_SEPARATEALPHABLENDENABLE = D3DRENDERSTATETYPE.SEPARATEALPHABLENDENABLE; pub const D3DRS_SRCBLENDALPHA = D3DRENDERSTATETYPE.SRCBLENDALPHA; pub const D3DRS_DESTBLENDALPHA = D3DRENDERSTATETYPE.DESTBLENDALPHA; pub const D3DRS_BLENDOPALPHA = D3DRENDERSTATETYPE.BLENDOPALPHA; pub const D3DRS_FORCE_DWORD = D3DRENDERSTATETYPE.FORCE_DWORD; pub const D3DMATERIALCOLORSOURCE = enum(i32) { MATERIAL = 0, COLOR1 = 1, COLOR2 = 2, FORCE_DWORD = 2147483647, }; pub const D3DMCS_MATERIAL = D3DMATERIALCOLORSOURCE.MATERIAL; pub const D3DMCS_COLOR1 = D3DMATERIALCOLORSOURCE.COLOR1; pub const D3DMCS_COLOR2 = D3DMATERIALCOLORSOURCE.COLOR2; pub const D3DMCS_FORCE_DWORD = D3DMATERIALCOLORSOURCE.FORCE_DWORD; pub const D3DTEXTURESTAGESTATETYPE = enum(i32) { COLOROP = 1, COLORARG1 = 2, COLORARG2 = 3, ALPHAOP = 4, ALPHAARG1 = 5, ALPHAARG2 = 6, BUMPENVMAT00 = 7, BUMPENVMAT01 = 8, BUMPENVMAT10 = 9, BUMPENVMAT11 = 10, TEXCOORDINDEX = 11, BUMPENVLSCALE = 22, BUMPENVLOFFSET = 23, TEXTURETRANSFORMFLAGS = 24, COLORARG0 = 26, ALPHAARG0 = 27, RESULTARG = 28, CONSTANT = 32, FORCE_DWORD = 2147483647, }; pub const D3DTSS_COLOROP = D3DTEXTURESTAGESTATETYPE.COLOROP; pub const D3DTSS_COLORARG1 = D3DTEXTURESTAGESTATETYPE.COLORARG1; pub const D3DTSS_COLORARG2 = D3DTEXTURESTAGESTATETYPE.COLORARG2; pub const D3DTSS_ALPHAOP = D3DTEXTURESTAGESTATETYPE.ALPHAOP; pub const D3DTSS_ALPHAARG1 = D3DTEXTURESTAGESTATETYPE.ALPHAARG1; pub const D3DTSS_ALPHAARG2 = D3DTEXTURESTAGESTATETYPE.ALPHAARG2; pub const D3DTSS_BUMPENVMAT00 = D3DTEXTURESTAGESTATETYPE.BUMPENVMAT00; pub const D3DTSS_BUMPENVMAT01 = D3DTEXTURESTAGESTATETYPE.BUMPENVMAT01; pub const D3DTSS_BUMPENVMAT10 = D3DTEXTURESTAGESTATETYPE.BUMPENVMAT10; pub const D3DTSS_BUMPENVMAT11 = D3DTEXTURESTAGESTATETYPE.BUMPENVMAT11; pub const D3DTSS_TEXCOORDINDEX = 
// NOTE(review): machine-generated Win32/Direct3D 9 bindings. Each C enum is
// translated as a Zig `enum(i32)`/`enum(u32)` plus flat alias constants that
// preserve the original C identifier names. Numeric values are ABI — they must
// match d3d9types.h exactly — so only formatting/comments are changed here.
// The first line below is the tail of a declaration that starts before this
// chunk (presumably `pub const D3DTSS_TEXCOORDINDEX = ` — TODO confirm).
D3DTEXTURESTAGESTATETYPE.TEXCOORDINDEX;
pub const D3DTSS_BUMPENVLSCALE = D3DTEXTURESTAGESTATETYPE.BUMPENVLSCALE;
pub const D3DTSS_BUMPENVLOFFSET = D3DTEXTURESTAGESTATETYPE.BUMPENVLOFFSET;
pub const D3DTSS_TEXTURETRANSFORMFLAGS = D3DTEXTURESTAGESTATETYPE.TEXTURETRANSFORMFLAGS;
pub const D3DTSS_COLORARG0 = D3DTEXTURESTAGESTATETYPE.COLORARG0;
pub const D3DTSS_ALPHAARG0 = D3DTEXTURESTAGESTATETYPE.ALPHAARG0;
pub const D3DTSS_RESULTARG = D3DTEXTURESTAGESTATETYPE.RESULTARG;
pub const D3DTSS_CONSTANT = D3DTEXTURESTAGESTATETYPE.CONSTANT;
pub const D3DTSS_FORCE_DWORD = D3DTEXTURESTAGESTATETYPE.FORCE_DWORD;
// Sampler state selectors. FORCE_DWORD = 0x7FFFFFFF is the usual C idiom that
// forces the enum to 32-bit storage; it is not a real state.
pub const D3DSAMPLERSTATETYPE = enum(i32) { ADDRESSU = 1, ADDRESSV = 2, ADDRESSW = 3, BORDERCOLOR = 4, MAGFILTER = 5, MINFILTER = 6, MIPFILTER = 7, MIPMAPLODBIAS = 8, MAXMIPLEVEL = 9, MAXANISOTROPY = 10, SRGBTEXTURE = 11, ELEMENTINDEX = 12, DMAPOFFSET = 13, FORCE_DWORD = 2147483647, };
pub const D3DSAMP_ADDRESSU = D3DSAMPLERSTATETYPE.ADDRESSU;
pub const D3DSAMP_ADDRESSV = D3DSAMPLERSTATETYPE.ADDRESSV;
pub const D3DSAMP_ADDRESSW = D3DSAMPLERSTATETYPE.ADDRESSW;
pub const D3DSAMP_BORDERCOLOR = D3DSAMPLERSTATETYPE.BORDERCOLOR;
pub const D3DSAMP_MAGFILTER = D3DSAMPLERSTATETYPE.MAGFILTER;
pub const D3DSAMP_MINFILTER = D3DSAMPLERSTATETYPE.MINFILTER;
pub const D3DSAMP_MIPFILTER = D3DSAMPLERSTATETYPE.MIPFILTER;
pub const D3DSAMP_MIPMAPLODBIAS = D3DSAMPLERSTATETYPE.MIPMAPLODBIAS;
pub const D3DSAMP_MAXMIPLEVEL = D3DSAMPLERSTATETYPE.MAXMIPLEVEL;
pub const D3DSAMP_MAXANISOTROPY = D3DSAMPLERSTATETYPE.MAXANISOTROPY;
pub const D3DSAMP_SRGBTEXTURE = D3DSAMPLERSTATETYPE.SRGBTEXTURE;
pub const D3DSAMP_ELEMENTINDEX = D3DSAMPLERSTATETYPE.ELEMENTINDEX;
pub const D3DSAMP_DMAPOFFSET = D3DSAMPLERSTATETYPE.DMAPOFFSET;
pub const D3DSAMP_FORCE_DWORD = D3DSAMPLERSTATETYPE.FORCE_DWORD;
// Fixed-function texture-stage blending operations.
pub const D3DTEXTUREOP = enum(i32) { DISABLE = 1, SELECTARG1 = 2, SELECTARG2 = 3, MODULATE = 4, MODULATE2X = 5, MODULATE4X = 6, ADD = 7, ADDSIGNED = 8, ADDSIGNED2X = 9, SUBTRACT = 10, ADDSMOOTH = 11, BLENDDIFFUSEALPHA = 12, BLENDTEXTUREALPHA = 13, BLENDFACTORALPHA = 14, BLENDTEXTUREALPHAPM = 15, BLENDCURRENTALPHA = 16, PREMODULATE = 17, MODULATEALPHA_ADDCOLOR = 18, MODULATECOLOR_ADDALPHA = 19, MODULATEINVALPHA_ADDCOLOR = 20, MODULATEINVCOLOR_ADDALPHA = 21, BUMPENVMAP = 22, BUMPENVMAPLUMINANCE = 23, DOTPRODUCT3 = 24, MULTIPLYADD = 25, LERP = 26, FORCE_DWORD = 2147483647, };
pub const D3DTOP_DISABLE = D3DTEXTUREOP.DISABLE;
pub const D3DTOP_SELECTARG1 = D3DTEXTUREOP.SELECTARG1;
pub const D3DTOP_SELECTARG2 = D3DTEXTUREOP.SELECTARG2;
pub const D3DTOP_MODULATE = D3DTEXTUREOP.MODULATE;
pub const D3DTOP_MODULATE2X = D3DTEXTUREOP.MODULATE2X;
pub const D3DTOP_MODULATE4X = D3DTEXTUREOP.MODULATE4X;
pub const D3DTOP_ADD = D3DTEXTUREOP.ADD;
pub const D3DTOP_ADDSIGNED = D3DTEXTUREOP.ADDSIGNED;
pub const D3DTOP_ADDSIGNED2X = D3DTEXTUREOP.ADDSIGNED2X;
pub const D3DTOP_SUBTRACT = D3DTEXTUREOP.SUBTRACT;
pub const D3DTOP_ADDSMOOTH = D3DTEXTUREOP.ADDSMOOTH;
pub const D3DTOP_BLENDDIFFUSEALPHA = D3DTEXTUREOP.BLENDDIFFUSEALPHA;
pub const D3DTOP_BLENDTEXTUREALPHA = D3DTEXTUREOP.BLENDTEXTUREALPHA;
pub const D3DTOP_BLENDFACTORALPHA = D3DTEXTUREOP.BLENDFACTORALPHA;
pub const D3DTOP_BLENDTEXTUREALPHAPM = D3DTEXTUREOP.BLENDTEXTUREALPHAPM;
pub const D3DTOP_BLENDCURRENTALPHA = D3DTEXTUREOP.BLENDCURRENTALPHA;
pub const D3DTOP_PREMODULATE = D3DTEXTUREOP.PREMODULATE;
pub const D3DTOP_MODULATEALPHA_ADDCOLOR = D3DTEXTUREOP.MODULATEALPHA_ADDCOLOR;
pub const D3DTOP_MODULATECOLOR_ADDALPHA = D3DTEXTUREOP.MODULATECOLOR_ADDALPHA;
pub const D3DTOP_MODULATEINVALPHA_ADDCOLOR = D3DTEXTUREOP.MODULATEINVALPHA_ADDCOLOR;
pub const D3DTOP_MODULATEINVCOLOR_ADDALPHA = D3DTEXTUREOP.MODULATEINVCOLOR_ADDALPHA;
pub const D3DTOP_BUMPENVMAP = D3DTEXTUREOP.BUMPENVMAP;
pub const D3DTOP_BUMPENVMAPLUMINANCE = D3DTEXTUREOP.BUMPENVMAPLUMINANCE;
pub const D3DTOP_DOTPRODUCT3 = D3DTEXTUREOP.DOTPRODUCT3;
pub const D3DTOP_MULTIPLYADD = D3DTEXTUREOP.MULTIPLYADD;
pub const D3DTOP_LERP = D3DTEXTUREOP.LERP;
pub const D3DTOP_FORCE_DWORD = D3DTEXTUREOP.FORCE_DWORD;
// Texture filtering modes. Note the value gaps (4, 5 unused) — values come
// straight from the C header, do not renumber.
pub const D3DTEXTUREFILTERTYPE = enum(i32) { NONE = 0, POINT = 1, LINEAR = 2, ANISOTROPIC = 3, PYRAMIDALQUAD = 6, GAUSSIANQUAD = 7, CONVOLUTIONMONO = 8, FORCE_DWORD = 2147483647, };
pub const D3DTEXF_NONE = D3DTEXTUREFILTERTYPE.NONE;
pub const D3DTEXF_POINT = D3DTEXTUREFILTERTYPE.POINT;
pub const D3DTEXF_LINEAR = D3DTEXTUREFILTERTYPE.LINEAR;
pub const D3DTEXF_ANISOTROPIC = D3DTEXTUREFILTERTYPE.ANISOTROPIC;
pub const D3DTEXF_PYRAMIDALQUAD = D3DTEXTUREFILTERTYPE.PYRAMIDALQUAD;
pub const D3DTEXF_GAUSSIANQUAD = D3DTEXTUREFILTERTYPE.GAUSSIANQUAD;
pub const D3DTEXF_CONVOLUTIONMONO = D3DTEXTUREFILTERTYPE.CONVOLUTIONMONO;
pub const D3DTEXF_FORCE_DWORD = D3DTEXTUREFILTERTYPE.FORCE_DWORD;
// Vertex declaration usage semantics (D3DVERTEXELEMENT9.Usage values).
pub const D3DDECLUSAGE = enum(i32) { POSITION = 0, BLENDWEIGHT = 1, BLENDINDICES = 2, NORMAL = 3, PSIZE = 4, TEXCOORD = 5, TANGENT = 6, BINORMAL = 7, TESSFACTOR = 8, POSITIONT = 9, COLOR = 10, FOG = 11, DEPTH = 12, SAMPLE = 13, };
pub const D3DDECLUSAGE_POSITION = D3DDECLUSAGE.POSITION;
pub const D3DDECLUSAGE_BLENDWEIGHT = D3DDECLUSAGE.BLENDWEIGHT;
pub const D3DDECLUSAGE_BLENDINDICES = D3DDECLUSAGE.BLENDINDICES;
pub const D3DDECLUSAGE_NORMAL = D3DDECLUSAGE.NORMAL;
pub const D3DDECLUSAGE_PSIZE = D3DDECLUSAGE.PSIZE;
pub const D3DDECLUSAGE_TEXCOORD = D3DDECLUSAGE.TEXCOORD;
pub const D3DDECLUSAGE_TANGENT = D3DDECLUSAGE.TANGENT;
pub const D3DDECLUSAGE_BINORMAL = D3DDECLUSAGE.BINORMAL;
pub const D3DDECLUSAGE_TESSFACTOR = D3DDECLUSAGE.TESSFACTOR;
pub const D3DDECLUSAGE_POSITIONT = D3DDECLUSAGE.POSITIONT;
pub const D3DDECLUSAGE_COLOR = D3DDECLUSAGE.COLOR;
pub const D3DDECLUSAGE_FOG = D3DDECLUSAGE.FOG;
pub const D3DDECLUSAGE_DEPTH = D3DDECLUSAGE.DEPTH;
pub const D3DDECLUSAGE_SAMPLE = D3DDECLUSAGE.SAMPLE;
// Vertex declaration tessellation methods (D3DVERTEXELEMENT9.Method values).
pub const D3DDECLMETHOD = enum(i32) { DEFAULT = 0, PARTIALU = 1, PARTIALV = 2, CROSSUV = 3, UV = 4, LOOKUP = 5, LOOKUPPRESAMPLED = 6, };
pub const D3DDECLMETHOD_DEFAULT = D3DDECLMETHOD.DEFAULT;
pub const D3DDECLMETHOD_PARTIALU = D3DDECLMETHOD.PARTIALU;
// Declaration continues in the next chunk of the file.
pub const
// (Continues the `pub const` begun at the end of the previous chunk.)
D3DDECLMETHOD_PARTIALV = D3DDECLMETHOD.PARTIALV;
pub const D3DDECLMETHOD_CROSSUV = D3DDECLMETHOD.CROSSUV;
pub const D3DDECLMETHOD_UV = D3DDECLMETHOD.UV;
pub const D3DDECLMETHOD_LOOKUP = D3DDECLMETHOD.LOOKUP;
pub const D3DDECLMETHOD_LOOKUPPRESAMPLED = D3DDECLMETHOD.LOOKUPPRESAMPLED;
// Vertex element data types (D3DVERTEXELEMENT9.Type values). UNUSED = 17 marks
// the end-of-declaration sentinel element.
pub const D3DDECLTYPE = enum(i32) { FLOAT1 = 0, FLOAT2 = 1, FLOAT3 = 2, FLOAT4 = 3, D3DCOLOR = 4, UBYTE4 = 5, SHORT2 = 6, SHORT4 = 7, UBYTE4N = 8, SHORT2N = 9, SHORT4N = 10, USHORT2N = 11, USHORT4N = 12, UDEC3 = 13, DEC3N = 14, FLOAT16_2 = 15, FLOAT16_4 = 16, UNUSED = 17, };
pub const D3DDECLTYPE_FLOAT1 = D3DDECLTYPE.FLOAT1;
pub const D3DDECLTYPE_FLOAT2 = D3DDECLTYPE.FLOAT2;
pub const D3DDECLTYPE_FLOAT3 = D3DDECLTYPE.FLOAT3;
pub const D3DDECLTYPE_FLOAT4 = D3DDECLTYPE.FLOAT4;
pub const D3DDECLTYPE_D3DCOLOR = D3DDECLTYPE.D3DCOLOR;
pub const D3DDECLTYPE_UBYTE4 = D3DDECLTYPE.UBYTE4;
pub const D3DDECLTYPE_SHORT2 = D3DDECLTYPE.SHORT2;
pub const D3DDECLTYPE_SHORT4 = D3DDECLTYPE.SHORT4;
pub const D3DDECLTYPE_UBYTE4N = D3DDECLTYPE.UBYTE4N;
pub const D3DDECLTYPE_SHORT2N = D3DDECLTYPE.SHORT2N;
pub const D3DDECLTYPE_SHORT4N = D3DDECLTYPE.SHORT4N;
pub const D3DDECLTYPE_USHORT2N = D3DDECLTYPE.USHORT2N;
pub const D3DDECLTYPE_USHORT4N = D3DDECLTYPE.USHORT4N;
pub const D3DDECLTYPE_UDEC3 = D3DDECLTYPE.UDEC3;
pub const D3DDECLTYPE_DEC3N = D3DDECLTYPE.DEC3N;
pub const D3DDECLTYPE_FLOAT16_2 = D3DDECLTYPE.FLOAT16_2;
pub const D3DDECLTYPE_FLOAT16_4 = D3DDECLTYPE.FLOAT16_4;
pub const D3DDECLTYPE_UNUSED = D3DDECLTYPE.UNUSED;
// One element of a vertex declaration; extern struct preserves the exact C
// layout (u16/u16/u8/u8/u8/u8, 8 bytes).
pub const D3DVERTEXELEMENT9 = extern struct { Stream: u16, Offset: u16, Type: u8, Method: u8, Usage: u8, UsageIndex: u8, };
// Shader model 1-3 bytecode opcodes. Gaps (49..63) and the high PHASE/COMMENT/
// END markers are as defined in d3d9types.h.
pub const D3DSHADER_INSTRUCTION_OPCODE_TYPE = enum(i32) { NOP = 0, MOV = 1, ADD = 2, SUB = 3, MAD = 4, MUL = 5, RCP = 6, RSQ = 7, DP3 = 8, DP4 = 9, MIN = 10, MAX = 11, SLT = 12, SGE = 13, EXP = 14, LOG = 15, LIT = 16, DST = 17, LRP = 18, FRC = 19, M4x4 = 20, M4x3 = 21, M3x4 = 22, M3x3 = 23, M3x2 = 24, CALL = 25, CALLNZ = 26, LOOP = 27, RET = 28, ENDLOOP = 29, LABEL = 30, DCL = 31, POW = 32, CRS = 33, SGN = 34, ABS = 35, NRM = 36, SINCOS = 37, REP = 38, ENDREP = 39, IF = 40, IFC = 41, ELSE = 42, ENDIF = 43, BREAK = 44, BREAKC = 45, MOVA = 46, DEFB = 47, DEFI = 48, TEXCOORD = 64, TEXKILL = 65, TEX = 66, TEXBEM = 67, TEXBEML = 68, TEXREG2AR = 69, TEXREG2GB = 70, TEXM3x2PAD = 71, TEXM3x2TEX = 72, TEXM3x3PAD = 73, TEXM3x3TEX = 74, RESERVED0 = 75, TEXM3x3SPEC = 76, TEXM3x3VSPEC = 77, EXPP = 78, LOGP = 79, CND = 80, DEF = 81, TEXREG2RGB = 82, TEXDP3TEX = 83, TEXM3x2DEPTH = 84, TEXDP3 = 85, TEXM3x3 = 86, TEXDEPTH = 87, CMP = 88, BEM = 89, DP2ADD = 90, DSX = 91, DSY = 92, TEXLDD = 93, SETP = 94, TEXLDL = 95, BREAKP = 96, PHASE = 65533, COMMENT = 65534, END = 65535, FORCE_DWORD = 2147483647, };
pub const D3DSIO_NOP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.NOP;
pub const D3DSIO_MOV = D3DSHADER_INSTRUCTION_OPCODE_TYPE.MOV;
pub const D3DSIO_ADD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.ADD;
pub const D3DSIO_SUB = D3DSHADER_INSTRUCTION_OPCODE_TYPE.SUB;
pub const D3DSIO_MAD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.MAD;
pub const D3DSIO_MUL = D3DSHADER_INSTRUCTION_OPCODE_TYPE.MUL;
pub const D3DSIO_RCP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.RCP;
pub const D3DSIO_RSQ = D3DSHADER_INSTRUCTION_OPCODE_TYPE.RSQ;
pub const D3DSIO_DP3 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DP3;
pub const D3DSIO_DP4 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DP4;
pub const D3DSIO_MIN = D3DSHADER_INSTRUCTION_OPCODE_TYPE.MIN;
pub const D3DSIO_MAX = D3DSHADER_INSTRUCTION_OPCODE_TYPE.MAX;
pub const D3DSIO_SLT = D3DSHADER_INSTRUCTION_OPCODE_TYPE.SLT;
pub const D3DSIO_SGE = D3DSHADER_INSTRUCTION_OPCODE_TYPE.SGE;
pub const D3DSIO_EXP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.EXP;
pub const D3DSIO_LOG = D3DSHADER_INSTRUCTION_OPCODE_TYPE.LOG;
pub const D3DSIO_LIT = D3DSHADER_INSTRUCTION_OPCODE_TYPE.LIT;
pub const D3DSIO_DST = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DST;
pub const D3DSIO_LRP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.LRP;
pub const D3DSIO_FRC = D3DSHADER_INSTRUCTION_OPCODE_TYPE.FRC;
// Declaration continues in the next chunk of the file.
pub const D3DSIO_M4x4 =
// (Continues `pub const D3DSIO_M4x4 = ` from the previous chunk; the D3DSIO_*
// run aliases every D3DSHADER_INSTRUCTION_OPCODE_TYPE member under its C name.)
D3DSHADER_INSTRUCTION_OPCODE_TYPE.M4x4;
pub const D3DSIO_M4x3 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.M4x3;
pub const D3DSIO_M3x4 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.M3x4;
pub const D3DSIO_M3x3 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.M3x3;
pub const D3DSIO_M3x2 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.M3x2;
pub const D3DSIO_CALL = D3DSHADER_INSTRUCTION_OPCODE_TYPE.CALL;
pub const D3DSIO_CALLNZ = D3DSHADER_INSTRUCTION_OPCODE_TYPE.CALLNZ;
pub const D3DSIO_LOOP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.LOOP;
pub const D3DSIO_RET = D3DSHADER_INSTRUCTION_OPCODE_TYPE.RET;
pub const D3DSIO_ENDLOOP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.ENDLOOP;
pub const D3DSIO_LABEL = D3DSHADER_INSTRUCTION_OPCODE_TYPE.LABEL;
pub const D3DSIO_DCL = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DCL;
pub const D3DSIO_POW = D3DSHADER_INSTRUCTION_OPCODE_TYPE.POW;
pub const D3DSIO_CRS = D3DSHADER_INSTRUCTION_OPCODE_TYPE.CRS;
pub const D3DSIO_SGN = D3DSHADER_INSTRUCTION_OPCODE_TYPE.SGN;
pub const D3DSIO_ABS = D3DSHADER_INSTRUCTION_OPCODE_TYPE.ABS;
pub const D3DSIO_NRM = D3DSHADER_INSTRUCTION_OPCODE_TYPE.NRM;
pub const D3DSIO_SINCOS = D3DSHADER_INSTRUCTION_OPCODE_TYPE.SINCOS;
pub const D3DSIO_REP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.REP;
pub const D3DSIO_ENDREP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.ENDREP;
pub const D3DSIO_IF = D3DSHADER_INSTRUCTION_OPCODE_TYPE.IF;
pub const D3DSIO_IFC = D3DSHADER_INSTRUCTION_OPCODE_TYPE.IFC;
pub const D3DSIO_ELSE = D3DSHADER_INSTRUCTION_OPCODE_TYPE.ELSE;
pub const D3DSIO_ENDIF = D3DSHADER_INSTRUCTION_OPCODE_TYPE.ENDIF;
pub const D3DSIO_BREAK = D3DSHADER_INSTRUCTION_OPCODE_TYPE.BREAK;
pub const D3DSIO_BREAKC = D3DSHADER_INSTRUCTION_OPCODE_TYPE.BREAKC;
pub const D3DSIO_MOVA = D3DSHADER_INSTRUCTION_OPCODE_TYPE.MOVA;
pub const D3DSIO_DEFB = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DEFB;
pub const D3DSIO_DEFI = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DEFI;
pub const D3DSIO_TEXCOORD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXCOORD;
pub const D3DSIO_TEXKILL = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXKILL;
pub const D3DSIO_TEX = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEX;
pub const D3DSIO_TEXBEM = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXBEM;
pub const D3DSIO_TEXBEML = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXBEML;
pub const D3DSIO_TEXREG2AR = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXREG2AR;
pub const D3DSIO_TEXREG2GB = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXREG2GB;
pub const D3DSIO_TEXM3x2PAD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x2PAD;
pub const D3DSIO_TEXM3x2TEX = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x2TEX;
pub const D3DSIO_TEXM3x3PAD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x3PAD;
pub const D3DSIO_TEXM3x3TEX = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x3TEX;
pub const D3DSIO_RESERVED0 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.RESERVED0;
pub const D3DSIO_TEXM3x3SPEC = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x3SPEC;
pub const D3DSIO_TEXM3x3VSPEC = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x3VSPEC;
pub const D3DSIO_EXPP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.EXPP;
pub const D3DSIO_LOGP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.LOGP;
pub const D3DSIO_CND = D3DSHADER_INSTRUCTION_OPCODE_TYPE.CND;
pub const D3DSIO_DEF = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DEF;
pub const D3DSIO_TEXREG2RGB = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXREG2RGB;
pub const D3DSIO_TEXDP3TEX = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXDP3TEX;
pub const D3DSIO_TEXM3x2DEPTH = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x2DEPTH;
pub const D3DSIO_TEXDP3 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXDP3;
pub const D3DSIO_TEXM3x3 = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXM3x3;
pub const D3DSIO_TEXDEPTH = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXDEPTH;
pub const D3DSIO_CMP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.CMP;
pub const D3DSIO_BEM = D3DSHADER_INSTRUCTION_OPCODE_TYPE.BEM;
pub const D3DSIO_DP2ADD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DP2ADD;
pub const D3DSIO_DSX = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DSX;
pub const D3DSIO_DSY = D3DSHADER_INSTRUCTION_OPCODE_TYPE.DSY;
pub const D3DSIO_TEXLDD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXLDD;
pub const D3DSIO_SETP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.SETP;
pub const D3DSIO_TEXLDL = D3DSHADER_INSTRUCTION_OPCODE_TYPE.TEXLDL;
pub const D3DSIO_BREAKP = D3DSHADER_INSTRUCTION_OPCODE_TYPE.BREAKP;
pub const D3DSIO_PHASE = D3DSHADER_INSTRUCTION_OPCODE_TYPE.PHASE;
pub const D3DSIO_COMMENT = D3DSHADER_INSTRUCTION_OPCODE_TYPE.COMMENT;
pub const D3DSIO_END = D3DSHADER_INSTRUCTION_OPCODE_TYPE.END;
pub const D3DSIO_FORCE_DWORD = D3DSHADER_INSTRUCTION_OPCODE_TYPE.FORCE_DWORD;
// Comparison codes used by predicated shader instructions (BREAKC/IFC/SETP).
pub const D3DSHADER_COMPARISON = enum(i32) { RESERVED0 = 0, GT = 1, EQ = 2, GE = 3, LT = 4, NE = 5, LE = 6, RESERVED1 = 7, };
pub const D3DSPC_RESERVED0 = D3DSHADER_COMPARISON.RESERVED0;
pub const D3DSPC_GT = D3DSHADER_COMPARISON.GT;
pub const D3DSPC_EQ = D3DSHADER_COMPARISON.EQ;
pub const D3DSPC_GE = D3DSHADER_COMPARISON.GE;
pub const D3DSPC_LT = D3DSHADER_COMPARISON.LT;
pub const D3DSPC_NE = D3DSHADER_COMPARISON.NE;
pub const D3DSPC_LE = D3DSHADER_COMPARISON.LE;
pub const D3DSPC_RESERVED1 = D3DSHADER_COMPARISON.RESERVED1;
// Sampler dimensionality bits; @"2D" is a Zig quoted identifier because the C
// name starts with a digit after prefix stripping.
pub const D3DSAMPLER_TEXTURE_TYPE = enum(i32) { UNKNOWN = 0, @"2D" = 268435456, CUBE = 402653184, VOLUME = 536870912, FORCE_DWORD = 2147483647, };
pub const D3DSTT_UNKNOWN = D3DSAMPLER_TEXTURE_TYPE.UNKNOWN;
pub const D3DSTT_2D = D3DSAMPLER_TEXTURE_TYPE.@"2D";
pub const D3DSTT_CUBE = D3DSAMPLER_TEXTURE_TYPE.CUBE;
pub const D3DSTT_VOLUME = D3DSAMPLER_TEXTURE_TYPE.VOLUME;
pub const D3DSTT_FORCE_DWORD = D3DSAMPLER_TEXTURE_TYPE.FORCE_DWORD;
// Shader register file kinds. The C header defines duplicate values (TEXTURE
// == ADDR, OUTPUT == TEXCRDOUT); Zig enums cannot repeat values, so the
// generator keeps one member and maps the duplicate via an alias constant
// below (D3DSPR_TEXTURE / D3DSPR_OUTPUT in the next chunk).
pub const D3DSHADER_PARAM_REGISTER_TYPE = enum(i32) {
    TEMP = 0,
    INPUT = 1,
    CONST = 2,
    ADDR = 3,
    // TEXTURE = 3, this enum value conflicts with ADDR
    RASTOUT = 4,
    ATTROUT = 5,
    TEXCRDOUT = 6,
    // OUTPUT = 6, this enum value conflicts with TEXCRDOUT
    CONSTINT = 7,
    COLOROUT = 8,
    DEPTHOUT = 9,
    SAMPLER = 10,
    CONST2 = 11,
    CONST3 = 12,
    CONST4 = 13,
    CONSTBOOL = 14,
    LOOP = 15,
    TEMPFLOAT16 = 16,
    MISCTYPE = 17,
    LABEL = 18,
    PREDICATE = 19,
    FORCE_DWORD = 2147483647,
};
pub const D3DSPR_TEMP = D3DSHADER_PARAM_REGISTER_TYPE.TEMP;
// Declaration continues in the next chunk of the file.
pub const D3DSPR_INPUT =
// (Continues `pub const D3DSPR_INPUT = ` from the previous chunk.)
D3DSHADER_PARAM_REGISTER_TYPE.INPUT;
pub const D3DSPR_CONST = D3DSHADER_PARAM_REGISTER_TYPE.CONST;
pub const D3DSPR_ADDR = D3DSHADER_PARAM_REGISTER_TYPE.ADDR;
// TEXTURE shares value 3 with ADDR in the C header; aliased to ADDR here.
pub const D3DSPR_TEXTURE = D3DSHADER_PARAM_REGISTER_TYPE.ADDR;
pub const D3DSPR_RASTOUT = D3DSHADER_PARAM_REGISTER_TYPE.RASTOUT;
pub const D3DSPR_ATTROUT = D3DSHADER_PARAM_REGISTER_TYPE.ATTROUT;
pub const D3DSPR_TEXCRDOUT = D3DSHADER_PARAM_REGISTER_TYPE.TEXCRDOUT;
// OUTPUT shares value 6 with TEXCRDOUT in the C header; aliased here.
pub const D3DSPR_OUTPUT = D3DSHADER_PARAM_REGISTER_TYPE.TEXCRDOUT;
pub const D3DSPR_CONSTINT = D3DSHADER_PARAM_REGISTER_TYPE.CONSTINT;
pub const D3DSPR_COLOROUT = D3DSHADER_PARAM_REGISTER_TYPE.COLOROUT;
pub const D3DSPR_DEPTHOUT = D3DSHADER_PARAM_REGISTER_TYPE.DEPTHOUT;
pub const D3DSPR_SAMPLER = D3DSHADER_PARAM_REGISTER_TYPE.SAMPLER;
pub const D3DSPR_CONST2 = D3DSHADER_PARAM_REGISTER_TYPE.CONST2;
pub const D3DSPR_CONST3 = D3DSHADER_PARAM_REGISTER_TYPE.CONST3;
pub const D3DSPR_CONST4 = D3DSHADER_PARAM_REGISTER_TYPE.CONST4;
pub const D3DSPR_CONSTBOOL = D3DSHADER_PARAM_REGISTER_TYPE.CONSTBOOL;
pub const D3DSPR_LOOP = D3DSHADER_PARAM_REGISTER_TYPE.LOOP;
pub const D3DSPR_TEMPFLOAT16 = D3DSHADER_PARAM_REGISTER_TYPE.TEMPFLOAT16;
pub const D3DSPR_MISCTYPE = D3DSHADER_PARAM_REGISTER_TYPE.MISCTYPE;
pub const D3DSPR_LABEL = D3DSHADER_PARAM_REGISTER_TYPE.LABEL;
pub const D3DSPR_PREDICATE = D3DSHADER_PARAM_REGISTER_TYPE.PREDICATE;
pub const D3DSPR_FORCE_DWORD = D3DSHADER_PARAM_REGISTER_TYPE.FORCE_DWORD;
// Offsets within the MISCTYPE register file.
pub const D3DSHADER_MISCTYPE_OFFSETS = enum(i32) { POSITION = 0, FACE = 1, };
pub const D3DSMO_POSITION = D3DSHADER_MISCTYPE_OFFSETS.POSITION;
pub const D3DSMO_FACE = D3DSHADER_MISCTYPE_OFFSETS.FACE;
// Offsets within the RASTOUT register file.
pub const D3DVS_RASTOUT_OFFSETS = enum(i32) { POSITION = 0, FOG = 1, POINT_SIZE = 2, FORCE_DWORD = 2147483647, };
pub const D3DSRO_POSITION = D3DVS_RASTOUT_OFFSETS.POSITION;
pub const D3DSRO_FOG = D3DVS_RASTOUT_OFFSETS.FOG;
pub const D3DSRO_POINT_SIZE = D3DVS_RASTOUT_OFFSETS.POINT_SIZE;
pub const D3DSRO_FORCE_DWORD = D3DVS_RASTOUT_OFFSETS.FORCE_DWORD;
// Vertex-shader address mode bit (bit 13 of the token).
pub const D3DVS_ADDRESSMODE_TYPE = enum(i32) { ABSOLUTE = 0, RELATIVE = 8192, FORCE_DWORD = 2147483647, };
pub const D3DVS_ADDRMODE_ABSOLUTE = D3DVS_ADDRESSMODE_TYPE.ABSOLUTE;
pub const D3DVS_ADDRMODE_RELATIVE = D3DVS_ADDRESSMODE_TYPE.RELATIVE;
pub const D3DVS_ADDRMODE_FORCE_DWORD = D3DVS_ADDRESSMODE_TYPE.FORCE_DWORD;
// Identical values to D3DVS_ADDRESSMODE_TYPE; kept as a distinct type because
// the C header declares both.
pub const D3DSHADER_ADDRESSMODE_TYPE = enum(i32) { ABSOLUTE = 0, RELATIVE = 8192, FORCE_DWORD = 2147483647, };
pub const D3DSHADER_ADDRMODE_ABSOLUTE = D3DSHADER_ADDRESSMODE_TYPE.ABSOLUTE;
pub const D3DSHADER_ADDRMODE_RELATIVE = D3DSHADER_ADDRESSMODE_TYPE.RELATIVE;
pub const D3DSHADER_ADDRMODE_FORCE_DWORD = D3DSHADER_ADDRESSMODE_TYPE.FORCE_DWORD;
// Source-operand modifier bits (values are multiples of 0x1000000, i.e. the
// modifier field shifted into bits 24..27 of the parameter token).
pub const D3DSHADER_PARAM_SRCMOD_TYPE = enum(i32) { NONE = 0, NEG = 16777216, BIAS = 33554432, BIASNEG = 50331648, SIGN = 67108864, SIGNNEG = 83886080, COMP = 100663296, X2 = 117440512, X2NEG = 134217728, DZ = 150994944, DW = 167772160, ABS = 184549376, ABSNEG = 201326592, NOT = 218103808, FORCE_DWORD = 2147483647, };
pub const D3DSPSM_NONE = D3DSHADER_PARAM_SRCMOD_TYPE.NONE;
pub const D3DSPSM_NEG = D3DSHADER_PARAM_SRCMOD_TYPE.NEG;
pub const D3DSPSM_BIAS = D3DSHADER_PARAM_SRCMOD_TYPE.BIAS;
pub const D3DSPSM_BIASNEG = D3DSHADER_PARAM_SRCMOD_TYPE.BIASNEG;
pub const D3DSPSM_SIGN = D3DSHADER_PARAM_SRCMOD_TYPE.SIGN;
pub const D3DSPSM_SIGNNEG = D3DSHADER_PARAM_SRCMOD_TYPE.SIGNNEG;
pub const D3DSPSM_COMP = D3DSHADER_PARAM_SRCMOD_TYPE.COMP;
pub const D3DSPSM_X2 = D3DSHADER_PARAM_SRCMOD_TYPE.X2;
pub const D3DSPSM_X2NEG = D3DSHADER_PARAM_SRCMOD_TYPE.X2NEG;
pub const D3DSPSM_DZ = D3DSHADER_PARAM_SRCMOD_TYPE.DZ;
pub const D3DSPSM_DW = D3DSHADER_PARAM_SRCMOD_TYPE.DW;
pub const D3DSPSM_ABS = D3DSHADER_PARAM_SRCMOD_TYPE.ABS;
pub const D3DSPSM_ABSNEG = D3DSHADER_PARAM_SRCMOD_TYPE.ABSNEG;
pub const D3DSPSM_NOT = D3DSHADER_PARAM_SRCMOD_TYPE.NOT;
pub const D3DSPSM_FORCE_DWORD = D3DSHADER_PARAM_SRCMOD_TYPE.FORCE_DWORD;
// Minimum-precision hints; @"16" / @"2_8" are quoted identifiers (C names
// start with digits after prefix stripping).
pub const D3DSHADER_MIN_PRECISION = enum(i32) { DEFAULT = 0, @"16" = 1, @"2_8" = 2, };
pub const D3DMP_DEFAULT = D3DSHADER_MIN_PRECISION.DEFAULT;
pub const D3DMP_16 = D3DSHADER_MIN_PRECISION.@"16";
pub const D3DMP_2_8 = D3DSHADER_MIN_PRECISION.@"2_8";
// Higher-order surface (patch) basis / degree / edge-style selectors.
pub const D3DBASISTYPE = enum(i32) { BEZIER = 0, BSPLINE = 1, CATMULL_ROM = 2, FORCE_DWORD = 2147483647, };
pub const D3DBASIS_BEZIER = D3DBASISTYPE.BEZIER;
pub const D3DBASIS_BSPLINE = D3DBASISTYPE.BSPLINE;
pub const D3DBASIS_CATMULL_ROM = D3DBASISTYPE.CATMULL_ROM;
pub const D3DBASIS_FORCE_DWORD = D3DBASISTYPE.FORCE_DWORD;
pub const D3DDEGREETYPE = enum(i32) { LINEAR = 1, QUADRATIC = 2, CUBIC = 3, QUINTIC = 5, FORCE_DWORD = 2147483647, };
pub const D3DDEGREE_LINEAR = D3DDEGREETYPE.LINEAR;
pub const D3DDEGREE_QUADRATIC = D3DDEGREETYPE.QUADRATIC;
pub const D3DDEGREE_CUBIC = D3DDEGREETYPE.CUBIC;
pub const D3DDEGREE_QUINTIC = D3DDEGREETYPE.QUINTIC;
pub const D3DDEGREE_FORCE_DWORD = D3DDEGREETYPE.FORCE_DWORD;
pub const D3DPATCHEDGESTYLE = enum(i32) { DISCRETE = 0, CONTINUOUS = 1, FORCE_DWORD = 2147483647, };
pub const D3DPATCHEDGE_DISCRETE = D3DPATCHEDGESTYLE.DISCRETE;
pub const D3DPATCHEDGE_CONTINUOUS = D3DPATCHEDGESTYLE.CONTINUOUS;
pub const D3DPATCHEDGE_FORCE_DWORD = D3DPATCHEDGESTYLE.FORCE_DWORD;
// State-block capture scopes.
pub const D3DSTATEBLOCKTYPE = enum(i32) { ALL = 1, PIXELSTATE = 2, VERTEXSTATE = 3, FORCE_DWORD = 2147483647, };
pub const D3DSBT_ALL = D3DSTATEBLOCKTYPE.ALL;
pub const D3DSBT_PIXELSTATE = D3DSTATEBLOCKTYPE.PIXELSTATE;
pub const D3DSBT_VERTEXSTATE = D3DSTATEBLOCKTYPE.VERTEXSTATE;
pub const D3DSBT_FORCE_DWORD = D3DSTATEBLOCKTYPE.FORCE_DWORD;
// Vertex blending weight counts; digit-leading members are quoted identifiers.
pub const D3DVERTEXBLENDFLAGS = enum(i32) { DISABLE = 0, @"1WEIGHTS" = 1, @"2WEIGHTS" = 2, @"3WEIGHTS" = 3, TWEENING = 255, @"0WEIGHTS" = 256, FORCE_DWORD = 2147483647, };
pub const D3DVBF_DISABLE = D3DVERTEXBLENDFLAGS.DISABLE;
pub const D3DVBF_1WEIGHTS = D3DVERTEXBLENDFLAGS.@"1WEIGHTS";
pub const D3DVBF_2WEIGHTS = D3DVERTEXBLENDFLAGS.@"2WEIGHTS";
pub const D3DVBF_3WEIGHTS = D3DVERTEXBLENDFLAGS.@"3WEIGHTS";
pub const D3DVBF_TWEENING = D3DVERTEXBLENDFLAGS.TWEENING;
// Declaration continues in the next chunk of the file.
pub const D3DVBF_0WEIGHTS =
// (Continues `pub const D3DVBF_0WEIGHTS = ` from the previous chunk.)
D3DVERTEXBLENDFLAGS.@"0WEIGHTS";
pub const D3DVBF_FORCE_DWORD = D3DVERTEXBLENDFLAGS.FORCE_DWORD;
// Texture coordinate transform flags; PROJECTED (0x100) is OR-combinable with
// a COUNTn value in the C API.
pub const D3DTEXTURETRANSFORMFLAGS = enum(i32) { DISABLE = 0, COUNT1 = 1, COUNT2 = 2, COUNT3 = 3, COUNT4 = 4, PROJECTED = 256, FORCE_DWORD = 2147483647, };
pub const D3DTTFF_DISABLE = D3DTEXTURETRANSFORMFLAGS.DISABLE;
pub const D3DTTFF_COUNT1 = D3DTEXTURETRANSFORMFLAGS.COUNT1;
pub const D3DTTFF_COUNT2 = D3DTEXTURETRANSFORMFLAGS.COUNT2;
pub const D3DTTFF_COUNT3 = D3DTEXTURETRANSFORMFLAGS.COUNT3;
pub const D3DTTFF_COUNT4 = D3DTEXTURETRANSFORMFLAGS.COUNT4;
pub const D3DTTFF_PROJECTED = D3DTEXTURETRANSFORMFLAGS.PROJECTED;
pub const D3DTTFF_FORCE_DWORD = D3DTEXTURETRANSFORMFLAGS.FORCE_DWORD;
// Device driver types. NOTE: this one is enum(u32) while most neighbors are
// enum(i32) — as emitted by the generator; do not "normalize".
pub const D3DDEVTYPE = enum(u32) { HAL = 1, REF = 2, SW = 3, NULLREF = 4, FORCE_DWORD = 2147483647, };
pub const D3DDEVTYPE_HAL = D3DDEVTYPE.HAL;
pub const D3DDEVTYPE_REF = D3DDEVTYPE.REF;
pub const D3DDEVTYPE_SW = D3DDEVTYPE.SW;
pub const D3DDEVTYPE_NULLREF = D3DDEVTYPE.NULLREF;
pub const D3DDEVTYPE_FORCE_DWORD = D3DDEVTYPE.FORCE_DWORD;
// Multisample counts (value == sample count for the @"N_SAMPLES" members).
pub const D3DMULTISAMPLE_TYPE = enum(i32) { NONE = 0, NONMASKABLE = 1, @"2_SAMPLES" = 2, @"3_SAMPLES" = 3, @"4_SAMPLES" = 4, @"5_SAMPLES" = 5, @"6_SAMPLES" = 6, @"7_SAMPLES" = 7, @"8_SAMPLES" = 8, @"9_SAMPLES" = 9, @"10_SAMPLES" = 10, @"11_SAMPLES" = 11, @"12_SAMPLES" = 12, @"13_SAMPLES" = 13, @"14_SAMPLES" = 14, @"15_SAMPLES" = 15, @"16_SAMPLES" = 16, FORCE_DWORD = 2147483647, };
pub const D3DMULTISAMPLE_NONE = D3DMULTISAMPLE_TYPE.NONE;
pub const D3DMULTISAMPLE_NONMASKABLE = D3DMULTISAMPLE_TYPE.NONMASKABLE;
pub const D3DMULTISAMPLE_2_SAMPLES = D3DMULTISAMPLE_TYPE.@"2_SAMPLES";
pub const D3DMULTISAMPLE_3_SAMPLES = D3DMULTISAMPLE_TYPE.@"3_SAMPLES";
pub const D3DMULTISAMPLE_4_SAMPLES = D3DMULTISAMPLE_TYPE.@"4_SAMPLES";
pub const D3DMULTISAMPLE_5_SAMPLES = D3DMULTISAMPLE_TYPE.@"5_SAMPLES";
pub const D3DMULTISAMPLE_6_SAMPLES = D3DMULTISAMPLE_TYPE.@"6_SAMPLES";
pub const D3DMULTISAMPLE_7_SAMPLES = D3DMULTISAMPLE_TYPE.@"7_SAMPLES";
pub const D3DMULTISAMPLE_8_SAMPLES = D3DMULTISAMPLE_TYPE.@"8_SAMPLES";
pub const D3DMULTISAMPLE_9_SAMPLES = D3DMULTISAMPLE_TYPE.@"9_SAMPLES";
pub const D3DMULTISAMPLE_10_SAMPLES = D3DMULTISAMPLE_TYPE.@"10_SAMPLES";
pub const D3DMULTISAMPLE_11_SAMPLES = D3DMULTISAMPLE_TYPE.@"11_SAMPLES";
pub const D3DMULTISAMPLE_12_SAMPLES = D3DMULTISAMPLE_TYPE.@"12_SAMPLES";
pub const D3DMULTISAMPLE_13_SAMPLES = D3DMULTISAMPLE_TYPE.@"13_SAMPLES";
pub const D3DMULTISAMPLE_14_SAMPLES = D3DMULTISAMPLE_TYPE.@"14_SAMPLES";
pub const D3DMULTISAMPLE_15_SAMPLES = D3DMULTISAMPLE_TYPE.@"15_SAMPLES";
pub const D3DMULTISAMPLE_16_SAMPLES = D3DMULTISAMPLE_TYPE.@"16_SAMPLES";
pub const D3DMULTISAMPLE_FORCE_DWORD = D3DMULTISAMPLE_TYPE.FORCE_DWORD;
// Surface/resource pixel formats. The large values (UYVY, DXTn, ...) are
// FOURCC codes; L16 = 81 is out of numeric order exactly as in d3d9types.h.
pub const D3DFORMAT = enum(u32) { UNKNOWN = 0, R8G8B8 = 20, A8R8G8B8 = 21, X8R8G8B8 = 22, R5G6B5 = 23, X1R5G5B5 = 24, A1R5G5B5 = 25, A4R4G4B4 = 26, R3G3B2 = 27, A8 = 28, A8R3G3B2 = 29, X4R4G4B4 = 30, A2B10G10R10 = 31, A8B8G8R8 = 32, X8B8G8R8 = 33, G16R16 = 34, A2R10G10B10 = 35, A16B16G16R16 = 36, A8P8 = 40, P8 = 41, L8 = 50, A8L8 = 51, A4L4 = 52, V8U8 = 60, L6V5U5 = 61, X8L8V8U8 = 62, Q8W8V8U8 = 63, V16U16 = 64, A2W10V10U10 = 67, UYVY = 1498831189, R8G8_B8G8 = 1195525970, YUY2 = 844715353, G8R8_G8B8 = 1111970375, DXT1 = 827611204, DXT2 = 844388420, DXT3 = 861165636, DXT4 = 877942852, DXT5 = 894720068, D16_LOCKABLE = 70, D32 = 71, D15S1 = 73, D24S8 = 75, D24X8 = 77, D24X4S4 = 79, D16 = 80, D32F_LOCKABLE = 82, D24FS8 = 83, D32_LOCKABLE = 84, S8_LOCKABLE = 85, L16 = 81, VERTEXDATA = 100, INDEX16 = 101, INDEX32 = 102, Q16W16V16U16 = 110, MULTI2_ARGB8 = 827606349, R16F = 111, G16R16F = 112, A16B16G16R16F = 113, R32F = 114, G32R32F = 115, A32B32G32R32F = 116, CxV8U8 = 117, A1 = 118, A2B10G10R10_XR_BIAS = 119, BINARYBUFFER = 199, FORCE_DWORD = 2147483647, };
pub const D3DFMT_UNKNOWN = D3DFORMAT.UNKNOWN;
pub const D3DFMT_R8G8B8 = D3DFORMAT.R8G8B8;
pub const D3DFMT_A8R8G8B8 = D3DFORMAT.A8R8G8B8;
pub const D3DFMT_X8R8G8B8 = D3DFORMAT.X8R8G8B8;
pub const D3DFMT_R5G6B5 = D3DFORMAT.R5G6B5;
pub const D3DFMT_X1R5G5B5 = D3DFORMAT.X1R5G5B5;
pub const D3DFMT_A1R5G5B5 = D3DFORMAT.A1R5G5B5;
pub const D3DFMT_A4R4G4B4 = D3DFORMAT.A4R4G4B4;
pub const D3DFMT_R3G3B2 = D3DFORMAT.R3G3B2;
pub const D3DFMT_A8 = D3DFORMAT.A8;
pub const D3DFMT_A8R3G3B2 = D3DFORMAT.A8R3G3B2;
pub const D3DFMT_X4R4G4B4 = D3DFORMAT.X4R4G4B4;
pub const D3DFMT_A2B10G10R10 = D3DFORMAT.A2B10G10R10;
pub const D3DFMT_A8B8G8R8 = D3DFORMAT.A8B8G8R8;
pub const D3DFMT_X8B8G8R8 = D3DFORMAT.X8B8G8R8;
pub const D3DFMT_G16R16 = D3DFORMAT.G16R16;
pub const D3DFMT_A2R10G10B10 = D3DFORMAT.A2R10G10B10;
pub const D3DFMT_A16B16G16R16 = D3DFORMAT.A16B16G16R16;
pub const D3DFMT_A8P8 = D3DFORMAT.A8P8;
pub const D3DFMT_P8 = D3DFORMAT.P8;
pub const D3DFMT_L8 = D3DFORMAT.L8;
pub const D3DFMT_A8L8 = D3DFORMAT.A8L8;
pub const D3DFMT_A4L4 = D3DFORMAT.A4L4;
pub const D3DFMT_V8U8 = D3DFORMAT.V8U8;
pub const D3DFMT_L6V5U5 = D3DFORMAT.L6V5U5;
pub const D3DFMT_X8L8V8U8 = D3DFORMAT.X8L8V8U8;
pub const D3DFMT_Q8W8V8U8 = D3DFORMAT.Q8W8V8U8;
pub const D3DFMT_V16U16 = D3DFORMAT.V16U16;
pub const D3DFMT_A2W10V10U10 = D3DFORMAT.A2W10V10U10;
pub const D3DFMT_UYVY = D3DFORMAT.UYVY;
pub const D3DFMT_R8G8_B8G8 = D3DFORMAT.R8G8_B8G8;
pub const D3DFMT_YUY2 = D3DFORMAT.YUY2;
pub const D3DFMT_G8R8_G8B8 = D3DFORMAT.G8R8_G8B8;
pub const D3DFMT_DXT1 = D3DFORMAT.DXT1;
pub const D3DFMT_DXT2 = D3DFORMAT.DXT2;
pub const D3DFMT_DXT3 = D3DFORMAT.DXT3;
pub const D3DFMT_DXT4 = D3DFORMAT.DXT4;
pub const D3DFMT_DXT5 = D3DFORMAT.DXT5;
pub const D3DFMT_D16_LOCKABLE = D3DFORMAT.D16_LOCKABLE;
pub const D3DFMT_D32 = D3DFORMAT.D32;
pub const D3DFMT_D15S1 = D3DFORMAT.D15S1;
pub const D3DFMT_D24S8 = D3DFORMAT.D24S8;
pub const D3DFMT_D24X8 = D3DFORMAT.D24X8;
pub const D3DFMT_D24X4S4 = D3DFORMAT.D24X4S4;
pub const D3DFMT_D16 = D3DFORMAT.D16;
pub const D3DFMT_D32F_LOCKABLE = D3DFORMAT.D32F_LOCKABLE;
pub const D3DFMT_D24FS8 = D3DFORMAT.D24FS8;
pub const D3DFMT_D32_LOCKABLE = D3DFORMAT.D32_LOCKABLE;
// Declaration continues in the next chunk of the file.
pub const D3DFMT_S8_LOCKABLE =
// (Continues `pub const D3DFMT_S8_LOCKABLE = ` from the previous chunk.)
D3DFORMAT.S8_LOCKABLE;
pub const D3DFMT_L16 = D3DFORMAT.L16;
pub const D3DFMT_VERTEXDATA = D3DFORMAT.VERTEXDATA;
pub const D3DFMT_INDEX16 = D3DFORMAT.INDEX16;
pub const D3DFMT_INDEX32 = D3DFORMAT.INDEX32;
pub const D3DFMT_Q16W16V16U16 = D3DFORMAT.Q16W16V16U16;
pub const D3DFMT_MULTI2_ARGB8 = D3DFORMAT.MULTI2_ARGB8;
pub const D3DFMT_R16F = D3DFORMAT.R16F;
pub const D3DFMT_G16R16F = D3DFORMAT.G16R16F;
pub const D3DFMT_A16B16G16R16F = D3DFORMAT.A16B16G16R16F;
pub const D3DFMT_R32F = D3DFORMAT.R32F;
pub const D3DFMT_G32R32F = D3DFORMAT.G32R32F;
pub const D3DFMT_A32B32G32R32F = D3DFORMAT.A32B32G32R32F;
pub const D3DFMT_CxV8U8 = D3DFORMAT.CxV8U8;
pub const D3DFMT_A1 = D3DFORMAT.A1;
pub const D3DFMT_A2B10G10R10_XR_BIAS = D3DFORMAT.A2B10G10R10_XR_BIAS;
pub const D3DFMT_BINARYBUFFER = D3DFORMAT.BINARYBUFFER;
pub const D3DFMT_FORCE_DWORD = D3DFORMAT.FORCE_DWORD;
// NOTE(review): HWND, BOOL, Guid, HANDLE, HRESULT used below are presumably
// imported from the foundation module earlier in this file — verify there.
// C-layout structs; extern struct guarantees field order and C ABI layout.
pub const D3DDISPLAYMODE = extern struct { Width: u32, Height: u32, RefreshRate: u32, Format: D3DFORMAT, };
pub const D3DDEVICE_CREATION_PARAMETERS = extern struct { AdapterOrdinal: u32, DeviceType: D3DDEVTYPE, hFocusWindow: ?HWND, BehaviorFlags: u32, };
// Swap-chain presentation behaviors.
pub const D3DSWAPEFFECT = enum(u32) { DISCARD = 1, FLIP = 2, COPY = 3, OVERLAY = 4, FLIPEX = 5, FORCE_DWORD = 2147483647, };
pub const D3DSWAPEFFECT_DISCARD = D3DSWAPEFFECT.DISCARD;
pub const D3DSWAPEFFECT_FLIP = D3DSWAPEFFECT.FLIP;
pub const D3DSWAPEFFECT_COPY = D3DSWAPEFFECT.COPY;
pub const D3DSWAPEFFECT_OVERLAY = D3DSWAPEFFECT.OVERLAY;
pub const D3DSWAPEFFECT_FLIPEX = D3DSWAPEFFECT.FLIPEX;
pub const D3DSWAPEFFECT_FORCE_DWORD = D3DSWAPEFFECT.FORCE_DWORD;
// Resource memory pools.
pub const D3DPOOL = enum(u32) { DEFAULT = 0, MANAGED = 1, SYSTEMMEM = 2, SCRATCH = 3, FORCE_DWORD = 2147483647, };
pub const D3DPOOL_DEFAULT = D3DPOOL.DEFAULT;
pub const D3DPOOL_MANAGED = D3DPOOL.MANAGED;
pub const D3DPOOL_SYSTEMMEM = D3DPOOL.SYSTEMMEM;
pub const D3DPOOL_SCRATCH = D3DPOOL.SCRATCH;
pub const D3DPOOL_FORCE_DWORD = D3DPOOL.FORCE_DWORD;
// Device/swap-chain creation parameters (mirrors C D3DPRESENT_PARAMETERS).
pub const D3DPRESENT_PARAMETERS = extern struct { BackBufferWidth: u32, BackBufferHeight: u32, BackBufferFormat: D3DFORMAT, BackBufferCount: u32, MultiSampleType: D3DMULTISAMPLE_TYPE, MultiSampleQuality: u32, SwapEffect: D3DSWAPEFFECT, hDeviceWindow: ?HWND, Windowed: BOOL, EnableAutoDepthStencil: BOOL, AutoDepthStencilFormat: D3DFORMAT, Flags: u32, FullScreen_RefreshRateInHz: u32, PresentationInterval: u32, };
pub const D3DGAMMARAMP = extern struct { red: [256]u16, green: [256]u16, blue: [256]u16, };
pub const D3DBACKBUFFER_TYPE = enum(u32) { MONO = 0, LEFT = 1, RIGHT = 2, FORCE_DWORD = 2147483647, };
pub const D3DBACKBUFFER_TYPE_MONO = D3DBACKBUFFER_TYPE.MONO;
pub const D3DBACKBUFFER_TYPE_LEFT = D3DBACKBUFFER_TYPE.LEFT;
pub const D3DBACKBUFFER_TYPE_RIGHT = D3DBACKBUFFER_TYPE.RIGHT;
pub const D3DBACKBUFFER_TYPE_FORCE_DWORD = D3DBACKBUFFER_TYPE.FORCE_DWORD;
// Resource kinds returned by IDirect3DResource9-style type queries.
pub const D3DRESOURCETYPE = enum(i32) { SURFACE = 1, VOLUME = 2, TEXTURE = 3, VOLUMETEXTURE = 4, CUBETEXTURE = 5, VERTEXBUFFER = 6, INDEXBUFFER = 7, FORCE_DWORD = 2147483647, };
pub const D3DRTYPE_SURFACE = D3DRESOURCETYPE.SURFACE;
pub const D3DRTYPE_VOLUME = D3DRESOURCETYPE.VOLUME;
pub const D3DRTYPE_TEXTURE = D3DRESOURCETYPE.TEXTURE;
pub const D3DRTYPE_VOLUMETEXTURE = D3DRESOURCETYPE.VOLUMETEXTURE;
pub const D3DRTYPE_CUBETEXTURE = D3DRESOURCETYPE.CUBETEXTURE;
pub const D3DRTYPE_VERTEXBUFFER = D3DRESOURCETYPE.VERTEXBUFFER;
pub const D3DRTYPE_INDEXBUFFER = D3DRESOURCETYPE.INDEXBUFFER;
pub const D3DRTYPE_FORCE_DWORD = D3DRESOURCETYPE.FORCE_DWORD;
// Cube-map face indices.
pub const D3DCUBEMAP_FACES = enum(i32) { POSITIVE_X = 0, NEGATIVE_X = 1, POSITIVE_Y = 2, NEGATIVE_Y = 3, POSITIVE_Z = 4, NEGATIVE_Z = 5, FORCE_DWORD = 2147483647, };
pub const D3DCUBEMAP_FACE_POSITIVE_X = D3DCUBEMAP_FACES.POSITIVE_X;
pub const D3DCUBEMAP_FACE_NEGATIVE_X = D3DCUBEMAP_FACES.NEGATIVE_X;
pub const D3DCUBEMAP_FACE_POSITIVE_Y = D3DCUBEMAP_FACES.POSITIVE_Y;
pub const D3DCUBEMAP_FACE_NEGATIVE_Y = D3DCUBEMAP_FACES.NEGATIVE_Y;
pub const D3DCUBEMAP_FACE_POSITIVE_Z = D3DCUBEMAP_FACES.POSITIVE_Z;
pub const D3DCUBEMAP_FACE_NEGATIVE_Z = D3DCUBEMAP_FACES.NEGATIVE_Z;
pub const D3DCUBEMAP_FACE_FORCE_DWORD = D3DCUBEMAP_FACES.FORCE_DWORD;
// Resource descriptor structs (C layout).
pub const D3DVERTEXBUFFER_DESC = extern struct { Format: D3DFORMAT, Type: D3DRESOURCETYPE, Usage: u32, Pool: D3DPOOL, Size: u32, FVF: u32, };
pub const D3DINDEXBUFFER_DESC = extern struct { Format: D3DFORMAT, Type: D3DRESOURCETYPE, Usage: u32, Pool: D3DPOOL, Size: u32, };
pub const D3DSURFACE_DESC = extern struct { Format: D3DFORMAT, Type: D3DRESOURCETYPE, Usage: u32, Pool: D3DPOOL, MultiSampleType: D3DMULTISAMPLE_TYPE, MultiSampleQuality: u32, Width: u32, Height: u32, };
pub const D3DVOLUME_DESC = extern struct { Format: D3DFORMAT, Type: D3DRESOURCETYPE, Usage: u32, Pool: D3DPOOL, Width: u32, Height: u32, Depth: u32, };
// Lock results: pBits is null when not locked; Pitch/RowPitch are in bytes.
pub const D3DLOCKED_RECT = extern struct { Pitch: i32, pBits: ?*anyopaque, };
pub const D3DBOX = extern struct { Left: u32, Top: u32, Right: u32, Bottom: u32, Front: u32, Back: u32, };
pub const D3DLOCKED_BOX = extern struct { RowPitch: i32, SlicePitch: i32, pBits: ?*anyopaque, };
pub const D3DRANGE = extern struct { Offset: u32, Size: u32, };
pub const D3DRECTPATCH_INFO = extern struct { StartVertexOffsetWidth: u32, StartVertexOffsetHeight: u32, Width: u32, Height: u32, Stride: u32, Basis: D3DBASISTYPE, Degree: D3DDEGREETYPE, };
pub const D3DTRIPATCH_INFO = extern struct { StartVertexOffset: u32, NumVertices: u32, Basis: D3DBASISTYPE, Degree: D3DDEGREETYPE, };
pub const D3DRASTER_STATUS = extern struct { InVBlank: BOOL, ScanLine: u32, };
pub const D3DDEBUGMONITORTOKENS = enum(i32) { ENABLE = 0, DISABLE = 1, FORCE_DWORD = 2147483647, };
pub const D3DDMT_ENABLE = D3DDEBUGMONITORTOKENS.ENABLE;
pub const D3DDMT_DISABLE = D3DDEBUGMONITORTOKENS.DISABLE;
pub const D3DDMT_FORCE_DWORD = D3DDEBUGMONITORTOKENS.FORCE_DWORD;
// Asynchronous query kinds; enum body is completed in the next chunk.
pub const D3DQUERYTYPE = enum(i32) { VCACHE = 4, RESOURCEMANAGER = 5, VERTEXSTATS = 6, EVENT = 8, OCCLUSION = 9, TIMESTAMP = 10, TIMESTAMPDISJOINT = 11, TIMESTAMPFREQ = 12, PIPELINETIMINGS = 13, INTERFACETIMINGS = 14, VERTEXTIMINGS = 15,
// (Completes the D3DQUERYTYPE enum begun in the previous chunk.)
PIXELTIMINGS = 16, BANDWIDTHTIMINGS = 17, CACHEUTILIZATION = 18, MEMORYPRESSURE = 19, };
pub const D3DQUERYTYPE_VCACHE = D3DQUERYTYPE.VCACHE;
pub const D3DQUERYTYPE_RESOURCEMANAGER = D3DQUERYTYPE.RESOURCEMANAGER;
pub const D3DQUERYTYPE_VERTEXSTATS = D3DQUERYTYPE.VERTEXSTATS;
pub const D3DQUERYTYPE_EVENT = D3DQUERYTYPE.EVENT;
pub const D3DQUERYTYPE_OCCLUSION = D3DQUERYTYPE.OCCLUSION;
pub const D3DQUERYTYPE_TIMESTAMP = D3DQUERYTYPE.TIMESTAMP;
pub const D3DQUERYTYPE_TIMESTAMPDISJOINT = D3DQUERYTYPE.TIMESTAMPDISJOINT;
pub const D3DQUERYTYPE_TIMESTAMPFREQ = D3DQUERYTYPE.TIMESTAMPFREQ;
pub const D3DQUERYTYPE_PIPELINETIMINGS = D3DQUERYTYPE.PIPELINETIMINGS;
pub const D3DQUERYTYPE_INTERFACETIMINGS = D3DQUERYTYPE.INTERFACETIMINGS;
pub const D3DQUERYTYPE_VERTEXTIMINGS = D3DQUERYTYPE.VERTEXTIMINGS;
pub const D3DQUERYTYPE_PIXELTIMINGS = D3DQUERYTYPE.PIXELTIMINGS;
pub const D3DQUERYTYPE_BANDWIDTHTIMINGS = D3DQUERYTYPE.BANDWIDTHTIMINGS;
pub const D3DQUERYTYPE_CACHEUTILIZATION = D3DQUERYTYPE.CACHEUTILIZATION;
pub const D3DQUERYTYPE_MEMORYPRESSURE = D3DQUERYTYPE.MEMORYPRESSURE;
// Query result payload structs (C layout, filled by GetData-style calls).
pub const D3DRESOURCESTATS = extern struct { bThrashing: BOOL, ApproxBytesDownloaded: u32, NumEvicts: u32, NumVidCreates: u32, LastPri: u32, NumUsed: u32, NumUsedInVidMem: u32, WorkingSet: u32, WorkingSetBytes: u32, TotalManaged: u32, TotalBytes: u32, };
pub const D3DDEVINFO_RESOURCEMANAGER = extern struct { stats: [8]D3DRESOURCESTATS, };
pub const D3DDEVINFO_D3DVERTEXSTATS = extern struct { NumRenderedTriangles: u32, NumExtraClippingTriangles: u32, };
pub const D3DDEVINFO_VCACHE = extern struct { Pattern: u32, OptMethod: u32, CacheSize: u32, MagicNumber: u32, };
pub const D3DDEVINFO_D3D9PIPELINETIMINGS = extern struct { VertexProcessingTimePercent: f32, PixelProcessingTimePercent: f32, OtherGPUProcessingTimePercent: f32, GPUIdleTimePercent: f32, };
pub const D3DDEVINFO_D3D9INTERFACETIMINGS = extern struct { WaitingForGPUToUseApplicationResourceTimePercent: f32, WaitingForGPUToAcceptMoreCommandsTimePercent: f32, WaitingForGPUToStayWithinLatencyTimePercent: f32, WaitingForGPUExclusiveResourceTimePercent: f32, WaitingForGPUOtherTimePercent: f32, };
pub const D3DDEVINFO_D3D9STAGETIMINGS = extern struct { MemoryProcessingPercent: f32, ComputationProcessingPercent: f32, };
pub const D3DDEVINFO_D3D9BANDWIDTHTIMINGS = extern struct { MaxBandwidthUtilized: f32, FrontEndUploadMemoryUtilizedPercent: f32, VertexRateUtilizedPercent: f32, TriangleSetupRateUtilizedPercent: f32, FillRateUtilizedPercent: f32, };
pub const D3DDEVINFO_D3D9CACHEUTILIZATION = extern struct { TextureCacheHitRate: f32, PostTransformVertexCacheHitRate: f32, };
// ComposeRects blit operations.
pub const D3DCOMPOSERECTSOP = enum(i32) { COPY = 1, OR = 2, AND = 3, NEG = 4, FORCE_DWORD = 2147483647, };
pub const D3DCOMPOSERECTS_COPY = D3DCOMPOSERECTSOP.COPY;
pub const D3DCOMPOSERECTS_OR = D3DCOMPOSERECTSOP.OR;
pub const D3DCOMPOSERECTS_AND = D3DCOMPOSERECTSOP.AND;
pub const D3DCOMPOSERECTS_NEG = D3DCOMPOSERECTSOP.NEG;
pub const D3DCOMPOSERECTS_FORCE_DWORD = D3DCOMPOSERECTSOP.FORCE_DWORD;
pub const D3DCOMPOSERECTDESC = extern struct { X: u16, Y: u16, Width: u16, Height: u16, };
pub const D3DCOMPOSERECTDESTINATION = extern struct { SrcRectIndex: u16, Reserved: u16, X: i16, Y: i16, };
// D3D9Ex display-mode extensions.
pub const D3DSCANLINEORDERING = enum(i32) { UNKNOWN = 0, PROGRESSIVE = 1, INTERLACED = 2, };
pub const D3DSCANLINEORDERING_UNKNOWN = D3DSCANLINEORDERING.UNKNOWN;
pub const D3DSCANLINEORDERING_PROGRESSIVE = D3DSCANLINEORDERING.PROGRESSIVE;
pub const D3DSCANLINEORDERING_INTERLACED = D3DSCANLINEORDERING.INTERLACED;
pub const D3DDISPLAYMODEEX = extern struct { Size: u32, Width: u32, Height: u32, RefreshRate: u32, Format: D3DFORMAT, ScanLineOrdering: D3DSCANLINEORDERING, };
pub const D3DDISPLAYMODEFILTER = extern struct { Size: u32, Format: D3DFORMAT, ScanLineOrdering: D3DSCANLINEORDERING, };
// Display rotation in degrees; digit-leading members are quoted identifiers.
pub const D3DDISPLAYROTATION = enum(i32) { IDENTITY = 1, @"90" = 2, @"180" = 3, @"270" = 4, };
pub const D3DDISPLAYROTATION_IDENTITY = D3DDISPLAYROTATION.IDENTITY;
// Declaration continues in the next chunk of the file.
pub const
// Tail of the D3DDISPLAYROTATION convenience aliases; the `pub const` for the
// first alias is on the preceding line, outside this span.
D3DDISPLAYROTATION_90 = D3DDISPLAYROTATION.@"90";
pub const D3DDISPLAYROTATION_180 = D3DDISPLAYROTATION.@"180";
pub const D3DDISPLAYROTATION_270 = D3DDISPLAYROTATION.@"270";

/// 16-byte OMAC message-authentication tag carried by the authenticated-channel
/// query/configure messages below (mirrors D3D_OMAC from the Windows SDK).
pub const D3D_OMAC = extern struct {
    Omac: [16]u8,
};

/// Kind of authenticated channel used for content protection.
// NOTE(review): the binding generator appears to strip the shared
// "D3DAUTHENTICATEDCHANNEL_D" prefix from the SDK value names, which is why
// DRIVER_SOFTWARE/DRIVER_HARDWARE show up as RIVER_* here; the aliases just
// below restore the original SDK spellings — do not "fix" the member names.
pub const D3DAUTHENTICATEDCHANNELTYPE = enum(i32) {
    @"3D9" = 1,
    RIVER_SOFTWARE = 2,
    RIVER_HARDWARE = 3,
};
pub const D3DAUTHENTICATEDCHANNEL_D3D9 = D3DAUTHENTICATEDCHANNELTYPE.@"3D9";
pub const D3DAUTHENTICATEDCHANNEL_DRIVER_SOFTWARE = D3DAUTHENTICATEDCHANNELTYPE.RIVER_SOFTWARE;
pub const D3DAUTHENTICATEDCHANNEL_DRIVER_HARDWARE = D3DAUTHENTICATEDCHANNELTYPE.RIVER_HARDWARE;

/// Common header of every authenticated-channel query request.
pub const D3DAUTHENTICATEDCHANNEL_QUERY_INPUT = extern struct {
    QueryType: Guid,
    hChannel: ?HANDLE,
    SequenceNumber: u32,
};

/// Common header of every authenticated-channel query response; `omac` holds
/// the authentication tag and `ReturnCode` the per-query HRESULT.
pub const D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT = extern struct {
    omac: D3D_OMAC,
    QueryType: Guid,
    hChannel: ?HANDLE,
    SequenceNumber: u32,
    ReturnCode: HRESULT,
};

/// Content-protection flag word. The C original is a union of an anonymous
/// bitfield struct and a raw `Value`; Zig has no C bitfields, so the packed
/// bits are exposed only as the opaque `_bitfield` word here.
pub const D3DAUTHENTICATEDCHANNEL_PROTECTION_FLAGS = extern struct {
    Anonymous: extern union {
        Anonymous: extern struct {
            _bitfield: u32,
        },
        Value: u32,
    },
};

/// Response payload for the protection-flags query.
pub const D3DAUTHENTICATEDCHANNEL_QUERYPROTECTION_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    ProtectionFlags: D3DAUTHENTICATEDCHANNEL_PROTECTION_FLAGS,
};

/// Response payload for the channel-type query.
pub const D3DAUTHENTICATEDCHANNEL_QUERYCHANNELTYPE_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    ChannelType: D3DAUTHENTICATEDCHANNELTYPE,
};

/// Response payload for the device-handle query.
pub const D3DAUTHENTICATEDCHANNEL_QUERYDEVICEHANDLE_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    DeviceHandle: ?HANDLE,
};

/// Request payload for the crypto-session query, keyed by a DXVA2 decode
/// device handle.
pub const D3DAUTHENTICATEDCHANNEL_QUERYCRYPTOSESSION_INPUT = extern struct {
    Input: D3DAUTHENTICATEDCHANNEL_QUERY_INPUT,
    DXVA2DecodeHandle: ?HANDLE,
};

/// Response payload for the crypto-session query.
pub const D3DAUTHENTICATEDCHANNEL_QUERYCRYPTOSESSION_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    DXVA2DecodeHandle: ?HANDLE,
    CryptoSessionHandle: ?HANDLE,
    DeviceHandle: ?HANDLE,
};

/// Response payload: number of processes allowed to open restricted shared
/// resources.
pub const D3DAUTHENTICATEDCHANNEL_QUERYRESTRICTEDSHAREDRESOURCEPROCESSCOUNT_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    NumRestrictedSharedResourceProcesses: u32,
};

/// Request payload: select one restricted-shared-resource process by index.
pub const D3DAUTHENTICATEDCHANNEL_QUERYRESTRICTEDSHAREDRESOURCEPROCESS_INPUT = extern struct {
    Input: D3DAUTHENTICATEDCHANNEL_QUERY_INPUT,
    ProcessIndex: u32,
};

/// How a process is identified in the restricted-shared-resource query below.
pub const D3DAUTHENTICATEDCHANNEL_PROCESSIDENTIFIERTYPE = enum(i32) {
    UNKNOWN = 0,
    DWM = 1,
    HANDLE = 2,
};
pub const PROCESSIDTYPE_UNKNOWN = D3DAUTHENTICATEDCHANNEL_PROCESSIDENTIFIERTYPE.UNKNOWN;
pub const PROCESSIDTYPE_DWM = D3DAUTHENTICATEDCHANNEL_PROCESSIDENTIFIERTYPE.DWM;
pub const PROCESSIDTYPE_HANDLE = D3DAUTHENTICATEDCHANNEL_PROCESSIDENTIFIERTYPE.HANDLE;

/// Response payload for the restricted-shared-resource process query.
// NOTE: `ProcessIdentifer` (sic) reproduces the misspelling present in the
// Windows SDK headers; it is part of the generated ABI surface — keep as-is.
pub const D3DAUTHENTICATEDCHANNEL_QUERYRESTRICTEDSHAREDRESOURCEPROCESS_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    ProcessIndex: u32,
    ProcessIdentifer: D3DAUTHENTICATEDCHANNEL_PROCESSIDENTIFIERTYPE,
    ProcessHandle: ?HANDLE,
};

/// Response payload: number of unrestricted protected shared resources.
pub const D3DAUTHENTICATEDCHANNEL_QUERYUNRESTRICTEDPROTECTEDSHAREDRESOURCECOUNT_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    NumUnrestrictedProtectedSharedResources: u32,
};

/// Request payload: count output IDs for a device/crypto-session pair.
pub const D3DAUTHENTICATEDCHANNEL_QUERYOUTPUTIDCOUNT_INPUT = extern struct {
    Input: D3DAUTHENTICATEDCHANNEL_QUERY_INPUT,
    DeviceHandle: ?HANDLE,
    CryptoSessionHandle: ?HANDLE,
};

/// Response payload for the output-ID count query.
pub const D3DAUTHENTICATEDCHANNEL_QUERYOUTPUTIDCOUNT_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    DeviceHandle: ?HANDLE,
    CryptoSessionHandle: ?HANDLE,
    NumOutputIDs: u32,
};

/// Request payload: select one output ID by index.
pub const D3DAUTHENTICATEDCHANNEL_QUERYOUTPUTID_INPUT = extern struct {
    Input: D3DAUTHENTICATEDCHANNEL_QUERY_INPUT,
    DeviceHandle: ?HANDLE,
    CryptoSessionHandle: ?HANDLE,
    OutputIDIndex: u32,
};

/// Bus type plus bus-implementation modifiers. Values below 65536 are the bus
/// type proper; the IMPL_MODIFIER_* values occupy bits 16 and up (the enum
/// continues on the next line, outside this span).
pub const D3DBUSTYPE = enum(i32) {
    TYPE_OTHER = 0,
    TYPE_PCI = 1,
    TYPE_PCIX = 2,
    TYPE_PCIEXPRESS = 3,
    TYPE_AGP = 4,
    IMPL_MODIFIER_INSIDE_OF_CHIPSET = 65536,
    IMPL_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_CHIP = 131072,
    IMPL_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_SOCKET = 196608,
    IMPL_MODIFIER_DAUGHTER_BOARD_CONNECTOR = 262144,
    // Tail of the D3DBUSTYPE enum started on the previous line.
    IMPL_MODIFIER_DAUGHTER_BOARD_CONNECTOR_INSIDE_OF_NUAE = 327680,
    IMPL_MODIFIER_NON_STANDARD = -2147483648,
};

// Aliases restoring the original SDK names (the generator strips the common
// "D3DBUS" prefix from the enum member names above).
pub const D3DBUSTYPE_OTHER = D3DBUSTYPE.TYPE_OTHER;
pub const D3DBUSTYPE_PCI = D3DBUSTYPE.TYPE_PCI;
pub const D3DBUSTYPE_PCIX = D3DBUSTYPE.TYPE_PCIX;
pub const D3DBUSTYPE_PCIEXPRESS = D3DBUSTYPE.TYPE_PCIEXPRESS;
pub const D3DBUSTYPE_AGP = D3DBUSTYPE.TYPE_AGP;
pub const D3DBUSIMPL_MODIFIER_INSIDE_OF_CHIPSET = D3DBUSTYPE.IMPL_MODIFIER_INSIDE_OF_CHIPSET;
pub const D3DBUSIMPL_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_CHIP = D3DBUSTYPE.IMPL_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_CHIP;
pub const D3DBUSIMPL_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_SOCKET = D3DBUSTYPE.IMPL_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_SOCKET;
pub const D3DBUSIMPL_MODIFIER_DAUGHTER_BOARD_CONNECTOR = D3DBUSTYPE.IMPL_MODIFIER_DAUGHTER_BOARD_CONNECTOR;
pub const D3DBUSIMPL_MODIFIER_DAUGHTER_BOARD_CONNECTOR_INSIDE_OF_NUAE = D3DBUSTYPE.IMPL_MODIFIER_DAUGHTER_BOARD_CONNECTOR_INSIDE_OF_NUAE;
pub const D3DBUSIMPL_MODIFIER_NON_STANDARD = D3DBUSTYPE.IMPL_MODIFIER_NON_STANDARD;

/// Response payload for the bus-type query.
pub const D3DAUTHENTICATEDCHANNEL_QUERYINFOBUSTYPE_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    BusType: D3DBUSTYPE,
    bAccessibleInContiguousBlocks: BOOL,
    bAccessibleInNonContiguousBlocks: BOOL,
};

/// Response payload: number of eviction-encryption GUIDs.
pub const D3DAUTHENTICATEDCHANNEL_QUERYEVICTIONENCRYPTIONGUIDCOUNT_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    NumEncryptionGuids: u32,
};

/// Request payload: select one eviction-encryption GUID by index.
pub const D3DAUTHENTICATEDCHANNEL_QUERYEVICTIONENCRYPTIONGUID_INPUT = extern struct {
    Input: D3DAUTHENTICATEDCHANNEL_QUERY_INPUT,
    EncryptionGuidIndex: u32,
};

/// Response payload for the eviction-encryption GUID query.
pub const D3DAUTHENTICATEDCHANNEL_QUERYEVICTIONENCRYPTIONGUID_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    EncryptionGuidIndex: u32,
    EncryptionGuid: Guid,
};

/// Response payload: encryption GUID in effect for uncompressed data.
pub const D3DAUTHENTICATEDCHANNEL_QUERYUNCOMPRESSEDENCRYPTIONLEVEL_OUTPUT = extern struct {
    Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT,
    EncryptionGuid: Guid,
};

/// Common header of every authenticated-channel configure request.
pub const D3DAUTHENTICATEDCHANNEL_CONFIGURE_INPUT = extern struct {
    omac: D3D_OMAC,
    ConfigureType: Guid,
    hChannel: ?HANDLE,
    SequenceNumber: u32,
};

/// Common configure response; `ReturnCode` carries the per-command HRESULT.
pub const D3DAUTHENTICATEDCHANNEL_CONFIGURE_OUTPUT = extern struct {
    omac: D3D_OMAC,
    ConfigureType: Guid,
    hChannel: ?HANDLE,
    SequenceNumber: u32,
    ReturnCode: HRESULT,
};

/// Configure command: set the starting query/configure sequence numbers.
pub const D3DAUTHENTICATEDCHANNEL_CONFIGUREINITIALIZE = extern struct {
    Parameters: D3DAUTHENTICATEDCHANNEL_CONFIGURE_INPUT,
    StartSequenceQuery: u32,
    StartSequenceConfigure: u32,
};

/// Configure command: set the protection flags.
pub const D3DAUTHENTICATEDCHANNEL_CONFIGUREPROTECTION = extern struct {
    Parameters: D3DAUTHENTICATEDCHANNEL_CONFIGURE_INPUT,
    Protections: D3DAUTHENTICATEDCHANNEL_PROTECTION_FLAGS,
};

/// Configure command: associate a crypto session with a DXVA2 decode device.
pub const D3DAUTHENTICATEDCHANNEL_CONFIGURECRYPTOSESSION = extern struct {
    Parameters: D3DAUTHENTICATEDCHANNEL_CONFIGURE_INPUT,
    DXVA2DecodeHandle: ?HANDLE,
    CryptoSessionHandle: ?HANDLE,
    DeviceHandle: ?HANDLE,
};

/// Configure command: grant or deny a process access to shared resources.
// NOTE: `ProcessIdentiferType` (sic) reproduces the Windows SDK misspelling;
// keep as-is, it is part of the generated ABI surface.
pub const D3DAUTHENTICATEDCHANNEL_CONFIGURESHAREDRESOURCE = extern struct {
    Parameters: D3DAUTHENTICATEDCHANNEL_CONFIGURE_INPUT,
    ProcessIdentiferType: D3DAUTHENTICATEDCHANNEL_PROCESSIDENTIFIERTYPE,
    ProcessHandle: ?HANDLE,
    AllowAccess: BOOL,
};

/// Configure command: set the encryption GUID for uncompressed data.
pub const D3DAUTHENTICATEDCHANNEL_CONFIGUREUNCOMPRESSEDENCRYPTION = extern struct {
    Parameters: D3DAUTHENTICATEDCHANNEL_CONFIGURE_INPUT,
    EncryptionGuid: Guid,
};

/// Describes the repeating skip/encrypt byte pattern of a partially
/// encrypted buffer.
pub const D3DENCRYPTED_BLOCK_INFO = extern struct {
    NumEncryptedBytesAtBeginning: u32,
    NumBytesInSkipPattern: u32,
    NumBytesInEncryptPattern: u32,
};

/// Vertex-shader 2.0 capability block (embedded in D3DCAPS9).
pub const D3DVSHADERCAPS2_0 = extern struct {
    Caps: u32,
    DynamicFlowControlDepth: i32,
    NumTemps: i32,
    StaticFlowControlDepth: i32,
};

/// Pixel-shader 2.0 capability block (embedded in D3DCAPS9).
pub const D3DPSHADERCAPS2_0 = extern struct {
    Caps: u32,
    DynamicFlowControlDepth: i32,
    NumTemps: i32,
    StaticFlowControlDepth: i32,
    NumInstructionSlots: i32,
};

/// Device capability report (first part; the struct continues on the next
/// line, outside this span). Filled in by the GetDeviceCaps methods below.
pub const D3DCAPS9 = extern struct {
    DeviceType: D3DDEVTYPE,
    AdapterOrdinal: u32,
    Caps: u32,
    Caps2: u32,
    Caps3: u32,
    PresentationIntervals: u32,
    CursorCaps: u32,
    DevCaps: u32,
    PrimitiveMiscCaps: u32,
    RasterCaps: u32,
    ZCmpCaps: u32,
    SrcBlendCaps: u32,
    DestBlendCaps: u32,
    AlphaCmpCaps: u32,
    ShadeCaps: u32,
    TextureCaps: u32,
    // Tail of the D3DCAPS9 struct started on the previous line.
    TextureFilterCaps: u32,
    CubeTextureFilterCaps: u32,
    VolumeTextureFilterCaps: u32,
    TextureAddressCaps: u32,
    VolumeTextureAddressCaps: u32,
    LineCaps: u32,
    MaxTextureWidth: u32,
    MaxTextureHeight: u32,
    MaxVolumeExtent: u32,
    MaxTextureRepeat: u32,
    MaxTextureAspectRatio: u32,
    MaxAnisotropy: u32,
    MaxVertexW: f32,
    // Guard-band extents are floats, matching the SDK layout.
    GuardBandLeft: f32,
    GuardBandTop: f32,
    GuardBandRight: f32,
    GuardBandBottom: f32,
    ExtentsAdjust: f32,
    StencilCaps: u32,
    FVFCaps: u32,
    TextureOpCaps: u32,
    MaxTextureBlendStages: u32,
    MaxSimultaneousTextures: u32,
    VertexProcessingCaps: u32,
    MaxActiveLights: u32,
    MaxUserClipPlanes: u32,
    MaxVertexBlendMatrices: u32,
    MaxVertexBlendMatrixIndex: u32,
    MaxPointSize: f32,
    MaxPrimitiveCount: u32,
    MaxVertexIndex: u32,
    MaxStreams: u32,
    MaxStreamStride: u32,
    VertexShaderVersion: u32,
    MaxVertexShaderConst: u32,
    PixelShaderVersion: u32,
    PixelShader1xMaxValue: f32,
    DevCaps2: u32,
    MaxNpatchTessellationLevel: f32,
    Reserved5: u32,
    MasterAdapterOrdinal: u32,
    AdapterOrdinalInGroup: u32,
    NumberOfAdaptersInGroup: u32,
    DeclTypes: u32,
    NumSimultaneousRTs: u32,
    StretchRectFilterCaps: u32,
    VS20Caps: D3DVSHADERCAPS2_0,
    PS20Caps: D3DPSHADERCAPS2_0,
    VertexTextureFilterCaps: u32,
    MaxVShaderInstructionsExecuted: u32,
    MaxPShaderInstructionsExecuted: u32,
    MaxVertexShader30InstructionSlots: u32,
    MaxPixelShader30InstructionSlots: u32,
};

// COM interface identifier for IDirect3D9.
const IID_IDirect3D9_Value = Guid.initString("81bdcbca-64d4-426d-ae8d-ad0147f4275c");
pub const IID_IDirect3D9 = &IID_IDirect3D9_Value;

/// IDirect3D9 COM interface: the root Direct3D 9 object used to enumerate
/// adapters/modes, check device capabilities, and create rendering devices.
/// The VTable field order is ABI-frozen (must match the d3d9.h vtable
/// exactly); call through the IDirect3D9_* wrappers provided by MethodMixin.
pub const IDirect3D9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        RegisterSoftwareDevice: fn(
            self: *const IDirect3D9,
            pInitializeFunction: ?*anyopaque,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAdapterCount: fn(
            self: *const IDirect3D9,
        ) callconv(@import("std").os.windows.WINAPI) u32,
        GetAdapterIdentifier: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            Flags: u32,
            pIdentifier: ?*D3DADAPTER_IDENTIFIER9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAdapterModeCount: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            Format: D3DFORMAT,
        ) callconv(@import("std").os.windows.WINAPI) u32,
        EnumAdapterModes: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            Format: D3DFORMAT,
            Mode: u32,
            pMode: ?*D3DDISPLAYMODE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAdapterDisplayMode: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            pMode: ?*D3DDISPLAYMODE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CheckDeviceType: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            DevType: D3DDEVTYPE,
            AdapterFormat: D3DFORMAT,
            BackBufferFormat: D3DFORMAT,
            bWindowed: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CheckDeviceFormat: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            DeviceType: D3DDEVTYPE,
            AdapterFormat: D3DFORMAT,
            Usage: u32,
            RType: D3DRESOURCETYPE,
            CheckFormat: D3DFORMAT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CheckDeviceMultiSampleType: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            DeviceType: D3DDEVTYPE,
            SurfaceFormat: D3DFORMAT,
            Windowed: BOOL,
            MultiSampleType: D3DMULTISAMPLE_TYPE,
            pQualityLevels: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CheckDepthStencilMatch: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            DeviceType: D3DDEVTYPE,
            AdapterFormat: D3DFORMAT,
            RenderTargetFormat: D3DFORMAT,
            DepthStencilFormat: D3DFORMAT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CheckDeviceFormatConversion: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            DeviceType: D3DDEVTYPE,
            SourceFormat: D3DFORMAT,
            TargetFormat: D3DFORMAT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDeviceCaps: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            DeviceType: D3DDEVTYPE,
            pCaps: ?*D3DCAPS9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAdapterMonitor: fn(
            self: *const IDirect3D9,
            Adapter: u32,
        ) callconv(@import("std").os.windows.WINAPI) ?HMONITOR,
        CreateDevice: fn(
            self: *const IDirect3D9,
            Adapter: u32,
            DeviceType: D3DDEVTYPE,
            hFocusWindow: ?HWND,
            BehaviorFlags: u32,
            pPresentationParameters: ?*D3DPRESENT_PARAMETERS,
            ppReturnedDeviceInterface: ?*?*IDirect3DDevice9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin that injects inline wrapper methods (plus the inherited IUnknown
    // methods) into any type T embedding this interface's vtable layout.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_RegisterSoftwareDevice(self: *const T, pInitializeFunction: ?*anyopaque) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).RegisterSoftwareDevice(@ptrCast(*const IDirect3D9, self), pInitializeFunction);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_GetAdapterCount(self: *const T) callconv(.Inline) u32 {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).GetAdapterCount(@ptrCast(*const IDirect3D9, self));
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_GetAdapterIdentifier(self: *const T, Adapter: u32, Flags: u32, pIdentifier: ?*D3DADAPTER_IDENTIFIER9) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).GetAdapterIdentifier(@ptrCast(*const IDirect3D9, self), Adapter, Flags, pIdentifier);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_GetAdapterModeCount(self: *const T, Adapter: u32, Format: D3DFORMAT) callconv(.Inline) u32 {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).GetAdapterModeCount(@ptrCast(*const IDirect3D9, self), Adapter, Format);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_EnumAdapterModes(self: *const T, Adapter: u32, Format: D3DFORMAT, Mode: u32, pMode: ?*D3DDISPLAYMODE) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).EnumAdapterModes(@ptrCast(*const IDirect3D9, self), Adapter, Format, Mode, pMode);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_GetAdapterDisplayMode(self: *const T, Adapter: u32, pMode: ?*D3DDISPLAYMODE) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).GetAdapterDisplayMode(@ptrCast(*const IDirect3D9, self), Adapter, pMode);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_CheckDeviceType(self: *const T, Adapter: u32, DevType: D3DDEVTYPE, AdapterFormat: D3DFORMAT, BackBufferFormat: D3DFORMAT, bWindowed: BOOL) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).CheckDeviceType(@ptrCast(*const IDirect3D9, self), Adapter, DevType, AdapterFormat, BackBufferFormat, bWindowed);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_CheckDeviceFormat(self: *const T, Adapter: u32, DeviceType: D3DDEVTYPE, AdapterFormat: D3DFORMAT, Usage: u32, RType: D3DRESOURCETYPE, CheckFormat: D3DFORMAT) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).CheckDeviceFormat(@ptrCast(*const IDirect3D9, self), Adapter, DeviceType, AdapterFormat, Usage, RType, CheckFormat);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_CheckDeviceMultiSampleType(self: *const T, Adapter: u32, DeviceType: D3DDEVTYPE, SurfaceFormat: D3DFORMAT, Windowed: BOOL, MultiSampleType: D3DMULTISAMPLE_TYPE, pQualityLevels: ?*u32) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).CheckDeviceMultiSampleType(@ptrCast(*const IDirect3D9, self), Adapter, DeviceType, SurfaceFormat, Windowed, MultiSampleType, pQualityLevels);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_CheckDepthStencilMatch(self: *const T, Adapter: u32, DeviceType: D3DDEVTYPE, AdapterFormat: D3DFORMAT, RenderTargetFormat: D3DFORMAT, DepthStencilFormat: D3DFORMAT) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).CheckDepthStencilMatch(@ptrCast(*const IDirect3D9, self), Adapter, DeviceType, AdapterFormat, RenderTargetFormat, DepthStencilFormat);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_CheckDeviceFormatConversion(self: *const T, Adapter: u32, DeviceType: D3DDEVTYPE, SourceFormat: D3DFORMAT, TargetFormat: D3DFORMAT) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).CheckDeviceFormatConversion(@ptrCast(*const IDirect3D9, self), Adapter, DeviceType, SourceFormat, TargetFormat);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_GetDeviceCaps(self: *const T, Adapter: u32, DeviceType: D3DDEVTYPE, pCaps: ?*D3DCAPS9) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).GetDeviceCaps(@ptrCast(*const IDirect3D9, self), Adapter, DeviceType, pCaps);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_GetAdapterMonitor(self: *const T, Adapter: u32) callconv(.Inline) ?HMONITOR {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).GetAdapterMonitor(@ptrCast(*const IDirect3D9, self), Adapter);
            }
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDirect3D9_CreateDevice(self: *const T, Adapter: u32, DeviceType: D3DDEVTYPE, hFocusWindow: ?HWND, BehaviorFlags: u32, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, ppReturnedDeviceInterface: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDirect3D9.VTable, self.vtable).CreateDevice(@ptrCast(*const IDirect3D9, self), Adapter, DeviceType, hFocusWindow, BehaviorFlags, pPresentationParameters, ppReturnedDeviceInterface);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};

// COM interface identifier for IDirect3DDevice9.
const IID_IDirect3DDevice9_Value = Guid.initString("d0223b96-bf7a-43fd-92bd-a43b0d82b9eb");
pub const IID_IDirect3DDevice9 = &IID_IDirect3DDevice9_Value;

/// IDirect3DDevice9 COM interface (opening fragment only; the vtable fields
/// continue on the following lines, outside this span).
pub const IDirect3DDevice9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
TestCooperativeLevel: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAvailableTextureMem: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) u32, EvictManagedResources: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDirect3D: fn( self: *const IDirect3DDevice9, ppD3D9: ?*?*IDirect3D9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDeviceCaps: fn( self: *const IDirect3DDevice9, pCaps: ?*D3DCAPS9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDisplayMode: fn( self: *const IDirect3DDevice9, iSwapChain: u32, pMode: ?*D3DDISPLAYMODE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCreationParameters: fn( self: *const IDirect3DDevice9, pParameters: ?*D3DDEVICE_CREATION_PARAMETERS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetCursorProperties: fn( self: *const IDirect3DDevice9, XHotSpot: u32, YHotSpot: u32, pCursorBitmap: ?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetCursorPosition: fn( self: *const IDirect3DDevice9, X: i32, Y: i32, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) void, ShowCursor: fn( self: *const IDirect3DDevice9, bShow: BOOL, ) callconv(@import("std").os.windows.WINAPI) BOOL, CreateAdditionalSwapChain: fn( self: *const IDirect3DDevice9, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, pSwapChain: ?*?*IDirect3DSwapChain9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSwapChain: fn( self: *const IDirect3DDevice9, iSwapChain: u32, pSwapChain: ?*?*IDirect3DSwapChain9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetNumberOfSwapChains: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) u32, Reset: fn( self: *const IDirect3DDevice9, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Present: fn( self: *const IDirect3DDevice9, pSourceRect: ?*const RECT, 
pDestRect: ?*const RECT, hDestWindowOverride: ?HWND, pDirtyRegion: ?*const RGNDATA, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetBackBuffer: fn( self: *const IDirect3DDevice9, iSwapChain: u32, iBackBuffer: u32, Type: D3DBACKBUFFER_TYPE, ppBackBuffer: ?*?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRasterStatus: fn( self: *const IDirect3DDevice9, iSwapChain: u32, pRasterStatus: ?*D3DRASTER_STATUS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetDialogBoxMode: fn( self: *const IDirect3DDevice9, bEnableDialogs: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetGammaRamp: fn( self: *const IDirect3DDevice9, iSwapChain: u32, Flags: u32, pRamp: ?*const D3DGAMMARAMP, ) callconv(@import("std").os.windows.WINAPI) void, GetGammaRamp: fn( self: *const IDirect3DDevice9, iSwapChain: u32, pRamp: ?*D3DGAMMARAMP, ) callconv(@import("std").os.windows.WINAPI) void, CreateTexture: fn( self: *const IDirect3DDevice9, Width: u32, Height: u32, Levels: u32, Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppTexture: ?*?*IDirect3DTexture9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateVolumeTexture: fn( self: *const IDirect3DDevice9, Width: u32, Height: u32, Depth: u32, Levels: u32, Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppVolumeTexture: ?*?*IDirect3DVolumeTexture9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateCubeTexture: fn( self: *const IDirect3DDevice9, EdgeLength: u32, Levels: u32, Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppCubeTexture: ?*?*IDirect3DCubeTexture9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateVertexBuffer: fn( self: *const IDirect3DDevice9, Length: u32, Usage: u32, FVF: u32, Pool: D3DPOOL, ppVertexBuffer: ?*?*IDirect3DVertexBuffer9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateIndexBuffer: fn( self: *const IDirect3DDevice9, Length: u32, 
Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppIndexBuffer: ?*?*IDirect3DIndexBuffer9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateRenderTarget: fn( self: *const IDirect3DDevice9, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Lockable: BOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateDepthStencilSurface: fn( self: *const IDirect3DDevice9, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Discard: BOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UpdateSurface: fn( self: *const IDirect3DDevice9, pSourceSurface: ?*IDirect3DSurface9, pSourceRect: ?*const RECT, pDestinationSurface: ?*IDirect3DSurface9, pDestPoint: ?*const POINT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UpdateTexture: fn( self: *const IDirect3DDevice9, pSourceTexture: ?*IDirect3DBaseTexture9, pDestinationTexture: ?*IDirect3DBaseTexture9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRenderTargetData: fn( self: *const IDirect3DDevice9, pRenderTarget: ?*IDirect3DSurface9, pDestSurface: ?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFrontBufferData: fn( self: *const IDirect3DDevice9, iSwapChain: u32, pDestSurface: ?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, StretchRect: fn( self: *const IDirect3DDevice9, pSourceSurface: ?*IDirect3DSurface9, pSourceRect: ?*const RECT, pDestSurface: ?*IDirect3DSurface9, pDestRect: ?*const RECT, Filter: D3DTEXTUREFILTERTYPE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ColorFill: fn( self: *const IDirect3DDevice9, pSurface: ?*IDirect3DSurface9, pRect: ?*const RECT, color: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateOffscreenPlainSurface: fn( self: *const 
IDirect3DDevice9, Width: u32, Height: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRenderTarget: fn( self: *const IDirect3DDevice9, RenderTargetIndex: u32, pRenderTarget: ?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRenderTarget: fn( self: *const IDirect3DDevice9, RenderTargetIndex: u32, ppRenderTarget: ?*?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetDepthStencilSurface: fn( self: *const IDirect3DDevice9, pNewZStencil: ?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDepthStencilSurface: fn( self: *const IDirect3DDevice9, ppZStencilSurface: ?*?*IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, BeginScene: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, EndScene: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clear: fn( self: *const IDirect3DDevice9, Count: u32, pRects: ?*const D3DRECT, Flags: u32, Color: u32, Z: f32, Stencil: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetTransform: fn( self: *const IDirect3DDevice9, State: D3DTRANSFORMSTATETYPE, pMatrix: ?*const D3DMATRIX, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetTransform: fn( self: *const IDirect3DDevice9, State: D3DTRANSFORMSTATETYPE, pMatrix: ?*D3DMATRIX, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MultiplyTransform: fn( self: *const IDirect3DDevice9, param0: D3DTRANSFORMSTATETYPE, param1: ?*const D3DMATRIX, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetViewport: fn( self: *const IDirect3DDevice9, pViewport: ?*const D3DVIEWPORT9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetViewport: fn( self: *const IDirect3DDevice9, pViewport: ?*D3DVIEWPORT9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetMaterial: fn( self: *const 
IDirect3DDevice9, pMaterial: ?*const D3DMATERIAL9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMaterial: fn( self: *const IDirect3DDevice9, pMaterial: ?*D3DMATERIAL9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetLight: fn( self: *const IDirect3DDevice9, Index: u32, param1: ?*const D3DLIGHT9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetLight: fn( self: *const IDirect3DDevice9, Index: u32, param1: ?*D3DLIGHT9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LightEnable: fn( self: *const IDirect3DDevice9, Index: u32, Enable: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetLightEnable: fn( self: *const IDirect3DDevice9, Index: u32, pEnable: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetClipPlane: fn( self: *const IDirect3DDevice9, Index: u32, pPlane: ?*const f32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetClipPlane: fn( self: *const IDirect3DDevice9, Index: u32, pPlane: ?*f32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRenderState: fn( self: *const IDirect3DDevice9, State: D3DRENDERSTATETYPE, Value: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRenderState: fn( self: *const IDirect3DDevice9, State: D3DRENDERSTATETYPE, pValue: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateStateBlock: fn( self: *const IDirect3DDevice9, Type: D3DSTATEBLOCKTYPE, ppSB: ?*?*IDirect3DStateBlock9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, BeginStateBlock: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, EndStateBlock: fn( self: *const IDirect3DDevice9, ppSB: ?*?*IDirect3DStateBlock9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetClipStatus: fn( self: *const IDirect3DDevice9, pClipStatus: ?*const D3DCLIPSTATUS9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetClipStatus: fn( self: *const IDirect3DDevice9, pClipStatus: ?*D3DCLIPSTATUS9, ) callconv(@import("std").os.windows.WINAPI) 
HRESULT, GetTexture: fn( self: *const IDirect3DDevice9, Stage: u32, ppTexture: ?*?*IDirect3DBaseTexture9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetTexture: fn( self: *const IDirect3DDevice9, Stage: u32, pTexture: ?*IDirect3DBaseTexture9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetTextureStageState: fn( self: *const IDirect3DDevice9, Stage: u32, Type: D3DTEXTURESTAGESTATETYPE, pValue: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetTextureStageState: fn( self: *const IDirect3DDevice9, Stage: u32, Type: D3DTEXTURESTAGESTATETYPE, Value: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSamplerState: fn( self: *const IDirect3DDevice9, Sampler: u32, Type: D3DSAMPLERSTATETYPE, pValue: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSamplerState: fn( self: *const IDirect3DDevice9, Sampler: u32, Type: D3DSAMPLERSTATETYPE, Value: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ValidateDevice: fn( self: *const IDirect3DDevice9, pNumPasses: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPaletteEntries: fn( self: *const IDirect3DDevice9, PaletteNumber: u32, pEntries: ?*const PALETTEENTRY, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPaletteEntries: fn( self: *const IDirect3DDevice9, PaletteNumber: u32, pEntries: ?*PALETTEENTRY, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetCurrentTexturePalette: fn( self: *const IDirect3DDevice9, PaletteNumber: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCurrentTexturePalette: fn( self: *const IDirect3DDevice9, PaletteNumber: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetScissorRect: fn( self: *const IDirect3DDevice9, pRect: ?*const RECT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetScissorRect: fn( self: *const IDirect3DDevice9, pRect: ?*RECT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSoftwareVertexProcessing: fn( self: *const IDirect3DDevice9, bSoftware: 
BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSoftwareVertexProcessing: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) BOOL, SetNPatchMode: fn( self: *const IDirect3DDevice9, nSegments: f32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetNPatchMode: fn( self: *const IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) f32, DrawPrimitive: fn( self: *const IDirect3DDevice9, PrimitiveType: D3DPRIMITIVETYPE, StartVertex: u32, PrimitiveCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DrawIndexedPrimitive: fn( self: *const IDirect3DDevice9, param0: D3DPRIMITIVETYPE, BaseVertexIndex: i32, MinVertexIndex: u32, NumVertices: u32, startIndex: u32, primCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DrawPrimitiveUP: fn( self: *const IDirect3DDevice9, PrimitiveType: D3DPRIMITIVETYPE, PrimitiveCount: u32, pVertexStreamZeroData: ?*const anyopaque, VertexStreamZeroStride: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DrawIndexedPrimitiveUP: fn( self: *const IDirect3DDevice9, PrimitiveType: D3DPRIMITIVETYPE, MinVertexIndex: u32, NumVertices: u32, PrimitiveCount: u32, pIndexData: ?*const anyopaque, IndexDataFormat: D3DFORMAT, pVertexStreamZeroData: ?*const anyopaque, VertexStreamZeroStride: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ProcessVertices: fn( self: *const IDirect3DDevice9, SrcStartIndex: u32, DestIndex: u32, VertexCount: u32, pDestBuffer: ?*IDirect3DVertexBuffer9, pVertexDecl: ?*IDirect3DVertexDeclaration9, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateVertexDeclaration: fn( self: *const IDirect3DDevice9, pVertexElements: ?*const D3DVERTEXELEMENT9, ppDecl: ?*?*IDirect3DVertexDeclaration9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetVertexDeclaration: fn( self: *const IDirect3DDevice9, pDecl: ?*IDirect3DVertexDeclaration9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVertexDeclaration: fn( 
self: *const IDirect3DDevice9, ppDecl: ?*?*IDirect3DVertexDeclaration9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetFVF: fn( self: *const IDirect3DDevice9, FVF: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFVF: fn( self: *const IDirect3DDevice9, pFVF: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateVertexShader: fn( self: *const IDirect3DDevice9, pFunction: ?*const u32, ppShader: ?*?*IDirect3DVertexShader9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetVertexShader: fn( self: *const IDirect3DDevice9, pShader: ?*IDirect3DVertexShader9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVertexShader: fn( self: *const IDirect3DDevice9, ppShader: ?*?*IDirect3DVertexShader9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetVertexShaderConstantF: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*const f32, Vector4fCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVertexShaderConstantF: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*f32, Vector4fCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetVertexShaderConstantI: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*const i32, Vector4iCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVertexShaderConstantI: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*i32, Vector4iCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetVertexShaderConstantB: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*const BOOL, BoolCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetVertexShaderConstantB: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*BOOL, BoolCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetStreamSource: fn( self: *const IDirect3DDevice9, StreamNumber: u32, pStreamData: ?*IDirect3DVertexBuffer9, OffsetInBytes: 
u32, Stride: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetStreamSource: fn( self: *const IDirect3DDevice9, StreamNumber: u32, ppStreamData: ?*?*IDirect3DVertexBuffer9, pOffsetInBytes: ?*u32, pStride: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetStreamSourceFreq: fn( self: *const IDirect3DDevice9, StreamNumber: u32, Setting: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetStreamSourceFreq: fn( self: *const IDirect3DDevice9, StreamNumber: u32, pSetting: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetIndices: fn( self: *const IDirect3DDevice9, pIndexData: ?*IDirect3DIndexBuffer9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetIndices: fn( self: *const IDirect3DDevice9, ppIndexData: ?*?*IDirect3DIndexBuffer9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreatePixelShader: fn( self: *const IDirect3DDevice9, pFunction: ?*const u32, ppShader: ?*?*IDirect3DPixelShader9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPixelShader: fn( self: *const IDirect3DDevice9, pShader: ?*IDirect3DPixelShader9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPixelShader: fn( self: *const IDirect3DDevice9, ppShader: ?*?*IDirect3DPixelShader9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPixelShaderConstantF: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*const f32, Vector4fCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPixelShaderConstantF: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*f32, Vector4fCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPixelShaderConstantI: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*const i32, Vector4iCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPixelShaderConstantI: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*i32, Vector4iCount: u32, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, SetPixelShaderConstantB: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*const BOOL, BoolCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPixelShaderConstantB: fn( self: *const IDirect3DDevice9, StartRegister: u32, pConstantData: ?*BOOL, BoolCount: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DrawRectPatch: fn( self: *const IDirect3DDevice9, Handle: u32, pNumSegs: ?*const f32, pRectPatchInfo: ?*const D3DRECTPATCH_INFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DrawTriPatch: fn( self: *const IDirect3DDevice9, Handle: u32, pNumSegs: ?*const f32, pTriPatchInfo: ?*const D3DTRIPATCH_INFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DeletePatch: fn( self: *const IDirect3DDevice9, Handle: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateQuery: fn( self: *const IDirect3DDevice9, Type: D3DQUERYTYPE, ppQuery: ?*?*IDirect3DQuery9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_TestCooperativeLevel(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).TestCooperativeLevel(@ptrCast(*const IDirect3DDevice9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetAvailableTextureMem(self: *const T) callconv(.Inline) u32 { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetAvailableTextureMem(@ptrCast(*const IDirect3DDevice9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_EvictManagedResources(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, 
self.vtable).EvictManagedResources(@ptrCast(*const IDirect3DDevice9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetDirect3D(self: *const T, ppD3D9: ?*?*IDirect3D9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetDirect3D(@ptrCast(*const IDirect3DDevice9, self), ppD3D9); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetDeviceCaps(self: *const T, pCaps: ?*D3DCAPS9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetDeviceCaps(@ptrCast(*const IDirect3DDevice9, self), pCaps); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetDisplayMode(self: *const T, iSwapChain: u32, pMode: ?*D3DDISPLAYMODE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetDisplayMode(@ptrCast(*const IDirect3DDevice9, self), iSwapChain, pMode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetCreationParameters(self: *const T, pParameters: ?*D3DDEVICE_CREATION_PARAMETERS) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetCreationParameters(@ptrCast(*const IDirect3DDevice9, self), pParameters); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetCursorProperties(self: *const T, XHotSpot: u32, YHotSpot: u32, pCursorBitmap: ?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetCursorProperties(@ptrCast(*const IDirect3DDevice9, self), XHotSpot, YHotSpot, pCursorBitmap); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetCursorPosition(self: *const T, X: i32, Y: i32, Flags: u32) callconv(.Inline) void { return @ptrCast(*const 
IDirect3DDevice9.VTable, self.vtable).SetCursorPosition(@ptrCast(*const IDirect3DDevice9, self), X, Y, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_ShowCursor(self: *const T, bShow: BOOL) callconv(.Inline) BOOL { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).ShowCursor(@ptrCast(*const IDirect3DDevice9, self), bShow); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateAdditionalSwapChain(self: *const T, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, pSwapChain: ?*?*IDirect3DSwapChain9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateAdditionalSwapChain(@ptrCast(*const IDirect3DDevice9, self), pPresentationParameters, pSwapChain); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetSwapChain(self: *const T, iSwapChain: u32, pSwapChain: ?*?*IDirect3DSwapChain9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetSwapChain(@ptrCast(*const IDirect3DDevice9, self), iSwapChain, pSwapChain); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetNumberOfSwapChains(self: *const T) callconv(.Inline) u32 { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetNumberOfSwapChains(@ptrCast(*const IDirect3DDevice9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_Reset(self: *const T, pPresentationParameters: ?*D3DPRESENT_PARAMETERS) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).Reset(@ptrCast(*const IDirect3DDevice9, self), pPresentationParameters); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_Present(self: *const T, pSourceRect: ?*const RECT, pDestRect: ?*const RECT, 
hDestWindowOverride: ?HWND, pDirtyRegion: ?*const RGNDATA) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).Present(@ptrCast(*const IDirect3DDevice9, self), pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetBackBuffer(self: *const T, iSwapChain: u32, iBackBuffer: u32, Type: D3DBACKBUFFER_TYPE, ppBackBuffer: ?*?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetBackBuffer(@ptrCast(*const IDirect3DDevice9, self), iSwapChain, iBackBuffer, Type, ppBackBuffer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetRasterStatus(self: *const T, iSwapChain: u32, pRasterStatus: ?*D3DRASTER_STATUS) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetRasterStatus(@ptrCast(*const IDirect3DDevice9, self), iSwapChain, pRasterStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetDialogBoxMode(self: *const T, bEnableDialogs: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetDialogBoxMode(@ptrCast(*const IDirect3DDevice9, self), bEnableDialogs); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetGammaRamp(self: *const T, iSwapChain: u32, Flags: u32, pRamp: ?*const D3DGAMMARAMP) callconv(.Inline) void { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetGammaRamp(@ptrCast(*const IDirect3DDevice9, self), iSwapChain, Flags, pRamp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetGammaRamp(self: *const T, iSwapChain: u32, pRamp: ?*D3DGAMMARAMP) callconv(.Inline) void { return @ptrCast(*const IDirect3DDevice9.VTable, 
self.vtable).GetGammaRamp(@ptrCast(*const IDirect3DDevice9, self), iSwapChain, pRamp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateTexture(self: *const T, Width: u32, Height: u32, Levels: u32, Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppTexture: ?*?*IDirect3DTexture9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateTexture(@ptrCast(*const IDirect3DDevice9, self), Width, Height, Levels, Usage, Format, Pool, ppTexture, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateVolumeTexture(self: *const T, Width: u32, Height: u32, Depth: u32, Levels: u32, Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppVolumeTexture: ?*?*IDirect3DVolumeTexture9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateVolumeTexture(@ptrCast(*const IDirect3DDevice9, self), Width, Height, Depth, Levels, Usage, Format, Pool, ppVolumeTexture, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateCubeTexture(self: *const T, EdgeLength: u32, Levels: u32, Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppCubeTexture: ?*?*IDirect3DCubeTexture9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateCubeTexture(@ptrCast(*const IDirect3DDevice9, self), EdgeLength, Levels, Usage, Format, Pool, ppCubeTexture, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateVertexBuffer(self: *const T, Length: u32, Usage: u32, FVF: u32, Pool: D3DPOOL, ppVertexBuffer: ?*?*IDirect3DVertexBuffer9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, 
self.vtable).CreateVertexBuffer(@ptrCast(*const IDirect3DDevice9, self), Length, Usage, FVF, Pool, ppVertexBuffer, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateIndexBuffer(self: *const T, Length: u32, Usage: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppIndexBuffer: ?*?*IDirect3DIndexBuffer9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateIndexBuffer(@ptrCast(*const IDirect3DDevice9, self), Length, Usage, Format, Pool, ppIndexBuffer, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateRenderTarget(self: *const T, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Lockable: BOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateRenderTarget(@ptrCast(*const IDirect3DDevice9, self), Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateDepthStencilSurface(self: *const T, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Discard: BOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateDepthStencilSurface(@ptrCast(*const IDirect3DDevice9, self), Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_UpdateSurface(self: *const T, pSourceSurface: ?*IDirect3DSurface9, pSourceRect: ?*const RECT, pDestinationSurface: ?*IDirect3DSurface9, pDestPoint: ?*const POINT) 
callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).UpdateSurface(@ptrCast(*const IDirect3DDevice9, self), pSourceSurface, pSourceRect, pDestinationSurface, pDestPoint); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_UpdateTexture(self: *const T, pSourceTexture: ?*IDirect3DBaseTexture9, pDestinationTexture: ?*IDirect3DBaseTexture9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).UpdateTexture(@ptrCast(*const IDirect3DDevice9, self), pSourceTexture, pDestinationTexture); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetRenderTargetData(self: *const T, pRenderTarget: ?*IDirect3DSurface9, pDestSurface: ?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetRenderTargetData(@ptrCast(*const IDirect3DDevice9, self), pRenderTarget, pDestSurface); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetFrontBufferData(self: *const T, iSwapChain: u32, pDestSurface: ?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetFrontBufferData(@ptrCast(*const IDirect3DDevice9, self), iSwapChain, pDestSurface); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_StretchRect(self: *const T, pSourceSurface: ?*IDirect3DSurface9, pSourceRect: ?*const RECT, pDestSurface: ?*IDirect3DSurface9, pDestRect: ?*const RECT, Filter: D3DTEXTUREFILTERTYPE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).StretchRect(@ptrCast(*const IDirect3DDevice9, self), pSourceSurface, pSourceRect, pDestSurface, pDestRect, Filter); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_ColorFill(self: *const T, pSurface: 
?*IDirect3DSurface9, pRect: ?*const RECT, color: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).ColorFill(@ptrCast(*const IDirect3DDevice9, self), pSurface, pRect, color); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateOffscreenPlainSurface(self: *const T, Width: u32, Height: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateOffscreenPlainSurface(@ptrCast(*const IDirect3DDevice9, self), Width, Height, Format, Pool, ppSurface, pSharedHandle); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetRenderTarget(self: *const T, RenderTargetIndex: u32, pRenderTarget: ?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetRenderTarget(@ptrCast(*const IDirect3DDevice9, self), RenderTargetIndex, pRenderTarget); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetRenderTarget(self: *const T, RenderTargetIndex: u32, ppRenderTarget: ?*?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetRenderTarget(@ptrCast(*const IDirect3DDevice9, self), RenderTargetIndex, ppRenderTarget); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetDepthStencilSurface(self: *const T, pNewZStencil: ?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetDepthStencilSurface(@ptrCast(*const IDirect3DDevice9, self), pNewZStencil); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetDepthStencilSurface(self: *const T, ppZStencilSurface: ?*?*IDirect3DSurface9) 
callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetDepthStencilSurface(@ptrCast(*const IDirect3DDevice9, self), ppZStencilSurface); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_BeginScene(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).BeginScene(@ptrCast(*const IDirect3DDevice9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_EndScene(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).EndScene(@ptrCast(*const IDirect3DDevice9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_Clear(self: *const T, Count: u32, pRects: ?*const D3DRECT, Flags: u32, Color: u32, Z: f32, Stencil: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).Clear(@ptrCast(*const IDirect3DDevice9, self), Count, pRects, Flags, Color, Z, Stencil); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetTransform(self: *const T, State: D3DTRANSFORMSTATETYPE, pMatrix: ?*const D3DMATRIX) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetTransform(@ptrCast(*const IDirect3DDevice9, self), State, pMatrix); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetTransform(self: *const T, State: D3DTRANSFORMSTATETYPE, pMatrix: ?*D3DMATRIX) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetTransform(@ptrCast(*const IDirect3DDevice9, self), State, pMatrix); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_MultiplyTransform(self: *const T, param0: D3DTRANSFORMSTATETYPE, param1: ?*const D3DMATRIX) 
callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).MultiplyTransform(@ptrCast(*const IDirect3DDevice9, self), param0, param1); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetViewport(self: *const T, pViewport: ?*const D3DVIEWPORT9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetViewport(@ptrCast(*const IDirect3DDevice9, self), pViewport); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetViewport(self: *const T, pViewport: ?*D3DVIEWPORT9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetViewport(@ptrCast(*const IDirect3DDevice9, self), pViewport); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetMaterial(self: *const T, pMaterial: ?*const D3DMATERIAL9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetMaterial(@ptrCast(*const IDirect3DDevice9, self), pMaterial); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetMaterial(self: *const T, pMaterial: ?*D3DMATERIAL9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetMaterial(@ptrCast(*const IDirect3DDevice9, self), pMaterial); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetLight(self: *const T, Index: u32, param1: ?*const D3DLIGHT9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetLight(@ptrCast(*const IDirect3DDevice9, self), Index, param1); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetLight(self: *const T, Index: u32, param1: ?*D3DLIGHT9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, 
self.vtable).GetLight(@ptrCast(*const IDirect3DDevice9, self), Index, param1); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_LightEnable(self: *const T, Index: u32, Enable: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).LightEnable(@ptrCast(*const IDirect3DDevice9, self), Index, Enable); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetLightEnable(self: *const T, Index: u32, pEnable: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetLightEnable(@ptrCast(*const IDirect3DDevice9, self), Index, pEnable); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetClipPlane(self: *const T, Index: u32, pPlane: ?*const f32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetClipPlane(@ptrCast(*const IDirect3DDevice9, self), Index, pPlane); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetClipPlane(self: *const T, Index: u32, pPlane: ?*f32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetClipPlane(@ptrCast(*const IDirect3DDevice9, self), Index, pPlane); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetRenderState(self: *const T, State: D3DRENDERSTATETYPE, Value: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetRenderState(@ptrCast(*const IDirect3DDevice9, self), State, Value); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetRenderState(self: *const T, State: D3DRENDERSTATETYPE, pValue: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetRenderState(@ptrCast(*const 
IDirect3DDevice9, self), State, pValue); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_CreateStateBlock(self: *const T, Type: D3DSTATEBLOCKTYPE, ppSB: ?*?*IDirect3DStateBlock9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateStateBlock(@ptrCast(*const IDirect3DDevice9, self), Type, ppSB); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_BeginStateBlock(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).BeginStateBlock(@ptrCast(*const IDirect3DDevice9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_EndStateBlock(self: *const T, ppSB: ?*?*IDirect3DStateBlock9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).EndStateBlock(@ptrCast(*const IDirect3DDevice9, self), ppSB); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_SetClipStatus(self: *const T, pClipStatus: ?*const D3DCLIPSTATUS9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetClipStatus(@ptrCast(*const IDirect3DDevice9, self), pClipStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetClipStatus(self: *const T, pClipStatus: ?*D3DCLIPSTATUS9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetClipStatus(@ptrCast(*const IDirect3DDevice9, self), pClipStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9_GetTexture(self: *const T, Stage: u32, ppTexture: ?*?*IDirect3DBaseTexture9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetTexture(@ptrCast(*const IDirect3DDevice9, self), Stage, ppTexture); } // NOTE: method 
// is namespaced with interface name to avoid conflicts for now
        // Each wrapper below forwards through the COM vtable: it casts the opaque
        // self/vtable to the concrete interface type (pre-0.11 two-arg @ptrCast)
        // and invokes the corresponding WINAPI function pointer.
        pub fn IDirect3DDevice9_SetTexture(self: *const T, Stage: u32, pTexture: ?*IDirect3DBaseTexture9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetTexture(@ptrCast(*const IDirect3DDevice9, self), Stage, pTexture);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetTextureStageState(self: *const T, Stage: u32, Type: D3DTEXTURESTAGESTATETYPE, pValue: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetTextureStageState(@ptrCast(*const IDirect3DDevice9, self), Stage, Type, pValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetTextureStageState(self: *const T, Stage: u32, Type: D3DTEXTURESTAGESTATETYPE, Value: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetTextureStageState(@ptrCast(*const IDirect3DDevice9, self), Stage, Type, Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetSamplerState(self: *const T, Sampler: u32, Type: D3DSAMPLERSTATETYPE, pValue: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetSamplerState(@ptrCast(*const IDirect3DDevice9, self), Sampler, Type, pValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetSamplerState(self: *const T, Sampler: u32, Type: D3DSAMPLERSTATETYPE, Value: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetSamplerState(@ptrCast(*const IDirect3DDevice9, self), Sampler, Type, Value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_ValidateDevice(self: *const T, pNumPasses: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).ValidateDevice(@ptrCast(*const IDirect3DDevice9, self), pNumPasses);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetPaletteEntries(self: *const T, PaletteNumber: u32, pEntries: ?*const PALETTEENTRY) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetPaletteEntries(@ptrCast(*const IDirect3DDevice9, self), PaletteNumber, pEntries);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetPaletteEntries(self: *const T, PaletteNumber: u32, pEntries: ?*PALETTEENTRY) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetPaletteEntries(@ptrCast(*const IDirect3DDevice9, self), PaletteNumber, pEntries);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetCurrentTexturePalette(self: *const T, PaletteNumber: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetCurrentTexturePalette(@ptrCast(*const IDirect3DDevice9, self), PaletteNumber);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetCurrentTexturePalette(self: *const T, PaletteNumber: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetCurrentTexturePalette(@ptrCast(*const IDirect3DDevice9, self), PaletteNumber);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetScissorRect(self: *const T, pRect: ?*const RECT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetScissorRect(@ptrCast(*const IDirect3DDevice9, self), pRect);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetScissorRect(self: *const T, pRect: ?*RECT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetScissorRect(@ptrCast(*const IDirect3DDevice9, self), pRect);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetSoftwareVertexProcessing(self: *const T, bSoftware: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetSoftwareVertexProcessing(@ptrCast(*const IDirect3DDevice9, self), bSoftware);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        // Returns BOOL directly (no HRESULT), mirroring the underlying vtable slot.
        pub fn IDirect3DDevice9_GetSoftwareVertexProcessing(self: *const T) callconv(.Inline) BOOL {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetSoftwareVertexProcessing(@ptrCast(*const IDirect3DDevice9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetNPatchMode(self: *const T, nSegments: f32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetNPatchMode(@ptrCast(*const IDirect3DDevice9, self), nSegments);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetNPatchMode(self: *const T) callconv(.Inline) f32 {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetNPatchMode(@ptrCast(*const IDirect3DDevice9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_DrawPrimitive(self: *const T, PrimitiveType: D3DPRIMITIVETYPE, StartVertex: u32, PrimitiveCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).DrawPrimitive(@ptrCast(*const IDirect3DDevice9, self), PrimitiveType, StartVertex, PrimitiveCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_DrawIndexedPrimitive(self: *const T, param0: D3DPRIMITIVETYPE, BaseVertexIndex: i32, MinVertexIndex: u32, NumVertices: u32, startIndex: u32, primCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).DrawIndexedPrimitive(@ptrCast(*const IDirect3DDevice9, self), param0, BaseVertexIndex, MinVertexIndex, NumVertices, startIndex, primCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_DrawPrimitiveUP(self: *const T, PrimitiveType: D3DPRIMITIVETYPE, PrimitiveCount: u32, pVertexStreamZeroData: ?*const anyopaque, VertexStreamZeroStride: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).DrawPrimitiveUP(@ptrCast(*const IDirect3DDevice9, self), PrimitiveType, PrimitiveCount, pVertexStreamZeroData, VertexStreamZeroStride);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_DrawIndexedPrimitiveUP(self: *const T, PrimitiveType: D3DPRIMITIVETYPE, MinVertexIndex: u32, NumVertices: u32, PrimitiveCount: u32, pIndexData: ?*const anyopaque, IndexDataFormat: D3DFORMAT, pVertexStreamZeroData: ?*const anyopaque, VertexStreamZeroStride: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).DrawIndexedPrimitiveUP(@ptrCast(*const IDirect3DDevice9, self), PrimitiveType, MinVertexIndex, NumVertices, PrimitiveCount, pIndexData, IndexDataFormat, pVertexStreamZeroData, VertexStreamZeroStride);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_ProcessVertices(self: *const T, SrcStartIndex: u32, DestIndex: u32, VertexCount: u32, pDestBuffer: ?*IDirect3DVertexBuffer9, pVertexDecl: ?*IDirect3DVertexDeclaration9, Flags: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).ProcessVertices(@ptrCast(*const IDirect3DDevice9, self), SrcStartIndex, DestIndex, VertexCount, pDestBuffer, pVertexDecl, Flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_CreateVertexDeclaration(self: *const T, pVertexElements: ?*const D3DVERTEXELEMENT9, ppDecl: ?*?*IDirect3DVertexDeclaration9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateVertexDeclaration(@ptrCast(*const IDirect3DDevice9, self), pVertexElements, ppDecl);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetVertexDeclaration(self: *const T, pDecl: ?*IDirect3DVertexDeclaration9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetVertexDeclaration(@ptrCast(*const IDirect3DDevice9, self), pDecl);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetVertexDeclaration(self: *const T, ppDecl: ?*?*IDirect3DVertexDeclaration9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetVertexDeclaration(@ptrCast(*const IDirect3DDevice9, self), ppDecl);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetFVF(self: *const T, FVF: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetFVF(@ptrCast(*const IDirect3DDevice9, self), FVF);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetFVF(self: *const T, pFVF: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetFVF(@ptrCast(*const IDirect3DDevice9, self), pFVF);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_CreateVertexShader(self: *const T, pFunction: ?*const u32, ppShader: ?*?*IDirect3DVertexShader9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateVertexShader(@ptrCast(*const IDirect3DDevice9, self), pFunction, ppShader);
        }
        // NOTE: method is namespaced with interface name to avoid
// conflicts for now
        // Vertex-shader / stream-source wrappers: each forwards through the COM
        // vtable via the pre-0.11 two-argument @ptrCast, passing arguments through
        // unchanged.
        pub fn IDirect3DDevice9_SetVertexShader(self: *const T, pShader: ?*IDirect3DVertexShader9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetVertexShader(@ptrCast(*const IDirect3DDevice9, self), pShader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetVertexShader(self: *const T, ppShader: ?*?*IDirect3DVertexShader9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetVertexShader(@ptrCast(*const IDirect3DDevice9, self), ppShader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetVertexShaderConstantF(self: *const T, StartRegister: u32, pConstantData: ?*const f32, Vector4fCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetVertexShaderConstantF(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4fCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetVertexShaderConstantF(self: *const T, StartRegister: u32, pConstantData: ?*f32, Vector4fCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetVertexShaderConstantF(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4fCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetVertexShaderConstantI(self: *const T, StartRegister: u32, pConstantData: ?*const i32, Vector4iCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetVertexShaderConstantI(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4iCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetVertexShaderConstantI(self: *const T, StartRegister: u32, pConstantData: ?*i32, Vector4iCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetVertexShaderConstantI(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4iCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetVertexShaderConstantB(self: *const T, StartRegister: u32, pConstantData: ?*const BOOL, BoolCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetVertexShaderConstantB(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, BoolCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetVertexShaderConstantB(self: *const T, StartRegister: u32, pConstantData: ?*BOOL, BoolCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetVertexShaderConstantB(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, BoolCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetStreamSource(self: *const T, StreamNumber: u32, pStreamData: ?*IDirect3DVertexBuffer9, OffsetInBytes: u32, Stride: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetStreamSource(@ptrCast(*const IDirect3DDevice9, self), StreamNumber, pStreamData, OffsetInBytes, Stride);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetStreamSource(self: *const T, StreamNumber: u32, ppStreamData: ?*?*IDirect3DVertexBuffer9, pOffsetInBytes: ?*u32, pStride: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetStreamSource(@ptrCast(*const IDirect3DDevice9, self), StreamNumber, ppStreamData, pOffsetInBytes, pStride);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetStreamSourceFreq(self: *const T, StreamNumber: u32, Setting: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetStreamSourceFreq(@ptrCast(*const IDirect3DDevice9, self), StreamNumber, Setting);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetStreamSourceFreq(self: *const T, StreamNumber: u32, pSetting: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetStreamSourceFreq(@ptrCast(*const IDirect3DDevice9, self), StreamNumber, pSetting);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetIndices(self: *const T, pIndexData: ?*IDirect3DIndexBuffer9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetIndices(@ptrCast(*const IDirect3DDevice9, self), pIndexData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetIndices(self: *const T, ppIndexData: ?*?*IDirect3DIndexBuffer9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetIndices(@ptrCast(*const IDirect3DDevice9, self), ppIndexData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_CreatePixelShader(self: *const T, pFunction: ?*const u32, ppShader: ?*?*IDirect3DPixelShader9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreatePixelShader(@ptrCast(*const IDirect3DDevice9, self), pFunction, ppShader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetPixelShader(self: *const T, pShader: ?*IDirect3DPixelShader9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetPixelShader(@ptrCast(*const IDirect3DDevice9, self), pShader);
        }
        // NOTE: method is namespaced with interface name
// to avoid conflicts for now
        // Pixel-shader / patch / query wrappers; last methods of the
        // IDirect3DDevice9 mixin.
        pub fn IDirect3DDevice9_GetPixelShader(self: *const T, ppShader: ?*?*IDirect3DPixelShader9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetPixelShader(@ptrCast(*const IDirect3DDevice9, self), ppShader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetPixelShaderConstantF(self: *const T, StartRegister: u32, pConstantData: ?*const f32, Vector4fCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetPixelShaderConstantF(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4fCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetPixelShaderConstantF(self: *const T, StartRegister: u32, pConstantData: ?*f32, Vector4fCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetPixelShaderConstantF(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4fCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetPixelShaderConstantI(self: *const T, StartRegister: u32, pConstantData: ?*const i32, Vector4iCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetPixelShaderConstantI(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4iCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetPixelShaderConstantI(self: *const T, StartRegister: u32, pConstantData: ?*i32, Vector4iCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetPixelShaderConstantI(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, Vector4iCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_SetPixelShaderConstantB(self: *const T, StartRegister: u32, pConstantData: ?*const BOOL, BoolCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).SetPixelShaderConstantB(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, BoolCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_GetPixelShaderConstantB(self: *const T, StartRegister: u32, pConstantData: ?*BOOL, BoolCount: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).GetPixelShaderConstantB(@ptrCast(*const IDirect3DDevice9, self), StartRegister, pConstantData, BoolCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_DrawRectPatch(self: *const T, Handle: u32, pNumSegs: ?*const f32, pRectPatchInfo: ?*const D3DRECTPATCH_INFO) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).DrawRectPatch(@ptrCast(*const IDirect3DDevice9, self), Handle, pNumSegs, pRectPatchInfo);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_DrawTriPatch(self: *const T, Handle: u32, pNumSegs: ?*const f32, pTriPatchInfo: ?*const D3DTRIPATCH_INFO) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).DrawTriPatch(@ptrCast(*const IDirect3DDevice9, self), Handle, pNumSegs, pTriPatchInfo);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_DeletePatch(self: *const T, Handle: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).DeletePatch(@ptrCast(*const IDirect3DDevice9, self), Handle);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DDevice9_CreateQuery(self: *const T, Type: D3DQUERYTYPE, ppQuery: ?*?*IDirect3DQuery9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DDevice9.VTable, self.vtable).CreateQuery(@ptrCast(*const IDirect3DDevice9, self), Type, ppQuery);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DStateBlock9: IUnknown-derived COM interface; the struct holds only
// a vtable pointer, wrapper methods come from MethodMixin.
const IID_IDirect3DStateBlock9_Value = Guid.initString("b07c4fe5-310d-4ba8-a23c-4f0f206f218b");
pub const IID_IDirect3DStateBlock9 = &IID_IDirect3DStateBlock9_Value;
pub const IDirect3DStateBlock9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDevice: fn(
            self: *const IDirect3DStateBlock9,
            ppDevice: ?*?*IDirect3DDevice9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Capture: fn(
            self: *const IDirect3DStateBlock9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Apply: fn(
            self: *const IDirect3DStateBlock9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DStateBlock9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DStateBlock9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DStateBlock9, self), ppDevice);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DStateBlock9_Capture(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DStateBlock9.VTable, self.vtable).Capture(@ptrCast(*const IDirect3DStateBlock9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DStateBlock9_Apply(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DStateBlock9.VTable, self.vtable).Apply(@ptrCast(*const IDirect3DStateBlock9, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DSwapChain9: IUnknown-derived COM interface for a swap chain.
const IID_IDirect3DSwapChain9_Value = Guid.initString("794950f2-adfc-458a-905e-10a10b0b503b");
pub const IID_IDirect3DSwapChain9 = &IID_IDirect3DSwapChain9_Value;
pub const IDirect3DSwapChain9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Present: fn(
            self: *const IDirect3DSwapChain9,
            pSourceRect: ?*const RECT,
            pDestRect: ?*const RECT,
            hDestWindowOverride: ?HWND,
            pDirtyRegion: ?*const RGNDATA,
            dwFlags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFrontBufferData: fn(
            self: *const IDirect3DSwapChain9,
            pDestSurface: ?*IDirect3DSurface9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetBackBuffer: fn(
            self: *const IDirect3DSwapChain9,
            iBackBuffer: u32,
            Type: D3DBACKBUFFER_TYPE,
            ppBackBuffer: ?*?*IDirect3DSurface9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRasterStatus: fn(
            self: *const IDirect3DSwapChain9,
            pRasterStatus: ?*D3DRASTER_STATUS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDisplayMode: fn(
            self: *const IDirect3DSwapChain9,
            pMode: ?*D3DDISPLAYMODE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDevice: fn(
            self: *const IDirect3DSwapChain9,
            ppDevice: ?*?*IDirect3DDevice9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPresentParameters: fn(
            self: *const IDirect3DSwapChain9,
            pPresentationParameters: ?*D3DPRESENT_PARAMETERS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DSwapChain9_Present(self: *const T, pSourceRect: ?*const RECT, pDestRect: ?*const RECT, hDestWindowOverride: ?HWND, pDirtyRegion: ?*const RGNDATA, dwFlags: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DSwapChain9.VTable, self.vtable).Present(@ptrCast(*const IDirect3DSwapChain9, self), pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DSwapChain9_GetFrontBufferData(self: *const T, pDestSurface: ?*IDirect3DSurface9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DSwapChain9.VTable, self.vtable).GetFrontBufferData(@ptrCast(*const IDirect3DSwapChain9, self), pDestSurface);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DSwapChain9_GetBackBuffer(self: *const T, iBackBuffer: u32, Type: D3DBACKBUFFER_TYPE, ppBackBuffer: ?*?*IDirect3DSurface9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DSwapChain9.VTable, self.vtable).GetBackBuffer(@ptrCast(*const IDirect3DSwapChain9, self), iBackBuffer, Type, ppBackBuffer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DSwapChain9_GetRasterStatus(self: *const T, pRasterStatus: ?*D3DRASTER_STATUS) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DSwapChain9.VTable, self.vtable).GetRasterStatus(@ptrCast(*const IDirect3DSwapChain9, self), pRasterStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DSwapChain9_GetDisplayMode(self: *const T, pMode: ?*D3DDISPLAYMODE) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DSwapChain9.VTable, self.vtable).GetDisplayMode(@ptrCast(*const IDirect3DSwapChain9, self), pMode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DSwapChain9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DSwapChain9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DSwapChain9, self), ppDevice);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DSwapChain9_GetPresentParameters(self: *const T, pPresentationParameters: ?*D3DPRESENT_PARAMETERS) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DSwapChain9.VTable, self.vtable).GetPresentParameters(@ptrCast(*const IDirect3DSwapChain9, self), pPresentationParameters);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DResource9: IUnknown-derived base interface; note SetPriority /
// GetPriority return u32, PreLoad returns void, GetType returns
// D3DRESOURCETYPE (not HRESULT), mirroring the native vtable slots.
const IID_IDirect3DResource9_Value = Guid.initString("05eec05d-8f7d-4362-b999-d1baf357c704");
pub const IID_IDirect3DResource9 = &IID_IDirect3DResource9_Value;
pub const IDirect3DResource9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDevice: fn(
            self: *const IDirect3DResource9,
            ppDevice: ?*?*IDirect3DDevice9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetPrivateData: fn(
            self: *const IDirect3DResource9,
            refguid: ?*const Guid,
            pData: ?*const anyopaque,
            SizeOfData: u32,
            Flags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPrivateData: fn(
            self: *const IDirect3DResource9,
            refguid: ?*const Guid,
            pData: ?*anyopaque,
            pSizeOfData: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        FreePrivateData: fn(
            self: *const IDirect3DResource9,
            refguid: ?*const Guid,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetPriority: fn(
            self: *const IDirect3DResource9,
            PriorityNew: u32,
        ) callconv(@import("std").os.windows.WINAPI) u32,
        GetPriority: fn(
            self: *const IDirect3DResource9,
        ) callconv(@import("std").os.windows.WINAPI) u32,
        PreLoad: fn(
            self: *const IDirect3DResource9,
        ) callconv(@import("std").os.windows.WINAPI) void,
        GetType: fn(
            self: *const IDirect3DResource9,
        ) callconv(@import("std").os.windows.WINAPI) D3DRESOURCETYPE,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DResource9, self), ppDevice);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_SetPrivateData(self: *const T, refguid: ?*const Guid, pData: ?*const anyopaque, SizeOfData: u32, Flags: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).SetPrivateData(@ptrCast(*const IDirect3DResource9, self), refguid, pData, SizeOfData, Flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_GetPrivateData(self: *const T, refguid: ?*const Guid, pData: ?*anyopaque, pSizeOfData: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).GetPrivateData(@ptrCast(*const IDirect3DResource9, self), refguid, pData, pSizeOfData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_FreePrivateData(self: *const T, refguid: ?*const Guid) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).FreePrivateData(@ptrCast(*const IDirect3DResource9, self), refguid);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_SetPriority(self: *const T, PriorityNew: u32) callconv(.Inline) u32 {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).SetPriority(@ptrCast(*const IDirect3DResource9, self), PriorityNew);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_GetPriority(self: *const T) callconv(.Inline) u32 {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).GetPriority(@ptrCast(*const IDirect3DResource9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_PreLoad(self: *const T) callconv(.Inline) void {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).PreLoad(@ptrCast(*const IDirect3DResource9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DResource9_GetType(self: *const T) callconv(.Inline) D3DRESOURCETYPE {
            return @ptrCast(*const IDirect3DResource9.VTable, self.vtable).GetType(@ptrCast(*const IDirect3DResource9, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DVertexDeclaration9: IUnknown-derived COM interface.
const IID_IDirect3DVertexDeclaration9_Value = Guid.initString("dd13c59c-36fa-4098-a8fb-c7ed39dc8546");
pub const IID_IDirect3DVertexDeclaration9 = &IID_IDirect3DVertexDeclaration9_Value;
pub const IDirect3DVertexDeclaration9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDevice: fn(
            self: *const IDirect3DVertexDeclaration9,
            ppDevice: ?*?*IDirect3DDevice9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDeclaration: fn(
            self: *const IDirect3DVertexDeclaration9,
            pElement: ?*D3DVERTEXELEMENT9,
            pNumElements: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DVertexDeclaration9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DVertexDeclaration9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DVertexDeclaration9, self), ppDevice);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DVertexDeclaration9_GetDeclaration(self: *const T, pElement: ?*D3DVERTEXELEMENT9, pNumElements: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DVertexDeclaration9.VTable, self.vtable).GetDeclaration(@ptrCast(*const IDirect3DVertexDeclaration9, self), pElement, pNumElements);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DVertexShader9: IUnknown-derived COM interface.
const IID_IDirect3DVertexShader9_Value = Guid.initString("efc5557e-6265-4613-8a94-43857889eb36");
pub const IID_IDirect3DVertexShader9 = &IID_IDirect3DVertexShader9_Value;
pub const IDirect3DVertexShader9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDevice: fn(
            self: *const IDirect3DVertexShader9,
            ppDevice: ?*?*IDirect3DDevice9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFunction: fn(
            self: *const IDirect3DVertexShader9,
            param0: ?*anyopaque,
            pSizeOfData: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DVertexShader9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DVertexShader9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DVertexShader9, self), ppDevice);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DVertexShader9_GetFunction(self: *const T, param0: ?*anyopaque, pSizeOfData: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DVertexShader9.VTable, self.vtable).GetFunction(@ptrCast(*const IDirect3DVertexShader9, self), param0, pSizeOfData);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DPixelShader9: IUnknown-derived COM interface; same shape as
// IDirect3DVertexShader9 (GetDevice + GetFunction).
const IID_IDirect3DPixelShader9_Value = Guid.initString("6d3bdbdc-5b02-4415-b852-ce5e8bccb289");
pub const IID_IDirect3DPixelShader9 = &IID_IDirect3DPixelShader9_Value;
pub const IDirect3DPixelShader9 = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDevice: fn(
            self: *const IDirect3DPixelShader9,
            ppDevice: ?*?*IDirect3DDevice9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFunction: fn(
            self: *const IDirect3DPixelShader9,
            param0: ?*anyopaque,
            pSizeOfData: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DPixelShader9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DPixelShader9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DPixelShader9, self), ppDevice);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DPixelShader9_GetFunction(self: *const T, param0: ?*anyopaque, pSizeOfData: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DPixelShader9.VTable, self.vtable).GetFunction(@ptrCast(*const IDirect3DPixelShader9, self), param0, pSizeOfData);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DBaseTexture9: derives from IDirect3DResource9 (vtable embeds
// IDirect3DResource9.VTable as `base`; mixin chains
// IDirect3DResource9.MethodMixin).
const IID_IDirect3DBaseTexture9_Value = Guid.initString("580ca87e-1d3c-4d54-991d-b7d3e3c298ce");
pub const IID_IDirect3DBaseTexture9 = &IID_IDirect3DBaseTexture9_Value;
pub const IDirect3DBaseTexture9 = extern struct {
    pub const VTable = extern struct {
        base: IDirect3DResource9.VTable,
        SetLOD: fn(
            self: *const IDirect3DBaseTexture9,
            LODNew: u32,
        ) callconv(@import("std").os.windows.WINAPI) u32,
        GetLOD: fn(
            self: *const IDirect3DBaseTexture9,
        ) callconv(@import("std").os.windows.WINAPI) u32,
        GetLevelCount: fn(
            self: *const IDirect3DBaseTexture9,
        ) callconv(@import("std").os.windows.WINAPI) u32,
        SetAutoGenFilterType: fn(
            self: *const IDirect3DBaseTexture9,
            FilterType: D3DTEXTUREFILTERTYPE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAutoGenFilterType: fn(
            self: *const IDirect3DBaseTexture9,
        ) callconv(@import("std").os.windows.WINAPI) D3DTEXTUREFILTERTYPE,
        GenerateMipSubLevels: fn(
            self: *const IDirect3DBaseTexture9,
        ) callconv(@import("std").os.windows.WINAPI) void,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDirect3DResource9.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DBaseTexture9_SetLOD(self: *const T, LODNew: u32) callconv(.Inline) u32 {
            return @ptrCast(*const IDirect3DBaseTexture9.VTable, self.vtable).SetLOD(@ptrCast(*const IDirect3DBaseTexture9, self), LODNew);
        }
        // NOTE: method is namespaced with interface name to avoid
// conflicts for now
        // Remaining IDirect3DBaseTexture9 mixin wrappers; note the non-HRESULT
        // return types (u32 / D3DTEXTUREFILTERTYPE / void) mirror the vtable.
        pub fn IDirect3DBaseTexture9_GetLOD(self: *const T) callconv(.Inline) u32 {
            return @ptrCast(*const IDirect3DBaseTexture9.VTable, self.vtable).GetLOD(@ptrCast(*const IDirect3DBaseTexture9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DBaseTexture9_GetLevelCount(self: *const T) callconv(.Inline) u32 {
            return @ptrCast(*const IDirect3DBaseTexture9.VTable, self.vtable).GetLevelCount(@ptrCast(*const IDirect3DBaseTexture9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DBaseTexture9_SetAutoGenFilterType(self: *const T, FilterType: D3DTEXTUREFILTERTYPE) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DBaseTexture9.VTable, self.vtable).SetAutoGenFilterType(@ptrCast(*const IDirect3DBaseTexture9, self), FilterType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DBaseTexture9_GetAutoGenFilterType(self: *const T) callconv(.Inline) D3DTEXTUREFILTERTYPE {
            return @ptrCast(*const IDirect3DBaseTexture9.VTable, self.vtable).GetAutoGenFilterType(@ptrCast(*const IDirect3DBaseTexture9, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DBaseTexture9_GenerateMipSubLevels(self: *const T) callconv(.Inline) void {
            return @ptrCast(*const IDirect3DBaseTexture9.VTable, self.vtable).GenerateMipSubLevels(@ptrCast(*const IDirect3DBaseTexture9, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DTexture9: 2D texture interface deriving from IDirect3DBaseTexture9
// (vtable embeds IDirect3DBaseTexture9.VTable; mixin chains its MethodMixin).
const IID_IDirect3DTexture9_Value = Guid.initString("85c31227-3de5-4f00-9b3a-f11ac38c18b5");
pub const IID_IDirect3DTexture9 = &IID_IDirect3DTexture9_Value;
pub const IDirect3DTexture9 = extern struct {
    pub const VTable = extern struct {
        base: IDirect3DBaseTexture9.VTable,
        GetLevelDesc: fn(
            self: *const IDirect3DTexture9,
            Level: u32,
            pDesc: ?*D3DSURFACE_DESC,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSurfaceLevel: fn(
            self: *const IDirect3DTexture9,
            Level: u32,
            ppSurfaceLevel: ?*?*IDirect3DSurface9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LockRect: fn(
            self: *const IDirect3DTexture9,
            Level: u32,
            pLockedRect: ?*D3DLOCKED_RECT,
            pRect: ?*const RECT,
            Flags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnlockRect: fn(
            self: *const IDirect3DTexture9,
            Level: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddDirtyRect: fn(
            self: *const IDirect3DTexture9,
            pDirtyRect: ?*const RECT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDirect3DBaseTexture9.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DTexture9_GetLevelDesc(self: *const T, Level: u32, pDesc: ?*D3DSURFACE_DESC) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DTexture9.VTable, self.vtable).GetLevelDesc(@ptrCast(*const IDirect3DTexture9, self), Level, pDesc);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DTexture9_GetSurfaceLevel(self: *const T, Level: u32, ppSurfaceLevel: ?*?*IDirect3DSurface9) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DTexture9.VTable, self.vtable).GetSurfaceLevel(@ptrCast(*const IDirect3DTexture9, self), Level, ppSurfaceLevel);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DTexture9_LockRect(self: *const T, Level: u32, pLockedRect: ?*D3DLOCKED_RECT, pRect: ?*const RECT, Flags: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DTexture9.VTable, self.vtable).LockRect(@ptrCast(*const IDirect3DTexture9, self), Level, pLockedRect, pRect, Flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DTexture9_UnlockRect(self: *const T, Level: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DTexture9.VTable, self.vtable).UnlockRect(@ptrCast(*const IDirect3DTexture9, self), Level);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DTexture9_AddDirtyRect(self: *const T, pDirtyRect: ?*const RECT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DTexture9.VTable, self.vtable).AddDirtyRect(@ptrCast(*const IDirect3DTexture9, self), pDirtyRect);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IDirect3DVolumeTexture9: volume texture interface deriving from
// IDirect3DBaseTexture9; definition continues past this chunk.
const IID_IDirect3DVolumeTexture9_Value = Guid.initString("2518526c-e789-4111-a7b9-47ef328d13e6");
pub const IID_IDirect3DVolumeTexture9 = &IID_IDirect3DVolumeTexture9_Value;
pub const IDirect3DVolumeTexture9 = extern struct {
    pub const VTable = extern struct {
        base: IDirect3DBaseTexture9.VTable,
        GetLevelDesc: fn(
            self: *const IDirect3DVolumeTexture9,
            Level: u32,
            pDesc: ?*D3DVOLUME_DESC,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetVolumeLevel: fn(
            self: *const IDirect3DVolumeTexture9,
            Level: u32,
            ppVolumeLevel: ?*?*IDirect3DVolume9,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LockBox: fn(
            self: *const IDirect3DVolumeTexture9,
            Level: u32,
            pLockedVolume: ?*D3DLOCKED_BOX,
            pBox: ?*const D3DBOX,
            Flags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnlockBox: fn(
            self: *const IDirect3DVolumeTexture9,
            Level: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddDirtyBox: fn(
            self: *const IDirect3DVolumeTexture9,
            pDirtyBox: ?*const D3DBOX,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDirect3DBaseTexture9.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDirect3DVolumeTexture9_GetLevelDesc(self: *const T, Level: u32, pDesc: ?*D3DVOLUME_DESC) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDirect3DVolumeTexture9.VTable, self.vtable).GetLevelDesc(@ptrCast(*const IDirect3DVolumeTexture9, self), Level, pDesc);
        }
        // NOTE: method is namespaced with interface name to avoid
conflicts for now pub fn IDirect3DVolumeTexture9_GetVolumeLevel(self: *const T, Level: u32, ppVolumeLevel: ?*?*IDirect3DVolume9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolumeTexture9.VTable, self.vtable).GetVolumeLevel(@ptrCast(*const IDirect3DVolumeTexture9, self), Level, ppVolumeLevel); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolumeTexture9_LockBox(self: *const T, Level: u32, pLockedVolume: ?*D3DLOCKED_BOX, pBox: ?*const D3DBOX, Flags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolumeTexture9.VTable, self.vtable).LockBox(@ptrCast(*const IDirect3DVolumeTexture9, self), Level, pLockedVolume, pBox, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolumeTexture9_UnlockBox(self: *const T, Level: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolumeTexture9.VTable, self.vtable).UnlockBox(@ptrCast(*const IDirect3DVolumeTexture9, self), Level); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolumeTexture9_AddDirtyBox(self: *const T, pDirtyBox: ?*const D3DBOX) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolumeTexture9.VTable, self.vtable).AddDirtyBox(@ptrCast(*const IDirect3DVolumeTexture9, self), pDirtyBox); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DCubeTexture9_Value = Guid.initString("fff32f81-d953-473a-9223-93d652aba93f"); pub const IID_IDirect3DCubeTexture9 = &IID_IDirect3DCubeTexture9_Value; pub const IDirect3DCubeTexture9 = extern struct { pub const VTable = extern struct { base: IDirect3DBaseTexture9.VTable, GetLevelDesc: fn( self: *const IDirect3DCubeTexture9, Level: u32, pDesc: ?*D3DSURFACE_DESC, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCubeMapSurface: fn( self: *const IDirect3DCubeTexture9, FaceType: D3DCUBEMAP_FACES, Level: u32, ppCubeMapSurface: ?*?*IDirect3DSurface9, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, LockRect: fn( self: *const IDirect3DCubeTexture9, FaceType: D3DCUBEMAP_FACES, Level: u32, pLockedRect: ?*D3DLOCKED_RECT, pRect: ?*const RECT, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UnlockRect: fn( self: *const IDirect3DCubeTexture9, FaceType: D3DCUBEMAP_FACES, Level: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AddDirtyRect: fn( self: *const IDirect3DCubeTexture9, FaceType: D3DCUBEMAP_FACES, pDirtyRect: ?*const RECT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDirect3DBaseTexture9.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DCubeTexture9_GetLevelDesc(self: *const T, Level: u32, pDesc: ?*D3DSURFACE_DESC) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DCubeTexture9.VTable, self.vtable).GetLevelDesc(@ptrCast(*const IDirect3DCubeTexture9, self), Level, pDesc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DCubeTexture9_GetCubeMapSurface(self: *const T, FaceType: D3DCUBEMAP_FACES, Level: u32, ppCubeMapSurface: ?*?*IDirect3DSurface9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DCubeTexture9.VTable, self.vtable).GetCubeMapSurface(@ptrCast(*const IDirect3DCubeTexture9, self), FaceType, Level, ppCubeMapSurface); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DCubeTexture9_LockRect(self: *const T, FaceType: D3DCUBEMAP_FACES, Level: u32, pLockedRect: ?*D3DLOCKED_RECT, pRect: ?*const RECT, Flags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DCubeTexture9.VTable, self.vtable).LockRect(@ptrCast(*const IDirect3DCubeTexture9, self), FaceType, Level, pLockedRect, pRect, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IDirect3DCubeTexture9_UnlockRect(self: *const T, FaceType: D3DCUBEMAP_FACES, Level: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DCubeTexture9.VTable, self.vtable).UnlockRect(@ptrCast(*const IDirect3DCubeTexture9, self), FaceType, Level); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DCubeTexture9_AddDirtyRect(self: *const T, FaceType: D3DCUBEMAP_FACES, pDirtyRect: ?*const RECT) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DCubeTexture9.VTable, self.vtable).AddDirtyRect(@ptrCast(*const IDirect3DCubeTexture9, self), FaceType, pDirtyRect); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DVertexBuffer9_Value = Guid.initString("b64bb1b5-fd70-4df6-bf91-19d0a12455e3"); pub const IID_IDirect3DVertexBuffer9 = &IID_IDirect3DVertexBuffer9_Value; pub const IDirect3DVertexBuffer9 = extern struct { pub const VTable = extern struct { base: IDirect3DResource9.VTable, Lock: fn( self: *const IDirect3DVertexBuffer9, OffsetToLock: u32, SizeToLock: u32, ppbData: ?*?*anyopaque, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Unlock: fn( self: *const IDirect3DVertexBuffer9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDesc: fn( self: *const IDirect3DVertexBuffer9, pDesc: ?*D3DVERTEXBUFFER_DESC, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDirect3DResource9.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVertexBuffer9_Lock(self: *const T, OffsetToLock: u32, SizeToLock: u32, ppbData: ?*?*anyopaque, Flags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVertexBuffer9.VTable, self.vtable).Lock(@ptrCast(*const IDirect3DVertexBuffer9, self), OffsetToLock, SizeToLock, ppbData, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
IDirect3DVertexBuffer9_Unlock(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVertexBuffer9.VTable, self.vtable).Unlock(@ptrCast(*const IDirect3DVertexBuffer9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVertexBuffer9_GetDesc(self: *const T, pDesc: ?*D3DVERTEXBUFFER_DESC) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVertexBuffer9.VTable, self.vtable).GetDesc(@ptrCast(*const IDirect3DVertexBuffer9, self), pDesc); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DIndexBuffer9_Value = Guid.initString("7c9dd65e-d3f7-4529-acee-785830acde35"); pub const IID_IDirect3DIndexBuffer9 = &IID_IDirect3DIndexBuffer9_Value; pub const IDirect3DIndexBuffer9 = extern struct { pub const VTable = extern struct { base: IDirect3DResource9.VTable, Lock: fn( self: *const IDirect3DIndexBuffer9, OffsetToLock: u32, SizeToLock: u32, ppbData: ?*?*anyopaque, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Unlock: fn( self: *const IDirect3DIndexBuffer9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDesc: fn( self: *const IDirect3DIndexBuffer9, pDesc: ?*D3DINDEXBUFFER_DESC, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDirect3DResource9.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DIndexBuffer9_Lock(self: *const T, OffsetToLock: u32, SizeToLock: u32, ppbData: ?*?*anyopaque, Flags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DIndexBuffer9.VTable, self.vtable).Lock(@ptrCast(*const IDirect3DIndexBuffer9, self), OffsetToLock, SizeToLock, ppbData, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DIndexBuffer9_Unlock(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const 
IDirect3DIndexBuffer9.VTable, self.vtable).Unlock(@ptrCast(*const IDirect3DIndexBuffer9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DIndexBuffer9_GetDesc(self: *const T, pDesc: ?*D3DINDEXBUFFER_DESC) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DIndexBuffer9.VTable, self.vtable).GetDesc(@ptrCast(*const IDirect3DIndexBuffer9, self), pDesc); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DSurface9_Value = Guid.initString("0cfbaf3a-9ff6-429a-99b3-a2796af8b89b"); pub const IID_IDirect3DSurface9 = &IID_IDirect3DSurface9_Value; pub const IDirect3DSurface9 = extern struct { pub const VTable = extern struct { base: IDirect3DResource9.VTable, GetContainer: fn( self: *const IDirect3DSurface9, riid: ?*const Guid, ppContainer: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDesc: fn( self: *const IDirect3DSurface9, pDesc: ?*D3DSURFACE_DESC, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LockRect: fn( self: *const IDirect3DSurface9, pLockedRect: ?*D3DLOCKED_RECT, pRect: ?*const RECT, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UnlockRect: fn( self: *const IDirect3DSurface9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDC: fn( self: *const IDirect3DSurface9, phdc: ?*?HDC, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ReleaseDC: fn( self: *const IDirect3DSurface9, hdc: ?HDC, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDirect3DResource9.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSurface9_GetContainer(self: *const T, riid: ?*const Guid, ppContainer: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSurface9.VTable, self.vtable).GetContainer(@ptrCast(*const IDirect3DSurface9, self), riid, ppContainer); } // NOTE: 
method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSurface9_GetDesc(self: *const T, pDesc: ?*D3DSURFACE_DESC) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSurface9.VTable, self.vtable).GetDesc(@ptrCast(*const IDirect3DSurface9, self), pDesc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSurface9_LockRect(self: *const T, pLockedRect: ?*D3DLOCKED_RECT, pRect: ?*const RECT, Flags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSurface9.VTable, self.vtable).LockRect(@ptrCast(*const IDirect3DSurface9, self), pLockedRect, pRect, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSurface9_UnlockRect(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSurface9.VTable, self.vtable).UnlockRect(@ptrCast(*const IDirect3DSurface9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSurface9_GetDC(self: *const T, phdc: ?*?HDC) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSurface9.VTable, self.vtable).GetDC(@ptrCast(*const IDirect3DSurface9, self), phdc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSurface9_ReleaseDC(self: *const T, hdc: ?HDC) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSurface9.VTable, self.vtable).ReleaseDC(@ptrCast(*const IDirect3DSurface9, self), hdc); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DVolume9_Value = Guid.initString("24f416e6-1f67-4aa7-b88e-d33f6f3128a1"); pub const IID_IDirect3DVolume9 = &IID_IDirect3DVolume9_Value; pub const IDirect3DVolume9 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetDevice: fn( self: *const IDirect3DVolume9, ppDevice: ?*?*IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetPrivateData: fn( self: *const IDirect3DVolume9, 
refguid: ?*const Guid, pData: ?*const anyopaque, SizeOfData: u32, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetPrivateData: fn( self: *const IDirect3DVolume9, refguid: ?*const Guid, pData: ?*anyopaque, pSizeOfData: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, FreePrivateData: fn( self: *const IDirect3DVolume9, refguid: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetContainer: fn( self: *const IDirect3DVolume9, riid: ?*const Guid, ppContainer: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDesc: fn( self: *const IDirect3DVolume9, pDesc: ?*D3DVOLUME_DESC, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LockBox: fn( self: *const IDirect3DVolume9, pLockedVolume: ?*D3DLOCKED_BOX, pBox: ?*const D3DBOX, Flags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UnlockBox: fn( self: *const IDirect3DVolume9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DVolume9, self), ppDevice); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_SetPrivateData(self: *const T, refguid: ?*const Guid, pData: ?*const anyopaque, SizeOfData: u32, Flags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).SetPrivateData(@ptrCast(*const IDirect3DVolume9, self), refguid, pData, SizeOfData, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_GetPrivateData(self: *const T, refguid: ?*const Guid, pData: ?*anyopaque, pSizeOfData: ?*u32) 
callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).GetPrivateData(@ptrCast(*const IDirect3DVolume9, self), refguid, pData, pSizeOfData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_FreePrivateData(self: *const T, refguid: ?*const Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).FreePrivateData(@ptrCast(*const IDirect3DVolume9, self), refguid); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_GetContainer(self: *const T, riid: ?*const Guid, ppContainer: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).GetContainer(@ptrCast(*const IDirect3DVolume9, self), riid, ppContainer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_GetDesc(self: *const T, pDesc: ?*D3DVOLUME_DESC) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).GetDesc(@ptrCast(*const IDirect3DVolume9, self), pDesc); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_LockBox(self: *const T, pLockedVolume: ?*D3DLOCKED_BOX, pBox: ?*const D3DBOX, Flags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).LockBox(@ptrCast(*const IDirect3DVolume9, self), pLockedVolume, pBox, Flags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DVolume9_UnlockBox(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DVolume9.VTable, self.vtable).UnlockBox(@ptrCast(*const IDirect3DVolume9, self)); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DQuery9_Value = Guid.initString("d9771460-a695-4f26-bbd3-27b840b541cc"); pub const IID_IDirect3DQuery9 = &IID_IDirect3DQuery9_Value; pub const IDirect3DQuery9 = extern 
struct { pub const VTable = extern struct { base: IUnknown.VTable, GetDevice: fn( self: *const IDirect3DQuery9, ppDevice: ?*?*IDirect3DDevice9, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetType: fn( self: *const IDirect3DQuery9, ) callconv(@import("std").os.windows.WINAPI) D3DQUERYTYPE, GetDataSize: fn( self: *const IDirect3DQuery9, ) callconv(@import("std").os.windows.WINAPI) u32, Issue: fn( self: *const IDirect3DQuery9, dwIssueFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetData: fn( self: *const IDirect3DQuery9, pData: ?*anyopaque, dwSize: u32, dwGetDataFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DQuery9_GetDevice(self: *const T, ppDevice: ?*?*IDirect3DDevice9) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DQuery9.VTable, self.vtable).GetDevice(@ptrCast(*const IDirect3DQuery9, self), ppDevice); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DQuery9_GetType(self: *const T) callconv(.Inline) D3DQUERYTYPE { return @ptrCast(*const IDirect3DQuery9.VTable, self.vtable).GetType(@ptrCast(*const IDirect3DQuery9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DQuery9_GetDataSize(self: *const T) callconv(.Inline) u32 { return @ptrCast(*const IDirect3DQuery9.VTable, self.vtable).GetDataSize(@ptrCast(*const IDirect3DQuery9, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DQuery9_Issue(self: *const T, dwIssueFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DQuery9.VTable, self.vtable).Issue(@ptrCast(*const IDirect3DQuery9, self), dwIssueFlags); } // NOTE: method is namespaced with interface name to avoid conflicts 
for now pub fn IDirect3DQuery9_GetData(self: *const T, pData: ?*anyopaque, dwSize: u32, dwGetDataFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DQuery9.VTable, self.vtable).GetData(@ptrCast(*const IDirect3DQuery9, self), pData, dwSize, dwGetDataFlags); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3D9Ex_Value = Guid.initString("02177241-69fc-400c-8ff1-93a44df6861d"); pub const IID_IDirect3D9Ex = &IID_IDirect3D9Ex_Value; pub const IDirect3D9Ex = extern struct { pub const VTable = extern struct { base: IDirect3D9.VTable, GetAdapterModeCountEx: fn( self: *const IDirect3D9Ex, Adapter: u32, pFilter: ?*const D3DDISPLAYMODEFILTER, ) callconv(@import("std").os.windows.WINAPI) u32, EnumAdapterModesEx: fn( self: *const IDirect3D9Ex, Adapter: u32, pFilter: ?*const D3DDISPLAYMODEFILTER, Mode: u32, pMode: ?*D3DDISPLAYMODEEX, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAdapterDisplayModeEx: fn( self: *const IDirect3D9Ex, Adapter: u32, pMode: ?*D3DDISPLAYMODEEX, pRotation: ?*D3DDISPLAYROTATION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateDeviceEx: fn( self: *const IDirect3D9Ex, Adapter: u32, DeviceType: D3DDEVTYPE, hFocusWindow: ?HWND, BehaviorFlags: u32, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, pFullscreenDisplayMode: ?*D3DDISPLAYMODEEX, ppReturnedDeviceInterface: ?*?*IDirect3DDevice9Ex, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetAdapterLUID: fn( self: *const IDirect3D9Ex, Adapter: u32, pLUID: ?*LUID, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDirect3D9.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3D9Ex_GetAdapterModeCountEx(self: *const T, Adapter: u32, pFilter: ?*const D3DDISPLAYMODEFILTER) callconv(.Inline) u32 { return @ptrCast(*const IDirect3D9Ex.VTable, 
self.vtable).GetAdapterModeCountEx(@ptrCast(*const IDirect3D9Ex, self), Adapter, pFilter); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3D9Ex_EnumAdapterModesEx(self: *const T, Adapter: u32, pFilter: ?*const D3DDISPLAYMODEFILTER, Mode: u32, pMode: ?*D3DDISPLAYMODEEX) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3D9Ex.VTable, self.vtable).EnumAdapterModesEx(@ptrCast(*const IDirect3D9Ex, self), Adapter, pFilter, Mode, pMode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3D9Ex_GetAdapterDisplayModeEx(self: *const T, Adapter: u32, pMode: ?*D3DDISPLAYMODEEX, pRotation: ?*D3DDISPLAYROTATION) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3D9Ex.VTable, self.vtable).GetAdapterDisplayModeEx(@ptrCast(*const IDirect3D9Ex, self), Adapter, pMode, pRotation); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3D9Ex_CreateDeviceEx(self: *const T, Adapter: u32, DeviceType: D3DDEVTYPE, hFocusWindow: ?HWND, BehaviorFlags: u32, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, pFullscreenDisplayMode: ?*D3DDISPLAYMODEEX, ppReturnedDeviceInterface: ?*?*IDirect3DDevice9Ex) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3D9Ex.VTable, self.vtable).CreateDeviceEx(@ptrCast(*const IDirect3D9Ex, self), Adapter, DeviceType, hFocusWindow, BehaviorFlags, pPresentationParameters, pFullscreenDisplayMode, ppReturnedDeviceInterface); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3D9Ex_GetAdapterLUID(self: *const T, Adapter: u32, pLUID: ?*LUID) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3D9Ex.VTable, self.vtable).GetAdapterLUID(@ptrCast(*const IDirect3D9Ex, self), Adapter, pLUID); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DDevice9Ex_Value = Guid.initString("b18b10ce-2649-405a-870f-95f777d4313a"); pub const IID_IDirect3DDevice9Ex = 
&IID_IDirect3DDevice9Ex_Value; pub const IDirect3DDevice9Ex = extern struct { pub const VTable = extern struct { base: IDirect3DDevice9.VTable, SetConvolutionMonoKernel: fn( self: *const IDirect3DDevice9Ex, width: u32, height: u32, rows: ?*f32, columns: ?*f32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ComposeRects: fn( self: *const IDirect3DDevice9Ex, pSrc: ?*IDirect3DSurface9, pDst: ?*IDirect3DSurface9, pSrcRectDescs: ?*IDirect3DVertexBuffer9, NumRects: u32, pDstRectDescs: ?*IDirect3DVertexBuffer9, Operation: D3DCOMPOSERECTSOP, Xoffset: i32, Yoffset: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, PresentEx: fn( self: *const IDirect3DDevice9Ex, pSourceRect: ?*const RECT, pDestRect: ?*const RECT, hDestWindowOverride: ?HWND, pDirtyRegion: ?*const RGNDATA, dwFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetGPUThreadPriority: fn( self: *const IDirect3DDevice9Ex, pPriority: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetGPUThreadPriority: fn( self: *const IDirect3DDevice9Ex, Priority: i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, WaitForVBlank: fn( self: *const IDirect3DDevice9Ex, iSwapChain: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CheckResourceResidency: fn( self: *const IDirect3DDevice9Ex, pResourceArray: ?*?*IDirect3DResource9, NumResources: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetMaximumFrameLatency: fn( self: *const IDirect3DDevice9Ex, MaxLatency: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetMaximumFrameLatency: fn( self: *const IDirect3DDevice9Ex, pMaxLatency: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CheckDeviceState: fn( self: *const IDirect3DDevice9Ex, hDestinationWindow: ?HWND, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateRenderTargetEx: fn( self: *const IDirect3DDevice9Ex, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Lockable: BOOL, 
ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, Usage: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateOffscreenPlainSurfaceEx: fn( self: *const IDirect3DDevice9Ex, Width: u32, Height: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, Usage: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateDepthStencilSurfaceEx: fn( self: *const IDirect3DDevice9Ex, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Discard: BOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, Usage: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ResetEx: fn( self: *const IDirect3DDevice9Ex, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, pFullscreenDisplayMode: ?*D3DDISPLAYMODEEX, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDisplayModeEx: fn( self: *const IDirect3DDevice9Ex, iSwapChain: u32, pMode: ?*D3DDISPLAYMODEEX, pRotation: ?*D3DDISPLAYROTATION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDirect3DDevice9.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_SetConvolutionMonoKernel(self: *const T, width: u32, height: u32, rows: ?*f32, columns: ?*f32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).SetConvolutionMonoKernel(@ptrCast(*const IDirect3DDevice9Ex, self), width, height, rows, columns); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_ComposeRects(self: *const T, pSrc: ?*IDirect3DSurface9, pDst: ?*IDirect3DSurface9, pSrcRectDescs: ?*IDirect3DVertexBuffer9, NumRects: u32, pDstRectDescs: ?*IDirect3DVertexBuffer9, Operation: D3DCOMPOSERECTSOP, Xoffset: i32, Yoffset: i32) callconv(.Inline) HRESULT { return @ptrCast(*const 
IDirect3DDevice9Ex.VTable, self.vtable).ComposeRects(@ptrCast(*const IDirect3DDevice9Ex, self), pSrc, pDst, pSrcRectDescs, NumRects, pDstRectDescs, Operation, Xoffset, Yoffset); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_PresentEx(self: *const T, pSourceRect: ?*const RECT, pDestRect: ?*const RECT, hDestWindowOverride: ?HWND, pDirtyRegion: ?*const RGNDATA, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).PresentEx(@ptrCast(*const IDirect3DDevice9Ex, self), pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_GetGPUThreadPriority(self: *const T, pPriority: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).GetGPUThreadPriority(@ptrCast(*const IDirect3DDevice9Ex, self), pPriority); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_SetGPUThreadPriority(self: *const T, Priority: i32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).SetGPUThreadPriority(@ptrCast(*const IDirect3DDevice9Ex, self), Priority); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_WaitForVBlank(self: *const T, iSwapChain: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).WaitForVBlank(@ptrCast(*const IDirect3DDevice9Ex, self), iSwapChain); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_CheckResourceResidency(self: *const T, pResourceArray: ?*?*IDirect3DResource9, NumResources: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).CheckResourceResidency(@ptrCast(*const IDirect3DDevice9Ex, self), pResourceArray, 
NumResources); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_SetMaximumFrameLatency(self: *const T, MaxLatency: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).SetMaximumFrameLatency(@ptrCast(*const IDirect3DDevice9Ex, self), MaxLatency); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_GetMaximumFrameLatency(self: *const T, pMaxLatency: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).GetMaximumFrameLatency(@ptrCast(*const IDirect3DDevice9Ex, self), pMaxLatency); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_CheckDeviceState(self: *const T, hDestinationWindow: ?HWND) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).CheckDeviceState(@ptrCast(*const IDirect3DDevice9Ex, self), hDestinationWindow); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_CreateRenderTargetEx(self: *const T, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Lockable: BOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, Usage: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).CreateRenderTargetEx(@ptrCast(*const IDirect3DDevice9Ex, self), Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle, Usage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_CreateOffscreenPlainSurfaceEx(self: *const T, Width: u32, Height: u32, Format: D3DFORMAT, Pool: D3DPOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, Usage: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, 
self.vtable).CreateOffscreenPlainSurfaceEx(@ptrCast(*const IDirect3DDevice9Ex, self), Width, Height, Format, Pool, ppSurface, pSharedHandle, Usage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_CreateDepthStencilSurfaceEx(self: *const T, Width: u32, Height: u32, Format: D3DFORMAT, MultiSample: D3DMULTISAMPLE_TYPE, MultisampleQuality: u32, Discard: BOOL, ppSurface: ?*?*IDirect3DSurface9, pSharedHandle: ?*?HANDLE, Usage: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).CreateDepthStencilSurfaceEx(@ptrCast(*const IDirect3DDevice9Ex, self), Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle, Usage); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_ResetEx(self: *const T, pPresentationParameters: ?*D3DPRESENT_PARAMETERS, pFullscreenDisplayMode: ?*D3DDISPLAYMODEEX) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).ResetEx(@ptrCast(*const IDirect3DDevice9Ex, self), pPresentationParameters, pFullscreenDisplayMode); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DDevice9Ex_GetDisplayModeEx(self: *const T, iSwapChain: u32, pMode: ?*D3DDISPLAYMODEEX, pRotation: ?*D3DDISPLAYROTATION) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DDevice9Ex.VTable, self.vtable).GetDisplayModeEx(@ptrCast(*const IDirect3DDevice9Ex, self), iSwapChain, pMode, pRotation); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDirect3DSwapChain9Ex_Value = Guid.initString("91886caf-1c3d-4d2e-a0ab-3e4c7d8d3303"); pub const IID_IDirect3DSwapChain9Ex = &IID_IDirect3DSwapChain9Ex_Value; pub const IDirect3DSwapChain9Ex = extern struct { pub const VTable = extern struct { base: IDirect3DSwapChain9.VTable, GetLastPresentCount: fn( self: *const IDirect3DSwapChain9Ex, pLastPresentCount: ?*u32, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, GetPresentStats: fn( self: *const IDirect3DSwapChain9Ex, pPresentationStatistics: ?*D3DPRESENTSTATS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDisplayModeEx: fn( self: *const IDirect3DSwapChain9Ex, pMode: ?*D3DDISPLAYMODEEX, pRotation: ?*D3DDISPLAYROTATION, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDirect3DSwapChain9.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSwapChain9Ex_GetLastPresentCount(self: *const T, pLastPresentCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSwapChain9Ex.VTable, self.vtable).GetLastPresentCount(@ptrCast(*const IDirect3DSwapChain9Ex, self), pLastPresentCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSwapChain9Ex_GetPresentStats(self: *const T, pPresentationStatistics: ?*D3DPRESENTSTATS) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSwapChain9Ex.VTable, self.vtable).GetPresentStats(@ptrCast(*const IDirect3DSwapChain9Ex, self), pPresentationStatistics); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDirect3DSwapChain9Ex_GetDisplayModeEx(self: *const T, pMode: ?*D3DDISPLAYMODEEX, pRotation: ?*D3DDISPLAYROTATION) callconv(.Inline) HRESULT { return @ptrCast(*const IDirect3DSwapChain9Ex.VTable, self.vtable).GetDisplayModeEx(@ptrCast(*const IDirect3DSwapChain9Ex, self), pMode, pRotation); } };} pub usingnamespace MethodMixin(@This()); }; pub const D3DADAPTER_IDENTIFIER9 = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { Driver: [512]CHAR, Description: [512]CHAR, DeviceName: [32]CHAR, DriverVersion: LARGE_INTEGER, VendorId: u32, DeviceId: u32, SubSysId: u32, Revision: u32, DeviceIdentifier: Guid, WHQLLevel: u32, }, .X86 => extern struct { // WARNING: 
unable to add field alignment because it's causing a compiler bug Driver: [512]CHAR, Description: [512]CHAR, DeviceName: [32]CHAR, DriverVersion: LARGE_INTEGER, VendorId: u32, DeviceId: u32, SubSysId: u32, Revision: u32, DeviceIdentifier: Guid, WHQLLevel: u32, }, }; pub const D3DMEMORYPRESSURE = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { BytesEvictedFromProcess: u64, SizeOfInefficientAllocation: u64, LevelOfEfficiency: u32, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug BytesEvictedFromProcess: u64, SizeOfInefficientAllocation: u64, LevelOfEfficiency: u32, }, }; pub const D3DPRESENTSTATS = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { PresentCount: u32, PresentRefreshCount: u32, SyncRefreshCount: u32, SyncQPCTime: LARGE_INTEGER, SyncGPUTime: LARGE_INTEGER, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug PresentCount: u32, PresentRefreshCount: u32, SyncRefreshCount: u32, SyncQPCTime: LARGE_INTEGER, SyncGPUTime: LARGE_INTEGER, }, }; pub const D3DAUTHENTICATEDCHANNEL_QUERYOUTPUTID_OUTPUT = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT, DeviceHandle: ?HANDLE, CryptoSessionHandle: ?HANDLE, OutputIDIndex: u32, OutputID: u64, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug Output: D3DAUTHENTICATEDCHANNEL_QUERY_OUTPUT, DeviceHandle: ?HANDLE, CryptoSessionHandle: ?HANDLE, OutputIDIndex: u32, OutputID: u64, }, }; pub const D3DAES_CTR_IV = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { IV: u64, Count: u64, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug IV: u64, Count: u64, }, }; //-------------------------------------------------------------------------------- // Section: Functions (9) 
//--------------------------------------------------------------------------------
// Raw extern bindings into d3d9.dll. All use the Win32 stdcall convention
// (WINAPI); pointer returns/parameters are optional (`?*`) because the DLL may
// return or accept NULL.
// NOTE(review): this file is machine-generated — prefer regenerating over
// hand-editing these declarations.

// Entry point for Direct3D 9; returns null on failure (no HRESULT).
pub extern "d3d9" fn Direct3DCreate9(
    SDKVersion: u32,
) callconv(@import("std").os.windows.WINAPI) ?*IDirect3D9;

// D3DPERF_* are the PIX profiling/event-marker hooks.
pub extern "d3d9" fn D3DPERF_BeginEvent(
    col: u32,
    wszName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "d3d9" fn D3DPERF_EndEvent(
) callconv(@import("std").os.windows.WINAPI) i32;

pub extern "d3d9" fn D3DPERF_SetMarker(
    col: u32,
    wszName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "d3d9" fn D3DPERF_SetRegion(
    col: u32,
    wszName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "d3d9" fn D3DPERF_QueryRepeatFrame(
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "d3d9" fn D3DPERF_SetOptions(
    dwOptions: u32,
) callconv(@import("std").os.windows.WINAPI) void;

pub extern "d3d9" fn D3DPERF_GetStatus(
) callconv(@import("std").os.windows.WINAPI) u32;

// Extended (Vista+) entry point; returns the interface via out-parameter and
// reports failure through the HRESULT, unlike Direct3DCreate9 above.
pub extern "d3d9" fn Direct3DCreate9Ex(
    SDKVersion: u32,
    param1: ?*?*IDirect3D9Ex,
) callconv(@import("std").os.windows.WINAPI) HRESULT;

//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
// This module declares no A/W function pairs, so every branch of the
// unicode_mode switch contributes an empty namespace (kept for structural
// consistency with the other generated win32 modules).
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct { },
    .wide => struct { },
    .unspecified => if (@import("builtin").is_test) struct { } else struct { },
};

//--------------------------------------------------------------------------------
// Section: Imports (18)
//--------------------------------------------------------------------------------
// Types referenced by the declarations above, pulled from sibling generated
// modules (file-scope `const` aliases, not public re-exports).
const Guid = @import("../zig.zig").Guid;
const BOOL = @import("../foundation.zig").BOOL;
const CHAR = @import("../foundation.zig").CHAR;
const D3DMATRIX = @import("../graphics/direct3d.zig").D3DMATRIX;
const D3DVECTOR = @import("../graphics/direct3d.zig").D3DVECTOR;
const HANDLE = @import("../foundation.zig").HANDLE;
const HDC = @import("../graphics/gdi.zig").HDC;
const HMONITOR = @import("../graphics/gdi.zig").HMONITOR;
const HRESULT = @import("../foundation.zig").HRESULT;
const HWND = @import("../foundation.zig").HWND;
const IUnknown = @import("../system/com.zig").IUnknown;
const LARGE_INTEGER = @import("../foundation.zig").LARGE_INTEGER;
const LUID = @import("../foundation.zig").LUID;
const PALETTEENTRY = @import("../graphics/gdi.zig").PALETTEENTRY;
const POINT = @import("../foundation.zig").POINT;
const PWSTR = @import("../foundation.zig").PWSTR;
const RECT = @import("../foundation.zig").RECT;
const RGNDATA = @import("../graphics/gdi.zig").RGNDATA;

// Compile-time smoke test: instantiating every pub declaration forces the
// compiler to semantically check the whole generated module.
test {
    // The quota must scale with the declaration count or large generated
    // modules would exhaust the default eval branch quota.
    @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
// file: win32/graphics/direct3d9.zig (end) — next content is from a different source file
const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const config = @import("../config.zig"); const Message = @import("../message_pool.zig").MessagePool.Message; const RingBuffer = @import("../ring_buffer.zig").RingBuffer; const vsr = @import("../vsr.zig"); const Header = vsr.Header; const Timeout = vsr.Timeout; const Command = vsr.Command; const Version = vsr.Version; const log = std.log.scoped(.replica); pub const Status = enum { normal, view_change, recovering, }; const ClientTable = std.AutoHashMapUnmanaged(u128, ClientTableEntry); /// We found two bugs in the VRR paper relating to the client table: /// /// 1. a correctness bug, where successive client crashes may cause request numbers to collide for /// different request payloads, resulting in requests receiving the wrong reply, and /// /// 2. a liveness bug, where if the client table is updated for request and prepare messages with /// the client's latest request number, then the client may be locked out from the cluster if the /// request is ever reordered through a view change. /// /// We therefore take a different approach with the implementation of our client table, to: /// /// 1. register client sessions explicitly through the state machine to ensure that client session /// numbers always increase, and /// /// 2. make a more careful distinction between uncommitted and committed request numbers, /// considering that uncommitted requests may not survive a view change. const ClientTableEntry = struct { /// The client's session number as committed to the cluster by a register request. session: u64, /// The reply sent to the client's latest committed request. reply: *Message, }; const Prepare = struct { /// The current prepare message (used to cross-check prepare_ok messages, and for resending). message: *Message, /// Unique prepare_ok messages for the same view, op number and checksum from ALL replicas. 
ok_from_all_replicas: QuorumMessages = QuorumMessagesNull, /// Whether a quorum of prepare_ok messages has been received for this prepare. ok_quorum_received: bool = false, }; const QuorumMessages = [config.replicas_max]?*Message; const QuorumMessagesNull = [_]?*Message{null} ** config.replicas_max; pub fn Replica( comptime StateMachine: type, comptime MessageBus: type, comptime Storage: type, comptime Time: type, ) type { return struct { const Self = @This(); const Journal = vsr.Journal(Self, Storage); const Clock = vsr.Clock(Time); /// The number of the cluster to which this replica belongs: cluster: u32, /// The number of replicas in the cluster: replica_count: u8, /// The index of this replica's address in the configuration array held by the MessageBus: replica: u8, /// The minimum number of replicas required to form a replication quorum: quorum_replication: u8, /// The minimum number of replicas required to form a view change quorum: quorum_view_change: u8, /// A distributed fault-tolerant clock for lower and upper bounds on the leader's wall clock: clock: Clock, /// The persistent log of hash-chained journal entries: journal: Journal, /// An abstraction to send messages from the replica to another replica or client. /// The message bus will also deliver messages to this replica by calling `on_message()`. message_bus: *MessageBus, /// For executing service up-calls after an operation has been committed: state_machine: *StateMachine, /// The client table records for each client the latest session and the latest committed reply. client_table: ClientTable, /// The current view, initially 0: view: u32, /// The latest view, in which the replica's status was normal. view_normal: u32, /// The current status, either normal, view_change, or recovering: /// TODO Don't default to normal, set the starting status according to the journal's health. 
status: Status = .normal, /// The op number assigned to the most recently prepared operation: op: u64, /// The op number of the latest committed and executed operation (according to the replica): /// The replica may have to wait for repairs to complete before commit_min reaches commit_max. commit_min: u64, /// The op number of the latest committed operation (according to the cluster): /// This is the commit number in terms of the VRR paper. commit_max: u64, /// Whether we are reading a prepare from storage in order to commit. committing: bool = false, /// Whether we are reading a prepare from storage in order to push to the pipeline. repairing_pipeline: bool = false, /// The leader's pipeline of inflight prepares waiting to commit in FIFO order. /// This allows us to pipeline without the complexity of out-of-order commits. pipeline: RingBuffer(Prepare, config.pipelining_max) = .{}, /// In some cases, a replica may send a message to itself. We do not submit these messages /// to the message bus but rather queue them here for guaranteed immediate delivery, which /// we require and assert in our protocol implementation. loopback_queue: ?*Message = null, /// Unique start_view_change messages for the same view from OTHER replicas (excluding ourself). start_view_change_from_other_replicas: QuorumMessages = QuorumMessagesNull, /// Unique do_view_change messages for the same view from ALL replicas (including ourself). do_view_change_from_all_replicas: QuorumMessages = QuorumMessagesNull, /// Unique nack_prepare messages for the same view from OTHER replicas (excluding ourself). nack_prepare_from_other_replicas: QuorumMessages = QuorumMessagesNull, /// Whether a replica has received a quorum of start_view_change messages for the view change: start_view_change_quorum: bool = false, /// Whether the leader has received a quorum of do_view_change messages for the view change: /// Determines whether the leader may effect repairs according to the CTRL protocol. 
do_view_change_quorum: bool = false, /// Whether the leader is expecting to receive a nack_prepare and for which op: nack_prepare_op: ?u64 = null, /// The number of ticks before a leader or follower broadcasts a ping to other replicas. /// TODO Explain why we need this (MessageBus handshaking, leapfrogging faulty replicas, /// deciding whether starting a view change would be detrimental under some network partitions). ping_timeout: Timeout, /// The number of ticks without enough prepare_ok's before the leader resends a prepare. prepare_timeout: Timeout, /// The number of ticks before the leader sends a commit heartbeat: /// The leader always sends a commit heartbeat irrespective of when it last sent a prepare. /// This improves liveness when prepare messages cannot be replicated fully due to partitions. commit_timeout: Timeout, /// The number of ticks without hearing from the leader before starting a view change. /// This transitions from .normal status to .view_change status. normal_status_timeout: Timeout, /// The number of ticks before a view change is timed out: /// This transitions from `view_change` status to `view_change` status but for a newer view. view_change_status_timeout: Timeout, /// The number of ticks before resending a `start_view_change` or `do_view_change` message: view_change_message_timeout: Timeout, /// The number of ticks before repairing missing/disconnected headers and/or dirty entries: repair_timeout: Timeout, /// Used to provide deterministic entropy to `choose_any_other_replica()`. /// Incremented whenever `choose_any_other_replica()` is called. choose_any_other_replica_ticks: u64 = 0, /// Used to calculate exponential backoff with random jitter. /// Seeded with the replica's index number. 
prng: std.rand.DefaultPrng, on_change_state: ?fn (replica: *Self) void = null, pub fn init( allocator: *Allocator, cluster: u32, replica_count: u8, replica: u8, time: *Time, storage: *Storage, message_bus: *MessageBus, state_machine: *StateMachine, ) !Self { assert(replica_count > 0); assert(replica < replica_count); const majority = (replica_count / 2) + 1; assert(majority <= replica_count); assert(config.quorum_replication_max >= 2); const quorum_replication = std.math.min(config.quorum_replication_max, majority); assert(quorum_replication >= 2 or quorum_replication == replica_count); const quorum_view_change = std.math.max( replica_count - quorum_replication + 1, majority, ); // The view change quorum may be more expensive to make the replication quorum cheaper. // The insight is that the replication phase is by far more common than the view change. // This trade-off allows us to optimize for the common case. // See the comments in `config.zig` for further explanation. assert(quorum_view_change >= majority); if (replica_count <= 2) { assert(quorum_replication == replica_count); assert(quorum_view_change == replica_count); } else { assert(quorum_replication < replica_count); assert(quorum_view_change < replica_count); } // Flexible quorums are safe if these two quorums intersect so that this relation holds: assert(quorum_replication + quorum_view_change > replica_count); var client_table: ClientTable = .{}; errdefer client_table.deinit(allocator); try client_table.ensureCapacity(allocator, @intCast(u32, config.clients_max)); assert(client_table.capacity() >= config.clients_max); var init_prepare = Header{ .parent = 0, .client = 0, .context = 0, .request = 0, .cluster = cluster, .epoch = 0, .view = 0, .op = 0, .commit = 0, .offset = 0, .size = @sizeOf(Header), .replica = 0, .command = .prepare, .operation = .init, .version = Version, }; init_prepare.set_checksum_body(&[0]u8{}); init_prepare.set_checksum(); var self = Self{ .cluster = cluster, .replica_count = 
replica_count, .replica = replica, .quorum_replication = quorum_replication, .quorum_view_change = quorum_view_change, .clock = try Clock.init( allocator, replica_count, replica, time, ), .journal = try Journal.init( allocator, storage, replica, config.journal_size_max, config.journal_headers_max, &init_prepare, ), .message_bus = message_bus, .state_machine = state_machine, .client_table = client_table, .view = init_prepare.view, .view_normal = init_prepare.view, .op = init_prepare.op, .commit_min = init_prepare.commit, .commit_max = init_prepare.commit, .ping_timeout = Timeout{ .name = "ping_timeout", .id = replica, .after = 100, }, .prepare_timeout = Timeout{ .name = "prepare_timeout", .id = replica, .after = 50, }, .commit_timeout = Timeout{ .name = "commit_timeout", .id = replica, .after = 100, }, .normal_status_timeout = Timeout{ .name = "normal_status_timeout", .id = replica, .after = 500, }, .view_change_status_timeout = Timeout{ .name = "view_change_status_timeout", .id = replica, .after = 500, }, .view_change_message_timeout = Timeout{ .name = "view_change_message_timeout", .id = replica, .after = 50, }, .repair_timeout = Timeout{ .name = "repair_timeout", .id = replica, .after = 50, }, .prng = std.rand.DefaultPrng.init(replica), }; log.debug("{}: init: replica_count={} quorum_view_change={} quorum_replication={}", .{ self.replica, self.replica_count, self.quorum_view_change, self.quorum_replication, }); // To reduce the probability of clustering, for efficient linear probing, the hash map will // always overallocate capacity by a factor of two. log.debug("{}: init: client_table.capacity()={} for config.clients_max={} entries", .{ self.replica, self.client_table.capacity(), config.clients_max, }); // We must initialize timeouts here, not in tick() on the first tick, because on_message() // can race with tick()... 
before timeouts have been initialized: assert(self.status == .normal); if (self.leader()) { log.debug("{}: init: leader", .{self.replica}); self.ping_timeout.start(); self.commit_timeout.start(); self.repair_timeout.start(); } else { log.debug("{}: init: follower", .{self.replica}); self.ping_timeout.start(); self.normal_status_timeout.start(); self.repair_timeout.start(); } return self; } /// Free all memory and unref all messages held by the replica /// This does not deinitialize the StateMachine, MessageBus, Storage, or Time pub fn deinit(self: *Self, allocator: *Allocator) void { self.journal.deinit(allocator); self.clock.deinit(allocator); { var it = self.client_table.iterator(); while (it.next()) |entry| { self.message_bus.unref(entry.value_ptr.reply); } self.client_table.deinit(allocator); } { var it = self.pipeline.iterator(); while (it.next()) |prepare| { self.message_bus.unref(prepare.message); for (prepare.ok_from_all_replicas) |message| { if (message) |m| self.message_bus.unref(m); } } } if (self.loopback_queue) |loopback_message| { assert(loopback_message.next == null); self.message_bus.unref(loopback_message); self.loopback_queue = null; } for (self.start_view_change_from_other_replicas) |message| { if (message) |m| self.message_bus.unref(m); } for (self.do_view_change_from_all_replicas) |message| { if (message) |m| self.message_bus.unref(m); } for (self.nack_prepare_from_other_replicas) |message| { if (message) |m| self.message_bus.unref(m); } } /// Time is measured in logical ticks that are incremented on every call to tick(). /// This eliminates a dependency on the system time and enables deterministic testing. pub fn tick(self: *Self) void { // Ensure that all asynchronous IO callbacks flushed the loopback queue as needed. // If an IO callback queues a loopback message without flushing the queue then this will // delay the delivery of messages (e.g. a prepare_ok from the leader to itself) and // decrease throughput significantly. 
assert(self.loopback_queue == null); self.clock.tick(); if (!self.journal.recovered) { self.journal.recover(); return; } else { assert(!self.journal.recovering); } self.ping_timeout.tick(); self.prepare_timeout.tick(); self.commit_timeout.tick(); self.normal_status_timeout.tick(); self.view_change_status_timeout.tick(); self.view_change_message_timeout.tick(); self.repair_timeout.tick(); if (self.ping_timeout.fired()) self.on_ping_timeout(); if (self.prepare_timeout.fired()) self.on_prepare_timeout(); if (self.commit_timeout.fired()) self.on_commit_timeout(); if (self.normal_status_timeout.fired()) self.on_normal_status_timeout(); if (self.view_change_status_timeout.fired()) self.on_view_change_status_timeout(); if (self.view_change_message_timeout.fired()) self.on_view_change_message_timeout(); if (self.repair_timeout.fired()) self.on_repair_timeout(); // None of the on_timeout() functions above should send a message to this replica. assert(self.loopback_queue == null); } /// Called by the MessageBus to deliver a message to the replica. pub fn on_message(self: *Self, message: *Message) void { assert(self.loopback_queue == null); log.debug("{}: on_message: view={} status={s} {}", .{ self.replica, self.view, @tagName(self.status), message.header, }); if (message.header.invalid()) |reason| { log.alert("{}: on_message: invalid ({s})", .{ self.replica, reason }); return; } // No client or replica should ever send a .reserved message. 
assert(message.header.command != .reserved); if (message.header.cluster != self.cluster) { log.warn("{}: on_message: wrong cluster (cluster must be {} not {})", .{ self.replica, self.cluster, message.header.cluster, }); return; } if (!self.journal.recovered) { self.journal.recover(); log.debug("{}: on_message: waiting for journal to recover", .{self.replica}); return; } else { assert(!self.journal.recovering); } assert(message.header.replica < self.replica_count); switch (message.header.command) { .ping => self.on_ping(message), .pong => self.on_pong(message), .request => self.on_request(message), .prepare => self.on_prepare(message), .prepare_ok => self.on_prepare_ok(message), .commit => self.on_commit(message), .start_view_change => self.on_start_view_change(message), .do_view_change => self.on_do_view_change(message), .start_view => self.on_start_view(message), .recovery => self.on_recovery(message), .recovery_response => return, // TODO .request_start_view => self.on_request_start_view(message), .request_prepare => self.on_request_prepare(message), .request_headers => self.on_request_headers(message), .headers => self.on_headers(message), .nack_prepare => self.on_nack_prepare(message), // A replica should never handle misdirected messages intended for a client: .eviction, .reply => { log.warn("{}: on_message: ignoring misdirected {s} message", .{ self.replica, @tagName(message.header.command), }); return; }, .reserved => unreachable, } if (self.loopback_queue) |loopback_message| { log.emerg("{}: on_message: on_{s}() queued a {s} loopback message with no flush", .{ self.replica, @tagName(message.header.command), @tagName(loopback_message.header.command), }); } // Any message handlers that loopback must take responsibility for the flush. 
assert(self.loopback_queue == null); } fn on_ping(self: *Self, message: *const Message) void { if (self.status != .normal and self.status != .view_change) return; assert(self.status == .normal or self.status == .view_change); // TODO Drop pings that were not addressed to us. var pong = Header{ .command = .pong, .cluster = self.cluster, .replica = self.replica, .view = self.view, }; if (message.header.client > 0) { assert(message.header.replica == 0); // We must only ever send our view number to a client via a pong message if we are // in normal status. Otherwise, we may be partitioned from the cluster with a newer // view number, leak this to the client, which would then pass this to the cluster // in subsequent client requests, which would then ignore these client requests with // a newer view number, locking out the client. The principle here is that we must // never send view numbers for views that have not yet started. if (self.status == .normal) { self.send_header_to_client(message.header.client, pong); } } else if (message.header.replica == self.replica) { log.warn("{}: on_ping: ignoring (self)", .{self.replica}); } else { // Copy the ping's monotonic timestamp to our pong and add our wall clock sample: pong.op = message.header.op; pong.offset = @bitCast(u64, self.clock.realtime()); self.send_header_to_replica(message.header.replica, pong); } } fn on_pong(self: *Self, message: *const Message) void { if (message.header.client > 0) return; if (message.header.replica == self.replica) return; const m0 = message.header.op; const t1 = @bitCast(i64, message.header.offset); const m2 = self.clock.monotonic(); self.clock.learn(message.header.replica, m0, t1, m2); } /// The primary advances op-number, adds the request to the end of the log, and updates the /// information for this client in the client-table to contain the new request number, s. 
/// Then it sends a ⟨PREPARE v, m, n, k⟩ message to the other replicas, where v is the current /// view-number, m is the message it received from the client, n is the op-number it assigned to /// the request, and k is the commit-number. fn on_request(self: *Self, message: *Message) void { if (self.ignore_request_message(message)) return; assert(self.status == .normal); assert(self.leader()); assert(self.commit_min == self.commit_max); assert(self.commit_max + self.pipeline.count == self.op); assert(message.header.command == .request); assert(message.header.view <= self.view); // The client's view may be behind ours. const realtime = self.clock.realtime_synchronized() orelse { log.alert("{}: on_request: dropping (clock not synchronized)", .{self.replica}); return; }; log.debug("{}: on_request: request {}", .{ self.replica, message.header.checksum }); self.state_machine.prepare( realtime, message.header.operation.cast(StateMachine), message.body(), ); var latest_entry = self.journal.entry_for_op_exact(self.op).?; message.header.parent = latest_entry.checksum; message.header.context = message.header.checksum; message.header.view = self.view; message.header.op = self.op + 1; message.header.commit = self.commit_max; message.header.offset = self.journal.next_offset(latest_entry); message.header.replica = self.replica; message.header.command = .prepare; message.header.set_checksum_body(message.body()); message.header.set_checksum(); log.debug("{}: on_request: prepare {}", .{ self.replica, message.header.checksum }); self.pipeline.push(.{ .message = message.ref() }) catch unreachable; assert(self.pipeline.count >= 1); if (self.pipeline.count == 1) { // This is the only prepare in the pipeline, start the timeout: assert(!self.prepare_timeout.ticking); self.prepare_timeout.start(); } else { // Do not restart the prepare timeout as it is already ticking for another prepare. 
assert(self.prepare_timeout.ticking); } self.on_prepare(message); // We expect `on_prepare()` to increment `self.op` to match the leader's latest prepare: // This is critical to ensure that pipelined prepares do not receive the same op number. assert(self.op == message.header.op); } /// Replication is simple, with a single code path for the leader and followers. /// /// The leader starts by sending a prepare message to itself. /// /// Each replica (including the leader) then forwards this prepare message to the next replica /// in the configuration, in parallel to writing to its own journal, closing the circle until /// the next replica is back to the leader, in which case the replica does not forward. /// /// This keeps the leader's outgoing bandwidth limited (one-for-one) to incoming bandwidth, /// since the leader need only replicate to the next replica. Otherwise, the leader would need /// to replicate to multiple followers, dividing available bandwidth. /// /// This does not impact latency, since with Flexible Paxos we need only one remote prepare_ok. /// It is ideal if this synchronous replication to one remote replica is to the next replica, /// since that is the replica next in line to be leader, which will need to be up-to-date before /// it can start the next view. /// /// At the same time, asynchronous replication keeps going, so that if our local disk is slow, /// then any latency spike will be masked by more remote prepare_ok messages as they come in. /// This gives automatic tail latency tolerance for storage latency spikes. /// /// The remaining problem then is tail latency tolerance for network latency spikes. /// If the next replica is down or partitioned, then the leader's prepare timeout will fire, /// and the leader will resend but to another replica, until it receives enough prepare_ok's. 
fn on_prepare(self: *Self, message: *Message) void {
    self.view_jump(message.header);

    // A prepare that does not grow the log is a repair, handled separately:
    if (self.is_repair(message)) {
        log.debug("{}: on_prepare: ignoring (repair)", .{self.replica});
        self.on_repair(message);
        return;
    }

    if (self.status != .normal) {
        log.debug("{}: on_prepare: ignoring ({})", .{ self.replica, self.status });
        return;
    }

    if (message.header.view < self.view) {
        log.debug("{}: on_prepare: ignoring (older view)", .{self.replica});
        return;
    }

    // NOTE(review): a prepare for a newer view is also dropped here; presumably
    // view_jump() above is expected to have moved us into that view when safe — confirm.
    if (message.header.view > self.view) {
        log.debug("{}: on_prepare: ignoring (newer view)", .{self.replica});
        return;
    }

    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(self.leader() or self.follower());
    assert(message.header.replica == self.leader_index(message.header.view));
    assert(message.header.op > self.op);
    assert(message.header.op > self.commit_min);

    // A prepare from the current leader is evidence of leader liveness:
    if (self.follower()) self.normal_status_timeout.reset();

    if (message.header.op > self.op + 1) {
        log.debug("{}: on_prepare: newer op", .{self.replica});
        self.jump_to_newer_op_in_normal_status(message.header);
    }

    if (self.journal.previous_entry(message.header)) |previous| {
        // Any previous entry may be a whole journal's worth of ops behind due to wrapping.
        // We therefore do not do any further op, offset or checksum assertions beyond this:
        self.panic_if_hash_chain_would_break_in_the_same_view(previous, message.header);
    }

    // We must advance our op and set the header as dirty before replicating and journalling.
    // The leader needs this before its journal is outrun by any prepare_ok quorum:
    log.debug("{}: on_prepare: advancing: op={}..{} checksum={}..{}", .{
        self.replica,
        self.op,
        message.header.op,
        message.header.parent,
        message.header.checksum,
    });
    assert(message.header.op == self.op + 1);
    self.op = message.header.op;
    self.journal.set_entry_as_dirty(message.header);

    // Forward to the next replica and write to our own journal, in parallel:
    self.replicate(message);
    self.append(message);

    if (self.follower()) {
        // A prepare may already be committed if requested by repair() so take the max:
        self.commit_ops(std.math.max(message.header.commit, self.commit_max));
        assert(self.commit_max >= message.header.commit);
    }
}

/// Processes a prepare_ok (leader only): counts it towards the replication quorum of the
/// pipelined prepare identified by the message's context, and once the quorum is reached
/// (exactly once), marks the prepare and starts committing the pipeline.
fn on_prepare_ok(self: *Self, message: *Message) void {
    if (self.ignore_prepare_ok(message)) return;

    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(self.leader());

    // The context of a prepare_ok is the checksum of the prepare it acknowledges:
    const prepare = self.pipeline_prepare_for_prepare_ok(message) orelse return;

    assert(prepare.message.header.checksum == message.header.context);
    assert(prepare.message.header.op >= self.commit_max + 1);
    assert(prepare.message.header.op <= self.commit_max + self.pipeline.count);
    assert(prepare.message.header.op <= self.op);

    // Wait until we have `quorum_replication` prepare_ok messages (including ourself):
    const threshold = self.quorum_replication;

    const count = self.add_message_and_receive_quorum_exactly_once(
        &prepare.ok_from_all_replicas,
        message,
        threshold,
    ) orelse return;

    assert(count == threshold);
    assert(!prepare.ok_quorum_received);
    prepare.ok_quorum_received = true;

    log.debug("{}: on_prepare_ok: quorum received, context={}", .{
        self.replica,
        prepare.message.header.checksum,
    });

    self.commit_pipeline();
}

/// Known issue:
/// TODO The leader should stand down if it sees too many retries in on_prepare_timeout().
/// It's possible for the network to be one-way partitioned so that followers don't see the
/// leader as down, but neither can the leader hear from the followers.
/// Handles a commit heartbeat from the leader (followers only): verifies the committed
/// entry's checksum where possible, then advances our commit number via commit_ops().
fn on_commit(self: *Self, message: *const Message) void {
    self.view_jump(message.header);

    if (self.status != .normal) {
        log.debug("{}: on_commit: ignoring ({})", .{ self.replica, self.status });
        return;
    }

    if (message.header.view < self.view) {
        log.debug("{}: on_commit: ignoring (older view)", .{self.replica});
        return;
    }

    if (message.header.view > self.view) {
        log.debug("{}: on_commit: ignoring (newer view)", .{self.replica});
        return;
    }

    // A leader receiving a commit heartbeat is unexpected; log loudly but ignore:
    if (self.leader()) {
        log.warn("{}: on_commit: ignoring (leader)", .{self.replica});
        return;
    }

    assert(self.status == .normal);
    assert(self.follower());
    assert(message.header.view == self.view);
    assert(message.header.replica == self.leader_index(message.header.view));

    // We may not always have the latest commit entry but if we do our checksum must match:
    if (self.journal.entry_for_op_exact(message.header.commit)) |commit_entry| {
        if (commit_entry.checksum == message.header.context) {
            log.debug("{}: on_commit: checksum verified", .{self.replica});
        } else if (self.valid_hash_chain("on_commit")) {
            // Our hash chain is intact and yet disagrees with the leader: this is fatal.
            @panic("commit checksum verification failed");
        } else {
            // We may still be repairing after receiving the start_view message.
            log.debug("{}: on_commit: skipping checksum verification", .{self.replica});
        }
    }

    self.normal_status_timeout.reset();
    self.commit_ops(message.header.commit);
}

/// Handles a prepare received as a repair (outside the normal replication flow): validates
/// it against our status/view/op, and writes it to the journal if it repairs a dirty entry.
/// Repairs may never advance `self.op`.
fn on_repair(self: *Self, message: *Message) void {
    assert(message.header.command == .prepare);

    if (self.status != .normal and self.status != .view_change) {
        log.debug("{}: on_repair: ignoring ({})", .{ self.replica, self.status });
        return;
    }

    if (message.header.view > self.view) {
        log.debug("{}: on_repair: ignoring (newer view)", .{self.replica});
        return;
    }

    if (self.status == .view_change and message.header.view == self.view) {
        log.debug("{}: on_repair: ignoring (view started)", .{self.replica});
        return;
    }

    if (self.status == .view_change and self.leader_index(self.view) != self.replica) {
        log.debug("{}: on_repair: ignoring (view change, follower)", .{self.replica});
        return;
    }

    if (self.status == .view_change and !self.do_view_change_quorum) {
        log.debug("{}: on_repair: ignoring (view change, waiting for quorum)", .{self.replica});
        return;
    }

    if (message.header.op > self.op) {
        assert(message.header.view < self.view);
        log.debug("{}: on_repair: ignoring (would advance self.op)", .{self.replica});
        return;
    }

    assert(self.status == .normal or self.status == .view_change);
    assert(self.repairs_allowed());
    assert(message.header.view <= self.view);
    assert(message.header.op <= self.op); // Repairs may never advance `self.op`.

    if (self.journal.has_clean(message.header)) {
        log.debug("{}: on_repair: ignoring (duplicate)", .{self.replica});
        // We already have this prepare clean in our journal, so ack it again:
        self.send_prepare_ok(message.header);
        defer self.flush_loopback_queue();
        return;
    }

    if (self.repair_header(message.header)) {
        assert(self.journal.has_dirty(message.header));

        if (self.nack_prepare_op) |nack_prepare_op| {
            if (nack_prepare_op == message.header.op) {
                log.debug("{}: on_repair: repairing uncommitted op={}", .{
                    self.replica,
                    message.header.op,
                });
                // The prepare we were nacking has arrived, so stop nacking it:
                self.reset_quorum_nack_prepare();
            }
        }

        log.debug("{}: on_repair: repairing journal", .{self.replica});
        self.write_prepare(message, .repair);
    }
}

/// Handles a start_view_change from another replica: jumps into the view change, and once
/// `f` such messages (excluding ourself) are received, records the quorum exactly once and
/// sends our do_view_change to the new view's leader.
fn on_start_view_change(self: *Self, message: *Message) void {
    if (self.ignore_view_change_message(message)) return;

    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view >= self.view);
    assert(message.header.replica != self.replica);

    self.view_jump(message.header);

    assert(self.status == .view_change);
    assert(message.header.view == self.view);

    if (self.leader_index(self.view) == self.replica) {
        // If we are the leader of the new view, then wait until we have a message to send a
        // do_view_change message to ourself. The on_do_view_change() handler will panic if
        // we received a start_view_change quorum without a do_view_change to ourself.
        if (self.message_bus.get_message()) |available| {
            self.message_bus.unref(available);
        } else {
            log.alert("{}: on_start_view_change: waiting for message for do_view_change", .{
                self.replica,
            });
            return;
        }
    }

    // Wait until we have `f` messages (excluding ourself) for quorum:
    assert(self.replica_count > 1);
    const threshold = self.quorum_view_change - 1;

    const count = self.add_message_and_receive_quorum_exactly_once(
        &self.start_view_change_from_other_replicas,
        message,
        threshold,
    ) orelse return;

    assert(count == threshold);
    assert(self.start_view_change_from_other_replicas[self.replica] == null);
    log.debug("{}: on_start_view_change: view={} quorum received", .{
        self.replica,
        self.view,
    });

    assert(!self.start_view_change_quorum);
    assert(!self.do_view_change_quorum);
    self.start_view_change_quorum = true;

    // When replica i receives start_view_change messages for its view from f other replicas,
    // it sends a ⟨do_view_change v, l, v’, n, k, i⟩ message to the node that will be the
    // primary in the new view. Here v is its view, l is its log, v′ is the view number of the
    // latest view in which its status was normal, n is the op number, and k is the commit
    // number.
    self.send_do_view_change();
    defer self.flush_loopback_queue();
}

/// When the new primary receives f + 1 do_view_change messages from different replicas
/// (including itself), it sets its view number to that in the messages and selects as the
/// new log the one contained in the message with the largest v′; if several messages have
/// the same v′ it selects the one among them with the largest n. It sets its op number to
/// that of the topmost entry in the new log, sets its commit number to the largest such
/// number it received in the do_view_change messages, changes its status to normal, and
/// informs the other replicas of the completion of the view change by sending
/// ⟨start_view v, l, n, k⟩ messages to the other replicas, where l is the new log, n is the
/// op number, and k is the commit number.
fn on_do_view_change(self: *Self, message: *Message) void {
    if (self.ignore_view_change_message(message)) return;

    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view >= self.view);
    assert(self.leader_index(message.header.view) == self.replica);

    self.view_jump(message.header);

    assert(self.status == .view_change);
    assert(message.header.view == self.view);

    // We may receive a `do_view_change` quorum from other replicas, which already have a
    // `start_view_change_quorum`, before we receive a `start_view_change_quorum`:
    if (!self.start_view_change_quorum) {
        log.debug("{}: on_do_view_change: waiting for start_view_change quorum", .{
            self.replica,
        });
        return;
    }

    // Wait until we have `f + 1` messages (including ourself) for quorum:
    assert(self.replica_count > 1);
    const threshold = self.quorum_view_change;

    const count = self.add_message_and_receive_quorum_exactly_once(
        &self.do_view_change_from_all_replicas,
        message,
        threshold,
    ) orelse return;

    assert(count == threshold);
    assert(self.do_view_change_from_all_replicas[self.replica] != null);
    log.debug("{}: on_do_view_change: view={} quorum received", .{
        self.replica,
        self.view,
    });

    // Select across the quorum: the highest latest-normal-view (v'), breaking ties by the
    // highest op within that view, and independently the highest commit number (k):
    var v: ?u32 = null;
    var k: ?u64 = null;
    var latest = Header.reserved();
    for (self.do_view_change_from_all_replicas) |received, replica| {
        if (received) |m| {
            assert(m.header.command == .do_view_change);
            assert(m.header.cluster == self.cluster);
            assert(m.header.replica == replica);
            assert(m.header.view == self.view);

            // The latest normal view experienced by this replica:
            // This may be higher than the view in any of the prepare headers.
            const replica_view_normal = @intCast(u32, m.header.offset);
            assert(replica_view_normal < m.header.view);

            var replica_latest = Header.reserved();
            self.set_latest_op(self.message_body_as_headers(m), &replica_latest);
            assert(replica_latest.op == m.header.op);

            log.debug(
                "{}: on_do_view_change: replica={} v'={} op={} commit={} latest={}",
                .{
                    self.replica,
                    m.header.replica,
                    replica_view_normal,
                    m.header.op,
                    m.header.commit,
                    replica_latest,
                },
            );

            if (v == null or replica_view_normal > v.?) {
                v = replica_view_normal;
                latest = replica_latest;
            } else if (replica_view_normal == v.? and replica_latest.op > latest.op) {
                v = replica_view_normal;
                latest = replica_latest;
            }

            if (k == null or m.header.commit > k.?) k = m.header.commit;
        }
    }

    self.set_latest_op_and_k(&latest, k.?, "on_do_view_change");

    // Now that we have the latest op in place, repair any other headers:
    for (self.do_view_change_from_all_replicas) |received| {
        if (received) |m| {
            for (self.message_body_as_headers(m)) |*h| {
                _ = self.repair_header(h);
            }
        }
    }

    // Verify that the repairs above have not replaced or advanced the latest op:
    assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);

    assert(self.start_view_change_quorum);
    assert(!self.do_view_change_quorum);
    self.do_view_change_quorum = true;

    self.discard_uncommitted_headers();
    assert(self.op >= self.commit_max);
    assert(self.journal.entry_for_op_exact(self.op) != null);

    // Start repairs according to the CTRL protocol:
    assert(!self.repair_timeout.ticking);
    self.repair_timeout.start();
    self.repair();
}

/// When other replicas receive the start_view message, they replace their log with the one
/// in the message, set their op number to that of the latest entry in the log, set their
/// view number to the view number in the message, change their status to normal, and update
/// the information in their client table.
/// If there are non-committed operations in the log,
/// they send a ⟨prepare_ok v, n, i⟩ message to the primary; here n is the op-number. Then
/// they execute all operations known to be committed that they haven’t executed previously,
/// advance their commit number, and update the information in their client table.
fn on_start_view(self: *Self, message: *const Message) void {
    if (self.ignore_view_change_message(message)) return;

    assert(self.status == .view_change or self.status == .normal);
    assert(message.header.view >= self.view);
    assert(message.header.replica != self.replica);
    assert(message.header.replica == self.leader_index(message.header.view));

    self.view_jump(message.header);

    assert(self.status == .view_change);
    assert(message.header.view == self.view);

    // Install the latest op from the log sent by the new leader:
    var latest = Header.reserved();
    self.set_latest_op(self.message_body_as_headers(message), &latest);
    assert(latest.op == message.header.op);

    self.set_latest_op_and_k(&latest, message.header.commit, "on_start_view");

    // Now that we have the latest op in place, repair any other headers:
    for (self.message_body_as_headers(message)) |*h| {
        _ = self.repair_header(h);
    }

    // Verify that the repairs above have not replaced or advanced the latest op:
    assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);

    if (self.status == .view_change) {
        self.transition_to_normal_status(message.header.view);
        self.send_prepare_oks_after_view_change();
    }

    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(self.follower());

    self.commit_ops(self.commit_max);

    self.repair();
}

/// Responds to a request_start_view (leader only) with a start_view message carrying our
/// current view, op and commit numbers, so the requester can catch up to the new view.
fn on_request_start_view(self: *Self, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;

    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);
    assert(self.leader());

    const start_view = self.create_view_change_message(.start_view) orelse {
        log.alert("{}: on_request_start_view: dropping start_view, no message available", .{
            self.replica,
        });
        return;
    };
    defer self.message_bus.unref(start_view);

    assert(start_view.references == 1);
    assert(start_view.header.command == .start_view);
    assert(start_view.header.view == self.view);
    assert(start_view.header.op == self.op);
    assert(start_view.header.commit == self.commit_max);

    self.send_message_to_replica(message.header.replica, start_view);
}

/// TODO This is a work in progress (out of scope for the bounty)
/// Responds to a recovery message with a recovery_response carrying our view, op, commit
/// and our latest journal headers.
fn on_recovery(self: *Self, message: *const Message) void {
    if (self.status != .normal) {
        log.debug("{}: on_recovery: ignoring ({})", .{ self.replica, self.status });
        return;
    }

    if (message.header.replica == self.replica) {
        log.warn("{}: on_recovery: ignoring (self)", .{self.replica});
        return;
    }

    const response = self.message_bus.get_message() orelse {
        log.alert("{}: on_recovery: ignoring (waiting for message)", .{self.replica});
        return;
    };
    defer self.message_bus.unref(response);

    response.header.* = .{
        .command = .recovery_response,
        .cluster = self.cluster,
        // Echo the requester's context back so they can match up our response:
        .context = message.header.context,
        .replica = self.replica,
        .view = self.view,
        .op = self.op,
        .commit = self.commit_max,
    };

    const count_max = 8; // The number of prepare headers to include in the body.

    // Bound by the response buffer's capacity in headers, and by 1 + count_max
    // (1 extra for the leading response header itself):
    const size_max = @sizeOf(Header) * std.math.min(
        std.math.max(@divFloor(response.buffer.len, @sizeOf(Header)), 2),
        1 + count_max,
    );
    assert(size_max > @sizeOf(Header));

    const count = self.journal.copy_latest_headers_between(
        0,
        self.op,
        std.mem.bytesAsSlice(Header, response.buffer[@sizeOf(Header)..size_max]),
    );

    // We expect that self.op always exists.
    assert(count > 0);

    response.header.size = @intCast(u32, @sizeOf(Header) + @sizeOf(Header) * count);
    response.header.set_checksum_body(response.body());
    response.header.set_checksum();

    assert(self.status == .normal);
    // The checksum for a recovery message is deterministic, and cannot be used as a nonce:
    assert(response.header.context != message.header.checksum);

    self.send_message_to_replica(message.header.replica, response);
}

/// TODO This is a work in progress (out of scope for the bounty)
fn on_recovery_response(self: *Self, message: *Message) void {}

/// Responds to a request_prepare: reads and sends the prepare back if our copy is clean,
/// stays silent if our copy is guaranteed or faulty (not safe to nack), or (during a view
/// change, when asked by the new leader) nacks a prepare we never guaranteed.
fn on_request_prepare(self: *Self, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;

    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);

    const op = message.header.op;
    var checksum: ?u128 = message.header.context;
    // NOTE(review): when we are the leader, a zero context clears the checksum filter,
    // presumably meaning "any checksum for this op" — confirm against the sender.
    if (self.leader_index(self.view) == self.replica and checksum.? == 0) checksum = null;

    if (self.journal.entry_for_op_exact_with_checksum(op, checksum)) |entry| {
        assert(entry.op == op);
        assert(checksum == null or entry.checksum == checksum.?);

        if (!self.journal.dirty.bit(op)) {
            assert(!self.journal.faulty.bit(op));

            log.debug("{}: on_request_prepare: op={} checksum={} reading", .{
                self.replica,
                op,
                checksum,
            });

            // TODO Do not reissue the read if we are already reading in order to send to
            // this particular destination replica.
            self.journal.read_prepare(
                on_request_prepare_read,
                op,
                entry.checksum,
                message.header.replica,
            );

            // We have guaranteed the prepare and our copy is clean (not safe to nack).
            return;
        } else if (self.journal.faulty.bit(op)) {
            log.debug("{}: on_request_prepare: op={} checksum={} faulty", .{
                self.replica,
                op,
                checksum,
            });

            // We have guaranteed the prepare but our copy is faulty (not safe to nack).
            return;
        }

        // We know of the prepare but we have yet to write or guarantee it (safe to nack).
        // Continue through below...
// NOTE(review): continuation of on_request_prepare(), which begins above this block.
    }

    if (self.status == .view_change) {
        assert(message.header.replica == self.leader_index(self.view));
        assert(checksum != null);

        // We may hold the header (dirty, not faulty) without having guaranteed the prepare:
        if (self.journal.entry_for_op_exact_with_checksum(op, checksum) != null) {
            assert(self.journal.dirty.bit(op) and !self.journal.faulty.bit(op));
        }

        log.debug("{}: on_request_prepare: op={} checksum={} nacking", .{
            self.replica,
            op,
            checksum,
        });

        self.send_header_to_replica(message.header.replica, .{
            .command = .nack_prepare,
            .context = checksum.?,
            .cluster = self.cluster,
            .replica = self.replica,
            .view = self.view,
            .op = op,
        });
    }
}

/// Callback for the journal read issued by on_request_prepare(): forwards the prepare read
/// from our journal to the replica that requested it, or logs if the read yielded nothing.
fn on_request_prepare_read(self: *Self, prepare: ?*Message, destination_replica: ?u8) void {
    const message = prepare orelse {
        log.debug("{}: on_request_prepare_read: prepare=null", .{self.replica});
        return;
    };

    log.debug("{}: on_request_prepare_read: op={} checksum={} sending to replica={}", .{
        self.replica,
        message.header.op,
        message.header.checksum,
        destination_replica.?,
    });

    assert(destination_replica.? != self.replica);
    self.send_message_to_replica(destination_replica.?, message);
}

/// Responds to a request_headers message with a headers message containing our latest
/// journal headers in the inclusive op range [message.header.commit, message.header.op].
/// Responds with nothing if no headers in the range are known.
fn on_request_headers(self: *Self, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;

    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);

    const response = self.message_bus.get_message() orelse {
        log.alert("{}: on_request_headers: ignoring (op={}..{}, no message available)", .{
            self.replica,
            message.header.commit,
            message.header.op,
        });
        return;
    };
    defer self.message_bus.unref(response);

    response.header.* = .{
        .command = .headers,
        // We echo the context back to the replica so that they can match up our response:
        .context = message.header.context,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
    };

    const op_min = message.header.commit;
    const op_max = message.header.op;
    assert(op_max >= op_min);

    // We must add 1 because op_max and op_min are both inclusive:
    const count_max = @intCast(u32, std.math.min(64, op_max - op_min + 1));
    assert(count_max > 0);

    // Bound size_max by the capacity of the buffer we slice into below.
    // Fix: this was computed from `message.buffer.len` (the request), but the slice is
    // taken from `response.buffer` — now consistent with on_recovery():
    const size_max = @sizeOf(Header) * std.math.min(
        std.math.max(@divFloor(response.buffer.len, @sizeOf(Header)), 2),
        1 + count_max,
    );
    assert(size_max > @sizeOf(Header));

    const count = self.journal.copy_latest_headers_between(
        op_min,
        op_max,
        std.mem.bytesAsSlice(Header, response.buffer[@sizeOf(Header)..size_max]),
    );

    if (count == 0) {
        log.debug("{}: on_request_headers: ignoring (op={}..{}, no headers)", .{
            self.replica,
            op_min,
            op_max,
        });
        return;
    }

    response.header.size = @intCast(u32, @sizeOf(Header) + @sizeOf(Header) * count);
    response.header.set_checksum_body(response.body());
    response.header.set_checksum();

    self.send_message_to_replica(message.header.replica, response);
}

/// Counts nack_prepare messages (new-view leader only) for the uncommitted op currently
/// being repaired, and once enough nacks prove the op cannot have reached a replication
/// quorum, discards it. (Continues below this block.)
fn on_nack_prepare(self: *Self, message: *Message) void {
    if (self.ignore_repair_message(message)) return;

    assert(self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);
    assert(self.leader_index(self.view) == self.replica);
    assert(self.do_view_change_quorum);
    assert(self.repairs_allowed());

    if (self.nack_prepare_op == null) {
        log.debug("{}: on_nack_prepare: ignoring (no longer expected)", .{self.replica});
        return;
    }

    const op = self.nack_prepare_op.?;
    const checksum = self.journal.entry_for_op_exact(op).?.checksum;

    if (message.header.op != op) {
        log.debug("{}: on_nack_prepare: ignoring (repairing another op)", .{self.replica});
        return;
    }

    if (message.header.context != checksum) {
        log.debug("{}: on_nack_prepare: ignoring (repairing another checksum)", .{
            self.replica,
        });
        return;
    }

    // Followers may not send a `nack_prepare` for a different checksum:
    // However our op may change in between sending the request and getting the nack.
    assert(message.header.op == op);
    assert(message.header.context == checksum);

    // Here are what our nack quorums look like, if we know our op is faulty:
    // These are for various replication quorums under Flexible Paxos.
// NOTE(review): continuation of on_nack_prepare(), which begins above this block.
    // We need to have enough nacks to guarantee that `quorum_replication` was not reached,
    // because if the replication quorum was reached, then it may have been committed.
    // We add `1` in each case because our op is faulty and may have been counted.
    //
    // replica_count=2 - quorum_replication=2 + 1 = 0 + 1 = 1 nacks required
    // replica_count=3 - quorum_replication=2 + 1 = 1 + 1 = 2 nacks required
    // replica_count=4 - quorum_replication=2 + 1 = 2 + 1 = 3 nacks required
    // replica_count=4 - quorum_replication=3 + 1 = 1 + 1 = 2 nacks required
    // replica_count=5 - quorum_replication=2 + 1 = 3 + 1 = 4 nacks required
    // replica_count=5 - quorum_replication=3 + 1 = 2 + 1 = 3 nacks required
    //
    // Otherwise, if we know we do not have the op, then we can exclude ourselves.
    assert(self.replica_count > 1);

    const threshold = if (self.journal.faulty.bit(op))
        self.replica_count - self.quorum_replication + 1
    else
        self.replica_count - self.quorum_replication;

    if (threshold == 0) {
        assert(self.replica_count == 2);
        assert(!self.journal.faulty.bit(op));

        // This is a special case for a cluster-of-two, handled in `repair_prepare()`.
        log.debug("{}: on_nack_prepare: ignoring (cluster-of-two, not faulty)", .{
            self.replica,
        });
        return;
    }

    log.debug("{}: on_nack_prepare: quorum_replication={} threshold={}", .{
        self.replica,
        self.quorum_replication,
        threshold,
    });

    // We should never expect to receive a nack from ourselves:
    // Detect if we ever set `threshold` to `quorum_view_change` for a cluster-of-two again.
    assert(threshold < self.replica_count);

    // Wait until we have `threshold` messages for quorum:
    const count = self.add_message_and_receive_quorum_exactly_once(
        &self.nack_prepare_from_other_replicas,
        message,
        threshold,
    ) orelse return;

    assert(count == threshold);
    assert(self.nack_prepare_from_other_replicas[self.replica] == null);
    log.debug("{}: on_nack_prepare: quorum received", .{self.replica});

    // Enough nacks: per the quorum argument above, the op cannot have been committed.
    self.discard_uncommitted_ops_from(op, checksum);
    self.reset_quorum_nack_prepare();
    self.repair();
}

/// Handles a headers response to our request_headers: repairs each received header into
/// our journal, then continues the repair protocol.
fn on_headers(self: *Self, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;

    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);

    // We expect at least one header in the body, or otherwise no response to our request.
    assert(message.header.size > @sizeOf(Header));

    var op_min: ?u64 = null;
    var op_max: ?u64 = null;
    for (self.message_body_as_headers(message)) |*h| {
        if (op_min == null or h.op < op_min.?) op_min = h.op;
        if (op_max == null or h.op > op_max.?) op_max = h.op;
        _ = self.repair_header(h);
    }
    assert(op_max.? >= op_min.?);

    self.repair();
}

/// Broadcasts a ping (carrying our monotonic clock reading in `op`) to all other replicas.
fn on_ping_timeout(self: *Self) void {
    self.ping_timeout.reset();

    // TODO We may want to ping for connectivity during a view change.
    assert(self.status == .normal);
    assert(self.leader() or self.follower());

    var ping = Header{
        .command = .ping,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
        .op = self.clock.monotonic(),
    };

    self.send_header_to_other_replicas(ping);
}

/// The prepare at the head of the leader's pipeline has not yet received a replication
/// quorum: retry the commit, retry our own journal write, or re-replicate to another
/// replica, as appropriate. (Continues below this block.)
fn on_prepare_timeout(self: *Self) void {
    // We will decide below whether to reset or backoff the timeout.
    assert(self.status == .normal);
    assert(self.leader());

    const prepare = self.pipeline.head_ptr().?;
    assert(prepare.message.header.command == .prepare);

    if (prepare.ok_quorum_received) {
        self.prepare_timeout.reset();

        // We were unable to commit at the time because we were waiting for a message.
// NOTE(review): continuation of on_prepare_timeout(), which begins above this block.
        log.debug("{}: on_prepare_timeout: quorum already received, retrying commit", .{
            self.replica,
        });
        self.commit_pipeline();
        return;
    }

    // The list of remote replicas yet to send a prepare_ok:
    var waiting: [config.replicas_max]u8 = undefined;
    var waiting_len: usize = 0;
    for (prepare.ok_from_all_replicas[0..self.replica_count]) |received, replica| {
        if (received == null and replica != self.replica) {
            waiting[waiting_len] = @intCast(u8, replica);
            waiting_len += 1;
        }
    }

    if (waiting_len == 0) {
        self.prepare_timeout.reset();
        log.debug("{}: on_prepare_timeout: waiting for journal", .{self.replica});
        assert(prepare.ok_from_all_replicas[self.replica] == null);

        // We may be slow and waiting for the write to complete.
        //
        // We may even have maxed out our IO depth and been unable to initiate the write,
        // which can happen if `config.pipelining_max` exceeds `config.io_depth_write`.
        // This can lead to deadlock for a cluster of one or two (if we do not retry here),
        // since there is no other way for the leader to repair the dirty op because no
        // other replica has it.
        //
        // Retry the write through `on_repair()` which will work out which is which.
        // We do expect that the op would have been run through `on_prepare()` already.
        assert(prepare.message.header.op <= self.op);
        self.on_repair(prepare.message);
        return;
    }

    self.prepare_timeout.backoff(&self.prng);

    assert(waiting_len <= self.replica_count);
    for (waiting[0..waiting_len]) |replica| {
        assert(replica < self.replica_count);
        log.debug("{}: on_prepare_timeout: waiting for replica {}", .{
            self.replica,
            replica,
        });
    }

    // Cycle through the list to reach live replicas and get around partitions:
    // We do not assert `prepare_timeout.attempts > 0` since the counter may wrap back to 0.
    const replica = waiting[self.prepare_timeout.attempts % waiting_len];
    assert(replica != self.replica);

    log.debug("{}: on_prepare_timeout: replicating to replica {}", .{ self.replica, replica });
    self.send_message_to_replica(replica, prepare.message);
}

/// Broadcasts a commit heartbeat (leader only) carrying `commit_max` and the checksum of
/// the latest committed entry, so that followers can verify and advance their commits.
fn on_commit_timeout(self: *Self) void {
    self.commit_timeout.reset();

    assert(self.status == .normal);
    assert(self.leader());
    assert(self.commit_min == self.commit_max);

    // TODO Snapshots: Use snapshot checksum if commit is no longer in journal.
    const latest_committed_entry = self.journal.entry_for_op_exact(self.commit_max).?;

    self.send_header_to_other_replicas(.{
        .command = .commit,
        .context = latest_committed_entry.checksum,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
        .commit = self.commit_max,
    });
}

/// A follower has not heard from the leader in time: start a view change.
fn on_normal_status_timeout(self: *Self) void {
    assert(self.status == .normal);
    assert(self.follower());
    self.transition_to_view_change_status(self.view + 1);
}

/// The view change did not complete in time: move on to the next view.
fn on_view_change_status_timeout(self: *Self) void {
    assert(self.status == .view_change);
    self.transition_to_view_change_status(self.view + 1);
}

/// Periodically resends our view change messages, since they may have been dropped.
fn on_view_change_message_timeout(self: *Self) void {
    self.view_change_message_timeout.reset();
    assert(self.status == .view_change);

    // Keep sending `start_view_change` messages:
    // We may have a `start_view_change_quorum` but other replicas may not.
    // However, the leader may stop sending once it has a `do_view_change_quorum`.
    if (!self.do_view_change_quorum) self.send_start_view_change();

    // It is critical that a `do_view_change` message implies a `start_view_change_quorum`:
    if (self.start_view_change_quorum) {
        // The leader need not retry to send a `do_view_change` message to itself:
        // We assume the MessageBus will not drop messages sent by a replica to itself.
        if (self.leader_index(self.view) != self.replica) self.send_do_view_change();
    }
}

/// Periodically drives the repair protocol while repairs may be outstanding.
fn on_repair_timeout(self: *Self) void {
    assert(self.status == .normal or self.status == .view_change);
    self.repair();
}

/// Records `message` in `messages` (keyed by sender) and counts the unique messages now
/// received. Returns the count exactly once — at the moment it first reaches `threshold` —
/// and null otherwise (duplicate sender, below threshold, or quorum already reached), so
/// that each quorum triggers its state transition exactly once.
fn add_message_and_receive_quorum_exactly_once(
    self: *Self,
    messages: *QuorumMessages,
    message: *Message,
    threshold: u32,
) ?usize {
    assert(threshold >= 1);
    assert(threshold <= self.replica_count);

    assert(messages.len == config.replicas_max);
    assert(message.header.cluster == self.cluster);
    assert(message.header.replica < self.replica_count);
    assert(message.header.view == self.view);
    // Sanity-check the threshold against the command and our role/status:
    switch (message.header.command) {
        .prepare_ok => {
            if (self.replica_count <= 2) assert(threshold == self.replica_count);

            assert(self.status == .normal);
            assert(self.leader());
        },
        .start_view_change => {
            assert(self.replica_count > 1);
            if (self.replica_count == 2) assert(threshold == 1);

            assert(self.status == .view_change);
        },
        .do_view_change => {
            assert(self.replica_count > 1);
            if (self.replica_count == 2) assert(threshold == 2);

            assert(self.status == .view_change);
            assert(self.leader_index(self.view) == self.replica);
        },
        .nack_prepare => {
            assert(self.replica_count > 1);
            if (self.replica_count == 2) assert(threshold >= 1);

            assert(self.status == .view_change);
            assert(self.leader_index(self.view) == self.replica);
        },
        else => unreachable,
    }

    const command: []const u8 = @tagName(message.header.command);

    // Do not allow duplicate messages to trigger multiple passes through a state transition:
    if (messages[message.header.replica]) |m| {
        // Assert that this is a duplicate message and not a different message:
        assert(m.header.command == message.header.command);
        assert(m.header.replica == message.header.replica);
        assert(m.header.view == message.header.view);
        assert(m.header.op == message.header.op);
        assert(m.header.commit == message.header.commit);
        assert(m.header.checksum_body == message.header.checksum_body);
        assert(m.header.checksum == message.header.checksum);
        log.debug("{}: on_{s}: ignoring (duplicate message)", .{ self.replica, command });
        return null;
    }

    // Record the first receipt of this message:
    assert(messages[message.header.replica] == null);
    messages[message.header.replica] = message.ref();

    // Count the number of unique messages now received:
    const count = self.count_quorum(messages, message.header.command, message.header.context);
    log.debug("{}: on_{s}: {} message(s)", .{ self.replica, command, count });

    // Wait until we have exactly `threshold` messages for quorum:
    if (count < threshold) {
        log.debug("{}: on_{s}: waiting for quorum", .{ self.replica, command });
        return null;
    }

    // This is not the first time we have had quorum, the state transition has already happened:
    if (count > threshold) {
        log.debug("{}: on_{s}: ignoring (quorum received already)", .{ self.replica, command });
        return null;
    }

    assert(count == threshold);
    return count;
}

/// Writes `message` (the prepare for our current op) to our own journal.
fn append(self: *Self, message: *Message) void {
    assert(self.status == .normal);
    assert(message.header.command == .prepare);
    assert(message.header.view == self.view);
    assert(message.header.op == self.op);

    log.debug("{}: append: appending to journal", .{self.replica});
    self.write_prepare(message, .append);
}

/// Returns whether `b` succeeds `a` by having a newer view or same view and newer op.
fn ascending_viewstamps(
    self: *Self,
    a: *const Header,
    b: *const Header,
) bool {
    assert(a.command == .prepare);
    assert(b.command == .prepare);

    if (a.view < b.view) {
        // We do not assert b.op >= a.op, ops may be reordered during a view change.
        return true;
    } else if (a.view > b.view) {
        // We do not assert b.op <= a.op, ops may be reordered during a view change.
        return false;
    } else if (a.op < b.op) {
        assert(a.view == b.view);
        return true;
    } else if (a.op > b.op) {
        assert(a.view == b.view);
        return false;
    } else {
        unreachable;
    }
}

/// Choose a different replica each time if possible (excluding ourself).
fn choose_any_other_replica(self: *Self) ?u8 {
    if (self.replica_count == 1) return null;

    var count: usize = 0;
    while (count < self.replica_count) : (count += 1) {
        // A monotonically incrementing counter rotates the choice across calls:
        self.choose_any_other_replica_ticks += 1;
        const replica = @mod(
            self.replica + self.choose_any_other_replica_ticks,
            self.replica_count,
        );
        if (replica == self.replica) continue;
        return @intCast(u8, replica);
    }
    unreachable;
}

/// Commit ops up to commit number `commit` (inclusive).
/// A function which calls `commit_ops()` to set `commit_max` must first call `view_jump()`.
/// Otherwise, we may fork the log.
fn commit_ops(self: *Self, commit: u64) void {
    // TODO Restrict `view_change` status only to the leader purely as defense-in-depth.
    // Be careful of concurrency when doing this, as successive view changes can happen quickly.
    assert(self.status == .normal or self.status == .view_change);
    assert(self.commit_min <= self.commit_max);
    assert(self.commit_min <= self.op);
    // NOTE(review): the next two asserts are tautologies — they read as deliberate
    // documentation that both orderings are possible here (we may receive commit numbers
    // for ops we do not yet have). Confirm intent before "simplifying" them away.
    assert(self.commit_max <= self.op or self.commit_max > self.op);
    assert(commit <= self.op or commit > self.op);

    // We have already committed this far:
    if (commit <= self.commit_min) return;

    // We must update `commit_max` even if we are already committing, otherwise we will lose
    // information that we should know, and `set_latest_op_and_k()` will catch us out:
    if (commit > self.commit_max) {
        log.debug("{}: commit_ops: advancing commit_max={}..{}", .{
            self.replica,
            self.commit_max,
            commit,
        });
        self.commit_max = commit;
    }

    // Guard against multiple concurrent invocations of commit_ops():
    if (self.committing) {
        log.debug("{}: commit_ops: already committing...", .{self.replica});
        return;
    }

    // We check the hash chain before we read each op, rather than once upfront, because
    // it's possible for `commit_max` to change while we read asynchronously, after we
    // validate the hash chain.
    //
    // We therefore cannot keep committing until we reach `commit_max`. We need to verify
    // the hash chain before each read. Once verified (before the read) we can commit in the
    // callback after the read, but if we see a change we need to stop committing any
    // further ops, because `commit_max` may have been bumped and may refer to a different
    // op.

    assert(!self.committing);
    self.committing = true;

    self.commit_ops_read();
}

/// Reads the next op to commit (`commit_min + 1`) from the journal after re-verifying the
/// hash chain; `commit_ops_commit()` continues in the read callback.
fn commit_ops_read(self: *Self) void {
    assert(self.committing);
    assert(self.status == .normal or self.status == .view_change);
    assert(self.commit_min <= self.commit_max);
    assert(self.commit_min <= self.op);

    if (!self.valid_hash_chain("commit_ops_read")) {
        self.committing = false;
        return;
    }
    assert(self.op >= self.commit_max);

    // We may receive commit numbers for ops we do not yet have (`commit_max > self.op`):
    // Even a naive state transfer may fail to correct for this.
    if (self.commit_min < self.commit_max and self.commit_min < self.op) {
        const op = self.commit_min + 1;
        const checksum = self.journal.entry_for_op_exact(op).?.checksum;
        self.journal.read_prepare(commit_ops_commit, op, checksum, null);
    } else {
        self.committing = false;
        // This is an optimization to expedite the view change before the `repair_timeout`:
        if (self.status == .view_change and self.repairs_allowed()) self.repair();
    }
}

/// Journal read callback: re-validates that we may still commit (status/role may have
/// changed during the asynchronous read, and the prepare must still be `commit_min + 1`
/// with the expected checksum), executes the commit, then loops back into
/// `commit_ops_read()` for the next op.
fn commit_ops_commit(self: *Self, prepare: ?*Message, destination_replica: ?u8) void {
    assert(destination_replica == null);

    assert(self.committing);
    self.committing = false;

    if (prepare == null) {
        log.debug("{}: commit_ops_commit: prepare == null", .{self.replica});
        return;
    }

    if (self.status == .view_change) {
        if (self.leader_index(self.view) != self.replica) {
            log.debug("{}: commit_ops_commit: no longer leader", .{self.replica});
            return;
        }
        // Only the leader may commit during a view change before starting the new view.
        // Fall through if this is indeed the case.
    } else if (self.status != .normal) {
        log.debug("{}: commit_ops_commit: no longer in normal status", .{self.replica});
        return;
    }

    const op = self.commit_min + 1;

    if (prepare.?.header.op != op) {
        log.debug("{}: commit_ops_commit: op changed", .{self.replica});
        return;
    }

    if (prepare.?.header.checksum != self.journal.entry_for_op_exact(op).?.checksum) {
        log.debug("{}: commit_ops_commit: checksum changed", .{self.replica});
        return;
    }

    // TODO We can optimize this to commit into the client table reply if it exists.
    const reply = self.message_bus.get_message() orelse {
        log.alert("{}: commit_ops_commit: waiting for message", .{self.replica});
        return;
    };
    defer self.message_bus.unref(reply);

    self.commit_op(prepare.?, reply);

    assert(self.commit_min == op);
    assert(self.commit_min <= self.commit_max);
    assert(self.commit_min <= self.op);

    self.committing = true;
    self.commit_ops_read();
}

/// Applies `prepare` to the state machine, advances `commit_min`, records the reply in the
/// client table, and (if we are the leader for the current view) sends the reply to the
/// client.
fn commit_op(self: *Self, prepare: *const Message, reply: *Message) void {
    // TODO Can we add more checks around allowing commit_op() during a view change?
    assert(self.status == .normal or self.status == .view_change);
    assert(prepare.header.command == .prepare);
    assert(prepare.header.operation != .init);
    assert(prepare.header.op == self.commit_min + 1);
    assert(prepare.header.op <= self.op);

    // If we are a follower committing through `commit_ops()` then a view change may have
    // happened since we last checked in `commit_ops_read()`. However, this would relate to
    // subsequent ops, since by now we have already verified the hash chain for this commit.

    // The previous committed entry must chain into this prepare:
    assert(self.journal.entry_for_op_exact(self.commit_min).?.checksum ==
        prepare.header.parent);

    log.debug("{}: commit_op: executing view={} {} op={} checksum={} ({s})", .{
        self.replica,
        self.view,
        self.leader_index(self.view) == self.replica,
        prepare.header.op,
        prepare.header.checksum,
        @tagName(prepare.header.operation.cast(StateMachine)),
    });

    const reply_body_size = @intCast(u32, self.state_machine.commit(
        prepare.header.client,
        prepare.header.operation.cast(StateMachine),
        prepare.buffer[@sizeOf(Header)..prepare.header.size],
        reply.buffer[@sizeOf(Header)..],
    ));

    self.commit_min += 1;
    assert(self.commit_min == prepare.header.op);
    if (self.commit_min > self.commit_max) self.commit_max = self.commit_min;

    if (self.on_change_state) |hook| hook(self);

    reply.header.* = .{
        .command = .reply,
        .operation = prepare.header.operation,
        .parent = prepare.header.context, // The prepare's context has `request.checksum`.
        .client = prepare.header.client,
        .request = prepare.header.request,
        .cluster = prepare.header.cluster,
        .replica = prepare.header.replica,
        .view = prepare.header.view,
        .op = prepare.header.op,
        .commit = prepare.header.op,
        .size = @sizeOf(Header) + reply_body_size,
    };
    assert(reply.header.offset == 0);
    assert(reply.header.epoch == 0);

    reply.header.set_checksum_body(reply.buffer[@sizeOf(Header)..reply.header.size]);
    reply.header.set_checksum();

    if (reply.header.operation == .register) {
        self.create_client_table_entry(reply);
    } else {
        self.update_client_table_entry(reply);
    }

    if (self.leader_index(self.view) == self.replica) {
        log.debug("{}: commit_op: replying to client: {}", .{ self.replica, reply.header });
        self.message_bus.send_message_to_client(reply.header.client, reply);
    }
}

/// Commits, frees and pops as many prepares at the head of the pipeline as have quorum.
/// Can be called only when the pipeline has at least one prepare.
/// Stops the prepare timeout and resets the timeouts counter if the pipeline becomes empty.
fn commit_pipeline(self: *Self) void {
    assert(self.status == .normal);
    assert(self.leader());
    assert(self.pipeline.count > 0);

    while (self.pipeline.head_ptr()) |prepare| {
        assert(self.pipeline.count > 0);
        assert(self.commit_min == self.commit_max);
        assert(self.commit_max + self.pipeline.count == self.op);
        assert(self.commit_max + 1 == prepare.message.header.op);

        if (!prepare.ok_quorum_received) {
            // Eventually handled by on_prepare_timeout().
            log.debug("{}: commit_pipeline: waiting for quorum", .{self.replica});
            return;
        }

        const count = self.count_quorum(
            &prepare.ok_from_all_replicas,
            .prepare_ok,
            prepare.message.header.checksum,
        );
        assert(count >= self.quorum_replication);

        // TODO We can optimize this to commit into the client table reply if it exists.
        const reply = self.message_bus.get_message() orelse {
            // Eventually handled by on_prepare_timeout().
            log.alert("{}: commit_pipeline: waiting for message", .{self.replica});
            return;
        };
        defer self.message_bus.unref(reply);

        self.commit_op(prepare.message, reply);

        assert(self.commit_min == self.commit_max);
        assert(self.commit_max == prepare.message.header.op);

        self.unref_prepare_message_and_quorum_messages(prepare);
        assert(self.pipeline.pop() != null);
    }

    assert(self.prepare_timeout.ticking);
    if (self.pipeline.count == 0) self.prepare_timeout.stop();
}

/// Counts the non-null entries in `messages`, asserting per-command invariants on each.
fn count_quorum(
    self: *Self,
    messages: *QuorumMessages,
    command: Command,
    context: u128,
) usize {
    assert(messages.len == config.replicas_max);

    var count: usize = 0;
    for (messages) |received, replica| {
        if (received) |m| {
            assert(replica < self.replica_count);
            assert(m.header.cluster == self.cluster);
            assert(m.header.command == command);
            assert(m.header.context == context);
            assert(m.header.replica == replica);
            switch (command) {
                .prepare_ok => {
                    if (self.status == .normal) {
                        assert(self.leader());
                        assert(m.header.view == self.view);
                    } else {
                        // prepare_ok messages may survive into a view change (older view):
                        assert(self.status == .view_change);
                        assert(m.header.view < self.view);
                    }
                },
                .start_view_change => {
                    assert(m.header.replica != self.replica);
                    assert(m.header.view == self.view);
                },
                .do_view_change => assert(m.header.view == self.view),
                .nack_prepare => {
                    // TODO See if we can restrict this branch further.
                    assert(m.header.replica != self.replica);
                    assert(m.header.op == self.nack_prepare_op.?);
                },
                else => unreachable,
            }
            count += 1;
        }
    }
    assert(count <= self.replica_count);
    return count;
}

/// Creates an entry in the client table when registering a new client session.
/// Asserts that the new session does not yet exist.
/// Evicts another entry deterministically, if necessary, to make space for the insert.
fn create_client_table_entry(self: *Self, reply: *Message) void {
    assert(reply.header.command == .reply);
    assert(reply.header.operation == .register);
    assert(reply.header.client > 0);
    assert(reply.header.context == 0);
    assert(reply.header.op == reply.header.commit);
    assert(reply.header.size == @sizeOf(Header));

    const session = reply.header.commit; // The commit number becomes the session number.
    const request = reply.header.request;

    assert(session > 0); // We reserved the `0` commit number for the cluster `.init` operation.
    assert(request == 0);

    // For correctness, it's critical that all replicas evict deterministically:
    // We cannot depend on `HashMap.capacity()` since `HashMap.ensureCapacity()` may change
    // across different versions of the Zig std lib. We therefore rely on `config.clients_max`,
    // which must be the same across all replicas, and must not change after initing a cluster.
    // We also do not depend on `HashMap.valueIterator()` being deterministic here. However, we
    // do require that all entries have different commit numbers and are at least iterated.
    // This ensures that we will always pick the entry with the oldest commit number.
    // We also double-check that a client has only one entry in the hash map (or it's buggy).
    const clients = self.client_table.count();
    assert(clients <= config.clients_max);
    if (clients == config.clients_max) {
        var evictee: ?*Message = null;
        var iterated: usize = 0;
        var iterator = self.client_table.valueIterator();
        while (iterator.next()) |entry| : (iterated += 1) {
            assert(entry.reply.header.command == .reply);
            assert(entry.reply.header.context == 0);
            assert(entry.reply.header.op == entry.reply.header.commit);
            assert(entry.reply.header.commit >= entry.session);

            if (evictee) |evictee_reply| {
                assert(entry.reply.header.client != evictee_reply.header.client);
                assert(entry.reply.header.commit != evictee_reply.header.commit);

                // Keep the candidate with the oldest commit number:
                if (entry.reply.header.commit < evictee_reply.header.commit) {
                    evictee = entry.reply;
                }
            } else {
                evictee = entry.reply;
            }
        }
        assert(iterated == clients);

        log.alert("{}: create_client_table_entry: clients={}/{} evicting client={}", .{
            self.replica,
            clients,
            config.clients_max,
            evictee.?.header.client,
        });
        assert(self.client_table.remove(evictee.?.header.client));
        assert(!self.client_table.contains(evictee.?.header.client));
        self.message_bus.unref(evictee.?);
    }

    log.debug("{}: create_client_table_entry: client={} session={} request={}", .{
        self.replica,
        reply.header.client,
        session,
        request,
    });

    // Any duplicate .register requests should have received the same session number if the
    // client table entry already existed, or been dropped if a session was being committed:
    self.client_table.putAssumeCapacityNoClobber(reply.header.client, .{
        .session = session,
        .reply = reply.ref(),
    });
    assert(self.client_table.count() <= config.clients_max);
}

/// The caller owns the returned message, if any, which has exactly 1 reference.
fn create_view_change_message(self: *Self, command: Command) ?*Message {
    assert(command == .do_view_change or command == .start_view);

    // We may send a start_view message in normal status to resolve a follower's view jump:
    assert(self.status == .normal or self.status == .view_change);

    const message = self.message_bus.get_message() orelse return null;
    defer self.message_bus.unref(message);

    message.header.* = .{
        .command = command,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
        // The latest normal view (as specified in the 2012 paper) is different to the view
        // number contained in the prepare headers we include in the body. The former shows
        // how recent a view change the replica participated in, which may be much higher.
        // We use the `offset` field to send this in addition to the current view number:
        .offset = if (command == .do_view_change) self.view_normal else 0,
        .op = self.op,
        .commit = self.commit_max,
    };

    // CRITICAL: The number of prepare headers to include in the body:
    // We must provide enough headers to cover all uncommitted headers so that the new
    // leader (if we are in a view change) can decide whether to discard uncommitted headers
    // that cannot be repaired because they are gaps, and this must be relative to the
    // cluster as a whole (not relative to the difference between our op and commit number)
    // as otherwise we would break correctness.
    const count_max = config.pipelining_max;
    assert(count_max > 0);

    const size_max = @sizeOf(Header) * std.math.min(
        std.math.max(@divFloor(message.buffer.len, @sizeOf(Header)), 2),
        1 + count_max,
    );
    assert(size_max > @sizeOf(Header));

    const count = self.journal.copy_latest_headers_between(
        0,
        self.op,
        std.mem.bytesAsSlice(Header, message.buffer[@sizeOf(Header)..size_max]),
    );

    // We expect that self.op always exists.
    assert(count > 0);

    message.header.size = @intCast(u32, @sizeOf(Header) + @sizeOf(Header) * count);
    message.header.set_checksum_body(message.body());
    message.header.set_checksum();

    return message.ref();
}

/// The caller owns the returned message, if any, which has exactly 1 reference.
fn create_message_from_header(self: *Self, header: Header) ?*Message {
    assert(header.replica == self.replica);
    assert(header.view == self.view or header.command == .request_start_view);
    assert(header.size == @sizeOf(Header));

    const message = self.message_bus.pool.get_header_only_message() orelse return null;
    defer self.message_bus.unref(message);

    message.header.* = header;
    message.header.set_checksum_body(message.body());
    message.header.set_checksum();

    return message.ref();
}

/// Discards uncommitted headers during a view change before the new leader starts the view.
/// This is required to maximize availability in the presence of storage faults.
/// Refer to the CTRL protocol from Protocol-Aware Recovery for Consensus-Based Storage.
///
/// It's possible for the new leader to have done an op jump in a previous view, and so
/// introduced a header gap for an op, which was then discarded by another leader during a
/// newer view change, before surviving into this view as a gap because our latest op was
/// set as the latest op for the quorum.
///
/// In this case, it may be impossible for the new leader to repair the missing header since
/// the rest of the cluster may have already discarded it. We therefore iterate over our
/// uncommitted header gaps and compare them with the quorum of do_view_change messages
/// received from other replicas, before starting the new view, to discard any that may be
/// impossible to repair.
fn discard_uncommitted_headers(self: *Self) void {
    assert(self.status == .view_change);
    assert(self.leader_index(self.view) == self.replica);
    assert(self.do_view_change_quorum);
    assert(!self.repair_timeout.ticking);
    assert(self.op >= self.commit_max);
    assert(self.replica_count > 1);

    // A gap may be discarded only when at least `threshold` other replicas report a latest
    // op older than the gap op (counted as nacks below):
    const threshold = self.replica_count - self.quorum_replication;
    if (threshold == 0) {
        assert(self.replica_count == 2);
        return;
    }

    // Walk down from our latest op towards (but excluding) commit_max, looking for gaps:
    var op = self.op;
    while (op > self.commit_max) : (op -= 1) {
        if (self.journal.entry_for_op_exact(op) != null) continue;

        log.debug("{}: discard_uncommitted_headers: op={} gap", .{ self.replica, op });

        var nacks: usize = 0;
        for (self.do_view_change_from_all_replicas) |received, replica| {
            if (received) |m| {
                assert(m.header.command == .do_view_change);
                assert(m.header.cluster == self.cluster);
                assert(m.header.replica == replica);
                assert(m.header.view == self.view);

                if (replica != self.replica) {
                    if (m.header.op < op) nacks += 1;

                    log.debug("{}: discard_uncommitted_headers: replica={} op={}", .{
                        self.replica,
                        m.header.replica,
                        m.header.op,
                    });
                }
            }
        }

        log.debug("{}: discard_uncommitted_headers: op={} nacks={} threshold={}", .{
            self.replica,
            op,
            nacks,
            threshold,
        });

        if (nacks >= threshold) {
            // Truncate the journal from the gap op onwards (inclusive):
            self.journal.remove_entries_from(op);
            self.op = op - 1;

            assert(self.journal.entry_for_op(op) == null);
            assert(!self.journal.dirty.bit(op));
            assert(!self.journal.faulty.bit(op));
        }
    }
}

/// Discards uncommitted ops during a view change from after and including `op`.
/// This is required to maximize availability in the presence of storage faults.
/// Refer to the CTRL protocol from Protocol-Aware Recovery for Consensus-Based Storage.
fn discard_uncommitted_ops_from(self: *Self, op: u64, checksum: u128) void {
    assert(self.status == .view_change);
    assert(self.leader_index(self.view) == self.replica);
    assert(self.repairs_allowed());

    assert(self.valid_hash_chain("discard_uncommitted_ops_from"));

    assert(op > self.commit_max);
    assert(op <= self.op);
    assert(self.journal.entry_for_op_exact_with_checksum(op, checksum) != null);
    assert(self.journal.dirty.bit(op));

    log.debug("{}: discard_uncommitted_ops_from: ops={}..{} view={}", .{
        self.replica,
        op,
        self.op,
        self.view,
    });

    self.journal.remove_entries_from(op);
    self.op = op - 1;

    assert(self.journal.entry_for_op(op) == null);
    assert(!self.journal.dirty.bit(op));
    assert(!self.journal.faulty.bit(op));

    // We require that `self.op` always exists. Rewinding `self.op` could change that.
    // However, we do this only as the leader within a view change, with all headers intact.
    assert(self.journal.entry_for_op_exact(self.op) != null);
}

/// Returns whether the replica is a follower for the current view.
/// This may be used only when the replica status is normal.
fn follower(self: *Self) bool {
    return !self.leader();
}

/// Delivers at most one queued loopback message (a message we sent to ourself), and asserts
/// that handling it queues no further loopback messages.
fn flush_loopback_queue(self: *Self) void {
    // There are three cases where a replica will send a message to itself:
    // However, of these three cases, only two cases will call send_message_to_replica().
    //
    // 1. In on_request(), the leader sends a synchronous prepare to itself, but this is
    //    done by calling on_prepare() directly, and subsequent prepare timeout retries will
    //    never resend to self.
    // 2. In on_prepare(), after writing to storage, the leader sends a (typically)
    //    asynchronous prepare_ok to itself.
    // 3. In on_start_view_change(), after receiving a quorum of start_view_change
    //    messages, the new leader sends a synchronous do_view_change to itself.
    if (self.loopback_queue) |message| {
        defer self.message_bus.unref(message);

        assert(message.next == null);
        self.loopback_queue = null;
        assert(message.header.replica == self.replica);
        self.on_message(message);
        // We do not call flush_loopback_queue() within on_message() to avoid recursion.
    }
    // We expect that delivering a prepare_ok or do_view_change message to ourselves will
    // not result in any further messages being added synchronously to the loopback queue.
    assert(self.loopback_queue == null);
}

/// Returns whether an incoming prepare_ok must be dropped: we must be the leader, in
/// normal status, in the message's exact view, for it to count towards a quorum.
fn ignore_prepare_ok(self: *Self, message: *const Message) bool {
    if (self.status != .normal) {
        log.debug("{}: on_prepare_ok: ignoring ({})", .{ self.replica, self.status });
        return true;
    }

    if (message.header.view < self.view) {
        log.debug("{}: on_prepare_ok: ignoring (older view)", .{self.replica});
        return true;
    }

    if (message.header.view > self.view) {
        // Another replica is treating us as the leader for a view we do not know about.
        // This may be caused by a fault in the network topology.
        log.warn("{}: on_prepare_ok: ignoring (newer view)", .{self.replica});
        return true;
    }

    if (self.follower()) {
        // This may be caused by a fault in the network topology.
        log.warn("{}: on_prepare_ok: ignoring (follower)", .{self.replica});
        return true;
    }

    return false;
}

/// Returns whether a repair-protocol message must be dropped: wrong status, wrong view,
/// sent by ourself, or addressed to the wrong role for its command.
fn ignore_repair_message(self: *Self, message: *const Message) bool {
    assert(message.header.command == .request_start_view or
        message.header.command == .request_headers or
        message.header.command == .request_prepare or
        message.header.command == .headers or
        message.header.command == .nack_prepare);

    const command: []const u8 = @tagName(message.header.command);

    if (self.status != .normal and self.status != .view_change) {
        log.debug("{}: on_{s}: ignoring ({})", .{ self.replica, command, self.status });
        return true;
    }

    if (message.header.view < self.view) {
        log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
        return true;
    }

    if (message.header.view > self.view) {
        log.debug("{}: on_{s}: ignoring (newer view)", .{ self.replica, command });
        return true;
    }

    if (self.ignore_repair_message_during_view_change(message)) return true;

    if (message.header.replica == self.replica) {
        log.warn("{}: on_{s}: ignoring (self)", .{ self.replica, command });
        return true;
    }

    if (self.leader_index(self.view) != self.replica) {
        switch (message.header.command) {
            // Only the leader may receive these messages:
            .request_start_view, .nack_prepare => {
                log.warn("{}: on_{s}: ignoring (follower)", .{ self.replica, command });
                return true;
            },
            // Only the leader may answer a request for a prepare without a context:
            .request_prepare => if (message.header.context == 0) {
                log.warn("{}: on_{s}: ignoring (no context)", .{ self.replica, command });
                return true;
            },
            else => {},
        }
    }

    if (message.header.command == .nack_prepare and self.status == .normal) {
        log.debug("{}: on_{s}: ignoring (view started)", .{ self.replica, command });
        return true;
    }

    // Only allow repairs for same view as defense-in-depth:
    assert(message.header.view == self.view);
    return false;
}

/// During a view change, restricts repair traffic to the flows the protocol needs:
/// header/prepare requests coming from the new leader, and headers/nacks going to the new
/// leader (and only once it has its do_view_change quorum).
fn ignore_repair_message_during_view_change(self: *Self, message: *const Message) bool {
    if (self.status != .view_change) return false;

    const command: []const u8 = @tagName(message.header.command);

    switch (message.header.command) {
        .request_start_view => {
            log.debug("{}: on_{s}: ignoring (view change)", .{ self.replica, command });
            return true;
        },
        .request_headers, .request_prepare => {
            if (self.leader_index(self.view) != message.header.replica) {
                log.debug("{}: on_{s}: ignoring (view change, requested by follower)", .{
                    self.replica,
                    command,
                });
                return true;
            }
        },
        .headers, .nack_prepare => {
            if (self.leader_index(self.view) != self.replica) {
                log.debug("{}: on_{s}: ignoring (view change, received by follower)", .{
                    self.replica,
                    command,
                });
                return true;
            } else if (!self.do_view_change_quorum) {
                log.debug("{}: on_{s}: ignoring (view change, waiting for quorum)", .{
                    self.replica,
                    command,
                });
                return true;
            }
        },
        else => unreachable,
    }

    return false;
}

/// Returns whether a client request must be dropped (or forwarded) rather than prepared.
fn ignore_request_message(self: *Self, message: *Message) bool {
    assert(message.header.command == .request);

    if (self.status != .normal) {
        log.debug("{}: on_request: ignoring ({s})", .{ self.replica, self.status });
        return true;
    }

    if (self.ignore_request_message_follower(message)) return true;
    if (self.ignore_request_message_duplicate(message)) return true;
    if (self.ignore_request_message_preparing(message)) return true;

    return false;
}

/// Returns whether the request is stale, or a duplicate of the latest committed request.
/// Resends the reply to the latest request if the request has been committed.
fn ignore_request_message_duplicate(self: *Self, message: *const Message) bool {
    assert(self.status == .normal);
    assert(self.leader());

    assert(message.header.command == .request);
    assert(message.header.client > 0);
    assert(message.header.view <= self.view); // See ignore_request_message_follower().
    assert(message.header.context == 0 or message.header.operation != .register);
    assert(message.header.request == 0 or message.header.operation != .register);

    if (self.client_table.getPtr(message.header.client)) |entry| {
        assert(entry.reply.header.command == .reply);
        assert(entry.reply.header.client == message.header.client);

        if (message.header.operation == .register) {
            // Fall through below to check if we should resend the .register session reply.
        } else if (entry.session > message.header.context) {
            // The client must not reuse the ephemeral client ID when registering a new session.
            log.alert("{}: on_request: ignoring older session (client bug)", .{self.replica});
            return true;
        } else if (entry.session < message.header.context) {
            // This cannot be because of a partition since we check the client's view number.
            log.alert("{}: on_request: ignoring newer session (client bug)", .{self.replica});
            return true;
        }

        if (entry.reply.header.request > message.header.request) {
            log.debug("{}: on_request: ignoring older request", .{self.replica});
            return true;
        } else if (entry.reply.header.request == message.header.request) {
            // Same request number: the checksum distinguishes a resend from a collision.
            if (message.header.checksum == entry.reply.header.parent) {
                assert(entry.reply.header.operation == message.header.operation);

                log.debug("{}: on_request: replying to duplicate request", .{self.replica});
                self.message_bus.send_message_to_client(message.header.client, entry.reply);
                return true;
            } else {
                log.alert("{}: on_request: request collision (client bug)", .{self.replica});
                return true;
            }
        } else if (entry.reply.header.request + 1 == message.header.request) {
            if (message.header.parent == entry.reply.header.checksum) {
                // The client has proved that they received our last reply.
                log.debug("{}: on_request: new request", .{self.replica});
                return false;
            } else {
                // The client may have only one request inflight at a time.
                log.alert("{}: on_request: ignoring new request (client bug)", .{self.replica});
                return true;
            }
        } else {
            log.alert("{}: on_request: ignoring newer request (client bug)", .{self.replica});
            return true;
        }
    } else if (message.header.operation == .register) {
        log.debug("{}: on_request: new session", .{self.replica});
        return false;
    } else if (self.pipeline_prepare_for_client(message.header.client)) |_| {
        // The client registered with the previous leader, which committed and replied back
        // to the client before the view change, after which the register operation was
        // reloaded into the pipeline to be driven to completion by the new leader, which
        // now receives a request from the client that appears to have no session.
        // However, the session is about to be registered, so we must wait for it to commit.
        log.debug("{}: on_request: waiting for session to commit", .{self.replica});
        return true;
    } else {
        // We must have all commits to know whether a session has been evicted. For example,
        // there is the risk of sending an eviction message (even as the leader) if we are
        // partitioned and don't yet know about a session. We solve this by having clients
        // include the view number and rejecting messages from clients with newer views.
        log.err("{}: on_request: no session", .{self.replica});
        self.send_eviction_message_to_client(message.header.client);
        return true;
    }
}

/// Returns whether the replica is eligible to process this request as the leader.
/// Takes the client's perspective into account if the client is aware of a newer view.
/// Forwards requests to the leader if the client has an older view.
fn ignore_request_message_follower(self: *Self, message: *Message) bool {
    assert(self.status == .normal);
    assert(message.header.command == .request);

    // The client is aware of a newer view:
    // Even if we think we are the leader, we may be partitioned from the rest of the cluster.
    // We therefore drop the message rather than flood our partition with traffic.
    if (message.header.view > self.view) {
        log.debug("{}: on_request: ignoring (newer view)", .{self.replica});
        return true;
    } else if (self.leader()) {
        return false;
    }

    if (message.header.operation == .register) {
        // We do not forward `.register` requests for the sake of `Header.peer_type()`.
        // This enables the MessageBus to identify client connections on the first message.
        log.debug("{}: on_request: ignoring (follower, register)", .{self.replica});
    } else if (message.header.view < self.view) {
        // The client may not know who the leader is, or may be retrying after a leader failure.
        // We forward to the new leader ahead of any client retry timeout to reduce latency.
        // Since the client is already connected to all replicas, the client may yet receive the
        // reply from the new leader directly.
        log.debug("{}: on_request: forwarding (follower)", .{self.replica});
        self.send_message_to_replica(self.leader_index(self.view), message);
    } else {
        assert(message.header.view == self.view);
        // The client has the correct view, but has retried against a follower.
        // This may mean that the leader is down and that we are about to do a view change.
        // There is also not much we can do as the client already knows who the leader is.
        // We do not forward as this would amplify traffic on the network.

        // TODO This may also indicate a client-leader partition. If we see enough of these,
        // should we trigger a view change to select a leader that clients can reach?
        // This is a question of weighing the probability of a partition vs routing error.
        log.debug("{}: on_request: ignoring (follower, same view)", .{self.replica});
    }

    assert(self.follower());
    return true;
}

/// Returns whether the request must wait: either the client already has a prepare inflight
/// in the pipeline, or the pipeline is full.
fn ignore_request_message_preparing(self: *Self, message: *const Message) bool {
    assert(self.status == .normal);
    assert(self.leader());

    assert(message.header.command == .request);
    assert(message.header.client > 0);
    assert(message.header.view <= self.view); // See ignore_request_message_follower().

    if (self.pipeline_prepare_for_client(message.header.client)) |prepare| {
        assert(prepare.message.header.command == .prepare);
        assert(prepare.message.header.client == message.header.client);
        assert(prepare.message.header.op > self.commit_max);

        if (message.header.checksum == prepare.message.header.context) {
            log.debug("{}: on_request: ignoring (already preparing)", .{self.replica});
            return true;
        } else {
            log.alert("{}: on_request: ignoring (client forked)", .{self.replica});
            return true;
        }
    }

    if (self.pipeline.full()) {
        log.debug("{}: on_request: ignoring (pipeline full)", .{self.replica});
        return true;
    }

    return false;
}

/// Returns whether a view-change message must be dropped: recovering status, an older
/// view, a view that has already started, or the wrong sender/receiver for its command.
fn ignore_view_change_message(self: *Self, message: *const Message) bool {
    assert(message.header.command == .start_view_change or
        message.header.command == .do_view_change or
        message.header.command == .start_view);
    assert(message.header.view > 0); // The initial view is already zero.

    const command: []const u8 = @tagName(message.header.command);

    // 4.3 Recovery
    // While a replica's status is recovering it does not participate in either the request
    // processing protocol or the view change protocol.
    // This is critical for correctness (to avoid data loss):
    if (self.status == .recovering) {
        log.debug("{}: on_{s}: ignoring (recovering)", .{ self.replica, command });
        return true;
    }

    if (message.header.view < self.view) {
        log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
        return true;
    }

    if (message.header.view == self.view and self.status == .normal) {
        log.debug("{}: on_{s}: ignoring (view started)", .{ self.replica, command });
        return true;
    }

    // These may be caused by faults in the network topology.
    switch (message.header.command) {
        .start_view_change, .start_view => {
            if (message.header.replica == self.replica) {
                log.warn("{}: on_{s}: ignoring (self)", .{ self.replica, command });
                return true;
            }
        },
        .do_view_change => {
            if (self.leader_index(message.header.view) != self.replica) {
                log.warn("{}: on_{s}: ignoring (follower)", .{ self.replica, command });
                return true;
            }
        },
        else => unreachable,
    }

    return false;
}

/// Returns whether a prepare is a repair (an op from an older view, or one we have already
/// advanced past in the current view) rather than a new prepare for the next op.
fn is_repair(self: *Self, message: *const Message) bool {
    assert(message.header.command == .prepare);

    if (self.status == .normal) {
        if (message.header.view < self.view) return true;
        if (message.header.view == self.view and message.header.op <= self.op) return true;
    } else if (self.status == .view_change) {
        if (message.header.view < self.view) return true;
        // The view has already started or is newer.
    }

    return false;
}

/// Returns whether the replica is the leader for the current view.
/// This may be used only when the replica status is normal.
fn leader(self: *Self) bool {
    assert(self.status == .normal);
    return self.leader_index(self.view) == self.replica;
}

/// Returns the index into the configuration of the leader for a given view.
fn leader_index(self: *Self, view: u32) u8 { return @intCast(u8, @mod(view, self.replica_count)); } /// Advances `op` to where we need to be before `header` can be processed as a prepare: fn jump_to_newer_op_in_normal_status(self: *Self, header: *const Header) void { assert(self.status == .normal); assert(self.follower()); assert(header.view == self.view); assert(header.op > self.op + 1); // We may have learned of a higher `commit_max` through a commit message before jumping to a // newer op that is less than `commit_max` but greater than `commit_min`: assert(header.op > self.commit_min); log.debug("{}: jump_to_newer_op: advancing: op={}..{} checksum={}..{}", .{ self.replica, self.op, header.op - 1, self.journal.entry_for_op_exact(self.op).?.checksum, header.parent, }); self.op = header.op - 1; assert(self.op >= self.commit_min); assert(self.op + 1 == header.op); } fn message_body_as_headers(self: *Self, message: *const Message) []Header { // TODO Assert message commands that we expect this to be called for. assert(message.header.size > @sizeOf(Header)); // Body must contain at least one header. return std.mem.bytesAsSlice(Header, message.buffer[@sizeOf(Header)..message.header.size]); } /// Panics if immediate neighbors in the same view would have a broken hash chain. /// Assumes gaps and does not require that a preceeds b. fn panic_if_hash_chain_would_break_in_the_same_view( self: *Self, a: *const Header, b: *const Header, ) void { assert(a.command == .prepare); assert(b.command == .prepare); assert(a.cluster == b.cluster); if (a.view == b.view and a.op + 1 == b.op and a.checksum != b.parent) { assert(a.valid_checksum()); assert(b.valid_checksum()); log.emerg("{}: panic_if_hash_chain_would_break: a: {}", .{ self.replica, a }); log.emerg("{}: panic_if_hash_chain_would_break: b: {}", .{ self.replica, b }); @panic("hash chain would break"); } } /// Searches the pipeline for a prepare for a given client. 
fn pipeline_prepare_for_client(self: *Self, client: u128) ?*Prepare {
    assert(self.status == .normal);
    assert(self.leader());
    assert(self.commit_min == self.commit_max);

    // Walk the pipeline in order, verifying that each prepare chains onto the previous
    // one (sequential ops, parent checksum linkage) while searching for the client.
    var op = self.commit_max + 1;
    var parent = self.journal.entry_for_op_exact(self.commit_max).?.checksum;
    var iterator = self.pipeline.iterator();
    while (iterator.next_ptr()) |prepare| {
        assert(prepare.message.header.command == .prepare);
        assert(prepare.message.header.op == op);
        assert(prepare.message.header.parent == parent);

        // A client may have multiple requests in the pipeline if these were committed by
        // the previous leader and were reloaded into the pipeline after a view change.
        if (prepare.message.header.client == client) return prepare;

        parent = prepare.message.header.checksum;
        op += 1;
    }

    assert(self.pipeline.count <= config.pipelining_max);
    assert(self.commit_max + self.pipeline.count == op - 1);
    assert(self.commit_max + self.pipeline.count == self.op);

    return null;
}

/// Searches the pipeline for a prepare for a given client and checksum.
/// Passing the prepare_ok message prevents these u128s from being accidentally swapped.
/// Asserts that the returned prepare, if any, exactly matches the prepare_ok.
fn pipeline_prepare_for_prepare_ok(self: *Self, ok: *const Message) ?*Prepare {
    assert(ok.header.command == .prepare_ok);

    assert(self.status == .normal);
    assert(self.leader());

    const prepare = self.pipeline_prepare_for_client(ok.header.client) orelse {
        log.debug("{}: pipeline_prepare_for_prepare_ok: not preparing", .{self.replica});
        return null;
    };

    if (ok.header.context != prepare.message.header.checksum) {
        // This can be normal, for example, if an old prepare_ok is replayed.
        log.debug("{}: pipeline_prepare_for_prepare_ok: preparing a different client op", .{
            self.replica,
        });
        return null;
    }

    assert(prepare.message.header.parent == ok.header.parent);
    assert(prepare.message.header.client == ok.header.client);
    assert(prepare.message.header.request == ok.header.request);
    assert(prepare.message.header.cluster == ok.header.cluster);
    assert(prepare.message.header.epoch == ok.header.epoch);
    // A prepare may be committed in the same view or in a newer view:
    assert(prepare.message.header.view <= ok.header.view);
    assert(prepare.message.header.op == ok.header.op);
    assert(prepare.message.header.commit == ok.header.commit);
    assert(prepare.message.header.offset == ok.header.offset);
    assert(prepare.message.header.operation == ok.header.operation);

    return prepare;
}

/// Starting from the latest journal entry, backfill any missing or disconnected headers.
/// A header is disconnected if it breaks the hash chain with its newer neighbor to the right.
/// Since we work backwards from the latest entry, we should always be able to fix the chain.
/// Once headers are connected, backfill any dirty or faulty prepares.
fn repair(self: *Self) void {
    if (!self.repair_timeout.ticking) {
        log.debug("{}: repair: ignoring (optimistic, not ticking)", .{self.replica});
        return;
    }

    self.repair_timeout.reset();

    assert(self.status == .normal or self.status == .view_change);
    assert(self.repairs_allowed());
    assert(self.commit_min <= self.op);
    assert(self.commit_min <= self.commit_max);

    // We expect these always to exist:
    assert(self.journal.entry_for_op_exact(self.commit_min) != null);
    assert(self.journal.entry_for_op_exact(self.op) != null);

    // Request outstanding committed prepares to advance our op number:
    // This handles the case of an idle cluster, where a follower will not otherwise advance.
    // This is not required for correctness, but for durability.
    if (self.op < self.commit_max) {
        // If the leader repairs during a view change, it will have already advanced
        // `self.op` to the latest op according to the quorum of `do_view_change` messages
        // received, so we must therefore be a follower in normal status:
        assert(self.status == .normal);
        assert(self.follower());
        log.debug("{}: repair: op={} < commit_max={}", .{
            self.replica,
            self.op,
            self.commit_max,
        });
        // We need to advance our op number and therefore have to `request_prepare`,
        // since only `on_prepare()` can do this, not `repair_header()` in `on_headers()`.
        self.send_header_to_replica(self.leader_index(self.view), .{
            .command = .request_prepare,
            // We cannot yet know the checksum of the prepare so we set the context to 0:
            // Context is optional when requesting from the leader but required otherwise.
            .context = 0,
            .cluster = self.cluster,
            .replica = self.replica,
            .view = self.view,
            .op = self.commit_max,
        });
        return;
    }

    // Request any missing or disconnected headers:
    // TODO Snapshots: Ensure that self.commit_min op always exists in the journal.
    var broken = self.journal.find_latest_headers_break_between(self.commit_min, self.op);
    if (broken) |range| {
        log.debug("{}: repair: break: view={} op_min={} op_max={} (commit={}..{} op={})", .{
            self.replica,
            self.view,
            range.op_min,
            range.op_max,
            self.commit_min,
            self.commit_max,
            self.op,
        });
        assert(range.op_min > self.commit_min);
        assert(range.op_max < self.op);
        // A range of `op_min=0` or `op_max=0` should be impossible as a header break:
        // This is the init op that is prepared when the cluster is initialized.
        assert(range.op_min > 0);
        assert(range.op_max > 0);

        if (self.choose_any_other_replica()) |replica| {
            self.send_header_to_replica(replica, .{
                .command = .request_headers,
                .cluster = self.cluster,
                .replica = self.replica,
                .view = self.view,
                .commit = range.op_min,
                .op = range.op_max,
            });
        }
        return;
    }

    // Assert that all headers are now present and connected with a perfect hash chain:
    assert(self.op >= self.commit_max);
    assert(self.valid_hash_chain_between(self.commit_min, self.op));

    // Request and repair any dirty or faulty prepares:
    if (self.journal.dirty.len > 0) return self.repair_prepares();

    // Commit ops, which may in turn discover faulty prepares and drive more repairs:
    if (self.commit_min < self.commit_max) return self.commit_ops(self.commit_max);

    if (self.status == .view_change and self.leader_index(self.view) == self.replica) {
        if (self.repair_pipeline_op() != null) return self.repair_pipeline();

        // Start the view as the new leader:
        self.start_view_as_the_new_leader();
    }
}

/// Decide whether or not to insert or update a header:
///
/// A repair may never advance or replace `self.op` (critical for correctness):
///
/// Repairs must always backfill in behind `self.op` but may never advance `self.op`.
/// Otherwise, a split-brain leader may reapply an op that was removed through a view
/// change, which could be committed by a higher `commit_max` number in a commit message.
///
/// See this commit message for an example:
/// https://github.com/coilhq/tigerbeetle/commit/6119c7f759f924d09c088422d5c60ac6334d03de
///
/// Our guiding principles around repairs in general:
///
/// * The latest op makes sense of everything else and must not be replaced with a different
///   op or advanced except by the leader in the current view.
///
/// * Do not jump to a view in normal status without receiving a start_view message.
///
/// * Do not commit until the hash chain between `self.commit_min` and `self.op` is fully
///   connected, to ensure that all the ops in this range are correct.
///
/// * Ensure that `self.commit_max` is never advanced for a newer view without first
///   receiving a start_view message, otherwise `self.commit_max` may refer to different ops.
///
/// * Ensure that `self.op` is never advanced by a repair since repairs may occur in a view
///   change where the view has not yet started.
///
/// * Do not assume that an existing op with a older viewstamp can be replaced by an op with
///   a newer viewstamp, but only compare ops in the same view or with reference to the chain.
///   See Figure 3.7 on page 41 in Diego Ongaro's Raft thesis for an example of where an op
///   with an older view number may be committed instead of an op with a newer view number:
///   http://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf.
///
fn repair_header(self: *Self, header: *const Header) bool {
    assert(header.valid_checksum());
    assert(header.invalid() == null);
    assert(header.command == .prepare);

    switch (self.status) {
        .normal => assert(header.view <= self.view),
        .view_change => assert(header.view <= self.view),
        else => unreachable,
    }

    if (header.op > self.op) {
        log.debug("{}: repair_header: false (advances self.op)", .{self.replica});
        return false;
    } else if (header.op == self.op) {
        if (self.journal.entry_for_op_exact_with_checksum(self.op, header.checksum)) |_| {
            // Fall through below to check if self.op is uncommitted AND reordered,
            // which we would see by the presence of an earlier op with higher view number,
            // that breaks the chain with self.op. In this case, we must skip the repair to
            // avoid overwriting any overlapping op.
        } else {
            log.debug("{}: repair_header: false (changes self.op={})", .{
                self.replica,
                self.op,
            });
            return false;
        }
    }

    if (self.journal.entry(header)) |existing| {
        // Do not replace any existing op lightly as doing so may impair durability and even
        // violate correctness by undoing a prepare already acknowledged to the leader:
        if (existing.checksum == header.checksum) {
            if (!self.journal.dirty.bit(header.op)) {
                log.debug("{}: repair_header: false (checksum clean)", .{self.replica});
                return false;
            }
            log.debug("{}: repair_header: exists, checksum dirty", .{self.replica});
        } else if (existing.view == header.view) {
            // The journal must have wrapped:
            // We expect that the same view and op will have the same checksum.
            assert(existing.op != header.op);
            if (existing.op > header.op) {
                log.debug("{}: repair_header: false (view has newer op)", .{self.replica});
                return false;
            }
            log.debug("{}: repair_header: exists, view has older op", .{self.replica});
        } else {
            assert(existing.view != header.view);
            assert(existing.op == header.op or existing.op != header.op);

            if (!self.repair_header_would_connect_hash_chain(header)) {
                // We cannot replace this op until we are sure that doing so would not
                // violate any prior commitments made to the leader.
                log.debug("{}: repair_header: false (exists)", .{self.replica});
                return false;
            }
            log.debug("{}: repair_header: exists, connects hash chain", .{self.replica});
        }
    } else {
        log.debug("{}: repair_header: gap", .{self.replica});
    }

    // Caveat: Do not repair an existing op or gap if doing so would break the hash chain:
    if (self.repair_header_would_break_hash_chain_with_next_entry(header)) {
        log.debug("{}: repair_header: false (breaks hash chain)", .{self.replica});
        return false;
    }

    // Caveat: Do not repair an existing op or gap if doing so would overlap another:
    if (self.repair_header_would_overlap_another(header)) {
        if (!self.repair_header_would_connect_hash_chain(header)) {
            log.debug("{}: repair_header: false (overlap)", .{self.replica});
            return false;
        }
        // We may have to overlap previous entries in order to connect the hash chain:
        log.debug("{}: repair_header: overlap, connects hash chain", .{self.replica});
    }

    // TODO Snapshots: Skip if this header is already snapshotted.

    assert(header.op < self.op or
        self.journal.entry_for_op_exact(self.op).?.checksum == header.checksum);

    self.journal.set_entry_as_dirty(header);
    return true;
}

/// If we repair this header, then would this break the hash chain only to our immediate right?
/// This offers a weak guarantee compared to `repair_header_would_connect_hash_chain()` below.
/// However, this is useful for allowing repairs when the hash chain is sparse.
fn repair_header_would_break_hash_chain_with_next_entry(
    self: *Self,
    header: *const Header,
) bool {
    if (self.journal.previous_entry(header)) |previous| {
        self.panic_if_hash_chain_would_break_in_the_same_view(previous, header);
    }

    if (self.journal.next_entry(header)) |next| {
        self.panic_if_hash_chain_would_break_in_the_same_view(header, next);

        if (header.checksum == next.parent) {
            assert(header.view <= next.view);
            assert(header.op + 1 == next.op);
            // We don't break with `next` but this is no guarantee that `next` does not break.
            return false;
        } else {
            // If the journal has wrapped, then err in favor of a break regardless of op order:
            return true;
        }
    }

    // We are not completely sure since there is no entry to the immediate right:
    return false;
}

/// If we repair this header, then would this connect the hash chain through to the latest op?
/// This offers a strong guarantee that may be used to replace or overlap an existing op.
///
/// Here is an example of what could go wrong if we did not check for complete connection:
///
/// 1. We do a prepare that's going to be committed.
/// 2. We do a stale prepare to the right of this, ignoring the hash chain break to the left.
/// 3. We do another stale prepare that replaces the first op because it connects to the second.
///
/// This would violate our quorum replication commitment to the leader.
/// The mistake in this example was not that we ignored the break to the left, which we must
/// do to repair reordered ops, but that we did not check for connection to the right.
fn repair_header_would_connect_hash_chain(self: *Self, header: *const Header) bool {
    var entry = header;

    // Walk rightwards from `header` to `self.op`, requiring an unbroken parent link
    // (next.parent == entry.checksum) at every step:
    while (entry.op < self.op) {
        if (self.journal.next_entry(entry)) |next| {
            if (entry.checksum == next.parent) {
                assert(entry.view <= next.view);
                assert(entry.op + 1 == next.op);
                entry = next;
            } else {
                return false;
            }
        } else {
            return false;
        }
    }

    assert(entry.op == self.op);
    assert(entry.checksum == self.journal.entry_for_op_exact(self.op).?.checksum);
    return true;
}

/// If we repair this header, then would this overlap and overwrite part of another batch?
/// Journal entries have variable-sized batches that may overlap if entries are disconnected.
fn repair_header_would_overlap_another(self: *Self, header: *const Header) bool {
    // TODO Snapshots: Handle journal wrap around.
    {
        // Look behind this entry for any preceding entry that this would overlap:
        var op: u64 = header.op;
        while (op > 0) {
            op -= 1;
            if (self.journal.entry_for_op(op)) |neighbor| {
                if (self.journal.next_offset(neighbor) > header.offset) return true;
                break;
            }
        }
    }

    {
        // Look beyond this entry for any succeeding entry that this would overlap:
        var op: u64 = header.op + 1;
        while (op <= self.op) : (op += 1) {
            if (self.journal.entry_for_op(op)) |neighbor| {
                if (self.journal.next_offset(header) > neighbor.offset) return true;
                break;
            }
        }
    }

    return false;
}

/// Reads prepares into the pipeline (before we start the view as the new leader).
fn repair_pipeline(self: *Self) void {
    assert(self.status == .view_change);
    assert(self.leader_index(self.view) == self.replica);
    assert(self.commit_max < self.op);

    if (self.repairing_pipeline) {
        log.debug("{}: repair_pipeline: already repairing...", .{self.replica});
        return;
    }

    log.debug("{}: repair_pipeline: repairing", .{self.replica});

    assert(!self.repairing_pipeline);
    self.repairing_pipeline = true;

    // Kick off the asynchronous read loop; `repairing_pipeline` is cleared by the
    // read callback when there is nothing left to read.
    self.repair_pipeline_read();
}

/// Returns the next `op` number that needs to be read into the pipeline.
fn repair_pipeline_op(self: *Self) ?u64 {
    assert(self.status == .view_change);
    assert(self.leader_index(self.view) == self.replica);

    // The next op to load is the first uncommitted op not yet in the pipeline;
    // null means the pipeline already covers everything up to `self.op`.
    const op = self.commit_max + self.pipeline.count + 1;
    if (op <= self.op) return op;

    assert(self.commit_max + self.pipeline.count == self.op);
    return null;
}

/// Issues a journal read for the next op needed in the pipeline, or — if the pipeline
/// is complete — clears `repairing_pipeline` and resumes `repair()`.
fn repair_pipeline_read(self: *Self) void {
    assert(self.repairing_pipeline);
    assert(self.status == .view_change);
    assert(self.leader_index(self.view) == self.replica);

    if (self.repair_pipeline_op()) |op| {
        assert(op > self.commit_max);
        assert(op <= self.op);
        assert(self.commit_max + self.pipeline.count + 1 == op);

        const checksum = self.journal.entry_for_op_exact(op).?.checksum;

        log.debug("{}: repair_pipeline_read: op={} checksum={}", .{
            self.replica,
            op,
            checksum,
        });

        self.journal.read_prepare(repair_pipeline_push, op, checksum, null);
    } else {
        log.debug("{}: repair_pipeline_read: repaired", .{self.replica});
        self.repairing_pipeline = false;
        self.repair();
    }
}

/// Journal read callback (see `repair_pipeline_read()`): pushes the prepare read from
/// disk onto the pipeline and continues the read loop, unless our state has moved on
/// (different view, different leader, or a changed pipeline) while the read was in flight.
fn repair_pipeline_push(
    self: *Self,
    prepare: ?*Message,
    destination_replica: ?u8,
) void {
    assert(destination_replica == null);

    assert(self.repairing_pipeline);
    self.repairing_pipeline = false;

    if (prepare == null) {
        log.debug("{}: repair_pipeline_push: prepare == null", .{self.replica});
        return;
    }

    // Our state may have advanced significantly while we were reading from disk.
    if (self.status != .view_change) {
        log.debug("{}: repair_pipeline_push: no longer in view change status", .{
            self.replica,
        });
        return;
    }

    if (self.leader_index(self.view) != self.replica) {
        log.debug("{}: repair_pipeline_push: no longer leader", .{self.replica});
        return;
    }

    // We may even be several views ahead and may now have a completely different pipeline.
    const op = self.repair_pipeline_op() orelse {
        log.debug("{}: repair_pipeline_push: pipeline changed", .{self.replica});
        return;
    };

    assert(op > self.commit_max);
    assert(op <= self.op);
    assert(self.commit_max + self.pipeline.count + 1 == op);

    if (prepare.?.header.op != op) {
        log.debug("{}: repair_pipeline_push: op changed", .{self.replica});
        return;
    }

    if (prepare.?.header.checksum != self.journal.entry_for_op_exact(op).?.checksum) {
        log.debug("{}: repair_pipeline_push: checksum changed", .{self.replica});
        return;
    }

    assert(self.status == .view_change);
    assert(self.leader_index(self.view) == self.replica);

    log.debug("{}: repair_pipeline_push: op={} checksum={}", .{
        self.replica,
        prepare.?.header.op,
        prepare.?.header.checksum,
    });

    self.pipeline.push(.{ .message = prepare.?.ref() }) catch unreachable;
    assert(self.pipeline.count >= 1);

    self.repairing_pipeline = true;
    self.repair_pipeline_read();
}

/// Requests prepares for dirty journal entries, newest first, spending at most the
/// journal's currently available write IOPs as the request budget.
fn repair_prepares(self: *Self) void {
    assert(self.status == .normal or self.status == .view_change);
    assert(self.repairs_allowed());
    assert(self.journal.dirty.len > 0);

    // Request enough prepares to utilize our max IO depth:
    var budget = self.journal.writes.available();
    if (budget == 0) {
        log.debug("{}: repair_prepares: waiting for IOP", .{self.replica});
        return;
    }

    var op = self.op + 1;
    while (op > 0) {
        op -= 1;

        if (self.journal.dirty.bit(op)) {
            // If this is an uncommitted op, and we are the leader in `view_change` status,
            // then we will `request_prepare` from the cluster, set `nack_prepare_op`,
            // and stop repairing any further prepares:
            // This will also rebroadcast any `request_prepare` every `repair_timeout` tick.
            if (self.repair_prepare(op)) {
                if (self.nack_prepare_op) |nack_prepare_op| {
                    assert(nack_prepare_op == op);
                    assert(self.status == .view_change);
                    assert(self.leader_index(self.view) == self.replica);
                    assert(op > self.commit_max);
                    return;
                }

                // Otherwise, we continue to request prepares until our budget is used:
                budget -= 1;
                if (budget == 0) {
                    log.debug("{}: repair_prepares: request budget used", .{self.replica});
                    return;
                }
            }
        } else {
            // A faulty bit must imply a dirty bit:
            assert(!self.journal.faulty.bit(op));
        }
    }
}

/// During a view change, for uncommitted ops, which are few, we optimize for latency:
///
/// * request a `prepare` or `nack_prepare` from all followers in parallel,
/// * repair as soon as we get a `prepare`, or
/// * discard as soon as we get a majority of `nack_prepare` messages for the same checksum.
///
/// For committed ops, which represent the bulk of ops, we optimize for throughput:
///
/// * have multiple requests in flight to prime the repair queue,
/// * rotate these requests across the cluster round-robin,
/// * to spread the load across connected peers,
/// * to take advantage of each peer's outgoing bandwidth, and
/// * to parallelize disk seeks and disk read bandwidth.
///
/// This is effectively "many-to-one" repair, where a single replica recovers using the
/// resources of many replicas, for faster recovery.
fn repair_prepare(self: *Self, op: u64) bool {
    assert(self.status == .normal or self.status == .view_change);
    assert(self.repairs_allowed());
    assert(self.journal.dirty.bit(op));

    const checksum = self.journal.entry_for_op_exact(op).?.checksum;

    // We may be appending to or repairing the journal concurrently.
    // We do not want to re-request any of these prepares unnecessarily.
    if (self.journal.writing(op, checksum)) {
        log.debug("{}: repair_prepare: already writing op={} checksum={}", .{
            self.replica,
            op,
            checksum,
        });
        return false;
    }

    const request_prepare = Header{
        .command = .request_prepare,
        // If we request a prepare from a follower, as below, it is critical to pass a checksum:
        // Otherwise we could receive different prepares for the same op number.
        .context = checksum,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
        .op = op,
    };

    if (self.status == .view_change and op > self.commit_max) {
        // Only the leader is allowed to do repairs in a view change:
        assert(self.leader_index(self.view) == self.replica);

        const reason = if (self.journal.faulty.bit(op)) "faulty" else "dirty";
        log.debug(
            "{}: repair_prepare: op={} checksum={} (uncommitted, {s}, view_change)",
            .{
                self.replica,
                op,
                checksum,
                reason,
            },
        );

        if (self.replica_count == 2 and !self.journal.faulty.bit(op)) {
            // This is required to avoid a liveness issue for a cluster-of-two where a new
            // leader learns of an op during a view change but where the op is faulty on
            // the old leader. We must immediately roll back the op since it could not have
            // been committed by the old leader if we know we do not have it, and because
            // the old leader cannot send a nack_prepare for its faulty copy.
            // For this to be correct, the recovery protocol must set all headers as faulty,
            // not only as dirty.
            self.discard_uncommitted_ops_from(op, checksum);
            return false;
        }

        // Initialize the `nack_prepare` quorum counter for this uncommitted op:
        // It is also possible that we may start repairing a lower uncommitted op, having
        // initialized `nack_prepare_op` before we learn of a higher uncommitted dirty op,
        // in which case we also want to reset the quorum counter.
        if (self.nack_prepare_op) |nack_prepare_op| {
            assert(nack_prepare_op <= op);
            if (nack_prepare_op != op) {
                self.nack_prepare_op = op;
                self.reset_quorum_messages(
                    &self.nack_prepare_from_other_replicas,
                    .nack_prepare,
                );
            }
        } else {
            self.nack_prepare_op = op;
            self.reset_quorum_messages(
                &self.nack_prepare_from_other_replicas,
                .nack_prepare,
            );
        }

        assert(self.nack_prepare_op.? == op);
        assert(request_prepare.context == checksum);
        // Latency path: ask all other replicas in parallel.
        self.send_header_to_other_replicas(request_prepare);
    } else {
        const nature = if (op > self.commit_max) "uncommitted" else "committed";
        const reason = if (self.journal.faulty.bit(op)) "faulty" else "dirty";
        log.debug("{}: repair_prepare: op={} checksum={} ({s}, {s})", .{
            self.replica,
            op,
            checksum,
            nature,
            reason,
        });

        // We expect that `repair_prepare()` is called in reverse chronological order:
        // Any uncommitted ops should have already been dealt with.
        // We never roll back committed ops, and thus never regard `nack_prepare` responses.
        // Alternatively, we may not be the leader, in which case we do distinguish anyway.
        assert(self.nack_prepare_op == null);
        assert(request_prepare.context == checksum);
        // Throughput path: rotate requests round-robin across the cluster.
        if (self.choose_any_other_replica()) |replica| {
            self.send_header_to_replica(replica, request_prepare);
        }
    }

    return true;
}

/// Returns whether this replica may repair at all right now: always in normal status,
/// and in a view change only once the leader has its do_view_change quorum.
fn repairs_allowed(self: *Self) bool {
    switch (self.status) {
        .view_change => {
            if (self.do_view_change_quorum) {
                assert(self.leader_index(self.view) == self.replica);
                return true;
            } else {
                return false;
            }
        },
        .normal => return true,
        else => return false,
    }
}

/// Replicates to the next replica in the configuration (until we get back to the leader):
/// Replication starts and ends with the leader, we never forward back to the leader.
/// Does not flood the network with prepares that have already committed.
/// TODO Use recent heartbeat data for next replica to leapfrog if faulty (optimization).
fn replicate(self: *Self, message: *Message) void {
    assert(self.status == .normal);
    assert(message.header.command == .prepare);
    assert(message.header.view == self.view);
    assert(message.header.op == self.op);

    if (message.header.op <= self.commit_max) {
        log.debug("{}: replicate: not replicating (committed)", .{self.replica});
        return;
    }

    // Chain replication: forward to our successor in the ring, stopping once the ring
    // would wrap back around to the leader of the prepare's view.
    const next = @mod(self.replica + 1, @intCast(u8, self.replica_count));
    if (next == self.leader_index(message.header.view)) {
        log.debug("{}: replicate: not replicating (completed)", .{self.replica});
        return;
    }

    log.debug("{}: replicate: replicating to replica {}", .{ self.replica, next });
    self.send_message_to_replica(next, message);
}

/// Empties the prepare pipeline, unreffing all prepare and prepare_ok messages.
/// Stops the prepare timeout and resets the timeouts counter.
fn reset_pipeline(self: *Self) void {
    while (self.pipeline.pop()) |prepare| {
        self.unref_prepare_message_and_quorum_messages(&prepare);
    }
    self.prepare_timeout.stop();

    assert(self.pipeline.count == 0);
    assert(self.prepare_timeout.ticking == false);

    // Do not reset `repairing_pipeline` here as this must be reset by the read callback.
    // Otherwise, we would be making `repair_pipeline()` reentrant.
}

/// Unrefs and clears every message of the given command stored in a quorum counter
/// array, asserting that all stored messages share a single view.
fn reset_quorum_messages(self: *Self, messages: *QuorumMessages, command: Command) void {
    assert(messages.len == config.replicas_max);
    var view: ?u32 = null;
    var count: usize = 0;
    for (messages) |*received, replica| {
        if (received.*) |message| {
            assert(replica < self.replica_count);
            assert(message.header.command == command);
            assert(message.header.replica == replica);
            // We may have transitioned into a newer view:
            // However, all messages in the quorum should have the same view.
            assert(message.header.view <= self.view);
            if (view) |v| {
                assert(message.header.view == v);
            } else {
                view = message.header.view;
            }

            self.message_bus.unref(message);
            count += 1;
        }
        received.* = null;
    }
    assert(count <= self.replica_count);
    log.debug("{}: reset {} {s} message(s)", .{ self.replica, count, @tagName(command) });
}

/// Clears the do_view_change quorum messages and resets the quorum flag.
fn reset_quorum_do_view_change(self: *Self) void {
    self.reset_quorum_messages(&self.do_view_change_from_all_replicas, .do_view_change);
    self.do_view_change_quorum = false;
}

/// Clears the nack_prepare quorum messages and the op they were counted for.
fn reset_quorum_nack_prepare(self: *Self) void {
    self.reset_quorum_messages(&self.nack_prepare_from_other_replicas, .nack_prepare);
    self.nack_prepare_op = null;
}

/// Clears the start_view_change quorum messages and resets the quorum flag.
fn reset_quorum_start_view_change(self: *Self) void {
    self.reset_quorum_messages(&self.start_view_change_from_other_replicas, .start_view_change);
    self.start_view_change_quorum = false;
}

/// Sends a prepare_ok for `header` to the leader of the *current* view, but only when
/// safe: we must be in normal status, the op must be uncommitted, within our log, and
/// clean in the journal.
fn send_prepare_ok(self: *Self, header: *const Header) void {
    assert(header.command == .prepare);
    assert(header.cluster == self.cluster);
    assert(header.replica == self.leader_index(header.view));
    assert(header.view <= self.view);
    assert(header.op <= self.op or header.view < self.view);

    if (self.status != .normal) {
        log.debug("{}: send_prepare_ok: not sending ({})", .{ self.replica, self.status });
        return;
    }

    if (header.op > self.op) {
        assert(header.view < self.view);
        // An op may be reordered concurrently through a view change while being journalled:
        log.debug("{}: send_prepare_ok: not sending (reordered)", .{self.replica});
        return;
    }

    assert(self.status == .normal);
    // After a view change, replicas send prepare_oks for uncommitted ops with older views:
    // However, we only send to the leader of the current view (see below where we send).
    assert(header.view <= self.view);
    assert(header.op <= self.op);

    if (header.op <= self.commit_max) {
        log.debug("{}: send_prepare_ok: not sending (committed)", .{self.replica});
        return;
    }

    if (self.journal.has_clean(header)) {
        log.debug("{}: send_prepare_ok: op={} checksum={}", .{
            self.replica,
            header.op,
            header.checksum,
        });

        // It is crucial that replicas stop accepting prepare messages from earlier views
        // once they start the view change protocol. Without this constraint, the system
        // could get into a state in which there are two active primaries: the old one,
        // which hasn't failed but is merely slow or not well connected to the network, and
        // the new one. If a replica sent a prepare_ok message to the old primary after
        // sending its log to the new one, the old primary might commit an operation that
        // the new primary doesn't learn about in the do_view_change messages.

        // We therefore only ever send to the leader of the current view, never to the
        // leader of the prepare header's view:
        self.send_header_to_replica(self.leader_index(self.view), .{
            .command = .prepare_ok,
            .parent = header.parent,
            .client = header.client,
            .context = header.checksum,
            .request = header.request,
            .cluster = self.cluster,
            .replica = self.replica,
            .epoch = header.epoch,
            .view = self.view,
            .op = header.op,
            .commit = header.commit,
            .offset = header.offset,
            .operation = header.operation,
        });
    } else {
        log.debug("{}: send_prepare_ok: not sending (dirty)", .{self.replica});
        return;
    }
}

/// Resends prepare_oks for all uncommitted ops after a view change.
fn send_prepare_oks_after_view_change(self: *Self) void {
    assert(self.status == .normal);
    var op = self.commit_max + 1;
    while (op <= self.op) : (op += 1) {
        // We may have breaks or stale headers in our uncommitted chain here. However:
        // * being able to send what we have will allow the pipeline to commit earlier, and
        // * the leader will drop any prepare_ok for a prepare not in the pipeline.
        // This is safe only because the leader can verify against the prepare checksum.
        if (self.journal.entry_for_op_exact(op)) |header| {
            self.send_prepare_ok(header);
            // NOTE(review): this defer fires at the end of this if-block, i.e. once per
            // iteration immediately after the send — confirm this per-op flush (rather
            // than a single flush after the loop) is intended.
            defer self.flush_loopback_queue();
        }
    }
}

/// Broadcasts a start_view_change for the current view to all other replicas.
fn send_start_view_change(self: *Self) void {
    assert(self.status == .view_change);
    assert(!self.do_view_change_quorum);
    // Send only to other replicas (and not to ourself) to avoid a quorum off-by-one error:
    // This could happen if the replica mistakenly counts its own message in the quorum.
    self.send_header_to_other_replicas(.{
        .command = .start_view_change,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
    });
}

/// Sends our do_view_change message (carrying our log state) to the new leader,
/// once the start_view_change quorum has been reached.
fn send_do_view_change(self: *Self) void {
    assert(self.status == .view_change);
    assert(self.start_view_change_quorum);
    assert(!self.do_view_change_quorum);
    const count_start_view_change = self.count_quorum(
        &self.start_view_change_from_other_replicas,
        .start_view_change,
        0,
    );
    assert(count_start_view_change >= self.quorum_view_change - 1);

    const message = self.create_view_change_message(.do_view_change) orelse {
        log.alert("{}: send_do_view_change: waiting for message", .{self.replica});
        return;
    };
    defer self.message_bus.unref(message);

    assert(message.references == 1);
    assert(message.header.command == .do_view_change);
    assert(message.header.view == self.view);
    assert(message.header.op == self.op);
    assert(message.header.commit == self.commit_max);
    // TODO Assert that latest header in message body matches self.op.

    self.send_message_to_replica(self.leader_index(self.view), message);
}

/// Evicts a client (too many sessions) by sending it an eviction message.
fn send_eviction_message_to_client(self: *Self, client: u128) void {
    assert(self.status == .normal);
    assert(self.leader());

    log.alert("{}: too many sessions, sending eviction message to client={}", .{
        self.replica,
        client,
    });

    self.send_header_to_client(client, .{
        .command = .eviction,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
        .client = client,
    });
}

/// Wraps `header` in a header-only message and sends it to a client.
/// Drops the message (with an alert) if no message buffer is available.
fn send_header_to_client(self: *Self, client: u128, header: Header) void {
    const message = self.create_message_from_header(header) orelse {
        log.alert("{}: no header-only message available, dropping message to client {}", .{
            self.replica,
            client,
        });
        return;
    };
    defer self.message_bus.unref(message);

    self.message_bus.send_message_to_client(client, message);
}

/// Wraps `header` in a header-only message and sends it to every replica except ourself.
fn send_header_to_other_replicas(self: *Self, header: Header) void {
    const message = self.create_message_from_header(header) orelse {
        log.alert("{}: no header-only message available, dropping message to replicas", .{
            self.replica,
        });
        return;
    };
    defer self.message_bus.unref(message);

    var replica: u8 = 0;
    while (replica < self.replica_count) : (replica += 1) {
        if (replica != self.replica) {
            self.send_message_to_replica(replica, message);
        }
    }
}

/// Wraps `header` in a header-only message and sends it to a single replica.
fn send_header_to_replica(self: *Self, replica: u8, header: Header) void {
    const message = self.create_message_from_header(header) orelse {
        log.alert("{}: no header-only message available, dropping message to replica {}", .{
            self.replica,
            replica,
        });
        return;
    };
    defer self.message_bus.unref(message);

    self.send_message_to_replica(replica, message);
}

/// Sends an existing message to every replica except ourself.
fn send_message_to_other_replicas(self: *Self, message: *Message) void {
    var replica: u8 = 0;
    while (replica < self.replica_count) : (replica += 1) {
        if (replica != self.replica) {
            self.send_message_to_replica(replica, message);
        }
    }
}

/// Sends `message` to `replica`, after validating the header and asserting
/// per-command invariants about our status, view, and the destination.
/// A message addressed to ourself is placed on the loopback queue instead of
/// going through the message bus.
fn send_message_to_replica(self: *Self, replica: u8, message: *Message) void {
    log.debug("{}: sending {s} to replica {}: {}", .{
        self.replica,
        @tagName(message.header.command),
        replica,
        message.header,
    });

    if (message.header.invalid()) |reason| {
        log.emerg("{}: send_message_to_replica: invalid ({s})", .{ self.replica, reason });
        @panic("send_message_to_replica: invalid message");
    }

    assert(message.header.cluster == self.cluster);

    // TODO According to message.header.command, assert on the destination replica.
    switch (message.header.command) {
        .request => {
            // Do not assert message.header.replica because we forward .request messages.
            assert(self.status == .normal);
            assert(message.header.view <= self.view);
        },
        .prepare => {
            // Do not assert message.header.replica because we forward .prepare messages.
            switch (self.status) {
                .normal => assert(message.header.view <= self.view),
                .view_change => assert(message.header.view < self.view),
                else => unreachable,
            }
        },
        .prepare_ok => {
            assert(self.status == .normal);
            assert(message.header.view == self.view);
            // We must only ever send a prepare_ok to the latest leader of the active view:
            // We must never straddle views by sending to a leader in an older view.
            // Otherwise, we would be enabling a partitioned leader to commit.
            assert(replica == self.leader_index(self.view));
            assert(message.header.replica == self.replica);
        },
        .start_view_change => {
            assert(self.status == .view_change);
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
        },
        .do_view_change => {
            assert(self.status == .view_change);
            assert(self.start_view_change_quorum);
            assert(!self.do_view_change_quorum);
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
            assert(message.header.op == self.op);
            assert(replica == self.leader_index(self.view));
        },
        .start_view => switch (self.status) {
            .normal => {
                // A follower may ask the leader to resend the start_view message.
                assert(!self.start_view_change_quorum);
                assert(!self.do_view_change_quorum);
                assert(message.header.view == self.view);
                assert(message.header.replica == self.replica);
            },
            .view_change => {
                assert(self.start_view_change_quorum);
                assert(self.do_view_change_quorum);
                assert(message.header.view == self.view);
                assert(message.header.replica == self.replica);
            },
            else => unreachable,
        },
        .headers => {
            assert(self.status == .normal or self.status == .view_change);
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
        },
        .ping, .pong => {
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
        },
        .commit => {
            assert(self.status == .normal);
            assert(self.leader());
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
        },
        .request_headers => {
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
        },
        .request_prepare => {
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
        },
        .nack_prepare => {
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
            assert(replica == self.leader_index(self.view));
        },
        else => {
            log.notice("{}: send_message_to_replica: TODO {s}", .{
                self.replica,
                @tagName(message.header.command),
            });
        },
    }

    if (replica == self.replica) {
        // Deliver to ourself via the loopback queue rather than the message bus.
        assert(self.loopback_queue == null);
        self.loopback_queue = message.ref();
    } else {
        self.message_bus.send_message_to_replica(replica, message);
    }
}

/// Finds the header with the highest op number in a slice of headers from a replica.
/// Searches only by op number to find the highest `self.op` for the replica.
fn set_latest_op(self: *Self, headers: []Header, latest: *Header) void { switch (latest.command) { .reserved, .prepare => assert(latest.valid_checksum()), else => unreachable, } for (headers) |header| { assert(header.valid_checksum()); assert(header.invalid() == null); assert(header.command == .prepare); if (latest.command == .reserved or header.op > latest.op) { // We are simply trying to find the latest `self.op` in the replica's log. // We therefore do not compare views here. latest.* = header; } } } fn set_latest_op_and_k( self: *Self, latest: *const Header, k: u64, method: []const u8, ) void { assert(self.status == .view_change); assert(latest.valid_checksum()); assert(latest.invalid() == null); assert(latest.command == .prepare); assert(latest.cluster == self.cluster); // The view may have started already, so we can have a prepare in the same view: assert(latest.view <= self.view); log.debug("{}: {s}: view={} op={}..{} commit={}..{} checksum={} offset={}", .{ self.replica, method, self.view, self.op, latest.op, self.commit_max, k, latest.checksum, latest.offset, }); // Uncommitted ops may not survive a view change so we must assert `latest.op` against // `commit_max` and not `self.op`. However, committed ops (`commit_max`) must survive: assert(latest.op >= self.commit_max); assert(latest.op >= latest.commit); assert(latest.op >= k); // We expect that `commit_max` (and `commit_min`) may be greater than `latest.commit` // because `latest.commit` is the commit number at the time the `latest.op` prepared. // We expect that `commit_max` (and `commit_min`) may also be greater even than `k` // because we may be the old leader joining towards the end of the view change and we // may have committed the `latest.op` already. However, this is bounded by pipelining. // The intersection property only requires that all "possibly" committed operations must // survive into the new view so that they can then be committed by the new leader. 
This // guarantees that if the old leader "possibly" committed the operation, then the new // leader will also commit the operation. if (k < self.commit_max and self.commit_min == self.commit_max) { log.debug("{}: {s}: k={} < commit_max={} and commit_min == commit_max", .{ self.replica, method, k, self.commit_max, }); } assert(k >= latest.commit); assert(k >= self.commit_max - std.math.min(config.pipelining_max, self.commit_max)); assert(self.commit_min <= self.commit_max); assert(self.op >= self.commit_max or self.op < self.commit_max); self.op = latest.op; // Crucially, we must never rewind `commit_max` (and then `commit_min`) because // `commit_min` represents what we have already applied to our state machine: self.commit_max = std.math.max(self.commit_max, k); assert(self.commit_min <= self.commit_max); assert(self.op >= self.commit_max); // Do not set the latest op as dirty if we already have it exactly: // Otherwise, this would trigger a repair and delay the view change, or worse, it would // prevent us from assisting another replica to recover when we do in fact have the op. 
if (self.journal.entry_for_op_exact_with_checksum(latest.op, latest.checksum)) |_| { log.debug("{}: {s}: latest op exists exactly", .{ self.replica, method }); } else { self.journal.set_entry_as_dirty(latest); } assert(self.op == latest.op); self.journal.remove_entries_from(self.op + 1); assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum); } fn start_view_as_the_new_leader(self: *Self) void { assert(self.status == .view_change); assert(self.leader_index(self.view) == self.replica); assert(self.do_view_change_quorum); assert(!self.committing); assert(!self.repairing_pipeline); assert(self.commit_min == self.commit_max); assert(self.repair_pipeline_op() == null); assert(self.commit_max + self.pipeline.count == self.op); assert(self.valid_hash_chain_between(self.commit_min, self.op)); var pipeline_op = self.commit_max + 1; var pipeline_parent = self.journal.entry_for_op_exact(self.commit_max).?.checksum; var iterator = self.pipeline.iterator(); while (iterator.next_ptr()) |prepare| { assert(prepare.message.header.command == .prepare); assert(prepare.message.header.op == pipeline_op); assert(prepare.message.header.parent == pipeline_parent); pipeline_parent = prepare.message.header.checksum; pipeline_op += 1; } assert(self.pipeline.count <= config.pipelining_max); assert(self.commit_max + self.pipeline.count == pipeline_op - 1); assert(self.journal.dirty.len == 0); assert(self.journal.faulty.len == 0); assert(self.nack_prepare_op == null); const start_view = self.create_view_change_message(.start_view) orelse { log.alert("{}: start_view_as_the_new_leader: waiting for message", .{self.replica}); return; }; defer self.message_bus.unref(start_view); self.transition_to_normal_status(self.view); // Detect if the transition to normal status above accidentally resets the pipeline: assert(self.commit_max + self.pipeline.count == self.op); assert(self.status == .normal); assert(self.leader()); assert(start_view.references == 1); 
assert(start_view.header.command == .start_view); assert(start_view.header.view == self.view); assert(start_view.header.op == self.op); assert(start_view.header.commit == self.commit_max); // Send prepare_ok messages to ourself to contribute to the pipeline. self.send_prepare_oks_after_view_change(); self.send_message_to_other_replicas(start_view); } fn transition_to_normal_status(self: *Self, new_view: u32) void { log.debug("{}: transition_to_normal_status: view={}", .{ self.replica, new_view }); // In the VRR paper it's possible to transition from normal to normal for the same view. // For example, this could happen after a state transfer triggered by an op jump. assert(new_view >= self.view); self.view = new_view; self.view_normal = new_view; self.status = .normal; if (self.leader()) { log.debug("{}: transition_to_normal_status: leader", .{self.replica}); self.ping_timeout.start(); self.commit_timeout.start(); self.normal_status_timeout.stop(); self.view_change_status_timeout.stop(); self.view_change_message_timeout.stop(); self.repair_timeout.start(); // Do not reset the pipeline as there may be uncommitted ops to drive to completion. 
if (self.pipeline.count > 0) { assert(!self.prepare_timeout.ticking); self.prepare_timeout.start(); } } else { log.debug("{}: transition_to_normal_status: follower", .{self.replica}); self.ping_timeout.start(); self.commit_timeout.stop(); self.normal_status_timeout.start(); self.view_change_status_timeout.stop(); self.view_change_message_timeout.stop(); self.repair_timeout.start(); self.reset_pipeline(); } self.reset_quorum_start_view_change(); self.reset_quorum_do_view_change(); self.reset_quorum_nack_prepare(); assert(self.start_view_change_quorum == false); assert(self.do_view_change_quorum == false); assert(self.nack_prepare_op == null); } /// A replica i that notices the need for a view change advances its view, sets its status to /// view_change, and sends a ⟨start_view_change v, i⟩ message to all the other replicas, /// where v identifies the new view. A replica notices the need for a view change either based /// on its own timer, or because it receives a start_view_change or do_view_change message for /// a view with a larger number than its own view. fn transition_to_view_change_status(self: *Self, new_view: u32) void { log.debug("{}: transition_to_view_change_status: view={}..{}", .{ self.replica, self.view, new_view, }); assert(new_view > self.view); self.view = new_view; self.status = .view_change; self.ping_timeout.stop(); self.commit_timeout.stop(); self.normal_status_timeout.stop(); self.view_change_status_timeout.start(); self.view_change_message_timeout.start(); self.repair_timeout.stop(); // Do not reset quorum counters only on entering a view, assuming that the view will be // followed only by a single subsequent view change to the next view, because multiple // successive view changes can fail, e.g. after a view change timeout. // We must therefore reset our counters here to avoid counting messages from an older view, // which would violate the quorum intersection property essential for correctness. 
self.reset_pipeline(); self.reset_quorum_start_view_change(); self.reset_quorum_do_view_change(); self.reset_quorum_nack_prepare(); assert(self.start_view_change_quorum == false); assert(self.do_view_change_quorum == false); assert(self.nack_prepare_op == null); self.send_start_view_change(); } fn unref_prepare_message_and_quorum_messages( self: *Self, prepare: *const Prepare, ) void { self.message_bus.unref(prepare.message); for (prepare.ok_from_all_replicas) |received, replica| { if (received) |prepare_ok| { assert(replica < self.replica_count); self.message_bus.unref(prepare_ok); } } } fn update_client_table_entry(self: *Self, reply: *Message) void { assert(reply.header.command == .reply); assert(reply.header.operation != .register); assert(reply.header.client > 0); assert(reply.header.context == 0); assert(reply.header.op == reply.header.commit); assert(reply.header.commit > 0); assert(reply.header.request > 0); if (self.client_table.getPtr(reply.header.client)) |entry| { assert(entry.reply.header.command == .reply); assert(entry.reply.header.context == 0); assert(entry.reply.header.op == entry.reply.header.commit); assert(entry.reply.header.commit >= entry.session); assert(entry.reply.header.client == reply.header.client); assert(entry.reply.header.request + 1 == reply.header.request); assert(entry.reply.header.op < reply.header.op); assert(entry.reply.header.commit < reply.header.commit); // TODO Use this reply's prepare to cross-check against the entry's prepare, if we // still have access to the prepare in the journal (it may have been snapshotted). log.debug("{}: update_client_table_entry: client={} session={} request={}", .{ self.replica, reply.header.client, entry.session, reply.header.request, }); self.message_bus.unref(entry.reply); entry.reply = reply.ref(); } else { // If no entry exists, then the session must have been evicted while being prepared. // We can still send the reply, the next request will receive an eviction message. 
} } /// Whether it is safe to commit or send prepare_ok messages. /// Returns true if the hash chain is valid and up to date for the current view. /// This is a stronger guarantee than `valid_hash_chain_between()` below. fn valid_hash_chain(self: *Self, method: []const u8) bool { // If we know we could validate the hash chain even further, then wait until we can: // This is partial defense-in-depth in case `self.op` is ever advanced by a reordered op. if (self.op < self.commit_max) { log.debug("{}: {s}: waiting for repair (op={} < commit={})", .{ self.replica, method, self.op, self.commit_max, }); return false; } // We must validate the hash chain as far as possible, since `self.op` may disclose a fork: if (!self.valid_hash_chain_between(self.commit_min, self.op)) { log.debug("{}: {s}: waiting for repair (hash chain)", .{ self.replica, method }); return false; } return true; } /// Returns true if all operations are present, correctly ordered and connected by hash chain, /// between `op_min` and `op_max` (both inclusive). fn valid_hash_chain_between(self: *Self, op_min: u64, op_max: u64) bool { assert(op_min <= op_max); // If we use anything less than self.op then we may commit ops for a forked hash chain that // have since been reordered by a new leader. 
assert(op_max == self.op); var b = self.journal.entry_for_op_exact(op_max).?; var op = op_max; while (op > op_min) { op -= 1; if (self.journal.entry_for_op_exact(op)) |a| { assert(a.op + 1 == b.op); if (a.checksum == b.parent) { assert(self.ascending_viewstamps(a, b)); b = a; } else { log.debug("{}: valid_hash_chain_between: break: A: {}", .{ self.replica, a }); log.debug("{}: valid_hash_chain_between: break: B: {}", .{ self.replica, b }); return false; } } else { log.debug("{}: valid_hash_chain_between: missing op={}", .{ self.replica, op }); return false; } } assert(b.op == op_min); return true; } fn view_jump(self: *Self, header: *const Header) void { const to: Status = switch (header.command) { .prepare, .commit => .normal, .start_view_change, .do_view_change, .start_view => .view_change, else => unreachable, }; if (self.status != .normal and self.status != .view_change) return; if (header.view < self.view) return; // Compare status transitions and decide whether to view jump or ignore: switch (self.status) { .normal => switch (to) { // If the transition is to `.normal`, then ignore if for the same view: .normal => if (header.view == self.view) return, // If the transition is to `.view_change`, then ignore if the view has started: .view_change => if (header.view == self.view) return, else => unreachable, }, .view_change => switch (to) { // This is an interesting special case: // If the transition is to `.normal` in the same view, then we missed the // `start_view` message and we must also consider this a view jump: // If we don't handle this below then our `view_change_status_timeout` will fire // and we will disrupt the cluster with another view change for a newer view. 
.normal => {}, // If the transition is to `.view_change`, then ignore if for the same view: .view_change => if (header.view == self.view) return, else => unreachable, }, else => unreachable, } switch (to) { .normal => { if (header.view == self.view) { assert(self.status == .view_change); log.debug("{}: view_jump: waiting to exit view change", .{self.replica}); } else { assert(header.view > self.view); assert(self.status == .view_change or self.status == .normal); log.debug("{}: view_jump: waiting to jump to newer view", .{self.replica}); } // TODO Debounce and decouple this from `on_message()` by moving into `tick()`: log.debug("{}: view_jump: requesting start_view message", .{self.replica}); self.send_header_to_replica(self.leader_index(header.view), .{ .command = .request_start_view, .cluster = self.cluster, .replica = self.replica, .view = header.view, }); }, .view_change => { assert(header.view > self.view); assert(self.status == .view_change or self.status == .normal); if (header.view == self.view + 1) { log.debug("{}: view_jump: jumping to view change", .{self.replica}); } else { log.debug("{}: view_jump: jumping to next view change", .{self.replica}); } self.transition_to_view_change_status(header.view); }, else => unreachable, } } fn write_prepare(self: *Self, message: *Message, trigger: Journal.Write.Trigger) void { assert(message.references > 0); assert(message.header.command == .prepare); assert(message.header.view <= self.view); assert(message.header.op <= self.op); if (!self.journal.has(message.header)) { log.debug("{}: write_prepare: ignoring op={} checksum={} (header changed)", .{ self.replica, message.header.op, message.header.checksum, }); return; } if (self.journal.writing(message.header.op, message.header.checksum)) { log.debug("{}: write_prepare: ignoring op={} checksum={} (already writing)", .{ self.replica, message.header.op, message.header.checksum, }); return; } self.journal.write_prepare(write_prepare_on_write, message, trigger); } fn 
write_prepare_on_write( self: *Self, wrote: ?*Message, trigger: Journal.Write.Trigger, ) void { // `null` indicates that we did not complete the write for some reason. const message = wrote orelse return; self.send_prepare_ok(message.header); defer self.flush_loopback_queue(); switch (trigger) { .append => {}, // If this was a repair, continue immediately to repair the next prepare: // This is an optimization to eliminate waiting until the next repair timeout. .repair => self.repair(), } } }; }
src/vsr/replica.zig
const std = @import("std");
const fs = std.fs;

/// Advances the seating simulation by one generation, reading from `sits` and
/// writing into `next` (same length). `countFn` counts occupied seats relevant
/// to an index (adjacent for part 1, line-of-sight for part 2) and `crowd_limit`
/// is the occupancy count at which an occupied seat empties.
/// Returns true if any seat changed state.
fn stepOnce(
    sits: []u8,
    next: []u8,
    stride: usize,
    countFn: fn ([]u8, usize, usize) u32,
    crowd_limit: u32,
) bool {
    var changed = false;
    for (sits) |s, index| {
        if (s == '.') {
            // Floor never changes.
            next[index] = s;
            continue;
        }
        const ncount = countFn(sits, stride, index);
        if (s == 'L' and ncount == 0) {
            next[index] = '#';
            changed = true;
        } else if (s == '#' and ncount >= crowd_limit) {
            next[index] = 'L';
            changed = true;
        } else {
            next[index] = s;
        }
    }
    return changed;
}

/// Counts occupied ('#') seats in the map.
fn countOccupied(sits: []const u8) u32 {
    var occupied: u32 = 0;
    for (sits) |s| {
        if (s == '#') occupied += 1;
    }
    return occupied;
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = &gpa.allocator;

    const input = try fs.cwd().readFileAlloc(allocator, "data/input_11_1.txt", std.math.maxInt(usize));
    // FIX: the input buffer was previously leaked.
    defer allocator.free(input);

    var lines = std.mem.tokenize(input, "\n");

    // Flattened grid: rows concatenated, `stride` cells per row.
    var input_sits = std.ArrayList(u8).init(allocator);
    defer input_sits.deinit();
    var stride: usize = 0;

    while (lines.next()) |raw_line| {
        const line = std.mem.trim(u8, raw_line, " \r\n");
        if (line.len == 0) break;
        try input_sits.appendSlice(line);
        if (stride == 0) {
            stride = line.len;
        } else {
            // All rows must have equal width for the flat indexing to work.
            std.debug.assert(line.len == stride);
        }
    }

    { // Solution 1: adjacent neighbors, seats empty at >= 4 occupied neighbors.
        var sits = std.ArrayList(u8).init(allocator);
        var next = std.ArrayList(u8).init(allocator);
        defer sits.deinit();
        defer next.deinit();
        try sits.resize(input_sits.items.len);
        try next.resize(input_sits.items.len);
        std.mem.copy(u8, sits.items, input_sits.items);
        std.mem.copy(u8, next.items, input_sits.items);

        var map_changed = true;
        while (map_changed) {
            map_changed = stepOnce(sits.items, next.items, stride, countNeighbors, 4);
            std.mem.swap(std.ArrayList(u8), &sits, &next);
            // printMap(sits.items, stride);
        }

        std.debug.print("Day 11 - Solution 1: {}\n", .{countOccupied(sits.items)});
    }

    { // Solution 2: first seat visible in each direction, empty at >= 5 occupied.
        var sits = std.ArrayList(u8).init(allocator);
        var next = std.ArrayList(u8).init(allocator);
        defer sits.deinit();
        defer next.deinit();
        try sits.resize(input_sits.items.len);
        try next.resize(input_sits.items.len);
        std.mem.copy(u8, sits.items, input_sits.items);
        std.mem.copy(u8, next.items, input_sits.items);

        var map_changed = true;
        while (map_changed) {
            map_changed = stepOnce(sits.items, next.items, stride, countDirections, 5);
            std.mem.swap(std.ArrayList(u8), &sits, &next);
            // printMap(sits.items, stride);
        }

        std.debug.print("Day 11 - Solution 2: {}\n", .{countOccupied(sits.items)});
    }
}

/// A 2D cell coordinate; signed so that out-of-bounds probes can go negative.
const Pos = struct {
    x: i64,
    y: i64,

    const Self = @This();

    /// Converts a flat index into (x, y) given the row `stride`.
    pub fn fromIndex(stride: usize, index: usize) Self {
        const sindex = @intCast(i64, index);
        const sstride = @intCast(i64, stride);
        const y = @divFloor(sindex, sstride);
        const x = sindex - y * sstride;
        return Self{ .x = x, .y = y };
    }

    /// Converts (x, y) back to a flat index. Caller must ensure both are in bounds.
    pub fn toIndex(self: *const Self, stride: usize) usize {
        return @intCast(usize, self.*.y) * stride + @intCast(usize, self.*.x);
    }
};

/// Counts occupied seats among the (up to 8) cells directly adjacent to `index`.
fn countNeighbors(sits: []u8, stride: usize, index: usize) u32 {
    const pos = Pos.fromIndex(stride, index);
    const max_y = sits.len / stride;
    const sstride = @intCast(i64, stride);
    const neighbors_indices = [8]Pos{
        Pos{ .x = pos.x - 1, .y = pos.y - 1 },
        Pos{ .x = pos.x, .y = pos.y - 1 },
        Pos{ .x = pos.x + 1, .y = pos.y - 1 },
        Pos{ .x = pos.x - 1, .y = pos.y },
        Pos{ .x = pos.x + 1, .y = pos.y },
        Pos{ .x = pos.x - 1, .y = pos.y + 1 },
        Pos{ .x = pos.x, .y = pos.y + 1 },
        Pos{ .x = pos.x + 1, .y = pos.y + 1 },
    };
    var acc: u32 = 0;
    for (neighbors_indices) |np| {
        if (np.x < 0 or np.x >= sstride or np.y < 0 or np.y >= max_y) continue;
        if (sits[np.toIndex(stride)] == '#') {
            acc += 1;
        }
    }
    return acc;
}

/// Counts the directions (of 8) in which the first visible seat is occupied.
fn countDirections(sits: []u8, stride: usize, index: usize) u32 {
    var res: u32 = 0;
    if (isDirectionOccupied(sits, stride, index, -1, -1)) res += 1;
    if (isDirectionOccupied(sits, stride, index, 0, -1)) res += 1;
    if (isDirectionOccupied(sits, stride, index, 1, -1)) res += 1;
    if (isDirectionOccupied(sits, stride, index, -1, 0)) res += 1;
    if (isDirectionOccupied(sits, stride, index, 1, 0)) res += 1;
    if (isDirectionOccupied(sits, stride, index, -1, 1)) res += 1;
    if (isDirectionOccupied(sits, stride, index, 0, 1)) res += 1;
    if (isDirectionOccupied(sits, stride, index, 1, 1)) res += 1;
    return res;
}

/// Walks from `index` in direction (dir_x, dir_y) and reports whether the first
/// seat encountered is occupied; floor cells are looked through, and reaching
/// the edge (or an empty seat) yields false.
fn isDirectionOccupied(sits: []u8, stride: usize, index: usize, dir_x: i64, dir_y: i64) bool {
    const pos = Pos.fromIndex(stride, index);
    const max_y = sits.len / stride;
    const sstride = @intCast(i64, stride);
    var np = pos;
    // The walk always terminates: each step moves strictly toward a boundary,
    // so the out-of-bounds check must eventually fire.
    while (true) {
        np.x += dir_x;
        np.y += dir_y;
        if (np.x < 0 or np.x >= sstride or np.y < 0 or np.y >= max_y) return false;
        const c = sits[np.toIndex(stride)];
        if (c == '#') return true;
        if (c == 'L') return false;
    }
}

/// Debug helper: prints the grid one row per line.
fn printMap(sits: []u8, stride: usize) void {
    var index: usize = 0;
    while (index < sits.len) : (index += stride) {
        std.debug.print("{}\n", .{sits[index .. index + stride]});
    }
}
2020/src/day_11.zig
const std = @import("std");
const debug = std.debug;
const fmt = std.fmt;
const heap = std.heap;
const io = std.io;
const mem = std.mem;
const net = std.net;
const os = std.os;
const time = std.time;

const Atomic = std.atomic.Atomic;
const assert = std.debug.assert;
const IO_Uring = std.os.linux.IO_Uring;
const io_uring_cqe = std.os.linux.io_uring_cqe;
const io_uring_sqe = std.os.linux.io_uring_sqe;

const httpserver = @import("lib.zig");

const argsParser = @import("args");

const logger = std.log.scoped(.main);

// Shared shutdown flag: set to false by the signal handler, polled by every
// server thread's run loop.
var global_running: Atomic(bool) = Atomic(bool).init(true);

/// Installs process-wide signal handlers: SIGPIPE is ignored (broken client
/// connections must not kill the process) and SIGTERM/SIGINT flip
/// `global_running` so all server threads shut down cleanly.
fn addSignalHandlers() void {
    // Ignore broken pipes
    {
        var act = os.Sigaction{
            .handler = .{
                // NOTE(review): SIG.IGN is assigned to the `.sigaction` variant of the
                // handler union rather than `.handler` — both are function pointers so
                // this works, but confirm it is intentional.
                .sigaction = os.SIG.IGN,
            },
            .mask = os.empty_sigset,
            .flags = 0,
        };
        os.sigaction(os.SIG.PIPE, &act, null);
    }

    // Catch SIGINT/SIGTERM for proper shutdown
    {
        var act = os.Sigaction{
            .handler = .{
                .handler = struct {
                    fn wrapper(sig: c_int) callconv(.C) void {
                        logger.info("caught signal {d}", .{sig});

                        global_running.store(false, .SeqCst);
                    }
                }.wrapper,
            },
            .mask = os.empty_sigset,
            .flags = 0,
        };
        os.sigaction(os.SIG.TERM, &act, null);
        os.sigaction(os.SIG.INT, &act, null);
    }
}

/// Per-thread server state: an id (used in logs), the io_uring HTTP server
/// instance, and the thread running it.
const ServerContext = struct {
    const Self = @This();

    id: usize,
    server: httpserver.Server(*Self),
    thread: std.Thread,

    /// Formats the context as its numeric id; only the "{s}" specifier is accepted.
    pub fn format(self: *const Self, comptime fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        _ = options;

        if (comptime !mem.eql(u8, "s", fmt_string)) @compileError("format string must be s");
        try writer.print("{d}", .{self.id});
    }

    /// Request handler invoked by the server for each parsed HTTP request.
    /// Paths under /static are served from disk (leading '/' stripped to make
    /// the path relative); everything else gets a fixed greeting body.
    fn handleRequest(self: *Self, per_request_allocator: mem.Allocator, peer: httpserver.Peer, req: httpserver.Request) anyerror!httpserver.Response {
        _ = per_request_allocator;

        logger.debug("ctx#{d:<4} IN HANDLER addr={s} method: {s}, path: {s}, minor version: {d}, body: \"{s}\"", .{
            self.id,
            peer.addr,
            req.method.toString(),
            req.path,
            req.minor_version,
            req.body,
        });

        if (mem.startsWith(u8, req.path, "/static")) {
            return httpserver.Response{
                .send_file = .{
                    .status_code = .ok,
                    .headers = &[_]httpserver.Header{},
                    .path = req.path[1..],
                },
            };
        } else {
            return httpserver.Response{
                .response = .{
                    .status_code = .ok,
                    .headers = &[_]httpserver.Header{},
                    .data = "Hello, World in handler!",
                },
            };
        }
    }
};

/// Entry point: parses CLI options, installs signal handlers, creates one
/// listening socket shared by N server threads, runs them, and joins them on
/// shutdown (triggered via `global_running`).
pub fn main() anyerror!void {
    var gpa = heap.GeneralPurposeAllocator(.{}){};
    // GPA reports leaks on deinit; treat any leak as fatal.
    defer if (gpa.deinit()) {
        debug.panic("leaks detected", .{});
    };
    var allocator = gpa.allocator();

    //

    const options = try argsParser.parseForCurrentProcess(struct {
        @"listen-port": u16 = 3405,
        @"max-server-threads": usize = 1,
        @"max-ring-entries": u13 = 512,
        @"max-buffer-size": usize = 4096,
        @"max-connections": usize = 128,
    }, allocator, .print);
    defer options.deinit();

    const listen_port = options.options.@"listen-port";
    const max_server_threads = options.options.@"max-server-threads";
    const max_ring_entries = options.options.@"max-ring-entries";
    const max_buffer_size = options.options.@"max-buffer-size";
    const max_connections = options.options.@"max-connections";

    // NOTE(vincent): for debugging
    // var logging_allocator = heap.loggingAllocator(gpa.allocator());
    // var allocator = logging_allocator.allocator();

    addSignalHandlers();

    // Create the server socket
    const server_fd = try httpserver.createSocket(listen_port);

    logger.info("listening on :{d}", .{listen_port});
    logger.info("max server threads: {d}, max ring entries: {d}, max buffer size: {d}, max connections: {d}", .{
        max_server_threads,
        max_ring_entries,
        max_buffer_size,
        max_connections,
    });

    // Create the servers
    var servers = try allocator.alloc(ServerContext, max_server_threads);
    for (servers) |*item, i| {
        item.id = i;

        try item.server.init(
            allocator,
            .{
                .max_ring_entries = max_ring_entries,
                .max_buffer_size = max_buffer_size,
                .max_connections = max_connections,
            },
            &global_running,
            server_fd,
            item,
            ServerContext.handleRequest,
        );
    }
    defer {
        for (servers) |*item| item.server.deinit();
        allocator.free(servers);
    }

    // One worker thread per server context; each runs the server's event loop
    // with a 1-second tick until `global_running` goes false.
    for (servers) |*item| {
        item.thread = try std.Thread.spawn(
            .{},
            struct {
                fn worker(server: *httpserver.Server(*ServerContext)) !void {
                    return server.run(1 * time.ns_per_s);
                }
            }.worker,
            .{&item.server},
        );
    }

    for (servers) |*item| item.thread.join();
}
src/main.zig
const std = @import("std");
const assert = std.debug.assert;
const warn = std.debug.warn;
const mem = std.mem;

/// Parsed view over a binary animation file.
///
/// File layout (little-endian u32 words):
///   [0] magic 0xee334507
///   [1] frame_count
///   [2] frame_duration (microseconds per frame)
///   [3] bone_count
///   then bone_count length-prefixed names (1 length byte + bytes, padded to u32),
///   then frame_count*bone_count 4x4 f32 matrices, relative then absolute.
pub const AnimationData = struct {
    frame_count: u32,
    frame_duration: u32, // microseconds
    bone_count: u32,
    bone_names: []const u8,
    matrices_relative: []const f32,
    matrices_absolute: []const f32,

    // This struct references (read-only) the data until delete is called (unless this function returns with an error)
    pub fn init(data: []align(4) const u8) !AnimationData {
        if (data.len < 16) {
            warn("AnimationData.init: Data length is only {}\n", .{data.len});
            return error.FileTooSmall;
        }
        if (data.len % 4 != 0) {
            return error.InvalidFileSize;
        }

        const data_u32 = std.mem.bytesAsSlice(u32, data);
        const data_f32 = std.mem.bytesAsSlice(f32, data);

        if (data_u32[0] != 0xee334507) {
            warn("AnimationData.init: Magic field incorrect. Value was {}\n", .{data_u32[0]});
            return error.NotAnAnimationFile;
        }

        const frame_count = data_u32[1];
        const frame_duration = data_u32[2];
        const bone_count = data_u32[3];

        // Reject animations whose total duration would not fit in u32 microseconds.
        var overflow_bits: u32 = undefined;
        if (@mulWithOverflow(u32, frame_duration, frame_count, &overflow_bits)) {
            return error.AnimationTooLong;
        }

        var offset: u32 = 4;
        if (4 + bone_count > data_u32.len) {
            return error.FileTooSmall;
        }

        // Walk the length-prefixed name table; each entry occupies
        // ceil((1 + stringLen) / 4) u32 words.
        const bone_names_list_start = offset;
        var i: u32 = 0;
        while (i < bone_count) {
            if (offset + 1 > data_u32.len) {
                return error.FileTooSmall;
            }
            const stringLen = data_u32[offset] & 0xff;
            if (offset + (1 + stringLen + 3) / 4 > data_u32.len) {
                return error.FileTooSmall;
            }
            offset += (1 + stringLen + 3) / 4;
            i += 1;
        }
        const bone_names = std.mem.sliceAsBytes(data_u32[bone_names_list_start..offset]);

        // FIX: guard the matrix-size arithmetic against u32 overflow (consistent with
        // the frame_duration*frame_count guard above). A wrapped size would otherwise
        // let the bounds check pass and produce wrongly-sized slices.
        var frame_matrices: u32 = undefined;
        if (@mulWithOverflow(u32, bone_count, frame_count, &frame_matrices)) {
            return error.FileTooSmall;
        }
        var matrix_array_size: u32 = undefined;
        if (@mulWithOverflow(u32, frame_matrices, 4 * 4, &matrix_array_size)) {
            return error.FileTooSmall;
        }
        // FIX: compare in u64 so `matrix_array_size * 2` cannot wrap (or trip a
        // safety panic) for adversarial inputs.
        if (@as(u64, offset) + @as(u64, matrix_array_size) * 2 > data_u32.len) {
            return error.FileTooSmall;
        }
        const matrices_relative = data_f32[offset .. offset + matrix_array_size];
        const matrices_absolute = data_f32[offset + matrix_array_size .. offset + matrix_array_size * 2];

        return AnimationData{
            .frame_count = frame_count,
            .frame_duration = frame_duration,
            .bone_count = bone_count,
            .bone_names = bone_names,
            .matrices_relative = matrices_relative,
            .matrices_absolute = matrices_absolute,
        };
    }

    /// Returns the index of the bone named `bone_name`, scanning the packed
    /// name table, or error.NoSuchBone if absent.
    pub fn getBoneIndex(self: AnimationData, bone_name: []const u8) !u32 {
        var i: u32 = 0;
        var offset: u32 = 0;
        while (i < self.bone_count) : (i += 1) {
            const stringLen = self.bone_names[offset];
            if (std.mem.eql(u8, self.bone_names[offset + 1 .. offset + 1 + stringLen], bone_name)) {
                return i;
            }
            // Advance past this entry, re-aligning to the next u32 boundary.
            offset += 1 + stringLen;
            if (offset % 4 != 0) {
                offset += 4 - (offset % 4);
            }
        }
        return error.NoSuchBone;
    }
};
src/ModelFiles/AnimationFiles.zig
pub usingnamespace @import("addr.zig");

/// Representations of various x86 specific structures and descriptor tables.
pub const structures = @import("structures/structures.zig");

/// Access to various system and model specific registers.
pub const registers = @import("registers/registers.zig");

/// Special x86_64 instructions.
pub const instructions = @import("instructions/instructions.zig");

/// Various additional functionality in addition to the rust x86_64 crate
pub const additional = @import("additional/additional.zig");

/// x86 protection-ring levels, stored as the 2-bit value used in selectors.
pub const PrivilegeLevel = enum(u8) {
    /// Privilege-level 0 (most privilege): This level is used by critical system-software
    /// components that require direct access to, and control over, all processor and system
    /// resources. This can include BIOS, memory-management functions, and interrupt handlers.
    Ring0 = 0,

    /// Privilege-level 1 (moderate privilege): This level is used by less-critical system-
    /// software services that can access and control a limited scope of processor and system
    /// resources. Software running at these privilege levels might include some device drivers
    /// and library routines. The actual privileges of this level are defined by the
    /// operating system.
    Ring1 = 1,

    /// Privilege-level 2 (moderate privilege): Like level 1, this level is used by
    /// less-critical system-software services that can access and control a limited scope of
    /// processor and system resources. The actual privileges of this level are defined by the
    /// operating system.
    Ring2 = 2,

    /// Privilege-level 3 (least privilege): This level is used by application software.
    /// Software running at privilege-level 3 is normally prevented from directly accessing
    /// most processor and system resources. Instead, applications request access to the
    /// protected processor and system resources by calling more-privileged service routines
    /// to perform the accesses.
    Ring3 = 3,
};

/// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
/// data are not placed into the same cache line. Updating an atomic value invalidates the whole
/// cache line it belongs to, which makes the next access to the same cache line slower for other
/// CPU cores.
///
/// Note that 128 is just a reasonable guess and is not guaranteed to match the actual cache line
/// length of the machine the program is running on. On modern Intel architectures, spatial
/// prefetcher is pulling pairs of 64-byte cache lines at a time, so we pessimistically assume that
/// cache lines are 128 bytes long.
pub const CACHE_LINE_LENGTH: usize = 128;

/// Result of the `cpuid` instruction.
pub const CpuidResult = struct {
    /// EAX register.
    eax: u32,
    /// EBX register.
    ebx: u32,
    /// ECX register.
    ecx: u32,
    /// EDX register.
    edx: u32,
};

/// Returns the result of the `cpuid` instruction for a given `leaf` (`EAX`) and sub_leaf (`ECX`) equal to zero.
/// See `cpuidWithSubleaf`
pub fn cpuid(leaf: u32) CpuidResult {
    return cpuidWithSubleaf(leaf, 0);
}

/// Returns the result of the `cpuid` instruction for a given `leaf` (`EAX`) and `sub_leaf` (`ECX`).
///
/// The highest-supported leaf value is returned by the first item of `cpuidMax(0)`.
/// For leaves containing sub-leaves, the second item returns the highest-supported sub-leaf value.
///
/// The CPUID Wikipedia page contains how to query which information using the `EAX` and `ECX` registers, and the interpretation of
/// the results returned in `EAX`, `EBX`, `ECX`, and `EDX`.
///
/// The references are:
/// - Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2: Instruction Set Reference, A-Z
/// - AMD64 Architecture Programmer's Manual, Volume 3: General-Purpose and System Instructions
pub fn cpuidWithSubleaf(leaf: u32, sub_leaf: u32) CpuidResult {
    var eax: u32 = undefined;
    var ebx: u32 = undefined;
    var ecx: u32 = undefined;
    var edx: u32 = undefined;

    // Inputs: EAX = leaf, ECX = sub-leaf; all four result registers are outputs.
    asm volatile ("cpuid"
        : [eax] "={eax}" (eax),
          [ebx] "={ebx}" (ebx),
          [ecx] "={ecx}" (ecx),
          [edx] "={edx}" (edx),
        : [eax] "{eax}" (leaf),
          [ecx] "{ecx}" (sub_leaf),
    );

    return CpuidResult{
        .eax = eax,
        .ebx = ebx,
        .ecx = ecx,
        .edx = edx,
    };
}

/// Returns the highest-supported `leaf` (`EAX`) and sub-leaf (`ECX`) `cpuid` values.
///
/// If `cpuid` is supported, and `leaf` is zero, then the first item contains the highest `leaf` value that `cpuid` supports.
/// For `leaf`s containing sub-leafs, the second item contains the highest-supported sub-leaf value.
pub fn cpuidMax(leaf: u32) [2]u32 {
    const result = cpuid(leaf);
    return [2]u32{
        result.eax,
        result.ebx,
    };
}

comptime {
    // Force semantic analysis of every public declaration (catches compile errors
    // in decls that are otherwise unreferenced).
    const std = @import("std");
    std.testing.refAllDecls(@This());
}
src/index.zig
/// Catalog of commonly used image/video resolutions, grouped by use case.
pub const Resolutions = struct {
    /// Instagram post/story formats.
    pub const Instagram = struct {
        pub const square = Res.init(1080, 1080);
        pub const portrait = AspectRatio.init(8, 10).shortSide(1080).portrait();
        pub const landscape = AspectRatio.init(100, 191).longSide(1080);
        pub const story = AspectRatio.init(16, 9).shortSide(1080).portrait();
    };

    /// Standard print sizes (inches) rendered at 600 DPI via `.scale(600)`.
    pub const Prints = struct {
        pub const _24x36 = Res.init(24, 36).scale(600);
        pub const _24x30 = Res.init(24, 30).scale(600);
        pub const _20x30 = Res.init(20, 30).scale(600);
        pub const _20x24 = Res.init(20, 24).scale(600);
        pub const _16x20 = Res.init(16, 20).scale(600);
        pub const _18x24 = Res.init(18, 24).scale(600);
        pub const _11x14 = Res.init(11, 14).scale(600);
        pub const _10x20 = Res.init(10, 20).scale(600);
        pub const _10x13 = Res.init(10, 13).scale(600);
        pub const _8x10 = Res.init(8, 10).scale(600);
        pub const _5x7 = Res.init(5, 7).scale(600);
        pub const _4x6 = Res.init(4, 6).scale(600);
        // 4 x 5.3 inches, expressed as tenths of an inch at 60 px per tenth.
        pub const _4x5_3 = Res.init(40, 53).scale(60);
        pub const _3x5 = Res.init(3, 5).scale(600);
        pub const _12x36 = Res.init(12, 36).scale(600);
        pub const _8x24 = Res.init(8, 24).scale(600);
        pub const _5x15 = Res.init(5, 15).scale(600);
        pub const _30x30 = Res.square(30).scale(600);
        pub const _20x20 = Res.square(20).scale(600);
        pub const _16x16 = Res.square(16).scale(600);
        pub const _12x12 = Res.square(12).scale(600);
        pub const _10x10 = Res.square(10).scale(600);
        pub const _8x8 = Res.square(8).scale(600);
        pub const _6x6 = Res.square(6).scale(600);
        pub const _5x5 = Res.square(5).scale(600);
        pub const _4x4 = Res.square(4).scale(600);
    };

    /// Desktop / device wallpaper targets.
    pub const Wallpapers = struct {
        pub const _4k = AspectRatio.init(16, 9).shortSide(2160);
        pub const square_2160 = Res.square(2160);
        pub const iosParallax = Res.square(2662);
        pub const wide_1440 = Res.init(3440, 1440);
        pub const macbook_13 = AspectRatio.init(8, 5).shortSide(1600);
    };

    /// Common display resolutions and their familiar aliases.
    pub const Screen = struct {
        pub const _240p = Res.init(320, 240);
        pub const _480p = Res.init(640, 480);
        pub const _720p = Res.init(1280, 720);
        pub const _1080p = Res.init(1920, 1080);
        pub const _1440p = Res.init(2560, 1440);
        pub const _2160p = Res.init(3840, 2160);
        pub const _4320p = Res.init(7680, 4320);
        pub const ldtv = _240p;
        pub const hdtv = _720p;
        pub const qhd = _1440p;
        pub const _4k = _2160p;
        pub const _8k = _4320p;
        pub const vga = Res.init(640, 480);
        pub const svga = Res.init(800, 600);
    };

    /// YouTube encoding ladder resolutions.
    pub const YouTube = struct {
        pub const _2160p = Res.init(3840, 2160);
        pub const _1440p = Res.init(2560, 1440);
        pub const _1080p = Res.init(1920, 1080);
        pub const _720p = Res.init(1280, 720);
        pub const _480p = Res.init(854, 480);
        pub const _360p = Res.init(640, 360);
        pub const _240p = Res.init(426, 240);
    };
};

/// A reduced aspect ratio stored as its short and long components.
pub const AspectRatio = struct {
    small: usize = 1,
    big: usize = 1,

    const Self = @This();

    /// Build a ratio from two side lengths; order of the arguments is irrelevant.
    pub fn init(a: usize, b: usize) Self {
        if (a < b) {
            return .{ .small = a, .big = b };
        }
        return .{ .small = b, .big = a };
    }

    /// The 1:1 ratio.
    pub fn square() Self {
        return .{};
    }

    /// Resolution whose *shorter* side is `size` (result is landscape-oriented).
    pub fn shortSide(self: Self, size: usize) Res {
        const long = size * self.big / self.small;
        return .{ .width = long, .height = size };
    }

    /// Resolution whose *longer* side is `size` (result is landscape-oriented).
    pub fn longSide(self: Self, size: usize) Res {
        const short = size * self.small / self.big;
        return .{ .width = size, .height = short };
    }
};

/// A pixel resolution (width x height).
pub const Res = struct {
    width: usize,
    height: usize,

    const Self = @This();

    /// Construct from explicit dimensions.
    pub fn init(width: usize, height: usize) Self {
        return .{ .width = width, .height = height };
    }

    /// Construct a square resolution of side `size`.
    pub fn square(size: usize) Self {
        return init(size, size);
    }

    /// Repeatedly halve both dimensions until the pixel count fits in `limit`.
    pub fn limitPixels(self: Self, limit: usize) Self {
        var shift: u6 = 0;
        var pixels = self.width * self.height;
        // Each halving of both sides divides the pixel count by four.
        while (pixels > limit) : (shift += 1) {
            pixels >>= 2;
        }
        return .{
            .width = self.width >> shift,
            .height = self.height >> shift,
        };
    }

    /// Multiply both dimensions by `factor`.
    pub fn scale(self: Self, factor: usize) Self {
        return init(self.width * factor, self.height * factor);
    }

    /// Orientation with height >= width, swapping dimensions if needed.
    pub fn portrait(self: Self) Self {
        if (self.height >= self.width) return self;
        return init(self.height, self.width);
    }

    /// Orientation with width >= height, swapping dimensions if needed.
    pub fn landscape(self: Self) Self {
        if (self.width >= self.height) return self;
        return init(self.height, self.width);
    }
};
lib/resolutions.zig
const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;

/// Returns `true` if the bit at index `bit` is set (equals 1).
/// Note: that index 0 is the least significant bit, while index `length() - 1` is the most significant bit.
///
/// ```zig
/// const a: u8 = 0b00000010;
///
/// try testing.expect(!isBitSet(a, 0));
/// try testing.expect(isBitSet(a, 1));
/// ```
pub fn isBitSet(target: anytype, comptime bit: comptime_int) bool {
    const TargetType = @TypeOf(target);
    const MaskType = std.meta.Int(.unsigned, bit + 1);
    // maxInt shifted left by `bit` in a (bit + 1)-wide type leaves exactly the
    // top bit, i.e. 1 << bit.
    const mask: MaskType = comptime blk: {
        var temp: MaskType = std.math.maxInt(MaskType);
        temp <<= bit;
        break :blk temp;
    };
    // Compile-time validation: only unsigned integers (or non-negative
    // comptime_ints) wide enough to contain `bit` are accepted.
    comptime {
        if (@typeInfo(TargetType) == .Int) {
            if (@typeInfo(TargetType).Int.signedness != .unsigned) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
            if (bit >= @bitSizeOf(TargetType)) {
                @compileError("bit index is out of bounds of the bit field");
            }
        } else if (@typeInfo(TargetType) == .ComptimeInt) {
            if (target < 0) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
        } else {
            @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
        }
    }
    return @truncate(MaskType, target) & mask != 0;
}

test "isBitSet" {
    const a: u8 = 0b00000000;
    try testing.expect(!isBitSet(a, 0));
    try testing.expect(!isBitSet(a, 1));

    const b: u8 = 0b11111111;
    try testing.expect(isBitSet(b, 0));
    try testing.expect(isBitSet(b, 1));

    const c: u8 = 0b00000010;
    try testing.expect(!isBitSet(c, 0));
    try testing.expect(isBitSet(c, 1));
}

test "isBitSet - comptime_int" {
    const a = 0b00000000;
    try testing.expect(!isBitSet(a, 0));
    try testing.expect(!isBitSet(a, 1));

    const b = 0b11111111;
    try testing.expect(isBitSet(b, 0));
    try testing.expect(isBitSet(b, 1));

    const c = 0b00000010;
    try testing.expect(!isBitSet(c, 0));
    try testing.expect(isBitSet(c, 1));
}

/// Get the value of the bit at index `bit`.
/// Note: that index 0 is the least significant bit, while index `length() - 1` is the most significant bit.
///
/// ```zig
/// const a: u8 = 0b00000010;
///
/// try testing.expect(getBit(a, 0) == 0);
/// try testing.expect(getBit(a, 1) == 1);
/// ```
pub fn getBit(target: anytype, comptime bit: comptime_int) u1 {
    return @boolToInt(isBitSet(target, bit));
}

test "getBit" {
    const a: u8 = 0b00000000;
    try testing.expectEqual(@as(u1, 0), getBit(a, 0));
    try testing.expectEqual(@as(u1, 0), getBit(a, 1));

    const b: u8 = 0b11111111;
    try testing.expectEqual(@as(u1, 1), getBit(b, 0));
    try testing.expectEqual(@as(u1, 1), getBit(b, 1));

    const c: u8 = 0b00000010;
    try testing.expectEqual(@as(u1, 0), getBit(c, 0));
    try testing.expectEqual(@as(u1, 1), getBit(c, 1));
}

test "getBit - comptime_int" {
    const a = 0b00000000;
    try testing.expectEqual(@as(u1, 0), getBit(a, 0));
    try testing.expectEqual(@as(u1, 0), getBit(a, 1));

    const b = 0b11111111;
    try testing.expectEqual(@as(u1, 1), getBit(b, 0));
    try testing.expectEqual(@as(u1, 1), getBit(b, 1));

    const c = 0b00000010;
    try testing.expectEqual(@as(u1, 0), getBit(c, 0));
    try testing.expectEqual(@as(u1, 1), getBit(c, 1));
}

/// Obtains the `number_of_bits` bits starting at `start_bit`
/// Where `start_bit` is the lowest significant bit to fetch
///
/// ```zig
/// const a: u8 = 0b01101100;
/// const b = getBits(a, 2, 4);
/// try testing.expectEqual(@as(u4,0b1011), b);
/// ```
pub fn getBits(target: anytype, comptime start_bit: comptime_int, comptime number_of_bits: comptime_int) std.meta.Int(.unsigned, number_of_bits) {
    const TargetType = @TypeOf(target);
    const ReturnType = std.meta.Int(.unsigned, number_of_bits);

    // Compile-time validation of the requested bit range against the type.
    comptime {
        if (number_of_bits == 0) @compileError("non-zero number_of_bits must be provided");

        if (@typeInfo(TargetType) == .Int) {
            if (@typeInfo(TargetType).Int.signedness != .unsigned) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
            if (start_bit >= @bitSizeOf(TargetType)) {
                @compileError("start_bit index is out of bounds of the bit field");
            }
            if (start_bit + number_of_bits > @bitSizeOf(TargetType)) {
                @compileError("start_bit + number_of_bits is out of bounds of the bit field");
            }
        } else if (@typeInfo(TargetType) == .ComptimeInt) {
            if (target < 0) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
        } else {
            @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
        }
    }

    // Shift the range down to bit 0 and truncate to exactly `number_of_bits`.
    return @truncate(ReturnType, target >> start_bit);
}

test "getBits" {
    const a: u8 = 0b01101100;
    const b = getBits(a, 2, 4);
    try testing.expectEqual(@as(u4, 0b1011), b);
}

test "getBits - comptime_int" {
    const a = 0b01101100;
    const b = getBits(a, 2, 4);
    try testing.expectEqual(@as(u4, 0b1011), b);
}

/// Sets the bit at the index `bit` to the value `value` (where true means a value of '1' and false means a value of '0')
/// Note: that index 0 is the least significant bit, while index `length() - 1` is the most significant bit.
///
/// ```zig
/// var val: u8 = 0b00000000;
/// try testing.expect(!isBitSet(val, 0));
/// setBit(&val, 0, true);
/// try testing.expect(isBitSet(val, 0));
/// ```
pub fn setBit(target: anytype, comptime bit: comptime_int, value: bool) void {
    const ptr_type_info: std.builtin.TypeInfo = @typeInfo(@TypeOf(target));
    comptime {
        if (ptr_type_info != .Pointer) @compileError("not a pointer");
    }

    const TargetType = ptr_type_info.Pointer.child;

    comptime {
        if (@typeInfo(TargetType) == .Int) {
            if (@typeInfo(TargetType).Int.signedness != .unsigned) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
            if (bit >= @bitSizeOf(TargetType)) {
                @compileError("bit index is out of bounds of the bit field");
            }
        } else if (@typeInfo(TargetType) == .ComptimeInt) {
            @compileError("comptime_int is unsupported");
        } else {
            @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
        }
    }

    const MaskType = std.meta.Int(.unsigned, bit + 1);
    const mask: MaskType = comptime blk: {
        var temp: MaskType = 1;
        temp <<= bit;
        break :blk temp;
    };

    if (value) {
        target.* |= mask;
    } else {
        // BUGFIX: widen the mask to the target's full type *before* inverting.
        // Inverting the narrow MaskType and letting the result zero-extend on
        // coercion would clear every bit above `bit` as well.
        target.* &= ~@as(TargetType, mask);
    }
}

test "setBit" {
    var val: u8 = 0b00000000;
    try testing.expect(!isBitSet(val, 0));
    setBit(&val, 0, true);
    try testing.expect(isBitSet(val, 0));
    setBit(&val, 0, false);
    try testing.expect(!isBitSet(val, 0));

    // Regression: clearing one bit must leave all other bits untouched.
    var wide: u8 = 0b11111111;
    setBit(&wide, 0, false);
    try testing.expectEqual(@as(u8, 0b11111110), wide);
}

/// Sets the range of bits starting at `start_bit` upto and excluding `start_bit` + `number_of_bits`
/// to be specific, if the range is N bits long, the N lower bits of `value` will be used; if any of
/// the other bits in `value` are set to 1, this function will panic.
///
/// ```zig
/// var val: u8 = 0b10000000;
/// setBits(&val, 2, 4, 0b00001101);
/// try testing.expectEqual(@as(u8, 0b10110100), val);
/// ```
///
/// ## Panics
/// This method will panic if the `value` exceeds the bit range of the type of `target`
pub fn setBits(target: anytype, comptime start_bit: comptime_int, comptime number_of_bits: comptime_int, value: anytype) void {
    const ptr_type_info: std.builtin.TypeInfo = @typeInfo(@TypeOf(target));
    comptime {
        if (ptr_type_info != .Pointer) @compileError("not a pointer");
    }

    const TargetType = ptr_type_info.Pointer.child;
    const end_bit = start_bit + number_of_bits;

    comptime {
        if (number_of_bits == 0) @compileError("non-zero number_of_bits must be provided");

        if (@typeInfo(TargetType) == .Int) {
            if (@typeInfo(TargetType).Int.signedness != .unsigned) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
            if (start_bit >= @bitSizeOf(TargetType)) {
                @compileError("start_bit index is out of bounds of the bit field");
            }
            if (end_bit > @bitSizeOf(TargetType)) {
                @compileError("start_bit + number_of_bits is out of bounds of the bit field");
            }
        } else if (@typeInfo(TargetType) == .ComptimeInt) {
            @compileError("comptime_int is unsupported");
        } else {
            @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
        }
    }

    const peer_value = @as(TargetType, value);

    // Runtime guard: reject values with bits set outside the destination range.
    if (std.debug.runtime_safety) {
        if (getBits(peer_value, 0, (end_bit - start_bit)) != peer_value) @panic("value exceeds bit range");
    }

    // Mask with zeros over [start_bit, end_bit) and ones everywhere else.
    const bitmask: TargetType = comptime blk: {
        var bitmask = ~@as(TargetType, 0);
        bitmask <<= (@bitSizeOf(TargetType) - end_bit);
        bitmask >>= (@bitSizeOf(TargetType) - end_bit);
        bitmask >>= start_bit;
        bitmask <<= start_bit;
        break :blk ~bitmask;
    };

    target.* = (target.* & bitmask) | (peer_value << start_bit);
}

test "setBits" {
    var val: u8 = 0b10000000;
    setBits(&val, 2, 4, 0b00001101);
    try testing.expectEqual(@as(u8, 0b10110100), val);
}

// Maps `*T`-style pointer types onto the same pointer flavor of `NewT`,
// preserving const/volatile qualifiers.
inline fn PtrCastPreserveCV(comptime T: type, comptime PtrToT: type, comptime NewT: type) type {
    return switch (PtrToT) {
        *T => *NewT,
        *const T => *const NewT,
        *volatile T => *volatile NewT,
        *const volatile T => *const volatile NewT,
        else => @compileError("invalid type " ++ @typeName(PtrToT) ++ " given to PtrCastPreserveCV"),
    };
}

/// A `num_bits`-wide field at offset `shift_amount` inside a backing integer,
/// intended for use inside an `extern union` overlaying the raw value.
pub fn Bitfield(comptime FieldType: type, comptime shift_amount: usize, comptime num_bits: usize) type {
    if (shift_amount + num_bits > @bitSizeOf(FieldType)) {
        @compileError("bitfield doesn't fit");
    }

    const self_mask: FieldType = ((1 << num_bits) - 1) << shift_amount;

    const ValueType = std.meta.Int(.unsigned, num_bits);

    return extern struct {
        dummy: FieldType,

        const Self = @This();

        // This function uses `anytype` in order to support both const and non-const pointers
        inline fn field(self: anytype) PtrCastPreserveCV(Self, @TypeOf(self), FieldType) {
            return @ptrCast(PtrCastPreserveCV(Self, @TypeOf(self), FieldType), self);
        }

        pub fn write(self: *Self, val: ValueType) void {
            self.field().* &= ~self_mask;
            self.field().* |= @intCast(FieldType, val) << shift_amount;
        }

        pub fn read(self: Self) ValueType {
            const val: FieldType = self.field().*;
            return @intCast(ValueType, (val & self_mask) >> shift_amount);
        }
    };
}

test "bitfield" {
    const S = extern union {
        low: Bitfield(u32, 0, 16),
        high: Bitfield(u32, 16, 16),
        val: u32,
    };

    try std.testing.expect(@sizeOf(S) == 4);
    try std.testing.expect(@bitSizeOf(S) == 32);

    var s: S = .{ .val = 0x13376969 };

    try std.testing.expect(s.low.read() == 0x6969);
    try std.testing.expect(s.high.read() == 0x1337);

    s.low.write(0x1337);
    s.high.write(0x6969);

    try std.testing.expect(s.val == 0x69691337);
}

/// A single bit at offset `shift_amount`, read/written as a `u1`.
pub fn Bit(comptime FieldType: type, comptime shift_amount: usize) type {
    const self_bit: FieldType = (1 << shift_amount);

    return extern struct {
        bits: Bitfield(FieldType, shift_amount, 1),

        const Self = @This();

        fn set(self: *Self) void {
            self.bits.field().* |= self_bit;
        }

        fn unset(self: *Self) void {
            self.bits.field().* &= ~self_bit;
        }

        pub fn read(self: Self) u1 {
            return @truncate(u1, self.bits.field().* >> shift_amount);
        }

        // Since these are mostly used with MMIO, I want to avoid
        // reading the memory just to write it again, also races
        pub fn write(self: *Self, val: u1) void {
            if (@bitCast(bool, val)) {
                self.set();
            } else {
                self.unset();
            }
        }

        comptime {
            std.testing.refAllDecls(@This());
        }
    };
}

test "bit" {
    const S = extern union {
        low: Bit(u32, 0),
        high: Bit(u32, 1),
        val: u32,
    };

    try std.testing.expect(@sizeOf(S) == 4);
    try std.testing.expect(@bitSizeOf(S) == 32);

    var s: S = .{ .val = 1 };

    try std.testing.expect(s.low.read() == 1);
    try std.testing.expect(s.high.read() == 0);

    s.low.write(0);
    s.high.write(1);

    try std.testing.expect(s.val == 2);
}

/// A single bit at offset `shift_amount`, read/written as a `bool`.
pub fn Boolean(comptime FieldType: type, comptime shift_amount: usize) type {
    const self_bit: FieldType = (1 << shift_amount);

    return extern struct {
        bits: Bitfield(FieldType, shift_amount, 1),

        const Self = @This();

        fn set(self: *Self) void {
            self.bits.field().* |= self_bit;
        }

        fn unset(self: *Self) void {
            self.bits.field().* &= ~self_bit;
        }

        pub fn read(self: Self) bool {
            return @bitCast(bool, @truncate(u1, self.bits.field().* >> shift_amount));
        }

        // Since these are mostly used with MMIO, I want to avoid
        // reading the memory just to write it again, also races
        pub fn write(self: *Self, val: bool) void {
            if (val) {
                self.set();
            } else {
                self.unset();
            }
        }

        comptime {
            std.testing.refAllDecls(@This());
        }
    };
}

test "boolean" {
    const S = extern union {
        low: Boolean(u32, 0),
        high: Boolean(u32, 1),
        val: u32,
    };

    try std.testing.expect(@sizeOf(S) == 4);
    try std.testing.expect(@bitSizeOf(S) == 32);

    var s: S = .{ .val = 2 };

    try std.testing.expect(s.low.read() == false);
    try std.testing.expect(s.high.read() == true);

    s.low.write(true);
    s.high.write(false);

    try std.testing.expect(s.val == 1);
}

comptime {
    std.testing.refAllDecls(@This());
}
src/internal/bitjuggle.zig
// A minimal forward renderer: one shader program that mixes a single texture
// with per-vertex colors by a configurable factor. This file is itself the
// renderer struct (`Self = @This()`).
const std = @import("std");
const assert = std.debug.assert;
const Camera = @import("Camera.zig");
const Light = @import("Light.zig");
const Material = @import("Material.zig");
const Mesh = @import("Mesh.zig");
const Renderer = @import("Renderer.zig");
const zp = @import("../../zplay.zig");
const drawcall = zp.graphics.common.drawcall;
const ShaderProgram = zp.graphics.common.ShaderProgram;
const VertexArray = zp.graphics.common.VertexArray;
const alg = zp.deps.alg;
const Vec2 = alg.Vec2;
const Vec3 = alg.Vec3;
const Vec4 = alg.Vec4;
const Mat4 = alg.Mat4;
const Self = @This();

/// vertex attribute locations
pub const ATTRIB_LOCATION_POS = 0;
pub const ATTRIB_LOCATION_TEX = 1;
pub const ATTRIB_LOCATION_COLOR = 2;

// Vertex shader: standard MVP transform; forwards world-space position,
// texcoord and vertex color to the fragment stage.
const vs =
    \\#version 330 core
    \\layout (location = 0) in vec3 a_pos;
    \\layout (location = 1) in vec2 a_tex;
    \\layout (location = 2) in vec4 a_color;
    \\
    \\uniform mat4 u_model;
    \\uniform mat4 u_view;
    \\uniform mat4 u_project;
    \\
    \\out vec3 v_pos;
    \\out vec2 v_tex;
    \\out vec4 v_color;
    \\
    \\void main()
    \\{
    \\    gl_Position = u_project * u_view * u_model * vec4(a_pos, 1.0);
    \\    v_pos = vec3(u_model * vec4(a_pos, 1.0));
    \\    v_tex = a_tex;
    \\    v_color = a_color;
    \\}
;

// Fragment shader: blends the sampled texture with the vertex color by
// `u_mix_factor` (0 = texture only, 1 = vertex color only).
const fs =
    \\#version 330 core
    \\out vec4 frag_color;
    \\
    \\in vec3 v_pos;
    \\in vec2 v_tex;
    \\in vec4 v_color;
    \\
    \\uniform sampler2D u_texture;
    \\uniform float u_mix_factor;
    \\
    \\void main()
    \\{
    \\    frag_color = mix(texture(u_texture, v_tex), v_color, u_mix_factor);
    \\}
;

/// lighting program
program: ShaderProgram = undefined,

/// set factor used to mix texture and vertex's colors
mix_factor: f32 = 0,

/// create a simple renderer
pub fn init() Self {
    return .{
        .program = ShaderProgram.init(vs, fs),
    };
}

/// free resources
pub fn deinit(self: *Self) void {
    self.program.deinit();
}

/// get renderer
pub fn renderer(self: *Self) Renderer {
    return Renderer.init(self, begin, end, render, renderMesh);
}

/// begin rendering: activate the shader program.
fn begin(self: *Self) void {
    self.program.use();
}

/// end rendering: deactivate the shader program.
fn end(self: *Self) void {
    self.program.disuse();
}

/// use material data
/// Only materials carrying a texture are supported; anything else panics.
fn applyMaterial(self: *Self, material: Material) void {
    switch (material.data) {
        .phong => |m| {
            // Only the diffuse map is used; lighting terms are ignored here.
            self.program.setUniformByName("u_texture", m.diffuse_map.tex.getTextureUnit());
        },
        .single_texture => |t| {
            self.program.setUniformByName("u_texture", t.tex.getTextureUnit());
        },
        else => {
            std.debug.panic("unsupported material type", .{});
        },
    }
}

/// render geometries
/// Draws from an already-configured vertex array; `begin` must have been
/// called first or `error.RendererNotActive` is returned.
fn render(
    self: *Self,
    vertex_array: VertexArray,
    use_elements: bool,
    primitive: drawcall.PrimitiveType,
    offset: u32,
    count: u32,
    model: Mat4,
    projection: Mat4,
    camera: ?Camera,
    material: ?Material,
    instance_count: ?u32,
) !void {
    if (!self.program.isUsing()) {
        return error.RendererNotActive;
    }

    // set uniforms; without a camera the view transform falls back to identity
    self.program.setUniformByName("u_model", model);
    self.program.setUniformByName("u_project", projection);
    if (camera) |c| {
        self.program.setUniformByName("u_view", c.getViewMatrix());
    } else {
        self.program.setUniformByName("u_view", Mat4.identity());
    }
    self.program.setUniformByName("u_mix_factor", self.mix_factor);
    if (material) |mr| {
        self.applyMaterial(mr);
    }

    // issue draw call
    vertex_array.use();
    defer vertex_array.disuse();
    if (use_elements) {
        // NOTE(review): element indices are assumed to be u32 — confirm against
        // how callers fill the element buffer.
        drawcall.drawElements(primitive, offset, count, u32, instance_count);
    } else {
        drawcall.drawBuffer(primitive, offset, count, instance_count);
    }
}

/// Draws a `Mesh`, wiring its vertex buffers to this renderer's attribute
/// locations before issuing the draw call. Same preconditions as `render`.
fn renderMesh(
    self: *Self,
    mesh: Mesh,
    model: Mat4,
    projection: Mat4,
    camera: ?Camera,
    material: ?Material,
    instance_count: ?u32,
) !void {
    if (!self.program.isUsing()) {
        return error.RendererNotActive;
    }

    mesh.vertex_array.use();
    defer mesh.vertex_array.disuse();

    // attribute settings; texcoords and colors are optional per-mesh
    mesh.vertex_array.setAttribute(Mesh.vbo_positions, ATTRIB_LOCATION_POS, 3, f32, false, 0, 0);
    if (mesh.texcoords != null) {
        mesh.vertex_array.setAttribute(Mesh.vbo_texcoords, ATTRIB_LOCATION_TEX, 2, f32, false, 0, 0);
    }
    if (mesh.colors != null) {
        mesh.vertex_array.setAttribute(Mesh.vbo_colors, ATTRIB_LOCATION_COLOR, 4, f32, false, 0, 0);
    }

    // set uniforms; without a camera the view transform falls back to identity
    self.program.setUniformByName("u_model", model);
    self.program.setUniformByName("u_project", projection);
    if (camera) |c| {
        self.program.setUniformByName("u_view", c.getViewMatrix());
    } else {
        self.program.setUniformByName("u_view", Mat4.identity());
    }
    self.program.setUniformByName("u_mix_factor", self.mix_factor);
    if (material) |mr| {
        self.applyMaterial(mr);
    }

    // issue draw call: indexed when the mesh has indices, linear otherwise
    if (mesh.indices) |ids| {
        drawcall.drawElements(mesh.primitive_type, 0, @intCast(u32, ids.items.len), u32, instance_count);
    } else {
        drawcall.drawBuffer(mesh.primitive_type, 0, @intCast(u32, mesh.positions.items.len), instance_count);
    }
}
src/graphics/3d/SimpleRenderer.zig
const std = @import("std");
const testing = std.testing;

const cats = @import("../ziglyph.zig").derived_general_category;
const numeric = @import("../ziglyph.zig").derived_numeric_type;
const props = @import("../ziglyph.zig").prop_list;

/// `isDecimal` detects all Unicode decimal numbers.
pub fn isDecimal(cp: u21) bool {
    return switch (cp) {
        // ASCII fast path.
        '0'...'9' => true,
        else => numeric.isDecimal(cp),
    };
}

/// `isDigit` detects all Unicode digits.
pub fn isDigit(cp: u21) bool {
    return switch (cp) {
        // ASCII fast path.
        '0'...'9' => true,
        else => numeric.isDigit(cp) or isDecimal(cp),
    };
}

/// `isAsciiDigit` detects ASCII only digits.
pub fn isAsciiDigit(cp: u21) bool {
    return switch (cp) {
        '0'...'9' => true,
        else => false,
    };
}

/// `isHexDigit` detects the 16 ASCII characters 0-9 A-F, and a-f,
/// plus any other code points with the Hex_Digit property.
pub fn isHexDigit(cp: u21) bool {
    return switch (cp) {
        // ASCII fast path.
        'a'...'f', 'A'...'F', '0'...'9' => true,
        else => props.isHexDigit(cp),
    };
}

/// `isAsciiHexDigit` detects ASCII only hexadecimal digits.
pub fn isAsciiHexDigit(cp: u21) bool {
    return switch (cp) {
        'a'...'f', 'A'...'F', '0'...'9' => true,
        else => false,
    };
}

/// `isNumber` covers all Unicode numbers, not just ASCII.
pub fn isNumber(cp: u21) bool {
    return switch (cp) {
        // ASCII fast path.
        '0'...'9' => true,
        else => isDecimal(cp) or isDigit(cp) or cats.isLetterNumber(cp) or cats.isOtherNumber(cp),
    };
}

/// `isAsciiNumber` detects ASCII only numbers.
pub fn isAsciiNumber(cp: u21) bool {
    return switch (cp) {
        '0'...'9' => true,
        else => false,
    };
}

test "number isDecimal" {
    var digit: u21 = '0';
    while (digit <= '9') : (digit += 1) {
        try testing.expect(isDecimal(digit));
        try testing.expect(isAsciiDigit(digit));
        try testing.expect(isAsciiNumber(digit));
    }

    try testing.expect(!isDecimal('\u{0003}'));
    try testing.expect(!isDecimal('A'));
}

test "number isHexDigit" {
    var digit: u21 = '0';
    while (digit <= '9') : (digit += 1) {
        try testing.expect(isHexDigit(digit));
    }

    try testing.expect(!isHexDigit('\u{0003}'));
    try testing.expect(!isHexDigit('Z'));
}

test "number isNumber" {
    var digit: u21 = '0';
    while (digit <= '9') : (digit += 1) {
        try testing.expect(isNumber(digit));
    }

    try testing.expect(!isNumber('\u{0003}'));
    try testing.expect(!isNumber('A'));
}
.gyro/ziglyph-jecolon-github.com-c37d93b6/pkg/src/category/number.zig
// Tests for the "grains" exercise. All assertion helpers in `std.testing`
// return error unions, so every call must be `try`ed; `expected` values are
// annotated as `u64` so `expectEqual` compares at the runtime type instead of
// `comptime_int`.
const std = @import("std");
const testing = std.testing;
const grains = @import("grains.zig");
const ChessboardError = grains.ChessboardError;

test "grains on square 1" {
    const expected: u64 = 1;
    const actual = comptime try grains.square(1);
    try testing.expectEqual(expected, actual);
}

test "grains on square 2" {
    const expected: u64 = 2;
    const actual = comptime try grains.square(2);
    try testing.expectEqual(expected, actual);
}

test "grains on square 3" {
    const expected: u64 = 4;
    const actual = comptime try grains.square(3);
    try testing.expectEqual(expected, actual);
}

test "grains on square 4" {
    const expected: u64 = 8;
    const actual = comptime try grains.square(4);
    try testing.expectEqual(expected, actual);
}

test "grains on square 16" {
    const expected: u64 = 32768;
    const actual = comptime try grains.square(16);
    try testing.expectEqual(expected, actual);
}

test "grains on square 32" {
    const expected: u64 = 2147483648;
    const actual = comptime try grains.square(32);
    try testing.expectEqual(expected, actual);
}

test "grains on square 64" {
    const expected: u64 = 9223372036854775808;
    const actual = comptime try grains.square(64);
    try testing.expectEqual(expected, actual);
}

test "square 0 raises an exception" {
    const expected = ChessboardError.IndexOutOfBounds;
    const actual = comptime grains.square(0);
    try testing.expectError(expected, actual);
}

test "negative square raises an expection" {
    const expected = ChessboardError.IndexOutOfBounds;
    const actual = comptime grains.square(-1);
    try testing.expectError(expected, actual);
}

test "square greater than 64 raises an exception" {
    const expected = ChessboardError.IndexOutOfBounds;
    const actual = comptime grains.square(65);
    try testing.expectError(expected, actual);
}

test "returns the total number of grains on the board" {
    // 2^64 - 1, i.e. maxInt(u64).
    const expected: u64 = 18446744073709551615;
    const actual = comptime grains.total();
    try testing.expectEqual(expected, actual);
}
exercises/practice/grains/test_grains.zig
const std = @import("std");
const text = @import("text.zig");

const debug = std.debug;
const heap = std.heap;
const io = std.io;
const math = std.math;
const mem = std.mem;
const testing = std.testing;

const Content = Text.Content;
const Cursor = text.Cursor;
const Cursors = Text.Cursors;
const Location = text.Location;
const Text = text.Text;

// Test conventions used throughout this file:
// * Every `before`/`after` string is a template parsed by `makeText` below:
//   "[" and "]" bracket a cursor's selection, so "a[]b" is an empty cursor
//   between 'a' and 'b', and "[ab]" is a cursor selecting "ab".
// * `expect` renders the resulting Text back into the same template form and
//   panics with the test label on mismatch.
// NOTE(review): the pattern `&heap.FixedBufferAllocator.init(&buf).allocator`
// takes the address of a field of a temporary; this was accepted by the Zig
// version this file targets, but is a dangling-pointer hazard — verify
// against the project's pinned compiler.

test "moveCursors" {
    var buf: [1024 * 1024]u8 = undefined;
    // Table entry: move every cursor `amount` steps in `dir`, moving either
    // the index, the selection, or both ends.
    const TestCast = struct {
        amount: usize,
        to_move: Cursor.ToMove,
        dir: Cursor.Direction,
        before: []const u8,
        after: []const u8,

        fn init(amount: usize, to_move: Cursor.ToMove, dir: Cursor.Direction, before: []const u8, after: []const u8) @This() {
            return @This(){
                .amount = amount,
                .to_move = to_move,
                .dir = dir,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCast{
        TestCast.init(1, .Both, .Left, "[]a[]b[c]d[ef]", "[]ab[]cde[]f"),
        TestCast.init(1, .Index, .Left, "[]a[]b[c]d[ef]", "[a]b[]cd[e]f"),
        TestCast.init(1, .Selection, .Left, "[]a[]b[c]d[ef]", "[abcdef]"),
        TestCast.init(1, .Both, .Right, "[]a[]b[c]d[ef]", "a[]b[]cd[]ef[]"),
        TestCast.init(1, .Index, .Right, "[]a[]b[c]d[ef]", "[abcdef]"),
        TestCast.init(1, .Selection, .Right, "[]a[]b[c]d[ef]", "[ab]c[]de[f]"),
        TestCast.init(1, .Both, .Down,
            \\a[]bc
            \\defg
            \\h
        ,
            \\abc
            \\d[]efg
            \\h
        ),
        TestCast.init(1, .Both, .Down,
            \\abc
            \\def[g]
            \\h
        ,
            \\abc
            \\defg
            \\h[]
        ),
        TestCast.init(1, .Both, .Down,
            \\abc
            \\defg
            \\[]h
        ,
            \\abc
            \\defg
            \\h[]
        ),
        TestCast.init(1, .Both, .Down,
            \\a[]bc
            \\def[g]
            \\[]h
        ,
            \\abc
            \\d[]efg
            \\h[]
        ),
        TestCast.init(1, .Index, .Down,
            \\a[]bc
            \\def[g]
            \\[]h
        ,
            \\a[bc
            \\d]ef[g
            \\h]
        ),
        TestCast.init(1, .Selection, .Down,
            \\a[]bc
            \\def[g]
            \\[]h
        ,
            \\a[bc
            \\d]efg[
            \\h]
        ),
        TestCast.init(1, .Both, .Up,
            \\a[]bc
            \\def[g]
            \\[]h
        ,
            \\[]abc[]
            \\[]defg
            \\h
        ),
        TestCast.init(1, .Index, .Up,
            \\a[]bc
            \\def[g]
            \\[]h
        ,
            \\[a]bc[
            \\defg
            \\]h
        ),
        TestCast.init(1, .Selection, .Up,
            \\a[]bc
            \\def[g]
            \\[]h
        ,
            \\[a]bc[
            \\defg
            \\]h
        ),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).moveCursors(case.amount, case.to_move, case.dir);
        expect(@tagName(case.to_move) ++ " " ++ @tagName(case.dir), t, case.after);
    }
}

test "spawnCursor" {
    var buf: [1024 * 1024]u8 = undefined;
    // Spawning creates a new cursor adjacent to an existing one in `dir`;
    // spawning where a cursor already exists is a no-op (see the repeated
    // before == after cases).
    const TestCast = struct {
        dir: Cursor.Direction,
        before: []const u8,
        after: []const u8,

        fn init(dir: Cursor.Direction, before: []const u8, after: []const u8) @This() {
            return @This(){
                .dir = dir,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCast{
        TestCast.init(.Left, "a[]b[]d[ef]", "[]a[]b[]d[ef]"),
        TestCast.init(.Left, "[]a[]b[]d[ef]", "[]a[]b[]d[ef]"),
        TestCast.init(.Right, "a[]b[]d[e]f", "a[]b[]d[e]f[]"),
        TestCast.init(.Right, "a[]b[]d[e]f[]", "a[]b[]d[e]f[]"),
        TestCast.init(.Up,
            \\abc
            \\defg[]
            \\[]h
        ,
            \\abc[]
            \\defg[]
            \\[]h
        ),
        TestCast.init(.Up,
            \\abc[]
            \\defg
            \\[]h
        ,
            \\[]abc[]
            \\defg
            \\[]h
        ),
        TestCast.init(.Up,
            \\[]abc[]
            \\defg
            \\[]h
        ,
            \\[]abc[]
            \\defg
            \\[]h
        ),
        TestCast.init(.Down,
            \\[]abc
            \\defg[]
            \\h
        ,
            \\[]abc
            \\defg[]
            \\h[]
        ),
        TestCast.init(.Down,
            \\[]abc
            \\defg[]
            \\[]h
        ,
            \\[]abc
            \\defg[]
            \\[]h[]
        ),
        TestCast.init(.Down,
            \\[]abc
            \\defg[]
            \\h[]
        ,
            \\[]abc
            \\defg[]
            \\h[]
        ),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).spawnCursor(case.dir);
        expect(@tagName(case.dir), t, case.after);
    }
}

test "removeAllButMainCursor" {
    var buf: [1024 * 1024]u8 = undefined;
    const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
    // Only the last cursor ("main") survives.
    const t = makeText(allocator,
        \\a[]bc
        \\[]ab[]cd[]e
        \\fgh[]i
    ).removeAllButMainCursor();
    expect("", t,
        \\abc
        \\abcde
        \\fgh[]i
    );
}

test "delete" {
    var buf: [1024 * 1024]u8 = undefined;
    const TestCast = struct {
        dir: Text.DeleteDir,
        before: []const u8,
        after: []const u8,

        fn init(dir: Text.DeleteDir, before: []const u8, after: []const u8) @This() {
            return @This(){
                .dir = dir,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCast{
        TestCast.init(.Left, "a[]b[]d[ef]", "[]d[]"),
        TestCast.init(.Right, "a[]b[]d[ef]", "a[]"),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).delete(case.dir);
        expect(@tagName(case.dir), t, case.after);
    }
}

test "insert" {
    var buf: [1024 * 1024]u8 = undefined;
    // `insert` puts the same string at every cursor, replacing selections.
    const TestCast = struct {
        str: []const u8,
        before: []const u8,
        after: []const u8,

        fn init(str: []const u8, before: []const u8, after: []const u8) @This() {
            return @This(){
                .str = str,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCast{
        TestCast.init("a", "a[]b[]d[ef]", "aa[]ba[]da[]"),
        TestCast.init("aabbcc", "a[]b[]d[ef]", "aaabbcc[]baabbcc[]daabbcc[]"),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).insert(case.str);
        expect(case.str, t, case.after);
    }
}

test "paste" {
    var buf: [1024 * 1024]u8 = undefined;
    // `paste` behaves like `insert`, except a multi-line string is split and
    // distributed one line per cursor (see the "a\nb\nc" case).
    const TestCast = struct {
        str: []const u8,
        before: []const u8,
        after: []const u8,

        fn init(str: []const u8, before: []const u8, after: []const u8) @This() {
            return @This(){
                .str = str,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCast{
        TestCast.init("a", "a[]b[]d[ef]", "aa[]ba[]da[]"),
        TestCast.init("aabbcc", "a[]b[]d[ef]", "aaabbcc[]baabbcc[]daabbcc[]"),
        TestCast.init("a\nb\nc", "a[]b[]d[ef]", "aa[]bb[]dc[]"),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).paste(case.str);
        expect(case.str, t, case.after);
    }
}

test "insertText" {
    var buf: [1024 * 1024]u8 = undefined;
    // `insertText` inserts the other Text's full content at every cursor.
    const TestCase = struct {
        text: []const u8,
        before: []const u8,
        after: []const u8,

        fn init(t: []const u8, before: []const u8, after: []const u8) @This() {
            return @This(){
                .text = t,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCase{
        TestCase.init("[ab]", "a[]b[]d[ef]", "aab[]bab[]dab[]"),
        TestCase.init("[a]a[c]b[e]", "a[]b[]d[ef]", "aace[]bace[]dace[]"),
        TestCase.init("[]", "a[]b[]d[ef]", "a[]b[]d[]"),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).insertText(makeText(allocator, case.text));
        expect(case.text, t, case.after);
    }
}

test "pasteText" {
    var buf: [1024 * 1024]u8 = undefined;
    // `pasteText` distributes the source Text's selections one per cursor,
    // rather than inserting everything at each cursor.
    const TestCase = struct {
        text: []const u8,
        before: []const u8,
        after: []const u8,

        fn init(t: []const u8, before: []const u8, after: []const u8) @This() {
            return @This(){
                .text = t,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCase{
        TestCase.init("[ab]", "a[]b[]d[ef]", "aab[]bab[]dab[]"),
        TestCase.init("[a]a[c]b[e]", "a[]b[]d[ef]", "aa[]bc[]de[]"),
        TestCase.init("[]", "a[]b[]d[ef]", "a[]b[]d[]"),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).pasteText(makeText(allocator, case.text));
        expect(case.text, t, case.after);
    }
}

test "indent" {
    var buf: [1024 * 1024]u8 = undefined;
    // NOTE(review): the leading whitespace inside the multiline literals
    // below may have been lost when this file was reflowed — verify the
    // exact indents against the original file before relying on them.
    const TestCase = struct {
        indent: u8,
        num: usize,
        before: []const u8,
        after: []const u8,

        fn init(indent: u8, num: usize, before: []const u8, after: []const u8) @This() {
            return @This(){
                .indent = indent,
                .num = num,
                .before = before,
                .after = after,
            };
        }
    };
    inline for (comptime [_]TestCase{
        TestCase.init(' ', 4,
            \\[]
            \\ []
            \\ []
            \\ []
            \\ []
        ,
            \\ []
            \\ []
            \\ []
            \\ []
            \\ []
        ),
        TestCase.init(' ', 4,
            \\[aa]
            \\ [aa]
            \\ [aa]
            \\ [aa]
            \\ [aa]
        ,
            \\ []
            \\ []
            \\ []
            \\ []
            \\ []
        ),
    }) |case| {
        const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
        const t = try makeText(allocator, case.before).indent(case.indent, case.num);
        expect(try std.fmt.allocPrint(allocator, "{} {}", .{ case.indent, case.num }), t, case.after);
    }
}

test "cursor columns are consistent" {
    var buf: [1024 * 1024]u8 = undefined;
    const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
    // Horizontal movement must keep the cached column of both cursor ends in
    // sync with the actual index position.
    var t = makeText(allocator, "[]aaaaaaaaaaaaaaaaa\nefg\nhij");
    t = try t.moveCursors(1, .Both, .Right);
    expect("", t, "a[]aaaaaaaaaaaaaaaa\nefg\nhij");
    t = try t.moveCursors(1, .Both, .Right);
    expect("", t, "aa[]aaaaaaaaaaaaaaa\nefg\nhij");
    t = try t.moveCursors(1, .Both, .Right);
    expect("", t, "aaa[]aaaaaaaaaaaaaa\nefg\nhij");
    t = try t.moveCursors(1, .Both, .Right);
    expect("", t, "aaaa[]aaaaaaaaaaaaa\nefg\nhij");
    testing.expectEqual(@as(usize, 4), t.mainCursor().index.column);
    testing.expectEqual(@as(usize, 4), t.mainCursor().selection.column);
    t = try t.moveCursors(1, .Both, .Left);
    expect("", t, "aaa[]aaaaaaaaaaaaaa\nefg\nhij");
    testing.expectEqual(@as(usize, 3), t.mainCursor().index.column);
    testing.expectEqual(@as(usize, 3), t.mainCursor().selection.column);
}

// Render `found` back into template form and panic with `str` if it does not
// match the expected template `e`. Prints an expected/actual diff first.
fn expect(str: []const u8, found: Text, e: []const u8) void {
    var buf: [1024 * 1024]u8 = undefined;
    var sos = io.fixedBufferStream(&buf);
    printText(sos.outStream(), found);
    if (!mem.eql(u8, e, sos.getWritten())) {
        debug.warn("\nTest failed!!!\n", .{});
        debug.warn("########## Expect ##########\n{}\n", .{e});
        debug.warn("########## Actual ##########\n{}\n", .{sos.getWritten()});
        @panic(str);
    }
}

// Write `t` to `stream` in the "[...]" template form used by the tests.
// Cursors are assumed to be ordered and non-overlapping.
fn printText(stream: var, t: Text) void {
    var buf: [1024 * 1024]u8 = undefined;
    const allocator = &heap.FixedBufferAllocator.init(&buf).allocator;
    const content = t.content.toSlice(allocator) catch unreachable;
    const cursors = t.cursors.toSlice(allocator) catch unreachable;
    var offset: usize = 0;
    for (cursors) |cursor| {
        const start = cursor.start().index;
        const end = cursor.end().index;
        stream.print("{}[{}]", .{ content[offset..start], content[start..end] }) catch unreachable;
        offset = end;
    }
    stream.print("{}", .{content[offset..]}) catch unreachable;
}

// Creates 'Text' from a template string. '[' and ']' are used
// to mark the start and end of cursors
// The template is parsed entirely at comptime: the brackets are stripped out
// of the content and their positions recorded as cursor index pairs.
fn makeText(allocator: *mem.Allocator, comptime str: []const u8) Text {
    const Indexs = struct {
        start: usize,
        end: usize,
    };
    comptime var t: []const u8 = "";
    comptime var cursor_indexs: []const Indexs = &[_]Indexs{};
    comptime {
        var offset: usize = 0;
        var tmp = str;
        while (mem.indexOfScalar(u8, tmp, '[')) |start| {
            t = t ++ tmp[0..start];
            tmp = tmp[start + 1 ..];
            const len = mem.indexOfScalar(u8, tmp, ']') orelse @compileError("Unmatched cursor");
            t = t ++ tmp[0..len];
            tmp = tmp[len + 1 ..];
            cursor_indexs = cursor_indexs ++ [_]Indexs{Indexs{
                .start = offset + start,
                .end = offset + start + len,
            }};
            // Advance by the stripped-bracket length so recorded indexes are
            // relative to the bracket-free content.
            offset += len + start;
        }
        t = t ++ tmp;
    }
    const content = Content.fromSlice(allocator, t) catch unreachable;
    var cursors: [cursor_indexs.len]Cursor = undefined;
    for (cursor_indexs) |indexs, i| {
        cursors[i] = Cursor{
            .selection = Location.fromIndex(indexs.start, content),
            .index = Location.fromIndex(indexs.end, content),
        };
    }
    return Text{
        .allocator = allocator,
        .content = content,
        .cursors = Cursors.fromSlice(allocator, &cursors) catch unreachable,
    };
}
src/core/text_tests.zig
const std = @import("std");
const net = std.net;
const fs = std.fs;
const os = std.os;
const system = std.system;
const print = std.debug.print;

// pub const io_mode = .evented

/// Bind a UDP socket to the (currently hard-coded) discovery address and wait
/// for a single server-advertisement datagram, logging every step.
/// Best-effort: all failures are logged and swallowed.
pub fn discover() void {
    print("doing a discover for some servers\n", .{});
    // TODO - run this as a thread, looping forever, and
    // updating the list of servers to connect to
    const family = os.AF.INET;
    const flags = os.SOCK.DGRAM | os.SOCK.CLOEXEC;
    const proto = os.IPPROTO.UDP;

    // Fix: the original passed a zero-length `[]u8` (the literal "") as the
    // receive buffer, so recvfrom could never deliver a payload — and a
    // string literal does not coerce to a mutable slice in the first place.
    var recv_buf: [1024]u8 = undefined;

    if (os.socket(family, flags, proto)) |fd| discover: {
        defer os.closeSocket(fd);
        print("socket fd {}\n", .{fd});
        var addr = net.Address.parseIp4("172.16.31.10", 9090) catch unreachable;
        var remote_addr: net.Address = undefined;
        // Fix: recvfrom requires addrlen to be initialized to the capacity of
        // the sockaddr buffer; the original left it undefined.
        var ra_len: os.socklen_t = @sizeOf(net.Address);
        print("addr {}\n", .{addr});
        if (os.bind(fd, &addr.any, addr.getOsSockLen())) |_| {
            print("bound socket {} to address {}\n", .{ fd, addr });
            // TODO - loop forever, getting adverts for servers and adding them to the list
            // (a previous listen/accept draft was removed: listen/accept do
            // not apply to SOCK.DGRAM sockets).
            if (os.recvfrom(fd, recv_buf[0..], 0, &remote_addr.any, &ra_len)) |got| {
                print("got something ... {}\n", .{got});
                print("Got {} bytes from {}\n", .{ got, remote_addr });
                break :discover;
            } else |err| {
                print("got recv error {}\n", .{err});
                break :discover;
            }
        } else |err| {
            print("got bind error {}\n", .{err});
            break :discover;
        }
    } else |_| {
        print("no discover for you\n", .{});
    }
}
src/disco/discovery.zig
// Lazy cartesian product of two slices.
// NOTE(review): this file targets a very old Zig (pre-0.5): `@typeOf`,
// `var` parameters, and `[]T{...}` array literals. Keep building it with the
// compiler version the project pins.
const std = @import("std");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;

/// Returns the product type for element types A and B: holds the two source
/// slices and exposes an Iterator yielding every (a, b) pair in row-major
/// order (all of list2 for list1[0], then list1[1], ...).
fn productT(comptime A: type, comptime B: type) type {
    return struct {
        const Self = @This();

        list1: []const A,
        list2: []const B,

        pub fn init(list1: []const A, list2: []const B) Self {
            return Self{
                .list1 = list1,
                .list2 = list2,
            };
        }

        /// One pair of the product.
        pub const Tuple = struct {
            _1: A,
            _2: B,
        };

        pub const Iterator = struct {
            list1: []const A,
            list2: []const B,
            // _1/_2 are the current indices into list1/list2.
            _1: usize,
            _2: usize,

            /// Returns the next pair, or null when exhausted (or when either
            /// input is empty).
            pub fn next(self: *Iterator) ?Tuple {
                if (self.list1.len == 0 or self.list2.len == 0) return null;
                if (self._1 == self.list1.len) return null;
                var a = Tuple{ ._1 = self.list1[self._1], ._2 = self.list2[self._2] };
                self._2 = self._2 + 1;
                // Wrapped around list2: advance the outer index.
                if (self._2 == self.list2.len) {
                    self._2 = 0;
                    self._1 = self._1 + 1;
                }
                return a;
            }
        };

        pub fn iterator(self: *Self) Iterator {
            return Iterator{
                .list1 = self.list1,
                .list2 = self.list2,
                ._1 = 0,
                ._2 = 0,
            };
        }
    };
}

/// Convenience wrapper: infers element types from the slice arguments and
/// returns a ready iterator over their product.
fn product(list1: var, list2: var) productT(@typeOf(list1).Child, @typeOf(list2).Child).Iterator {
    return productT(@typeOf(list1).Child, @typeOf(list2).Child).init(list1, list2).iterator();
}

/// Test helper: drains `iter` and checks it yields exactly `expected`.
fn compare(comptime A: type, comptime B: type, iter: *productT(A, B).Iterator, expected: []const productT(A, B).Tuple) void {
    var i: usize = 0;
    while (i < expected.len) {
        var item = iter.next().?;
        testing.expect(expected[i]._1 == item._1);
        testing.expect(expected[i]._2 == item._2);
        i = i + 1;
    }
    testing.expect(iter.next() == null);
}

test "product.single_element" {
    const lst = []i32{ 1, 2, 3 };
    const TT = productT(i32, i32).Tuple;
    var iter = product(lst[0..1], lst[1..2]);
    const expected: []const TT = []TT{
        TT{ ._1 = 1, ._2 = 2 },
    };
    compare(i32, i32, &iter, expected[0..]);
}

test "product.iterator" {
    const AB = productT(i32, i8).Tuple;
    var iter = product(([]i32{ 1, 2, 3 })[0..], ([]i8{ -1, -2 })[0..]);
    const expected = []AB{
        AB{ ._1 = 1, ._2 = -1 },
        AB{ ._1 = 1, ._2 = -2 },
        AB{ ._1 = 2, ._2 = -1 },
        AB{ ._1 = 2, ._2 = -2 },
        AB{ ._1 = 3, ._2 = -1 },
        AB{ ._1 = 3, ._2 = -2 },
    };
    compare(i32, i8, &iter, expected[0..]);
}

test "product.first_empty" {
    const lst = ([]i32{1})[0..];
    var iter = product([]i32{}, lst);
    // Exhausted iterators must stay exhausted.
    testing.expect(iter.next() == null);
    testing.expect(iter.next() == null);
}

test "product.second_empty" {
    const lst = ([]i32{1})[0..];
    var iter = product(lst, []i32{});
    testing.expect(iter.next() == null);
    testing.expect(iter.next() == null);
}
src/product.zig
const std = @import("std");
const uefi = std.os.uefi;
const Status = uefi.Status;

/// UEFI EFI_BLOCK_IO_MEDIA: describes the media backing a block I/O device.
const EfiBlockMedia = extern struct {
    /// The current media ID. If the media changes, this value is changed.
    media_id: u32,

    /// `true` if the media is removable; otherwise, `false`.
    removable_media: bool,
    /// `true` if there is a media currently present in the device
    media_present: bool,
    /// `true` if the `BlockIoProtocol` was produced to abstract
    /// partition structures on the disk. `false` if the `BlockIoProtocol` was
    /// produced to abstract the logical blocks on a hardware device.
    logical_partition: bool,
    /// `true` if the media is marked read-only otherwise, `false`. This field
    /// shows the read-only status as of the most recent `WriteBlocks()`
    read_only: bool,
    /// `true` if the WriteBlocks() function caches write data.
    write_caching: bool,

    /// The intrinsic block size of the device. If the media changes, then this
    /// field is updated. Returns the number of bytes per logical block.
    block_size: u32,
    /// Supplies the alignment requirement for any buffer used in a data
    /// transfer. IoAlign values of 0 and 1 mean that the buffer can be
    /// placed anywhere in memory. Otherwise, IoAlign must be a power of
    /// 2, and the requirement is that the start address of a buffer must be
    /// evenly divisible by IoAlign with no remainder.
    io_align: u32,
    /// The last LBA on the device. If the media changes, then this field is updated.
    last_block: u64,

    // Revision 2
    lowest_aligned_lba: u64,
    logical_blocks_per_physical_block: u32,
    optimal_transfer_length_granularity: u32,
};

/// UEFI EFI_BLOCK_IO_PROTOCOL: synchronous block-granular access to a device.
/// The leading `_`-prefixed fields mirror the firmware's C function-pointer
/// table; the `pub fn` wrappers below forward to them.
const BlockIoProtocol = extern struct {
    const Self = @This();

    revision: u64,
    media: *EfiBlockMedia,

    _reset: fn (*BlockIoProtocol, extended_verification: bool) callconv(.C) Status,
    _read_blocks: fn (*BlockIoProtocol, media_id: u32, lba: u64, buffer_size: usize, buf: [*]u8) callconv(.C) Status,
    _write_blocks: fn (*BlockIoProtocol, media_id: u32, lba: u64, buffer_size: usize, buf: [*]u8) callconv(.C) Status,
    _flush_blocks: fn (*BlockIoProtocol) callconv(.C) Status,

    /// Resets the block device hardware.
    pub fn reset(self: *Self, extended_verification: bool) Status {
        return self._reset(self, extended_verification);
    }

    /// Reads the number of requested blocks from the device.
    pub fn readBlocks(self: *Self, media_id: u32, lba: u64, buffer_size: usize, buf: [*]u8) Status {
        return self._read_blocks(self, media_id, lba, buffer_size, buf);
    }

    /// Writes a specified number of blocks to the device.
    pub fn writeBlocks(self: *Self, media_id: u32, lba: u64, buffer_size: usize, buf: [*]u8) Status {
        return self._write_blocks(self, media_id, lba, buffer_size, buf);
    }

    /// Flushes all modified data to a physical block device.
    pub fn flushBlocks(self: *Self) Status {
        return self._flush_blocks(self);
    }

    /// GUID identifying EFI_BLOCK_IO_PROTOCOL for locate/handle lookups.
    pub const guid align(8) = uefi.Guid{
        .time_low = 0x964e5b21,
        .time_mid = 0x6459,
        .time_high_and_version = 0x11d2,
        .clock_seq_high_and_reserved = 0x8e,
        .clock_seq_low = 0x39,
        .node = [_]u8{ 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b },
    };
};
lib/std/os/uefi/protocols/block_io_protocol.zig
// Generation 4 (Diamond/Pearl/Platinum/HeartGold/SoulSilver) ROM data
// structures. All `packed struct`s here mirror the exact on-cartridge byte
// layout, so field order and padding are load-bearing — do not reorder.
// lu16/lu32/lu128 are little-endian integer wrappers.
const std = @import("std");
const pokemon = @import("index.zig");
const nds = @import("../nds/index.zig");
const utils = @import("../utils/index.zig");
const common = @import("common.zig");
const fun = @import("../../lib/fun-with-zig/src/index.zig");

const mem = std.mem;

const lu16 = fun.platform.lu16;
const lu32 = fun.platform.lu32;
const lu128 = fun.platform.lu128;

pub const constants = @import("gen4-constants.zig");

/// Per-species base data record.
pub const BasePokemon = packed struct {
    stats: common.Stats,
    types: [2]Type,

    catch_rate: u8,
    base_exp_yield: u8,

    evs: common.EvYield,
    items: [2]lu16,

    gender_ratio: u8,
    egg_cycles: u8,
    base_friendship: u8,

    growth_rate: common.GrowthRate,

    egg_group1: common.EggGroup,
    egg_group1_pad: u4,
    egg_group2: common.EggGroup,
    egg_group2_pad: u4,

    abilities: [2]u8,
    flee_rate: u8,

    color: common.Color,
    color_padding: bool,

    // Memory layout
    // TMS 01-92, HMS 01-08
    machine_learnset: lu128,
};

pub const MoveTutor = packed struct {
    move: lu16,
    cost: u8,
    tutor: u8,
};

/// All party members have this as the base.
/// * If trainer.party_type & 0b10 then there is an additional u16 after the base, which is the held
///   item.
/// * If trainer.party_type & 0b01 then there is an additional 4 * u16 after the base, which are
///   the party members moveset.
/// In HG/SS/Plat, this struct is always padded with a u16 at the end, no matter the party_type
pub const PartyMember = packed struct {
    iv: u8,
    gender: u4,
    ability: u4,
    level: lu16,
    species: u10,
    form: u6,
};

pub const Trainer = packed struct {
    // Bit flags for `party_type`; see the PartyMember doc comment above.
    const has_item = 0b10;
    const has_moves = 0b01;

    party_type: u8,
    class: u8,
    battle_type: u8, // TODO: This should probably be an enum
    party_size: u8,
    items: [4]lu16,
    ai: lu32,
    battle_type2: u8,
};

/// Elemental types as stored in gen 4 ROMs.
/// Note the gap at 0x09: that value is the in-game "???" type.
pub const Type = enum(u8) {
    Normal = 0x00,
    Fighting = 0x01,
    Flying = 0x02,
    Poison = 0x03,
    Ground = 0x04,
    Rock = 0x05,
    Bug = 0x06,
    Ghost = 0x07,
    Steel = 0x08,
    Fire = 0x0A,
    Water = 0x0B,
    Grass = 0x0C,
    Electric = 0x0D,
    Psychic = 0x0E,
    Ice = 0x0F,
    Dragon = 0x10,
    Dark = 0x11,
};

// TODO: This is the first data structure I had to decode from scratch as I couldn't find a proper
//       resource for it... Fill it out!
pub const Move = packed struct {
    u8_0: u8,
    u8_1: u8,
    category: common.MoveCategory,
    power: u8,
    @"type": Type,
    accuracy: u8,
    pp: u8,
    u8_7: u8,
    u8_8: u8,
    u8_9: u8,
    u8_10: u8,
    u8_11: u8,
    u8_12: u8,
    u8_13: u8,
    u8_14: u8,
    u8_15: u8,
};

pub const LevelUpMove = packed struct {
    move_id: u9,
    level: u7,
};

/// Wild encounter table layout for Diamond/Pearl/Platinum.
pub const DpptWildPokemons = packed struct {
    grass_rate: lu32,
    grass: [12]Grass,
    swarm_replacements: [2]Replacement, // Replaces grass[0, 1]
    day_replacements: [2]Replacement, // Replaces grass[2, 3]
    night_replacements: [2]Replacement, // Replaces grass[2, 3]
    radar_replacements: [4]Replacement, // Replaces grass[4, 5, 10, 11]
    unknown_replacements: [6]Replacement, // ???
    gba_replacements: [10]Replacement, // Each even replaces grass[8], each uneven replaces grass[9]
    surf: [5]Sea,
    sea_unknown: [5]Sea,
    old_rod: [5]Sea,
    good_rod: [5]Sea,
    super_rod: [5]Sea,

    pub const Grass = packed struct {
        level: u8,
        pad1: [3]u8,
        species: lu16,
        pad2: [2]u8,
    };

    pub const Sea = packed struct {
        level_max: u8,
        level_min: u8,
        pad1: [2]u8,
        species: lu16,
        pad2: [2]u8,
    };

    pub const Replacement = packed struct {
        species: lu16,
        pad: [2]u8,
    };
};

/// Wild encounter table layout for HeartGold/SoulSilver.
pub const HgssWildPokemons = packed struct {
    grass_rate: u8,
    sea_rates: [5]u8,
    unknown: [2]u8,
    grass_levels: [12]u8,
    grass_morning: [12]lu16,
    grass_day: [12]lu16,
    grass_night: [12]lu16,
    radio: [4]lu16,
    surf: [5]Sea,
    sea_unknown: [2]Sea,
    old_rod: [5]Sea,
    good_rod: [5]Sea,
    super_rod: [5]Sea,
    swarm: [4]lu16,

    pub const Sea = packed struct {
        level_min: u8,
        level_max: u8,
        species: lu16,
    };
};

/// Handle to all gen-4 data files of a loaded ROM. The Narc pointers are
/// views into the ROM's file system; `tms`/`hms` alias into the arm9 binary.
pub const Game = struct {
    base: pokemon.BaseGame,
    base_stats: *const nds.fs.Narc,
    moves: *const nds.fs.Narc,
    level_up_moves: *const nds.fs.Narc,
    trainers: *const nds.fs.Narc,
    parties: *const nds.fs.Narc,
    wild_pokemons: *const nds.fs.Narc,
    tms: []align(1) lu16,
    hms: []align(1) lu16,

    /// Identify the ROM by gamecode and wire up all data file handles.
    /// Errors if the gamecode is not a known gen-4 game or the TM/HM table
    /// cannot be located in the arm9 binary.
    pub fn fromRom(rom: nds.Rom) !Game {
        const info = try getInfo(rom.header.game_title, rom.header.gamecode);
        // The TM/HM move-id table is found by scanning arm9 for a known
        // byte-prefix; it holds tm_count entries followed by hm_count.
        const hm_tm_prefix_index = mem.indexOf(u8, rom.arm9, info.hm_tm_prefix) orelse return error.CouldNotFindTmsOrHms;
        const hm_tm_index = hm_tm_prefix_index + info.hm_tm_prefix.len;
        const hm_tms_len = (constants.tm_count + constants.hm_count) * @sizeOf(u16);
        const hm_tms = @bytesToSlice(lu16, rom.arm9[hm_tm_index..][0..hm_tms_len]);

        return Game{
            .base = pokemon.BaseGame{ .version = info.version },
            .base_stats = try common.getNarc(rom.root, info.base_stats),
            .level_up_moves = try common.getNarc(rom.root, info.level_up_moves),
            .moves = try common.getNarc(rom.root, info.moves),
            .trainers = try common.getNarc(rom.root, info.trainers),
            .parties = try common.getNarc(rom.root, info.parties),
            .wild_pokemons = try common.getNarc(rom.root, info.wild_pokemons),
            .tms = hm_tms[0..92],
            .hms = hm_tms[92..],
        };
    }

    /// Look up the per-game metadata record by gamecode.
    fn getInfo(game_title: []const u8, gamecode: []const u8) !constants.Info {
        for (constants.infos) |info| {
            //if (!mem.eql(u8, info.game_title, game_title))
            //    continue;
            if (!mem.eql(u8, info.gamecode, gamecode))
                continue;
            return info;
        }
        return error.NotGen4Game;
    }
};
src/pokemon/gen4.zig
// Decoders from a length-prefixed binary wire format into N-API (JavaScript)
// values. Every field is preceded by a varint tag whose low TAG_SIZE bits
// select the type (TAG_MASK) and whose remaining bits give the payload length.
// NOTE(review): these functions return `!c.napi_value` yet use
// `catch return null` — this relies on napi_value being a nullable pointer
// type so `null` coerces into the payload; verify against c.zig.
const std = @import("std");
const c = @import("c.zig");
const helpers = @import("helpers.zig");
const varint = @import("varint.zig");
const constants = @import("constants.zig");

/// One decoder per wire type. Each takes the whole `source` buffer plus the
/// payload's `start` offset and `len`, and returns the decoded JS value.
const Decoders = struct {
    /// Decode a UTF-8 payload into a JS string.
    pub fn String(env: c.napi_env, source: []u8, start: u32, len: u32) !c.napi_value {
        const end = start + len;
        var string: c.napi_value = undefined;
        if (c.napi_create_string_utf8(env, @ptrCast([*c]u8, source[start..end]), len, &string) != .napi_ok) {
            return helpers.throw(env, "Failed to decode string");
        }
        return string;
    }

    /// Expose the payload bytes as a JS Buffer. Zero-copy: the Buffer aliases
    /// `source`, so `source` must outlive the returned value.
    pub fn Buffer(env: c.napi_env, source: []u8, start: u32, len: u32) !c.napi_value {
        const end = start + len;
        var buffer: c.napi_value = undefined;
        // TODO: do we a napi_finalize to clean up the underlying bytes, or not?
        if (c.napi_create_external_buffer(env, len, @ptrCast(*c_void, source[start..end]), null, null, &buffer) != .napi_ok) {
            return helpers.throw(env, "Failed to decode buffer");
        }
        return buffer;
    }

    /// Decode a little-endian 32-bit signed integer.
    pub fn Integer(env: c.napi_env, source: []u8, start: u32, len: u32) !c.napi_value {
        const end = start + len;
        const i: i32 = std.mem.readIntSliceLittle(i32, source[start..end]);
        var integer: c.napi_value = undefined;
        if (c.napi_create_int32(env, i, &integer) != .napi_ok) {
            return helpers.throw(env, "Failed to decode integer");
        }
        return integer;
    }

    /// Decode an 8-byte IEEE-754 double via an unaligned pointer read.
    pub fn Double(env: c.napi_env, source: []u8, start: u32, len: u32) !c.napi_value {
        if (len != 8) {
            return helpers.throw(env, "Bad length for decoding a double");
        }
        var end = start + len;
        const double: f64 = @ptrCast(*const align(1) f64, source[start..end].ptr).*;
        var number: c.napi_value = undefined;
        if (c.napi_create_double(env, double, &number) != .napi_ok) {
            return helpers.throw(env, "Failed to decode double");
        }
        return number;
    }

    /// Decode a sequence of tagged elements into a JS array.
    pub fn Array(env: c.napi_env, source: []u8, start: u32, len: u32) !c.napi_value {
        var array: c.napi_value = undefined;
        if (c.napi_create_array(env, &array) != .napi_ok) {
            return helpers.throw(env, "Failed to create decodable array");
        }
        const end = start + len;
        var position: u32 = start;
        while (position < end) {
            // Each element: varint tag (type + length), then payload.
            const elemTag = varint.decode(source, position) catch return null;
            const elemType = @intCast(u8, elemTag.res & constants.TAG_MASK);
            const elemLen = elemTag.res >> constants.TAG_SIZE;
            position += elemTag.bytes;
            var elem = Decoders.Any(env, elemType, source, position, elemLen) catch return null;
            helpers.arrayPush(env, array, elem) catch return null;
            position += elemLen;
        }
        return array;
    }

    /// Decode alternating tagged key/value entries into a JS object.
    pub fn Object(env: c.napi_env, source: []u8, start: u32, len: u32) !c.napi_value {
        var object: c.napi_value = undefined;
        if (c.napi_create_object(env, &object) != .napi_ok) {
            return helpers.throw(env, "Failed to create decodable object");
        }
        const end = start + len;
        var position: u32 = start;
        while (position < end) {
            // Key: tag + payload.
            const keyTag = varint.decode(source, position) catch return null;
            const keyType = @intCast(u8, keyTag.res & constants.TAG_MASK);
            const keyLen = keyTag.res >> constants.TAG_SIZE;
            position += keyTag.bytes;
            var key = Decoders.Any(env, keyType, source, position, keyLen) catch return null;
            position += keyLen;
            // Value: tag + payload.
            const valueTag = varint.decode(source, position) catch return null;
            const valueType = @intCast(u8, valueTag.res & constants.TAG_MASK);
            const valueLen = valueTag.res >> constants.TAG_SIZE;
            position += valueTag.bytes;
            var value = Decoders.Any(env, valueType, source, position, valueLen) catch return null;
            if (c.napi_set_property(env, object, key, value) != .napi_ok) {
                return helpers.throw(env, "Failed to set property");
            }
            position += valueLen;
        }
        return object;
    }

    /// Decode the combined bool/null/undefined wire type:
    /// len 0 -> null; byte 0 -> false; byte 1 -> true; byte 2 -> undefined.
    // NOTE(review): a payload byte > 2 falls through and returns `result`
    // uninitialized — presumably unreachable for well-formed input; verify.
    pub fn BoolNull(env: c.napi_env, source: []u8, start: u32, len: u32) !c.napi_value {
        var result: c.napi_value = undefined;
        if (len == 0) {
            if (c.napi_get_null(env, &result) != .napi_ok) {
                return helpers.throw(env, "Failed to decode null");
            }
            return result;
        }
        if (len != 1) {
            return helpers.throw(env, "Bad length for decoding a bool or null");
        }
        if (source[start] == 0) {
            if (c.napi_get_boolean(env, false, &result) != .napi_ok) {
                return helpers.throw(env, "Failed to decode boolean false");
            }
        } else if (source[start] == 1) {
            if (c.napi_get_boolean(env, true, &result) != .napi_ok) {
                return helpers.throw(env, "Failed to decode boolean true");
            }
        } else if (source[start] == 2) {
            if (c.napi_get_undefined(env, &result) != .napi_ok) {
                return helpers.throw(env, "Failed to decode undefined");
            }
        }
        return result;
    }

    /// Dispatch to the decoder matching the wire-type tag; unknown tags
    /// decode to null.
    pub fn Any(env: c.napi_env, _type: u8, buffer: []u8, start: u32, len: u32) !c.napi_value {
        return switch (_type) {
            constants.STRING => Decoders.String(env, buffer, start, len),
            constants.BUFFER => Decoders.Buffer(env, buffer, start, len),
            constants.INT => Decoders.Integer(env, buffer, start, len),
            constants.DOUBLE => Decoders.Double(env, buffer, start, len),
            constants.ARRAY => Decoders.Array(env, buffer, start, len),
            constants.OBJECT => Decoders.Object(env, buffer, start, len),
            constants.BOOLNULL => Decoders.BoolNull(env, buffer, start, len),
            else => null,
        };
    }
};

/// Public entry point: read the leading varint tag at `start` and decode the
/// single value it describes.
pub fn decode(env: c.napi_env, buffer: []u8, start: u32) !c.napi_value {
    const tag = varint.decode(buffer, start) catch return null;
    const _type = @intCast(u8, tag.res & constants.TAG_MASK);
    const len = tag.res >> constants.TAG_SIZE;
    return Decoders.Any(env, _type, buffer, start + tag.bytes, len) catch return null;
}
src/decode.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;

const util = @import("util.zig");
const gpa = util.gpa;

const data = @embedFile("../data/day06.txt");

/// AoC 2021 day 6: exponential lanternfish population, tracked as counts
/// bucketed by days-until-spawn. `ages[k]` is the number of fish that will
/// spawn in exactly `k` days (0 <= k <= 8).
const Fishes = struct {
    ages: [9]u64 = [_]u64{0} ** 9,

    /// Advance the simulation one day: every bucket shifts down by one, the
    /// fish that reached zero re-enter at 6, and each of them also produces
    /// one newborn at 8.
    fn nextDay(self: *Fishes) void {
        const spawning = self.ages[0];
        var k: usize = 0;
        while (k < 8) : (k += 1) {
            self.ages[k] = self.ages[k + 1];
        }
        self.ages[6] += spawning; // parents restart their 7-day cycle
        self.ages[8] = spawning; // one newborn per spawning fish
    }

    /// Total number of fish across all buckets.
    fn totalFish(self: Fishes) u64 {
        var sum: u64 = 0;
        for (self.ages) |count| {
            sum += count;
        }
        return sum;
    }
};

pub fn main() !void {
    // Parse the comma-separated ages into buckets.
    var school: Fishes = .{};
    var it = tokenize(u8, data, ",\r\n");
    while (it.next()) |tok| {
        const age = parseInt(u8, tok, 10) catch unreachable;
        school.ages[age] += 1;
    }

    // Part 1: population after 80 days; part 2 continues the same run to 256.
    var day: usize = 0;
    while (day < 80) : (day += 1) school.nextDay();
    const part1 = school.totalFish();

    while (day < 256) : (day += 1) school.nextDay();
    const part2 = school.totalFish();

    print("part1={}, part2={}\n", .{ part1, part2 });
}

// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;

const eql = std.mem.eql;
const parseEnum = std.meta.stringToEnum;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;

const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;

const print = std.debug.print;
const assert = std.debug.assert;

const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc;
src/day06.zig
const std = @import("std");
const utils = @import("./utils.zig");

// Comptime tuple of every lock implementation to benchmark; commented-out
// entries are implementations not currently under test. `main` skips any
// entry that is `void` (used to disable platform-specific locks).
const locks = .{
    // ------------ Spin Locks ---------------
    //@import("locks/ticket_lock.zig").Lock,
    //@import("locks/mcs_lock.zig").Lock,
    // ------------ System Locks ---------------
    @import("locks/os_lock.zig").Lock,
    @import("locks/os_raw_lock.zig").Lock,
    //if (utils.is_windows) @import("locks/keyed_event_lock.zig").Lock else void,
    // ------------ Custom Locks ---------------
    @import("locks/futex_lock.zig").Lock,
    @import("locks/count_lock.zig").Lock,
    //@import("locks/word_lock.zig").Lock,
    //@import("locks/parking_lot.zig").Lock,
};

/// Print CLI usage to stderr. Called whenever argument parsing fails.
// NOTE(review): the leading whitespace inside this multiline string may have
// been lost when the file was reflowed; verify the alignment of the option
// descriptions against the original file.
fn help() void {
    print("{s}", .{
        \\Usage: zig run bench.zig [measure] [threads] [locked] [unlocked]
        \\
        \\where:
        \\ [measure]: [csv-ranged:time]
        \\ List of time spent measuring for each mutex benchmark
        \\ [threads]: [csv-ranged:count]
        \\ List of thread counts for each benchmark
        \\ [locked]: [csv-ranged:time]
        \\ List of time spent inside the lock for each benchmark
        \\ [unlocked]: [csv-ranged:time]
        \\ List of time spent outside the lock for each benchmark
        \\
        \\where:
        \\ [count]: {usize}
        \\ [time]: {u128}[time_unit]
        \\ [time_unit]: "ns" | "us" | "ms" | "s"
        \\
        \\ [csv_ranged:{rule}]:
        \\ | {rule}
        \\ single value
        \\ | {rule} "-" {rule}
        \\ randomized value in range
        \\ | [csv_ranged:{rule}] "," [csv_ranged:{rule}]
        \\ multiple permutations
        \\
    });
}

// Circumvent going through std.debug.print
// as when theres a segfault that happens while std.debug.stderr_mutex is being held,
// then the panic handler will try and grab the mutex again which will result in a dead-lock.
/// stderr printer that bypasses std.debug.print's mutex (see comment above);
/// output errors are deliberately ignored.
fn print(comptime fmt: []const u8, args: anytype) void {
    nosuspend std.io.getStdErr().writer().print(fmt, args) catch return;
}

/// Entry point: parse the four CLI argument lists, then run every lock in
/// `locks` under every (measure, threads, locked, unlocked) permutation and
/// print one result table per permutation.
pub fn main() !void {
    // allocator which can be shared between threads
    const shared_allocator = blk: {
        if (std.builtin.link_libc) {
            break :blk std.heap.c_allocator;
        }
        if (utils.is_windows) {
            const Static = struct {
                var heap = std.heap.HeapAllocator.init();
            };
            Static.heap.heap_handle = std.os.windows.kernel32.GetProcessHeap() orelse @panic("GetProcessHeap");
            break :blk &Static.heap.allocator;
        }
        const Static = struct {
            var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        };
        break :blk &Static.gpa.allocator;
    };

    // use an arena allocator for all future allocations
    var arena = std.heap.ArenaAllocator.init(shared_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;

    var measures = std.ArrayList(Duration).init(allocator);
    defer measures.deinit();
    var threads = std.ArrayList(usize).init(allocator);
    defer threads.deinit();
    var locked = std.ArrayList(WorkUnit).init(allocator);
    defer locked.deinit();
    var unlocked = std.ArrayList(WorkUnit).init(allocator);
    defer unlocked.deinit();

    // Skip argv[0], then parse the four positional argument lists; any parse
    // failure prints usage and exits normally.
    var args = std.process.args();
    _ = try (args.next(allocator) orelse unreachable);
    Parser.parse(allocator, &args, &measures, Parser.toMeasure) catch return help();
    Parser.parse(allocator, &args, &threads, Parser.toThread) catch return help();
    Parser.parse(allocator, &args, &locked, Parser.toWorkUnit) catch return help();
    Parser.parse(allocator, &args, &unlocked, Parser.toWorkUnit) catch return help();

    // Calibrate how long one spin "work unit" takes so locked/unlocked times
    // given in ns can be converted into iteration counts.
    const nanos_per_work_unit = try WorkUnit.nanosPerUnit();

    for (unlocked.items) |work_unlocked| {
        for (locked.items) |work_locked| {
            for (threads.items) |num_threads| {
                for (measures.items) |measure| {
                    print("measure={} threads={} locked={} unlocked={}\n{s}\n", .{
                        measure,
                        num_threads,
                        work_locked,
                        work_unlocked,
                        "-" ** 90,
                    });
                    // An all-null Result formats as the header row.
                    const header_result = Result{};
                    print("{}\n", .{header_result});
                    inline for (locks) |Lock| {
                        if (Lock != void) {
                            const result = try bench(Lock, BenchConfig{
                                .allocator = allocator,
                                .shared_allocator = shared_allocator,
                                .num_threads = num_threads,
                                .measure = measure,
                                .work_locked = work_locked.scaled(nanos_per_work_unit),
                                .work_unlocked = work_unlocked.scaled(nanos_per_work_unit),
                            });
                            print("{}\n", .{result});
                        }
                    }
                    print("\n", .{});
                }
            }
        }
    }
}

/// Everything one bench() run needs: allocators, thread count, how long to
/// measure, and the iteration counts spent inside/outside the lock.
const BenchConfig = struct {
    allocator: *std.mem.Allocator,
    shared_allocator: *std.mem.Allocator,
    num_threads: usize,
    measure: Duration,
    work_locked: WorkUnit,
    work_unlocked: WorkUnit,
};

/// Run one lock benchmark: spawn config.num_threads workers hammering `Lock`,
/// let them run for config.measure, then aggregate per-thread iteration
/// counts and acquire-latency percentiles into a Result row.
fn bench(comptime Lock: type, config: BenchConfig) !Result {
    const workers = try config.allocator.alloc(Worker, config.num_threads);
    defer config.allocator.free(workers);
    var spawned: usize = 0;
    // Each worker owns an arena for its latency samples; tear them down after
    // the threads have been joined (see the inner block's defers).
    defer for (workers[0..spawned]) |*w| w.arena.deinit();
    {
        var lock: Lock = undefined;
        lock.init();
        defer lock.deinit();
        var barrier = Barrier{};
        defer {
            // Signal shutdown first so every spawned thread exits its loop,
            // then join them all before the lock/arena cleanup above runs.
            barrier.stop();
            for (workers[0..spawned]) |w| w.thread.join();
        }
        const runFn = Worker.getRunner(Lock).run;
        while (spawned < workers.len) : (spawned += 1) {
            // NOTE(review): .latencies captures a pointer into the slot being
            // assigned in this same statement; it points at the slot's final
            // storage, but relies on field-initialization order — verify.
            workers[spawned] = .{
                .thread = undefined,
                .arena = std.heap.ArenaAllocator.init(config.shared_allocator),
                .latencies = std.ArrayList(u64).init(&workers[spawned].arena.allocator),
            };
            workers[spawned].thread = try std.Thread.spawn(.{}, runFn, .{
                &workers[spawned],
                &lock,
                &barrier,
                config.work_locked,
                config.work_unlocked,
            });
        }
        // Release all workers at once, then let them run for the window.
        barrier.start();
        std.time.sleep(config.measure.nanos);
    }
    var latencies = std.ArrayList(u64).init(config.allocator);
    defer latencies.deinit();
    var sum: u64 = 0;
    var max: u64 = 0;
    var min: u64 = std.math.maxInt(u64);
    for (workers) |w| {
        sum += w.iters;
        min = std.math.min(min, w.iters);
        max = std.math.max(max, w.iters);
        try latencies.appendSlice(w.latencies.items);
    }
    const mean = @intToFloat(f64, sum) / @intToFloat(f64, workers.len);
    // Sample standard deviation of per-thread iteration counts.
    var stdev: f64 = 0;
    for (workers) |w| {
        const r = @intToFloat(f64, w.iters) - mean;
        stdev += r * r;
    }
    if (workers.len > 1) {
        stdev /= @intToFloat(f64, workers.len - 1);
        stdev = @sqrt(stdev);
    }
    const items = latencies.items;
    const cmp = comptime std.sort.asc(u64);
    std.sort.sort(u64, items, {}, cmp);
    var latency_percentiles: [2]u64 = undefined;
    for ([_]f64{ 50.0, 99.0 }) |percentile, index| {
        const p = percentile / 100.0;
        const i = @round(p * @intToFloat(f64, items.len));
        // FIX: the original clamped the rank to items.len (one past the end)
        // and indexed unconditionally — items[items.len] is out-of-bounds and
        // panics whenever the rounded rank reaches the length (e.g. a single
        // sample at p99), as does items[0] when no latencies were recorded.
        if (items.len == 0) {
            latency_percentiles[index] = 0;
        } else {
            const v = std.math.min(items.len - 1, @floatToInt(usize, i));
            latency_percentiles[index] = items[v];
        }
    }
    const latency_p50 = latency_percentiles[0];
    const latency_p99 = latency_percentiles[1];
    return Result{
        .name = Lock.name,
        .mean = mean,
        .stdev = stdev,
        .min = @intToFloat(f64, min),
        .max = @intToFloat(f64, max),
        .sum = @intToFloat(f64, sum),
        .@"lat. <50%" = latency_p50,
        .@"lat. <99%" = latency_p99,
    };
}

/// Futex-based start/stop gate: 0 = waiting to start, 1 = running, 2 = stop.
const Barrier = struct {
    state: std.atomic.Atomic(u32) = std.atomic.Atomic(u32).init(0),

    /// Block until start()/stop() moves the state away from 0.
    fn wait(self: *const Barrier) void {
        while (self.state.load(.Acquire) == 0) {
            std.Thread.Futex.wait(&self.state, 0, null) catch unreachable;
        }
    }

    fn isRunning(self: *const Barrier) bool {
        return self.state.load(.Acquire) == 1;
    }

    fn wake(self: *Barrier, value: u32) void {
        self.state.store(value, .Release);
        std.Thread.Futex.wake(&self.state, std.math.maxInt(u32));
    }

    fn start(self: *Barrier) void {
        self.wake(1);
    }

    fn stop(self: *Barrier) void {
        self.wake(2);
    }
};

/// Per-thread benchmark state: iteration count, the OS thread, and the
/// acquire-latency samples collected while running.
const Worker = struct {
    iters: u64 = 0,
    thread: std.Thread,
    latencies: std.ArrayList(u64),
    arena: std.heap.ArenaAllocator,

    /// Returns a namespace whose run() is the thread body specialized for
    /// `Lock` (comptime dispatch instead of a vtable).
    fn getRunner(comptime Lock: type) type {
        return struct {
            pub fn run(
                noalias self: *Worker,
                noalias lock: *Lock,
                noalias barrier: *const Barrier,
                work_locked: WorkUnit,
                work_unlocked: WorkUnit,
            ) void {
                // Cheap per-thread xorshift seed from the two pointers.
                var prng = @as(u64, @ptrToInt(self) ^ @ptrToInt(lock));
                var locked: u64 = 0;
                var unlocked: u64 = 0;
                barrier.wait();
                while (barrier.isRunning()) : (self.iters += 1) {
                    // Re-randomize the work amounts only every 32 iterations
                    // to keep the PRNG off the hot path.
                    if (self.iters % 32 == 0) {
                        locked = work_locked.count(&prng);
                        unlocked = work_unlocked.count(&prng);
                    }
                    WorkUnit.run(locked);
                    const acquire_begin = utils.nanotime();
                    lock.acquire();
                    const acquire_end = utils.nanotime();
                    WorkUnit.run(unlocked);
                    lock.release();
                    const latency = acquire_end - acquire_begin;
                    // Best-effort: drop the sample on allocation failure.
                    self.latencies.append(latency) catch {};
                }
            }
        };
    }
};

/// CLI grammar helpers: each argument is a comma-separated list of items,
/// where an item is either a single value or a "lo-hi" range.
const Parser = struct {
    /// Consume one argv entry and feed every item / range through resolveFn
    /// into `results`.
    fn parse(
        allocator: *std.mem.Allocator,
        args: *std.process.ArgIterator,
        results: anytype,
        comptime resolveFn: anytype,
    ) !void {
        var input = try (args.next(allocator) orelse return error.ExpectedArg);
        while (input.len > 0) {
            const a = try Item.read(&input);
            // Optional "-b" suffix turns the item into a range.
            const b = blk: {
                if (input.len == 0 or input[0] != '-') break :blk null;
                input = input[1..];
                const b = try Item.read(&input);
                break :blk b;
            };
            try resolveFn(results, a, b);
            if (input.len > 0) {
                if (input[0] != ',') return error.InvalidSeparator;
                input = input[1..];
            }
        }
    }

    /// [measure]: a single duration with a required time unit; no ranges.
    fn toMeasure(results: *std.ArrayList(Duration), a: Item, b: ?Item) !void {
        if (b != null) return error.MeasureDoesntSupportRanges;
        const mult = a.mult orelse return error.MeasureRequiresTimeUnit;
        try results.append(Duration{ .nanos = a.value * mult });
    }

    /// [threads]: plain counts; a range expands to every count inside it.
    fn toThread(results: *std.ArrayList(usize), a: Item, b: ?Item) !void {
        if (b) |real_b| {
            if (real_b.mult != null) return error.ThreadsTakeValuesNotTimeUnits;
            if (a.value > real_b.value) return error.InvalidThreadRange;
            var thread = a.value;
            while (thread <= real_b.value) : (thread += 1)
                try results.append(@intCast(usize, thread));
        } else if (a.mult != null) {
            return error.ThreadsTakeValuesNotTimeUnits;
        } else {
            try results.append(@intCast(usize, a.value));
        }
    }

    /// [locked]/[unlocked]: durations (unit required); a range keeps its
    /// bounds and is randomized later per worker.
    fn toWorkUnit(results: *std.ArrayList(WorkUnit), a: Item, b: ?Item) !void {
        var work_unit = WorkUnit{
            .from = a.value * (a.mult orelse return error.WorkUnitRequiresTimeUnit),
            .to = null,
        };
        if (b) |real_b| {
            const mult = real_b.mult orelse return error.WorkUnitRequiresTimeUnit;
            work_unit.to = real_b.value * mult;
        }
        if (work_unit.to) |to| {
            if (work_unit.from >= to) {
                return error.InvalidWorkUnitRange;
            }
        }
        try results.append(work_unit);
    }

    /// One lexical item: a decimal value plus an optional time-unit multiplier
    /// ("ns"/"us"/"ms"/"s" converted to nanoseconds).
    const Item = struct {
        value: u64,
        mult: ?u64,

        /// Read one item from the front of *input, advancing it past what was
        /// consumed (even on error, via the defer).
        fn read(input: *[]const u8) !Item {
            var buf = input.*;
            defer input.* = buf;
            const value = blk: {
                var val: ?u64 = null;
                while (buf.len > 0) {
                    if (buf[0] < '0' or buf[0] > '9') break;
                    val = ((val orelse 0) * 10) + (buf[0] - '0');
                    buf = buf[1..];
                }
                break :blk (val orelse return error.NoValueProvided);
            };
            var mult: ?u64 = null;
            if (buf.len > 0 and buf[0] != '-' and buf[0] != ',') {
                var m: u64 = switch (buf[0]) {
                    'n' => 1,
                    'u' => std.time.ns_per_us,
                    'm' => std.time.ns_per_ms,
                    's' => std.time.ns_per_s,
                    else => return error.InvalidTimeUnit,
                };
                buf = buf[1..];
                // "n"/"u"/"m" must be followed by 's' ("ns"/"us"/"ms");
                // a bare "s" stands alone.
                if (m != std.time.ns_per_s) {
                    if (buf.len == 0 or buf[0] != 's') return error.InvalidTimeUnit;
                    buf = buf[1..];
                }
                mult = m;
            }
            return Item{
                .value = value,
                .mult = mult,
            };
        }
    };
};

/// One table row. All fields optional: a default-initialized Result prints
/// the column headers instead of values.
const Result = struct {
    name: ?[]const u8 = null,
    mean: ?f64 = null,
    stdev: ?f64 = null,
    min: ?f64 = null,
    max: ?f64 = null,
    sum: ?f64 = null,
    @"lat. <50%": ?u64 = null,
    @"lat. <99%": ?u64 = null,

    const name_align = 18;
    const val_align = 8;

    /// Comptime u8 -> decimal string (0..99), used to build format widths.
    fn toStr(comptime int: u8) []const u8 {
        if (int < 10) return &[_]u8{'0' + int};
        return &[_]u8{
            '0' + (int / 10),
            '0' + (int % 10),
        };
    }

    /// Render the row: name column, the five count columns scaled with
    /// k/m/b suffixes, then the two latency columns scaled with ns/us/ms/s.
    pub fn format(
        self: Result,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        const name_fmt = "{s:<" ++ toStr(name_align) ++ "} |";
        const name: []const u8 = self.name orelse "name"[0..];
        try std.fmt.format(writer, name_fmt, .{name});
        inline for ([_][]const u8{
            "mean", "stdev", "min", "max", "sum",
        }) |field| {
            const valign = val_align - 2;
            if (@field(self, field)) |value| {
                if (value < 1_000) {
                    try std.fmt.format(writer, " {d:>" ++ toStr(valign) ++ "} |", .{@round(value)});
                } else if (value < 1_000_000) {
                    try std.fmt.format(writer, " {d:>" ++ toStr(valign - 1) ++ ".0}k |", .{value / 1_000});
                } else if (value < 1_000_000_000) {
                    try std.fmt.format(writer, " {d:>" ++ toStr(valign - 1) ++ ".2}m |", .{value / 1_000_000});
                } else {
                    try std.fmt.format(writer, " {d:>" ++ toStr(valign - 1) ++ ".2}b |", .{value / 1_000_000_000});
                }
            } else {
                // Null field: print the column header instead.
                try std.fmt.format(writer, " {s:>" ++ toStr(valign) ++ "} |", .{field});
            }
        }
        inline for ([_][]const u8{
            "lat. <50%", "lat. <99%",
        }) |field| {
            const valign = val_align + 1;
            if (@field(self, field)) |value| {
                if (value < 1_000) {
                    try std.fmt.format(writer, " {:>" ++ toStr(valign - 2) ++ "}ns |", .{value});
                } else if (value < 1_000_000) {
                    try std.fmt.format(writer, " {d:>" ++ toStr(valign - 2) ++ ".2}us |", .{@intToFloat(f64, value) / 1_000});
                } else if (value < 1_000_000_000) {
                    try std.fmt.format(writer, " {d:>" ++ toStr(valign - 2) ++ ".2}ms |", .{@intToFloat(f64, value) / 1_000_000});
                } else {
                    try std.fmt.format(writer, " {d:>" ++ toStr(valign - 1) ++ ".2}s |", .{@intToFloat(f64, value) / 1_000_000_000});
                }
            } else {
                try std.fmt.format(writer, " {s:>" ++ toStr(valign) ++ "} |", .{field});
            }
        }
    }
};

/// Amount of spin work to do per loop iteration, expressed first in
/// nanoseconds (from the CLI) and later rescaled into spin counts.
/// `to == null` means a fixed amount; otherwise a random value in [from, to].
const WorkUnit = struct {
    from: u64,
    to: ?u64,

    /// Calibrate: average wall-clock nanoseconds consumed by one work() call,
    /// measured over 10 batches of 10k spins.
    fn nanosPerUnit() !f64 {
        var attempts: [10]f64 = undefined;
        for (attempts) |*attempt| {
            const num_works = 10_000;
            const start = utils.nanotime();
            WorkUnit.run(num_works);
            const elapsed = @intToFloat(f64, utils.nanotime() - start);
            attempt.* = elapsed / @as(f64, num_works);
        }
        var sum: f64 = 0;
        for (attempts) |attempt| sum += attempt;
        return sum / @intToFloat(f64, attempts.len);
    }

    /// Convert this unit from nanoseconds into spin-iteration counts.
    fn scaled(self: WorkUnit, ns_per_unit: f64) WorkUnit {
        return WorkUnit{
            .from = scale(self.from, ns_per_unit),
            .to = if (self.to) |t| scale(t, ns_per_unit) else null,
        };
    }

    fn scale(value: u64, ns_per_unit: f64) u64 {
        return @floatToInt(u64, @intToFloat(f64, value) / ns_per_unit);
    }

    /// Pick the iteration count for one batch: fixed, or xorshift64-random
    /// within [from, to] (never zero).
    fn count(self: WorkUnit, prng: *u64) u64 {
        const min = self.from;
        const max = self.to orelse return min;
        const rng = blk: {
            var xs = prng.*;
            xs ^= xs << 13;
            xs ^= xs >> 7;
            xs ^= xs << 17;
            prng.* = xs;
            break :blk xs;
        };
        return std.math.max(1, (rng % (max - min + 1)) + min);
    }

    fn work() void {
        std.atomic.spinLoopHint();
    }

    /// Burn `iterations` spin hints.
    fn run(iterations: u64) void {
        var i = iterations;
        while (i != 0) : (i -= 1) {
            WorkUnit.work();
        }
    }

    pub fn format(
        self: WorkUnit,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        const from_duration = Duration{ .nanos = self.from };
        if (self.to) |to| {
            const to_duration = Duration{ .nanos = to };
            try std.fmt.format(writer, "rand({}, {})", .{ from_duration, to_duration });
        } else {
            try std.fmt.format(writer, "{}", .{from_duration});
        }
    }
};

/// Nanosecond count that pretty-prints with the largest whole unit
/// (ns/us/ms/s, integer division).
const Duration = struct {
    nanos: u64,

    pub fn format(
        self: Duration,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        if (self.nanos < std.time.ns_per_us) {
            try std.fmt.format(writer, "{}ns", .{self.nanos});
        } else if (self.nanos < std.time.ns_per_ms) {
            try std.fmt.format(writer, "{}us", .{self.nanos / std.time.ns_per_us});
        } else if (self.nanos < std.time.ns_per_s) {
            try std.fmt.format(writer, "{}ms", .{self.nanos / std.time.ns_per_ms});
        } else {
            try std.fmt.format(writer, "{}s", .{self.nanos / std.time.ns_per_s});
        }
    }
};
bench.zig
const std = @import("std");
const warn = std.debug.warn;

// `Id` and `Token` used throughout this file are presumably declared in the
// grammar tokens module pulled in here — confirm against zig_grammar.tokens.zig.
pub usingnamespace @import("zig_grammar.tokens.zig");

// Per-ASCII-byte table: 1 = byte may appear inside an identifier
// ([0-9A-Za-z_]), 0 = byte terminates one. Indexed with the peeked byte.
const identifier_state = [128]u8{
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
};

/// Hand-written single-pass Zig tokenizer. Invariant maintained everywhere:
/// `peek` mirrors `source[index]` (or -1 past the end), so the index rewinds
/// below are only valid where the rewound byte equals the current peek.
pub const Lexer = struct {
    source: []const u8,
    index: usize = 0, // byte offset of the character currently in `peek`
    first: usize = 0, // start offset of the token being scanned
    peek: i32 = -1, // current byte as i32, or -1 at end of input

    pub fn init(source: []const u8) Lexer {
        return Lexer{
            .source = source,
            .peek = if (source.len == 0) @as(i32, -1) else @intCast(i32, source[0]),
        };
    }

    /// Advance one byte, refreshing `peek` (-1 once past the end).
    fn getc(self: *Lexer) void {
        self.index += 1;
        self.peek = if (self.index < self.source.len) @intCast(i32, self.source[self.index]) else @as(i32, -1);
    }

    /// Advance and return the new peek byte in one step.
    fn getcx(self: *Lexer) i32 {
        @inlineCall(self.getc);
        return self.peek;
    }

    /// Consume the remainder of an identifier.
    fn identifier(self: *Lexer) Id {
        while (true) {
            const peek: i8 = if (self.peek >= 0) @truncate(i8, self.peek) else return Id.Identifier;
            if (peek < 0 or identifier_state[@bitCast(u8, peek)] == 0) return Id.Identifier;
            self.getc();
        }
    }

    /// After a fully-matched keyword: if more identifier characters follow it
    /// was just a longer identifier, otherwise return the keyword id.
    fn identifierOr(self: *Lexer, default: Id) Id {
        const peek: i8 = if (self.peek >= 0) @truncate(i8, self.peek) else return default;
        if (peek < 0 or identifier_state[@bitCast(u8, peek)] == 0) return default;
        self.getc();
        return self.identifier();
    }

    /// Called after "//": classify as LineComment ("//", "////...") or
    /// DocComment ("///") and consume to end of line.
    fn comment(self: *Lexer) Id {
        const id = blk: {
            if (self.peek == '/') {
                if (self.getcx() == '/') break :blk Id.LineComment;
                break :blk Id.DocComment;
            }
            break :blk Id.LineComment;
        };
        while (true) {
            switch (self.peek) {
                '\n', -1 => return id,
                else => {},
            }
            self.getc();
        }
    }

    /// "0x..." literal; Invalid until at least one hex digit is seen.
    fn hex(self: *Lexer) Id {
        var id = Id.Invalid;
        self.getc();
        while (true) {
            switch (self.peek) {
                '0'...'9', 'a'...'f', 'A'...'F' => {
                    id = Id.IntegerLiteral;
                },
                '.' => {
                    return if (id == Id.Invalid) id else self.float_digits(true);
                },
                else => return id,
            }
            self.getc();
        }
    }

    /// "0o..." literal; Invalid until at least one octal digit is seen.
    fn octal(self: *Lexer) Id {
        var id = Id.Invalid;
        self.getc();
        while (true) {
            switch (self.peek) {
                '0'...'7' => {
                    id = Id.IntegerLiteral;
                },
                else => return id,
            }
            self.getc();
        }
    }

    /// "0b..." literal; Invalid until at least one binary digit is seen.
    fn binary(self: *Lexer) Id {
        var id = Id.Invalid;
        self.getc();
        while (true) {
            switch (self.peek) {
                '0'...'1' => {
                    id = Id.IntegerLiteral;
                },
                else => return id,
            }
            self.getc();
        }
    }

    /// Decimal literal; switches to float scanning on '.' or 'e'.
    fn digits(self: *Lexer) Id {
        while (true) {
            switch (self.peek) {
                '0'...'9' => {},
                '.', 'e' => return self.float_digits(false),
                else => return Id.IntegerLiteral,
            }
            self.getc();
        }
    }

    /// Fractional/exponent part. A second '.' immediately after the first
    /// means a range operator ("1..2"): rewind and keep the IntegerLiteral
    /// (safe only because the rewound byte equals the current peek, '.').
    fn float_digits(self: *Lexer, allow_hex: bool) Id {
        self.getc();
        if (self.peek == '.') {
            self.index -= 1;
            return Id.IntegerLiteral;
        }
        while (true) {
            switch (self.peek) {
                '0'...'9' => {},
                'a'...'f', 'A'...'F' => {
                    if (!allow_hex) return Id.FloatLiteral;
                },
                else => return Id.FloatLiteral,
            }
            self.getc();
        }
    }

    /// Multiline string line ("\\..." or "c\\..."): runs to end of line.
    fn linestring(self: *Lexer, id: Id) Id {
        while (true) {
            switch (self.peek) {
                '\n', -1 => return id,
                else => {},
            }
            self.getc();
        }
    }

    /// Double-quoted string; '\\' skips the escaped byte. An unterminated
    /// string still yields StringLiteral (see TODO).
    fn string(self: *Lexer) Id {
        while (true) {
            switch (self.peek) {
                '\n', -1 => {
                    // TODO: error
                    return Id.StringLiteral;
                },
                '\\' => {
                    _ = self.getc();
                },
                '"' => {
                    _ = self.getc();
                    return Id.StringLiteral;
                },
                else => {},
            }
            _ = self.getc();
        }
    }

    /// Produce the next token; loops past Ignore (spaces). Bytes >= 0x80
    /// (negative as i8) become one-byte Invalid tokens.
    pub fn next(self: *Lexer) Token {
        while (true) {
            self.first = self.index;
            const peek: i8 = if (self.peek >= 0) @truncate(i8, self.peek) else return Token{ .start = self.first, .end = self.index, .id = Id.Eof };
            if (peek < 0) {
                self.getc();
                return Token{ .start = self.first, .end = self.index, .id = Id.Invalid };
            }
            const id = self.nextId(peek);
            if (id != .Ignore) return Token{ .start = self.first, .end = self.index, .id = id };
        }
    }

    /// Main dispatch on the first byte of a token. Keywords are matched by a
    /// hand-rolled trie of getcx() comparisons; each full match still goes
    /// through identifierOr() so e.g. "formula" stays an identifier.
    fn nextId(self: *Lexer, peek: i8) Id {
        _ = self.getc();
        switch (peek) {
            '\n' => {
                return Id.Newline;
            },
            ' ' => {
                // Runs of spaces collapse into a single Ignore result.
                while (self.peek == ' ') self.getc();
                return Id.Ignore;
            },
            '!' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.BangEqual;
                }
                return Id.Bang;
            },
            '"' => {
                return self.string();
            },
            '#' => {
                // "#!" is only a shebang at the very start of the file.
                if (self.index == 1 and self.peek == '!') {
                    while (self.peek != '\n' and self.peek != -1) self.getc();
                    return Id.ShebangLine;
                }
                return Id.Invalid;
            },
            '%' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.PercentEqual;
                }
                return Id.Percent;
            },
            '&' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.AmpersandEqual;
                }
                if (self.peek == '&') {
                    self.getc();
                    return Id.AmpersandAmpersand;
                }
                return Id.Ampersand;
            },
            '\'' => {
                // Character literal; escaped newline/EOF degrades to Identifier.
                while (true) {
                    switch (self.peek) {
                        '\n', -1 => return Id.Invalid,
                        '\\' => {
                            const escape = self.getcx();
                            if (escape == '\n' or escape == -1)
                                // TODO: error
                                return Id.Identifier;
                        },
                        '\'' => {
                            self.getc();
                            return Id.CharLiteral;
                        },
                        else => {},
                    }
                    self.getc();
                }
            },
            '(' => {
                return Id.LParen;
            },
            ')' => {
                return Id.RParen;
            },
            '*' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.AsteriskEqual;
                }
                if (self.peek == '%') {
                    self.getc();
                    if (self.peek == '=') {
                        self.getc();
                        return Id.AsteriskPercentEqual;
                    }
                    return Id.AsteriskPercent;
                }
                if (self.peek == '*') {
                    self.getc();
                    return Id.AsteriskAsterisk;
                }
                return Id.Asterisk;
            },
            '+' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.PlusEqual;
                }
                if (self.peek == '%') {
                    self.getc();
                    if (self.peek == '=') {
                        self.getc();
                        return Id.PlusPercentEqual;
                    }
                    return Id.PlusPercent;
                }
                if (self.peek == '+') {
                    self.getc();
                    return Id.PlusPlus;
                }
                return Id.Plus;
            },
            ',' => {
                return Id.Comma;
            },
            '-' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.MinusEqual;
                }
                if (self.peek == '%') {
                    self.getc();
                    if (self.peek == '=') {
                        self.getc();
                        return Id.MinusPercentEqual;
                    }
                    return Id.MinusPercent;
                }
                if (self.peek == '>') {
                    self.getc();
                    return Id.MinusAngleBracketRight;
                }
                return Id.Minus;
            },
            '.' => {
                if (self.peek == '.') {
                    self.getc();
                    if (self.peek == '.') {
                        self.getc();
                        return Id.Ellipsis3;
                    }
                    return Id.Ellipsis2;
                }
                if (self.peek == '?') {
                    self.getc();
                    return Id.PeriodQuestionMark;
                }
                if (self.peek == '*') {
                    self.getc();
                    return Id.PeriodAsterisk;
                }
                return Id.Period;
            },
            '/' => {
                if (self.peek == '/') {
                    self.getc();
                    return self.comment();
                }
                if (self.peek == '=') {
                    self.getc();
                    return Id.SlashEqual;
                }
                return Id.Slash;
            },
            ':' => {
                return Id.Colon;
            },
            ';' => {
                return Id.Semicolon;
            },
            '<' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.AngleBracketLeftEqual;
                }
                if (self.peek == '<') {
                    self.getc();
                    if (self.peek == '=') {
                        self.getc();
                        return Id.AngleBracketAngleBracketLeftEqual;
                    }
                    return Id.AngleBracketAngleBracketLeft;
                }
                return Id.AngleBracketLeft;
            },
            '=' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.EqualEqual;
                }
                if (self.peek == '>') {
                    self.getc();
                    return Id.EqualAngleBracketRight;
                }
                return Id.Equal;
            },
            '>' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.AngleBracketRightEqual;
                }
                if (self.peek == '>') {
                    self.getc();
                    if (self.peek == '=') {
                        self.getc();
                        return Id.AngleBracketAngleBracketRightEqual;
                    }
                    return Id.AngleBracketAngleBracketRight;
                }
                return Id.AngleBracketRight;
            },
            '?' => {
                return Id.QuestionMark;
            },
            '@' => {
                // @"quoted identifier" or @builtin.
                if (self.peek == '"') {
                    while (true) {
                        switch (self.getcx()) {
                            '\n', -1 => {
                                // TODO: error
                                return Id.Identifier;
                            },
                            '\\' => {
                                const escape = self.getcx();
                                if (escape == '\n' or escape == -1)
                                    // TODO: error
                                    return Id.Identifier;
                            },
                            '"' => {
                                self.getc();
                                return Id.Identifier;
                            },
                            else => {},
                        }
                    }
                }
                // Builtin requires at least one identifier char after '@'.
                _ = self.identifier();
                if (self.first + 1 < self.index) return Id.Builtin;
                return Id.Invalid;
            },
            '[' => {
                // "[*c]" / "[*]" pointer tokens; otherwise rewind (valid: the
                // rewound bytes match what peek will re-scan) to plain '['.
                if (self.peek == '*') {
                    self.getc();
                    if (self.peek == 'c') {
                        self.getc();
                        if (self.peek == ']') {
                            self.getc();
                            return Id.BracketStarCBracket;
                        }
                        self.index -= 2;
                        return Id.LBracket;
                    }
                    if (self.peek == ']') {
                        self.getc();
                        return Id.BracketStarBracket;
                    }
                    self.index -= 1;
                }
                return Id.LBracket;
            },
            '\\' => {
                if (self.peek == '\\') {
                    self.getc();
                    return self.linestring(Id.LineString);
                }
                return Id.Invalid;
            },
            ']' => {
                return Id.RBracket;
            },
            '^' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.CaretEqual;
                }
                return Id.Caret;
            },
            'a' => {
                //Keyword_and
                if (self.peek == 'n') {
                    if (self.getcx() != 'd') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_and);
                }
                if (self.peek == 's') {
                    self.getc();
                    //Keyword_async
                    if (self.peek == 'y') {
                        if (self.getcx() != 'n') return self.identifier();
                        if (self.getcx() != 'c') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_async);
                    }
                    //Keyword_asm
                    if (self.peek == 'm') {
                        self.getc();
                        return self.identifierOr(Id.Keyword_asm);
                    }
                    return self.identifier();
                }
                if (self.peek == 'l') {
                    self.getc();
                    //Keyword_align
                    if (self.peek == 'i') {
                        if (self.getcx() != 'g') return self.identifier();
                        if (self.getcx() != 'n') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_align);
                    }
                    //Keyword_allowzero
                    if (self.peek == 'l') {
                        if (self.getcx() != 'o') return self.identifier();
                        if (self.getcx() != 'w') return self.identifier();
                        if (self.getcx() != 'z') return self.identifier();
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'r') return self.identifier();
                        if (self.getcx() != 'o') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_allowzero);
                    }
                    return self.identifier();
                }
                //Keyword_await
                if (self.peek == 'w') {
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 'i') return self.identifier();
                    if (self.getcx() != 't') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_await);
                }
                return self.identifier();
            },
            'b' => {
                //Keyword_break
                if (self.peek != 'r') return self.identifier();
                if (self.getcx() != 'e') return self.identifier();
                if (self.getcx() != 'a') return self.identifier();
                if (self.getcx() != 'k') return self.identifier();
                self.getc();
                return self.identifierOr(Id.Keyword_break);
            },
            'c' => {
                if (self.peek == 'o') {
                    self.getc();
                    if (self.peek == 'n') {
                        self.getc();
                        //Keyword_const
                        if (self.peek == 's') {
                            if (self.getcx() != 't') return self.identifier();
                            self.getc();
                            return self.identifierOr(Id.Keyword_const);
                        }
                        //Keyword_continue
                        if (self.peek == 't') {
                            if (self.getcx() != 'i') return self.identifier();
                            if (self.getcx() != 'n') return self.identifier();
                            if (self.getcx() != 'u') return self.identifier();
                            if (self.getcx() != 'e') return self.identifier();
                            self.getc();
                            return self.identifierOr(Id.Keyword_continue);
                        }
                        return self.identifier();
                    }
                    //Keyword_comptime
                    if (self.peek == 'm') {
                        if (self.getcx() != 'p') return self.identifier();
                        if (self.getcx() != 't') return self.identifier();
                        if (self.getcx() != 'i') return self.identifier();
                        if (self.getcx() != 'm') return self.identifier();
                        if (self.getcx() != 'e') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_comptime);
                    }
                    return self.identifier();
                }
                if (self.peek == 'a') {
                    self.getc();
                    //Keyword_catch
                    if (self.peek == 't') {
                        if (self.getcx() != 'c') return self.identifier();
                        if (self.getcx() != 'h') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_catch);
                    }
                    //Keyword_cancel
                    if (self.peek == 'n') {
                        if (self.getcx() != 'c') return self.identifier();
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'l') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_cancel);
                    }
                    return self.identifier();
                }
                // CString
                if (self.peek == '"') {
                    self.getc();
                    return self.string();
                }
                // LineCString
                if (self.peek == '\\') {
                    if (self.getcx() != '\\') return Id.Invalid;
                    return self.linestring(Id.LineCString);
                }
                return self.identifier();
            },
            'd' => {
                //Keyword_defer
                if (self.peek != 'e') return self.identifier();
                if (self.getcx() != 'f') return self.identifier();
                if (self.getcx() != 'e') return self.identifier();
                if (self.getcx() != 'r') return self.identifier();
                self.getc();
                return self.identifierOr(Id.Keyword_defer);
            },
            'e' => {
                //Keyword_else
                if (self.peek == 'l') {
                    if (self.getcx() != 's') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_else);
                }
                if (self.peek == 'r') {
                    if (self.getcx() != 'r') return self.identifier();
                    self.getc();
                    //Keyword_errdefer
                    if (self.peek == 'd') {
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'f') return self.identifier();
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'r') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_errdefer);
                    }
                    //Keyword_error
                    if (self.peek == 'o') {
                        if (self.getcx() != 'r') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_error);
                    }
                    return self.identifier();
                }
                //Keyword_enum
                if (self.peek == 'n') {
                    if (self.getcx() != 'u') return self.identifier();
                    if (self.getcx() != 'm') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_enum);
                }
                if (self.peek == 'x') {
                    self.getc();
                    //Keyword_extern
                    if (self.peek == 't') {
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'r') return self.identifier();
                        if (self.getcx() != 'n') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_extern);
                    }
                    //Keyword_export
                    if (self.peek == 'p') {
                        if (self.getcx() != 'o') return self.identifier();
                        if (self.getcx() != 'r') return self.identifier();
                        if (self.getcx() != 't') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_export);
                    }
                    return self.identifier();
                }
                return self.identifier();
            },
            'f' => {
                //Keyword_fn
                if (self.peek == 'n') {
                    self.getc();
                    return self.identifierOr(Id.Keyword_fn);
                }
                //Keyword_for
                if (self.peek == 'o') {
                    if (self.getcx() != 'r') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_for);
                }
                //Keyword_false
                if (self.peek == 'a') {
                    if (self.getcx() != 'l') return self.identifier();
                    if (self.getcx() != 's') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_false);
                }
                return self.identifier();
            },
            'i' => {
                //Keyword_if
                if (self.peek == 'f') {
                    self.getc();
                    return self.identifierOr(Id.Keyword_if);
                }
                //Keyword_inline
                if (self.peek == 'n') {
                    if (self.getcx() != 'l') return self.identifier();
                    if (self.getcx() != 'i') return self.identifier();
                    if (self.getcx() != 'n') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_inline);
                }
                return self.identifier();
            },
            'l' => {
                //Keyword_linksection
                if (self.peek != 'i') return self.identifier();
                if (self.getcx() != 'n') return self.identifier();
                if (self.getcx() != 'k') return self.identifier();
                if (self.getcx() != 's') return self.identifier();
                if (self.getcx() != 'e') return self.identifier();
                if (self.getcx() != 'c') return self.identifier();
                if (self.getcx() != 't') return self.identifier();
                if (self.getcx() != 'i') return self.identifier();
                if (self.getcx() != 'o') return self.identifier();
                if (self.getcx() != 'n') return self.identifier();
                self.getc();
                return self.identifierOr(Id.Keyword_linksection);
            },
            'n' => {
                //Keyword_null
                if (self.peek == 'u') {
                    if (self.getcx() != 'l') return self.identifier();
                    if (self.getcx() != 'l') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_null);
                }
                //Keyword_noalias
                if (self.peek == 'o') {
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 'l') return self.identifier();
                    if (self.getcx() != 'i') return self.identifier();
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 's') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_noalias);
                }
                //Keyword_nakedcc
                if (self.peek == 'a') {
                    if (self.getcx() != 'k') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    if (self.getcx() != 'd') return self.identifier();
                    if (self.getcx() != 'c') return self.identifier();
                    if (self.getcx() != 'c') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_nakedcc);
                }
                return self.identifier();
            },
            'o' => {
                if (self.peek != 'r') return self.identifier();
                //Keyword_or
                if (self.getcx() != 'e') return self.identifierOr(Id.Keyword_or);
                //Keyword_orelse
                if (self.getcx() != 'l') return self.identifier();
                if (self.getcx() != 's') return self.identifier();
                if (self.getcx() != 'e') return self.identifier();
                self.getc();
                return self.identifierOr(Id.Keyword_orelse);
            },
            'p' => {
                //Keyword_pub
                if (self.peek == 'u') {
                    if (self.getcx() != 'b') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_pub);
                }
                //Keyword_packed
                if (self.peek == 'a') {
                    if (self.getcx() != 'c') return self.identifier();
                    if (self.getcx() != 'k') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    if (self.getcx() != 'd') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_packed);
                }
                //Keyword_promise
                if (self.peek == 'r') {
                    if (self.getcx() != 'o') return self.identifier();
                    if (self.getcx() != 'm') return self.identifier();
                    if (self.getcx() != 'i') return self.identifier();
                    if (self.getcx() != 's') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_promise);
                }
                return self.identifier();
            },
            'r' => {
                if (self.peek != 'e') return self.identifier();
                self.getc();
                //Keyword_return
                if (self.peek == 't') {
                    if (self.getcx() != 'u') return self.identifier();
                    if (self.getcx() != 'r') return self.identifier();
                    if (self.getcx() != 'n') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_return);
                }
                //Keyword_resume
                if (self.peek == 's') {
                    if (self.getcx() != 'u') return self.identifier();
                    if (self.getcx() != 'm') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_resume);
                }
                return self.identifier();
            },
            's' => {
                if (self.peek == 't') {
                    self.getc();
                    //Keyword_struct
                    if (self.peek == 'r') {
                        if (self.getcx() != 'u') return self.identifier();
                        if (self.getcx() != 'c') return self.identifier();
                        if (self.getcx() != 't') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_struct);
                    }
                    //Keyword_stdcallcc
                    if (self.peek == 'd') {
                        if (self.getcx() != 'c') return self.identifier();
                        if (self.getcx() != 'a') return self.identifier();
                        if (self.getcx() != 'l') return self.identifier();
                        if (self.getcx() != 'l') return self.identifier();
                        if (self.getcx() != 'c') return self.identifier();
                        if (self.getcx() != 'c') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_stdcallcc);
                    }
                    return self.identifier();
                }
                //Keyword_switch
                if (self.peek == 'w') {
                    if (self.getcx() != 'i') return self.identifier();
                    if (self.getcx() != 't') return self.identifier();
                    if (self.getcx() != 'c') return self.identifier();
                    if (self.getcx() != 'h') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_switch);
                }
                //Keyword_suspend
                if (self.peek == 'u') {
                    if (self.getcx() != 's') return self.identifier();
                    if (self.getcx() != 'p') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    if (self.getcx() != 'n') return self.identifier();
                    if (self.getcx() != 'd') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_suspend);
                }
                return self.identifier();
            },
            't' => {
                if (self.peek == 'r') {
                    self.getc();
                    //Keyword_try
                    if (self.peek == 'y') {
                        self.getc();
                        return self.identifierOr(Id.Keyword_try);
                    }
                    //Keyword_true
                    if (self.peek == 'u') {
                        if (self.getcx() != 'e') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_true);
                    }
                    return self.identifier();
                }
                //Keyword_test
                if (self.peek == 'e') {
                    if (self.getcx() != 's') return self.identifier();
                    if (self.getcx() != 't') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_test);
                }
                //Keyword_threadlocal
                if (self.peek == 'h') {
                    if (self.getcx() != 'r') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 'd') return self.identifier();
                    if (self.getcx() != 'l') return self.identifier();
                    if (self.getcx() != 'o') return self.identifier();
                    if (self.getcx() != 'c') return self.identifier();
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 'l') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_threadlocal);
                }
                return self.identifier();
            },
            'u' => {
                if (self.peek == 's') {
                    self.getc();
                    //Keyword_use
                    if (self.peek == 'e') {
                        self.getc();
                        return self.identifierOr(Id.Keyword_use);
                    }
                    //Keyword_usingnamespace
                    if (self.peek != 'i') return self.identifier();
                    if (self.getcx() != 'n') return self.identifier();
                    if (self.getcx() != 'g') return self.identifier();
                    if (self.getcx() != 'n') return self.identifier();
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 'm') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    if (self.getcx() != 's') return self.identifier();
                    if (self.getcx() != 'p') return self.identifier();
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 'c') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_usingnamespace);
                }
                if (self.peek == 'n') {
                    self.getc();
                    //Keyword_union
                    if (self.peek == 'i') {
                        if (self.getcx() != 'o') return self.identifier();
                        if (self.getcx() != 'n') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_union);
                    }
                    //Keyword_undefined
                    if (self.peek == 'd') {
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'f') return self.identifier();
                        if (self.getcx() != 'i') return self.identifier();
                        if (self.getcx() != 'n') return self.identifier();
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'd') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_undefined);
                    }
                    //Keyword_unreachable
                    if (self.peek == 'r') {
                        if (self.getcx() != 'e') return self.identifier();
                        if (self.getcx() != 'a') return self.identifier();
                        if (self.getcx() != 'c') return self.identifier();
                        if (self.getcx() != 'h') return self.identifier();
                        if (self.getcx() != 'a') return self.identifier();
                        if (self.getcx() != 'b') return self.identifier();
                        if (self.getcx() != 'l') return self.identifier();
                        if (self.getcx() != 'e') return self.identifier();
                        self.getc();
                        return self.identifierOr(Id.Keyword_unreachable);
                    }
                    // No match falls out of the switch and is finished off by
                    // the identifier_state check below.
                }
            },
            'v' => {
                //Keyword_var
                if (self.peek == 'a') {
                    if (self.getcx() != 'r') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_var);
                }
                //Keyword_volatile
                if (self.peek == 'o') {
                    if (self.getcx() != 'l') return self.identifier();
                    if (self.getcx() != 'a') return self.identifier();
                    if (self.getcx() != 't') return self.identifier();
                    if (self.getcx() != 'i') return self.identifier();
                    if (self.getcx() != 'l') return self.identifier();
                    if (self.getcx() != 'e') return self.identifier();
                    self.getc();
                    return self.identifierOr(Id.Keyword_volatile);
                }
                return self.identifier();
            },
            'w' => {
                //Keyword_while
                if (self.peek != 'h') return self.identifier();
                if (self.getcx() != 'i') return self.identifier();
                if (self.getcx() != 'l') return self.identifier();
                if (self.getcx() != 'e') return self.identifier();
                self.getc();
                return self.identifierOr(Id.Keyword_while);
            },
            '{' => {
                // Distinguish block-opening LBrace from initializer LCurly by
                // looking at the byte before the '{'.
                if (self.index == 1) return Id.LBrace;
                switch (self.source[self.index - 2]) {
                    '(', ':', ' ', '\t', '\r', '\n' => return Id.LBrace,
                    else => return Id.LCurly,
                }
            },
            '|' => {
                if (self.peek == '=') {
                    self.getc();
                    return Id.PipeEqual;
                }
                if (self.peek == '|') {
                    self.getc();
                    return Id.PipePipe;
                }
                return Id.Pipe;
            },
            '}' => {
                return Id.RBrace;
            },
            '~' => {
                return Id.Tilde;
            },
            '0' => {
                if (self.peek == 'x') return self.hex();
                if (self.peek == 'o') return self.octal();
                if (self.peek == 'b') return self.binary();
                return self.digits();
            },
            '1'...'9' => {
                return self.digits();
            },
            else => {},
        }
        // Bytes with no dedicated branch (and partial keyword fall-throughs):
        // identifier characters continue as an identifier, anything else is
        // a one-byte Invalid token.
        if (identifier_state[@bitCast(u8, peek)] == 1) return self.identifier();
        return Id.Invalid;
    }
};

/// Ad-hoc smoke test: tokenize a small snippet and dump the tokens to stderr.
pub fn main() void {
    var lexer = Lexer.init("while(true) { var @vv; volatile; }");
    while (true) {
        const token = lexer.next();
        warn("{}\n", token);
        if (token.id == .Eof) break;
    }
}

test "zig_lexer.zig" {
    _ = @import("zig_lexer.test.zig");
}
zig/zig_lexer.zig
const std = @import("std");
const fmt = std.fmt;

const dns = @import("./dns.zig");
const Type = dns.ResourceType;

/// SOA (Start of Authority) record data.
pub const SOAData = struct {
    mname: dns.Name,
    rname: dns.Name,
    serial: u32,
    refresh: u32,
    retry: u32,
    expire: u32,
    minimum: u32,
};

/// MX (Mail Exchange) record data.
pub const MXData = struct {
    preference: u16,
    exchange: dns.Name,
};

/// SRV (Service locator) record data.
pub const SRVData = struct {
    priority: u16,
    weight: u16,
    port: u16,
    target: dns.Name,
};

/// Common representations of DNS' Resource Data.
pub const ResourceData = union(Type) {
    A: std.net.Address,
    AAAA: std.net.Address,

    NS: dns.Name,
    MD: dns.Name,
    MF: dns.Name,
    CNAME: dns.Name,
    SOA: SOAData,

    MB: dns.Name,
    MG: dns.Name,
    MR: dns.Name,

    // ????
    NULL: void,

    // TODO WKS bit map
    WKS: struct {
        addr: u32,
        proto: u8,

        // how to define bit map? align(8)?
    },
    PTR: dns.Name,

    // TODO replace by Name?
    HINFO: struct {
        cpu: []const u8,
        os: []const u8,
    },
    MINFO: struct {
        rmailbx: dns.Name,
        emailbx: dns.Name,
    },
    MX: MXData,
    TXT: []const u8,

    SRV: SRVData,

    const Self = @This();

    /// Size, in bytes, of this resource data when serialized in wire format.
    pub fn size(self: Self) usize {
        return switch (self) {
            .A => 4,
            .AAAA => 16,
            .NS, .MD, .MF, .MB, .MG, .MR, .CNAME, .PTR => |name| name.size(),
            // BUG FIX: the old branch iterated `text` as if it were a list of
            // strings (`string.len` on a `u8` does not compile for
            // `TXT: []const u8`) and its labeled block never broke out with a
            // value. A TXT character-string is one length octet followed by
            // the bytes themselves (RFC 1035), which is exactly what
            // fromOpaque() reads for `.TXT` below.
            .TXT => |text| 1 + text.len,
            else => @panic("TODO"),
        };
    }

    /// Format the RData into a prettier version of it.
    ///
    /// For example, a resource data of type A would be
    /// formatted to its representing IPv4 address.
    pub fn format(self: Self, comptime f: []const u8, options: fmt.FormatOptions, writer: anytype) !void {
        if (f.len != 0) {
            @compileError("Unknown format character: '" ++ f ++ "'");
        }
        switch (self) {
            .A, .AAAA => |addr| return fmt.format(writer, "{}", .{addr}),
            .NS, .MD, .MF, .MB, .MG, .MR, .CNAME, .PTR => |name| return fmt.format(writer, "{}", .{name}),
            .SOA => |soa| return fmt.format(writer, "{} {} {} {} {} {} {}", .{
                soa.mname,
                soa.rname,
                soa.serial,
                soa.refresh,
                soa.retry,
                soa.expire,
                soa.minimum,
            }),
            .MX => |mx| return fmt.format(writer, "{} {}", .{ mx.preference, mx.exchange }),
            .SRV => |srv| return fmt.format(writer, "{} {} {} {}", .{
                srv.priority,
                srv.weight,
                srv.port,
                srv.target,
            }),
            // NOTE(review): newer Zig versions require "{s}" to print
            // []const u8 / @tagName() results — confirm against the Zig
            // version this project targets before changing.
            .TXT => |text| return fmt.format(writer, "{}", .{text}),
            else => return fmt.format(writer, "TODO support {}", .{@tagName(self)}),
        }
    }

    /// Serialize this resource data (without any length prefix) through the
    /// given std.io-style serializer.
    pub fn serialize(self: Self, serializer: anytype) !void {
        switch (self) {
            .A => |addr| {
                try serializer.serialize(addr.in.sa.addr);
            },
            .AAAA => |addr| try serializer.serialize(addr.in6.sa.addr),
            .NS, .MD, .MF, .MB, .MG, .MR, .CNAME, .PTR => |name| try serializer.serialize(name),
            .SOA => |soa_data| {
                try serializer.serialize(soa_data.mname);
                try serializer.serialize(soa_data.rname);
                try serializer.serialize(soa_data.serial);
                try serializer.serialize(soa_data.refresh);
                try serializer.serialize(soa_data.retry);
                try serializer.serialize(soa_data.expire);
                try serializer.serialize(soa_data.minimum);
            },
            .MX => |mxdata| {
                try serializer.serialize(mxdata.preference);
                try serializer.serialize(mxdata.exchange);
            },
            .SRV => |srv| {
                try serializer.serialize(srv.priority);
                try serializer.serialize(srv.weight);
                try serializer.serialize(srv.port);
                try serializer.serialize(srv.target);
            },
            else => @panic("not implemented"),
        }
    }

    /// Deserialize a given opaque resource data.
    ///
    /// Name and label buffers are allocated through `ctx` and owned by its
    /// pools; the caller must not free them individually.
    pub fn fromOpaque(
        ctx: *dns.DeserializationContext,
        typ: dns.ResourceType,
        opaque_resource_data: []const u8,
    ) !ResourceData {
        const BufferT = std.io.FixedBufferStream([]const u8);
        var stream = BufferT{ .buffer = opaque_resource_data, .pos = 0 };
        const DeserializerT = std.io.Deserializer(.Big, .Bit, BufferT.Reader);
        var deserializer = DeserializerT.init(stream.reader());

        var rdata = switch (typ) {
            .A => blk: {
                // IPv4 address: four raw octets
                var ip4addr: [4]u8 = undefined;
                for (ip4addr) |_, i| {
                    ip4addr[i] = try deserializer.deserialize(u8);
                }
                break :blk ResourceData{
                    .A = std.net.Address.initIp4(ip4addr, 0),
                };
            },
            .AAAA => blk: {
                // IPv6 address: sixteen raw octets
                var ip6_addr: [16]u8 = undefined;
                for (ip6_addr) |byte, i| {
                    ip6_addr[i] = try deserializer.deserialize(u8);
                }
                break :blk ResourceData{
                    .AAAA = std.net.Address.initIp6(ip6_addr, 0, 0, 0),
                };
            },
            .NS => ResourceData{ .NS = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null) },
            .CNAME => ResourceData{ .CNAME = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null) },
            .PTR => ResourceData{ .PTR = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null) },
            .MX => blk: {
                break :blk ResourceData{
                    .MX = MXData{
                        .preference = try deserializer.deserialize(u16),
                        .exchange = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null),
                    },
                };
            },
            .MD => ResourceData{ .MD = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null) },
            .MF => ResourceData{ .MF = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null) },
            .SOA => blk: {
                var mname = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null);
                var rname = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null);
                var serial = try deserializer.deserialize(u32);
                var refresh = try deserializer.deserialize(u32);
                var retry = try deserializer.deserialize(u32);
                var expire = try deserializer.deserialize(u32);
                var minimum = try deserializer.deserialize(u32);

                break :blk ResourceData{
                    .SOA = SOAData{
                        .mname = mname,
                        .rname = rname,
                        .serial = serial,
                        .refresh = refresh,
                        .retry = retry,
                        .expire = expire,
                        .minimum = minimum,
                    },
                };
            },
            .SRV => blk: {
                const priority = try deserializer.deserialize(u16);
                const weight = try deserializer.deserialize(u16);
                const port = try deserializer.deserialize(u16);
                var target = try dns.Packet.readName(&deserializer, ctx, try createNameBuffer(ctx), null);

                break :blk ResourceData{
                    .SRV = .{
                        .priority = priority,
                        .weight = weight,
                        .port = port,
                        .target = target,
                    },
                };
            },
            .TXT => blk: {
                // One character-string: a length octet (max 255) followed by
                // that many bytes. The buffer is owned by ctx's label pool.
                var txt_buffer = try ctx.allocator.alloc(u8, 256);
                try ctx.label_pool.append(txt_buffer);

                const length = try deserializer.deserialize(u8);
                var idx: usize = 0;
                while (idx < length) : (idx += 1) {
                    txt_buffer[idx] = try deserializer.deserialize(u8);
                }
                break :blk ResourceData{ .TXT = txt_buffer[0..idx] };
            },
            else => {
                std.debug.warn("unexpected rdata: {}\n", .{typ});
                return error.InvalidRData;
            },
        };

        return rdata;
    }
};

/// Allocate a name-label buffer through the context's name pool.
/// The pool owns the buffer; callers must not free it individually.
fn createNameBuffer(ctx: *dns.DeserializationContext) ![][]const u8 {
    // TODO should we just keep this hardcoded? how could we better manage those
    // name buffers?
    var name_buffer = try ctx.allocator.alloc([]const u8, 128);
    try ctx.name_pool.append(name_buffer);
    return name_buffer;
}
src/pkg2/rdata.zig
//! Hand-drawn 16x16 toolbar icons and mouse cursors painted with NanoVG path
//! commands; coordinates are in icon-local pixels.
//! BUG FIX in this revision: `iconToolLine` and `iconCross` used the enum
//! literals `.Round`/`.Butt`, while every other call in this file uses the
//! lowercase `.round`/`.butt`/`.square`/`.miter`; Zig enum literals must match
//! the declared field names, so the two outliers are normalized to lowercase.
const std = @import("std");
const nvg = @import("nanovg");

pub fn iconNew(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(2.5, 0.5);
    vg.lineTo(2.5, 15.5);
    vg.lineTo(13.5, 15.5);
    vg.lineTo(13.5, 3.5);
    vg.lineTo(10.5, 0.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    // folded page corner
    vg.beginPath();
    vg.moveTo(8.5, 0.5);
    vg.lineTo(8.5, 5.5);
    vg.lineTo(13.5, 5.5);
    vg.stroke();
}

pub fn iconOpen(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(1.5, 1.5);
    vg.lineTo(0.5, 2.5);
    vg.lineTo(0.5, 14.5);
    vg.lineTo(12.5, 14.5);
    vg.lineTo(13.5, 13.5);
    vg.lineTo(15.5, 8.5);
    vg.lineTo(15.5, 7.5);
    vg.lineTo(13.5, 7.5);
    vg.lineTo(13.5, 2.5);
    vg.lineTo(6.5, 2.5);
    vg.lineTo(5.5, 1.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(245, 218, 97));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(13.5, 7.5);
    vg.lineTo(4.5, 7.5);
    vg.lineTo(2.5, 12.5);
    vg.stroke();
}

pub fn iconSave(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(0.5, 0.5);
    vg.lineTo(0.5, 14.5);
    vg.lineTo(1.5, 15.5);
    vg.lineTo(15.5, 15.5);
    vg.lineTo(15.5, 0.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(40, 140, 200));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(3, 10);
    vg.lineTo(3, 15);
    vg.lineTo(13, 15);
    vg.lineTo(13, 10);
    vg.fillColor(nvg.rgb(171, 171, 171));
    vg.fill();
    vg.beginPath();
    vg.moveTo(4, 11);
    vg.lineTo(4, 14);
    vg.lineTo(6, 14);
    vg.lineTo(6, 11);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.moveTo(3, 1);
    vg.lineTo(3, 8);
    vg.lineTo(13, 8);
    vg.lineTo(13, 1);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
    vg.beginPath();
    vg.moveTo(3, 1);
    vg.lineTo(3, 2);
    vg.lineTo(13, 2);
    vg.lineTo(13, 1);
    vg.fillColor(nvg.rgb(250, 10, 0));
    vg.fill();
    vg.beginPath();
    vg.moveTo(4, 3);
    vg.lineTo(4, 5);
    vg.lineTo(12, 5);
    vg.lineTo(12, 3);
    vg.moveTo(4, 6);
    vg.lineTo(4, 7);
    vg.lineTo(12, 7);
    vg.lineTo(12, 6);
    vg.fillColor(nvg.rgb(224, 224, 224));
    vg.fill();
}

pub fn iconSaveAs(vg: nvg) void {
    vg.save();
    defer vg.restore();
    iconSave(vg);
    vg.translate(1, 1);
    iconToolPen(vg);
}

pub fn iconUndoEnabled(vg: nvg) void {
    iconUndo(vg, true);
}

pub fn iconUndoDisabled(vg: nvg) void {
    iconUndo(vg, false);
}

fn iconUndo(vg: nvg, enabled: bool) void {
    vg.beginPath();
    vg.arc(8, 8, 6, -0.75 * std.math.pi, 0.75 * std.math.pi, .cw);
    vg.lineCap(.round);
    vg.strokeColor(if (enabled) nvg.rgb(80, 80, 80) else nvg.rgb(170, 170, 170));
    vg.strokeWidth(4);
    vg.stroke();
    vg.beginPath();
    vg.moveTo(0.5, 7.5);
    vg.lineTo(0.5, 0.5);
    vg.lineTo(1.5, 0.5);
    vg.lineTo(7.5, 6.5);
    vg.lineTo(7.5, 7.5);
    vg.closePath();
    vg.fillColor(if (enabled) nvg.rgb(255, 255, 255) else nvg.rgb(170, 170, 170));
    vg.fill();
    vg.strokeWidth(1);
    vg.strokeColor(if (enabled) nvg.rgb(80, 80, 80) else nvg.rgb(170, 170, 170));
    vg.stroke();
    vg.beginPath();
    vg.arc(8, 8, 6, -0.75 * std.math.pi, 0.75 * std.math.pi, .cw);
    vg.strokeColor(if (enabled) nvg.rgb(255, 255, 255) else nvg.rgb(170, 170, 170));
    vg.strokeWidth(2);
    vg.stroke();
    // reset
    vg.lineCap(.butt);
    vg.strokeWidth(1);
}

pub fn iconRedoEnabled(vg: nvg) void {
    iconRedo(vg, true);
}

pub fn iconRedoDisabled(vg: nvg) void {
    iconRedo(vg, false);
}

fn iconRedo(vg: nvg, enabled: bool) void {
    vg.beginPath();
    vg.arc(8, 8, 6, -0.25 * std.math.pi, 0.25 * std.math.pi, .ccw);
    vg.lineCap(.round);
    vg.strokeColor(if (enabled) nvg.rgb(80, 80, 80) else nvg.rgb(170, 170, 170));
    vg.strokeWidth(4);
    vg.stroke();
    vg.beginPath();
    vg.moveTo(15.5, 7.5);
    vg.lineTo(15.5, 0.5);
    vg.lineTo(14.5, 0.5);
    vg.lineTo(8.5, 6.5);
    vg.lineTo(8.5, 7.5);
    vg.closePath();
    vg.fillColor(if (enabled) nvg.rgb(255, 255, 255) else nvg.rgb(170, 170, 170));
    vg.fill();
    vg.strokeWidth(1);
    vg.strokeColor(if (enabled) nvg.rgb(80, 80, 80) else nvg.rgb(170, 170, 170));
    vg.stroke();
    vg.beginPath();
    vg.arc(8, 8, 6, -0.25 * std.math.pi, 0.25 * std.math.pi, .ccw);
    vg.strokeColor(if (enabled) nvg.rgb(255, 255, 255) else nvg.rgb(170, 170, 170));
    vg.strokeWidth(2);
    vg.stroke();
    // reset
    vg.lineCap(.butt);
    vg.strokeWidth(1);
}

pub fn iconCut(vg: nvg) void {
    vg.beginPath();
    vg.ellipse(4, 13, 2, 2);
    vg.ellipse(12, 13, 2, 2);
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.strokeWidth(2);
    vg.stroke();
    vg.beginPath();
    vg.moveTo(10, 10);
    vg.lineTo(4.5, 0.5);
    vg.lineTo(3.5, 0.5);
    vg.lineTo(3.5, 3.5);
    vg.lineTo(3.5, 3.5);
    vg.lineTo(7, 10);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
    vg.strokeWidth(1);
    vg.stroke();
    vg.beginPath();
    vg.moveTo(6, 10);
    vg.lineTo(11.5, 0.5);
    vg.lineTo(12.5, 0.5);
    vg.lineTo(12.5, 3.5);
    vg.lineTo(12.5, 3.5);
    vg.lineTo(9, 10);
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(6, 9);
    vg.lineTo(4.5, 10.5);
    vg.lineTo(7, 13);
    vg.lineTo(7, 11.5);
    vg.lineTo(7.5, 11);
    vg.lineTo(8.5, 11);
    vg.lineTo(9, 11.5);
    vg.lineTo(9, 13);
    vg.lineTo(11.5, 10.5);
    vg.lineTo(10, 9);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconCopyEnabled(vg: nvg) void {
    iconCopy(vg, true);
}

pub fn iconCopyDisabled(vg: nvg) void {
    iconCopy(vg, false);
}

pub fn iconCopy(vg: nvg, enabled: bool) void {
    // Draws the page twice, shifting by (3, 5) between passes.
    // NOTE(review): the final translate is not undone here; callers appear to
    // rely on their own save/restore — confirm before reusing standalone.
    for ([_]u0{ 0, 0 }) |_| {
        vg.beginPath();
        vg.moveTo(2.5, 0.5);
        vg.lineTo(2.5, 10.5);
        vg.lineTo(10.5, 10.5);
        vg.lineTo(10.5, 2.5);
        vg.lineTo(8.5, 0.5);
        vg.closePath();
        vg.fillColor(if (enabled) nvg.rgb(255, 255, 255) else nvg.rgb(224, 224, 224));
        vg.fill();
        vg.strokeColor(if (enabled) nvg.rgb(66, 66, 66) else nvg.rgb(170, 170, 170));
        vg.stroke();
        vg.beginPath();
        vg.moveTo(7.5, 0.5);
        vg.lineTo(7.5, 3.5);
        vg.lineTo(10.5, 3.5);
        vg.stroke();
        vg.translate(3, 5);
    }
}

pub fn iconPasteEnabled(vg: nvg) void {
    iconPaste(vg, true);
}

pub fn iconPasteDisabled(vg: nvg) void {
    iconPaste(vg, false);
}

pub fn iconPaste(vg: nvg, enabled: bool) void {
    const stroke_color = if (enabled) nvg.rgb(66, 66, 66) else nvg.rgb(170, 170, 170);
    vg.beginPath();
    vg.roundedRect(1.5, 1.5, 13, 14, 1.5);
    vg.fillColor(if (enabled) nvg.rgb(215, 162, 71) else stroke_color);
    vg.fill();
    vg.strokeColor(stroke_color);
    vg.stroke();
    vg.beginPath();
    vg.rect(3.5, 3.5, 9, 10);
    vg.fillColor(if (enabled) nvg.rgb(255, 255, 255) else nvg.rgb(224, 224, 224)); // TODO: use gui constant or alpha
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(6.5, 0.5);
    vg.lineTo(6.5, 1.5);
    vg.lineTo(5.5, 2.5);
    vg.lineTo(5.5, 4.5);
    vg.lineTo(10.5, 4.5);
    vg.lineTo(10.5, 2.5);
    vg.lineTo(9.5, 1.5);
    vg.lineTo(9.5, 0.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(170, 170, 170));
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.rect(5, 6, 6, 1);
    vg.rect(5, 8, 4, 1);
    vg.rect(5, 10, 5, 1);
    vg.fillColor(stroke_color);
    vg.fill();
}

pub fn iconToolCrop(vg: nvg) void {
    vg.fillColor(nvg.rgb(170, 170, 170));
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.beginPath();
    vg.moveTo(2.5, 0.5);
    vg.lineTo(2.5, 13.5);
    vg.lineTo(15.5, 13.5);
    vg.lineTo(15.5, 10.5);
    vg.lineTo(5.5, 10.5);
    vg.lineTo(5.5, 0.5);
    vg.closePath();
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(0.5, 5.5);
    vg.lineTo(10.5, 5.5);
    vg.lineTo(10.5, 15.5);
    vg.lineTo(13.5, 15.5);
    vg.lineTo(13.5, 2.5);
    vg.lineTo(0.5, 2.5);
    vg.closePath();
    vg.fill();
    vg.stroke();
}

pub fn iconToolSelect(vg: nvg) void {
    // dashed selection rectangle
    vg.beginPath();
    vg.moveTo(1.5, 4);
    vg.lineTo(1.5, 1.5);
    vg.lineTo(4, 1.5);
    vg.moveTo(6, 1.5);
    vg.lineTo(10, 1.5);
    vg.moveTo(12, 1.5);
    vg.lineTo(14.5, 1.5);
    vg.lineTo(14.5, 4);
    vg.moveTo(14.5, 6);
    vg.lineTo(14.5, 10);
    vg.moveTo(14.5, 12);
    vg.lineTo(14.5, 14.5);
    vg.lineTo(12, 14.5);
    vg.moveTo(10, 14.5);
    vg.lineTo(6, 14.5);
    vg.moveTo(4, 14.5);
    vg.lineTo(1.5, 14.5);
    vg.lineTo(1.5, 12);
    vg.moveTo(1.5, 10);
    vg.lineTo(1.5, 6);
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
}

pub fn iconToolLine(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(13, 1);
    vg.lineTo(5, 5);
    vg.lineTo(10, 10);
    vg.lineTo(2, 14);
    // was `.Round`/`.Butt` — normalized to the lowercase enum literals used
    // everywhere else in this file
    vg.lineCap(.round);
    defer vg.lineCap(.butt);
    vg.strokeWidth(2);
    defer vg.strokeWidth(1);
    vg.strokeColor(nvg.rgb(0, 0, 0));
    vg.stroke();
}

pub fn iconToolPen(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(5.5, 14.5);
    vg.lineTo(5.5, 12.5);
    vg.lineTo(15.5, 2.5);
    vg.lineTo(15.5, 4.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(68, 137, 26));
    vg.fill();
    vg.beginPath();
    vg.moveTo(5.5, 12.5);
    vg.lineTo(3.5, 10.5);
    vg.lineTo(13.5, 0.5);
    vg.lineTo(15.5, 2.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(163, 206, 39));
    vg.fill();
    vg.beginPath();
    vg.moveTo(3.5, 10.5);
    vg.lineTo(1.5, 10.5);
    vg.lineTo(11.5, 0.5);
    vg.lineTo(13.5, 0.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(213, 228, 102));
    vg.fill();
    vg.lineJoin(.round);
    defer vg.lineJoin(.miter);
    vg.beginPath();
    vg.moveTo(0.5, 15.5);
    vg.lineTo(1.5, 10.5);
    vg.lineTo(11.5, 0.5);
    vg.lineTo(13.5, 0.5);
    vg.lineTo(15.5, 2.5);
    vg.lineTo(15.5, 4.5);
    vg.lineTo(5.5, 14.5);
    vg.closePath();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(0.5, 15.5);
    vg.lineTo(1.5, 10.5);
    vg.lineTo(3.5, 10.5);
    vg.lineTo(5.5, 12.5);
    vg.lineTo(5.5, 14.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(217, 190, 138));
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(0.5, 15.5);
    vg.lineTo(1, 13.5);
    vg.lineTo(2.5, 15);
    vg.closePath();
    vg.stroke();
}

pub fn iconToolBucket(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(9.5, 2.5);
    vg.lineTo(3.5, 8.5);
    vg.lineTo(8.5, 13.5);
    vg.bezierTo(9.5, 14.5, 11.5, 14.5, 12.5, 13.5);
    vg.lineTo(14.5, 11.5);
    vg.bezierTo(15.5, 10.5, 15.5, 8.5, 14.5, 7.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(171, 171, 171));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(4.5, 9.5);
    vg.lineTo(10.5, 3.5);
    vg.stroke();
    vg.beginPath();
    vg.roundedRect(8.5, 0.5, 2, 9, 1);
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.ellipse(9.5, 8.5, 1, 1);
    vg.stroke();
    vg.beginPath();
    vg.moveTo(3.5, 10.5);
    vg.lineTo(3.5, 8.5);
    vg.lineTo(6.5, 5.5);
    vg.lineTo(5, 5.5);
    vg.bezierTo(2, 5.5, 0.5, 7, 0.5, 10.5);
    vg.bezierTo(0.5, 12, 1, 12.5, 2, 12.5);
    vg.bezierTo(3, 12.5, 3.5, 12, 3.5, 10.5);
    vg.fillColor(nvg.rgb(210, 80, 60));
    vg.fill();
    vg.stroke();
}

pub fn iconMirrorHorizontally(vg: nvg) void {
    // dashed vertical mirror axis
    vg.beginPath();
    var y: f32 = 0;
    while (y < 16) : (y += 2) {
        vg.moveTo(7.5, y + 0);
        vg.lineTo(7.5, y + 1);
    }
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(5, 2);
    vg.lineTo(5, 3);
    vg.lineTo(9, 3);
    vg.lineTo(9, 5);
    vg.lineTo(11.5, 2.5);
    vg.lineTo(9, 0);
    vg.lineTo(9, 2);
    vg.closePath();
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.rect(0.5, 5.5, 5, 5);
    vg.strokeColor(nvg.rgb(170, 170, 170));
    vg.stroke();
    vg.beginPath();
    vg.rect(9.5, 5.5, 5, 5);
    vg.fillColor(nvg.rgb(247, 226, 107));
    vg.fill();
    vg.strokeColor(nvg.rgb(164, 100, 34));
    vg.stroke();
}

pub fn iconMirrorVertically(vg: nvg) void {
    vg.save();
    defer vg.restore();
    vg.scale(-1, 1);
    vg.rotate(0.5 * std.math.pi);
    iconMirrorHorizontally(vg);
}

pub fn iconRotateCw(vg: nvg) void {
    vg.beginPath();
    vg.rect(1.5, 8.5, 14, 6);
    vg.strokeColor(nvg.rgb(170, 170, 170));
    vg.stroke();
    vg.beginPath();
    vg.rect(9.5, 0.5, 6, 14);
    vg.fillColor(nvg.rgb(247, 226, 107));
    vg.fill();
    vg.strokeColor(nvg.rgb(164, 100, 34));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(3.5, 7);
    vg.quadTo(3.5, 4.5, 6, 4.5);
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(6, 7.5);
    vg.lineTo(9, 4.5);
    vg.lineTo(6, 1.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconRotateCcw(vg: nvg) void {
    vg.beginPath();
    vg.rect(0.5, 8.5, 14, 6);
    vg.strokeColor(nvg.rgb(170, 170, 170));
    vg.stroke();
    vg.beginPath();
    vg.rect(0.5, 0.5, 6, 14);
    vg.fillColor(nvg.rgb(247, 226, 107));
    vg.fill();
    vg.strokeColor(nvg.rgb(164, 100, 34));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(12.5, 7);
    vg.quadTo(12.5, 4.5, 10, 4.5);
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(10, 7.5);
    vg.lineTo(7, 4.5);
    vg.lineTo(10, 1.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconPixelGrid(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(0, 0.5);
    vg.lineTo(16, 0.5);
    vg.moveTo(0, 5.5);
    vg.lineTo(16, 5.5);
    vg.moveTo(0, 10.5);
    vg.lineTo(16, 10.5);
    vg.moveTo(0, 15.5);
    vg.lineTo(16, 15.5);
    vg.moveTo(0.5, 0);
    vg.lineTo(0.5, 16);
    vg.moveTo(5.5, 0);
    vg.lineTo(5.5, 16);
    vg.moveTo(10.5, 0);
    vg.lineTo(10.5, 16);
    vg.moveTo(15.5, 0);
    vg.lineTo(15.5, 16);
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
}

pub fn iconCustomGrid(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(1, 2.5);
    vg.lineTo(4, 2.5);
    vg.moveTo(5, 2.5);
    vg.lineTo(7, 2.5);
    vg.moveTo(8, 2.5);
    vg.lineTo(10, 2.5);
    vg.moveTo(11, 2.5);
    vg.lineTo(14, 2.5);
    vg.moveTo(1, 12.5);
    vg.lineTo(4, 12.5);
    vg.moveTo(5, 12.5);
    vg.lineTo(7, 12.5);
    vg.moveTo(8, 12.5);
    vg.lineTo(10, 12.5);
    vg.moveTo(11, 12.5);
    vg.lineTo(14, 12.5);
    vg.moveTo(2.5, 1);
    vg.lineTo(2.5, 4);
    vg.moveTo(2.5, 5);
    vg.lineTo(2.5, 7);
    vg.moveTo(2.5, 8);
    vg.lineTo(2.5, 10);
    vg.moveTo(2.5, 11);
    vg.lineTo(2.5, 14);
    vg.moveTo(12.5, 1);
    vg.lineTo(12.5, 4);
    vg.moveTo(12.5, 5);
    vg.lineTo(12.5, 7);
    vg.moveTo(12.5, 8);
    vg.lineTo(12.5, 10);
    vg.moveTo(12.5, 11);
    vg.lineTo(12.5, 14);
    vg.strokeColor(nvg.rgb(40, 140, 200));
    vg.stroke();
}

pub fn iconSnapEnabled(vg: nvg) void {
    iconSnap(vg, true);
}

pub fn iconSnapDisabled(vg: nvg) void {
    iconSnap(vg, false);
}

pub fn iconSnap(vg: nvg, enabled: bool) void {
    vg.beginPath();
    vg.moveTo(1.5, 0.5);
    vg.lineTo(1.5, 12.5);
    vg.lineTo(2.5, 14.5);
    vg.lineTo(4.5, 15.5);
    vg.lineTo(11.5, 15.5);
    vg.lineTo(13.5, 14.5);
    vg.lineTo(14.5, 12.5);
    vg.lineTo(14.5, 0.5);
    vg.lineTo(10.5, 0.5);
    vg.lineTo(10.5, 10.5);
    vg.lineTo(9.5, 11.5);
    vg.lineTo(6.5, 11.5);
    vg.lineTo(5.5, 10.5);
    vg.lineTo(5.5, 0.5);
    vg.closePath();
    vg.fillColor(if (enabled) nvg.rgb(250, 8, 0) else nvg.rgb(170, 170, 170));
    vg.fill();
    vg.strokeColor(if (enabled) nvg.rgb(66, 66, 66) else nvg.rgb(170, 170, 170));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(2, 1);
    vg.lineTo(2, 4);
    vg.lineTo(5, 4);
    vg.lineTo(5, 1);
    vg.moveTo(11, 1);
    vg.lineTo(11, 4);
    vg.lineTo(14, 4);
    vg.lineTo(14, 1);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
}

pub fn iconAbout(vg: nvg) void {
    vg.beginPath();
    vg.ellipse(8, 8, 6.5, 6.5);
    vg.fillColor(nvg.rgb(40, 140, 200));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.ellipse(8, 5, 1, 1);
    vg.moveTo(6, 12);
    vg.lineTo(10, 12);
    vg.lineTo(10, 11);
    vg.lineTo(9, 11);
    vg.lineTo(9, 7);
    vg.lineTo(6, 7);
    vg.lineTo(6, 8);
    vg.lineTo(7, 8);
    vg.lineTo(7, 11);
    vg.lineTo(6, 11);
    vg.closePath();
    vg.fillColor(nvg.rgbf(1, 1, 1));
    vg.fill();
}

pub fn iconColorPalette(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(8, 1.5);
    vg.bezierTo(12, 1.5, 15.5, 4, 15.5, 8);
    vg.bezierTo(15.5, 12, 12, 14.5, 8, 14.5);
    vg.bezierTo(4, 14.5, 4, 11.5, 3, 10.5);
    vg.bezierTo(2, 9.5, 0.5, 10, 0.5, 8);
    vg.bezierTo(0.5, 4, 4, 1.5, 8, 1.5);
    vg.closePath();
    vg.pathWinding(.ccw);
    vg.circle(7, 11, 1.5);
    vg.pathWinding(.cw);
    vg.fillColor(nvg.rgb(245, 218, 97));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.circle(4, 7, 2);
    vg.fillColor(nvg.rgb(250, 10, 0));
    vg.fill();
    vg.beginPath();
    vg.circle(8, 5, 2);
    vg.fillColor(nvg.rgb(30, 170, 15));
    vg.fill();
    vg.beginPath();
    vg.circle(12, 7, 2);
    vg.fillColor(nvg.rgb(40, 140, 200));
    vg.fill();
}

pub fn iconPlus(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(10.5, 8.5);
    vg.lineTo(10.5, 10.5);
    vg.lineTo(8.5, 10.5);
    vg.lineTo(8.5, 13.5);
    vg.lineTo(10.5, 13.5);
    vg.lineTo(10.5, 15.5);
    vg.lineTo(13.5, 15.5);
    vg.lineTo(13.5, 13.5);
    vg.lineTo(15.5, 13.5);
    vg.lineTo(15.5, 10.5);
    vg.lineTo(13.5, 10.5);
    vg.lineTo(13.5, 8.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(60, 175, 45));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
}

pub fn iconMinus(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(8.5, 10.5);
    vg.lineTo(8.5, 13.5);
    vg.lineTo(15.5, 13.5);
    vg.lineTo(15.5, 10.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(250, 10, 0));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
}

pub fn iconDelete(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(7, 0.5);
    vg.lineTo(6.5, 1);
    vg.lineTo(6.5, 2.5);
    vg.lineTo(3, 2.5);
    vg.lineTo(2.5, 3);
    vg.lineTo(2.5, 5.5);
    vg.lineTo(3.5, 5.5);
    vg.lineTo(3.5, 15);
    vg.lineTo(4, 15.5);
    vg.lineTo(12, 15.5);
    vg.lineTo(12.5, 15);
    vg.lineTo(12.5, 5.5);
    vg.lineTo(13.5, 5.5);
    vg.lineTo(13.5, 3);
    vg.lineTo(13, 2.5);
    vg.lineTo(9.5, 2.5);
    vg.lineTo(9.5, 1);
    vg.lineTo(9, 0.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(170, 170, 170));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
    vg.beginPath();
    vg.moveTo(6.5, 2.5);
    vg.lineTo(9.5, 2.5);
    vg.moveTo(3.5, 5.5);
    vg.lineTo(12.5, 5.5);
    vg.moveTo(6.5, 7);
    vg.lineTo(6.5, 14);
    vg.moveTo(9.5, 7);
    vg.lineTo(9.5, 14);
    vg.stroke();
}

pub fn iconMoveUp(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(8, 1);
    vg.lineTo(1.5, 7.5);
    vg.lineTo(1.5, 9.5);
    vg.lineTo(4.5, 9.5);
    vg.lineTo(4.5, 14.5);
    vg.lineTo(11.5, 14.5);
    vg.lineTo(11.5, 9.5);
    vg.lineTo(14.5, 9.5);
    vg.lineTo(14.5, 7.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
}

pub fn iconMoveDown(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(8, 15);
    vg.lineTo(14.5, 8.5);
    vg.lineTo(14.5, 6.5);
    vg.lineTo(11.5, 6.5);
    vg.lineTo(11.5, 1.5);
    vg.lineTo(4.5, 1.5);
    vg.lineTo(4.5, 6.5);
    vg.lineTo(1.5, 6.5);
    vg.lineTo(1.5, 8.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
}

pub fn iconCapButt(vg: nvg) void {
    vg.beginPath();
    vg.rect(7, 0, 8, 15);
    vg.rect(5, 5, 5, 5);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.rect(7, 7, 8, 1);
    vg.rect(6, 6, 3, 3);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
}

pub fn iconCapRound(vg: nvg) void {
    vg.beginPath();
    vg.rect(7.5, 0, 7.5, 15);
    vg.circle(7.5, 7.5, 7.5);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.rect(7, 7, 8, 1);
    vg.rect(6, 6, 3, 3);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
}

pub fn iconCapSquare(vg: nvg) void {
    vg.beginPath();
    vg.rect(0, 0, 15, 15);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.rect(7, 7, 8, 1);
    vg.rect(6, 6, 3, 3);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
}

pub fn iconJoinRound(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(15, 15);
    vg.lineTo(15, 0);
    vg.arcTo(0, 0, 0, 7.5, 7.5);
    vg.lineTo(0, 15);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.rect(7, 7, 8, 1);
    vg.rect(7, 7, 1, 8);
    vg.rect(6, 6, 3, 3);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
}

pub fn iconJoinBevel(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(15, 15);
    vg.lineTo(15, 0);
    vg.lineTo(7.5, 0);
    vg.lineTo(0, 7.5);
    vg.lineTo(0, 15);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.rect(7, 7, 8, 1);
    vg.rect(7, 7, 1, 8);
    vg.rect(6, 6, 3, 3);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
}

pub fn iconJoinSquare(vg: nvg) void {
    vg.beginPath();
    vg.rect(0, 0, 15, 15);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
    vg.beginPath();
    vg.rect(7, 7, 8, 1);
    vg.rect(7, 7, 1, 8);
    vg.rect(6, 6, 3, 3);
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
}

pub fn iconCross(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(4, 4);
    vg.lineTo(11, 11);
    vg.moveTo(4, 11);
    vg.lineTo(11, 4);
    // was `.Round`/`.Butt` — normalized to the lowercase enum literals used
    // everywhere else in this file
    vg.lineCap(.round);
    defer vg.lineCap(.butt);
    vg.strokeWidth(2);
    defer vg.strokeWidth(1);
    vg.strokeColor(nvg.rgb(66, 66, 66));
    vg.stroke();
}

pub fn iconTimelineBegin(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(11, 2);
    vg.lineTo(11, 11);
    vg.lineTo(2, 6.5);
    vg.closePath();
    vg.rect(2, 2, 1, 9);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconTimelineLeft(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(6, 2);
    vg.lineTo(6, 11);
    vg.lineTo(1.5, 6.5);
    vg.closePath();
    vg.rect(7, 2, 2, 9);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconTimelinePlay(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(2, 2);
    vg.lineTo(2, 11);
    vg.lineTo(11, 6.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconTimelinePause(vg: nvg) void {
    vg.beginPath();
    vg.rect(4, 2, 2, 9);
    vg.rect(7, 2, 2, 9);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconTimelineRight(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(7, 2);
    vg.lineTo(7, 11);
    vg.lineTo(11.5, 6.5);
    vg.closePath();
    vg.rect(4, 2, 2, 9);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconTimelineEnd(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(2, 2);
    vg.lineTo(2, 11);
    vg.lineTo(11, 6.5);
    vg.closePath();
    vg.rect(10, 2, 1, 9);
    vg.fillColor(nvg.rgb(66, 66, 66));
    vg.fill();
}

pub fn iconOnionSkinning(vg: nvg) void {
    vg.beginPath();
    vg.rect(1.5, 1.5, 9, 9);
    vg.fillColor(nvg.rgb(203, 219, 252));
    vg.fill();
    vg.strokeColor(nvg.rgb(95, 205, 228));
    vg.stroke();
    vg.beginPath();
    vg.rect(5.5, 5.5, 9, 9);
    vg.fillColor(nvg.rgb(99, 155, 255));
    vg.fill();
    vg.strokeColor(nvg.rgb(48, 96, 130));
    vg.stroke();
}

pub fn cursorArrow(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(-0.5, -0.5);
    vg.lineTo(-0.5, 12.5);
    vg.lineTo(3.5, 8.5);
    vg.lineTo(8.5, 8.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(0, 0, 0));
    vg.fill();
    vg.strokeColor(nvg.rgb(255, 255, 255));
    vg.stroke();
}

pub fn cursorArrowInverted(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(-0.5, -0.5);
    vg.lineTo(-0.5, 12.5);
    vg.lineTo(3.5, 8.5);
    vg.lineTo(8.5, 8.5);
    vg.closePath();
    vg.fillColor(nvg.rgb(255, 255, 255));
    vg.fill();
    vg.strokeColor(nvg.rgb(0, 0, 0));
    vg.stroke();
}

pub fn cursorCrosshair(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(-5.5, 0.5);
    vg.lineTo(-2.5, 0.5);
    vg.moveTo(3.5, 0.5);
    vg.lineTo(6.5, 0.5);
    vg.moveTo(0.5, -5.5);
    vg.lineTo(0.5, -2.5);
    vg.moveTo(0.5, 3.5);
    vg.lineTo(0.5, 6.5);
    vg.lineCap(.square);
    defer vg.lineCap(.butt);
    vg.strokeColor(nvg.rgbf(1, 1, 1));
    vg.strokeWidth(2);
    vg.stroke();
    vg.strokeWidth(1);
    vg.strokeColor(nvg.rgb(0, 0, 0));
    vg.stroke();
    vg.beginPath();
    vg.rect(-0.5, -0.5, 2, 2);
    vg.fillColor(nvg.rgbf(1, 1, 1));
    vg.fill();
    vg.beginPath();
    vg.rect(0, 0, 1, 1);
    vg.fillColor(nvg.rgb(0, 0, 0));
    vg.fill();
}

pub fn cursorPen(vg: nvg) void {
    vg.fillColor(nvg.rgbf(1, 1, 1));
    vg.strokeColor(nvg.rgb(0, 0, 0));
    vg.save();
    vg.scale(1, -1);
    vg.translate(0, -16);
    vg.lineJoin(.round);
    defer vg.restore();
    vg.beginPath();
    vg.moveTo(0.5, 15.5);
    vg.lineTo(1.5, 10.5);
    vg.lineTo(11.5, 0.5);
    vg.lineTo(13.5, 0.5);
    vg.lineTo(15.5, 2.5);
    vg.lineTo(15.5, 4.5);
    vg.lineTo(5.5, 14.5);
    vg.closePath();
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(0.5, 15.5);
    vg.lineTo(1.5, 10.5);
    vg.lineTo(3.5, 10.5);
    vg.lineTo(5.5, 12.5);
    vg.lineTo(5.5, 14.5);
    vg.closePath();
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(0.5, 15.5);
    vg.lineTo(1, 13.5);
    vg.lineTo(2.5, 15);
    vg.closePath();
    vg.stroke();
}

pub fn cursorBucket(vg: nvg) void {
    cursorCrosshair(vg);
    vg.fillColor(nvg.rgbf(1, 1, 1));
    vg.strokeColor(nvg.rgb(0, 0, 0));
    vg.save();
    defer vg.restore();
    vg.translate(3, -15);
    vg.beginPath();
    vg.moveTo(9.5, 2.5);
    vg.lineTo(3.5, 8.5);
    vg.lineTo(8.5, 13.5);
    vg.bezierTo(9.5, 14.5, 11.5, 14.5, 12.5, 13.5);
    vg.lineTo(14.5, 11.5);
    vg.bezierTo(15.5, 10.5, 15.5, 8.5, 14.5, 7.5);
    vg.closePath();
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(4.5, 9.5);
    vg.lineTo(10.5, 3.5);
    vg.stroke();
    vg.beginPath();
    vg.roundedRect(8.5, 0.5, 2, 9, 1);
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.ellipse(9.5, 8.5, 1, 1);
    vg.stroke();
    vg.beginPath();
    vg.moveTo(3.5, 10.5);
    vg.lineTo(3.5, 8.5);
    vg.lineTo(6.5, 5.5);
    vg.lineTo(5, 5.5);
    vg.bezierTo(2, 5.5, 0.5, 7, 0.5, 10.5);
    vg.bezierTo(0.5, 12, 1, 12.5, 2, 12.5);
    vg.bezierTo(3, 12.5, 3.5, 12, 3.5, 10.5);
    vg.fill();
    vg.stroke();
}

pub fn cursorPipette(vg: nvg) void {
    vg.save();
    defer vg.restore();
    vg.translate(0, -15);
    vg.lineJoin(.round);
    vg.fillColor(nvg.rgbf(1, 1, 1));
    vg.strokeColor(nvg.rgb(0, 0, 0));
    vg.beginPath();
    vg.moveTo(10.5, 3.5);
    vg.lineTo(1.5, 12.5);
    vg.lineTo(1.5, 13.5);
    vg.lineTo(0.5, 14.5);
    vg.lineTo(0.5, 15.5);
    vg.lineTo(1.5, 15.5);
    vg.lineTo(2.5, 14.5);
    vg.lineTo(3.5, 14.5);
    vg.lineTo(12.5, 5.5);
    vg.fill();
    vg.stroke();
    vg.beginPath();
    vg.moveTo(11.5, 6.5);
    vg.lineTo(13.5, 6.5);
    vg.lineTo(13.5, 4.5);
    vg.lineTo(14.5, 3.5);
    vg.lineTo(15.5, 3.5);
    vg.lineTo(15.5, 1.5);
    vg.lineTo(14.5, 0.5);
    vg.lineTo(12.5, 0.5);
    vg.lineTo(12.5, 1.5);
    vg.lineTo(11.5, 2.5);
    vg.lineTo(9.5, 2.5);
    vg.lineTo(9.5, 4.5);
    vg.lineTo(10.5, 4.5);
    vg.lineTo(11.5, 5.5);
    vg.closePath();
    vg.fill();
    vg.stroke();
}

pub fn cursorMove(vg: nvg) void {
    vg.beginPath();
    vg.moveTo(-0.5, -0.5);
    vg.lineTo(-0.5, -3.5);
    vg.lineTo(-1.5, -3.5);
    vg.lineTo(-1.5, -4);
    vg.lineTo(0, -6.5);
    vg.lineTo(1, -6.5);
    vg.lineTo(2.5, -4);
    vg.lineTo(2.5, -3.5);
    vg.lineTo(1.5, -3.5);
    vg.lineTo(1.5, -0.5);
    vg.lineTo(4.5, -0.5);
    vg.lineTo(4.5, -1.5);
    vg.lineTo(5, -1.5);
    vg.lineTo(7.5, 0);
    vg.lineTo(7.5, 1);
    vg.lineTo(5, 2.5);
    vg.lineTo(4.5, 2.5);
    vg.lineTo(4.5, 1.5);
    vg.lineTo(1.5, 1.5);
    vg.lineTo(1.5, 4.5);
    vg.lineTo(2.5, 4.5);
    vg.lineTo(2.5, 5);
    vg.lineTo(1, 7.5);
    vg.lineTo(0, 7.5);
    vg.lineTo(-1.5, 5);
    vg.lineTo(-1.5, 4.5);
    vg.lineTo(-0.5, 4.5);
    vg.lineTo(-0.5, 1.5);
    vg.lineTo(-3.5, 1.5);
    vg.lineTo(-3.5, 2.5);
    vg.lineTo(-4, 2.5);
    vg.lineTo(-6.5, 1);
    vg.lineTo(-6.5, 0);
    vg.lineTo(-4, -1.5);
    vg.lineTo(-3.5, -1.5);
    vg.lineTo(-3.5, -0.5);
    vg.closePath();
    vg.fillColor(nvg.rgbf(1, 1, 1));
    vg.fill();
    vg.strokeColor(nvg.rgb(0, 0, 0));
    vg.stroke();
}
src/icons.zig
const build_options = @import("build_options");
const print = @import("std").debug.print;
const CPU = @import("emu").CPU;

// Flat 64 KiB address space shared by the emulated CPU and the test loader.
var mem: [0x10000]u8 = undefined;

/// Bus-tick callback handed to CPU.exec: services memory read/write requests
/// against `mem`. IO requests are ignored because the ZEX tests never use them.
fn tick(num_ticks: usize, pins: usize, userdata: usize) u64 {
    _ = num_ticks;
    _ = userdata;
    if ((pins & CPU.MREQ) != 0) {
        if ((pins & CPU.RD) != 0) {
            // Memory read: put the addressed byte on the data bus.
            return CPU.setData(pins, mem[CPU.getAddr(pins)]);
        }
        if ((pins & CPU.WR) != 0) {
            // Memory write: latch the data bus into memory.
            mem[CPU.getAddr(pins)] = CPU.getData(pins);
        }
    }
    return pins;
}

/// Emits a single character via std.debug.print.
fn putChar(c: u8) void {
    print("{c}", .{c});
}

/// Copies `bytes` into emulated memory starting at `start_addr`,
/// wrapping at the 64 KiB boundary.
fn copy(start_addr: u16, bytes: []const u8) void {
    var dst = start_addr;
    for (bytes) |b| {
        mem[dst] = b;
        dst +%= 1;
    }
}

/// Emulates the CP/M BDOS console-output calls the ZEX tests rely on, then
/// performs the RET the real BDOS would have executed.
/// Returns false when an unsupported call number was requested.
fn cpmBDOS(cpu: *CPU) bool {
    var ok = true;
    switch (cpu.regs[CPU.C]) {
        2 => {
            // BDOS function 2: print the character in register E.
            putChar(cpu.regs[CPU.E]);
        },
        9 => {
            // BDOS function 9: print the '$'-terminated string pointed to by DE.
            var str_addr = cpu.r16(CPU.DE);
            while (mem[str_addr] != '$') : (str_addr +%= 1) {
                putChar(mem[str_addr]);
            }
        },
        else => {
            print("Unhandled CP/M system call: {X}\n", .{cpu.regs[CPU.C]});
            ok = false;
        },
    }
    // Emulate a RET: pop the return address (low byte first) and jump to it.
    const lo: u16 = mem[cpu.SP];
    cpu.SP +%= 1;
    const hi: u16 = mem[cpu.SP];
    cpu.SP +%= 1;
    cpu.WZ = (hi << 8) | lo;
    cpu.PC = cpu.WZ;
    return ok;
}

/// Runs the currently loaded test image until it jumps to address 0 (finished)
/// or an unsupported BDOS call aborts it. Address 5 is the CP/M BDOS entry.
fn runTest(cpu: *CPU, name: []const u8) void {
    print("Running {s}:\n\n", .{name});
    var total_ticks: usize = 0;
    while (true) {
        total_ticks += cpu.exec(0, .{ .func = tick, .userdata = 0 });
        switch (cpu.PC) {
            // Warm-boot address: test done.
            0 => break,
            // BDOS entry point: service the call, abort on failure.
            5 => if (!cpmBDOS(cpu)) break,
            else => {},
        }
    }
    print("\n\n", .{});
}

/// Loads and runs the ZEXDOC (documented flags only) test image.
fn zexdoc() void {
    copy(0x0100, @embedFile("roms/zexdoc.com"));
    var cpu = CPU{ .SP = 0xF000, .PC = 0x0100 };
    runTest(&cpu, "ZEXDOC");
}

/// Loads and runs the ZEXALL (all flags, including undocumented) test image.
fn zexall() void {
    copy(0x0100, @embedFile("roms/zexall.com"));
    var cpu = CPU{ .SP = 0xF000, .PC = 0x0100 };
    runTest(&cpu, "ZEXALL");
}

pub fn main() void {
    if (build_options.zexdoc) {
        zexdoc();
    }
    if (build_options.zexall) {
        zexall();
    }
}
tests/z80zex.zig
/// zwp_linux_buffer_params_v1.destroy request handler.
/// NOTE(review): intentionally a no-op (the debug stub is commented out);
/// presumably the params object is reclaimed elsewhere — confirm.
fn destroy(context: *Context, object: Object) anyerror!void {
    // return error.DebugFunctionNotImplemented;
}

/// zwp_linux_buffer_params_v1.add request handler: queues one dmabuf plane
/// (fd plus layout info) on this params object for a later create request.
fn add(context: *Context, zwp_linux_buffer_params: Object, fd: i32, plane_idx: u32, offset: u32, stride: u32, modifier_hi: u32, modifier_lo: u32) anyerror!void {
    // The wire object's `container` field carries the pointer to our Params state.
    const params = @intToPtr(*Params, zwp_linux_buffer_params.container);
    try params.planes.writeItem(Plane{
        .fd = fd,
        .plane_idx = plane_idx,
        .offset = offset,
        .stride = stride,
        .modifier_hi = modifier_hi,
        .modifier_lo = modifier_lo,
    });
}

/// zwp_linux_buffer_params_v1.create request handler: imports the queued
/// dmabuf planes as an EGLImage and, on success, registers a new wl_buffer
/// and announces it to the client via the `created` event.
/// NOTE(review): `flags` is currently ignored.
fn create(context: *Context, zwp_linux_buffer_params: Object, width: i32, height: i32, format: u32, flags: u32) anyerror!void {
    const params = @intToPtr(*Params, zwp_linux_buffer_params.container);
    // Server-side id for the wl_buffer we may create below.
    const next_id: u32 = context.client.nextServerId();

    // EGL attribute list: key/value pairs terminated by EGL_NONE. The loop
    // below writes 12 entries per queued plane; 49 slots presumably allow for
    // 4 planes (4 * 12 + terminator) — more queued planes would overflow.
    var attribs: [49]isize = [_]isize{c.EGL_NONE} ** 49;
    var i: usize = 0;
    // TODO: this is currently wrong because it only references PLANE0
    // see: https://github.com/wayland-project/weston/blob/ad41ad968afbab4c56cb81becf79bb47d575d388/libweston/renderer-gl/gl-renderer.c#L1930
    while (params.planes.readItem()) |plane| {
        attribs[i] = c.EGL_WIDTH;
        i += 1;
        attribs[i] = width;
        i += 1;
        attribs[i] = c.EGL_HEIGHT;
        i += 1;
        attribs[i] = height;
        i += 1;
        attribs[i] = c.EGL_LINUX_DRM_FOURCC_EXT;
        i += 1;
        attribs[i] = @intCast(i32, format);
        i += 1;
        attribs[i] = c.EGL_DMA_BUF_PLANE0_FD_EXT;
        i += 1;
        attribs[i] = plane.fd;
        i += 1;
        attribs[i] = c.EGL_DMA_BUF_PLANE0_OFFSET_EXT;
        i += 1;
        attribs[i] = @intCast(i32, plane.offset);
        i += 1;
        attribs[i] = c.EGL_DMA_BUF_PLANE0_PITCH_EXT;
        i += 1;
        attribs[i] = @intCast(i32, plane.stride);
        i += 1;
    }
    switch (main.OUTPUT.backend) {
        .DRM => |drm| {
            const optional_image = c.eglCreateImage(drm.egl.display, null, c.EGL_LINUX_DMA_BUF_EXT, null, &attribs[0]);
            if (optional_image) |image| {
                const buffer = try dmabuf.newDmaBuffer(context.client, zwp_linux_buffer_params.id, next_id, width, height, format, image);
                const wl_buffer = prot.new_wl_buffer(next_id, context, @ptrToInt(buffer));
                // Announce the new buffer id before registering the object server-side.
                try prot.zwp_linux_buffer_params_v1_send_created(zwp_linux_buffer_params, next_id);
                try context.register(wl_buffer);
            }
            // NOTE(review): if eglCreateImage returns null, neither `created`
            // nor `failed` is sent — the client may wait forever. Confirm
            // whether a `failed` event should be emitted here.
        },
        else => {
            // Non-DRM backends cannot import dmabufs; report failure to the client.
            try prot.zwp_linux_buffer_params_v1_send_failed(zwp_linux_buffer_params);
        },
    }
}

/// zwp_linux_buffer_params_v1.create_immed request handler — not implemented.
fn create_immed(context: *Context, zwp_linux_buffer_params: Object, buffer_id: u32, width: i32, height: i32, format: u32, flags: u32) anyerror!void {
    return error.DebugFunctionNotImplemented;
}

/// Installs the handlers above into the global protocol dispatch table.
pub fn init() void {
    prot.ZWP_LINUX_BUFFER_PARAMS_V1 = prot.zwp_linux_buffer_params_v1_interface{
        .destroy = destroy,
        .add = add,
        .create = create,
        .create_immed = create_immed,
    };
}

// Imports intentionally placed at the end of the file (project convention).
const prot = @import("../protocols.zig");
const dmabuf = @import("../dmabuf.zig");
const main = @import("../main.zig");
const Params = @import("../dmabuf_params.zig").Params;
const Plane = @import("../dmabuf_params.zig").Plane;
const Context = @import("../client.zig").Context;
const Object = @import("../client.zig").Object;
const c = @cImport({
    @cInclude("EGL/egl.h");
    @cInclude("EGL/eglext.h");
});
src/implementations/zwp_linux_buffer_params.zig
const std = @import("std");
const testing = std.testing;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;

const mustache = @import("../mustache.zig");
const TemplateOptions = mustache.options.TemplateOptions;
const Element = mustache.Element;

const ref_counter = @import("ref_counter.zig");

const parsing = @import("parsing.zig");
const Delimiters = parsing.Delimiters;
const IndexBookmark = parsing.IndexBookmark;

/// Returns the parse-tree node type specialized for the given template options.
/// A node pairs one text part (static text or tag) with the bookkeeping needed
/// to implement mustache's stand-alone line trimming rules.
pub fn Node(comptime options: TemplateOptions) type {
    const RefCounter = ref_counter.RefCounter(options);
    // NOTE(review): "preseve" is a typo, but it must match the field name
    // declared in TemplateOptions — fix there first if ever renamed.
    const has_trimming = options.features.preseve_line_breaks_and_indentation;
    const allow_lambdas = options.features.lambdas == .Enabled;
    return struct {
        const Self = @This();

        pub const List = std.ArrayListUnmanaged(Self);
        pub const TextPart = parsing.TextPart(options);

        // Position of this node in the flat node list (used by the trimming
        // helpers below to look at neighbors).
        index: u32 = 0,
        // Tag identifier, when this part has one.
        identifier: ?[]const u8,
        // The text content / tag this node represents.
        text_part: TextPart,
        // Number of nodes nested under this one.
        children_count: u32 = 0,
        // Delimiters in effect for this node, if overridden.
        delimiters: ?Delimiters = null,
        // Raw inner text of the node, tracked only when lambdas are enabled
        // (zero-sized `void` otherwise, so disabled builds pay no cost).
        inner_text: if (allow_lambdas) struct {
            content: ?[]const u8 = null,
            ref_counter: RefCounter = .{},
            bookmark: ?IndexBookmark = null,
        } else void = if (allow_lambdas) .{} else {},

        /// Releases this node's ref-counted buffers.
        /// No-op when the options do not use ref counting.
        pub fn unRef(self: *Self, allocator: Allocator) void {
            if (comptime options.isRefCounted()) {
                self.text_part.unRef(allocator);
                if (allow_lambdas) {
                    self.inner_text.ref_counter.unRef(allocator);
                }
            }
        }

        /// Attempts to left-trim this node's static text. Trimming is only
        /// performed when the preceding nodes allow it (checked recursively
        /// via trimPreviousNodesRight); otherwise the part is permanently
        /// marked PreserveWhitespaces so it is not reconsidered.
        /// No-op when trimming support is compiled out.
        pub fn trimStandAlone(self: *Self, list: *List) void {
            if (comptime !has_trimming) return;
            var text_part = &self.text_part;
            if (text_part.part_type == .static_text) {
                switch (text_part.trimming.left) {
                    .PreserveWhitespaces => {},
                    // A part must never arrive here already left-trimmed.
                    .Trimmed => assert(false),
                    .AllowTrimming => {
                        const can_trim = trimPreviousNodesRight(list, self.index);
                        if (can_trim) {
                            text_part.trimLeft();
                        } else {
                            text_part.trimming.left = .PreserveWhitespaces;
                        }
                    },
                }
            }
        }

        /// Right-trims this node's static text at the end of parsing, and
        /// propagates any captured indentation to the following node.
        /// Non-stand-alone text is only trimmed when every node after it
        /// (scanned from the end of the list back to this node) is stand-alone.
        /// No-op when trimming support is compiled out or the list is empty.
        pub fn trimLast(self: *Self, allocator: Allocator, nodes: *List) void {
            if (comptime !has_trimming) return;
            if (nodes.items.len == 0) return;
            var text_part = &self.text_part;
            if (text_part.part_type == .static_text) {
                if (!text_part.is_stand_alone) {
                    var index = nodes.items.len - 1;
                    if (self.index == index) return;
                    assert(self.index < index);
                    // Walk backwards; bail out as soon as a non-stand-alone
                    // node is found between the end of the list and this node.
                    while (self.index < index) : (index -= 1) {
                        const node = &nodes.items[index];
                        if (!node.text_part.is_stand_alone) {
                            return;
                        }
                    }
                }
                if (text_part.trimRight()) |*indentation| {
                    if (self.index == nodes.items.len - 1) {
                        // The last tag can't produce any meaningful indentation, so we discard it
                        indentation.ref_counter.unRef(allocator);
                    } else {
                        // Hand the indentation to the next node (used by
                        // partial/parent tags — see getIndentation).
                        var next_node = &nodes.items[self.index + 1];
                        next_node.text_part.indentation = indentation.*;
                    }
                }
            }
        }

        /// Returns the indentation captured for a partial/parent tag, or null
        /// for other part types or when trimming support is compiled out.
        pub fn getIndentation(self: *const Self) ?[]const u8 {
            return if (comptime has_trimming) switch (self.text_part.part_type) {
                .partial,
                .parent,
                => if (self.text_part.indentation) |indentation| indentation.slice else null,
                else => null,
            } else null;
        }

        /// Returns the raw inner text of this node, or null when absent or
        /// when lambda support is compiled out.
        pub fn getInnerText(self: *const Self) ?[]const u8 {
            if (comptime allow_lambdas) {
                if (self.inner_text.content) |node_inner_text| {
                    return node_inner_text;
                }
            }
            return null;
        }

        /// Recursively decides whether the node before `index` permits the
        /// node at `index` to be treated as stand-alone, right-trimming the
        /// previous static text along the way. On success the trimmed-off
        /// indentation is attached to the current node. Returns true when
        /// trimming is allowed, false otherwise (and marks the previous part
        /// PreserveWhitespaces so it is not reconsidered).
        fn trimPreviousNodesRight(nodes: *List, index: u32) bool {
            if (comptime !has_trimming) return false;
            if (index > 0) {
                var current_node = &nodes.items[index];
                const prev_index = index - 1;
                var node = &nodes.items[prev_index];
                var text_part = &node.text_part;
                if (text_part.part_type == .static_text) {
                    switch (text_part.trimming.right) {
                        .AllowTrimming => |trimming| {
                            // Non standalone tags must check the previous node
                            const can_trim = trimming.stand_alone or trimPreviousNodesRight(nodes, prev_index);
                            if (can_trim) {
                                if (text_part.trimRight()) |indentation| {
                                    current_node.text_part.indentation = indentation;
                                }
                                return true;
                            } else {
                                text_part.trimming.right = .PreserveWhitespaces;
                                return false;
                            }
                        },
                        // Already decided on a previous pass.
                        .Trimmed => return true,
                        .PreserveWhitespaces => return false,
                    }
                } else if (text_part.is_stand_alone) {
                    // Depends on the previous node
                    return trimPreviousNodesRight(nodes, prev_index);
                } else {
                    // Interpolation tags must preserve whitespaces
                    return false;
                }
            } else {
                // No parent node, the first node can always be considered stand-alone
                return true;
            }
        }
    };
}
src/parsing/node.zig